hip_filename | hip_content | cuda_filename | cuda_content |
---|---|---|---|
0ba1776eabece1be6715c779db87a67a1977b6ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
int n = 100;
double *h_a;
double *h_b;
double *h_c;
double *d_a;
double *d_b;
double *d_c;
size_t bytes = n*sizeof(double);
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
int i;
for( i = 0; i < n; i++ ) {
h_a[i] = i;
h_b[i] = i;
}
hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 1024;
gridSize = (int)ceil((float)n/blockSize);
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost );
double sum = 0;
for(i=0; i<n; i++)
printf(" %f + %f =%f\n",h_a[i],h_b[i],h_c[i]);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
| 0ba1776eabece1be6715c779db87a67a1977b6ce.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
int n = 100;
double *h_a;
double *h_b;
double *h_c;
double *d_a;
double *d_b;
double *d_c;
size_t bytes = n*sizeof(double);
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
for( i = 0; i < n; i++ ) {
h_a[i] = i;
h_b[i] = i;
}
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 1024;
gridSize = (int)ceil((float)n/blockSize);
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
double sum = 0;
for(i=0; i<n; i++)
printf(" %f + %f =%f\n",h_a[i],h_b[i],h_c[i]);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
a8b629010cefcf0c88af1cfc237e37ee66c5dc6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ __host__ void print(float *result)
{
for(int k = 0; k < 3; k++)
{
for(int i = 0; i < N; i++)
{
for(int j = 0; j < N; j++)
printf("%f ",result[(i*N + j)*3 + k]);
printf("\n");
}
printf("\n");
}
}
__global__ void histogram_creation(int *A, int *hist, int no_of_threads) {
int global_x = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ int local_hist[N+1];
for(int i = threadIdx.x; i<=N; i = i + (blockDim.x ) ){
local_hist[i] = 0;
}
__syncthreads();
for(int i = global_x; i <= M; i = i + (blockDim.x * no_of_threads)) {
atomicAdd(&local_hist[A[i]],1);
}
__syncthreads();
for(int i = threadIdx.x ; i <= N; i = i + (blockDim.x) ) {
atomicAdd(&hist[i],local_hist[i]);
printf("%d histogram_local %d \n",local_hist[i],i);
}
__syncthreads();
} | a8b629010cefcf0c88af1cfc237e37ee66c5dc6a.cu | #include "includes.h"
__device__ __host__ void print(float *result)
{
for(int k = 0; k < 3; k++)
{
for(int i = 0; i < N; i++)
{
for(int j = 0; j < N; j++)
printf("%f ",result[(i*N + j)*3 + k]);
printf("\n");
}
printf("\n");
}
}
__global__ void histogram_creation(int *A, int *hist, int no_of_threads) {
int global_x = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ int local_hist[N+1];
for(int i = threadIdx.x; i<=N; i = i + (blockDim.x ) ){
local_hist[i] = 0;
}
__syncthreads();
for(int i = global_x; i <= M; i = i + (blockDim.x * no_of_threads)) {
atomicAdd(&local_hist[A[i]],1);
}
__syncthreads();
for(int i = threadIdx.x ; i <= N; i = i + (blockDim.x) ) {
atomicAdd(&hist[i],local_hist[i]);
printf("%d histogram_local %d \n",local_hist[i],i);
}
__syncthreads();
} |
aa566c82fd5b1e6e5ced9bedab953e6df20e0d46.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <MatKernel.hpp>
typedef float (*fntype)(float);
typedef float (*optype)(float,float);
__device__ float link_linear(float a) {return a;}
__device__ float link_logistic(float a) {return log(a/(1.0f - a));}
__device__ float mean_linear(float a) {return a;}
__device__ float mean_logistic(float a) {
if (a > 20.0f) {
return 1.0f;
} else if (a < -80.0f) {
return 0.0f;
} else {
return 1.0f/(1.0f + exp(-a));
}
}
__device__ float deriv_linear(float a, float b) {return b-a;}
__device__ float deriv_logistic(float a, float b) {return b-a;}
__device__ float deriv_maxp(float p, float t) {return (2.0f*t - 1.0f)*p*(1.0f-p);}
__device__ float deriv_svm(float p, float t) {
float tt = 2 * t - 1;
return (p * tt < 1.0f) ? tt : 0.0f;
}
#define EPS 1.0e-10f
__device__ float ll_linear(float a, float t) {return (t-a)*(a-t);}
__device__ float ll_logistic(float a, float b) {return log(a * b + (1.0f - a) * (1.0f - b) + EPS);}
__device__ float ll_maxp(float a, float t) {return a * t + (1.0f - a) * (1.0f - t) - 1.0f;}
__device__ float ll_svm(float p, float t) {
float tt = 2 * t - 1;
return min(0.0f, tt * p - 1);
}
__device__ const fntype linkfns[] = {
link_linear,
link_logistic,
link_logistic,
link_linear};
__device__ const fntype meanfns[] = {
mean_linear,
mean_logistic,
mean_logistic,
mean_linear};
__device__ const optype derivfns[] = {
deriv_linear,
deriv_logistic,
deriv_maxp,
deriv_svm};
__device__ const optype llfns[] = {
ll_linear,
ll_logistic,
ll_maxp,
ll_svm};
typedef double (*dfntype)(double);
typedef double (*doptype)(double,double);
__device__ double dlink_linear(double a) {return a;}
__device__ double dlink_logistic(double a) {return log(a/(1.0 - a));}
__device__ double dmean_linear(double a) {return a;}
__device__ double dmean_logistic(double a) {
double tmp;
if (a > 0) {
tmp = exp(-a);
return 1.0/(1.0 + tmp);
} else {
tmp = exp(a);
return tmp/(1.0 + tmp);
}
}
__device__ double dderiv_linear(double a, double b) {return b-a;}
__device__ double dderiv_logistic(double a, double b) {return b-a;}
__device__ double dderiv_maxp(double p, double t) {return (2.0*t - 1.0f)*p*(1.0-p);}
__device__ double dderiv_svm(double p, double t) {
double tt = 2 * t - 1;
return (p * tt < 1.0) ? tt : 0.0;
}
__device__ double dll_linear(double a, double t) {return (t-a)*(a-t);}
__device__ double dll_logistic(double a, double b) {return log(a * b + (1.0 - a) * (1.0 - b) + EPS);}
__device__ double dll_maxp(double a, double t) {return a * t + (1.0 - a) * (1.0 - t) - 1.0;}
__device__ double dll_svm(double p, double t) {
double tt = 2 * t - 1;
return min(0.0, tt * p - 1);
}
__device__ const dfntype dlinkfns[] = {
dlink_linear,
dlink_logistic,
dlink_logistic,
dlink_linear};
__device__ const dfntype dmeanfns[] = {
dmean_linear,
dmean_logistic,
dmean_logistic,
dmean_linear};
__device__ const doptype dderivfns[] = {
dderiv_linear,
dderiv_logistic,
dderiv_maxp,
dderiv_svm};
__device__ const doptype dllfns[] = {
dll_linear,
dll_logistic,
dll_maxp,
dll_svm};
void setsizes(int N, dim3 *gridp, int *nthreadsp) {
int nblocks = 1;
int nthreads = 1;
while (nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < 1024) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
gridp->y = 1 + (nblocks-1)/65536;
gridp->x = 1 + (nblocks-1)/gridp->y;
gridp->z = 1;
*nthreadsp = nthreads;
}
__global__ void __apply_preds(float *A, int *L, float *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
fntype fn = meanfns[L[i % nrows]];
C[i] = fn(A[i]);
}
}
int apply_preds(float *A, int *L, float *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __apply_preds), dim3(griddims),dim3(nthreads), 0, 0, A, L, C, nrows, ncols);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __apply_links(float *A, int *L, float *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
fntype fn = linkfns[L[i % nrows]];
C[i] = fn(A[i]);
}
}
int apply_links(float *A, int *L, float *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __apply_links), dim3(griddims),dim3(nthreads), 0, 0, A, L, C, nrows, ncols);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __apply_lls(float *A, float *B, int *L, float *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
optype op = llfns[L[i % nrows]];
C[i] = op(A[i],B[i]);
}
}
int apply_lls(float *A, float *B, int *L, float *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __apply_lls), dim3(griddims),dim3(nthreads), 0, 0, A, B, L, C, nrows, ncols);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __apply_derivs(float *A, float *B, int *L, float *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
optype op = derivfns[L[i % nrows]];
C[i] = op(A[i],B[i]);
}
}
int apply_derivs(float *A, float *B, int *L, float *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __apply_derivs), dim3(griddims),dim3(nthreads), 0, 0, A, B, L, C, nrows, ncols);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __apply_dpreds(double *A, int *L, double *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
dfntype fn = dmeanfns[L[i % nrows]];
C[i] = fn(A[i]);
}
}
int apply_dpreds(double *A, int *L, double *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __apply_dpreds), dim3(griddims),dim3(nthreads), 0, 0, A, L, C, nrows, ncols);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __apply_dlinks(double *A, int *L, double *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
dfntype fn = dlinkfns[L[i % nrows]];
C[i] = fn(A[i]);
}
}
int apply_dlinks(double *A, int *L, double *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __apply_dlinks), dim3(griddims),dim3(nthreads), 0, 0, A, L, C, nrows, ncols);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __apply_dlls(double *A, double *B, int *L, double *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
doptype op = dllfns[L[i % nrows]];
C[i] = op(A[i],B[i]);
}
}
int apply_dlls(double *A, double *B, int *L, double *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __apply_dlls), dim3(griddims),dim3(nthreads), 0, 0, A, B, L, C, nrows, ncols);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __apply_dderivs(double *A, double *B, int *L, double *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
doptype op = dderivfns[L[i % nrows]];
C[i] = op(A[i],B[i]);
}
}
int apply_dderivs(double *A, double *B, int *L, double *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __apply_dderivs), dim3(griddims),dim3(nthreads), 0, 0, A, B, L, C, nrows, ncols);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__forceinline__ __device__ void __gupdate(float grad, int i, int ihere, int jhere, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
float lr, ve, te, pve, ste, ngrad;
Sumsq[ihere] += grad * grad + epsilon;
if (addgrad) {
lr = (lrlen > 1) ? lrate[i] : lrate[0];
ve = (vexplen > 1) ? vexp[i] : vexp[0];
te = (texplen > 1) ? texp[i] : texp[0];
pve = (ve == 0) ? 1.0f : pow(Sumsq[ihere] * istep, ve);
ste = pow(istep, te);
ngrad = grad * lr * ste / pve;
atomicAdd(&MM[ihere], ngrad);
}
if (Mask != NULL) {
if (maskrows > 1) {
if (Mask[ihere] == 0) MM[ihere] = 0;
} else {
if (Mask[jhere] == 0) MM[ihere] = 0;
}
}
}
__global__ void __multADAGrad(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM,
float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
float aval, grad;
int i, j, ihere, jhere;
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (i = threadIdx.x; i < nrows; i += blockDim.x) {
aval = 0;
for (j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[i + nrows * Bic[j]];
}
grad = aval * Bdata[j];
ihere = i + nrows * Bir[j];
jhere = Bir[j];
__gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
}
}
}
__global__ void __multADAGradx(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM,
float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
float aval, grad;
int i, j, ihere, jhere;
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
i = threadIdx.x;
aval = 0;
for (j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[i + nrows * Bic[j]];
}
grad = aval * Bdata[j];
ihere = i + nrows * Bir[j];
jhere = Bir[j];
__gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
}
}
int multADAGrad(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM,
float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(256, max(1, 1 + (ncols-1)/nt));
hipLaunchKernelGGL(( __multADAGradx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, ncols, nnz, A, Bdata, Bir, Bic, MM, Sumsq, Mask, maskrows, lrate, lrlen,
vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
} else {
int nthreads = min(1024, 32*(1+(nrows-1)/32));
int nblocks = min(128, ncols);
hipLaunchKernelGGL(( __multADAGrad), dim3(nblocks),dim3(nthreads), 0, 0, nrows, ncols, nnz, A, Bdata, Bir, Bic, MM, Sumsq, Mask, maskrows, lrate, lrlen,
vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __ADAGrad(int nrows, int ncols, float *mm, float *um, float *ssq, float *mask, int maskr, float nw, float *ve, int nve,
float *ts, int nts, float *lr, int nlr, float eps, int doupdate) {
int ithread = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int nthreads = blockDim.x * gridDim.x * gridDim.y;
int i, irow, icol;
float mmval, umval, sqval, newss, veval, tsval, lrval, denom;
for (i = ithread; i < nrows*ncols; i += nthreads) {
icol = i / nrows;
irow = i - icol * nrows;
umval = um[i];
sqval = ssq[i];
newss = (nw * umval * umval) + (1 - nw) * sqval;
ssq[i] = newss;
if (doupdate) {
mmval = mm[i];
veval = (nve > 1) ? ve[irow] : ve[0];
tsval = (nts > 1) ? ts[irow] : ts[0];
lrval = (nlr > 1) ? lr[irow] : lr[0];
denom = (veval == 0.5f) ? sqrtf(newss) : powf(newss, veval);
denom = denom * tsval + eps;
mmval += (umval / denom) * lrval;
if (maskr > 0) {
if (maskr > 1) {
mmval *= mask[i];
} else {
mmval *= mask[icol];
}
}
mm[i] = mmval;
}
}
}
int ADAGrad(int nrows, int ncols, float *mm, float *um, float *ssq, float *mask, int maskr, float nw, float *ve, int nve, float *ts, int nts,
float *lrate, int nlrate, float eps, int doupdate) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __ADAGrad), dim3(griddims),dim3(nthreads), 0, 0, nrows, ncols, mm, um, ssq, mask, maskr, nw, ve, nve, ts, nts, lrate, nlrate, eps, doupdate);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
| aa566c82fd5b1e6e5ced9bedab953e6df20e0d46.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <MatKernel.hpp>
typedef float (*fntype)(float);
typedef float (*optype)(float,float);
__device__ float link_linear(float a) {return a;}
__device__ float link_logistic(float a) {return log(a/(1.0f - a));}
__device__ float mean_linear(float a) {return a;}
__device__ float mean_logistic(float a) {
if (a > 20.0f) {
return 1.0f;
} else if (a < -80.0f) {
return 0.0f;
} else {
return 1.0f/(1.0f + exp(-a));
}
}
__device__ float deriv_linear(float a, float b) {return b-a;}
__device__ float deriv_logistic(float a, float b) {return b-a;}
__device__ float deriv_maxp(float p, float t) {return (2.0f*t - 1.0f)*p*(1.0f-p);}
__device__ float deriv_svm(float p, float t) {
float tt = 2 * t - 1;
return (p * tt < 1.0f) ? tt : 0.0f;
}
#define EPS 1.0e-10f
__device__ float ll_linear(float a, float t) {return (t-a)*(a-t);}
__device__ float ll_logistic(float a, float b) {return log(a * b + (1.0f - a) * (1.0f - b) + EPS);}
__device__ float ll_maxp(float a, float t) {return a * t + (1.0f - a) * (1.0f - t) - 1.0f;}
__device__ float ll_svm(float p, float t) {
float tt = 2 * t - 1;
return min(0.0f, tt * p - 1);
}
__device__ const fntype linkfns[] = {
link_linear,
link_logistic,
link_logistic,
link_linear};
__device__ const fntype meanfns[] = {
mean_linear,
mean_logistic,
mean_logistic,
mean_linear};
__device__ const optype derivfns[] = {
deriv_linear,
deriv_logistic,
deriv_maxp,
deriv_svm};
__device__ const optype llfns[] = {
ll_linear,
ll_logistic,
ll_maxp,
ll_svm};
typedef double (*dfntype)(double);
typedef double (*doptype)(double,double);
__device__ double dlink_linear(double a) {return a;}
__device__ double dlink_logistic(double a) {return log(a/(1.0 - a));}
__device__ double dmean_linear(double a) {return a;}
__device__ double dmean_logistic(double a) {
double tmp;
if (a > 0) {
tmp = exp(-a);
return 1.0/(1.0 + tmp);
} else {
tmp = exp(a);
return tmp/(1.0 + tmp);
}
}
__device__ double dderiv_linear(double a, double b) {return b-a;}
__device__ double dderiv_logistic(double a, double b) {return b-a;}
__device__ double dderiv_maxp(double p, double t) {return (2.0*t - 1.0f)*p*(1.0-p);}
__device__ double dderiv_svm(double p, double t) {
double tt = 2 * t - 1;
return (p * tt < 1.0) ? tt : 0.0;
}
__device__ double dll_linear(double a, double t) {return (t-a)*(a-t);}
__device__ double dll_logistic(double a, double b) {return log(a * b + (1.0 - a) * (1.0 - b) + EPS);}
__device__ double dll_maxp(double a, double t) {return a * t + (1.0 - a) * (1.0 - t) - 1.0;}
__device__ double dll_svm(double p, double t) {
double tt = 2 * t - 1;
return min(0.0, tt * p - 1);
}
__device__ const dfntype dlinkfns[] = {
dlink_linear,
dlink_logistic,
dlink_logistic,
dlink_linear};
__device__ const dfntype dmeanfns[] = {
dmean_linear,
dmean_logistic,
dmean_logistic,
dmean_linear};
__device__ const doptype dderivfns[] = {
dderiv_linear,
dderiv_logistic,
dderiv_maxp,
dderiv_svm};
__device__ const doptype dllfns[] = {
dll_linear,
dll_logistic,
dll_maxp,
dll_svm};
void setsizes(int N, dim3 *gridp, int *nthreadsp) {
int nblocks = 1;
int nthreads = 1;
while (nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < 1024) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
gridp->y = 1 + (nblocks-1)/65536;
gridp->x = 1 + (nblocks-1)/gridp->y;
gridp->z = 1;
*nthreadsp = nthreads;
}
__global__ void __apply_preds(float *A, int *L, float *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
fntype fn = meanfns[L[i % nrows]];
C[i] = fn(A[i]);
}
}
int apply_preds(float *A, int *L, float *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
__apply_preds<<<griddims,nthreads>>>(A, L, C, nrows, ncols);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __apply_links(float *A, int *L, float *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
fntype fn = linkfns[L[i % nrows]];
C[i] = fn(A[i]);
}
}
int apply_links(float *A, int *L, float *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
__apply_links<<<griddims,nthreads>>>(A, L, C, nrows, ncols);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __apply_lls(float *A, float *B, int *L, float *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
optype op = llfns[L[i % nrows]];
C[i] = op(A[i],B[i]);
}
}
int apply_lls(float *A, float *B, int *L, float *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
__apply_lls<<<griddims,nthreads>>>(A, B, L, C, nrows, ncols);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __apply_derivs(float *A, float *B, int *L, float *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
optype op = derivfns[L[i % nrows]];
C[i] = op(A[i],B[i]);
}
}
int apply_derivs(float *A, float *B, int *L, float *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
__apply_derivs<<<griddims,nthreads>>>(A, B, L, C, nrows, ncols);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __apply_dpreds(double *A, int *L, double *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
dfntype fn = dmeanfns[L[i % nrows]];
C[i] = fn(A[i]);
}
}
int apply_dpreds(double *A, int *L, double *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
__apply_dpreds<<<griddims,nthreads>>>(A, L, C, nrows, ncols);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __apply_dlinks(double *A, int *L, double *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
dfntype fn = dlinkfns[L[i % nrows]];
C[i] = fn(A[i]);
}
}
int apply_dlinks(double *A, int *L, double *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
__apply_dlinks<<<griddims,nthreads>>>(A, L, C, nrows, ncols);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __apply_dlls(double *A, double *B, int *L, double *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
doptype op = dllfns[L[i % nrows]];
C[i] = op(A[i],B[i]);
}
}
int apply_dlls(double *A, double *B, int *L, double *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
__apply_dlls<<<griddims,nthreads>>>(A, B, L, C, nrows, ncols);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __apply_dderivs(double *A, double *B, int *L, double *C, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
doptype op = dderivfns[L[i % nrows]];
C[i] = op(A[i],B[i]);
}
}
int apply_dderivs(double *A, double *B, int *L, double *C, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
__apply_dderivs<<<griddims,nthreads>>>(A, B, L, C, nrows, ncols);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__forceinline__ __device__ void __gupdate(float grad, int i, int ihere, int jhere, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
float lr, ve, te, pve, ste, ngrad;
Sumsq[ihere] += grad * grad + epsilon;
if (addgrad) {
lr = (lrlen > 1) ? lrate[i] : lrate[0];
ve = (vexplen > 1) ? vexp[i] : vexp[0];
te = (texplen > 1) ? texp[i] : texp[0];
pve = (ve == 0) ? 1.0f : pow(Sumsq[ihere] * istep, ve);
ste = pow(istep, te);
ngrad = grad * lr * ste / pve;
atomicAdd(&MM[ihere], ngrad);
}
if (Mask != NULL) {
if (maskrows > 1) {
if (Mask[ihere] == 0) MM[ihere] = 0;
} else {
if (Mask[jhere] == 0) MM[ihere] = 0;
}
}
}
__global__ void __multADAGrad(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM,
float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
float aval, grad;
int i, j, ihere, jhere;
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (i = threadIdx.x; i < nrows; i += blockDim.x) {
aval = 0;
for (j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[i + nrows * Bic[j]];
}
grad = aval * Bdata[j];
ihere = i + nrows * Bir[j];
jhere = Bir[j];
__gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
}
}
}
__global__ void __multADAGradx(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM,
float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
float aval, grad;
int i, j, ihere, jhere;
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
i = threadIdx.x;
aval = 0;
for (j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[i + nrows * Bic[j]];
}
grad = aval * Bdata[j];
ihere = i + nrows * Bir[j];
jhere = Bir[j];
__gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
}
}
int multADAGrad(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM,
float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(256, max(1, 1 + (ncols-1)/nt));
__multADAGradx<<<nblocks,threadDim>>>(nrows, ncols, nnz, A, Bdata, Bir, Bic, MM, Sumsq, Mask, maskrows, lrate, lrlen,
vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
} else {
int nthreads = min(1024, 32*(1+(nrows-1)/32));
int nblocks = min(128, ncols);
__multADAGrad<<<nblocks,nthreads>>>(nrows, ncols, nnz, A, Bdata, Bir, Bic, MM, Sumsq, Mask, maskrows, lrate, lrlen,
vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __ADAGrad(int nrows, int ncols, float *mm, float *um, float *ssq, float *mask, int maskr, float nw, float *ve, int nve,
float *ts, int nts, float *lr, int nlr, float eps, int doupdate) {
int ithread = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int nthreads = blockDim.x * gridDim.x * gridDim.y;
int i, irow, icol;
float mmval, umval, sqval, newss, veval, tsval, lrval, denom;
for (i = ithread; i < nrows*ncols; i += nthreads) {
icol = i / nrows;
irow = i - icol * nrows;
umval = um[i];
sqval = ssq[i];
newss = (nw * umval * umval) + (1 - nw) * sqval;
ssq[i] = newss;
if (doupdate) {
mmval = mm[i];
veval = (nve > 1) ? ve[irow] : ve[0];
tsval = (nts > 1) ? ts[irow] : ts[0];
lrval = (nlr > 1) ? lr[irow] : lr[0];
denom = (veval == 0.5f) ? sqrtf(newss) : powf(newss, veval);
denom = denom * tsval + eps;
mmval += (umval / denom) * lrval;
if (maskr > 0) {
if (maskr > 1) {
mmval *= mask[i];
} else {
mmval *= mask[icol];
}
}
mm[i] = mmval;
}
}
}
int ADAGrad(int nrows, int ncols, float *mm, float *um, float *ssq, float *mask, int maskr, float nw, float *ve, int nve, float *ts, int nts,
float *lrate, int nlrate, float eps, int doupdate) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
__ADAGrad<<<griddims,nthreads>>>(nrows, ncols, mm, um, ssq, mask, maskr, nw, ve, nve, ts, nts, lrate, nlrate, eps, doupdate);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
|
4137651fbebb3a9a936b43812e4a18b1e119d718.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include "cudnnUtils.h"
#include <ops/declarable/helpers/convolutions.h>
namespace sd {
namespace ops {
namespace platforms {
//////////////////////////////////////////////////////////////////////////
void checkConv2dCUDNNPadAsymmetric(NDArray* &input, NDArray* &gradI,
const int iH, const int iW,
const int oH, const int oW,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const bool isNCHW) {
const auto pHsum = ((oH - 1) * sH + ((kH - 1) * dH + 1) - iH);
const auto pWsum = ((oW - 1) * sW + ((kW - 1) * dW + 1) - iW);
const bool isPHasymm = pH != (pHsum - pH);
const bool isPWasymm = pW != (pWsum - pW);
if(!isPHasymm && !isPWasymm)
return;
std::vector<Nd4jLong> newShape = input->getShapeAsVector();
const int iHposition = isNCHW ? 2 : 1;
if(isPHasymm)
newShape[iHposition] += 1;
if(isPWasymm)
newShape[iHposition + 1] += 1;
NDArray* newInput = new NDArray(input->ordering(), newShape, input->dataType(), input->getContext());
if(isNCHW)
(*newInput)({0,0, 0,0, 0,input->sizeAt(2), 0,input->sizeAt(3)}).assign(input);
else
(*newInput)({0,0, 0,input->sizeAt(1), 0,input->sizeAt(2), 0,0}).assign(input);
input = newInput;
if(gradI != nullptr)
gradI = new NDArray(gradI->ordering(), newShape, gradI->dataType(), gradI->getContext());
}
//////////////////////////////////////////////////////////////////////////
void checkConv3dCUDNNPadAsymmetric(NDArray* &input, NDArray* &gradI,
const int iD, const int iH, const int iW,
const int oD, const int oH, const int oW,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const bool isNCDHW) {
const auto pDsum = ((oD - 1) * sD + ((kD - 1) * dD + 1) - iD);
const auto pHsum = ((oH - 1) * sH + ((kH - 1) * dH + 1) - iH);
const auto pWsum = ((oW - 1) * sW + ((kW - 1) * dW + 1) - iW);
const bool isPDasymm = pD != (pDsum - pD);
const bool isPHasymm = pH != (pHsum - pH);
const bool isPWasymm = pW != (pWsum - pW);
if(!isPDasymm && !isPHasymm && !isPWasymm)
return;
std::vector<Nd4jLong> newShape = input->getShapeAsVector();
const int iDposition = isNCDHW ? 2 : 1;
if(isPDasymm)
newShape[iDposition] += 1;
if(isPHasymm)
newShape[iDposition + 1] += 1;
if(isPWasymm)
newShape[iDposition + 2] += 1;
NDArray* newInput = new NDArray(input->ordering(), newShape, input->dataType(), input->getContext());
if(isNCDHW)
(*newInput)({0,0, 0,0, 0,input->sizeAt(2), 0,input->sizeAt(3), 0,input->sizeAt(4)}).assign(input);
else
(*newInput)({0,0, 0,input->sizeAt(1), 0,input->sizeAt(2), 0,input->sizeAt(3), 0,0}).assign(input);
input = newInput;
if(gradI != nullptr)
gradI = new NDArray(gradI->ordering(), newShape, gradI->dataType(), gradI->getContext());
}
//////////////////////////////////////////////////////////////////////////
void pooling2dCUDNN(const LaunchContext* context,
const NDArray* input, NDArray* output,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const bool isNCHW, const cudnnPoolingMode_t mode) {
int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width;
int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH);
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: can't set stream for cuDNN", err);
cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW);
else
err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1 && output->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(z, format, cudnnDataType(output->dataType()), bS, oC, oH, oW);
else
err = cudnnSetTensor4dDescriptorEx(z, cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), output->strideAt(indOoH), output->strideAt(indOoH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for output failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPooling2dDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, kH, kW, pH, pW, sH, sW);
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetPooling2dDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input});
// run calculation
err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, z, output->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnPoolingForward failed", err);
auto cudaErr = hipStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling2dCUDNN: hipStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input});
}
//////////////////////////////////////////////////////////////////////////
void pooling2dBpCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* gradO,
NDArray* gradI,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const bool isNCHW, const cudnnPoolingMode_t mode) {
int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width;
int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH);
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: can't set stream for cuDNN", err);
cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input and gradI descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW);
else
err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input/gradI failed", err);
// gradO descriptor
cudnnTensorDescriptor_t dz;
cudnnCreateTensorDescriptor(&dz);
if(gradO->ews() == 1 && gradO->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(dz, format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW);
else
err = cudnnSetTensor4dDescriptorEx(dz, cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for gradO failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPooling2dDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, kH, kW, pH, pW, sH, sW);
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetPooling2dDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({gradI}, {input, gradO});
// run calculation for gradI
err = cudnnPoolingBackward(*handle, pooling, alpha, dz, gradO->specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnPoolingBackward failed", err);
auto cudaErr = hipStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling2dBpCUDNN: hipStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({gradI}, {input, gradO});
}
//////////////////////////////////////////////////////////////////////////
void pooling3dCUDNN(const LaunchContext* context,
const NDArray* input, NDArray* output,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const bool isNCDHW, const cudnnPoolingMode_t mode) {
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: can't set stream for cuDNN", err);
const int numDims = 5;
int bS, iC, iD, iH, iW, oC, oD, oH, oW; // batch size, input channels, input depth/height/width, output channels, output depth/height/width;
int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, 0, *input, *output, bS, iC, iD, iH, iW, oC, oD, oH, oW, indIOioC, indIOioD, indWiC, indWoC, indWkD);
const int pSizes[] = {pD, pH, pW};
const int sSizes[] = {sD, sH, sW};
const int kSizes[] = {kD, kH, kW};
const int xShape[] = {bS, iC, iD, iH, iW};
const int zShape[] = {bS, oC, oD, oH, oW};
const int xStrides[] = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3), (int)input->strideAt(4)};
const int zStrides[] = {(int)output->strideAt(0), (int)output->strideAt(1), (int)output->strideAt(2), (int)output->strideAt(3), (int)output->strideAt(4)};
cudnnTensorFormat_t format = isNCDHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(x, format, cudnnDataType(input->dataType()), numDims, xShape);
else
err = cudnnSetTensorNdDescriptor(x, cudnnDataType(input->dataType()), numDims, xShape, xStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1 && output->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(z, format, cudnnDataType(output->dataType()), numDims, zShape);
else
err = cudnnSetTensorNdDescriptor(z, cudnnDataType(output->dataType()), numDims, zShape, zStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for output failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPoolingNdDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, numDims - 2, kSizes, pSizes, sSizes);
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetPoolingNdDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input});
// run calculation
err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, z, output->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnPoolingForward failed", err);
auto cudaErr = hipStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling3dCUDNN: hipStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input});
}
//////////////////////////////////////////////////////////////////////////
void pooling3dBpCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* gradO,
NDArray* gradI,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const bool isNCDHW, const cudnnPoolingMode_t mode) {
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: can't set stream for cuDNN", err);
const int numDims = 5;
int bS, iC, iD, iH, iW, oC, oD, oH, oW; // batch size, input channels, input depth/height/width, output channels, output depth/height/width;
int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, 0, *input, *gradO, bS, iC, iD, iH, iW, oC, oD, oH, oW, indIOioC, indIOioD, indWiC, indWoC, indWkD);
const int pSizes[] = {pD, pH, pW};
const int sSizes[] = {sD, sH, sW};
const int kSizes[] = {kD, kH, kW};
const int xShape[] = {bS, iC, iD, iH, iW};
const int dzShape[] = {bS, oC, oD, oH, oW};
const int xStrides[] = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3), (int)input->strideAt(4)};
const int dzStrides[] = {(int)gradO->strideAt(0), (int)gradO->strideAt(1), (int)gradO->strideAt(2), (int)gradO->strideAt(3), (int)gradO->strideAt(4)};
cudnnTensorFormat_t format = isNCDHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input and gradI descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(x, format, cudnnDataType(input->dataType()), numDims, xShape);
else
err = cudnnSetTensorNdDescriptor(x, cudnnDataType(input->dataType()), numDims, xShape, xStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input/gradI failed", err);
// gradO descriptor
cudnnTensorDescriptor_t dz;
cudnnCreateTensorDescriptor(&dz);
if(gradO->ews() == 1 && gradO->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(dz, format, cudnnDataType(gradO->dataType()), numDims, dzShape);
else
err = cudnnSetTensorNdDescriptor(dz, cudnnDataType(gradO->dataType()), numDims, dzShape, dzStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for gradO failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPoolingNdDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, numDims - 2, kSizes, pSizes, sSizes);
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetPoolingNdDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
// cudnn maxpool2d_bp api requires ff output as one of input arguments
if(mode == CUDNN_POOLING_MAX) {
NDArray temp(gradO);
NDArray::prepareSpecialUse({gradI}, {input, gradO, &temp});
// run ff calculation
err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, dz, temp.specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnPoolingForward failed", err);
// run bp calculation for gradI
err = cudnnPoolingBackward(*handle, pooling, alpha, dz, temp.specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnPoolingBackward failed", err);
NDArray::registerSpecialUse({gradI}, {input, gradO, &temp});
}
else {
NDArray::prepareSpecialUse({gradI}, {input, gradO});
// run bp calculation for gradI
err = cudnnPoolingBackward(*handle, pooling, alpha, dz, gradO->specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnPoolingBackward failed", err);
NDArray::registerSpecialUse({gradI}, {input, gradO});
}
auto cudaErr = hipStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling3dBpCUDNN: hipStreamSynchronize failed !", cudaErr);
}
}
}
}
| 4137651fbebb3a9a936b43812e4a18b1e119d718.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include "cudnnUtils.h"
#include <ops/declarable/helpers/convolutions.h>
namespace sd {
namespace ops {
namespace platforms {
//////////////////////////////////////////////////////////////////////////
void checkConv2dCUDNNPadAsymmetric(NDArray* &input, NDArray* &gradI,
const int iH, const int iW,
const int oH, const int oW,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const bool isNCHW) {
const auto pHsum = ((oH - 1) * sH + ((kH - 1) * dH + 1) - iH);
const auto pWsum = ((oW - 1) * sW + ((kW - 1) * dW + 1) - iW);
const bool isPHasymm = pH != (pHsum - pH);
const bool isPWasymm = pW != (pWsum - pW);
if(!isPHasymm && !isPWasymm)
return;
std::vector<Nd4jLong> newShape = input->getShapeAsVector();
const int iHposition = isNCHW ? 2 : 1;
if(isPHasymm)
newShape[iHposition] += 1;
if(isPWasymm)
newShape[iHposition + 1] += 1;
NDArray* newInput = new NDArray(input->ordering(), newShape, input->dataType(), input->getContext());
if(isNCHW)
(*newInput)({0,0, 0,0, 0,input->sizeAt(2), 0,input->sizeAt(3)}).assign(input);
else
(*newInput)({0,0, 0,input->sizeAt(1), 0,input->sizeAt(2), 0,0}).assign(input);
input = newInput;
if(gradI != nullptr)
gradI = new NDArray(gradI->ordering(), newShape, gradI->dataType(), gradI->getContext());
}
//////////////////////////////////////////////////////////////////////////
void checkConv3dCUDNNPadAsymmetric(NDArray* &input, NDArray* &gradI,
const int iD, const int iH, const int iW,
const int oD, const int oH, const int oW,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const bool isNCDHW) {
const auto pDsum = ((oD - 1) * sD + ((kD - 1) * dD + 1) - iD);
const auto pHsum = ((oH - 1) * sH + ((kH - 1) * dH + 1) - iH);
const auto pWsum = ((oW - 1) * sW + ((kW - 1) * dW + 1) - iW);
const bool isPDasymm = pD != (pDsum - pD);
const bool isPHasymm = pH != (pHsum - pH);
const bool isPWasymm = pW != (pWsum - pW);
if(!isPDasymm && !isPHasymm && !isPWasymm)
return;
std::vector<Nd4jLong> newShape = input->getShapeAsVector();
const int iDposition = isNCDHW ? 2 : 1;
if(isPDasymm)
newShape[iDposition] += 1;
if(isPHasymm)
newShape[iDposition + 1] += 1;
if(isPWasymm)
newShape[iDposition + 2] += 1;
NDArray* newInput = new NDArray(input->ordering(), newShape, input->dataType(), input->getContext());
if(isNCDHW)
(*newInput)({0,0, 0,0, 0,input->sizeAt(2), 0,input->sizeAt(3), 0,input->sizeAt(4)}).assign(input);
else
(*newInput)({0,0, 0,input->sizeAt(1), 0,input->sizeAt(2), 0,input->sizeAt(3), 0,0}).assign(input);
input = newInput;
if(gradI != nullptr)
gradI = new NDArray(gradI->ordering(), newShape, gradI->dataType(), gradI->getContext());
}
//////////////////////////////////////////////////////////////////////////
void pooling2dCUDNN(const LaunchContext* context,
const NDArray* input, NDArray* output,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const bool isNCHW, const cudnnPoolingMode_t mode) {
int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width;
int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH);
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: can't set stream for cuDNN", err);
cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW);
else
err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1 && output->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(z, format, cudnnDataType(output->dataType()), bS, oC, oH, oW);
else
err = cudnnSetTensor4dDescriptorEx(z, cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), output->strideAt(indOoH), output->strideAt(indOoH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for output failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPooling2dDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, kH, kW, pH, pW, sH, sW);
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetPooling2dDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input});
// run calculation
err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, z, output->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnPoolingForward failed", err);
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling2dCUDNN: cudaStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input});
}
//////////////////////////////////////////////////////////////////////////
void pooling2dBpCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* gradO,
NDArray* gradI,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const bool isNCHW, const cudnnPoolingMode_t mode) {
int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width;
int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH);
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: can't set stream for cuDNN", err);
cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input and gradI descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW);
else
err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input/gradI failed", err);
// gradO descriptor
cudnnTensorDescriptor_t dz;
cudnnCreateTensorDescriptor(&dz);
if(gradO->ews() == 1 && gradO->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(dz, format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW);
else
err = cudnnSetTensor4dDescriptorEx(dz, cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for gradO failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPooling2dDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, kH, kW, pH, pW, sH, sW);
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetPooling2dDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({gradI}, {input, gradO});
// run calculation for gradI
err = cudnnPoolingBackward(*handle, pooling, alpha, dz, gradO->specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnPoolingBackward failed", err);
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling2dBpCUDNN: cudaStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({gradI}, {input, gradO});
}
//////////////////////////////////////////////////////////////////////////
void pooling3dCUDNN(const LaunchContext* context,
const NDArray* input, NDArray* output,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const bool isNCDHW, const cudnnPoolingMode_t mode) {
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: can't set stream for cuDNN", err);
const int numDims = 5;
int bS, iC, iD, iH, iW, oC, oD, oH, oW; // batch size, input channels, input depth/height/width, output channels, output depth/height/width;
int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, 0, *input, *output, bS, iC, iD, iH, iW, oC, oD, oH, oW, indIOioC, indIOioD, indWiC, indWoC, indWkD);
const int pSizes[] = {pD, pH, pW};
const int sSizes[] = {sD, sH, sW};
const int kSizes[] = {kD, kH, kW};
const int xShape[] = {bS, iC, iD, iH, iW};
const int zShape[] = {bS, oC, oD, oH, oW};
const int xStrides[] = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3), (int)input->strideAt(4)};
const int zStrides[] = {(int)output->strideAt(0), (int)output->strideAt(1), (int)output->strideAt(2), (int)output->strideAt(3), (int)output->strideAt(4)};
cudnnTensorFormat_t format = isNCDHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
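// the ...Ex variant takes a tensor format and assumes a fully packed layout, so it is used only when the
// NDArray is contiguous ('c' order, ews == 1); otherwise the explicit strides are passed instead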
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(x, format, cudnnDataType(input->dataType()), numDims, xShape);
else
err = cudnnSetTensorNdDescriptor(x, cudnnDataType(input->dataType()), numDims, xShape, xStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1 && output->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(z, format, cudnnDataType(output->dataType()), numDims, zShape);
else
err = cudnnSetTensorNdDescriptor(z, cudnnDataType(output->dataType()), numDims, zShape, zStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for output failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPoolingNdDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, numDims - 2, kSizes, pSizes, sSizes);
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetPoolingNdDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input});
// run calculation
err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, z, output->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnPoolingForward failed", err);
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling3dCUDNN: cudaStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input});
}
//////////////////////////////////////////////////////////////////////////
void pooling3dBpCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* gradO,
NDArray* gradI,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const bool isNCDHW, const cudnnPoolingMode_t mode) {
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: can't set stream for cuDNN", err);
const int numDims = 5;
int bS, iC, iD, iH, iW, oC, oD, oH, oW; // batch size, input channels, input depth/height/width, output channels, output depth/height/width;
int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, 0, *input, *gradO, bS, iC, iD, iH, iW, oC, oD, oH, oW, indIOioC, indIOioD, indWiC, indWoC, indWkD);
const int pSizes[] = {pD, pH, pW};
const int sSizes[] = {sD, sH, sW};
const int kSizes[] = {kD, kH, kW};
const int xShape[] = {bS, iC, iD, iH, iW};
const int dzShape[] = {bS, oC, oD, oH, oW};
const int xStrides[] = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3), (int)input->strideAt(4)};
const int dzStrides[] = {(int)gradO->strideAt(0), (int)gradO->strideAt(1), (int)gradO->strideAt(2), (int)gradO->strideAt(3), (int)gradO->strideAt(4)};
cudnnTensorFormat_t format = isNCDHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input and gradI descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(x, format, cudnnDataType(input->dataType()), numDims, xShape);
else
err = cudnnSetTensorNdDescriptor(x, cudnnDataType(input->dataType()), numDims, xShape, xStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input/gradI failed", err);
// gradO descriptor
cudnnTensorDescriptor_t dz;
cudnnCreateTensorDescriptor(&dz);
if(gradO->ews() == 1 && gradO->ordering() == 'c')
err = cudnnSetTensorNdDescriptorEx(dz, format, cudnnDataType(gradO->dataType()), numDims, dzShape);
else
err = cudnnSetTensorNdDescriptor(dz, cudnnDataType(gradO->dataType()), numDims, dzShape, dzStrides);
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for gradO failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPoolingNdDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, numDims - 2, kSizes, pSizes, sSizes);
if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetPoolingNdDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
// cudnnPoolingBackward for max pooling requires the forward-pass output (y) as an input argument, so recompute the forward pass into a temporary array first
if(mode == CUDNN_POOLING_MAX) {
NDArray temp(gradO);
NDArray::prepareSpecialUse({gradI}, {input, gradO, &temp});
// run ff calculation
err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, dz, temp.specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnPoolingForward failed", err);
// run bp calculation for gradI
err = cudnnPoolingBackward(*handle, pooling, alpha, dz, temp.specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnPoolingBackward failed", err);
NDArray::registerSpecialUse({gradI}, {input, gradO, &temp});
}
else {
NDArray::prepareSpecialUse({gradI}, {input, gradO});
// run bp calculation for gradI
err = cudnnPoolingBackward(*handle, pooling, alpha, dz, gradO->specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnPoolingBackward failed", err);
NDArray::registerSpecialUse({gradI}, {input, gradO});
}
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling3dBpCUDNN: cudaStreamSynchronize failed !", cudaErr);
}
}
}
}
|
4241d13b4daa9aa4f27ed9e3916f579f0f288a73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
#include "../shape/head.h"
}
__device__ void dev_lghtcrv_splint(double *xa,double *ya,double *y2a,int n,double x,double *y)
{
int klo,khi,k;
double h,b,a;
klo=1;
khi=n;
while (khi-klo > 1) {
k=(khi+klo) >> 1;
if (xa[k] > x) khi=k;
else klo=k;
}
h=xa[khi]-xa[klo];
if (h == 0.0) printf("Bad XA input to routine SPLINT (dev_lghtcrv_splint)\n");
a=(xa[khi]-x)/h;
b=(x-xa[klo])/h;
*y=a*ya[klo]+b*ya[khi]+((a*a*a-a)*y2a[klo]+(b*b*b-b)*y2a[khi])*(h*h)/6.0;
}
__global__ void lghtcrv_splint_krnl(struct dat_t *ddat, int set, int n, int ncalc)
{
/* Single-threaded kernel where n = lghtcrv->n */
/* Parameters:
* double *xa - lghtcrv->x
* double *ya - lghtcrv->y
* double *y2a - lghtcrv->y2
* int n - ncalc
* double x - lghtcrv->t[i][lghtcrv->v0]
* double *y - lghtcrv->fit[i] *
*
* This is a wrapper kernel that launches the device function. This is done
* for memory access reasons. */
int i, v;
double interp;
if (threadIdx.x == 0) {
for (i=1; i<=n; i++) {
ddat->set[set].desc.lghtcrv.fit[i] = 0.0;
for (v=0; v<ddat->set[set].desc.lghtcrv.nviews; v++) {
dev_lghtcrv_splint(ddat->set[set].desc.lghtcrv.x,
ddat->set[set].desc.lghtcrv.y,
ddat->set[set].desc.lghtcrv.y2,
ncalc,
ddat->set[set].desc.lghtcrv.t[i][ddat->set[set].desc.lghtcrv.v0],
&interp);
ddat->set[set].desc.lghtcrv.fit[i] += interp;
}
ddat->set[set].desc.lghtcrv.fit[i] /= ddat->set[set].desc.lghtcrv.nviews;
}
}
}
| 4241d13b4daa9aa4f27ed9e3916f579f0f288a73.cu | extern "C" {
#include "../shape/head.h"
}
__device__ void dev_lghtcrv_splint(double *xa,double *ya,double *y2a,int n,double x,double *y)
{
int klo,khi,k;
double h,b,a;
klo=1;
khi=n;
while (khi-klo > 1) {
k=(khi+klo) >> 1;
if (xa[k] > x) khi=k;
else klo=k;
}
h=xa[khi]-xa[klo];
if (h == 0.0) printf("Bad XA input to routine SPLINT (dev_lghtcrv_splint)\n");
a=(xa[khi]-x)/h;
b=(x-xa[klo])/h;
*y=a*ya[klo]+b*ya[khi]+((a*a*a-a)*y2a[klo]+(b*b*b-b)*y2a[khi])*(h*h)/6.0;
}
__global__ void lghtcrv_splint_krnl(struct dat_t *ddat, int set, int n, int ncalc)
{
/* Single-threaded kernel where n = lghtcrv->n */
/* Parameters:
* double *xa - lghtcrv->x
* double *ya - lghtcrv->y
* double *y2a - lghtcrv->y2
* int n - ncalc
* double x - lghtcrv->t[i][lghtcrv->v0]
* double *y - lghtcrv->fit[i] *
*
* This is a wrapper kernel that launches the device function. This is done
* for memory access reasons. */
int i, v;
double interp;
if (threadIdx.x == 0) {
for (i=1; i<=n; i++) {
ddat->set[set].desc.lghtcrv.fit[i] = 0.0;
for (v=0; v<ddat->set[set].desc.lghtcrv.nviews; v++) {
dev_lghtcrv_splint(ddat->set[set].desc.lghtcrv.x,
ddat->set[set].desc.lghtcrv.y,
ddat->set[set].desc.lghtcrv.y2,
ncalc,
ddat->set[set].desc.lghtcrv.t[i][ddat->set[set].desc.lghtcrv.v0],
&interp);
ddat->set[set].desc.lghtcrv.fit[i] += interp;
}
ddat->set[set].desc.lghtcrv.fit[i] /= ddat->set[set].desc.lghtcrv.nviews;
}
}
}
|
9d0358475984ea1578d388266299e9f1b6350f0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <stdio.h>
#include <math.h> /* ceil */
#include <map>
#include <vector>
// Max Threads per block in GeForce 210
#define TxB 512
__global__
void image_equalization_kernel(const uchar4* const rgbaImage,
uchar4* const outputImage,
int numRows, int numCols)
{
// The uchar4 components map to RGBA as:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
// Note: the alpha channel is ignored
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < numRows*numCols){
uchar4 px = rgbaImage[i]; // thread pixel to process
//printf( "Antes: R: %i G: %i B %i \n",px.x,px.y,px.z );
__shared__ unsigned int channelR[256];
__shared__ unsigned int channelG[256];
__shared__ unsigned int channelB[256];
__shared__ unsigned int channelRacc[256];
__shared__ unsigned int channelGacc[256];
__shared__ unsigned int channelBacc[256];
__shared__ unsigned char channelReq[256];
__shared__ unsigned char channelGeq[256];
__shared__ unsigned char channelBeq[256];
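// Note: these __shared__ arrays are private to each block, and only the thread with global index 0
// zeroes them and builds the cumulative sums, so the lookup tables below are defined only for block 0;
// with more than one block the remaining blocks read uninitialized shared memory. A multi-block
// version would need the histogram/CDF kept in global memory (or split across separate kernels).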
if( i == 0 ){
for(int q = 0; q < 256; q++ ){
channelR[q] = 0;
channelG[q] = 0;
channelB[q] = 0;
channelRacc[q] = 0;
channelGacc[q] = 0;
channelBacc[q] = 0;
}
}
__syncthreads();
// Compute the histogram
atomicAdd( &channelR[ px.x ] , 1);
atomicAdd( &channelG[ px.y ] , 1);
atomicAdd( &channelB[ px.z ] , 1);
__syncthreads();
if (i == 0){
// Print the histogram
// printf("Canal R \n");
// for(int k = 0; k < 256; k++)
// printf("%i - %i \n", k, channelR[k]);
// printf("Canal G \n");
// for(int k = 0; k < 256; k++)
// printf("%i - %i \n", k, channelG[k]);
// printf("Canal B \n");
// for(int k = 0; k < 256; k++)
// printf("%i - %i \n", k, channelB[k]);
// Cumulative sums
channelRacc[0] = channelR[0];
channelGacc[0] = channelG[0];
channelBacc[0] = channelB[0];
for( int j = 1; j < 256; j++ ){
channelRacc[j] = channelRacc[j-1] + channelR[j];
channelGacc[j] = channelGacc[j-1] + channelG[j];
channelBacc[j] = channelBacc[j-1] + channelB[j];
}
// printf("Canal R acc \n");
// for(int k = 0; k < 256; k++)
// printf("%i - %i \n", k, channelRacc[k]);
}
__syncthreads();
// Equalize using the first 256 threads.
if(i < 256){
channelReq[i] = round((float)channelRacc[i] * 255.0/( (float)numRows*numCols ) );
channelGeq[i] = round((float)channelGacc[i] * 255.0/( (float)numRows*numCols ) );
channelBeq[i] = round((float)channelBacc[i] * 255.0/( (float)numRows*numCols ) );
}
__syncthreads();
outputImage[i].x = channelReq[px.x];
outputImage[i].y = channelGeq[px.y];
outputImage[i].z = channelBeq[px.z];
__syncthreads();
//printf( "Despues: R: %i G: %i B %i \n",outputImage[i].x,outputImage[i].y,outputImage[i].z );
}
}
void image_equalization(uchar4 * const d_rgbaImage,
uchar4* const d_outputImage, size_t numRows, size_t numCols)
{
// Since the relative position of the pixels does not matter
// in this algorithm, the strategy for assigning threads to
// blocks and grids is simply to cover all of the pixels
// with threads along the X axis
long long int total_px = numRows * numCols; // total pixels
long int grids_n = ceil(total_px / (double)TxB); // number of blocks, rounded up so every pixel is covered
const dim3 blockSize(TxB, 1, 1);
const dim3 gridSize(grids_n, 1, 1);
hipLaunchKernelGGL(( image_equalization_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_outputImage, numRows, numCols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
} | 9d0358475984ea1578d388266299e9f1b6350f0d.cu | #include "utils.h"
#include <stdio.h>
#include <math.h> /* ceil */
#include <map>
#include <vector>
// Max Threads per block in GeForce 210
#define TxB 512
__global__
void image_equalization_kernel(const uchar4* const rgbaImage,
uchar4* const outputImage,
int numRows, int numCols)
{
// The uchar4 components map to RGBA as:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
// Note: the alpha channel is ignored
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < numRows*numCols){
uchar4 px = rgbaImage[i]; // thread pixel to process
//printf( "Antes: R: %i G: %i B %i \n",px.x,px.y,px.z );
__shared__ unsigned int channelR[256];
__shared__ unsigned int channelG[256];
__shared__ unsigned int channelB[256];
__shared__ unsigned int channelRacc[256];
__shared__ unsigned int channelGacc[256];
__shared__ unsigned int channelBacc[256];
__shared__ unsigned char channelReq[256];
__shared__ unsigned char channelGeq[256];
__shared__ unsigned char channelBeq[256];
if( i == 0 ){
for(int q = 0; q < 256; q++ ){
channelR[q] = 0;
channelG[q] = 0;
channelB[q] = 0;
channelRacc[q] = 0;
channelGacc[q] = 0;
channelBacc[q] = 0;
}
}
__syncthreads();
// Compute the histogram
atomicAdd( &channelR[ px.x ] , 1);
atomicAdd( &channelG[ px.y ] , 1);
atomicAdd( &channelB[ px.z ] , 1);
__syncthreads();
if (i == 0){
// Print the histogram
// printf("Canal R \n");
// for(int k = 0; k < 256; k++)
// printf("%i - %i \n", k, channelR[k]);
// printf("Canal G \n");
// for(int k = 0; k < 256; k++)
// printf("%i - %i \n", k, channelG[k]);
// printf("Canal B \n");
// for(int k = 0; k < 256; k++)
// printf("%i - %i \n", k, channelB[k]);
// Cumulative sums
channelRacc[0] = channelR[0];
channelGacc[0] = channelG[0];
channelBacc[0] = channelB[0];
for( int j = 1; j < 256; j++ ){
channelRacc[j] = channelRacc[j-1] + channelR[j];
channelGacc[j] = channelGacc[j-1] + channelG[j];
channelBacc[j] = channelBacc[j-1] + channelB[j];
}
// printf("Canal R acc \n");
// for(int k = 0; k < 256; k++)
// printf("%i - %i \n", k, channelRacc[k]);
}
__syncthreads();
// Equalize using the first 256 threads.
if(i < 256){
channelReq[i] = round((float)channelRacc[i] * 255.0/( (float)numRows*numCols ) );
channelGeq[i] = round((float)channelGacc[i] * 255.0/( (float)numRows*numCols ) );
channelBeq[i] = round((float)channelBacc[i] * 255.0/( (float)numRows*numCols ) );
}
__syncthreads();
outputImage[i].x = channelReq[px.x];
outputImage[i].y = channelGeq[px.y];
outputImage[i].z = channelBeq[px.z];
__syncthreads();
//printf( "Despues: R: %i G: %i B %i \n",outputImage[i].x,outputImage[i].y,outputImage[i].z );
}
}
void image_equalization(uchar4 * const d_rgbaImage,
uchar4* const d_outputImage, size_t numRows, size_t numCols)
{
// Since the relative position of the pixels does not matter
// in this algorithm, the strategy for assigning threads to
// blocks and grids is simply to cover all of the pixels
// with threads along the X axis
long long int total_px = numRows * numCols; // total pixels
long int grids_n = ceil(total_px / (double)TxB); // number of blocks, rounded up so every pixel is covered
const dim3 blockSize(TxB, 1, 1);
const dim3 gridSize(grids_n, 1, 1);
image_equalization_kernel<<<gridSize, blockSize>>>(d_rgbaImage, d_outputImage, numRows, numCols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
} |
fbfaa26542b6a0418cf75c31d61e53bfb1d33b30.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
* to ensure data reuse, the matrix multiplication is done using tiling approach.
* It has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
#include <fstream>
#include <chrono>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
using std::chrono::steady_clock;
using std::chrono::duration;
using std::chrono::duration_cast;
#define FILE_NAME "/home/thaivu/Projects/CUDA-NVIDIA_Learning/Lab2_MuliMatrix/SampleOfNvidia/matrixMul/benchmark_log_JetsonNano_shmem.txt"
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(float *C, float *A,
float *B, int wA,
int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
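// Index arithmetic above, concretely: block (bx,by) owns the BLOCK_SIZE x BLOCK_SIZE tile of C whose
// top-left element is C[BLOCK_SIZE*by][BLOCK_SIZE*bx]; the loop slides a tile across row-block `by` of A
// (aStep = BLOCK_SIZE elements to the right) and down column-block `bx` of B (bStep = BLOCK_SIZE*wB
// elements, i.e. BLOCK_SIZE rows), each thread accumulating one element of C in the register Csub.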
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
// Allocates a matrix with random float entries.
void randomInit(float *data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = (rand() / (float)RAND_MAX) * 100.0;
}
void matrixMulCPU(float *C, const float *A, const float *B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j)
{
double sum = 0;
for (unsigned int k = 0; k < wA; ++k)
{
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv,
int block_size, const dim3 &dimsA,
const dim3 &dimsB, std::ostream &fileout)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float*)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float*)malloc(mem_size_B);
// hipStream_t stream;
// Initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int size_C = dimsC.x * dimsC.y;
unsigned int mem_size_C = size_C * sizeof(float);
float *h_C = (float*)malloc(mem_size_C);
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(hipMalloc((void **)(&d_A), mem_size_A));
checkCudaErrors(hipMalloc((void **)(&d_B), mem_size_B));
checkCudaErrors(hipMalloc((void **)(&d_C), mem_size_C));
// Allocate CUDA events that we'll use for timing
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// checkCudaErrors(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
// copy host memory to device
checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
// printf("Computing result using CUDA Kernel...\n");
// // Performs warmup operation using matrixMul CUDA kernel
// if (block_size == 16) {
// MatrixMulCUDA<16> <<< grid, threads, 0>>>(d_C, d_A, d_B,
// dimsA.x, dimsB.x);
// } else {
// MatrixMulCUDA<32> <<< grid, threads, 0>>>(d_C, d_A, d_B,
// dimsA.x, dimsB.x);
// }
// printf("done\n");
// // checkCudaErrors(hipStreamSynchronize(stream));
// checkCudaErrors(hipDeviceSynchronize());
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
// Execute the kernel
int nIter = 50;
for (int j = 0; j < nIter; j++) {
if (block_size == 16) {
hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads), 0, 0, d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads), 0, 0, d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
}
// Record the stop event
checkCudaErrors(hipEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
fileout << (int)dimsA.x << ", " << msecPerMatrixMul << ", " << flopsPerMatrixMul << ", " << gigaFlops;
// Copy result from device to host
checkCudaErrors(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost));
checkCudaErrors(hipDeviceSynchronize());
// verify the result of matrix multiplication
float *reference = (float *)malloc(mem_size_C);
steady_clock::time_point start_CPU = steady_clock::now();
matrixMulCPU(reference, h_A, h_B, (unsigned int)dimsA.y, (unsigned int)dimsA.x, (unsigned int)dimsB.x); // matrix_size.uiHA, matrix_size.uiWA, matrix_size.uiWB);
steady_clock::time_point end_CPU = steady_clock::now();
fileout << ", " << duration_cast <duration<double>>(end_CPU - start_CPU).count() << "\n";
printf("done.\n");
printf("Checking computed result for correctness: ");
bool correct = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f);
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
// printf("\nNOTE: The CUDA Samples are not meant for performance"\
// "measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv) {
std::ofstream fileout;
fileout.open(FILE_NAME, std::ios_base::out | std::ios_base::app );
fileout << "kernel_size, time(msec), ops, GFlop/s, time_CPU(sec)\n" ;
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
// int dev = findCudaDevice(argc, (const char **)argv);
int block_size = 32;
// for (int i = 1; i <= 128; i *= 2)
int i = 128;
{
dim3 dimsA(i * block_size, i * block_size, 1);
dim3 dimsB(i * block_size, i * block_size, 1);
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB, fileout);
if (matrix_result != 0)
return matrix_result;
}
fileout.close();
return 0;
}
| fbfaa26542b6a0418cf75c31d61e53bfb1d33b30.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
* to ensure data reuse; the matrix multiplication is done using a tiling approach.
* It has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
#include <fstream>
#include <chrono>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
using std::chrono::steady_clock;
using std::chrono::duration;
using std::chrono::duration_cast;
#define FILE_NAME "/home/thaivu/Projects/CUDA-NVIDIA_Learning/Lab2_MuliMatrix/SampleOfNvidia/matrixMul/benchmark_log_JetsonNano_shmem.txt"
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(float *C, float *A,
float *B, int wA,
int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
// Allocates a matrix with random float entries.
void randomInit(float *data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = (rand() / (float)RAND_MAX) * 100.0;
}
void matrixMulCPU(float *C, const float *A, const float *B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j)
{
double sum = 0;
for (unsigned int k = 0; k < wA; ++k)
{
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv,
int block_size, const dim3 &dimsA,
const dim3 &dimsB, std::ostream &fileout)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float*)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float*)malloc(mem_size_B);
// cudaStream_t stream;
// Initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int size_C = dimsC.x * dimsC.y;
unsigned int mem_size_C = size_C * sizeof(float);
float *h_C = (float*)malloc(mem_size_C);
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(cudaMalloc((void **)(&d_A), mem_size_A));
checkCudaErrors(cudaMalloc((void **)(&d_B), mem_size_B));
checkCudaErrors(cudaMalloc((void **)(&d_C), mem_size_C));
// Allocate CUDA events that we'll use for timing
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// checkCudaErrors(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
// copy host memory to device
checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
// printf("Computing result using CUDA Kernel...\n");
// // Performs warmup operation using matrixMul CUDA kernel
// if (block_size == 16) {
// MatrixMulCUDA<16> <<< grid, threads, 0>>>(d_C, d_A, d_B,
// dimsA.x, dimsB.x);
// } else {
// MatrixMulCUDA<32> <<< grid, threads, 0>>>(d_C, d_A, d_B,
// dimsA.x, dimsB.x);
// }
// printf("done\n");
// // checkCudaErrors(cudaStreamSynchronize(stream));
// checkCudaErrors(cudaDeviceSynchronize());
// Record the start event
checkCudaErrors(cudaEventRecord(start, NULL));
// Execute the kernel
int nIter = 50;
for (int j = 0; j < nIter; j++) {
if (block_size == 16) {
MatrixMulCUDA<16> <<<grid, threads, 0>>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
MatrixMulCUDA<32> <<<grid, threads, 0>>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
}
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
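// 2 * K * M * N flops in total: each of the M*N (= dimsA.y * dimsB.x) output elements needs
// K (= dimsA.x) multiply-add pairs, counted as two floating-point operations each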
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
fileout << (int)dimsA.x << ", " << msecPerMatrixMul << ", " << flopsPerMatrixMul << ", " << gigaFlops;
// Copy result from device to host
checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaDeviceSynchronize());
// verify the result of matrix multiplication
float *reference = (float *)malloc(mem_size_C);
steady_clock::time_point start_CPU = steady_clock::now();
matrixMulCPU(reference, h_A, h_B, (unsigned int)dimsA.y, (unsigned int)dimsA.x, (unsigned int)dimsB.x); // matrix_size.uiHA, matrix_size.uiWA, matrix_size.uiWB);
steady_clock::time_point end_CPU = steady_clock::now();
fileout << ", " << duration_cast <duration<double>>(end_CPU - start_CPU).count() << "\n";
printf("done.\n");
printf("Checking computed result for correctness: ");
bool correct = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f);
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
// printf("\nNOTE: The CUDA Samples are not meant for performance"\
// "measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv) {
std::ofstream fileout;
fileout.open(FILE_NAME, std::ios_base::out | std::ios_base::app );
fileout << "kernel_size, time(msec), ops, GFlop/s, time_CPU(sec)\n" ;
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
// int dev = findCudaDevice(argc, (const char **)argv);
int block_size = 32;
// for (int i = 1; i <= 128; i *= 2)
int i = 128;
{
dim3 dimsA(i * block_size, i * block_size, 1);
dim3 dimsB(i * block_size, i * block_size, 1);
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB, fileout);
if (matrix_result != 0)
return matrix_result;
}
fileout.close();
return 0;
}
|
a693d77b1d589976414059041b4643503fee07cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
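// Launching this empty kernel into the legacy default (null) stream acts as a lightweight barrier:
// the null stream waits for work previously queued in the per-group blocking streams, and later
// work waits for it, which orders the group computations without explicit events or device syncs.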
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter_v3(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData_v3(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
| a693d77b1d589976414059041b4643503fee07cb.cu | #ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter_v3(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData_v3(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
|
e571ce2be53fa9a4e9594a88c3cfcbc425278120.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include "rocblas.h"
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
void cuda_check(string file, int line)
{
hipError_t e = hipGetLastError();
if (e != hipSuccess)
{
cout << endl << file << ", line " << line << ": " << hipGetErrorString(e) << " (" << e << ")" << endl;
exit(1);
}
}
__global__ void summing(float* a_cuda, float* b_cuda, int n){
int t_numx = threadIdx.x + blockDim.x*blockIdx.x;
int tid = threadIdx.x;
__shared__ float blocksum;
if (tid ==0){
blocksum = 0;}
__syncthreads();
if (t_numx <n){
atomicAdd(&blocksum, a_cuda[t_numx]);
}
__syncthreads();
if (tid ==0){
b_cuda[blockIdx.x] += blocksum;
}
}
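// Two-level reduction: every thread atomically adds its element into one per-block shared
// accumulator, and thread 0 of each block writes that partial sum into b_cuda[blockIdx.x];
// the host then adds up the per-block partials. (All threads contending on a single shared
// float is simple but slow; a shared-memory tree reduction would typically be faster.)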
int main(int argc, char **argv)
{
int n = 1000000;
float* a = new float[n];
for (int i=0 ; i<n; i++){
a[i] = i;
}
float* a_cuda, *b_cuda;
hipMalloc((void**)&a_cuda, n*sizeof(float));
hipMemcpy(a_cuda, a , n*sizeof(float), hipMemcpyHostToDevice);
int block = 1024;
int grid = ((block + n -1)/block);
hipMalloc((void**)&b_cuda, grid*sizeof(float));
hipMemset(b_cuda, 0 , grid*sizeof(float));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float* b = new float[grid];
float sum_l =0;
hipEventRecord(start);
hipLaunchKernelGGL(( summing) , dim3(grid), dim3(block), 0, 0, a_cuda, b_cuda , n);
hipMemcpy(b , b_cuda, grid*sizeof(float) , hipMemcpyDeviceToHost);
for (int i=0 ; i<grid ; i++){
sum_l +=b[i];
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cout << "ARRAY SUM: " << sum_l << " Time: "<< milliseconds << endl;
hipEventDestroy(start);
hipEventDestroy(stop);
delete[] a;
delete[] b;
hipFree(a_cuda);
hipFree(b_cuda);
return 0;
}
| e571ce2be53fa9a4e9594a88c3cfcbc425278120.cu | #include <cuda_runtime.h>
#include <iostream>
#include "cublas.h"
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
void cuda_check(string file, int line)
{
cudaError_t e = cudaGetLastError();
if (e != cudaSuccess)
{
cout << endl << file << ", line " << line << ": " << cudaGetErrorString(e) << " (" << e << ")" << endl;
exit(1);
}
}
__global__ void summing(float* a_cuda, float* b_cuda, int n){
int t_numx = threadIdx.x + blockDim.x*blockIdx.x;
int tid = threadIdx.x;
__shared__ float blocksum;
if (tid ==0){
blocksum = 0;}
__syncthreads();
if (t_numx <n){
atomicAdd(&blocksum, a_cuda[t_numx]);
}
__syncthreads();
if (tid ==0){
b_cuda[blockIdx.x] += blocksum;
}
}
int main(int argc, char **argv)
{
int n = 1000000;
float* a = new float[n];
for (int i=0 ; i<n; i++){
a[i] = i;
}
float* a_cuda, *b_cuda;
cudaMalloc((void**)&a_cuda, n*sizeof(float));
cudaMemcpy(a_cuda, a , n*sizeof(float), cudaMemcpyHostToDevice);
int block = 1024;
int grid = ((block + n -1)/block);
cudaMalloc((void**)&b_cuda, grid*sizeof(float));
cudaMemset(b_cuda, 0 , grid*sizeof(float));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float* b = new float[grid];
float sum_l =0;
cudaEventRecord(start);
summing <<<grid, block>>> (a_cuda, b_cuda , n);
cudaMemcpy(b , b_cuda, grid*sizeof(float) , cudaMemcpyDeviceToHost);
for (int i=0 ; i<grid ; i++){
sum_l +=b[i];
}
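// Note: with n = 1e6 the exact total n*(n-1)/2 is roughly 5.0e11, well beyond float's 24-bit
// mantissa, so the printed sum is only approximate; accumulating in double would reduce the error.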
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout << "ARRAY SUM: " << sum_l << " Time: "<< milliseconds << endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
delete[] a;
delete[] b;
cudaFree(a_cuda);
cudaFree(b_cuda);
return 0;
}
|
eb57494885135dc65b992b251831561593b3396d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <omp.h>
#include <cstdio>
#include <cstdlib>
#include "myio.h"
#include "mylib.h"
#include "init.h"
#include "myio.h"
#include "wave.h"
#include "kernels.h"
#include <vector>
void rtm_f(float *image,const float *data,const float *c11,const float *c13,const float *c33,const float *wavelet,const int *sloc,int ns,const int *rloc,int nr,const float *taper,int nx,int nz,int nt,int npad,float dx,float dz,float dt,float rate,float ot){
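// Shot-parallel reverse-time migration: for each shot (one GPU per shot per pass) the source
// wavefield is forward-propagated and stored every `ratio` time steps; the recorded data are then
// propagated backwards in time, and the image is accumulated by zero-lag cross-correlation of the
// (time-interpolated) stored source wavefield with the receiver wavefield, summed over all shots.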
fprintf(stderr,"Starting migration...\n");
int ratio=rate/dt+0.5f;
int ntNeg=std::round(abs(ot)/dt);
int nnt=(nt-1)/ratio+1;
int nnt_data=(nt-ntNeg-1)/ratio+1;
int nnx=nx+2*npad,nnz=nz+2*npad;
int nnxz=nnx*nnz;
float dx2=dx*dx,dz2=dz*dz,dt2=dt*dt;
memset(image,0,nnxz*sizeof(float));
std::vector<int> GPUs;
get_array("gpu",GPUs);
int nGPUs=GPUs.size();
fprintf(stderr,"Total # GPUs = %d\n",nGPUs);
fprintf(stderr,"GPUs used are:\n");
for(int i=0;i<nGPUs;i++) fprintf(stderr,"%d",GPUs[i]);
fprintf(stderr,"\n");
float **sourceWavefield=new float*[nGPUs]();
// float **recWavefield=new float*[nGPUs]();
int **d_rloc=new int*[nGPUs]();
float **d_c11=new float*[nGPUs]();
float **d_c13=new float*[nGPUs]();
float **d_c33=new float*[nGPUs]();
float **d_taper=new float*[nGPUs]();
float **d_sourceWavefieldSlice=new float*[nGPUs]();
float **p0=new float*[nGPUs]();
float **q0=new float*[nGPUs]();
float **p1=new float*[nGPUs]();
float **q1=new float*[nGPUs]();
float **d_image=new float*[nGPUs]();
float **d_sourceWavefieldSlice0=new float*[nGPUs]();
float **d_sourceWavefieldSlice1=new float*[nGPUs]();
float **d_data0=new float*[nGPUs]();
float **d_data1=new float*[nGPUs]();
float **images=new float*[nGPUs]();
for(int i=0;i<nGPUs;++i){
hipSetDevice(GPUs[i]);
sourceWavefield[i]=new float[nnxz*nnt]();
// recWavefield[i]=new float[nnxz*nnt]();
hipMalloc(&d_c11[i],nnxz*sizeof(float));
hipMalloc(&d_c13[i],nnxz*sizeof(float));
hipMalloc(&d_c33[i],nnxz*sizeof(float));
hipMemcpy(d_c11[i],c11,nnxz*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(d_c13[i],c13,nnxz*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(d_c33[i],c33,nnxz*sizeof(float),hipMemcpyHostToDevice);
hipMalloc(&d_taper[i],nnxz*sizeof(float));
hipMemcpy(d_taper[i],taper,nnxz*sizeof(float),hipMemcpyHostToDevice);
hipMalloc(&d_sourceWavefieldSlice[i],nnxz*sizeof(float));
hipMemset(d_sourceWavefieldSlice[i],0,nnxz*sizeof(float));
hipMalloc(&p0[i],nnxz*sizeof(float));
hipMalloc(&p1[i],nnxz*sizeof(float));
hipMalloc(&q0[i],nnxz*sizeof(float));
hipMalloc(&q1[i],nnxz*sizeof(float));
hipMalloc(&d_image[i],nnxz*sizeof(float));
hipMemset(d_image[i],0,nnxz*sizeof(float));
hipMalloc(&d_sourceWavefieldSlice0[i],nnxz*sizeof(float));
hipMalloc(&d_sourceWavefieldSlice1[i],nnxz*sizeof(float));
hipMemset(d_sourceWavefieldSlice0[i],0,nnxz*sizeof(float));
hipMemset(d_sourceWavefieldSlice1[i],0,nnxz*sizeof(float));
images[i]=new float[nnxz]();
}
int npasses=(ns+nGPUs-1)/nGPUs;
int shotLeft=ns;
for(int pass=0;pass<npasses;++pass){
int nGPUsNeed=min(shotLeft,nGPUs);
fprintf(stderr,"Pass %d, # GPUs = %d\n",pass,nGPUsNeed);
#pragma omp parallel for num_threads(nGPUsNeed)
for(int i=0;i<nGPUsNeed;++i){
hipSetDevice(GPUs[i]);
int is=pass*nGPUs+i;
int slocxz=sloc[0+is*4]+sloc[1+is*4]*nnx;
hipMalloc(&d_rloc[i],2*sloc[2+is*4]*sizeof(int));
hipMemcpy(d_rloc[i],rloc+2*sloc[3+is*4],2*sloc[2+is*4]*sizeof(int),hipMemcpyHostToDevice);
dim3 block(BLOCK_DIM_X,BLOCK_DIM_Y);
dim3 grid((nnx-2*RADIUS+BLOCK_DIM_X-1)/BLOCK_DIM_X,(nnz-2*RADIUS+BLOCK_DIM_Y-1)/BLOCK_DIM_Y);
hipMemset(p0[i],0,nnxz*sizeof(float));
hipMemset(q0[i],0,nnxz*sizeof(float));
hipMemset(p1[i],0,nnxz*sizeof(float));
hipMemset(q1[i],0,nnxz*sizeof(float));
hipLaunchKernelGGL(( injectDipoleSource), dim3(1),dim3(1), 0, 0, p1[i],q1[i],dt2*wavelet[0],slocxz,nnx);
hipLaunchKernelGGL(( abc), dim3(grid),dim3(block), 0, 0, p1[i],q1[i],d_taper[i],nnx,nnz);
if(ratio==1){
hipLaunchKernelGGL(( recordWavefieldSlice), dim3(grid),dim3(block), 0, 0, d_sourceWavefieldSlice[i],p1[i],q1[i],nnx,nnz);
hipMemcpy(sourceWavefield[i]+nnxz,d_sourceWavefieldSlice[i],nnxz*sizeof(float),hipMemcpyDeviceToHost);
}
for(int it=2;it<nt;++it){
//fprintf(stderr,"Time step it=%d\n",it);
hipLaunchKernelGGL(( forwardCD), dim3(grid),dim3(block), 0, 0, p0[i],q0[i],p1[i],q1[i],d_c11[i],d_c13[i],d_c33[i],dx2,dz2,dt2,nnx,nnz);
hipLaunchKernelGGL(( injectDipoleSource), dim3(1),dim3(1), 0, 0, p0[i],q0[i],dt2*wavelet[it-1],slocxz,nnx);
hipLaunchKernelGGL(( abc), dim3(grid),dim3(block), 0, 0, p1[i],q1[i],p0[i],q0[i],d_taper[i],nnx,nnz);
if(it%ratio==0){
hipLaunchKernelGGL(( recordWavefieldSlice), dim3(grid),dim3(block), 0, 0, d_sourceWavefieldSlice[i],p0[i],q0[i],nnx,nnz);
hipMemcpy(sourceWavefield[i]+(it/ratio)*nnxz,d_sourceWavefieldSlice[i],nnxz*sizeof(float),hipMemcpyDeviceToHost);
}
float *pt=p0[i];
p0[i]=p1[i];
p1[i]=pt;
pt=q0[i];
q0[i]=q1[i];
q1[i]=pt;
}
// write("sourceWavefield",sourceWavefield[i],nnxz*nnt);
// to_header("sourceWavefield","n1",nnx,"o1",-dx*npad,"d1",dx);
// to_header("sourceWavefield","n2",nnz,"o2",-dz*npad,"d2",dz);
// to_header("sourceWavefield","n3",nnt,"o3",ot,"d3",rate);
// hipMemset(d_sourceWavefieldSlice[i],0,nnxz*sizeof(float));
hipMemset(p0[i],0,nnxz*sizeof(float));
hipMemset(q0[i],0,nnxz*sizeof(float));
hipMemset(p1[i],0,nnxz*sizeof(float));
hipMemset(q1[i],0,nnxz*sizeof(float));
hipMalloc(&d_data0[i],sloc[2+is*4]*sizeof(float));
hipMalloc(&d_data1[i],sloc[2+is*4]*sizeof(float));
hipMemset(d_data0[i],0,sloc[2+is*4]*sizeof(float));
hipMemset(d_data1[i],0,sloc[2+is*4]*sizeof(float));
hipMemcpy(d_data0[i],data+(nnt_data-1)*nr+sloc[3+is*4],sloc[2+is*4]*sizeof(float),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( injectDipoleData), dim3((sloc[2+is*4]+BLOCK_DIM_X-1)/BLOCK_DIM_X),dim3(BLOCK_DIM_X), 0, 0, p0[i],q0[i],d_data0[i],d_data1[i],0.f,d_rloc[i],sloc[2+is*4],nnx,dt2);
hipLaunchKernelGGL(( abc), dim3(grid),dim3(block), 0, 0, p0[i],q0[i],d_taper[i],nnx,nnz);
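	// back-propagation of the recorded data: f below is the fractional weight used to interpolate between
	// source-wavefield slices (stored every 'ratio' steps) that are passed to imagingCrossCor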
float f=(nt-2.)/ratio;
int i1=f,i2=i1+1;
hipMemcpy(d_sourceWavefieldSlice0[i],sourceWavefield[i]+i1*nnxz,nnxz*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(d_sourceWavefieldSlice1[i],sourceWavefield[i]+i2*nnxz,nnxz*sizeof(float),hipMemcpyHostToDevice);
f=f-i1;
hipLaunchKernelGGL(( imagingCrossCor), dim3(grid),dim3(block), 0, 0, d_image[i],p0[i],q0[i],d_sourceWavefieldSlice0[i],d_sourceWavefieldSlice1[i],f,nnx,nnz);
for(int it=nt-3;it>=0;--it){
//fprintf(stderr,"Time step it=%d\n",it);
// backwardDC<<<grid,block>>>(p1[i],q1[i],p0[i],q0[i],d_c11[i],d_c13[i],d_c33[i],dx2,dz2,dt2,nnx,nnz);
hipLaunchKernelGGL(( forwardCD), dim3(grid),dim3(block), 0, 0, p1[i],q1[i],p0[i],q0[i],d_c11[i],d_c13[i],d_c33[i],dx2,dz2,dt2,nnx,nnz);
if(it>=ntNeg){
f=(it-ntNeg+1.)/ratio;
i1=f;
if((it-ntNeg+2)%ratio==0){
hipMemcpy(d_data1[i],data+i1*nr+sloc[3+is*4],sloc[2+is*4]*sizeof(float),hipMemcpyHostToDevice);
float *pt=d_data0[i];
d_data0[i]=d_data1[i];
d_data1[i]=pt;
}
f=f-i1;
hipLaunchKernelGGL(( injectDipoleData), dim3((sloc[2+is*4]+BLOCK_DIM_X-1)/BLOCK_DIM_X),dim3(BLOCK_DIM_X), 0, 0, p1[i],q1[i],d_data0[i],d_data1[i],f,d_rloc[i],sloc[2+is*4],nnx,dt2);
}
hipLaunchKernelGGL(( abc), dim3(grid),dim3(block), 0, 0, p1[i],q1[i],p0[i],q0[i],d_taper[i],nnx,nnz);
// if(it%ratio==0){
// hipLaunchKernelGGL(( recordWavefieldSlice), dim3(grid),dim3(block), 0, 0, d_sourceWavefieldSlice[i],p0[i],q0[i],nnx,nnz);
// hipMemcpy(recWavefield[i]+(it/ratio)*nnxz,d_sourceWavefieldSlice[i],nnxz*sizeof(float),hipMemcpyDeviceToHost);
// }
f=(float)it/ratio;
i1=f;
if((it+1)%ratio==0){
hipMemcpy(d_sourceWavefieldSlice1[i],sourceWavefield[i]+i1*nnxz,nnxz*sizeof(float),hipMemcpyHostToDevice);
float *pt=d_sourceWavefieldSlice0[i];
d_sourceWavefieldSlice0[i]=d_sourceWavefieldSlice1[i];
d_sourceWavefieldSlice1[i]=pt;
}
f=f-i1;
hipLaunchKernelGGL(( imagingCrossCor), dim3(grid),dim3(block), 0, 0, d_image[i],p1[i],q1[i],d_sourceWavefieldSlice0[i],d_sourceWavefieldSlice1[i],f,nnx,nnz);
float *pt=p0[i];
p0[i]=p1[i];
p1[i]=pt;
pt=q0[i];
q0[i]=q1[i];
q1[i]=pt;
}
// write("recWavefield",recWavefield[i],nnxz*nnt);
// to_header("recWavefield","n1",nnx,"o1",-dx*npad,"d1",dx);
// to_header("recWavefield","n2",nnz,"o2",-dz*npad,"d2",dz);
// to_header("recWavefield","n3",nnt,"o3",ot,"d3",rate);
hipFree(d_rloc[i]);hipFree(d_data0[i]);hipFree(d_data1[i]);
}
shotLeft-=nGPUsNeed;
}
#pragma omp parallel for num_threads(nGPUs)
for(int i=0;i<nGPUs;i++){
hipSetDevice(GPUs[i]);
hipMemcpy(images[i],d_image[i],nnxz*sizeof(float),hipMemcpyDeviceToHost);
hipDeviceSynchronize();
}
for(int i=0;i<nGPUs;++i){
#pragma omp parallel for num_threads(16) shared(i)
for(size_t ixz=0;ixz<nnxz;++ixz){
image[ixz]+=images[i][ixz];
}
}
for(int i=0;i<nGPUs;++i){
hipSetDevice(GPUs[i]);
delete []sourceWavefield[i];
hipFree(d_c11[i]);hipFree(d_c13[i]);hipFree(d_c33[i]);
hipFree(d_taper[i]);
hipFree(d_sourceWavefieldSlice[i]);
hipFree(p0[i]);hipFree(p1[i]);hipFree(q0[i]);hipFree(q1[i]);
hipFree(d_image[i]);
hipFree(d_sourceWavefieldSlice0[i]);hipFree(d_sourceWavefieldSlice1[i]);
delete []images[i];
hipError_t e=hipGetLastError();
if(e!=hipSuccess) fprintf(stderr,"gpu %d error %s\n",GPUs[i],hipGetErrorString(e));
}
delete []sourceWavefield;
delete []d_rloc;
delete []d_c11;delete []d_c13;delete []d_c33;
delete []d_taper;
delete []d_sourceWavefieldSlice;
delete []p0;delete []p1;delete []q0;delete []q1;
delete []d_image;
delete []d_sourceWavefieldSlice0;delete []d_sourceWavefieldSlice1;delete []d_data0;delete []d_data1;
delete []images;
return;
}
| eb57494885135dc65b992b251831561593b3396d.cu | #include <omp.h>
#include <cstdio>
#include <cstdlib>
#include "myio.h"
#include "mylib.h"
#include "init.h"
#include "myio.h"
#include "wave.h"
#include "kernels.h"
#include <vector>
void rtm_f(float *image,const float *data,const float *c11,const float *c13,const float *c33,const float *wavelet,const int *sloc,int ns,const int *rloc,int nr,const float *taper,int nx,int nz,int nt,int npad,float dx,float dz,float dt,float rate,float ot){
fprintf(stderr,"Starting migration...\n");
int ratio=rate/dt+0.5f;
int ntNeg=std::round(abs(ot)/dt);
int nnt=(nt-1)/ratio+1;
int nnt_data=(nt-ntNeg-1)/ratio+1;
int nnx=nx+2*npad,nnz=nz+2*npad;
int nnxz=nnx*nnz;
float dx2=dx*dx,dz2=dz*dz,dt2=dt*dt;
memset(image,0,nnxz*sizeof(float));
std::vector<int> GPUs;
get_array("gpu",GPUs);
int nGPUs=GPUs.size();
fprintf(stderr,"Total # GPUs = %d\n",nGPUs);
fprintf(stderr,"GPUs used are:\n");
for(int i=0;i<nGPUs;i++) fprintf(stderr,"%d",GPUs[i]);
fprintf(stderr,"\n");
float **sourceWavefield=new float*[nGPUs]();
// float **recWavefield=new float*[nGPUs]();
int **d_rloc=new int*[nGPUs]();
float **d_c11=new float*[nGPUs]();
float **d_c13=new float*[nGPUs]();
float **d_c33=new float*[nGPUs]();
float **d_taper=new float*[nGPUs]();
float **d_sourceWavefieldSlice=new float*[nGPUs]();
float **p0=new float*[nGPUs]();
float **q0=new float*[nGPUs]();
float **p1=new float*[nGPUs]();
float **q1=new float*[nGPUs]();
float **d_image=new float*[nGPUs]();
float **d_sourceWavefieldSlice0=new float*[nGPUs]();
float **d_sourceWavefieldSlice1=new float*[nGPUs]();
float **d_data0=new float*[nGPUs]();
float **d_data1=new float*[nGPUs]();
float **images=new float*[nGPUs]();
for(int i=0;i<nGPUs;++i){
cudaSetDevice(GPUs[i]);
sourceWavefield[i]=new float[nnxz*nnt]();
// recWavefield[i]=new float[nnxz*nnt]();
cudaMalloc(&d_c11[i],nnxz*sizeof(float));
cudaMalloc(&d_c13[i],nnxz*sizeof(float));
cudaMalloc(&d_c33[i],nnxz*sizeof(float));
cudaMemcpy(d_c11[i],c11,nnxz*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_c13[i],c13,nnxz*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_c33[i],c33,nnxz*sizeof(float),cudaMemcpyHostToDevice);
cudaMalloc(&d_taper[i],nnxz*sizeof(float));
cudaMemcpy(d_taper[i],taper,nnxz*sizeof(float),cudaMemcpyHostToDevice);
cudaMalloc(&d_sourceWavefieldSlice[i],nnxz*sizeof(float));
cudaMemset(d_sourceWavefieldSlice[i],0,nnxz*sizeof(float));
cudaMalloc(&p0[i],nnxz*sizeof(float));
cudaMalloc(&p1[i],nnxz*sizeof(float));
cudaMalloc(&q0[i],nnxz*sizeof(float));
cudaMalloc(&q1[i],nnxz*sizeof(float));
cudaMalloc(&d_image[i],nnxz*sizeof(float));
cudaMemset(d_image[i],0,nnxz*sizeof(float));
cudaMalloc(&d_sourceWavefieldSlice0[i],nnxz*sizeof(float));
cudaMalloc(&d_sourceWavefieldSlice1[i],nnxz*sizeof(float));
cudaMemset(d_sourceWavefieldSlice0[i],0,nnxz*sizeof(float));
cudaMemset(d_sourceWavefieldSlice1[i],0,nnxz*sizeof(float));
images[i]=new float[nnxz]();
}
int npasses=(ns+nGPUs-1)/nGPUs;
int shotLeft=ns;
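	// shots are distributed over the GPUs in passes: each pass runs up to nGPUs shots concurrently, one OpenMP thread per GPU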
for(int pass=0;pass<npasses;++pass){
int nGPUsNeed=min(shotLeft,nGPUs);
fprintf(stderr,"Pass %d, # GPUs = %d\n",pass,nGPUsNeed);
#pragma omp parallel for num_threads(nGPUsNeed)
for(int i=0;i<nGPUsNeed;++i){
cudaSetDevice(GPUs[i]);
int is=pass*nGPUs+i;
int slocxz=sloc[0+is*4]+sloc[1+is*4]*nnx;
cudaMalloc(&d_rloc[i],2*sloc[2+is*4]*sizeof(int));
cudaMemcpy(d_rloc[i],rloc+2*sloc[3+is*4],2*sloc[2+is*4]*sizeof(int),cudaMemcpyHostToDevice);
dim3 block(BLOCK_DIM_X,BLOCK_DIM_Y);
dim3 grid((nnx-2*RADIUS+BLOCK_DIM_X-1)/BLOCK_DIM_X,(nnz-2*RADIUS+BLOCK_DIM_Y-1)/BLOCK_DIM_Y);
cudaMemset(p0[i],0,nnxz*sizeof(float));
cudaMemset(q0[i],0,nnxz*sizeof(float));
cudaMemset(p1[i],0,nnxz*sizeof(float));
cudaMemset(q1[i],0,nnxz*sizeof(float));
injectDipoleSource<<<1,1>>>(p1[i],q1[i],dt2*wavelet[0],slocxz,nnx);
abc<<<grid,block>>>(p1[i],q1[i],d_taper[i],nnx,nnz);
if(ratio==1){
recordWavefieldSlice<<<grid,block>>>(d_sourceWavefieldSlice[i],p1[i],q1[i],nnx,nnz);
cudaMemcpy(sourceWavefield[i]+nnxz,d_sourceWavefieldSlice[i],nnxz*sizeof(float),cudaMemcpyDeviceToHost);
}
for(int it=2;it<nt;++it){
//fprintf(stderr,"Time step it=%d\n",it);
forwardCD<<<grid,block>>>(p0[i],q0[i],p1[i],q1[i],d_c11[i],d_c13[i],d_c33[i],dx2,dz2,dt2,nnx,nnz);
injectDipoleSource<<<1,1>>>(p0[i],q0[i],dt2*wavelet[it-1],slocxz,nnx);
abc<<<grid,block>>>(p1[i],q1[i],p0[i],q0[i],d_taper[i],nnx,nnz);
if(it%ratio==0){
recordWavefieldSlice<<<grid,block>>>(d_sourceWavefieldSlice[i],p0[i],q0[i],nnx,nnz);
cudaMemcpy(sourceWavefield[i]+(it/ratio)*nnxz,d_sourceWavefieldSlice[i],nnxz*sizeof(float),cudaMemcpyDeviceToHost);
}
float *pt=p0[i];
p0[i]=p1[i];
p1[i]=pt;
pt=q0[i];
q0[i]=q1[i];
q1[i]=pt;
}
// write("sourceWavefield",sourceWavefield[i],nnxz*nnt);
// to_header("sourceWavefield","n1",nnx,"o1",-dx*npad,"d1",dx);
// to_header("sourceWavefield","n2",nnz,"o2",-dz*npad,"d2",dz);
// to_header("sourceWavefield","n3",nnt,"o3",ot,"d3",rate);
// cudaMemset(d_sourceWavefieldSlice[i],0,nnxz*sizeof(float));
cudaMemset(p0[i],0,nnxz*sizeof(float));
cudaMemset(q0[i],0,nnxz*sizeof(float));
cudaMemset(p1[i],0,nnxz*sizeof(float));
cudaMemset(q1[i],0,nnxz*sizeof(float));
cudaMalloc(&d_data0[i],sloc[2+is*4]*sizeof(float));
cudaMalloc(&d_data1[i],sloc[2+is*4]*sizeof(float));
cudaMemset(d_data0[i],0,sloc[2+is*4]*sizeof(float));
cudaMemset(d_data1[i],0,sloc[2+is*4]*sizeof(float));
cudaMemcpy(d_data0[i],data+(nnt_data-1)*nr+sloc[3+is*4],sloc[2+is*4]*sizeof(float),cudaMemcpyHostToDevice);
injectDipoleData<<<(sloc[2+is*4]+BLOCK_DIM_X-1)/BLOCK_DIM_X,BLOCK_DIM_X>>>(p0[i],q0[i],d_data0[i],d_data1[i],0.f,d_rloc[i],sloc[2+is*4],nnx,dt2);
abc<<<grid,block>>>(p0[i],q0[i],d_taper[i],nnx,nnz);
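	// back-propagation of the recorded data: f below is the fractional weight used to interpolate between
	// source-wavefield slices (stored every 'ratio' steps) that are passed to imagingCrossCor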
float f=(nt-2.)/ratio;
int i1=f,i2=i1+1;
cudaMemcpy(d_sourceWavefieldSlice0[i],sourceWavefield[i]+i1*nnxz,nnxz*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_sourceWavefieldSlice1[i],sourceWavefield[i]+i2*nnxz,nnxz*sizeof(float),cudaMemcpyHostToDevice);
f=f-i1;
imagingCrossCor<<<grid,block>>>(d_image[i],p0[i],q0[i],d_sourceWavefieldSlice0[i],d_sourceWavefieldSlice1[i],f,nnx,nnz);
for(int it=nt-3;it>=0;--it){
//fprintf(stderr,"Time step it=%d\n",it);
// backwardDC<<<grid,block>>>(p1[i],q1[i],p0[i],q0[i],d_c11[i],d_c13[i],d_c33[i],dx2,dz2,dt2,nnx,nnz);
forwardCD<<<grid,block>>>(p1[i],q1[i],p0[i],q0[i],d_c11[i],d_c13[i],d_c33[i],dx2,dz2,dt2,nnx,nnz);
if(it>=ntNeg){
f=(it-ntNeg+1.)/ratio;
i1=f;
if((it-ntNeg+2)%ratio==0){
cudaMemcpy(d_data1[i],data+i1*nr+sloc[3+is*4],sloc[2+is*4]*sizeof(float),cudaMemcpyHostToDevice);
float *pt=d_data0[i];
d_data0[i]=d_data1[i];
d_data1[i]=pt;
}
f=f-i1;
injectDipoleData<<<(sloc[2+is*4]+BLOCK_DIM_X-1)/BLOCK_DIM_X,BLOCK_DIM_X>>>(p1[i],q1[i],d_data0[i],d_data1[i],f,d_rloc[i],sloc[2+is*4],nnx,dt2);
}
abc<<<grid,block>>>(p1[i],q1[i],p0[i],q0[i],d_taper[i],nnx,nnz);
// if(it%ratio==0){
// recordWavefieldSlice<<<grid,block>>>(d_sourceWavefieldSlice[i],p0[i],q0[i],nnx,nnz);
// cudaMemcpy(recWavefield[i]+(it/ratio)*nnxz,d_sourceWavefieldSlice[i],nnxz*sizeof(float),cudaMemcpyDeviceToHost);
// }
f=(float)it/ratio;
i1=f;
if((it+1)%ratio==0){
cudaMemcpy(d_sourceWavefieldSlice1[i],sourceWavefield[i]+i1*nnxz,nnxz*sizeof(float),cudaMemcpyHostToDevice);
float *pt=d_sourceWavefieldSlice0[i];
d_sourceWavefieldSlice0[i]=d_sourceWavefieldSlice1[i];
d_sourceWavefieldSlice1[i]=pt;
}
f=f-i1;
imagingCrossCor<<<grid,block>>>(d_image[i],p1[i],q1[i],d_sourceWavefieldSlice0[i],d_sourceWavefieldSlice1[i],f,nnx,nnz);
float *pt=p0[i];
p0[i]=p1[i];
p1[i]=pt;
pt=q0[i];
q0[i]=q1[i];
q1[i]=pt;
}
// write("recWavefield",recWavefield[i],nnxz*nnt);
// to_header("recWavefield","n1",nnx,"o1",-dx*npad,"d1",dx);
// to_header("recWavefield","n2",nnz,"o2",-dz*npad,"d2",dz);
// to_header("recWavefield","n3",nnt,"o3",ot,"d3",rate);
cudaFree(d_rloc[i]);cudaFree(d_data0[i]);cudaFree(d_data1[i]);
}
shotLeft-=nGPUsNeed;
}
#pragma omp parallel for num_threads(nGPUs)
for(int i=0;i<nGPUs;i++){
cudaSetDevice(GPUs[i]);
cudaMemcpy(images[i],d_image[i],nnxz*sizeof(float),cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
}
for(int i=0;i<nGPUs;++i){
#pragma omp parallel for num_threads(16) shared(i)
for(size_t ixz=0;ixz<nnxz;++ixz){
image[ixz]+=images[i][ixz];
}
}
for(int i=0;i<nGPUs;++i){
cudaSetDevice(GPUs[i]);
delete []sourceWavefield[i];
cudaFree(d_c11[i]);cudaFree(d_c13[i]);cudaFree(d_c33[i]);
cudaFree(d_taper[i]);
cudaFree(d_sourceWavefieldSlice[i]);
cudaFree(p0[i]);cudaFree(p1[i]);cudaFree(q0[i]);cudaFree(q1[i]);
cudaFree(d_image[i]);
cudaFree(d_sourceWavefieldSlice0[i]);cudaFree(d_sourceWavefieldSlice1[i]);
delete []images[i];
cudaError_t e=cudaGetLastError();
if(e!=cudaSuccess) fprintf(stderr,"gpu %d error %s\n",GPUs[i],cudaGetErrorString(e));
}
delete []sourceWavefield;
delete []d_rloc;
delete []d_c11;delete []d_c13;delete []d_c33;
delete []d_taper;
delete []d_sourceWavefieldSlice;
delete []p0;delete []p1;delete []q0;delete []q1;
delete []d_image;
delete []d_sourceWavefieldSlice0;delete []d_sourceWavefieldSlice1;delete []d_data0;delete []d_data1;
delete []images;
return;
}
|
8d6298ca063205c722ca012a9bdb647c0471d898.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Math.cuh>
namespace at::native {
CONSTEXPR_EXCEPT_WIN_CUDA char log_name[] = "log_kernel";
void log_kernel_cuda(TensorIteratorBase& iter) {
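  // complex dtypes use the jiterator string kernel when AT_USE_JITERATOR() is enabled and fall back to a
  // precompiled gpu_kernel lambda otherwise; real floating dtypes always take the precompiled path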
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto log_string = jiterator_stringify(
template <typename T> T log_kernel(T x) { return ::log(x); });
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "log_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/log_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, log_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, iter.common_dtype(), "log_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::log(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log(a);
});
});
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char log10_name[] = "log10_kernel";
void log10_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto log10_string = jiterator_stringify(
template <typename T> T log10_kernel(T x) { return std::log10(x); });
AT_DISPATCH_COMPLEX_TYPES(common_dtype, "log10_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/log10_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, log10_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES(iter.common_dtype(), "log10_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::log10(a); });
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log10_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log10(a);
});
});
}
}
void log1p_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log1p_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log1p(a);
});
});
}
CONSTEXPR_EXCEPT_WIN_CUDA char log2_name[] = "log2_kernel";
void log2_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto log2_string = jiterator_stringify(
template <typename T> T log2_kernel(T x) { return std::log2(x); });
AT_DISPATCH_COMPLEX_TYPES(common_dtype, "log2_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/log2_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, log2_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES(iter.common_dtype(), "log2_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::log2(a); });
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log2_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log2(a);
});
});
}
}
REGISTER_DISPATCH(log_stub, &log_kernel_cuda);
REGISTER_DISPATCH(log10_stub, &log10_kernel_cuda);
REGISTER_DISPATCH(log2_stub, &log2_kernel_cuda);
REGISTER_DISPATCH(log1p_stub, &log1p_kernel_cuda);
} // namespace at::native
| 8d6298ca063205c722ca012a9bdb647c0471d898.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
namespace at::native {
CONSTEXPR_EXCEPT_WIN_CUDA char log_name[] = "log_kernel";
void log_kernel_cuda(TensorIteratorBase& iter) {
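  // complex dtypes use the jiterator string kernel when AT_USE_JITERATOR() is enabled and fall back to a
  // precompiled gpu_kernel lambda otherwise; real floating dtypes always take the precompiled path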
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto log_string = jiterator_stringify(
template <typename T> T log_kernel(T x) { return std::log(x); });
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "log_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/log_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, log_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, iter.common_dtype(), "log_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::log(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log(a);
});
});
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char log10_name[] = "log10_kernel";
void log10_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto log10_string = jiterator_stringify(
template <typename T> T log10_kernel(T x) { return std::log10(x); });
AT_DISPATCH_COMPLEX_TYPES(common_dtype, "log10_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/log10_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, log10_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES(iter.common_dtype(), "log10_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::log10(a); });
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log10_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log10(a);
});
});
}
}
void log1p_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log1p_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log1p(a);
});
});
}
CONSTEXPR_EXCEPT_WIN_CUDA char log2_name[] = "log2_kernel";
void log2_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto log2_string = jiterator_stringify(
template <typename T> T log2_kernel(T x) { return std::log2(x); });
AT_DISPATCH_COMPLEX_TYPES(common_dtype, "log2_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/log2_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, log2_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES(iter.common_dtype(), "log2_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::log2(a); });
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log2_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log2(a);
});
});
}
}
REGISTER_DISPATCH(log_stub, &log_kernel_cuda);
REGISTER_DISPATCH(log10_stub, &log10_kernel_cuda);
REGISTER_DISPATCH(log2_stub, &log2_kernel_cuda);
REGISTER_DISPATCH(log1p_stub, &log1p_kernel_cuda);
} // namespace at::native
|
b8b5d7cbd5c67b6eed37b4ce1ade9640ab6c475d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
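        // Typical compaction flow: kernMapToBoolean marks each PathSegment, `indices` is expected to hold an
        // exclusive scan of `bools`, and kernScatter then writes the marked segments to their compacted positions.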
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const PathSegment *idata) {
// TODO
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) return;
bools[index] = (idata[index].remainingBounces <= 0) ? 1 : 0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, PathSegment *odata,
const PathSegment *idata, const int *bools, const int *indices) {
// TODO
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) return;
int i = indices[index];
if (bools[index] == 1)
odata[i] = idata[index];
}
}
}
| b8b5d7cbd5c67b6eed37b4ce1ade9640ab6c475d.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
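        // Typical compaction flow: kernMapToBoolean marks each PathSegment, `indices` is expected to hold an
        // exclusive scan of `bools`, and kernScatter then writes the marked segments to their compacted positions.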
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const PathSegment *idata) {
// TODO
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) return;
bools[index] = (idata[index].remainingBounces <= 0) ? 1 : 0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, PathSegment *odata,
const PathSegment *idata, const int *bools, const int *indices) {
// TODO
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) return;
int i = indices[index];
if (bools[index] == 1)
odata[i] = idata[index];
}
}
}
|
915d0ad0c895123442d9440063410ba0d175c58c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string>
#include <fstream>
#include "CUDA_basic.h"
#include <math.h>
//#include <cutil.h>
#include <helper_cuda.h>
#include <helper_math.h>
//#include "sharedDefination.h"
using namespace std;
#include "RandomizedTreeGPU.h"
RandmizedTree_CUDA RandomizedTreeEngine;
texture<float,2> currentImg;
texture<float,2> currentColorImg,currentDepthImg;
texture<float,2,hipReadModeElementType> trees_device;
texture<float,2> detectionResult2D;
int maximumWidth;
//texture<float> trees_device_1D;
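// Device-side spinlock: lock() spins on atomicCAS until it wins the 0->1 transition; unlock() must write 0 back to release.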
struct Lock {
int *mutex;
Lock( void ) {
int state = 0;
CUDA_CALL( hipMalloc( (void**)& mutex,
sizeof(int) ) );
CUDA_CALL( hipMemcpy( mutex, &state, sizeof(int),
hipMemcpyHostToDevice ) );
}
~Lock( void ) {
hipFree( mutex );
}
__device__ void lock( void ) {
while( atomicCAS( mutex, 0, 1 ) != 0 );
}
__device__ void unlock( void ) {
		atomicExch( mutex, 0 );  // release the lock by writing 0 back
}
};
double *cu_label_prob_all;
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
int currentID;
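// Flattened node layout used on the GPU: each node occupies (10+MAX_LABEL_NUMBER) floats --
// [0-1] first probe offset, [2-3] second probe offset, [4] label, [5] tree level,
// [6] left-child index (-1 = none), [7] right-child index (-1 = none), [8] sample count,
// [9] split threshold, [10..] per-class counts (only meaningful at leaves).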
void convertTrees2Array(Node *root_CPU, Node_GPU **root_GPU,int labelNum)
{
//if (root_CPU->label!=-1)
//{
// cout<<currentID<<" "<<root_CPU->label<<endl;
//}
//cout<<currentID<<" ";
int rootID=currentID;
root_GPU[rootID]->parameters[0]=root_CPU->pos1[0];
root_GPU[rootID]->parameters[1]=root_CPU->pos1[1];
root_GPU[rootID]->parameters[2]=root_CPU->pos2[0];
root_GPU[rootID]->parameters[3]=root_CPU->pos2[1];
root_GPU[rootID]->parameters[4]=root_CPU->label;
root_GPU[rootID]->parameters[5]=root_CPU->nLevel;
root_GPU[rootID]->parameters[8]=root_CPU->num_all;
root_GPU[rootID]->parameters[9]=root_CPU->threshold;
if (root_CPU->l_child==NULL)
{
root_GPU[rootID]->parameters[6]=-1;
}
if (root_CPU->r_child==NULL)
{
root_GPU[rootID]->parameters[7]=-1;
}
	if (root_CPU->l_child==NULL&&root_CPU->r_child==NULL)//leaf node
{
//root_GPU[currentID]->num_of_each_class=new float[labelNum];
//cout<<"leafe "<<currentID<<endl;
//int ok=false;
for (int i=0;i<labelNum;i++)
{
root_GPU[rootID]->parameters[10+i]=root_CPU->num_of_each_class[i];
/* if (root_CPU->num_of_each_class[i]>0)
{
ok=true;
}*/
}
//if (!ok)
// /*cout<<rootID<<endl;*/
//{
// for (int i=0;i<labelNum;i++)
// {
// cout<<root_CPU->num_of_each_class[i]<<" ";
// }
// cout<<endl;
//}
		//leaf node: per-class counts copied into the flat array
}
if (root_CPU->l_child!=NULL)
{
currentID++;
root_GPU[rootID]->parameters[6]=currentID;
convertTrees2Array(root_CPU->l_child, root_GPU,labelNum);
}
if (root_CPU->r_child!=NULL)
{
currentID++;
root_GPU[rootID]->parameters[7]=currentID;
convertTrees2Array(root_CPU->r_child, root_GPU,labelNum);
}
}
void outputTrees(Node_GPU **tree,int ind,ofstream &out)
{
out<<tree[ind]->parameters[5]<<" ";
out<<tree[ind]->parameters[0]<<" "<<tree[ind]->parameters[1]<<" "<<
tree[ind]->parameters[2]<<" "<<tree[ind]->parameters[3]<<" "<<tree[ind]->parameters[9]<<endl;
if (tree[ind]->parameters[6]==-1&&tree[ind]->parameters[7]==-1)
{
bool ok=false;
for (int i=0;i<RandomizedTreeEngine.labelNum;i++)
{
out<<tree[ind]->parameters[10+i]<<" ";
if (tree[ind]->parameters[10+i]>0)
{
ok=true;
}
}
out<<endl;
if (!ok)
{
cout<<"bad leaf at "<<ind<<endl;
cout<<tree[ind]->parameters[4]<<" "<<tree[ind]->parameters[5]<<endl;
}
}
else
{
if (tree[ind]->parameters[6]!=-1)
{
outputTrees(tree,tree[ind]->parameters[6],out);
}
if (tree[ind]->parameters[7]!=-1)
{
outputTrees(tree,tree[ind]->parameters[7],out);
}
}
}
__global__ void outputTree(float *tree,int MaxNumber,int treeInd,int labelNum)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<MaxNumber)
{
//printf("%d %d\n",offset,(10+MAX_LABEL_NUMBER)*MaxNumber);
int initialInd=(treeInd*MaxNumber+offset)*(10+MAX_LABEL_NUMBER);
//printf("after getting values %d %d\n",tree[initialInd+6],tree[initialInd+7]);
if (tree[initialInd+6]==-1&&tree[initialInd+7]==-1)
{
int i;
for (i=0;i<labelNum;i++)
{
if(tree[initialInd+10+i]>0)
break;
}
if (i==labelNum)
{
for (int i=0;i<labelNum;i++)
{
printf("%f ",tree[initialInd+10+i]);
}
}
printf("\n");
}
}
}
extern "C" void setData_preprocess(int _max_depth,int _min_sample_count,double _regression_accuracy, int _max_num_of_trees_in_the_forest,int _windowSize, int labelNum, Node **trees_cpu,int treeNum,bool withDepth)
{
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
data->max_depth=_max_depth;
data->min_sample_count=_min_sample_count;
data->regression_accuracy=_regression_accuracy;
data->max_num_of_trees_in_the_forest=_max_num_of_trees_in_the_forest;
data->windowSize=_windowSize;
data->labelNum=labelNum;
cout<<"begin feeding trees\n";
	//convert the tree structure into flat arrays
data->host_trees=new Node_GPU **[data->max_num_of_trees_in_the_forest];
data->MaxNumber=(1-pow((float)2,_max_depth)/(-1));
int MaxNumber=data->MaxNumber;
for (int i=0;i<treeNum;i++)
{
data->host_trees[i]=new Node_GPU *[MaxNumber];
for (int j=0;j<MaxNumber;j++)
{
data->host_trees[i][j]=new Node_GPU();
data->host_trees[i][j]->parameters=new float[10+MAX_LABEL_NUMBER];
data->host_trees[i][j]->parameters[6]=data->host_trees[i][j]->parameters[7]=-2;
data->host_trees[i][j]->parameters[4]=-1;
data->host_trees[i][j]->parameters[8]=0;
data->host_trees[i][j]->parameters[0]=data->host_trees[i][j]->parameters[1]=data->host_trees[i][j]->parameters[2]=data->host_trees[i][j]->parameters[3]=0;
data->host_trees[i][j]->parameters[9]=0;
}
currentID=0;
convertTrees2Array(trees_cpu[i],data->host_trees[i],labelNum);
cout<<i<<" "<<currentID<<endl;
}
//cout<<"left and right node index: "<<data->host_trees[0][0]->parameters[6]<<" "<<data->host_trees[0][0]->parameters[7]<<endl;
//ofstream out("tree 0.txt",ios::out);
//outputTrees(data->host_trees[0],0,out);
//out.close();
cout<<"copying trees to GPU\n";
//CUDA_CALL(hipMalloc((void **)&data->leafnode,1*sizeof(LeafNode_GPU)));
//CUDA_CALL(hipMalloc((void **)&data->trees,data->max_num_of_trees_in_the_forest*sizeof(Node_GPU*)));
////CUDA_CALL(hipMemcpy(data->trees,data->host_trees,data->max_num_of_trees_in_the_forest*sizeof(Node_GPU **),hipMemcpyHostToDevice));
////data->trees=new Node_GPU **[data->max_num_of_trees_in_the_forest];
//for (int i=0;i<treeNum;i++)
//{
// CUDA_CALL(hipMalloc((void **)&data->trees[i],MaxNumber*sizeof(Node_GPU)));
//// CUDA_CALL(hipMemcpy(data->trees[i],data->host_trees[i],MaxNumber*sizeof(Node_GPU *),hipMemcpyHostToDevice));
// cout<<i<<endl;
// for (int j=0;j<MaxNumber;j++)
// {
// // cout<<j<<" ";
// CUDA_CALL(hipMalloc((void **)&data->trees[i][j].cu_parameters,10*sizeof(int)));
// CUDA_CALL(hipMemcpy(data->trees[i][j].cu_parameters,data->host_trees[i][j]->parameters,10*sizeof(int),hipMemcpyHostToDevice));
// if (data->host_trees[i][j]->parameters[6]==-1&&data->host_trees[i][j]->parameters[7]==-1)
// {
// CUDA_CALL(hipMalloc((void **)&data->trees[i][j].num_of_each_class,MAX_LABEL_NUMBER*sizeof(float)));
// CUDA_CALL(hipMemcpy(data->trees[i][j].num_of_each_class,data->host_trees[i][j]->num_of_each_class,
// MAX_LABEL_NUMBER*sizeof(float),hipMemcpyHostToDevice));
// }
// }
//}
////////////////////////////using global memory////////////////////////////////////////////////////
//float *host_vectorTrees=new float[(10+MAX_LABEL_NUMBER)*MaxNumber*treeNum];
////root_GPU[currentID]->parameters[0]=root_CPU->pos1[0];
////root_GPU[currentID]->parameters[1]=root_CPU->pos1[1];
////root_GPU[currentID]->parameters[2]=root_CPU->pos2[0];
////root_GPU[currentID]->parameters[3]=root_CPU->pos2[1];
////root_GPU[currentID]->parameters[4]=root_CPU->label;
////root_GPU[currentID]->parameters[5]=root_CPU->nLevel;
////root_GPU[currentID]->parameters[8]=root_CPU->num_all;
////root_GPU[currentID]->parameters[9]=root_CPU->threshold;
////cout<<"tree num: "<<treeNum<<endl;
//
//cout<<MaxNumber<<endl;
//cout<<"assigning values\n";
//for (int i=0;i<treeNum;i++)
//{
// cout<<i<<endl;
// for (int j=0;j<MaxNumber;j++)
// {
// //cout<<i<<" "<<j<<endl;
// /* for (int k=0;k<)
// {
// }*/
// for (int k=0;k<10+MAX_LABEL_NUMBER;k++)
// {
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+k]=data->host_trees[i][j]->parameters[k];
// }
///* host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+0]=data->host_trees[i][j].pos1[0];
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+1]=data->host_trees[i][j].pos1[1];
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+2]=data->host_trees[i][j].pos2[0];
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+3]=data->host_trees[i][j].pos2[1];
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+4]=data->host_trees[i][j].label;
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+5]=data->host_trees[i][j].nLevel;
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+8]=data->host_trees[i][j].num_all;
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+9]=data->host_trees[i][j].threshold;*/
//
//
// //if (trees_cpu[i][j].l_child==NULL)
// //{
// // host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+6]=-1;
// //}
// //if (trees_cpu[i][j].r_child==NULL)
// //{
// // host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+7]=-1;
// //}
// //for (int i=0;i<MAX_LABEL_NUMBER;i++)
// //{
// // host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+10+i]=0;
// //}
// //if ((trees_cpu[i][j].l_child==NULL)&&trees_cpu[i][j].r_child==NULL)//root
// //{
// // for (int i=0;i<labelNum;i++)
// // {
// // host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+10+i]=trees_cpu[i][j].num_of_each_class[i];
// // }
// //}
// }
//}
//cout<<"copying values\n";
////using global memory
//CUDA_CALL(hipMalloc((void **)&data->cu_vectorTrees,(10+MAX_LABEL_NUMBER)*MaxNumber*treeNum*sizeof(float)));
//CUDA_CALL(hipMemcpy(data->cu_vectorTrees,host_vectorTrees,(10+MAX_LABEL_NUMBER)*MaxNumber*treeNum*sizeof(float),hipMemcpyHostToDevice));
//delete []host_vectorTrees;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
//treeNum=2;
///////////////////////////////////////using texture memory//////////////////////////////////////////////////////////////////////
float *host_vectorTrees=new float[(10+MAX_LABEL_NUMBER)*MaxNumber*treeNum];
cout<<MaxNumber<<endl;
cout<<"assigning values\n";
for (int i=0;i<treeNum;i++)
{
cout<<i<<endl;
for (int j=0;j<MaxNumber;j++)
{
for (int k=0;k<10+MAX_LABEL_NUMBER;k++)
{
host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+k]=data->host_trees[i][j]->parameters[k];
}
}
}
CUDA_CALL(hipMalloc((void **)&data->cu_vectorTrees,(10+MAX_LABEL_NUMBER)*MaxNumber*treeNum*sizeof(float)));
CUDA_CALL(hipMemcpy(data->cu_vectorTrees,host_vectorTrees,(10+MAX_LABEL_NUMBER)*MaxNumber*treeNum*sizeof(float),hipMemcpyHostToDevice));
cout<<"width "<< (10+MAX_LABEL_NUMBER)<<" heigth:"<<MaxNumber*treeNum<<" maxNumber:"<<MaxNumber<<endl;
//CUDA_CALL(hipBindTexture2D( NULL, trees_device,
// RandomizedTreeEngine.cu_vectorTrees,
// desc, (10+MAX_LABEL_NUMBER),MaxNumber*treeNum,
// sizeof(float) * (10+MAX_LABEL_NUMBER)));
// trees_device.filterMode=hipFilterModePoint;
//CUDA_CALL(hipBindTexture2D( NULL, trees_device_1D,
// RandomizedTreeEngine.cu_vectorTrees,
//(10+MAX_LABEL_NUMBER)*MaxNumber*treeNum*sizeof(float)));
//CUDA_CALL(hipBindTexture2D( NULL, currentDepthImg,
// data->cu_depthImage,
// desc, width,height,
// sizeof(float) * width));
delete []host_vectorTrees;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
for (int i=0;i<treeNum;i++)
{
for (int j=0;j<MaxNumber;j++)
{
delete data->host_trees[i][j];
}
delete data->host_trees[i];
}
delete []data->host_trees;
/*cout<<"tree 0 on GPU\n";
outputTree<<<(data->MaxNumber)/256+1,256>>>(data->cu_vectorTrees,data->MaxNumber,0,data->labelNum);*/
//CUDA_CALL( hipMalloc(&data->trees, data->max_num_of_trees_in_the_forest * sizeof(Node_GPU**)) );
//for (int i=0;i<treeNum;i++)
//{
// data->trees[i]=new Node_GPU *[MaxNumber];
// for (int j=0;j<MaxNumber;j++)
// {
// data->trees[i][j]=new Node_GPU();
// }
// convertTrees2Array(trees_cpu[i],data->trees[i],0,labelNum);
//}
//test
//Node_GPU*** testTree=new Node_GPU**[data->max_num_of_trees_in_the_forest];
//CUDA_CALL(hipMemcpy(testTree,data->trees,data->max_num_of_trees_in_the_forest*sizeof(Node_GPU **),hipMemcpyDeviceToHost));
//for (int i=0;i<treeNum;i++)
//{
// for (int j=0;j<MaxNumber;j++)
// {
// if (data->host_trees[i][j]->l_child_ind==-1&&data->host_trees[i][j]->r_child_ind==-1)
// {
// continue;
// }
// //for (int k=0;k<labelNum;k++)
// if (j==0)
// {
// {
// cout<<"groundtruth: "<<data->host_trees[i][j]->l_child_ind<<endl;
// }
// //for (int k=0;k<labelNum;k++)
// {
// cout<<"current: "<<testTree[i][j]->l_child_ind<<endl;
// }
// }
//
//
// }
//
//}
/*Node_GPU *curent=testTree[0][0];
while(1)
{
cout<<curent->l_child_ind<<" "<<curent->r_child_ind<<endl;
if (curent->l_child_ind==-1&&curent->r_child_ind==-1)
{
cout<<curent->label<<endl;
break;
}
else if(1)
{
if (curent->l_child_ind<0)
{
break;
}
curent=testTree[0][curent->l_child_ind];
}
else
{
if (curent->r_child_ind<0)
{
break;
}
curent=testTree[0][curent->r_child_ind];
}
}
cout<<"test done!\n";*/
//data->trees=new Node_GPU[(1-pow(2,_max_depth)/(-1))];
if(!withDepth)
{
CUDA_CALL( hipMalloc(&data->cu_currentImage, MPN * sizeof(float)) );
}
else
{
CUDA_CALL( hipMalloc(&data->cu_colorImage, MPN * sizeof(float)) );
CUDA_CALL( hipMalloc(&data->cu_depthImage, MPN * sizeof(float)) );
}
CUDA_CALL( hipMalloc(&data->cu_LabelResult, MPN*(1+MAX_LABEL_NUMBER) * sizeof(float)) );
//CUDA_CALL( hipMalloc(&data->cu_LabelResultEachTree, treeNum*MPN*(1+MAX_LABEL_NUMBER) * sizeof(float)) );
/*CUDA_CALL(hipBindTexture( NULL, detectionResult1D,
treeNum*MPN*(1+MAX_LABEL_NUMBER) * sizeof(float));*/
cout<<"labelResult GPU set"<<MPN*(1+MAX_LABEL_NUMBER)<<endl;
// maximumWidth=640;
CUDA_CALL( hipMalloc(&data->cu_LabelFullResult, MPN*(1+MAX_LABEL_NUMBER) * sizeof(float)) );
CUDA_CALL(hipBindTexture2D( NULL, detectionResult2D,
data->cu_LabelFullResult,
desc, 480 ,(1+MAX_LABEL_NUMBER)*640,
sizeof(float) * 480));
currentImg.filterMode=hipFilterModePoint;
currentColorImg.filterMode=hipFilterModePoint;
currentDepthImg.filterMode=hipFilterModePoint;
//data->host_LabelResult=new LabelResult[MPN];
}
__device__ void getProb(float *trees,int treeInd,int *pos,int *ind,int currentInd,int MaxNumber)
{
//RandmizedTree_CUDA *data=&RandomizedTreeEngine;
//Node_GPU *current=&(root[currentInd]);
// printf("getProb in\n");
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
int startInd;
//int currentInd=treeInd;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/* printf("leafNode \n");
for (int i=0;i<6;i++)
{
printf("%f ",trees[startInd+10+i]);
}
printf("\n");*/
ind[0]=currentInd;
ind[1]=label;
break;
}
if (tex2D(currentImg,pos1[0]+pos[0],pos1[1]+pos[1])>
tex2D(currentImg,pos2[0]+pos[0],pos2[1]+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
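// Depth-aware variants below: the probe offsets are multiplied by curDepth = 1/depth(centre pixel) so the
// comparison window scales with the stored depth; *_depth compares depth values, *_color compares intensities,
// and *_depth_color picks the channel from the sign of the stored threshold.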
__device__ void getProb_depth_depth(float *trees,int treeInd,int *pos,int *ind,int currentInd,int MaxNumber)
{
//RandmizedTree_CUDA *data=&RandomizedTreeEngine;
//Node_GPU *current=&(root[currentInd]);
// printf("getProb in\n");
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
int startInd;
//int currentInd=treeInd;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/* printf("leafNode \n");
for (int i=0;i<6;i++)
{
printf("%f ",trees[startInd+10+i]);
}
printf("\n");*/
ind[0]=currentInd;
ind[1]=label;
break;
}
float curDepth=1.0f/tex2D(currentDepthImg,pos[0],pos[1]);
//according to the train style
//if (trainStyle==0)
{
if (tex2D(currentDepthImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentDepthImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
__device__ void getProb_depth_color(float *trees,int treeInd,int *pos,int *ind,int currentInd,int MaxNumber)
{
//RandmizedTree_CUDA *data=&RandomizedTreeEngine;
//Node_GPU *current=&(root[currentInd]);
// printf("getProb in\n");
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
int startInd;
//int currentInd=treeInd;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/* printf("leafNode \n");
for (int i=0;i<6;i++)
{
printf("%f ",trees[startInd+10+i]);
}
printf("\n");*/
ind[0]=currentInd;
ind[1]=label;
break;
}
float curDepth=1.0f/tex2D(currentDepthImg,pos[0],pos[1]);
//according to the train style
{
if (tex2D(currentColorImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentColorImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
__device__ void getProb_depth_depth_color(float *trees,int treeInd,int *pos,int *ind,int currentInd,int MaxNumber)
{
//RandmizedTree_CUDA *data=&RandomizedTreeEngine;
//Node_GPU *current=&(root[currentInd]);
// printf("getProb in\n");
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
int startInd;
//int currentInd=treeInd;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/* printf("leafNode \n");
for (int i=0;i<6;i++)
{
printf("%f ",trees[startInd+10+i]);
}
printf("\n");*/
ind[0]=currentInd;
ind[1]=label;
break;
}
float curDepth=1.0f/tex2D(currentDepthImg,pos[0],pos[1]);
{
if (threshold>=0)
{
if (tex2D(currentDepthImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentDepthImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
else
{
if (tex2D(currentColorImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentColorImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1]))
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
}
__device__ void getProb_depth(float *trees,int treeInd,int *pos,int *ind,int currentInd,int MaxNumber,int trainStyle)
{
//RandmizedTree_CUDA *data=&RandomizedTreeEngine;
//Node_GPU *current=&(root[currentInd]);
// printf("getProb in\n");
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
int startInd;
//int currentInd=treeInd;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/* printf("leafNode \n");
for (int i=0;i<6;i++)
{
printf("%f ",trees[startInd+10+i]);
}
printf("\n");*/
ind[0]=currentInd;
ind[1]=label;
break;
}
float curDepth=1.0f/tex2D(currentDepthImg,pos[0],pos[1]);
//according to the train style
if (trainStyle==0)
{
if (tex2D(currentDepthImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentDepthImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
else if (trainStyle==1)
{
if (tex2D(currentColorImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentColorImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
else
{
if (threshold>=0)
{
if (tex2D(currentDepthImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentDepthImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
else
{
if (tex2D(currentColorImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentColorImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1]))
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
}
__device__ void getProb_depth_textureTrees(int treeInd,int *pos,int *ind,int currentInd,int MaxNumber,int trainStyle)
{
//RandmizedTree_CUDA *data=&RandomizedTreeEngine;
//Node_GPU *current=&(root[currentInd]);
// printf("getProb in\n");
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int initialInd=treeInd*MaxNumber;
int startInd;
//int currentInd=treeInd;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
//2d texture
startInd=initialInd+currentInd;
pos1[0]=tex2D(trees_device,0,startInd);//trees[startInd+0];
pos1[1]=tex2D(trees_device,1,startInd);//trees[startInd+1];
pos2[0]=tex2D(trees_device,2,startInd);//trees[startInd+2];
pos2[1]=tex2D(trees_device,3,startInd);//trees[startInd+3];
label=tex2D(trees_device,4,startInd);//trees[startInd+4];
l_child_ind=tex2D(trees_device,6,startInd);
r_child_ind=tex2D(trees_device,7,startInd);
threshold=tex2D(trees_device,9,startInd);//trees[startInd+9];
/* l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];*/
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/* printf("leafNode \n");
for (int i=0;i<6;i++)
{
printf("%f ",trees[startInd+10+i]);
}
printf("\n");*/
ind[0]=currentInd;
ind[1]=label;
break;
}
float curDepth=1.0f/tex2D(currentDepthImg,pos[0],pos[1]);
//according to the train style
if (trainStyle==0)
{
if (tex2D(currentDepthImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentDepthImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
else if (trainStyle==1)
{
if (tex2D(currentColorImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentColorImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
else
{
if (threshold>=0)
{
if (tex2D(currentDepthImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentDepthImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
else
{
if (tex2D(currentColorImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentColorImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1]))
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
}
//suppose the maximum class number is 10
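// One thread per pixel: descend every tree to a leaf, accumulate the leaf class histograms,
// then write the argmax label followed by the per-class averages (sum/treeNum) into result.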
__global__ void predict_prob(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,int MaxNumber)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<width*height)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
int i,j;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
double label_prob_all[MAX_LABEL_NUMBER];
		for (int i=0;i<labelNum;i++)
		{
			label_prob_all[i]=0;
		}
int ind[2]={0,0};
int startInd;
//for (i=0;i<treeNum;i++)
for (i=0;i<treeNum;i++)
{
getProb(trees,i,pos,ind,0,MaxNumber);
startInd=i*(10+MAX_LABEL_NUMBER)*MaxNumber+ind[0]*(10+MAX_LABEL_NUMBER);
//printf("LI: %d, RI: %d",trees[startInd+6],trees[startInd+7]);
if (trees[startInd+6]==-1&&trees[startInd+7]==-1) //reach a leaf
{
for (j=0;j<labelNum;j++)
{
label_prob_all[j]+=trees[startInd+10+j];
}
//label_prob_all[j]/=(float)treeNum;
}
}
////find the most frequent label
float maxInd=0;
double maxNum=-1;
for (i=0;i<labelNum;i++)
{
if (label_prob_all[i]>maxNum)
{
maxNum=label_prob_all[i];
maxInd=i;
}
}
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=maxInd;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=label_prob_all[i]/(float)treeNum;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
}
}
__global__ void predict_prob_withDepth_textureTrees(float *result,int labelNum,int treeNum,int width,int height,int windowSize,LeafNode_GPU *leaf,int MaxNumber,int trainStyle)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<width*height)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
if(tex2D(currentDepthImg,pos[0],pos[1])==0)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=labelNum-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (int i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
int i,j;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
double label_prob_all[MAX_LABEL_NUMBER];
for (int i=0;i<labelNum;i++)
{
label_prob_all[i]=0;
}
int ind[2]={0,0};
int startInd;
//about 210ms
for (i=0;i<treeNum;i++)
{
getProb_depth_textureTrees(i,pos,ind,0,MaxNumber,trainStyle);
startInd=i*MaxNumber+ind[0];
//printf("LI: %d, RI: %d",trees[startInd+6],trees[startInd+7]);
if (tex2D(trees_device,6,startInd)==-1&&tex2D(trees_device,7,startInd)==-1) //reach a leaf
{
for (j=0;j<labelNum;j++)
{
label_prob_all[j]+=tex2D(trees_device,10+j,startInd);//trees[startInd+10+j];
}
//label_prob_all[j]/=(float)treeNum;
}
}
////find the most frequent label
float maxInd=0;
double maxNum=-1;
for (i=0;i<labelNum;i++)
{
if (label_prob_all[i]>maxNum)
{
maxNum=label_prob_all[i];
maxInd=i;
}
}
//about 20ms
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=maxInd;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=label_prob_all[i]/(float)treeNum;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
}
}
__global__ void predict_prob_withDepth_depth_color(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,int MaxNumber)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<width*height)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
if(tex2D(currentDepthImg,pos[0],pos[1])==0)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=labelNum-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (int i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
int i,j;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
double label_prob_all[MAX_LABEL_NUMBER];
for (int i=0;i<labelNum;i++)
{
label_prob_all[i]=0;
}
int ind[2]={0,0};
int startInd;
//about 210ms
for (i=0;i<treeNum;i++)
{
getProb_depth_depth_color(trees,i,pos,ind,0,MaxNumber);
startInd=i*(10+MAX_LABEL_NUMBER)*MaxNumber+ind[0]*(10+MAX_LABEL_NUMBER);
if (trees[startInd+6]==-1&&trees[startInd+7]==-1) //reach a leaf
{
for (j=0;j<labelNum;j++)
{
label_prob_all[j]+=trees[startInd+10+j];
}
}
}
//return;
////find the most frequent label
float maxInd=0;
double maxNum=-1;
for (i=0;i<labelNum;i++)
{
if (label_prob_all[i]>maxNum)
{
maxNum=label_prob_all[i];
maxInd=i;
}
}
//about 20ms, very time consuming
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
//{
result[currentInd]=maxInd;
// tex2D(detectionResult2D,labelNum*height+pos[0],pos[1])=maxInd;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=label_prob_all[i]/(float)treeNum;
// tex2D(detectionResult2D,i*height+pos[0],pos[1])=label_prob_all[i]/(float)treeNum;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
//}
}
}
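// Debug helper: copies one value per pixel from the texture-bound detection result
// (detectionResult2D) into a linear buffer. The fetch uses (pos[1],pos[0]) order,
// consistent with the 480-wide binding of detectionResult2D in setData_preprocess.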
__global__ void getProbMap(float *result,int labelNum,int treeNum,int width,int height,int windowSize)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<width*height)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
result[offset]=tex2D(detectionResult2D,pos[1],pos[0]);
}
}
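// Same per-pixel scheme as the kernel above, but every split test compares the color
// texture at depth-normalized offsets (see getProb_depth_color).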
__global__ void predict_prob_withDepth_color(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,int MaxNumber)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<width*height)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
if(tex2D(currentDepthImg,pos[0],pos[1])==0)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=labelNum-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (int i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
int i,j;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
double label_prob_all[MAX_LABEL_NUMBER];
for (int i=0;i<labelNum;i++)
{
label_prob_all[i]=0;
}
int ind[2]={0,0};
int startInd;
//about 210ms
for (i=0;i<treeNum;i++)
{
getProb_depth_color(trees,i,pos,ind,0,MaxNumber);
startInd=i*(10+MAX_LABEL_NUMBER)*MaxNumber+ind[0]*(10+MAX_LABEL_NUMBER);
if (trees[startInd+6]==-1&&trees[startInd+7]==-1) //reach a leaf
{
for (j=0;j<labelNum;j++)
{
label_prob_all[j]+=trees[startInd+10+j];
}
}
}
// return;
////find the most frequent label
int maxInd=0;
double maxNum=-1;
for (i=0;i<labelNum;i++)
{
if (label_prob_all[i]>maxNum)
{
maxNum=label_prob_all[i];
maxInd=i;
}
}
//about 20ms
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=maxInd;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=label_prob_all[i]/(float)treeNum;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
}
}
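// Depth-feature-only variant: split tests compare the depth texture at depth-normalized
// offsets (see getProb_depth_depth); output layout is the same as above.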
__global__ void predict_prob_withDepth_depth(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,int MaxNumber)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<width*height)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
if(tex2D(currentDepthImg,pos[0],pos[1])==0)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=labelNum-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (int i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
int i,j;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
double label_prob_all[MAX_LABEL_NUMBER];
for (int i=0;i<labelNum;i++)
{
label_prob_all[i]=0;
}
int ind[2]={0,0};
int startInd;
//about 210ms
for (i=0;i<treeNum;i++)
{
getProb_depth_depth(trees,i,pos,ind,0,MaxNumber);
startInd=i*(10+MAX_LABEL_NUMBER)*MaxNumber+ind[0]*(10+MAX_LABEL_NUMBER);
if (trees[startInd+6]==-1&&trees[startInd+7]==-1) //reach a leaf
{
for (j=0;j<labelNum;j++)
{
label_prob_all[j]+=trees[startInd+10+j];
}
}
}
//return;
////find the most frequent label
float maxInd=0;
double maxNum=-1;
for (i=0;i<labelNum;i++)
{
if (label_prob_all[i]>maxNum)
{
maxNum=label_prob_all[i];
maxInd=i;
}
}
//about 20ms
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=maxInd;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=label_prob_all[i]/(float)treeNum;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
}
}
//suppose the maximum class number is 10
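// Generic variant: trainStyle selects the node test at runtime inside getProb_depth
// (0 = depth feature, 1 = color feature, 2 = mixed, where the sign of the stored
// threshold decides between a depth test and a color test).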
__global__ void predict_prob_withDepth(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,int MaxNumber,int trainStyle)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<width*height)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
if(tex2D(currentDepthImg,pos[0],pos[1])==0)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=labelNum-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (int i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
int i,j;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
double label_prob_all[MAX_LABEL_NUMBER];
for (int i=0;i<labelNum;i++)
{
label_prob_all[i]=0;
}
int ind[2]={0,0};
int startInd;
//about 210ms
for (i=0;i<treeNum;i++)
{
getProb_depth(trees,i,pos,ind,0,MaxNumber,trainStyle);
startInd=i*(10+MAX_LABEL_NUMBER)*MaxNumber+ind[0]*(10+MAX_LABEL_NUMBER);
if (trees[startInd+6]==-1&&trees[startInd+7]==-1) //reach a leaf
{
for (j=0;j<labelNum;j++)
{
label_prob_all[j]+=trees[startInd+10+j];
}
}
}
// return;
////find the most frequent label
float maxInd=0;
double maxNum=-1;
for (i=0;i<labelNum;i++)
{
if (label_prob_all[i]>maxNum)
{
maxNum=label_prob_all[i];
maxInd=i;
}
}
//about 20ms
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=maxInd;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=label_prob_all[i]/(float)treeNum;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
}
}
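// Per-tree variant: a 2D grid is used, blockIdx.y selects the tree and each thread
// traverses that single tree for one pixel. Only the traversal is active here; the
// accumulation of the leaf histograms into result is still commented out, so the
// kernel currently writes output only for rejected pixels (zero depth or border),
// and only from the treeId==0 threads.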
__global__ void predict_prob_withDepth_eachTree(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,int MaxNumber,int trainStyle)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
int treeId=blockIdx.y;
if (offset<width*height&&treeId<treeNum)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
if(tex2D(currentDepthImg,pos[0],pos[1])==0)
{
if (treeId==0)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=labelNum-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (int i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
}
return;
}
int i,j;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
if (treeId==0)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
					result[currentInd+0]=labelNum-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
}
return;
}
//double label_prob_all[MAX_LABEL_NUMBER];
//for (int i=0;i<labelNum;i++)
//{
// label_prob_all[i]=0;
//}
int ind[2]={0,0};
int startInd;
////about 210ms
////for (i=0;i<treeNum;i++)
//{
getProb_depth(trees,treeId,pos,ind,0,MaxNumber,trainStyle);
startInd=treeId*(10+MAX_LABEL_NUMBER)*MaxNumber+ind[0]*(10+MAX_LABEL_NUMBER);
// //printf("LI: %d, RI: %d",trees[startInd+6],trees[startInd+7]);
//if (trees[startInd+6]==-1&&trees[startInd+7]==-1) //reach a leaf
//{
// int currentInd=(treeId*MPN+offset)*(1+MAX_LABEL_NUMBER);
// for (i=0;i<labelNum;i++)
// {
// //lockEXP.lock();
// result[currentInd+1+i]=trees[startInd+10+i];
// //lockEXP.unlock();
// }
//}
}
}
////find the label index with the maximum probability for each pixel
////1+MAX_LABEL_NUMBER
//__global__ findMaximumProb(float *detectionResult,int width,int height)
//{
// __shared__ float cache[512];
// int tid=blockIdx.x*blockDim.x+threadIdx.x;
// int cacheIndex=threadIdx.x;
//
// int tmpLabel;
//
// int maximumID;
// if (tid<width*height)
// {
// tmpLabel=detectionResult[];
// cache[cacheIndex]
// }
//}
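// Helper for the "fully parallel" variants: walks one tree (treeInd) for every pixel in
// a strided loop (startPixelInd, pixelStep) over the intensity texture, and accumulates
// the leaf class histogram into result with atomicAdd (already divided by treeNum).
// Only referenced from a commented-out call in predict_prob_fullParral, which inlines
// the same loop.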
__device__ void getProb_FP(float *trees,int treeInd,int startPixelInd,int pixelStep,int width, int height, int MaxNumber,float *result,int labelNum,int treeNum)
{
int i=0;
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;//;=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
int startInd;
int currentInd;
int pos[2];
int times=0;
for (i=startPixelInd;i<width*height;i+=pixelStep)
{
//int currentInd=treeInd;
currentInd=0;
pos[0]=i%width;
pos[1]=i/width;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/*ind[0]=currentInd;
ind[1]=label;*/
for (int k=1;k<=labelNum;k++)
{
atomicAdd(&result[i*(1+MAX_LABEL_NUMBER)+k],trees[startInd+9+k]/treeNum);
}
break;
}
if (tex2D(currentImg,pos1[0]+pos[0],pos1[1]+pos[1])>
tex2D(currentImg,pos2[0]+pos[0],pos2[1]+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
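// One thread per (pixel, tree) pair: threadIdx.x selects the tree, the 2D block index
// encodes the pixel. Each thread walks its tree and atomically accumulates the leaf
// histogram into the per-pixel result slots.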
__global__ void predict_prob_fullParral_pixel_tree(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,const int MaxNumber)
{
//int offset=threadIdx.x+blockIdx.x*blockDim.x;
int treeInd=threadIdx.x;
int PixelInd=blockIdx.y*gridDim.x+blockIdx.x;
//int pixelStep=blockDim.x;
//const int TreeSize=MaxNumber*(10+10);
//const int TreeSize=MaxNumber*(10+labelNum);
//__shared__ float LocalParameters[32767*20];
//int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;//;=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
//for (int ind=0;ind<(10+MAX_LABEL_NUMBER)*MaxNumber;ind++)
//{
// LocalParameters[ind]=trees[initialInd+ind];
//}
//__syncthreads();
//return;
if (treeInd<treeNum&&PixelInd<width*height)
{
int i=PixelInd;
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int startInd;
int currentInd;
int pos[2];
int k;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;//;=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;;
//int times=0;
//int currentInd=treeInd;
currentInd=0;
pos[0]=i%width;
pos[1]=i/width;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
int currentInd=i*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/*ind[0]=currentInd;
ind[1]=label;*/
for (k=1;k<=labelNum;k++)
{
atomicAdd(&result[i*(1+MAX_LABEL_NUMBER)+k],trees[startInd+9+k]/treeNum);
}
break;
}
if (tex2D(currentImg,pos1[0]+pos[0],pos1[1]+pos[1])>
tex2D(currentImg,pos2[0]+pos[0],pos2[1]+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
//suppose the maximum class number is 10
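// One block per tree: blockIdx.x selects the tree and the threads of the block stride
// over all pixels (start = threadIdx.x, step = blockDim.x), accumulating leaf
// histograms into result with atomicAdd.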
__global__ void predict_prob_fullParral(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,const int MaxNumber)
{
//int offset=threadIdx.x+blockIdx.x*blockDim.x;
int treeInd=blockIdx.x;
int startPixelInd=threadIdx.x;
int pixelStep=blockDim.x;
//const int TreeSize=MaxNumber*(10+10);
//const int TreeSize=MaxNumber*(10+labelNum);
//__shared__ float LocalParameters[32767*20];
//int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;//;=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
//for (int ind=0;ind<(10+MAX_LABEL_NUMBER)*MaxNumber;ind++)
//{
// LocalParameters[ind]=trees[initialInd+ind];
//}
//__syncthreads();
//return;
if (treeInd<treeNum)
{
int i=0;
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int startInd;
int currentInd;
int pos[2];
int k;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;//;=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;;
//int times=0;
for (i=startPixelInd;i<width*height;i+=pixelStep)
{
//int currentInd=treeInd;
currentInd=0;
pos[0]=i%width;
pos[1]=i/width;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/*ind[0]=currentInd;
ind[1]=label;*/
for (k=1;k<=labelNum;k++)
{
atomicAdd(&result[i*(1+MAX_LABEL_NUMBER)+k],trees[startInd+9+k]/treeNum);
}
break;
}
if (tex2D(currentImg,pos1[0]+pos[0],pos1[1]+pos[1])>
tex2D(currentImg,pos2[0]+pos[0],pos2[1]+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
//getProb_FP(trees,treeInd,startPixelInd,pixelStep,width,height,MaxNumber);
// int pos[2];
// pos[0]=offset%width;
// pos[1]=offset/width;
// int i,j;
// if (pos[0]<=windowSize||pos[0]>=width-windowSize||
// pos[1]<=windowSize||pos[1]>=height-windowSize)
// {
// int currentInd=offset*(1+MAX_LABEL_NUMBER);
// //if (label_prob_all[maxInd]>threshold)
// {
// result[currentInd+0]=-1;
// //result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
// for (i=0;i<labelNum;i++)
// {
// result[currentInd+1+i]=0;
// }
// //cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
// }
// return;
// }
// double label_prob_all[MAX_LABEL_NUMBER];
///* for (int i=0;i<labelNum;i++)
// {
// label_prob_all[i]=0;
// }*/
// int ind[2]={0,0};
// int startInd;
// //for (i=0;i<treeNum;i++)
// for (i=0;i<treeNum;i++)
// {
// label_prob_all[i]=0;
// getProb(trees,i,pos,ind,0,MaxNumber);
// startInd=i*(10+MAX_LABEL_NUMBER)*MaxNumber+ind[0]*(10+MAX_LABEL_NUMBER);
// //printf("LI: %d, RI: %d",trees[startInd+6],trees[startInd+7]);
// if (trees[startInd+6]==-1&&trees[startInd+7]==-1) //reach a leaf
// {
//
// for (j=0;j<labelNum;j++)
// {
// label_prob_all[j]+=trees[startInd+10+j];
// }
// //label_prob_all[j]/=(float)treeNum;
// }
// }
//
// ////find the most frequent label
// int maxInd=0;
// double maxNum=-1;
// for (i=0;i<labelNum;i++)
// {
// if (label_prob_all[i]>maxNum)
// {
// maxNum=label_prob_all[i];
// maxInd=i;
// }
// }
// int currentInd=offset*(1+MAX_LABEL_NUMBER);
// //if (label_prob_all[maxInd]>threshold)
// {
// result[currentInd+0]=maxInd;
// //result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
// for (i=0;i<labelNum;i++)
// {
// result[currentInd+1+i]=label_prob_all[i]/(float)treeNum;
// }
// //cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
// }
//}
}
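// Host-side upload helpers: copy the current frame(s) into device buffers and bind the
// 2D float textures used by the kernels. The single-image overload binds currentImg,
// the color+depth overload binds currentColorImg/currentDepthImg; both bind only on the
// first call (hasImage), whereas setData_RT_onrun below rebinds on every call.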
void setData_onrun(float *hostImg,int width,int height)
{
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
CUDA_CALL(hipMemcpy(data->cu_currentImage,hostImg,MPN*sizeof(float),hipMemcpyHostToDevice));
if (!data->hasImage)
{
CUDA_CALL(hipBindTexture2D( NULL, currentImg,
data->cu_currentImage,
desc, width,height,
sizeof(float) * width));
data->hasImage=true;
}
}
void setData_onrun(float *colorImg,float *depthImg,int width,int height)
{
//cout<<"setting data on the fly"<<endl;
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
CUDA_CALL(hipMemcpy(data->cu_colorImage,colorImg,MPN*sizeof(float),hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(data->cu_depthImage,depthImg,MPN*sizeof(float),hipMemcpyHostToDevice));
if (!data->hasImage)
{
CUDA_CALL(hipBindTexture2D( NULL, currentColorImg,
data->cu_colorImage,
desc, width,height,
sizeof(float) * width));
CUDA_CALL(hipBindTexture2D( NULL, currentDepthImg,
data->cu_depthImage,
desc, width,height,
sizeof(float) * width));
data->hasImage=true;
}
}
extern "C" void setData_RT_onrun(float *colorImg,float *depthImg,int width,int height)
{
//cout<<"setting data on the fly"<<endl;
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
CUDA_CALL(hipMemcpy(data->cu_colorImage,colorImg,MPN*sizeof(float),hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(data->cu_depthImage,depthImg,MPN*sizeof(float),hipMemcpyHostToDevice));
//if (!data->hasImage)
{
CUDA_CALL(hipBindTexture2D( NULL, currentColorImg,
data->cu_colorImage,
desc, width,height,
sizeof(float) * width));
CUDA_CALL(hipBindTexture2D( NULL, currentDepthImg,
data->cu_depthImage,
desc, width,height,
sizeof(float) * width));
//data->hasImage=true;
}
}
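// Intensity-only entry point: uploads the image, launches predict_prob with 64 threads
// per block, reports the kernel time via CUDA events and copies the per-pixel
// label/probability buffer back into host_result.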
extern "C" void predict_GPU(float *host_img,int width,int height,float *host_result)
{
//load the trained tree
setData_onrun(host_img,width,height);
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
dim3 grid(width,height,1);
hipEvent_t start, stop;
CUDA_CALL(hipEventCreate(&start));
CUDA_CALL(hipEventCreate(&stop));
CUDA_CALL(hipEventRecord( start, 0 ));
hipLaunchKernelGGL(( predict_prob), dim3(width*height/64+1),dim3(64), 0, 0, data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
//predict_prob_fullParral<<<data->max_num_of_trees_in_the_forest,64>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
//predict_prob_fullParral_pixel_tree<<<grid,16>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
CUDA_CALL(hipEventRecord( stop, 0 ));
CUDA_CALL(hipEventSynchronize( stop ));
float elapsedTime;
CUDA_CALL( hipEventElapsedTime( &elapsedTime,
start, stop ) );
cout<<"time: "<<elapsedTime<<" ms"<<endl;
//CUDA_CALL(hipMemcpy(data->cu_LabelResult,host_result,MPN*sizeof(LabelResult),hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(host_result,data->cu_LabelResult, MPN*(1+MAX_LABEL_NUMBER)*sizeof(float),hipMemcpyDeviceToHost));
}
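// Depth-aware entry point: uploads color and depth, picks the kernel that matches
// trainStyle (0 = depth features, 1 = color features, 2 = mixed) and copies the result
// back. A minimal usage sketch (hypothetical caller code -- the buffer names and the
// 640x480 size are assumptions, not taken from this file):
//
//   float *color = ..., *depth = ...;                    // width*height floats each
//   float *host_result = new float[MPN*(1+MAX_LABEL_NUMBER)];
//   predict_GPU_withDepth(color, depth, 640, 480, host_result, 2);
//   // host_result[p*(1+MAX_LABEL_NUMBER)]       -> predicted label of pixel p
//   // host_result[p*(1+MAX_LABEL_NUMBER)+1+c]   -> averaged probability of class c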
extern "C" void predict_GPU_withDepth(float *color_img,float *depth_img,int width,int height,float *host_result,int trainStyle)
{
//load the trained tree
setData_onrun(color_img,depth_img,width,height);
int threadsPerBlock;
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
hipEvent_t start, stop;
CUDA_CALL(hipEventCreate(&start));
CUDA_CALL(hipEventCreate(&stop));
CUDA_CALL(hipEventRecord( start, 0 ));
threadsPerBlock=256;
if (trainStyle==0)
{
hipLaunchKernelGGL(( predict_prob_withDepth_depth), dim3(width*height/threadsPerBlock+1),dim3(threadsPerBlock), 0, 0, data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
}
else if (trainStyle==1)
{
hipLaunchKernelGGL(( predict_prob_withDepth_color), dim3(width*height/threadsPerBlock+1),dim3(threadsPerBlock), 0, 0, data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
}
else if (trainStyle==2)
{
hipLaunchKernelGGL(( predict_prob_withDepth_depth_color), dim3(width*height/threadsPerBlock+1),dim3(threadsPerBlock), 0, 0, data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
}
// predict_prob_withDepth<<<width*height/threadsPerBlock+1,threadsPerBlock>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber,trainStyle);
//each tree goes independently first
//threadsPerBlock=256;
////Lock lock;
//dim3 grid(width*height/threadsPerBlock+1,data->max_num_of_trees_in_the_forest,1);
//predict_prob_withDepth_eachTree<<<grid,threadsPerBlock>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber,trainStyle);
//predict_prob_fullParral<<<data->max_num_of_trees_in_the_forest,64>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
//predict_prob_fullParral_pixel_tree<<<grid,16>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
//predict_prob_withDepth_textureTrees<<<width*height/threadsPerBlock+1,threadsPerBlock>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
//data->windowSize,(data->leafnode),data->MaxNumber,trainStyle);
CUDA_CALL(hipEventRecord( stop, 0 ));
CUDA_CALL(hipEventSynchronize( stop ));
float elapsedTime;
CUDA_CALL( hipEventElapsedTime( &elapsedTime,
start, stop ) );
cout<<"time: "<<elapsedTime<<" ms"<<endl;
////////////////////////////////////////////////////////////
//find the maximum probability for each label
////////////////////////////////////////////////////////////
//CUDA_CALL(hipMemcpy(data->cu_LabelResult,host_result,MPN*sizeof(LabelResult),hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(host_result,data->cu_LabelResult, MPN*(1+MAX_LABEL_NUMBER)*sizeof(float),hipMemcpyDeviceToHost));
}
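// Same kernel dispatch as predict_GPU_withDepth, but without the upload step: the
// caller is expected to have bound the textures already (e.g. via setData_RT_onrun).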
extern "C" void predict_GPU_withDepth_clean(int width,int height,float *host_result,int trainStyle)
{
//load the trained tree
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
dim3 grid(width,height,1);
hipEvent_t start, stop;
CUDA_CALL(hipEventCreate(&start));
CUDA_CALL(hipEventCreate(&stop));
CUDA_CALL(hipEventRecord( start, 0 ));
int threadsPerBlock=256;
if (trainStyle==0)
{
hipLaunchKernelGGL(( predict_prob_withDepth_depth), dim3(width*height/threadsPerBlock+1),dim3(threadsPerBlock), 0, 0, data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
}
else if (trainStyle==1)
{
hipLaunchKernelGGL(( predict_prob_withDepth_color), dim3(width*height/threadsPerBlock+1),dim3(threadsPerBlock), 0, 0, data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
}
else if (trainStyle==2)
{
hipLaunchKernelGGL(( predict_prob_withDepth_depth_color), dim3(width*height/threadsPerBlock+1),dim3(threadsPerBlock), 0, 0, data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
}
/*predict_prob_withDepth<<<width*height/64+1,64>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber,trainStyle);*/
//predict_prob_fullParral<<<data->max_num_of_trees_in_the_forest,64>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
//predict_prob_fullParral_pixel_tree<<<grid,16>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
CUDA_CALL(hipEventRecord( stop, 0 ));
CUDA_CALL(hipEventSynchronize( stop ));
float elapsedTime;
CUDA_CALL( hipEventElapsedTime( &elapsedTime,
start, stop ) );
cout<<"time: "<<elapsedTime<<" ms"<<endl;
	//we only need to find the maximum points and their locations; the rest is not necessary to transfer.
//CUDA_CALL(hipMemcpy(data->cu_LabelResult,host_result,MPN*sizeof(LabelResult),hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(host_result,data->cu_LabelResult, MPN*(1+MAX_LABEL_NUMBER)*sizeof(float),hipMemcpyDeviceToHost));
}
//training examples copy
//training data: images
//cu_currentInterestIndex: current index list: [x1+ y1*width] and [x2 +y2*width]
extern "C" void setData_Training_Preprocess(float *trainingData,int num,int maximumDepth)
{
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
CUDA_CALL( hipMalloc(&data->cu_trainingData, num * sizeof(float)) );
CUDA_CALL(hipMemcpy(data->cu_trainingData,trainingData,num*sizeof(float),hipMemcpyHostToDevice));
//100d
CUDA_CALL( hipMalloc(&data->cu_currentInterestIndex, maximumDepth*100*2 * sizeof(int)) );
}
//testData: current training data
//indexList: candidate lists
//sampleNum: the number of candidates
//startIndex: starting index of each image
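// NOTE: unfinished training-side stub. The intended kernel evaluates one candidate
// threshold per blockIdx.y over all samples to compute a split gain; the body below
// only sketches the left/right partition and does not yet write any output.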
__global__ void getGain(float *testData,int *indexList,int sampleNum,int *startingIndex)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
	int threshold=blockIdx.y;
int i;
int l_size,r_size;
l_size=r_size=0;
float labelNum[10];
int c_startIndex;
for(i=0;i<sampleNum;i++)
{
c_startIndex=startingIndex[i];
		if (testData[c_startIndex+indexList[i]]>testData[c_startIndex+indexList[i+sampleNum]]+threshold)
{
}
}
}
//currentInd: [x1 y1] [x2 y2]
//length: the number of candidates
extern "C" void Split_MaximumGain_GPU(int *currentInd,int length)
{
//RandmizedTree_CUDA *data=&RandomizedTreeEngine;
//CUDA_CALL(hipMemcpy(data->cu_currentInterestIndex,currentInd,num*sizeof(float),hipMemcpyHostToDevice));
//
////calculate gain for each possible criteria
////try every possible value first, then range
//dim3 dim(length/32+1,255);
	//getGain<<<dim,32>>>(data->cu_trainingData,data->cu_currentInterestIndex,length);
} | 915d0ad0c895123442d9440063410ba0d175c58c.cu | #include <string>
#include <fstream>
#include "CUDA_basic.h"
#include <math.h>
//#include <cutil.h>
#include <helper_cuda.h>
#include <helper_math.h>
//#include "sharedDefination.h"
using namespace std;
#include "RandomizedTreeGPU.h"
RandmizedTree_CUDA RandomizedTreeEngine;
texture<float,2> currentImg;
texture<float,2> currentColorImg,currentDepthImg;
texture<float,2,cudaReadModeElementType> trees_device;
texture<float,2> detectionResult2D;
int maximumWidth;
//texture<float> trees_device_1D;
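// Simple device-side spin lock: lock() busy-waits on atomicCAS until it flips the
// mutex from 0 to 1; unlock() resets it to 0.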
struct Lock {
int *mutex;
Lock( void ) {
int state = 0;
CUDA_CALL( cudaMalloc( (void**)& mutex,
sizeof(int) ) );
CUDA_CALL( cudaMemcpy( mutex, &state, sizeof(int),
cudaMemcpyHostToDevice ) );
}
~Lock( void ) {
cudaFree( mutex );
}
__device__ void lock( void ) {
while( atomicCAS( mutex, 0, 1 ) != 0 );
}
__device__ void unlock( void ) {
		atomicExch( mutex, 0 ); // release the lock so the next lock() can acquire it
}
};
double *cu_label_prob_all;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
int currentID;
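// Flattens one CPU tree (Node pointers) into an array of Node_GPU entries using a
// pre-order walk driven by the global counter currentID. Per-node parameter layout,
// as produced here and consumed by the traversal kernels:
//   [0..1] pos1 (x,y) probe offset   [2..3] pos2 (x,y) probe offset
//   [4] label                        [5] tree level
//   [6] left-child index (-1 = none) [7] right-child index (-1 = none)
//   [8] sample count                 [9] threshold
//   [10..10+labelNum-1] per-class counts (filled for leaves only)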
void convertTrees2Array(Node *root_CPU, Node_GPU **root_GPU,int labelNum)
{
//if (root_CPU->label!=-1)
//{
// cout<<currentID<<" "<<root_CPU->label<<endl;
//}
//cout<<currentID<<" ";
int rootID=currentID;
root_GPU[rootID]->parameters[0]=root_CPU->pos1[0];
root_GPU[rootID]->parameters[1]=root_CPU->pos1[1];
root_GPU[rootID]->parameters[2]=root_CPU->pos2[0];
root_GPU[rootID]->parameters[3]=root_CPU->pos2[1];
root_GPU[rootID]->parameters[4]=root_CPU->label;
root_GPU[rootID]->parameters[5]=root_CPU->nLevel;
root_GPU[rootID]->parameters[8]=root_CPU->num_all;
root_GPU[rootID]->parameters[9]=root_CPU->threshold;
if (root_CPU->l_child==NULL)
{
root_GPU[rootID]->parameters[6]=-1;
}
if (root_CPU->r_child==NULL)
{
root_GPU[rootID]->parameters[7]=-1;
}
if (root_CPU->l_child==NULL&&root_CPU->r_child==NULL)//root
{
//root_GPU[currentID]->num_of_each_class=new float[labelNum];
//cout<<"leafe "<<currentID<<endl;
//int ok=false;
for (int i=0;i<labelNum;i++)
{
root_GPU[rootID]->parameters[10+i]=root_CPU->num_of_each_class[i];
/* if (root_CPU->num_of_each_class[i]>0)
{
ok=true;
}*/
}
//if (!ok)
// /*cout<<rootID<<endl;*/
//{
// for (int i=0;i<labelNum;i++)
// {
// cout<<root_CPU->num_of_each_class[i]<<" ";
// }
// cout<<endl;
//}
//tree is clear!
}
if (root_CPU->l_child!=NULL)
{
currentID++;
root_GPU[rootID]->parameters[6]=currentID;
convertTrees2Array(root_CPU->l_child, root_GPU,labelNum);
}
if (root_CPU->r_child!=NULL)
{
currentID++;
root_GPU[rootID]->parameters[7]=currentID;
convertTrees2Array(root_CPU->r_child, root_GPU,labelNum);
}
}
void outputTrees(Node_GPU **tree,int ind,ofstream &out)
{
out<<tree[ind]->parameters[5]<<" ";
out<<tree[ind]->parameters[0]<<" "<<tree[ind]->parameters[1]<<" "<<
tree[ind]->parameters[2]<<" "<<tree[ind]->parameters[3]<<" "<<tree[ind]->parameters[9]<<endl;
if (tree[ind]->parameters[6]==-1&&tree[ind]->parameters[7]==-1)
{
bool ok=false;
for (int i=0;i<RandomizedTreeEngine.labelNum;i++)
{
out<<tree[ind]->parameters[10+i]<<" ";
if (tree[ind]->parameters[10+i]>0)
{
ok=true;
}
}
out<<endl;
if (!ok)
{
cout<<"bad leaf at "<<ind<<endl;
cout<<tree[ind]->parameters[4]<<" "<<tree[ind]->parameters[5]<<endl;
}
}
else
{
if (tree[ind]->parameters[6]!=-1)
{
outputTrees(tree,tree[ind]->parameters[6],out);
}
if (tree[ind]->parameters[7]!=-1)
{
outputTrees(tree,tree[ind]->parameters[7],out);
}
}
}
__global__ void outputTree(float *tree,int MaxNumber,int treeInd,int labelNum)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<MaxNumber)
{
//printf("%d %d\n",offset,(10+MAX_LABEL_NUMBER)*MaxNumber);
int initialInd=(treeInd*MaxNumber+offset)*(10+MAX_LABEL_NUMBER);
//printf("after getting values %d %d\n",tree[initialInd+6],tree[initialInd+7]);
if (tree[initialInd+6]==-1&&tree[initialInd+7]==-1)
{
int i;
for (i=0;i<labelNum;i++)
{
if(tree[initialInd+10+i]>0)
break;
}
if (i==labelNum)
{
for (int i=0;i<labelNum;i++)
{
printf("%f ",tree[initialInd+10+i]);
}
}
printf("\n");
}
}
}
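// One-time setup: flattens every tree with convertTrees2Array, packs the forest into a
// single float array of (10+MAX_LABEL_NUMBER) values per node (tree t, node j starts at
// (t*MaxNumber+j)*(10+MAX_LABEL_NUMBER)), uploads it to cu_vectorTrees, allocates the
// image and result buffers, and binds the detectionResult2D texture.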
extern "C" void setData_preprocess(int _max_depth,int _min_sample_count,double _regression_accuracy, int _max_num_of_trees_in_the_forest,int _windowSize, int labelNum, Node **trees_cpu,int treeNum,bool withDepth)
{
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
data->max_depth=_max_depth;
data->min_sample_count=_min_sample_count;
data->regression_accuracy=_regression_accuracy;
data->max_num_of_trees_in_the_forest=_max_num_of_trees_in_the_forest;
data->windowSize=_windowSize;
data->labelNum=labelNum;
cout<<"begin feeding trees\n";
//conversion the tree structure into array
data->host_trees=new Node_GPU **[data->max_num_of_trees_in_the_forest];
data->MaxNumber=(1-pow((float)2,_max_depth)/(-1));
int MaxNumber=data->MaxNumber;
for (int i=0;i<treeNum;i++)
{
data->host_trees[i]=new Node_GPU *[MaxNumber];
for (int j=0;j<MaxNumber;j++)
{
data->host_trees[i][j]=new Node_GPU();
data->host_trees[i][j]->parameters=new float[10+MAX_LABEL_NUMBER];
data->host_trees[i][j]->parameters[6]=data->host_trees[i][j]->parameters[7]=-2;
data->host_trees[i][j]->parameters[4]=-1;
data->host_trees[i][j]->parameters[8]=0;
data->host_trees[i][j]->parameters[0]=data->host_trees[i][j]->parameters[1]=data->host_trees[i][j]->parameters[2]=data->host_trees[i][j]->parameters[3]=0;
data->host_trees[i][j]->parameters[9]=0;
}
currentID=0;
convertTrees2Array(trees_cpu[i],data->host_trees[i],labelNum);
cout<<i<<" "<<currentID<<endl;
}
//cout<<"left and right node index: "<<data->host_trees[0][0]->parameters[6]<<" "<<data->host_trees[0][0]->parameters[7]<<endl;
//ofstream out("tree 0.txt",ios::out);
//outputTrees(data->host_trees[0],0,out);
//out.close();
cout<<"copying trees to GPU\n";
//CUDA_CALL(cudaMalloc((void **)&data->leafnode,1*sizeof(LeafNode_GPU)));
//CUDA_CALL(cudaMalloc((void **)&data->trees,data->max_num_of_trees_in_the_forest*sizeof(Node_GPU*)));
////CUDA_CALL(cudaMemcpy(data->trees,data->host_trees,data->max_num_of_trees_in_the_forest*sizeof(Node_GPU **),cudaMemcpyHostToDevice));
////data->trees=new Node_GPU **[data->max_num_of_trees_in_the_forest];
//for (int i=0;i<treeNum;i++)
//{
// CUDA_CALL(cudaMalloc((void **)&data->trees[i],MaxNumber*sizeof(Node_GPU)));
//// CUDA_CALL(cudaMemcpy(data->trees[i],data->host_trees[i],MaxNumber*sizeof(Node_GPU *),cudaMemcpyHostToDevice));
// cout<<i<<endl;
// for (int j=0;j<MaxNumber;j++)
// {
// // cout<<j<<" ";
// CUDA_CALL(cudaMalloc((void **)&data->trees[i][j].cu_parameters,10*sizeof(int)));
// CUDA_CALL(cudaMemcpy(data->trees[i][j].cu_parameters,data->host_trees[i][j]->parameters,10*sizeof(int),cudaMemcpyHostToDevice));
// if (data->host_trees[i][j]->parameters[6]==-1&&data->host_trees[i][j]->parameters[7]==-1)
// {
// CUDA_CALL(cudaMalloc((void **)&data->trees[i][j].num_of_each_class,MAX_LABEL_NUMBER*sizeof(float)));
// CUDA_CALL(cudaMemcpy(data->trees[i][j].num_of_each_class,data->host_trees[i][j]->num_of_each_class,
// MAX_LABEL_NUMBER*sizeof(float),cudaMemcpyHostToDevice));
// }
// }
//}
////////////////////////////using global memory////////////////////////////////////////////////////
//float *host_vectorTrees=new float[(10+MAX_LABEL_NUMBER)*MaxNumber*treeNum];
////root_GPU[currentID]->parameters[0]=root_CPU->pos1[0];
////root_GPU[currentID]->parameters[1]=root_CPU->pos1[1];
////root_GPU[currentID]->parameters[2]=root_CPU->pos2[0];
////root_GPU[currentID]->parameters[3]=root_CPU->pos2[1];
////root_GPU[currentID]->parameters[4]=root_CPU->label;
////root_GPU[currentID]->parameters[5]=root_CPU->nLevel;
////root_GPU[currentID]->parameters[8]=root_CPU->num_all;
////root_GPU[currentID]->parameters[9]=root_CPU->threshold;
////cout<<"tree num: "<<treeNum<<endl;
//
//cout<<MaxNumber<<endl;
//cout<<"assigning values\n";
//for (int i=0;i<treeNum;i++)
//{
// cout<<i<<endl;
// for (int j=0;j<MaxNumber;j++)
// {
// //cout<<i<<" "<<j<<endl;
// /* for (int k=0;k<)
// {
// }*/
// for (int k=0;k<10+MAX_LABEL_NUMBER;k++)
// {
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+k]=data->host_trees[i][j]->parameters[k];
// }
///* host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+0]=data->host_trees[i][j].pos1[0];
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+1]=data->host_trees[i][j].pos1[1];
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+2]=data->host_trees[i][j].pos2[0];
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+3]=data->host_trees[i][j].pos2[1];
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+4]=data->host_trees[i][j].label;
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+5]=data->host_trees[i][j].nLevel;
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+8]=data->host_trees[i][j].num_all;
// host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+9]=data->host_trees[i][j].threshold;*/
//
//
// //if (trees_cpu[i][j].l_child==NULL)
// //{
// // host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+6]=-1;
// //}
// //if (trees_cpu[i][j].r_child==NULL)
// //{
// // host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+7]=-1;
// //}
// //for (int i=0;i<MAX_LABEL_NUMBER;i++)
// //{
// // host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+10+i]=0;
// //}
// //if ((trees_cpu[i][j].l_child==NULL)&&trees_cpu[i][j].r_child==NULL)//root
// //{
// // for (int i=0;i<labelNum;i++)
// // {
// // host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+10+i]=trees_cpu[i][j].num_of_each_class[i];
// // }
// //}
// }
//}
//cout<<"copying values\n";
////using global memory
//CUDA_CALL(cudaMalloc((void **)&data->cu_vectorTrees,(10+MAX_LABEL_NUMBER)*MaxNumber*treeNum*sizeof(float)));
//CUDA_CALL(cudaMemcpy(data->cu_vectorTrees,host_vectorTrees,(10+MAX_LABEL_NUMBER)*MaxNumber*treeNum*sizeof(float),cudaMemcpyHostToDevice));
//delete []host_vectorTrees;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
//treeNum=2;
///////////////////////////////////////using texture memory//////////////////////////////////////////////////////////////////////
float *host_vectorTrees=new float[(10+MAX_LABEL_NUMBER)*MaxNumber*treeNum];
cout<<MaxNumber<<endl;
cout<<"assigning values\n";
for (int i=0;i<treeNum;i++)
{
cout<<i<<endl;
for (int j=0;j<MaxNumber;j++)
{
for (int k=0;k<10+MAX_LABEL_NUMBER;k++)
{
host_vectorTrees[(i*MaxNumber+j)*(10+MAX_LABEL_NUMBER)+k]=data->host_trees[i][j]->parameters[k];
}
}
}
CUDA_CALL(cudaMalloc((void **)&data->cu_vectorTrees,(10+MAX_LABEL_NUMBER)*MaxNumber*treeNum*sizeof(float)));
CUDA_CALL(cudaMemcpy(data->cu_vectorTrees,host_vectorTrees,(10+MAX_LABEL_NUMBER)*MaxNumber*treeNum*sizeof(float),cudaMemcpyHostToDevice));
	cout<<"width "<< (10+MAX_LABEL_NUMBER)<<" height:"<<MaxNumber*treeNum<<" maxNumber:"<<MaxNumber<<endl;
//CUDA_CALL(cudaBindTexture2D( NULL, trees_device,
// RandomizedTreeEngine.cu_vectorTrees,
// desc, (10+MAX_LABEL_NUMBER),MaxNumber*treeNum,
// sizeof(float) * (10+MAX_LABEL_NUMBER)));
// trees_device.filterMode=cudaFilterModePoint;
//CUDA_CALL(cudaBindTexture2D( NULL, trees_device_1D,
// RandomizedTreeEngine.cu_vectorTrees,
//(10+MAX_LABEL_NUMBER)*MaxNumber*treeNum*sizeof(float)));
//CUDA_CALL(cudaBindTexture2D( NULL, currentDepthImg,
// data->cu_depthImage,
// desc, width,height,
// sizeof(float) * width));
delete []host_vectorTrees;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
for (int i=0;i<treeNum;i++)
{
for (int j=0;j<MaxNumber;j++)
{
delete data->host_trees[i][j];
}
delete data->host_trees[i];
}
delete []data->host_trees;
/*cout<<"tree 0 on GPU\n";
outputTree<<<(data->MaxNumber)/256+1,256>>>(data->cu_vectorTrees,data->MaxNumber,0,data->labelNum);*/
//CUDA_CALL( cudaMalloc(&data->trees, data->max_num_of_trees_in_the_forest * sizeof(Node_GPU**)) );
//for (int i=0;i<treeNum;i++)
//{
// data->trees[i]=new Node_GPU *[MaxNumber];
// for (int j=0;j<MaxNumber;j++)
// {
// data->trees[i][j]=new Node_GPU();
// }
// convertTrees2Array(trees_cpu[i],data->trees[i],0,labelNum);
//}
//test
//Node_GPU*** testTree=new Node_GPU**[data->max_num_of_trees_in_the_forest];
//CUDA_CALL(cudaMemcpy(testTree,data->trees,data->max_num_of_trees_in_the_forest*sizeof(Node_GPU **),cudaMemcpyDeviceToHost));
//for (int i=0;i<treeNum;i++)
//{
// for (int j=0;j<MaxNumber;j++)
// {
// if (data->host_trees[i][j]->l_child_ind==-1&&data->host_trees[i][j]->r_child_ind==-1)
// {
// continue;
// }
// //for (int k=0;k<labelNum;k++)
// if (j==0)
// {
// {
// cout<<"groundtruth: "<<data->host_trees[i][j]->l_child_ind<<endl;
// }
// //for (int k=0;k<labelNum;k++)
// {
// cout<<"current: "<<testTree[i][j]->l_child_ind<<endl;
// }
// }
//
//
// }
//
//}
/*Node_GPU *curent=testTree[0][0];
while(1)
{
cout<<curent->l_child_ind<<" "<<curent->r_child_ind<<endl;
if (curent->l_child_ind==-1&&curent->r_child_ind==-1)
{
cout<<curent->label<<endl;
break;
}
else if(1)
{
if (curent->l_child_ind<0)
{
break;
}
curent=testTree[0][curent->l_child_ind];
}
else
{
if (curent->r_child_ind<0)
{
break;
}
curent=testTree[0][curent->r_child_ind];
}
}
cout<<"test done!\n";*/
//data->trees=new Node_GPU[(1-pow(2,_max_depth)/(-1))];
if(!withDepth)
{
CUDA_CALL( cudaMalloc(&data->cu_currentImage, MPN * sizeof(float)) );
}
else
{
CUDA_CALL( cudaMalloc(&data->cu_colorImage, MPN * sizeof(float)) );
CUDA_CALL( cudaMalloc(&data->cu_depthImage, MPN * sizeof(float)) );
}
CUDA_CALL( cudaMalloc(&data->cu_LabelResult, MPN*(1+MAX_LABEL_NUMBER) * sizeof(float)) );
//CUDA_CALL( cudaMalloc(&data->cu_LabelResultEachTree, treeNum*MPN*(1+MAX_LABEL_NUMBER) * sizeof(float)) );
/*CUDA_CALL(cudaBindTexture( NULL, detectionResult1D,
treeNum*MPN*(1+MAX_LABEL_NUMBER) * sizeof(float));*/
cout<<"labelResult GPU set"<<MPN*(1+MAX_LABEL_NUMBER)<<endl;
// maximumWidth=640;
CUDA_CALL( cudaMalloc(&data->cu_LabelFullResult, MPN*(1+MAX_LABEL_NUMBER) * sizeof(float)) );
CUDA_CALL(cudaBindTexture2D( NULL, detectionResult2D,
data->cu_LabelFullResult,
desc, 480 ,(1+MAX_LABEL_NUMBER)*640,
sizeof(float) * 480));
currentImg.filterMode=cudaFilterModePoint;
currentColorImg.filterMode=cudaFilterModePoint;
currentDepthImg.filterMode=cudaFilterModePoint;
//data->host_LabelResult=new LabelResult[MPN];
}
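// Traverses one tree for the pixel at pos using intensity features only: at each node
// the image value at pos+pos1 is compared against the value at pos+pos2 plus the node
// threshold to choose the left or right child. On reaching a leaf, ind[0] receives the
// node index within the tree and ind[1] its stored label.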
__device__ void getProb(float *trees,int treeInd,int *pos,int *ind,int currentInd,int MaxNumber)
{
//RandmizedTree_CUDA *data=&RandomizedTreeEngine;
//Node_GPU *current=&(root[currentInd]);
// printf("getProb in\n");
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
int startInd;
//int currentInd=treeInd;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/* printf("leafNode \n");
for (int i=0;i<6;i++)
{
printf("%f ",trees[startInd+10+i]);
}
printf("\n");*/
ind[0]=currentInd;
ind[1]=label;
break;
}
if (tex2D(currentImg,pos1[0]+pos[0],pos1[1]+pos[1])>
tex2D(currentImg,pos2[0]+pos[0],pos2[1]+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
__device__ void getProb_depth_depth(float *trees,int treeInd,int *pos,int *ind,int currentInd,int MaxNumber)
{
//RandmizedTree_CUDA *data=&RandomizedTreeEngine;
//Node_GPU *current=&(root[currentInd]);
// printf("getProb in\n");
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
int startInd;
//int currentInd=treeInd;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/* printf("leafNode \n");
for (int i=0;i<6;i++)
{
printf("%f ",trees[startInd+10+i]);
}
printf("\n");*/
ind[0]=currentInd;
ind[1]=label;
break;
}
float curDepth=1.0f/tex2D(currentDepthImg,pos[0],pos[1]);
//according to the train style
//if (trainStyle==0)
{
if (tex2D(currentDepthImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentDepthImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
__device__ void getProb_depth_color(float *trees,int treeInd,int *pos,int *ind,int currentInd,int MaxNumber)
{
//RandmizedTree_CUDA *data=&RandomizedTreeEngine;
//Node_GPU *current=&(root[currentInd]);
// printf("getProb in\n");
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
int startInd;
//int currentInd=treeInd;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/* printf("leafNode \n");
for (int i=0;i<6;i++)
{
printf("%f ",trees[startInd+10+i]);
}
printf("\n");*/
ind[0]=currentInd;
ind[1]=label;
break;
}
float curDepth=1.0f/tex2D(currentDepthImg,pos[0],pos[1]);
//according to the train style
{
if (tex2D(currentColorImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentColorImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
__device__ void getProb_depth_depth_color(float *trees,int treeInd,int *pos,int *ind,int currentInd,int MaxNumber)
{
//RandmizedTree_CUDA *data=&RandomizedTreeEngine;
//Node_GPU *current=&(root[currentInd]);
// printf("getProb in\n");
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
int startInd;
//int currentInd=treeInd;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/* printf("leafNode \n");
for (int i=0;i<6;i++)
{
printf("%f ",trees[startInd+10+i]);
}
printf("\n");*/
ind[0]=currentInd;
ind[1]=label;
break;
}
float curDepth=1.0f/tex2D(currentDepthImg,pos[0],pos[1]);
{
if (threshold>=0)
{
if (tex2D(currentDepthImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentDepthImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
else
{
if (tex2D(currentColorImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentColorImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1]))
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
}
__device__ void getProb_depth(float *trees,int treeInd,int *pos,int *ind,int currentInd,int MaxNumber,int trainStyle)
{
//RandmizedTree_CUDA *data=&RandomizedTreeEngine;
//Node_GPU *current=&(root[currentInd]);
// printf("getProb in\n");
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
int startInd;
//int currentInd=treeInd;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/* printf("leafNode \n");
for (int i=0;i<6;i++)
{
printf("%f ",trees[startInd+10+i]);
}
printf("\n");*/
ind[0]=currentInd;
ind[1]=label;
break;
}
float curDepth=1.0f/tex2D(currentDepthImg,pos[0],pos[1]);
//according to the train style
if (trainStyle==0)
{
if (tex2D(currentDepthImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentDepthImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
else if (trainStyle==1)
{
if (tex2D(currentColorImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentColorImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
else
{
if (threshold>=0)
{
if (tex2D(currentDepthImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentDepthImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
else
{
if (tex2D(currentColorImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentColorImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1]))
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
}
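// Same traversal as getProb_depth, but the node fields are fetched from the 2D texture
// trees_device instead of global memory: row = treeInd*MaxNumber + node index,
// column = field offset within the (10+MAX_LABEL_NUMBER)-wide node record.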
__device__ void getProb_depth_textureTrees(int treeInd,int *pos,int *ind,int currentInd,int MaxNumber,int trainStyle)
{
//RandmizedTree_CUDA *data=&RandomizedTreeEngine;
//Node_GPU *current=&(root[currentInd]);
// printf("getProb in\n");
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int initialInd=treeInd*MaxNumber;
int startInd;
//int currentInd=treeInd;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
//2d texture
startInd=initialInd+currentInd;
pos1[0]=tex2D(trees_device,0,startInd);//trees[startInd+0];
pos1[1]=tex2D(trees_device,1,startInd);//trees[startInd+1];
pos2[0]=tex2D(trees_device,2,startInd);//trees[startInd+2];
pos2[1]=tex2D(trees_device,3,startInd);//trees[startInd+3];
label=tex2D(trees_device,4,startInd);//trees[startInd+4];
l_child_ind=tex2D(trees_device,6,startInd);
r_child_ind=tex2D(trees_device,7,startInd);
threshold=tex2D(trees_device,9,startInd);//trees[startInd+9];
/* l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];*/
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/* printf("leafNode \n");
for (int i=0;i<6;i++)
{
printf("%f ",trees[startInd+10+i]);
}
printf("\n");*/
ind[0]=currentInd;
ind[1]=label;
break;
}
float curDepth=1.0f/tex2D(currentDepthImg,pos[0],pos[1]);
//according to the train style
if (trainStyle==0)
{
if (tex2D(currentDepthImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentDepthImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
else if (trainStyle==1)
{
if (tex2D(currentColorImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentColorImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
else
{
if (threshold>=0)
{
if (tex2D(currentDepthImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentDepthImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
else
{
if (tex2D(currentColorImg,(float)pos1[0]*curDepth+pos[0],(float)pos1[1]*curDepth+pos[1])>
tex2D(currentColorImg,(float)pos2[0]*curDepth+pos[0],(float)pos2[1]*curDepth+pos[1]))
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
}
//suppose the maximum class number is 10
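// Intensity-only per-pixel prediction over the forest stored in global memory: border
// pixels are labelled -1, all others accumulate the leaf class histograms of every tree
// and write the winning label plus the tree-averaged class probabilities.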
__global__ void predict_prob(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,int MaxNumber)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<width*height)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
int i,j;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
		double label_prob_all[MAX_LABEL_NUMBER];
		for (int i=0;i<labelNum;i++)	// zero the per-class accumulators (over labelNum, not treeNum)
		{
			label_prob_all[i]=0;
		}
		int ind[2]={0,0};
		int startInd;
		for (i=0;i<treeNum;i++)
		{
			getProb(trees,i,pos,ind,0,MaxNumber);
startInd=i*(10+MAX_LABEL_NUMBER)*MaxNumber+ind[0]*(10+MAX_LABEL_NUMBER);
//printf("LI: %d, RI: %d",trees[startInd+6],trees[startInd+7]);
if (trees[startInd+6]==-1&&trees[startInd+7]==-1) //reach a leaf
{
for (j=0;j<labelNum;j++)
{
label_prob_all[j]+=trees[startInd+10+j];
}
//label_prob_all[j]/=(float)treeNum;
}
}
////find the most frequent label
float maxInd=0;
double maxNum=-1;
for (i=0;i<labelNum;i++)
{
if (label_prob_all[i]>maxNum)
{
maxNum=label_prob_all[i];
maxInd=i;
}
}
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=maxInd;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=label_prob_all[i]/(float)treeNum;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
}
}
__global__ void predict_prob_withDepth_textureTrees(float *result,int labelNum,int treeNum,int width,int height,int windowSize,LeafNode_GPU *leaf,int MaxNumber,int trainStyle)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<width*height)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
if(tex2D(currentDepthImg,pos[0],pos[1])==0)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=labelNum-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (int i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
int i,j;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
double label_prob_all[MAX_LABEL_NUMBER];
for (int i=0;i<labelNum;i++)
{
label_prob_all[i]=0;
}
int ind[2]={0,0};
int startInd;
//about 210ms
for (i=0;i<treeNum;i++)
{
getProb_depth_textureTrees(i,pos,ind,0,MaxNumber,trainStyle);
startInd=i*MaxNumber+ind[0];
//printf("LI: %d, RI: %d",trees[startInd+6],trees[startInd+7]);
if (tex2D(trees_device,6,startInd)==-1&&tex2D(trees_device,7,startInd)==-1) //reach a leaf
{
for (j=0;j<labelNum;j++)
{
label_prob_all[j]+=tex2D(trees_device,10+j,startInd);//trees[startInd+10+j];
}
//label_prob_all[j]/=(float)treeNum;
}
}
////find the most frequent label
float maxInd=0;
double maxNum=-1;
for (i=0;i<labelNum;i++)
{
if (label_prob_all[i]>maxNum)
{
maxNum=label_prob_all[i];
maxInd=i;
}
}
//about 20ms
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=maxInd;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=label_prob_all[i]/(float)treeNum;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
}
}
__global__ void predict_prob_withDepth_depth_color(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,int MaxNumber)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<width*height)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
if(tex2D(currentDepthImg,pos[0],pos[1])==0)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=labelNum-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (int i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
int i,j;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
double label_prob_all[MAX_LABEL_NUMBER];
for (int i=0;i<labelNum;i++)
{
label_prob_all[i]=0;
}
int ind[2]={0,0};
int startInd;
//about 210ms
for (i=0;i<treeNum;i++)
{
getProb_depth_depth_color(trees,i,pos,ind,0,MaxNumber);
startInd=i*(10+MAX_LABEL_NUMBER)*MaxNumber+ind[0]*(10+MAX_LABEL_NUMBER);
if (trees[startInd+6]==-1&&trees[startInd+7]==-1) //reach a leaf
{
for (j=0;j<labelNum;j++)
{
label_prob_all[j]+=trees[startInd+10+j];
}
}
}
//return;
////find the most frequent label
float maxInd=0;
double maxNum=-1;
for (i=0;i<labelNum;i++)
{
if (label_prob_all[i]>maxNum)
{
maxNum=label_prob_all[i];
maxInd=i;
}
}
//about 20ms, very time consuming
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
//{
result[currentInd]=maxInd;
// tex2D(detectionResult2D,labelNum*height+pos[0],pos[1])=maxInd;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=label_prob_all[i]/(float)treeNum;
// tex2D(detectionResult2D,i*height+pos[0],pos[1])=label_prob_all[i]/(float)treeNum;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
//}
}
}
__global__ void getProbMap(float *result,int labelNum,int treeNum,int width,int height,int windowSize)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<width*height)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
result[offset]=tex2D(detectionResult2D,pos[1],pos[0]);
}
}
__global__ void predict_prob_withDepth_color(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,int MaxNumber)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<width*height)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
if(tex2D(currentDepthImg,pos[0],pos[1])==0)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=labelNum-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (int i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
int i,j;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
double label_prob_all[MAX_LABEL_NUMBER];
for (int i=0;i<labelNum;i++)
{
label_prob_all[i]=0;
}
int ind[2]={0,0};
int startInd;
//about 210ms
for (i=0;i<treeNum;i++)
{
getProb_depth_color(trees,i,pos,ind,0,MaxNumber);
startInd=i*(10+MAX_LABEL_NUMBER)*MaxNumber+ind[0]*(10+MAX_LABEL_NUMBER);
if (trees[startInd+6]==-1&&trees[startInd+7]==-1) //reach a leaf
{
for (j=0;j<labelNum;j++)
{
label_prob_all[j]+=trees[startInd+10+j];
}
}
}
// return;
////find the most frequent label
int maxInd=0;
double maxNum=-1;
for (i=0;i<labelNum;i++)
{
if (label_prob_all[i]>maxNum)
{
maxNum=label_prob_all[i];
maxInd=i;
}
}
//about 20ms
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=maxInd;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=label_prob_all[i]/(float)treeNum;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
}
}
__global__ void predict_prob_withDepth_depth(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,int MaxNumber)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<width*height)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
if(tex2D(currentDepthImg,pos[0],pos[1])==0)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=labelNum-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (int i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
int i,j;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
double label_prob_all[MAX_LABEL_NUMBER];
for (int i=0;i<labelNum;i++)
{
label_prob_all[i]=0;
}
int ind[2]={0,0};
int startInd;
//about 210ms
for (i=0;i<treeNum;i++)
{
getProb_depth_depth(trees,i,pos,ind,0,MaxNumber);
startInd=i*(10+MAX_LABEL_NUMBER)*MaxNumber+ind[0]*(10+MAX_LABEL_NUMBER);
if (trees[startInd+6]==-1&&trees[startInd+7]==-1) //reach a leaf
{
for (j=0;j<labelNum;j++)
{
label_prob_all[j]+=trees[startInd+10+j];
}
}
}
//return;
////find the most frequent label
float maxInd=0;
double maxNum=-1;
for (i=0;i<labelNum;i++)
{
if (label_prob_all[i]>maxNum)
{
maxNum=label_prob_all[i];
maxInd=i;
}
}
//about 20ms
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=maxInd;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=label_prob_all[i]/(float)treeNum;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
}
}
//suppose the maximum class number is 10
__global__ void predict_prob_withDepth(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,int MaxNumber,int trainStyle)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
if (offset<width*height)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
if(tex2D(currentDepthImg,pos[0],pos[1])==0)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=labelNum-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (int i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
int i,j;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
double label_prob_all[MAX_LABEL_NUMBER];
for (int i=0;i<labelNum;i++)
{
label_prob_all[i]=0;
}
int ind[2]={0,0};
int startInd;
//about 210ms
for (i=0;i<treeNum;i++)
{
getProb_depth(trees,i,pos,ind,0,MaxNumber,trainStyle);
startInd=i*(10+MAX_LABEL_NUMBER)*MaxNumber+ind[0]*(10+MAX_LABEL_NUMBER);
if (trees[startInd+6]==-1&&trees[startInd+7]==-1) //reach a leaf
{
for (j=0;j<labelNum;j++)
{
label_prob_all[j]+=trees[startInd+10+j];
}
}
}
// return;
////find the most frequent label
float maxInd=0;
double maxNum=-1;
for (i=0;i<labelNum;i++)
{
if (label_prob_all[i]>maxNum)
{
maxNum=label_prob_all[i];
maxInd=i;
}
}
//about 20ms
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=maxInd;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=label_prob_all[i]/(float)treeNum;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
}
}
__global__ void predict_prob_withDepth_eachTree(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,int MaxNumber,int trainStyle)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
int treeId=blockIdx.y;
if (offset<width*height&&treeId<treeNum)
{
// RandmizedTree_CUDA *data=&RandomizedTreeEngine;
/* if (offset!=100*width+100)
{
return;
}*/
// printf("%d\n",offset);
int pos[2];
pos[0]=offset%width;
pos[1]=offset/width;
if(tex2D(currentDepthImg,pos[0],pos[1])==0)
{
if (treeId==0)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=labelNum-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (int i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
}
return;
}
int i,j;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
if (treeId==0)
{
int currentInd=offset*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
					result[currentInd+0]=labelNum-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
}
return;
}
//double label_prob_all[MAX_LABEL_NUMBER];
//for (int i=0;i<labelNum;i++)
//{
// label_prob_all[i]=0;
//}
int ind[2]={0,0};
int startInd;
////about 210ms
////for (i=0;i<treeNum;i++)
//{
getProb_depth(trees,treeId,pos,ind,0,MaxNumber,trainStyle);
startInd=treeId*(10+MAX_LABEL_NUMBER)*MaxNumber+ind[0]*(10+MAX_LABEL_NUMBER);
// //printf("LI: %d, RI: %d",trees[startInd+6],trees[startInd+7]);
//if (trees[startInd+6]==-1&&trees[startInd+7]==-1) //reach a leaf
//{
// int currentInd=(treeId*MPN+offset)*(1+MAX_LABEL_NUMBER);
// for (i=0;i<labelNum;i++)
// {
// //lockEXP.lock();
// result[currentInd+1+i]=trees[startInd+10+i];
// //lockEXP.unlock();
// }
//}
}
}
////maximum Ind the probability
////1+MAX_LABEL_NUMBER
//__global__ findMaximumProb(float *detectionResult,int width,int height)
//{
// __shared__ float cache[512];
// int tid=blockIdx.x*blockDim.x+threadIdx.x;
// int cacheIndex=threadIdx.x;
//
// int tmpLabel;
//
// int maximumID;
// if (tid<width*height)
// {
// tmpLabel=detectionResult[];
// cache[cacheIndex]
// }
//}
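//getProb_FP: fully-parallel helper -- one call walks a single tree for a strided set of
//pixels (startPixelInd, startPixelInd+pixelStep, ...) and accumulates each leaf histogram,
//already scaled by 1/treeNum, into the shared result buffer with atomicAdd, since several
//trees/threads may write the same pixel concurrently.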
__device__ void getProb_FP(float *trees,int treeInd,int startPixelInd,int pixelStep,int width, int height, int MaxNumber,float *result,int labelNum,int treeNum)
{
int i=0;
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;//;=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
int startInd;
int currentInd;
int pos[2];
int times=0;
for (i=startPixelInd;i<width*height;i+=pixelStep)
{
//int currentInd=treeInd;
currentInd=0;
pos[0]=i%width;
pos[1]=i/width;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/*ind[0]=currentInd;
ind[1]=label;*/
for (int k=1;k<=labelNum;k++)
{
atomicAdd(&result[i*(1+MAX_LABEL_NUMBER)+k],trees[startInd+9+k]/treeNum);
}
break;
}
if (tex2D(currentImg,pos1[0]+pos[0],pos1[1]+pos[1])>
tex2D(currentImg,pos2[0]+pos[0],pos2[1]+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
__global__ void predict_prob_fullParral_pixel_tree(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,const int MaxNumber)
{
//int offset=threadIdx.x+blockIdx.x*blockDim.x;
int treeInd=threadIdx.x;
int PixelInd=blockIdx.y*gridDim.x+blockIdx.x;
//int pixelStep=blockDim.x;
//const int TreeSize=MaxNumber*(10+10);
//const int TreeSize=MaxNumber*(10+labelNum);
//__shared__ float LocalParameters[32767*20];
//int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;//;=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
//for (int ind=0;ind<(10+MAX_LABEL_NUMBER)*MaxNumber;ind++)
//{
// LocalParameters[ind]=trees[initialInd+ind];
//}
//__syncthreads();
//return;
if (treeInd<treeNum&&PixelInd<width*height)
{
int i=PixelInd;
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int startInd;
int currentInd;
int pos[2];
int k;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;//;=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;;
//int times=0;
//int currentInd=treeInd;
currentInd=0;
pos[0]=i%width;
pos[1]=i/width;
if (pos[0]<=windowSize||pos[0]>=width-windowSize||
pos[1]<=windowSize||pos[1]>=height-windowSize)
{
int currentInd=i*(1+MAX_LABEL_NUMBER);
//if (label_prob_all[maxInd]>threshold)
{
result[currentInd+0]=-1;
//result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
for (i=0;i<labelNum;i++)
{
result[currentInd+1+i]=0;
}
//cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
}
return;
}
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/*ind[0]=currentInd;
ind[1]=label;*/
for (k=1;k<=labelNum;k++)
{
atomicAdd(&result[i*(1+MAX_LABEL_NUMBER)+k],trees[startInd+9+k]/treeNum);
}
break;
}
if (tex2D(currentImg,pos1[0]+pos[0],pos1[1]+pos[1])>
tex2D(currentImg,pos2[0]+pos[0],pos2[1]+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
//suppose the maximum class number is 10
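//predict_prob_fullParral: one block per tree, threads stride over the image pixels; leaf
//histograms are added into the per-pixel result with atomicAdd and are already divided by
//treeNum, so no separate normalisation pass is needed. The final arg-max over labels is
//not computed in this kernel.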
__global__ void predict_prob_fullParral(float *result,int labelNum,int treeNum,int width,int height,int windowSize,float *trees,LeafNode_GPU *leaf,const int MaxNumber)
{
//int offset=threadIdx.x+blockIdx.x*blockDim.x;
int treeInd=blockIdx.x;
int startPixelInd=threadIdx.x;
int pixelStep=blockDim.x;
//const int TreeSize=MaxNumber*(10+10);
//const int TreeSize=MaxNumber*(10+labelNum);
//__shared__ float LocalParameters[32767*20];
//int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;//;=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;
//for (int ind=0;ind<(10+MAX_LABEL_NUMBER)*MaxNumber;ind++)
//{
// LocalParameters[ind]=trees[initialInd+ind];
//}
//__syncthreads();
//return;
if (treeInd<treeNum)
{
int i=0;
int l_child_ind,r_child_ind,pos1[2],pos2[2],label,threshold;
int startInd;
int currentInd;
int pos[2];
int k;
int initialInd=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;//;=treeInd*(10+MAX_LABEL_NUMBER)*MaxNumber;;
//int times=0;
for (i=startPixelInd;i<width*height;i+=pixelStep)
{
//int currentInd=treeInd;
currentInd=0;
pos[0]=i%width;
pos[1]=i/width;
while(1)
//for(int i=0;i<2;i++)
{
//printf("%d\n",startInd);
startInd=initialInd+currentInd*(10+MAX_LABEL_NUMBER);
l_child_ind=trees[startInd+6];
r_child_ind=trees[startInd+7];
pos1[0]=trees[startInd+0];
pos1[1]=trees[startInd+1];
pos2[0]=trees[startInd+2];
pos2[1]=trees[startInd+3];
label=trees[startInd+4];
threshold=trees[startInd+9];
//printf("after getting values %d %d\n",l_child_ind,r_child_ind);
if (l_child_ind==-1&&r_child_ind==-1)//leaf
//if (1)//leaf
{
/*ind[0]=currentInd;
ind[1]=label;*/
for (k=1;k<=labelNum;k++)
{
atomicAdd(&result[i*(1+MAX_LABEL_NUMBER)+k],trees[startInd+9+k]/treeNum);
}
break;
}
if (tex2D(currentImg,pos1[0]+pos[0],pos1[1]+pos[1])>
tex2D(currentImg,pos2[0]+pos[0],pos2[1]+pos[1])+threshold)
{
if (l_child_ind<0)
{
break;
}
// printf("going to left %d\n",l_child_ind);
currentInd=l_child_ind;
//current=&(root[l_child_ind]);
}
else
{
if (r_child_ind<0)//leaf
{
break;
}
// printf("going to right %d\n",r_child_ind);
currentInd=r_child_ind;
//current=&(root[r_child_ind]);
}
}
}
}
//getProb_FP(trees,treeInd,startPixelInd,pixelStep,width,height,MaxNumber);
// int pos[2];
// pos[0]=offset%width;
// pos[1]=offset/width;
// int i,j;
// if (pos[0]<=windowSize||pos[0]>=width-windowSize||
// pos[1]<=windowSize||pos[1]>=height-windowSize)
// {
// int currentInd=offset*(1+MAX_LABEL_NUMBER);
// //if (label_prob_all[maxInd]>threshold)
// {
// result[currentInd+0]=-1;
// //result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
// for (i=0;i<labelNum;i++)
// {
// result[currentInd+1+i]=0;
// }
// //cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
// }
// return;
// }
// double label_prob_all[MAX_LABEL_NUMBER];
///* for (int i=0;i<labelNum;i++)
// {
// label_prob_all[i]=0;
// }*/
// int ind[2]={0,0};
// int startInd;
// //for (i=0;i<treeNum;i++)
// for (i=0;i<treeNum;i++)
// {
// label_prob_all[i]=0;
// getProb(trees,i,pos,ind,0,MaxNumber);
// startInd=i*(10+MAX_LABEL_NUMBER)*MaxNumber+ind[0]*(10+MAX_LABEL_NUMBER);
// //printf("LI: %d, RI: %d",trees[startInd+6],trees[startInd+7]);
// if (trees[startInd+6]==-1&&trees[startInd+7]==-1) //reach a leaf
// {
//
// for (j=0;j<labelNum;j++)
// {
// label_prob_all[j]+=trees[startInd+10+j];
// }
// //label_prob_all[j]/=(float)treeNum;
// }
// }
//
// ////find the most frequent label
// int maxInd=0;
// double maxNum=-1;
// for (i=0;i<labelNum;i++)
// {
// if (label_prob_all[i]>maxNum)
// {
// maxNum=label_prob_all[i];
// maxInd=i;
// }
// }
// int currentInd=offset*(1+MAX_LABEL_NUMBER);
// //if (label_prob_all[maxInd]>threshold)
// {
// result[currentInd+0]=maxInd;
// //result[offset].prob=label_prob_all[maxInd]/(float)treeNum;
// for (i=0;i<labelNum;i++)
// {
// result[currentInd+1+i]=label_prob_all[i]/(float)treeNum;
// }
// //cout<<result.label<<" "<<label_prob_all[maxInd]<<" "<<treeNum<<" "<<result.prob<<endl;
// }
//}
}
void setData_onrun(float *hostImg,int width,int height)
{
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
CUDA_CALL(cudaMemcpy(data->cu_currentImage,hostImg,MPN*sizeof(float),cudaMemcpyHostToDevice));
if (!data->hasImage)
{
CUDA_CALL(cudaBindTexture2D( NULL, currentImg,
data->cu_currentImage,
desc, width,height,
sizeof(float) * width));
data->hasImage=true;
}
}
void setData_onrun(float *colorImg,float *depthImg,int width,int height)
{
//cout<<"setting data on the fly"<<endl;
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
CUDA_CALL(cudaMemcpy(data->cu_colorImage,colorImg,MPN*sizeof(float),cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(data->cu_depthImage,depthImg,MPN*sizeof(float),cudaMemcpyHostToDevice));
if (!data->hasImage)
{
CUDA_CALL(cudaBindTexture2D( NULL, currentColorImg,
data->cu_colorImage,
desc, width,height,
sizeof(float) * width));
CUDA_CALL(cudaBindTexture2D( NULL, currentDepthImg,
data->cu_depthImage,
desc, width,height,
sizeof(float) * width));
data->hasImage=true;
}
}
extern "C" void setData_RT_onrun(float *colorImg,float *depthImg,int width,int height)
{
//cout<<"setting data on the fly"<<endl;
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
CUDA_CALL(cudaMemcpy(data->cu_colorImage,colorImg,MPN*sizeof(float),cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(data->cu_depthImage,depthImg,MPN*sizeof(float),cudaMemcpyHostToDevice));
//if (!data->hasImage)
{
CUDA_CALL(cudaBindTexture2D( NULL, currentColorImg,
data->cu_colorImage,
desc, width,height,
sizeof(float) * width));
CUDA_CALL(cudaBindTexture2D( NULL, currentDepthImg,
data->cu_depthImage,
desc, width,height,
sizeof(float) * width));
//data->hasImage=true;
}
}
extern "C" void predict_GPU(float *host_img,int width,int height,float *host_result)
{
//load the trained tree
setData_onrun(host_img,width,height);
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
dim3 grid(width,height,1);
cudaEvent_t start, stop;
CUDA_CALL(cudaEventCreate(&start));
CUDA_CALL(cudaEventCreate(&stop));
CUDA_CALL(cudaEventRecord( start, 0 ));
predict_prob<<<width*height/64+1,64>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
//predict_prob_fullParral<<<data->max_num_of_trees_in_the_forest,64>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
//predict_prob_fullParral_pixel_tree<<<grid,16>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
CUDA_CALL(cudaEventRecord( stop, 0 ));
CUDA_CALL(cudaEventSynchronize( stop ));
float elapsedTime;
CUDA_CALL( cudaEventElapsedTime( &elapsedTime,
start, stop ) );
cout<<"time: "<<elapsedTime<<" ms"<<endl;
//CUDA_CALL(cudaMemcpy(data->cu_LabelResult,host_result,MPN*sizeof(LabelResult),cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(host_result,data->cu_LabelResult, MPN*(1+MAX_LABEL_NUMBER)*sizeof(float),cudaMemcpyDeviceToHost));
}
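//predict_GPU_withDepth: host-side entry point. trainStyle selects which split feature the
//trees were trained with and dispatches to the matching kernel: 0 -> depth comparisons,
//1 -> color comparisons, 2 -> the mixed depth/color variant.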
extern "C" void predict_GPU_withDepth(float *color_img,float *depth_img,int width,int height,float *host_result,int trainStyle)
{
//load the trained tree
setData_onrun(color_img,depth_img,width,height);
int threadsPerBlock;
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
cudaEvent_t start, stop;
CUDA_CALL(cudaEventCreate(&start));
CUDA_CALL(cudaEventCreate(&stop));
CUDA_CALL(cudaEventRecord( start, 0 ));
threadsPerBlock=256;
if (trainStyle==0)
{
predict_prob_withDepth_depth<<<width*height/threadsPerBlock+1,threadsPerBlock>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
}
else if (trainStyle==1)
{
predict_prob_withDepth_color<<<width*height/threadsPerBlock+1,threadsPerBlock>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
}
else if (trainStyle==2)
{
predict_prob_withDepth_depth_color<<<width*height/threadsPerBlock+1,threadsPerBlock>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
}
// predict_prob_withDepth<<<width*height/threadsPerBlock+1,threadsPerBlock>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber,trainStyle);
//each tree goes independently first
//threadsPerBlock=256;
////Lock lock;
//dim3 grid(width*height/threadsPerBlock+1,data->max_num_of_trees_in_the_forest,1);
//predict_prob_withDepth_eachTree<<<grid,threadsPerBlock>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber,trainStyle);
//predict_prob_fullParral<<<data->max_num_of_trees_in_the_forest,64>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
//predict_prob_fullParral_pixel_tree<<<grid,16>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
//predict_prob_withDepth_textureTrees<<<width*height/threadsPerBlock+1,threadsPerBlock>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
//data->windowSize,(data->leafnode),data->MaxNumber,trainStyle);
CUDA_CALL(cudaEventRecord( stop, 0 ));
CUDA_CALL(cudaEventSynchronize( stop ));
float elapsedTime;
CUDA_CALL( cudaEventElapsedTime( &elapsedTime,
start, stop ) );
cout<<"time: "<<elapsedTime<<" ms"<<endl;
////////////////////////////////////////////////////////////
//find the maximum probability for each label
////////////////////////////////////////////////////////////
//CUDA_CALL(cudaMemcpy(data->cu_LabelResult,host_result,MPN*sizeof(LabelResult),cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(host_result,data->cu_LabelResult, MPN*(1+MAX_LABEL_NUMBER)*sizeof(float),cudaMemcpyDeviceToHost));
}
extern "C" void predict_GPU_withDepth_clean(int width,int height,float *host_result,int trainStyle)
{
//load the trained tree
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
dim3 grid(width,height,1);
cudaEvent_t start, stop;
CUDA_CALL(cudaEventCreate(&start));
CUDA_CALL(cudaEventCreate(&stop));
CUDA_CALL(cudaEventRecord( start, 0 ));
int threadsPerBlock=256;
if (trainStyle==0)
{
predict_prob_withDepth_depth<<<width*height/threadsPerBlock+1,threadsPerBlock>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
}
else if (trainStyle==1)
{
predict_prob_withDepth_color<<<width*height/threadsPerBlock+1,threadsPerBlock>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
}
else if (trainStyle==2)
{
predict_prob_withDepth_depth_color<<<width*height/threadsPerBlock+1,threadsPerBlock>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
}
/*predict_prob_withDepth<<<width*height/64+1,64>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber,trainStyle);*/
//predict_prob_fullParral<<<data->max_num_of_trees_in_the_forest,64>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
//predict_prob_fullParral_pixel_tree<<<grid,16>>>(data->cu_LabelResult,data->labelNum,data->max_num_of_trees_in_the_forest,width,height,
// data->windowSize,data->cu_vectorTrees,(data->leafnode),data->MaxNumber);
CUDA_CALL(cudaEventRecord( stop, 0 ));
CUDA_CALL(cudaEventSynchronize( stop ));
float elapsedTime;
CUDA_CALL( cudaEventElapsedTime( &elapsedTime,
start, stop ) );
cout<<"time: "<<elapsedTime<<" ms"<<endl;
	//we need to find out the maximum points and the locations. Others are not necessary to transfer.
//CUDA_CALL(cudaMemcpy(data->cu_LabelResult,host_result,MPN*sizeof(LabelResult),cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(host_result,data->cu_LabelResult, MPN*(1+MAX_LABEL_NUMBER)*sizeof(float),cudaMemcpyDeviceToHost));
}
//training examples copy
//training data: images
//cu_currentInterestIndex: current index list: [x1+ y1*width] and [x2 +y2*width]
extern "C" void setData_Training_Preprocess(float *trainingData,int num,int maximumDepth)
{
RandmizedTree_CUDA *data=&RandomizedTreeEngine;
CUDA_CALL( cudaMalloc(&data->cu_trainingData, num * sizeof(float)) );
CUDA_CALL(cudaMemcpy(data->cu_trainingData,trainingData,num*sizeof(float),cudaMemcpyHostToDevice));
//100d
CUDA_CALL( cudaMalloc(&data->cu_currentInterestIndex, maximumDepth*100*2 * sizeof(int)) );
}
//testData: current training data
//indexList: candidate lists
//sampleNum: the number of candidates
//startIndex: starting index of each image
__global__ void getGain(float *testData,int *indexList,int sampleNum,int *startingIndex)
{
int offset=threadIdx.x+blockIdx.x*blockDim.x;
int threashold=blockIdx.y;
int i;
int l_size,r_size;
l_size=r_size=0;
float labelNum[10];
int c_startIndex;
for(i=0;i<sampleNum;i++)
{
c_startIndex=startingIndex[i];
if (testData[c_startIndex+indexList[i]]>testData[c_startIndex+indexList[i+sampleNum]]+threashold)
{
}
}
}
//currentInd: [x1 y1] [x2 y2]
//length: the number of candidates
extern "C" void Split_MaximumGain_GPU(int *currentInd,int length)
{
//RandmizedTree_CUDA *data=&RandomizedTreeEngine;
//CUDA_CALL(cudaMemcpy(data->cu_currentInterestIndex,currentInd,num*sizeof(float),cudaMemcpyHostToDevice));
//
////calculate gain for each possible criteria
////try every possible value first, then range
//dim3 dim(length/32+1,255);
	//getGain<<<dim,32>>>(data->cu_trainingData,data->cu_currentInterestIndex,length);
} |
ea77c417ff770d1ca1c7fd37ed9248bc393fe591.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
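// Backward pass of the weighted mix c = s*a + (1-s)*b: scatters the incoming gradient dc
// into da (dc*s), db (dc*(1-s)) and ds (dc*(a-b)).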
__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
if(da) da[i] += dc[i] * s[i];
db[i] += dc[i] * (1-s[i]);
ds[i] += dc[i] * a[i] + dc[i] * -b[i];
}
} | ea77c417ff770d1ca1c7fd37ed9248bc393fe591.cu | #include "includes.h"
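// Backward pass of the weighted mix c = s*a + (1-s)*b: scatters the incoming gradient dc
// into da (dc*s), db (dc*(1-s)) and ds (dc*(a-b)).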
__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
if(da) da[i] += dc[i] * s[i];
db[i] += dc[i] * (1-s[i]);
ds[i] += dc[i] * a[i] + dc[i] * -b[i];
}
} |
33f980435592e1caa1d741f63758853745692d97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file receiver.cu
* @brief Implement a receiver
* @author John Melton, G0ORX/N6LYT
*/
/* Copyright (C)
* 2015 - John Melton, G0ORX/N6LYT
*
* Based on code by Steven Passe AD0ES and Vasiliy Gokoyev K3IT
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/if_ether.h>
#include <netpacket/packet.h>
#include <net/if_packet.h>
#include <hipfft.h>
#include <helper_cuda.h>
#include "common_hip.cuh"
#include "receiver.cuh"
#include "dfc.cuh"
#include "inputbuffer.cuh"
#include "rawbuffer.cuh"
#include "filters.cuh"
#include "hermes.cuh"
#include "time.cuh"
#define SCALE_FACTOR 8388607.0
float scale_factor=0.25F;
RECEIVER receiver[MAX_RECEIVER];
void* receiverThread(void* arg);
void initReceiver(int rx) {
int result;
hipError_t error;
RECEIVER* r;
fprintf(stderr,"initReceiver %d: scale_factor=%f\n",rx,scale_factor);
r=&receiver[rx];
r->id=rx;
error = hipHostMalloc(&r->receiverdata, COMPLEX_SIGNAL_SIZE*sizeof(hipfftComplex), hipHostMallocMapped);
if (error != hipSuccess) {
fprintf(stderr, "initReceiver: Error hipHostMalloc for receiver data %d\n", error);
exit(EXIT_FAILURE);
}
error = hipHostGetDevicePointer(&(r->deviceReceiverdata), r->receiverdata, 0);
if (error != hipSuccess) {
fprintf(stderr, "initReceiver: Error receiverdata hipHostGetDevicePointer %d\n", error);
exit(EXIT_FAILURE);
}
//fprintf(stderr,"slice size=%d\n",(COMPLEX_SIGNAL_SIZE/D_SIZE_384K)*sizeof(hipfftComplex));
error = hipHostMalloc(&r->slice, (COMPLEX_SIGNAL_SIZE/D_SIZE_384K)*sizeof(hipfftComplex), hipHostMallocMapped);
if (error != hipSuccess) {
fprintf(stderr, "processReceiverData: Error hipHostMalloc for slice data %d\n", error);
exit(EXIT_FAILURE);
}
error = hipHostGetDevicePointer(&r->deviceSlice, r->slice, 0);
if (error != hipSuccess) {
fprintf(stderr, "processReceiveData: Error slice data hipHostGetDevicePointer %d\n", error);
exit(EXIT_FAILURE);
}
//fprintf(stderr,"RX_TD_MAXSIZE=%d\n",RX_TD_MAXSIZE);
//fprintf(stderr,"decimate size=%d\n",(int)(RX_TD_MAXSIZE*sizeof(hipfftComplex)));
error = hipHostMalloc(&r->decimate, RX_TD_MAXSIZE*sizeof(hipfftComplex), hipHostMallocMapped);
if (error != hipSuccess) {
fprintf(stderr, "processReceiverData: Error hipHostMalloc for decimate %d\n", error);
exit(EXIT_FAILURE);
}
error = hipHostGetDevicePointer(&r->deviceDecimate, r->decimate, 0);
if (error != hipSuccess) {
fprintf(stderr, "processReceiveData: Error decimate hipHostGetDevicePointer %d\n", error);
exit(EXIT_FAILURE);
}
//fprintf(stderr,"tdoutput size=%d\n",(int)(RX_TD_MAXSIZE*sizeof(hipfftComplex)));
error = hipHostMalloc(&r->tdOutput, RX_TD_MAXSIZE*sizeof(hipfftComplex), hipHostMallocMapped);
if (error != hipSuccess) {
fprintf(stderr, "initHermes: Error hipHostMalloc for td output data %d\n", error);
exit(EXIT_FAILURE);
}
error = hipHostGetDevicePointer(&r->deviceTdOutput, r->tdOutput, 0);
if (error != hipSuccess) {
fprintf(stderr, "initHermes: Error td output hipHostGetDevicePointer %d\n", error);
exit(EXIT_FAILURE);
}
//fprintf(stderr,"output size=%d\n",(int)(RX_TD_MAXSIZE*sizeof(char)*6));
error = hipHostMalloc(&r->output, RX_TD_MAXSIZE*sizeof(char)*6, hipHostMallocMapped);
if (error != hipSuccess) {
fprintf(stderr, "initHermes: Error hipHostMalloc for output data %d\n", error);
exit(EXIT_FAILURE);
}
error = hipHostGetDevicePointer(&r->deviceOutput, r->output, 0);
if (error != hipSuccess) {
fprintf(stderr, "initHermes: Error output hipHostGetDevicePointer %d\n", error);
exit(EXIT_FAILURE);
}
result=sem_init(&r->inputReady, 0, 0);
if(result!=0) {
fprintf(stderr,"initReceiver %d: sem_init failed for inputReady%d\n", rx, result);
exit(EXIT_FAILURE);
}
result=sem_init(&r->outputReady, 0, 0);
if(result!=0) {
fprintf(stderr,"initReceiver %d: sem_init failed for outputReady%d\n", rx, result);
exit(EXIT_FAILURE);
}
if((result=pthread_create(&r->receiverThreadId, NULL, receiverThread, r)) < 0) {
fprintf(stderr, "receiverThread create failed %d\n",result);
exit(EXIT_FAILURE);
}
}
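// Frequency-domain fast convolution: multiply the input spectrum by the FIR filter
// spectrum, rotate the bins by nrot to mix the selected passband down to baseband, and
// only write the first COMPLEX_SIGNAL_SIZE/d_size bins that survive the later decimation.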
__global__ void
gpu_mix_and_convolve(const hipfftComplex* d_fft, const hipfftComplex* d_fir_fft,
hipfftComplex* d_receiver, const int nrot,
const float scale, int d_size)
{
const size_t numThreads = blockDim.x * gridDim.x;
const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
size_t new_index;
for (int i = tid; i < COMPLEX_SIGNAL_SIZE; i += numThreads) {
new_index = (i >= nrot) ? i - nrot : COMPLEX_SIGNAL_SIZE - nrot + i;
// Skip computing unneeded bins.
if (new_index > COMPLEX_SIGNAL_SIZE / d_size)
continue;
d_receiver[new_index] = ComplexScale(ComplexMul(d_fft[i], d_fir_fft[new_index]), scale);
}
}
__global__ void
gpu_decimate(const hipfftComplex* deviceReceiver, hipfftComplex* deviceSlice, int d_size, int outrot) {
const int threadId = blockIdx.x * blockDim.x + threadIdx.x;
size_t new_index;
new_index = (threadId >= outrot) ? threadId - outrot : COMPLEX_SIGNAL_SIZE - outrot + threadId;
deviceSlice[threadId] = deviceReceiver[new_index];
}
__global__ void
gpu_ifft_postprocess(const hipfftComplex* d_slice, hipfftComplex* d_rx_td,
char* d_rx_td_24bit, int decimate, int d_size, int rx_td_size,float scale_factor
)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= rx_td_size)
return;
int idx = tid * decimate + (P_SIZE-1)/2/d_size;
d_rx_td[tid]=d_slice[idx];
// Note: I & Q must be swapped.
long tempQ = (long)((double)d_rx_td[tid].x * scale_factor); //* SCALE_FACTOR);
long tempI = (long)((double)d_rx_td[tid].y * scale_factor); //* SCALE_FACTOR);
// Load samples in big endian format.
int baseindex = tid * 6; // start of the 24 bit sample
d_rx_td_24bit[baseindex++] = (char)((tempI >> 16) & 0xff);
d_rx_td_24bit[baseindex++] = (char)((tempI >> 8) & 0xff);
d_rx_td_24bit[baseindex++] = (char)((tempI >> 0) & 0xff);
d_rx_td_24bit[baseindex++] = (char)((tempQ >> 16) & 0xff);
d_rx_td_24bit[baseindex++] = (char)((tempQ >> 8) & 0xff);
d_rx_td_24bit[baseindex++] = (char)((tempQ >> 0) & 0xff);
}
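// Per-receiver worker thread: waits on inputReady, then runs the GPU pipeline
// (gpu_mix_and_convolve -> gpu_decimate -> inverse FFT -> gpu_ifft_postprocess, which
// packs 24-bit big-endian I/Q samples) and posts outputReady once the buffer is filled.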
void* receiverThread(void* arg) {
int result;
hipError_t error;
RECEIVER* r=(RECEIVER*)arg;
#ifdef TIMING
long long starttime;
long long endtime;
#endif
fprintf(stderr,"receiverThread %d: running on cpu %d\n", r->id, sched_getcpu());
while(1) {
result=sem_wait(&r->inputReady);
if(result!=0) {
fprintf(stderr, "receiverThread: sem_wait failed for inputReady: %d\n", result);
exit(EXIT_FAILURE);
}
//fprintf(stderr,"gpu_mix_and_convolve<<<%d,%d>>> rx=%d rotate=%d, scale=%f, d_size=%d\n", COMPLEX_SIGNAL_SIZE/8192,1024,r->id,r->rotate,r->scale,r->d_size);
hipLaunchKernelGGL(( gpu_mix_and_convolve), dim3(COMPLEX_SIGNAL_SIZE/8192), dim3(1024), 0, 0,
deviceFrequencysamples, r->deviceFilter, r->deviceReceiverdata, r->rotate, r->scale, r->d_size);
/*
hipDeviceSynchronize();
error = hipGetLastError();
if(error != hipSuccess) {
// print the CUDA error message and exit
fprintf(stderr,"gpu_mix_and_convolve CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
*/
//gpu_decimate<<<COMPLEX_SIGNAL_SIZE/1024/r->d_size, 1024>>>
hipLaunchKernelGGL(( gpu_decimate), dim3(COMPLEX_SIGNAL_SIZE/8192), dim3(1024), 0, 0,
r->deviceReceiverdata, r->deviceSlice, r->d_size, r->outrot );
/*
hipDeviceSynchronize();
error = hipGetLastError();
if(error != hipSuccess) {
// print the CUDA error message and exit
fprintf(stderr,"gpu_decimate CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
*/
// inverse FFT
hipfftResult err=hipfftExecC2C(r->planC2C, r->deviceSlice, r->deviceSlice, HIPFFT_BACKWARD);
if(err!=HIPFFT_SUCCESS) {
fprintf(stderr,"Error executing planC2C for input buffer: %s\n", _cudaGetErrorEnum(err));
exit(EXIT_FAILURE);
}
// convert to 24 bit samples
hipLaunchKernelGGL(( gpu_ifft_postprocess), dim3(r->rx_td_size/1024 + 1), dim3(1024), 0, 0,
r->deviceSlice, r->deviceTdOutput, r->deviceOutput, r->ifft_decimate_factor, r->d_size, r->rx_td_size, scale_factor);
// need to sync as last stage
hipDeviceSynchronize();
error = hipGetLastError();
if(error != hipSuccess) {
// print the CUDA error message and exit
fprintf(stderr,"gpu_ifft_postprocess CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
result=sem_post(&r->outputReady);
if(result!=0) {
fprintf(stderr, "receiverThread: sem_post failed for outputReady: %d\n", result);
exit(EXIT_FAILURE);
}
}
}
| 33f980435592e1caa1d741f63758853745692d97.cu | /**
* @file receiver.cu
* @brief Implement a receiver
* @author John Melton, G0ORX/N6LYT
*/
/* Copyright (C)
* 2015 - John Melton, G0ORX/N6LYT
*
* Based on code by Steven Passe AD0ES and Vasiliy Gokoyev K3IT
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/if_ether.h>
#include <netpacket/packet.h>
#include <net/if_packet.h>
#include <cufft.h>
#include <helper_cuda.h>
#include "common.cuh"
#include "receiver.cuh"
#include "dfc.cuh"
#include "inputbuffer.cuh"
#include "rawbuffer.cuh"
#include "filters.cuh"
#include "hermes.cuh"
#include "time.cuh"
#define SCALE_FACTOR 8388607.0
float scale_factor=0.25F;
RECEIVER receiver[MAX_RECEIVER];
void* receiverThread(void* arg);
void initReceiver(int rx) {
int result;
cudaError_t error;
RECEIVER* r;
fprintf(stderr,"initReceiver %d: scale_factor=%f\n",rx,scale_factor);
r=&receiver[rx];
r->id=rx;
error = cudaHostAlloc(&r->receiverdata, COMPLEX_SIGNAL_SIZE*sizeof(cufftComplex), cudaHostAllocMapped);
if (error != cudaSuccess) {
fprintf(stderr, "initReceiver: Error cudaHostAlloc for receiver data %d\n", error);
exit(EXIT_FAILURE);
}
error = cudaHostGetDevicePointer(&(r->deviceReceiverdata), r->receiverdata, 0);
if (error != cudaSuccess) {
fprintf(stderr, "initReceiver: Error receiverdata cudaHostGetDevicePointer %d\n", error);
exit(EXIT_FAILURE);
}
//fprintf(stderr,"slice size=%d\n",(COMPLEX_SIGNAL_SIZE/D_SIZE_384K)*sizeof(cufftComplex));
error = cudaHostAlloc(&r->slice, (COMPLEX_SIGNAL_SIZE/D_SIZE_384K)*sizeof(cufftComplex), cudaHostAllocMapped);
if (error != cudaSuccess) {
fprintf(stderr, "processReceiverData: Error cudaHostAlloc for slice data %d\n", error);
exit(EXIT_FAILURE);
}
error = cudaHostGetDevicePointer(&r->deviceSlice, r->slice, 0);
if (error != cudaSuccess) {
fprintf(stderr, "processReceiveData: Error slice data cudaHostGetDevicePointer %d\n", error);
exit(EXIT_FAILURE);
}
//fprintf(stderr,"RX_TD_MAXSIZE=%d\n",RX_TD_MAXSIZE);
//fprintf(stderr,"decimate size=%d\n",(int)(RX_TD_MAXSIZE*sizeof(cufftComplex)));
error = cudaHostAlloc(&r->decimate, RX_TD_MAXSIZE*sizeof(cufftComplex), cudaHostAllocMapped);
if (error != cudaSuccess) {
fprintf(stderr, "processReceiverData: Error cudaHostAlloc for decimate %d\n", error);
exit(EXIT_FAILURE);
}
error = cudaHostGetDevicePointer(&r->deviceDecimate, r->decimate, 0);
if (error != cudaSuccess) {
fprintf(stderr, "processReceiveData: Error decimate cudaHostGetDevicePointer %d\n", error);
exit(EXIT_FAILURE);
}
//fprintf(stderr,"tdoutput size=%d\n",(int)(RX_TD_MAXSIZE*sizeof(cufftComplex)));
error = cudaHostAlloc(&r->tdOutput, RX_TD_MAXSIZE*sizeof(cufftComplex), cudaHostAllocMapped);
if (error != cudaSuccess) {
fprintf(stderr, "initHermes: Error cudaHostAlloc for td output data %d\n", error);
exit(EXIT_FAILURE);
}
error = cudaHostGetDevicePointer(&r->deviceTdOutput, r->tdOutput, 0);
if (error != cudaSuccess) {
fprintf(stderr, "initHermes: Error td output cudaHostGetDevicePointer %d\n", error);
exit(EXIT_FAILURE);
}
//fprintf(stderr,"output size=%d\n",(int)(RX_TD_MAXSIZE*sizeof(char)*6));
error = cudaHostAlloc(&r->output, RX_TD_MAXSIZE*sizeof(char)*6, cudaHostAllocMapped);
if (error != cudaSuccess) {
fprintf(stderr, "initHermes: Error cudaHostAlloc for output data %d\n", error);
exit(EXIT_FAILURE);
}
error = cudaHostGetDevicePointer(&r->deviceOutput, r->output, 0);
if (error != cudaSuccess) {
fprintf(stderr, "initHermes: Error output cudaHostGetDevicePointer %d\n", error);
exit(EXIT_FAILURE);
}
result=sem_init(&r->inputReady, 0, 0);
if(result!=0) {
fprintf(stderr,"initReceiver %d: sem_init failed for inputReady%d\n", rx, result);
exit(EXIT_FAILURE);
}
result=sem_init(&r->outputReady, 0, 0);
if(result!=0) {
fprintf(stderr,"initReceiver %d: sem_init failed for outputReady%d\n", rx, result);
exit(EXIT_FAILURE);
}
if((result=pthread_create(&r->receiverThreadId, NULL, receiverThread, r)) < 0) {
fprintf(stderr, "receiverThread create failed %d\n",result);
exit(EXIT_FAILURE);
}
}
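// Frequency-domain fast convolution: multiply the input spectrum by the FIR filter
// spectrum, rotate the bins by nrot to mix the selected passband down to baseband, and
// only write the first COMPLEX_SIGNAL_SIZE/d_size bins that survive the later decimation.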
__global__ void
gpu_mix_and_convolve(const cufftComplex* d_fft, const cufftComplex* d_fir_fft,
cufftComplex* d_receiver, const int nrot,
const float scale, int d_size)
{
const size_t numThreads = blockDim.x * gridDim.x;
const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
size_t new_index;
for (int i = tid; i < COMPLEX_SIGNAL_SIZE; i += numThreads) {
new_index = (i >= nrot) ? i - nrot : COMPLEX_SIGNAL_SIZE - nrot + i;
// Skip computing unneeded bins.
if (new_index > COMPLEX_SIGNAL_SIZE / d_size)
continue;
d_receiver[new_index] = ComplexScale(ComplexMul(d_fft[i], d_fir_fft[new_index]), scale);
}
}
__global__ void
gpu_decimate(const cufftComplex* deviceReceiver, cufftComplex* deviceSlice, int d_size, int outrot) {
const int threadId = blockIdx.x * blockDim.x + threadIdx.x;
size_t new_index;
new_index = (threadId >= outrot) ? threadId - outrot : COMPLEX_SIGNAL_SIZE - outrot + threadId;
deviceSlice[threadId] = deviceReceiver[new_index];
}
__global__ void
gpu_ifft_postprocess(const cufftComplex* d_slice, cufftComplex* d_rx_td,
char* d_rx_td_24bit, int decimate, int d_size, int rx_td_size,float scale_factor
)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= rx_td_size)
return;
int idx = tid * decimate + (P_SIZE-1)/2/d_size;
d_rx_td[tid]=d_slice[idx];
// Note: I & Q must be swapped.
long tempQ = (long)((double)d_rx_td[tid].x * scale_factor); //* SCALE_FACTOR);
long tempI = (long)((double)d_rx_td[tid].y * scale_factor); //* SCALE_FACTOR);
// Load samples in big endian format.
int baseindex = tid * 6; // start of the 24 bit sample
d_rx_td_24bit[baseindex++] = (char)((tempI >> 16) & 0xff);
d_rx_td_24bit[baseindex++] = (char)((tempI >> 8) & 0xff);
d_rx_td_24bit[baseindex++] = (char)((tempI >> 0) & 0xff);
d_rx_td_24bit[baseindex++] = (char)((tempQ >> 16) & 0xff);
d_rx_td_24bit[baseindex++] = (char)((tempQ >> 8) & 0xff);
d_rx_td_24bit[baseindex++] = (char)((tempQ >> 0) & 0xff);
}
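// Per-receiver worker thread: waits on inputReady, then runs the GPU pipeline
// (gpu_mix_and_convolve -> gpu_decimate -> inverse FFT -> gpu_ifft_postprocess, which
// packs 24-bit big-endian I/Q samples) and posts outputReady once the buffer is filled.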
void* receiverThread(void* arg) {
int result;
cudaError_t error;
RECEIVER* r=(RECEIVER*)arg;
#ifdef TIMING
long long starttime;
long long endtime;
#endif
fprintf(stderr,"receiverThread %d: running on cpu %d\n", r->id, sched_getcpu());
while(1) {
result=sem_wait(&r->inputReady);
if(result!=0) {
fprintf(stderr, "receiverThread: sem_wait failed for inputReady: %d\n", result);
exit(EXIT_FAILURE);
}
//fprintf(stderr,"gpu_mix_and_convolve<<<%d,%d>>> rx=%d rotate=%d, scale=%f, d_size=%d\n", COMPLEX_SIGNAL_SIZE/8192,1024,r->id,r->rotate,r->scale,r->d_size);
gpu_mix_and_convolve<<<COMPLEX_SIGNAL_SIZE/8192, 1024>>>
(deviceFrequencysamples, r->deviceFilter, r->deviceReceiverdata, r->rotate, r->scale, r->d_size);
/*
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != cudaSuccess) {
// print the CUDA error message and exit
fprintf(stderr,"gpu_mix_and_convolve CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
*/
//gpu_decimate<<<COMPLEX_SIGNAL_SIZE/1024/r->d_size, 1024>>>
gpu_decimate<<<COMPLEX_SIGNAL_SIZE/8192, 1024>>>
(r->deviceReceiverdata, r->deviceSlice, r->d_size, r->outrot );
/*
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != cudaSuccess) {
// print the CUDA error message and exit
fprintf(stderr,"gpu_decimate CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
*/
// inverse FFT
cufftResult err=cufftExecC2C(r->planC2C, r->deviceSlice, r->deviceSlice, CUFFT_INVERSE);
if(err!=CUFFT_SUCCESS) {
fprintf(stderr,"Error executing planC2C for input buffer: %s\n", _cudaGetErrorEnum(err));
exit(EXIT_FAILURE);
}
// convert to 24 bit samples
gpu_ifft_postprocess<<<r->rx_td_size/1024 + 1, 1024>>>
(r->deviceSlice, r->deviceTdOutput, r->deviceOutput, r->ifft_decimate_factor, r->d_size, r->rx_td_size, scale_factor);
// need to sync as last stage
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != cudaSuccess) {
// print the CUDA error message and exit
fprintf(stderr,"gpu_ifft_postprocess CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
result=sem_post(&r->outputReady);
if(result!=0) {
fprintf(stderr, "receiverThread: sem_post failed for outputReady: %d\n", result);
exit(EXIT_FAILURE);
}
}
}
|
b0f11749c3a13d6e19d274da4923be17f0ac8601.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
FLUIDS v.3 - SPH Fluid Simulator for CPU and GPU
Copyright (C) 2012-2013. Rama Hoetzlein, http://fluids3.com
Attribute-ZLib license (* See additional part 4)
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
4. Any published work based on this code must include public acknowledgement
of the origin. This includes following when applicable:
- Journal/Paper publications. Credited by reference to work in text & citation.
- Public presentations. Credited in at least one slide.
- Distributed Games/Apps. Credited as single line in game or app credit page.
Retaining this additional license term is required in derivative works.
Acknowledgement may be provided as:
Publication version:
2012-2013, Hoetzlein, Rama C. Fluids v.3 - A Large-Scale, Open Source
Fluid Simulator. Published online at: http://fluids3.com
Single line (slides or app credits):
GPU Fluids: Rama C. Hoetzlein (Fluids v3 2013)
Notes on Clause 4:
The intent of this clause is public attribution for this contribution, not code use restriction.
Both commerical and open source projects may redistribute and reuse without code release.
However, clause #1 of ZLib indicates that "you must not claim that you wrote the original software".
Clause #4 makes this more specific by requiring public acknowledgement to be extended to
derivative licenses.
*/
#ifdef WIN32
#include <conio.h>
#endif
//#include <cutil.h> // cutil32.lib
#include "cutil_math.h" // cutil32.lib
#include <string.h>
#include <assert.h>
#ifdef WIN32
#include <windows.h>
#endif
//#include <cuda_gl_interop.h>
#include <stdio.h>
#include <math.h>
extern void app_printf ( char* format, ... );
extern void app_printEXIT ( char* format, ... );
extern char app_getch ();
#include "fluid_system_host.cuh"
#include "fluid_system_kern.cuh"
FluidParams fcuda; // CPU Fluid params
FluidParams* mcuda; // GPU Fluid params
bufList fbuf; // GPU Particle buffers
bool cudaCheck ( hipError_t status, char* msg )
{
if ( status != hipSuccess ) {
app_printf ( "CUDA ERROR: %s\n", hipGetErrorString ( status ) );
app_getch ();
// MessageBox ( NULL, hipGetErrorString ( status), msg, MB_OK );
return false;
} else {
//app_printf ( "%s. OK.\n", msg );
}
return true;
}
void cudaExit ()
{
int argc = 1;
char* argv[] = {"fluids"};
hipDeviceReset();
}
// Initialize CUDA
void cudaInit()
{
int argc = 1;
char* argv[] = {"fluids"};
int count = 0;
int i = 0;
hipError_t err = hipGetDeviceCount(&count);
if ( err==hipErrorInsufficientDriver) { app_printEXIT( "CUDA driver not installed.\n"); }
if ( err==hipErrorNoDevice) { app_printEXIT ( "No CUDA device found.\n"); }
if ( count == 0) { app_printEXIT ( "No CUDA device found.\n"); }
for(i = 0; i < count; i++) {
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess)
if(prop.major >= 1) break;
}
if(i == count) { app_printEXIT ( "No CUDA device found.\n"); }
hipSetDevice(i);
app_printf( "CUDA initialized.\n");
hipDeviceProp_t p;
hipGetDeviceProperties ( &p, 0);
app_printf ( "-- CUDA --\n" );
app_printf ( "Name: %s\n", p.name );
app_printf ( "Revision: %d.%d\n", p.major, p.minor );
app_printf ( "Global Mem: %d\n", p.totalGlobalMem );
app_printf ( "Shared/Blk: %d\n", p.sharedMemPerBlock );
app_printf ( "Regs/Blk: %d\n", p.regsPerBlock );
app_printf ( "Warp Size: %d\n", p.warpSize );
app_printf ( "Mem Pitch: %d\n", p.memPitch );
app_printf ( "Thrds/Blk: %d\n", p.maxThreadsPerBlock );
app_printf ( "Const Mem: %d\n", p.totalConstMem );
app_printf ( "Clock Rate: %d\n", p.clockRate );
fbuf.mgridactive = 0x0;
// Allocate the sim parameters
cudaCheck ( hipMalloc ( (void**) &mcuda, sizeof(FluidParams) ), "Malloc FluidParams mcuda" );
// Allocate particle buffers
cudaCheck ( hipMalloc ( (void**) &fbuf.mpos, sizeof(float)*3 ), "Malloc mpos" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mvel, sizeof(float)*3), "Malloc mvel" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mveleval, sizeof(float)*3), "Malloc mveleval" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mforce, sizeof(float)*3), "Malloc mforce" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mpress, sizeof(float) ), "Malloc mpress" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mdensity, sizeof(float) ), "Malloc mdensity" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mgcell, sizeof(uint)), "Malloc mgcell" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mgndx, sizeof(uint)), "Malloc mgndx" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mclr, sizeof(uint)), "Malloc mclr" );
	cudaCheck ( hipMalloc ( (void**) &fbuf.msortbuf, sizeof(uint) ), "Malloc msortbuf" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mgrid, 1 ), "Malloc mgrid" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mgridcnt, 1 ), "Malloc mgridcnt" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mgridoff, 1 ), "Malloc mgridoff" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mgridactive, 1 ), "Malloc mgridactive");
//cudaCheck ( hipMalloc ( (void**) &fbuf.mcluster, sizeof(uint) ) );
preallocBlockSumsInt ( 1 );
};
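// The buffers allocated above are only tiny placeholders (a few bytes each);
// the full-size particle and grid arrays are allocated later in FluidSetupCUDA,
// once the particle count and grid resolution are known, with FluidClearCUDA
// expected to release whatever was allocated before.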
// Compute number of blocks to create
int iDivUp (int a, int b) {
return (a % b != 0) ? (a / b + 1) : (a / b);
}
void computeNumBlocks (int numPnts, int maxThreads, int &numBlocks, int &numThreads)
{
numThreads = min( maxThreads, numPnts );
numBlocks = iDivUp ( numPnts, numThreads );
}
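// Illustration: with numPnts = 10000 and maxThreads = 192, numThreads = 192
// and numBlocks = iDivUp(10000,192) = 53, so the padded buffer size used for
// allocation below is 53*192 = 10176 particle slots (slightly more than pnum).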
void FluidClearCUDA ()
{
cudaCheck ( hipFree ( fbuf.mpos ), "Free mpos" );
cudaCheck ( hipFree ( fbuf.mvel ), "Free mvel" );
cudaCheck ( hipFree ( fbuf.mveleval ), "Free mveleval" );
cudaCheck ( hipFree ( fbuf.mforce ), "Free mforce" );
cudaCheck ( hipFree ( fbuf.mpress ), "Free mpress");
cudaCheck ( hipFree ( fbuf.mdensity ), "Free mdensity" );
cudaCheck ( hipFree ( fbuf.mgcell ), "Free mgcell" );
cudaCheck ( hipFree ( fbuf.mgndx ), "Free mgndx" );
cudaCheck ( hipFree ( fbuf.mclr ), "Free mclr" );
//cudaCheck ( hipFree ( fbuf.mcluster ) );
cudaCheck ( hipFree ( fbuf.msortbuf ), "Free msortbuf" );
cudaCheck ( hipFree ( fbuf.mgrid ), "Free mgrid" );
cudaCheck ( hipFree ( fbuf.mgridcnt ), "Free mgridcnt" );
cudaCheck ( hipFree ( fbuf.mgridoff ), "Free mgridoff" );
cudaCheck ( hipFree ( fbuf.mgridactive ), "Free mgridactive" );
}
void FluidSetupCUDA ( int num, int gsrch, int3 res, float3 size, float3 delta, float3 gmin, float3 gmax, int total, int chk )
{
fcuda.pnum = num;
fcuda.gridRes = res;
fcuda.gridSize = size;
fcuda.gridDelta = delta;
fcuda.gridMin = gmin;
fcuda.gridMax = gmax;
fcuda.gridTotal = total;
fcuda.gridSrch = gsrch;
fcuda.gridAdjCnt = gsrch*gsrch*gsrch;
fcuda.gridScanMax = res;
fcuda.gridScanMax -= make_int3( fcuda.gridSrch, fcuda.gridSrch, fcuda.gridSrch );
fcuda.chk = chk;
// Build Adjacency Lookup
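// gridAdj holds gridSrch^3 relative cell offsets. With the flattened index
// ( y*gridRes.z + z )*gridRes.x + x used below, x varies fastest (stride 1),
// z has stride gridRes.x and y has stride gridRes.x*gridRes.z; the kernels
// presumably add these offsets to a base cell index to sweep the whole
// search neighbourhood.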
int cell = 0;
for (int y=0; y < gsrch; y++ )
for (int z=0; z < gsrch; z++ )
for (int x=0; x < gsrch; x++ )
fcuda.gridAdj [ cell++] = ( y * fcuda.gridRes.z+ z )*fcuda.gridRes.x + x ;
app_printf ( "CUDA Adjacency Table\n");
for (int n=0; n < fcuda.gridAdjCnt; n++ ) {
app_printf ( " ADJ: %d, %d\n", n, fcuda.gridAdj[n] );
}
// Compute number of blocks and threads
int threadsPerBlock = 192;
computeNumBlocks ( fcuda.pnum, threadsPerBlock, fcuda.numBlocks, fcuda.numThreads); // particles
computeNumBlocks ( fcuda.gridTotal, threadsPerBlock, fcuda.gridBlocks, fcuda.gridThreads); // grid cell
// Allocate particle buffers
fcuda.szPnts = (fcuda.numBlocks * fcuda.numThreads);
app_printf ( "CUDA Allocate: \n" );
app_printf ( " Pnts: %d, t:%dx%d=%d, Size:%d\n", fcuda.pnum, fcuda.numBlocks, fcuda.numThreads, fcuda.numBlocks*fcuda.numThreads, fcuda.szPnts);
app_printf ( " Grid: %d, t:%dx%d=%d, bufGrid:%d, Res: %dx%dx%d\n", fcuda.gridTotal, fcuda.gridBlocks, fcuda.gridThreads, fcuda.gridBlocks*fcuda.gridThreads, fcuda.szGrid, (int) fcuda.gridRes.x, (int) fcuda.gridRes.y, (int) fcuda.gridRes.z );
cudaCheck ( hipMalloc ( (void**) &fbuf.mpos, fcuda.szPnts*sizeof(float)*3 ), "Malloc mpos" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mvel, fcuda.szPnts*sizeof(float)*3 ), "Malloc mvel" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mveleval, fcuda.szPnts*sizeof(float)*3 ), "Malloc mveleval" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mforce, fcuda.szPnts*sizeof(float)*3 ), "Malloc mforce" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mpress, fcuda.szPnts*sizeof(float) ), "Malloc mpress" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mdensity, fcuda.szPnts*sizeof(float) ), "Malloc mdensity" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mgcell, fcuda.szPnts*sizeof(uint) ), "Malloc mgcell" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mgndx, fcuda.szPnts*sizeof(uint)), "Malloc mgndx" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mclr, fcuda.szPnts*sizeof(uint) ), "Malloc mclr" );
//cudaCheck ( hipMalloc ( (void**) &fbuf.mcluster, fcuda.szPnts*sizeof(uint) ) );
int temp_size = 4*(sizeof(float)*3) + 2*sizeof(float) + 2*sizeof(int) + sizeof(uint);
cudaCheck ( hipMalloc ( (void**) &fbuf.msortbuf, fcuda.szPnts*temp_size ), "Malloc msortbuf" );
// Allocate grid
fcuda.szGrid = (fcuda.gridBlocks * fcuda.gridThreads);
cudaCheck ( hipMalloc ( (void**) &fbuf.mgrid, fcuda.szPnts*sizeof(int) ), "Malloc mgrid" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mgridcnt, fcuda.szGrid*sizeof(int) ), "Malloc mgridcnt" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mgridoff, fcuda.szGrid*sizeof(int) ), "Malloc mgridoff" );
cudaCheck ( hipMalloc ( (void**) &fbuf.mgridactive, fcuda.szGrid*sizeof(int) ), "Malloc mgridactive" );
// Transfer sim params to device
updateSimParams ( &fcuda );
hipDeviceSynchronize ();
// Prefix Sum - Preallocate Block sums for Sorting
deallocBlockSumsInt ();
preallocBlockSumsInt ( fcuda.gridTotal );
}
void FluidParamCUDA ( float ss, float sr, float pr, float mass, float rest, float3 bmin, float3 bmax, float estiff, float istiff, float visc, float damp, float fmin, float fmax, float ffreq, float gslope, float gx, float gy, float gz, float al, float vl )
{
fcuda.psimscale = ss;
fcuda.psmoothradius = sr;
fcuda.pradius = pr;
fcuda.r2 = sr * sr;
fcuda.pmass = mass;
fcuda.prest_dens = rest;
fcuda.pboundmin = bmin;
fcuda.pboundmax = bmax;
fcuda.pextstiff = estiff;
fcuda.pintstiff = istiff;
fcuda.pvisc = visc;
fcuda.pdamp = damp;
fcuda.pforce_min = fmin;
fcuda.pforce_max = fmax;
fcuda.pforce_freq = ffreq;
fcuda.pground_slope = gslope;
fcuda.pgravity = make_float3( gx, gy, gz );
fcuda.AL = al;
fcuda.AL2 = al * al;
fcuda.VL = vl;
fcuda.VL2 = vl * vl;
//app_printf ( "Bound Min: %f %f %f\n", bmin.x, bmin.y, bmin.z );
//app_printf ( "Bound Max: %f %f %f\n", bmax.x, bmax.y, bmax.z );
fcuda.pdist = pow ( fcuda.pmass / fcuda.prest_dens, 1/3.0f );
fcuda.poly6kern = 315.0f / (64.0f * 3.141592 * pow( sr, 9.0f) );
fcuda.spikykern = -45.0f / (3.141592 * pow( sr, 6.0f) );
fcuda.lapkern = 45.0f / (3.141592 * pow( sr, 6.0f) );
fcuda.d2 = fcuda.psimscale * fcuda.psimscale;
fcuda.rd2 = fcuda.r2 / fcuda.d2;
fcuda.vterm = fcuda.lapkern * fcuda.pvisc;
// Transfer sim params to device
updateSimParams ( &fcuda );
hipDeviceSynchronize ();
}
void CopyToCUDA ( float* pos, float* vel, float* veleval, float* force, float* pressure, float* density, uint* cluster, uint* gnext, char* clr )
{
// Send particle buffers
int numPoints = fcuda.pnum;
cudaCheck( hipMemcpy ( fbuf.mpos, pos, numPoints*sizeof(float)*3, hipMemcpyHostToDevice ), "Memcpy mpos ToDev" );
cudaCheck( hipMemcpy ( fbuf.mvel, vel, numPoints*sizeof(float)*3, hipMemcpyHostToDevice ), "Memcpy mvel ToDev" );
cudaCheck( hipMemcpy ( fbuf.mveleval, veleval, numPoints*sizeof(float)*3, hipMemcpyHostToDevice ), "Memcpy mveleval ToDev" );
cudaCheck( hipMemcpy ( fbuf.mforce, force, numPoints*sizeof(float)*3, hipMemcpyHostToDevice ), "Memcpy mforce ToDev" );
cudaCheck( hipMemcpy ( fbuf.mpress, pressure, numPoints*sizeof(float), hipMemcpyHostToDevice ), "Memcpy mpress ToDev" );
cudaCheck( hipMemcpy ( fbuf.mdensity, density, numPoints*sizeof(float), hipMemcpyHostToDevice ), "Memcpy mdensity ToDev" );
cudaCheck( hipMemcpy ( fbuf.mclr, clr, numPoints*sizeof(uint), hipMemcpyHostToDevice ), "Memcpy mclr ToDev" );
hipDeviceSynchronize ();
}
void CopyFromCUDA ( float* pos, float* vel, float* veleval, float* force, float* pressure, float* density, uint* cluster, uint* gnext, char* clr )
{
// Return particle buffers
int numPoints = fcuda.pnum;
if ( pos != 0x0 ) cudaCheck( hipMemcpy ( pos, fbuf.mpos, numPoints*sizeof(float)*3, hipMemcpyDeviceToHost ), "Memcpy mpos FromDev" );
if ( clr != 0x0 ) cudaCheck( hipMemcpy ( clr, fbuf.mclr, numPoints*sizeof(uint), hipMemcpyDeviceToHost ), "Memcpy mclr FromDev" );
/*cudaCheck( hipMemcpy ( vel, fbuf.mvel, numPoints*sizeof(float)*3, hipMemcpyDeviceToHost ) );
cudaCheck( hipMemcpy ( veleval, fbuf.mveleval, numPoints*sizeof(float)*3, hipMemcpyDeviceToHost ) );
cudaCheck( hipMemcpy ( force, fbuf.mforce, numPoints*sizeof(float)*3, hipMemcpyDeviceToHost ) );
cudaCheck( hipMemcpy ( pressure, fbuf.mpress, numPoints*sizeof(float), hipMemcpyDeviceToHost ) );
cudaCheck( hipMemcpy ( density, fbuf.mdensity, numPoints*sizeof(float), hipMemcpyDeviceToHost ) );*/
hipDeviceSynchronize ();
}
void InsertParticlesCUDA ( uint* gcell, uint* ccell, int* gcnt )
{
hipMemset ( fbuf.mgridcnt, 0, fcuda.gridTotal * sizeof(int));
hipLaunchKernelGGL(( insertParticles), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum );
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf ( stderr, "CUDA ERROR: InsertParticlesCUDA: %s\n", hipGetErrorString(error) );
}
hipDeviceSynchronize ();
// Transfer data back if requested (for validation)
if (gcell != 0x0) {
cudaCheck( hipMemcpy ( gcell, fbuf.mgcell, fcuda.pnum*sizeof(uint), hipMemcpyDeviceToHost ), "Memcpy mgcell FromDev");
cudaCheck( hipMemcpy ( gcnt, fbuf.mgridcnt, fcuda.gridTotal*sizeof(int), hipMemcpyDeviceToHost ), "Memcpy mgridcnt FromDev" );
//cudaCheck( hipMemcpy ( ccell, fbuf.mcluster, fcuda.pnum*sizeof(uint), hipMemcpyDeviceToHost ) );
}
}
void PrefixSumCellsCUDA ( int* goff )
{
// Prefix Sum - determine grid offsets
prescanArrayRecursiveInt ( fbuf.mgridoff, fbuf.mgridcnt, fcuda.gridTotal, 0);
hipDeviceSynchronize ();
// Transfer data back if requested
if ( goff != 0x0 ) {
cudaCheck( hipMemcpy ( goff, fbuf.mgridoff, fcuda.gridTotal * sizeof(int), hipMemcpyDeviceToHost ), "Memcpy mgoff FromDev" );
}
}
void CountingSortIndexCUDA ( uint* ggrid )
{
// Counting Sort - pass one, determine grid counts
hipMemset ( fbuf.mgrid, GRID_UCHAR, fcuda.pnum * sizeof(int) );
hipLaunchKernelGGL(( countingSortIndex) , dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum );
hipDeviceSynchronize ();
// Transfer data back if requested
if ( ggrid != 0x0 ) {
cudaCheck( hipMemcpy ( ggrid, fbuf.mgrid, fcuda.pnum * sizeof(uint), hipMemcpyDeviceToHost ), "Memcpy mgrid FromDev" );
}
}
void CountingSortFullCUDA ( uint* ggrid )
{
// Transfer particle data to temp buffers
int n = fcuda.pnum;
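// msortbuf is treated as one byte buffer split into per-field blocks:
// BUF_POS, BUF_VEL, ... appear to be cumulative per-particle byte offsets,
// so n*BUF_xxx is the start of that field's block of n entries. The layout
// must agree with temp_size used for the allocation in FluidSetupCUDA.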
cudaCheck ( hipMemcpy ( fbuf.msortbuf + n*BUF_POS, fbuf.mpos, n*sizeof(float)*3, hipMemcpyDeviceToDevice ), "Memcpy msortbuf->mpos DevToDev" );
cudaCheck ( hipMemcpy ( fbuf.msortbuf + n*BUF_VEL, fbuf.mvel, n*sizeof(float)*3, hipMemcpyDeviceToDevice ), "Memcpy msortbuf->mvel DevToDev" );
cudaCheck ( hipMemcpy ( fbuf.msortbuf + n*BUF_VELEVAL, fbuf.mveleval, n*sizeof(float)*3, hipMemcpyDeviceToDevice ), "Memcpy msortbuf->mveleval DevToDev" );
cudaCheck ( hipMemcpy ( fbuf.msortbuf + n*BUF_FORCE, fbuf.mforce, n*sizeof(float)*3, hipMemcpyDeviceToDevice ), "Memcpy msortbuf->mforce DevToDev" );
cudaCheck ( hipMemcpy ( fbuf.msortbuf + n*BUF_PRESS, fbuf.mpress, n*sizeof(float), hipMemcpyDeviceToDevice ), "Memcpy msortbuf->mpress DevToDev" );
cudaCheck ( hipMemcpy ( fbuf.msortbuf + n*BUF_DENS, fbuf.mdensity, n*sizeof(float), hipMemcpyDeviceToDevice ), "Memcpy msortbuf->mdens DevToDev" );
cudaCheck ( hipMemcpy ( fbuf.msortbuf + n*BUF_GCELL, fbuf.mgcell, n*sizeof(uint), hipMemcpyDeviceToDevice ), "Memcpy msortbuf->mgcell DevToDev" );
cudaCheck ( hipMemcpy ( fbuf.msortbuf + n*BUF_GNDX, fbuf.mgndx, n*sizeof(uint), hipMemcpyDeviceToDevice ), "Memcpy msortbuf->mgndx DevToDev" );
cudaCheck ( hipMemcpy ( fbuf.msortbuf + n*BUF_CLR, fbuf.mclr, n*sizeof(uint), hipMemcpyDeviceToDevice ), "Memcpy msortbuf->mclr DevToDev" );
// Counting Sort - pass one, determine grid counts
hipMemset ( fbuf.mgrid, GRID_UCHAR, fcuda.pnum * sizeof(int) );
hipLaunchKernelGGL(( countingSortFull) , dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum );
hipDeviceSynchronize ();
}
void ComputePressureCUDA ()
{
hipLaunchKernelGGL(( computePressure), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum );
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf ( stderr, "CUDA ERROR: ComputePressureCUDA: %s\n", hipGetErrorString(error) );
}
hipDeviceSynchronize ();
}
void ComputeQueryCUDA ()
{
hipLaunchKernelGGL(( computeQuery), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum );
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf ( stderr, "CUDA ERROR: ComputePressureCUDA: %s\n", hipGetErrorString(error) );
}
hipDeviceSynchronize ();
}
void CountActiveCUDA ()
{
int threads = 1;
int blocks = 1;
assert ( fbuf.mgridactive != 0x0 );
/*#ifdef CUDA_42
hipMemcpyToSymbol ( "gridActive", &fcuda.gridActive, sizeof(int) );
#else
hipMemcpyToSymbol ( gridActive, &fcuda.gridActive, sizeof(int) );
#endif */
hipLaunchKernelGGL(( countActiveCells), dim3(blocks), dim3(threads) , 0, 0, fbuf, fcuda.gridTotal );
hipDeviceSynchronize ();
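// Note: naming the symbol as a string ("gridActive") is the legacy CUDA (<5.0)
// convention; HIP (and current CUDA) expect the symbol itself, e.g.
// hipMemcpyFromSymbol( &fcuda.gridActive, HIP_SYMBOL(gridActive), sizeof(int) ),
// with the __device__ variable visible in this translation unit.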
hipMemcpyFromSymbol ( &fcuda.gridActive, "gridActive", sizeof(int) );
app_printf ( "Active cells: %d\n", fcuda.gridActive );
}
void ComputePressureGroupCUDA ()
{
if ( fcuda.gridActive > 0 ) {
int threads = 128; // should be based on maximum occupancy
uint3 blocks;
blocks.x = 4096;
blocks.y = (fcuda.gridActive / 4096 )+1;
blocks.z = 1;
hipLaunchKernelGGL(( computePressureGroup), dim3(blocks), dim3(threads) , 0, 0, fbuf, fcuda.pnum );
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf ( stderr, "CUDA ERROR: ComputePressureGroupCUDA: %s\n", hipGetErrorString(error) );
}
hipDeviceSynchronize ();
}
}
void ComputeForceCUDA ()
{
hipLaunchKernelGGL(( computeForce), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum );
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf ( stderr, "CUDA ERROR: ComputeForceCUDA: %s\n", hipGetErrorString(error) );
}
hipDeviceSynchronize ();
}
void AdvanceCUDA ( float tm, float dt, float ss )
{
hipLaunchKernelGGL(( advanceParticles), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, tm, dt, ss, fbuf, fcuda.pnum );
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf ( stderr, "CUDA ERROR: AdvanceCUDA: %s\n", hipGetErrorString(error) );
}
hipDeviceSynchronize ();
}
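// Typical per-step call order on the host, inferred from the functions above
// (the actual driver lives in the application, not in this file):
//
// CopyToCUDA(...); // once, after FluidSetupCUDA / FluidParamCUDA
// for each frame {
// InsertParticlesCUDA ( 0x0, 0x0, 0x0 ); // bin particles into grid cells
// PrefixSumCellsCUDA ( 0x0 ); // scan cell counts -> cell offsets
// CountingSortFullCUDA ( 0x0 ); // reorder particles into cell order
// ComputePressureCUDA (); // SPH density / pressure
// ComputeForceCUDA (); // SPH pressure + viscosity forces
// AdvanceCUDA ( tm, dt, ss ); // integrate the particles
// CopyFromCUDA (...); // positions / colours back for rendering
// }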
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
// includes, kernels
#include <assert.h>
inline bool isPowerOfTwo(int n) { return ((n&(n-1))==0) ; }
inline int floorPow2(int n) {
#ifdef WIN32
return 1 << (int)logb((float)n);
#else
int exp;
frexp((float)n, &exp);
return 1 << (exp - 1);
#endif
}
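// Illustration: floorPow2 returns the largest power of two <= n for n >= 1,
// e.g. floorPow2(1000) == 512 and floorPow2(256) == 256. isPowerOfTwo(0) is
// true by this test, but the scan code only calls these with positive sizes.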
#define BLOCK_SIZE 256
float** g_scanBlockSums = 0;
int** g_scanBlockSumsInt = 0;
unsigned int g_numEltsAllocated = 0;
unsigned int g_numLevelsAllocated = 0;
void preallocBlockSums(unsigned int maxNumElements)
{
assert(g_numEltsAllocated == 0); // shouldn't be called twice without an intervening deallocBlockSums()
g_numEltsAllocated = maxNumElements;
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numElts = maxNumElements;
int level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1) level++;
numElts = numBlocks;
} while (numElts > 1);
g_scanBlockSums = (float**) malloc(level * sizeof(float*));
g_numLevelsAllocated = level;
numElts = maxNumElements;
level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1)
cudaCheck ( hipMalloc((void**) &g_scanBlockSums[level++], numBlocks * sizeof(float)), "Malloc prescanBlockSums g_scanBlockSums");
numElts = numBlocks;
} while (numElts > 1);
}
void preallocBlockSumsInt (unsigned int maxNumElements)
{
assert(g_numEltsAllocated == 0); // shouldn't be called twice without an intervening deallocBlockSumsInt()
g_numEltsAllocated = maxNumElements;
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numElts = maxNumElements;
int level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1) level++;
numElts = numBlocks;
} while (numElts > 1);
g_scanBlockSumsInt = (int**) malloc(level * sizeof(int*));
g_numLevelsAllocated = level;
numElts = maxNumElements;
level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1) cudaCheck ( hipMalloc((void**) &g_scanBlockSumsInt[level++], numBlocks * sizeof(int)), "Malloc prescanBlockSumsInt g_scanBlockSumsInt");
numElts = numBlocks;
} while (numElts > 1);
}
void deallocBlockSums()
{
if ( g_scanBlockSums != 0x0 ) {
for (unsigned int i = 0; i < g_numLevelsAllocated; i++)
cudaCheck ( hipFree(g_scanBlockSums[i]), "Free deallocBlockSums g_scanBlockSums");
free( (void**)g_scanBlockSums );
}
g_scanBlockSums = 0;
g_numEltsAllocated = 0;
g_numLevelsAllocated = 0;
}
void deallocBlockSumsInt()
{
if ( g_scanBlockSumsInt != 0x0 ) {
for (unsigned int i = 0; i < g_numLevelsAllocated; i++)
cudaCheck ( hipFree(g_scanBlockSumsInt[i]), "Free deallocBlockSumsInt g_scanBlockSumsInt");
free( (void**)g_scanBlockSumsInt );
}
g_scanBlockSumsInt = 0;
g_numEltsAllocated = 0;
g_numLevelsAllocated = 0;
}
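// prescanArrayRecursive / prescanArrayRecursiveInt below follow the classic
// work-efficient scan from the CUDA SDK sample: each block scans up to
// 2*BLOCK_SIZE elements in shared memory, the per-block totals are written to
// g_scanBlockSums[level], that much smaller array is scanned recursively, and
// uniformAdd then adds each block's offset back in. A non-power-of-two tail
// block is handled separately via the np2LastBlock path.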
void prescanArrayRecursive (float *outArray, const float *inArray, int numElements, int level)
{
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numBlocks = max(1, (int)ceil((float)numElements / (2.f * blockSize)));
unsigned int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = floorPow2(numElements);
unsigned int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
unsigned int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock;
unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2);
unsigned int np2LastBlock = 0;
unsigned int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(float) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
unsigned int sharedMemSize = sizeof(float) * (numEltsPerBlock + extraSpace);
#ifdef DEBUG
if (numBlocks > 1) assert(g_numEltsAllocated >= numElements);
#endif
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
// execute the scan
if (numBlocks > 1) {
hipLaunchKernelGGL(( prescan<true, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, g_scanBlockSums[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
hipLaunchKernelGGL(( prescan<true, true>), dim3(1), dim3(numThreadsLastBlock), sharedMemLastBlock , 0, outArray, inArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
// This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive (g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1);
hipLaunchKernelGGL(( uniformAdd), dim3(grid), dim3(threads) , 0, 0, outArray, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
hipLaunchKernelGGL(( uniformAdd), dim3(1), dim3(numThreadsLastBlock) , 0, 0, outArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
hipLaunchKernelGGL(( prescan<false, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, 0, numThreads * 2, 0, 0);
} else {
hipLaunchKernelGGL(( prescan<false, true>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, 0, numElements, 0, 0);
}
}
void prescanArrayRecursiveInt (int *outArray, const int *inArray, int numElements, int level)
{
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numBlocks = max(1, (int)ceil((float)numElements / (2.f * blockSize)));
unsigned int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = floorPow2(numElements);
unsigned int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
unsigned int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock;
unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2);
unsigned int np2LastBlock = 0;
unsigned int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(float) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
unsigned int sharedMemSize = sizeof(float) * (numEltsPerBlock + extraSpace);
#ifdef DEBUG
if (numBlocks > 1) assert(g_numEltsAllocated >= numElements);
#endif
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
// execute the scan
if (numBlocks > 1) {
hipLaunchKernelGGL(( prescanInt <true, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, g_scanBlockSumsInt[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
hipLaunchKernelGGL(( prescanInt <true, true>), dim3(1), dim3(numThreadsLastBlock), sharedMemLastBlock , 0, outArray, inArray, g_scanBlockSumsInt[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
// This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursiveInt (g_scanBlockSumsInt[level], g_scanBlockSumsInt[level], numBlocks, level+1);
hipLaunchKernelGGL(( uniformAddInt) , dim3(grid), dim3(threads) , 0, 0, outArray, g_scanBlockSumsInt[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
hipLaunchKernelGGL(( uniformAddInt) , dim3(1), dim3(numThreadsLastBlock) , 0, 0, outArray, g_scanBlockSumsInt[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
hipLaunchKernelGGL(( prescanInt <false, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, 0, numThreads * 2, 0, 0);
} else {
hipLaunchKernelGGL(( prescanInt <false, true>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, 0, numElements, 0, 0);
}
}
void prescanArray ( float *d_odata, float *d_idata, int num )
{
// perform prefix sum
preallocBlockSums( num );
prescanArrayRecursive ( d_odata, d_idata, num, 0);
deallocBlockSums();
}
void prescanArrayInt ( int *d_odata, int *d_idata, int num )
{
// perform prefix sum
preallocBlockSumsInt ( num );
prescanArrayRecursiveInt ( d_odata, d_idata, num, 0);
deallocBlockSumsInt ();
}
char* d_idata = NULL;
char* d_odata = NULL;
void prefixSum ( int num )
{
prescanArray ( (float*) d_odata, (float*) d_idata, num );
}
void prefixSumInt ( int num )
{
prescanArrayInt ( (int*) d_odata, (int*) d_idata, num );
}
void prefixSumToGPU ( char* inArray, int num, int siz )
{
cudaCheck ( hipMalloc( (void**) &d_idata, num*siz ), "Malloc prefixSumToGPU idata");
cudaCheck ( hipMalloc( (void**) &d_odata, num*siz ), "Malloc prefixSumToGPU odata" );
cudaCheck ( hipMemcpy( d_idata, inArray, num*siz, hipMemcpyHostToDevice), "Memcpy inArray->idata" );
}
void prefixSumFromGPU ( char* outArray, int num, int siz )
{
cudaCheck ( hipMemcpy( outArray, d_odata, num*siz, hipMemcpyDeviceToHost), "Memcpy odata->outArray" );
cudaCheck ( hipFree( d_idata ), "Free idata" );
cudaCheck ( hipFree( d_odata ), "Free odata" );
d_idata = NULL;
d_odata = NULL;
}
| b0f11749c3a13d6e19d274da4923be17f0ac8601.cu | /*
FLUIDS v.3 - SPH Fluid Simulator for CPU and GPU
Copyright (C) 2012-2013. Rama Hoetzlein, http://fluids3.com
Attribute-ZLib license (* See additional part 4)
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
4. Any published work based on this code must include public acknowledgement
of the origin. This includes following when applicable:
- Journal/Paper publications. Credited by reference to work in text & citation.
- Public presentations. Credited in at least one slide.
- Distributed Games/Apps. Credited as single line in game or app credit page.
Retaining this additional license term is required in derivative works.
Acknowledgement may be provided as:
Publication version:
2012-2013, Hoetzlein, Rama C. Fluids v.3 - A Large-Scale, Open Source
Fluid Simulator. Published online at: http://fluids3.com
Single line (slides or app credits):
GPU Fluids: Rama C. Hoetzlein (Fluids v3 2013)
Notes on Clause 4:
The intent of this clause is public attribution for this contribution, not code use restriction.
Both commerical and open source projects may redistribute and reuse without code release.
However, clause #1 of ZLib indicates that "you must not claim that you wrote the original software".
Clause #4 makes this more specific by requiring public acknowledgement to be extended to
derivative licenses.
*/
#ifdef WIN32
#include <conio.h>
#endif
//#include <cutil.h> // cutil32.lib
#include "cutil_math.h" // cutil32.lib
#include <string.h>
#include <assert.h>
#ifdef WIN32
#include <windows.h>
#endif
//#include <cuda_gl_interop.h>
#include <stdio.h>
#include <math.h>
extern void app_printf ( char* format, ... );
extern void app_printEXIT ( char* format, ... );
extern char app_getch ();
#include "fluid_system_host.cuh"
#include "fluid_system_kern.cuh"
FluidParams fcuda; // CPU Fluid params
FluidParams* mcuda; // GPU Fluid params
bufList fbuf; // GPU Particle buffers
bool cudaCheck ( cudaError_t status, char* msg )
{
if ( status != cudaSuccess ) {
app_printf ( "CUDA ERROR: %s\n", cudaGetErrorString ( status ) );
app_getch ();
// MessageBox ( NULL, cudaGetErrorString ( status), msg, MB_OK );
return false;
} else {
//app_printf ( "%s. OK.\n", msg );
}
return true;
}
void cudaExit ()
{
int argc = 1;
char* argv[] = {"fluids"};
cudaDeviceReset();
}
// Initialize CUDA
void cudaInit()
{
int argc = 1;
char* argv[] = {"fluids"};
int count = 0;
int i = 0;
cudaError_t err = cudaGetDeviceCount(&count);
if ( err==cudaErrorInsufficientDriver) { app_printEXIT( "CUDA driver not installed.\n"); }
if ( err==cudaErrorNoDevice) { app_printEXIT ( "No CUDA device found.\n"); }
if ( count == 0) { app_printEXIT ( "No CUDA device found.\n"); }
for(i = 0; i < count; i++) {
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, i) == cudaSuccess)
if(prop.major >= 1) break;
}
if(i == count) { app_printEXIT ( "No CUDA device found.\n"); }
cudaSetDevice(i);
app_printf( "CUDA initialized.\n");
cudaDeviceProp p;
cudaGetDeviceProperties ( &p, 0);
app_printf ( "-- CUDA --\n" );
app_printf ( "Name: %s\n", p.name );
app_printf ( "Revision: %d.%d\n", p.major, p.minor );
app_printf ( "Global Mem: %d\n", p.totalGlobalMem );
app_printf ( "Shared/Blk: %d\n", p.sharedMemPerBlock );
app_printf ( "Regs/Blk: %d\n", p.regsPerBlock );
app_printf ( "Warp Size: %d\n", p.warpSize );
app_printf ( "Mem Pitch: %d\n", p.memPitch );
app_printf ( "Thrds/Blk: %d\n", p.maxThreadsPerBlock );
app_printf ( "Const Mem: %d\n", p.totalConstMem );
app_printf ( "Clock Rate: %d\n", p.clockRate );
fbuf.mgridactive = 0x0;
// Allocate the sim parameters
cudaCheck ( cudaMalloc ( (void**) &mcuda, sizeof(FluidParams) ), "Malloc FluidParams mcuda" );
// Allocate particle buffers
cudaCheck ( cudaMalloc ( (void**) &fbuf.mpos, sizeof(float)*3 ), "Malloc mpos" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mvel, sizeof(float)*3), "Malloc mvel" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mveleval, sizeof(float)*3), "Malloc mveleval" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mforce, sizeof(float)*3), "Malloc mforce" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mpress, sizeof(float) ), "Malloc mpress" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mdensity, sizeof(float) ), "Malloc mdensity" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mgcell, sizeof(uint)), "Malloc mgcell" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mgndx, sizeof(uint)), "Malloc mgndx" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mclr, sizeof(uint)), "Malloc mclr" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.msortbuf, sizeof(uint) ), "Malloc msortbu" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mgrid, 1 ), "Malloc mgrid" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mgridcnt, 1 ), "Malloc mgridcnt" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mgridoff, 1 ), "Malloc mgridoff" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mgridactive, 1 ), "Malloc mgridactive");
//cudaCheck ( cudaMalloc ( (void**) &fbuf.mcluster, sizeof(uint) ) );
preallocBlockSumsInt ( 1 );
};
// Compute number of blocks to create
int iDivUp (int a, int b) {
return (a % b != 0) ? (a / b + 1) : (a / b);
}
void computeNumBlocks (int numPnts, int maxThreads, int &numBlocks, int &numThreads)
{
numThreads = min( maxThreads, numPnts );
numBlocks = iDivUp ( numPnts, numThreads );
}
void FluidClearCUDA ()
{
cudaCheck ( cudaFree ( fbuf.mpos ), "Free mpos" );
cudaCheck ( cudaFree ( fbuf.mvel ), "Free mvel" );
cudaCheck ( cudaFree ( fbuf.mveleval ), "Free mveleval" );
cudaCheck ( cudaFree ( fbuf.mforce ), "Free mforce" );
cudaCheck ( cudaFree ( fbuf.mpress ), "Free mpress");
cudaCheck ( cudaFree ( fbuf.mdensity ), "Free mdensity" );
cudaCheck ( cudaFree ( fbuf.mgcell ), "Free mgcell" );
cudaCheck ( cudaFree ( fbuf.mgndx ), "Free mgndx" );
cudaCheck ( cudaFree ( fbuf.mclr ), "Free mclr" );
//cudaCheck ( cudaFree ( fbuf.mcluster ) );
cudaCheck ( cudaFree ( fbuf.msortbuf ), "Free msortbuf" );
cudaCheck ( cudaFree ( fbuf.mgrid ), "Free mgrid" );
cudaCheck ( cudaFree ( fbuf.mgridcnt ), "Free mgridcnt" );
cudaCheck ( cudaFree ( fbuf.mgridoff ), "Free mgridoff" );
cudaCheck ( cudaFree ( fbuf.mgridactive ), "Free mgridactive" );
}
void FluidSetupCUDA ( int num, int gsrch, int3 res, float3 size, float3 delta, float3 gmin, float3 gmax, int total, int chk )
{
fcuda.pnum = num;
fcuda.gridRes = res;
fcuda.gridSize = size;
fcuda.gridDelta = delta;
fcuda.gridMin = gmin;
fcuda.gridMax = gmax;
fcuda.gridTotal = total;
fcuda.gridSrch = gsrch;
fcuda.gridAdjCnt = gsrch*gsrch*gsrch;
fcuda.gridScanMax = res;
fcuda.gridScanMax -= make_int3( fcuda.gridSrch, fcuda.gridSrch, fcuda.gridSrch );
fcuda.chk = chk;
// Build Adjacency Lookup
int cell = 0;
for (int y=0; y < gsrch; y++ )
for (int z=0; z < gsrch; z++ )
for (int x=0; x < gsrch; x++ )
fcuda.gridAdj [ cell++] = ( y * fcuda.gridRes.z+ z )*fcuda.gridRes.x + x ;
app_printf ( "CUDA Adjacency Table\n");
for (int n=0; n < fcuda.gridAdjCnt; n++ ) {
app_printf ( " ADJ: %d, %d\n", n, fcuda.gridAdj[n] );
}
// Compute number of blocks and threads
int threadsPerBlock = 192;
computeNumBlocks ( fcuda.pnum, threadsPerBlock, fcuda.numBlocks, fcuda.numThreads); // particles
computeNumBlocks ( fcuda.gridTotal, threadsPerBlock, fcuda.gridBlocks, fcuda.gridThreads); // grid cell
// Allocate particle buffers
fcuda.szPnts = (fcuda.numBlocks * fcuda.numThreads);
app_printf ( "CUDA Allocate: \n" );
app_printf ( " Pnts: %d, t:%dx%d=%d, Size:%d\n", fcuda.pnum, fcuda.numBlocks, fcuda.numThreads, fcuda.numBlocks*fcuda.numThreads, fcuda.szPnts);
app_printf ( " Grid: %d, t:%dx%d=%d, bufGrid:%d, Res: %dx%dx%d\n", fcuda.gridTotal, fcuda.gridBlocks, fcuda.gridThreads, fcuda.gridBlocks*fcuda.gridThreads, fcuda.szGrid, (int) fcuda.gridRes.x, (int) fcuda.gridRes.y, (int) fcuda.gridRes.z );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mpos, fcuda.szPnts*sizeof(float)*3 ), "Malloc mpos" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mvel, fcuda.szPnts*sizeof(float)*3 ), "Malloc mvel" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mveleval, fcuda.szPnts*sizeof(float)*3 ), "Malloc mveleval" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mforce, fcuda.szPnts*sizeof(float)*3 ), "Malloc mforce" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mpress, fcuda.szPnts*sizeof(float) ), "Malloc mpress" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mdensity, fcuda.szPnts*sizeof(float) ), "Malloc mdensity" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mgcell, fcuda.szPnts*sizeof(uint) ), "Malloc mgcell" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mgndx, fcuda.szPnts*sizeof(uint)), "Malloc mgndx" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mclr, fcuda.szPnts*sizeof(uint) ), "Malloc mclr" );
//cudaCheck ( cudaMalloc ( (void**) &fbuf.mcluster, fcuda.szPnts*sizeof(uint) ) );
int temp_size = 4*(sizeof(float)*3) + 2*sizeof(float) + 2*sizeof(int) + sizeof(uint);
cudaCheck ( cudaMalloc ( (void**) &fbuf.msortbuf, fcuda.szPnts*temp_size ), "Malloc msortbuf" );
// Allocate grid
fcuda.szGrid = (fcuda.gridBlocks * fcuda.gridThreads);
cudaCheck ( cudaMalloc ( (void**) &fbuf.mgrid, fcuda.szPnts*sizeof(int) ), "Malloc mgrid" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mgridcnt, fcuda.szGrid*sizeof(int) ), "Malloc mgridcnt" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mgridoff, fcuda.szGrid*sizeof(int) ), "Malloc mgridoff" );
cudaCheck ( cudaMalloc ( (void**) &fbuf.mgridactive, fcuda.szGrid*sizeof(int) ), "Malloc mgridactive" );
// Transfer sim params to device
updateSimParams ( &fcuda );
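// cudaThreadSynchronize(), used throughout this file, is the pre-CUDA-4.0
// name; later toolkits deprecate it in favour of cudaDeviceSynchronize(),
// which is what the HIP translation of this file maps it to.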
cudaThreadSynchronize ();
// Prefix Sum - Preallocate Block sums for Sorting
deallocBlockSumsInt ();
preallocBlockSumsInt ( fcuda.gridTotal );
}
void FluidParamCUDA ( float ss, float sr, float pr, float mass, float rest, float3 bmin, float3 bmax, float estiff, float istiff, float visc, float damp, float fmin, float fmax, float ffreq, float gslope, float gx, float gy, float gz, float al, float vl )
{
fcuda.psimscale = ss;
fcuda.psmoothradius = sr;
fcuda.pradius = pr;
fcuda.r2 = sr * sr;
fcuda.pmass = mass;
fcuda.prest_dens = rest;
fcuda.pboundmin = bmin;
fcuda.pboundmax = bmax;
fcuda.pextstiff = estiff;
fcuda.pintstiff = istiff;
fcuda.pvisc = visc;
fcuda.pdamp = damp;
fcuda.pforce_min = fmin;
fcuda.pforce_max = fmax;
fcuda.pforce_freq = ffreq;
fcuda.pground_slope = gslope;
fcuda.pgravity = make_float3( gx, gy, gz );
fcuda.AL = al;
fcuda.AL2 = al * al;
fcuda.VL = vl;
fcuda.VL2 = vl * vl;
//app_printf ( "Bound Min: %f %f %f\n", bmin.x, bmin.y, bmin.z );
//app_printf ( "Bound Max: %f %f %f\n", bmax.x, bmax.y, bmax.z );
fcuda.pdist = pow ( fcuda.pmass / fcuda.prest_dens, 1/3.0f );
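// Smoothing-kernel normalisation constants, matching the standard SPH kernels
// of Muller et al. (2003), with h = psmoothradius:
// poly6: W(r) = 315/(64*pi*h^9) * (h^2 - r^2)^3, 0 <= r <= h
// spiky (gradient): -45/(pi*h^6) * (h - r)^2
// viscosity (Laplacian): 45/(pi*h^6) * (h - r)
// Only the leading coefficients are stored here.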
fcuda.poly6kern = 315.0f / (64.0f * 3.141592 * pow( sr, 9.0f) );
fcuda.spikykern = -45.0f / (3.141592 * pow( sr, 6.0f) );
fcuda.lapkern = 45.0f / (3.141592 * pow( sr, 6.0f) );
fcuda.d2 = fcuda.psimscale * fcuda.psimscale;
fcuda.rd2 = fcuda.r2 / fcuda.d2;
fcuda.vterm = fcuda.lapkern * fcuda.pvisc;
// Transfer sim params to device
updateSimParams ( &fcuda );
cudaThreadSynchronize ();
}
void CopyToCUDA ( float* pos, float* vel, float* veleval, float* force, float* pressure, float* density, uint* cluster, uint* gnext, char* clr )
{
// Send particle buffers
int numPoints = fcuda.pnum;
cudaCheck( cudaMemcpy ( fbuf.mpos, pos, numPoints*sizeof(float)*3, cudaMemcpyHostToDevice ), "Memcpy mpos ToDev" );
cudaCheck( cudaMemcpy ( fbuf.mvel, vel, numPoints*sizeof(float)*3, cudaMemcpyHostToDevice ), "Memcpy mvel ToDev" );
cudaCheck( cudaMemcpy ( fbuf.mveleval, veleval, numPoints*sizeof(float)*3, cudaMemcpyHostToDevice ), "Memcpy mveleval ToDev" );
cudaCheck( cudaMemcpy ( fbuf.mforce, force, numPoints*sizeof(float)*3, cudaMemcpyHostToDevice ), "Memcpy mforce ToDev" );
cudaCheck( cudaMemcpy ( fbuf.mpress, pressure, numPoints*sizeof(float), cudaMemcpyHostToDevice ), "Memcpy mpress ToDev" );
cudaCheck( cudaMemcpy ( fbuf.mdensity, density, numPoints*sizeof(float), cudaMemcpyHostToDevice ), "Memcpy mdensity ToDev" );
cudaCheck( cudaMemcpy ( fbuf.mclr, clr, numPoints*sizeof(uint), cudaMemcpyHostToDevice ), "Memcpy mclr ToDev" );
cudaThreadSynchronize ();
}
void CopyFromCUDA ( float* pos, float* vel, float* veleval, float* force, float* pressure, float* density, uint* cluster, uint* gnext, char* clr )
{
// Return particle buffers
int numPoints = fcuda.pnum;
if ( pos != 0x0 ) cudaCheck( cudaMemcpy ( pos, fbuf.mpos, numPoints*sizeof(float)*3, cudaMemcpyDeviceToHost ), "Memcpy mpos FromDev" );
if ( clr != 0x0 ) cudaCheck( cudaMemcpy ( clr, fbuf.mclr, numPoints*sizeof(uint), cudaMemcpyDeviceToHost ), "Memcpy mclr FromDev" );
/*cudaCheck( cudaMemcpy ( vel, fbuf.mvel, numPoints*sizeof(float)*3, cudaMemcpyDeviceToHost ) );
cudaCheck( cudaMemcpy ( veleval, fbuf.mveleval, numPoints*sizeof(float)*3, cudaMemcpyDeviceToHost ) );
cudaCheck( cudaMemcpy ( force, fbuf.mforce, numPoints*sizeof(float)*3, cudaMemcpyDeviceToHost ) );
cudaCheck( cudaMemcpy ( pressure, fbuf.mpress, numPoints*sizeof(float), cudaMemcpyDeviceToHost ) );
cudaCheck( cudaMemcpy ( density, fbuf.mdensity, numPoints*sizeof(float), cudaMemcpyDeviceToHost ) );*/
cudaThreadSynchronize ();
}
void InsertParticlesCUDA ( uint* gcell, uint* ccell, int* gcnt )
{
cudaMemset ( fbuf.mgridcnt, 0, fcuda.gridTotal * sizeof(int));
insertParticles<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf ( stderr, "CUDA ERROR: InsertParticlesCUDA: %s\n", cudaGetErrorString(error) );
}
cudaThreadSynchronize ();
// Transfer data back if requested (for validation)
if (gcell != 0x0) {
cudaCheck( cudaMemcpy ( gcell, fbuf.mgcell, fcuda.pnum*sizeof(uint), cudaMemcpyDeviceToHost ), "Memcpy mgcell FromDev");
cudaCheck( cudaMemcpy ( gcnt, fbuf.mgridcnt, fcuda.gridTotal*sizeof(int), cudaMemcpyDeviceToHost ), "Memcpy mgridcnt FromDev" );
//cudaCheck( cudaMemcpy ( ccell, fbuf.mcluster, fcuda.pnum*sizeof(uint), cudaMemcpyDeviceToHost ) );
}
}
void PrefixSumCellsCUDA ( int* goff )
{
// Prefix Sum - determine grid offsets
prescanArrayRecursiveInt ( fbuf.mgridoff, fbuf.mgridcnt, fcuda.gridTotal, 0);
cudaThreadSynchronize ();
// Transfer data back if requested
if ( goff != 0x0 ) {
cudaCheck( cudaMemcpy ( goff, fbuf.mgridoff, fcuda.gridTotal * sizeof(int), cudaMemcpyDeviceToHost ), "Memcpy mgoff FromDev" );
}
}
void CountingSortIndexCUDA ( uint* ggrid )
{
// Counting Sort - pass one, determine grid counts
cudaMemset ( fbuf.mgrid, GRID_UCHAR, fcuda.pnum * sizeof(int) );
countingSortIndex <<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
cudaThreadSynchronize ();
// Transfer data back if requested
if ( ggrid != 0x0 ) {
cudaCheck( cudaMemcpy ( ggrid, fbuf.mgrid, fcuda.pnum * sizeof(uint), cudaMemcpyDeviceToHost ), "Memcpy mgrid FromDev" );
}
}
void CountingSortFullCUDA ( uint* ggrid )
{
// Transfer particle data to temp buffers
int n = fcuda.pnum;
cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_POS, fbuf.mpos, n*sizeof(float)*3, cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mpos DevToDev" );
cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_VEL, fbuf.mvel, n*sizeof(float)*3, cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mvel DevToDev" );
cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_VELEVAL, fbuf.mveleval, n*sizeof(float)*3, cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mveleval DevToDev" );
cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_FORCE, fbuf.mforce, n*sizeof(float)*3, cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mforce DevToDev" );
cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_PRESS, fbuf.mpress, n*sizeof(float), cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mpress DevToDev" );
cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_DENS, fbuf.mdensity, n*sizeof(float), cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mdens DevToDev" );
cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_GCELL, fbuf.mgcell, n*sizeof(uint), cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mgcell DevToDev" );
cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_GNDX, fbuf.mgndx, n*sizeof(uint), cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mgndx DevToDev" );
cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_CLR, fbuf.mclr, n*sizeof(uint), cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mclr DevToDev" );
// Counting Sort - pass one, determine grid counts
cudaMemset ( fbuf.mgrid, GRID_UCHAR, fcuda.pnum * sizeof(int) );
countingSortFull <<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
cudaThreadSynchronize ();
}
void ComputePressureCUDA ()
{
computePressure<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf ( stderr, "CUDA ERROR: ComputePressureCUDA: %s\n", cudaGetErrorString(error) );
}
cudaThreadSynchronize ();
}
void ComputeQueryCUDA ()
{
computeQuery<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf ( stderr, "CUDA ERROR: ComputePressureCUDA: %s\n", cudaGetErrorString(error) );
}
cudaThreadSynchronize ();
}
void CountActiveCUDA ()
{
int threads = 1;
int blocks = 1;
assert ( fbuf.mgridactive != 0x0 );
/*#ifdef CUDA_42
cudaMemcpyToSymbol ( "gridActive", &fcuda.gridActive, sizeof(int) );
#else
cudaMemcpyToSymbol ( gridActive, &fcuda.gridActive, sizeof(int) );
#endif */
countActiveCells<<< blocks, threads >>> ( fbuf, fcuda.gridTotal );
cudaThreadSynchronize ();
cudaMemcpyFromSymbol ( &fcuda.gridActive, "gridActive", sizeof(int) );
app_printf ( "Active cells: %d\n", fcuda.gridActive );
}
void ComputePressureGroupCUDA ()
{
if ( fcuda.gridActive > 0 ) {
int threads = 128; // should be based on maximum occupancy
uint3 blocks;
blocks.x = 4096;
blocks.y = (fcuda.gridActive / 4096 )+1;
blocks.z = 1;
computePressureGroup<<< blocks, threads >>> ( fbuf, fcuda.pnum );
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf ( stderr, "CUDA ERROR: ComputePressureGroupCUDA: %s\n", cudaGetErrorString(error) );
}
cudaThreadSynchronize ();
}
}
void ComputeForceCUDA ()
{
computeForce<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf ( stderr, "CUDA ERROR: ComputeForceCUDA: %s\n", cudaGetErrorString(error) );
}
cudaThreadSynchronize ();
}
void AdvanceCUDA ( float tm, float dt, float ss )
{
advanceParticles<<< fcuda.numBlocks, fcuda.numThreads>>> ( tm, dt, ss, fbuf, fcuda.pnum );
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf ( stderr, "CUDA ERROR: AdvanceCUDA: %s\n", cudaGetErrorString(error) );
}
cudaThreadSynchronize ();
}
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
// includes, kernels
#include <assert.h>
inline bool isPowerOfTwo(int n) { return ((n&(n-1))==0) ; }
inline int floorPow2(int n) {
#ifdef WIN32
return 1 << (int)logb((float)n);
#else
int exp;
frexp((float)n, &exp);
return 1 << (exp - 1);
#endif
}
#define BLOCK_SIZE 256
float** g_scanBlockSums = 0;
int** g_scanBlockSumsInt = 0;
unsigned int g_numEltsAllocated = 0;
unsigned int g_numLevelsAllocated = 0;
void preallocBlockSums(unsigned int maxNumElements)
{
assert(g_numEltsAllocated == 0); // shouldn't be called twice without an intervening deallocBlockSums()
g_numEltsAllocated = maxNumElements;
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numElts = maxNumElements;
int level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1) level++;
numElts = numBlocks;
} while (numElts > 1);
g_scanBlockSums = (float**) malloc(level * sizeof(float*));
g_numLevelsAllocated = level;
numElts = maxNumElements;
level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1)
cudaCheck ( cudaMalloc((void**) &g_scanBlockSums[level++], numBlocks * sizeof(float)), "Malloc prescanBlockSums g_scanBlockSums");
numElts = numBlocks;
} while (numElts > 1);
}
void preallocBlockSumsInt (unsigned int maxNumElements)
{
assert(g_numEltsAllocated == 0); // shouldn't be called twice without an intervening deallocBlockSumsInt()
g_numEltsAllocated = maxNumElements;
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numElts = maxNumElements;
int level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1) level++;
numElts = numBlocks;
} while (numElts > 1);
g_scanBlockSumsInt = (int**) malloc(level * sizeof(int*));
g_numLevelsAllocated = level;
numElts = maxNumElements;
level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1) cudaCheck ( cudaMalloc((void**) &g_scanBlockSumsInt[level++], numBlocks * sizeof(int)), "Malloc prescanBlockSumsInt g_scanBlockSumsInt");
numElts = numBlocks;
} while (numElts > 1);
}
void deallocBlockSums()
{
if ( g_scanBlockSums != 0x0 ) {
for (unsigned int i = 0; i < g_numLevelsAllocated; i++)
cudaCheck ( cudaFree(g_scanBlockSums[i]), "Free deallocBlockSums g_scanBlockSums");
free( (void**)g_scanBlockSums );
}
g_scanBlockSums = 0;
g_numEltsAllocated = 0;
g_numLevelsAllocated = 0;
}
void deallocBlockSumsInt()
{
if ( g_scanBlockSumsInt != 0x0 ) {
for (unsigned int i = 0; i < g_numLevelsAllocated; i++)
cudaCheck ( cudaFree(g_scanBlockSumsInt[i]), "Free deallocBlockSumsInt g_scanBlockSumsInt");
free( (void**)g_scanBlockSumsInt );
}
g_scanBlockSumsInt = 0;
g_numEltsAllocated = 0;
g_numLevelsAllocated = 0;
}
void prescanArrayRecursive (float *outArray, const float *inArray, int numElements, int level)
{
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numBlocks = max(1, (int)ceil((float)numElements / (2.f * blockSize)));
unsigned int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = floorPow2(numElements);
unsigned int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
unsigned int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock;
unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2);
unsigned int np2LastBlock = 0;
unsigned int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(float) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
unsigned int sharedMemSize = sizeof(float) * (numEltsPerBlock + extraSpace);
#ifdef DEBUG
if (numBlocks > 1) assert(g_numEltsAllocated >= numElements);
#endif
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
// execute the scan
if (numBlocks > 1) {
prescan<true, false><<< grid, threads, sharedMemSize >>> (outArray, inArray, g_scanBlockSums[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
prescan<true, true><<< 1, numThreadsLastBlock, sharedMemLastBlock >>> (outArray, inArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
// This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive (g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1);
uniformAdd<<< grid, threads >>> (outArray, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
uniformAdd<<< 1, numThreadsLastBlock >>>(outArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
prescan<false, false><<< grid, threads, sharedMemSize >>> (outArray, inArray, 0, numThreads * 2, 0, 0);
} else {
prescan<false, true><<< grid, threads, sharedMemSize >>> (outArray, inArray, 0, numElements, 0, 0);
}
}
void prescanArrayRecursiveInt (int *outArray, const int *inArray, int numElements, int level)
{
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numBlocks = max(1, (int)ceil((float)numElements / (2.f * blockSize)));
unsigned int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = floorPow2(numElements);
unsigned int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
unsigned int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock;
unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2);
unsigned int np2LastBlock = 0;
unsigned int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(float) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
unsigned int sharedMemSize = sizeof(float) * (numEltsPerBlock + extraSpace);
#ifdef DEBUG
if (numBlocks > 1) assert(g_numEltsAllocated >= numElements);
#endif
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
// execute the scan
if (numBlocks > 1) {
prescanInt <true, false><<< grid, threads, sharedMemSize >>> (outArray, inArray, g_scanBlockSumsInt[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
prescanInt <true, true><<< 1, numThreadsLastBlock, sharedMemLastBlock >>> (outArray, inArray, g_scanBlockSumsInt[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
// This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursiveInt (g_scanBlockSumsInt[level], g_scanBlockSumsInt[level], numBlocks, level+1);
uniformAddInt <<< grid, threads >>> (outArray, g_scanBlockSumsInt[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
uniformAddInt <<< 1, numThreadsLastBlock >>>(outArray, g_scanBlockSumsInt[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
prescanInt <false, false><<< grid, threads, sharedMemSize >>> (outArray, inArray, 0, numThreads * 2, 0, 0);
} else {
prescanInt <false, true><<< grid, threads, sharedMemSize >>> (outArray, inArray, 0, numElements, 0, 0);
}
}
void prescanArray ( float *d_odata, float *d_idata, int num )
{
// perform prefix sum
preallocBlockSums( num );
prescanArrayRecursive ( d_odata, d_idata, num, 0);
deallocBlockSums();
}
void prescanArrayInt ( int *d_odata, int *d_idata, int num )
{
// perform prefix sum
preallocBlockSumsInt ( num );
prescanArrayRecursiveInt ( d_odata, d_idata, num, 0);
deallocBlockSumsInt ();
}
char* d_idata = NULL;
char* d_odata = NULL;
void prefixSum ( int num )
{
prescanArray ( (float*) d_odata, (float*) d_idata, num );
}
void prefixSumInt ( int num )
{
prescanArrayInt ( (int*) d_odata, (int*) d_idata, num );
}
void prefixSumToGPU ( char* inArray, int num, int siz )
{
cudaCheck ( cudaMalloc( (void**) &d_idata, num*siz ), "Malloc prefixSumToGPU idata");
cudaCheck ( cudaMalloc( (void**) &d_odata, num*siz ), "Malloc prefixSumToGPU odata" );
cudaCheck ( cudaMemcpy( d_idata, inArray, num*siz, cudaMemcpyHostToDevice), "Memcpy inArray->idata" );
}
void prefixSumFromGPU ( char* outArray, int num, int siz )
{
cudaCheck ( cudaMemcpy( outArray, d_odata, num*siz, cudaMemcpyDeviceToHost), "Memcpy odata->outArray" );
cudaCheck ( cudaFree( d_idata ), "Free idata" );
cudaCheck ( cudaFree( d_odata ), "Free odata" );
d_idata = NULL;
d_odata = NULL;
}
|
0c72419a37c18ca32d072fafbc6b455b522b990b.hip | // !!! This is a file automatically generated by hipify!!!
// Device routines that can be #included by the kernels file.
#include "cuda_struct.h"
#include "kernel.h"
#ifdef __HIPCC__
__device__ __forceinline__ f64 GetEzShape(f64 r) {
return 1.0 - 1.0 / (1.0 + exp(-24.0*(r - 4.32)));
// return 1.0 - 1.0 / (1.0 + exp(-16.0*(r - 4.2))); // At 4.0cm it is 96% as strong as at tooth. At 4.4 it is 4%.
}
#else
f64 inline GetEzShape_(f64 r) {
return 1.0 - 1.0 / (1.0 + exp(-16.0*(r - 4.2))); // At 4.0cm it is 96% as strong as at tooth. 4.2 50%. At 4.4 it is 4%.
}
#endif
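// The device and host versions above use different constants (24, 4.32 versus
// 16, 4.2), so they are not numerically identical near the outer radius; the
// commented-out line in the device version keeps the older host profile.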
__device__ __forceinline__ f64 Get_lnLambda_ion_d(f64 n_ion,f64 T_ion)
{
// Assume static f64 const is no good in kernel.
f64 factor, lnLambda_sq;
f64 Tion_eV3 = T_ion*T_ion*T_ion*one_over_kB_cubed;
f64 lnLambda = 23.0 - 0.5*log(n_ion/Tion_eV3);
// floor at 2.0:
lnLambda_sq = lnLambda*lnLambda;
factor = 1.0+0.5*lnLambda+0.25*lnLambda_sq+0.125*lnLambda*lnLambda_sq + 0.0625*lnLambda_sq*lnLambda_sq;
lnLambda += 2.0/factor;
return lnLambda;
}
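// Get_lnLambda_ion_d above and Get_lnLambda_d below use NRL-formulary-style
// Coulomb-logarithm fits: 23 - 0.5*ln(n/T^3[eV]) for ions, and for electrons a
// blend of 23 - 0.5*ln(n_e/T_e^3) (low T_e) with 24 - 0.5*ln(n_e/T_e^2)
// (T_e above ~10 eV). The truncated series in "factor" makes the 2.0/factor
// correction roughly 2 when lnLambda is small and negligible when it is large,
// acting as the advertised soft floor near 2.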
__device__ __forceinline__ f64 Get_lnLambda_d(real n_e,real T_e)
{
real lnLambda, factor, lnLambda_sq, lnLambda1, lnLambda2;
real Te_eV = T_e*one_over_kB;
real Te_eV2 = Te_eV*Te_eV;
real Te_eV3 = Te_eV*Te_eV2;
if (n_e*Te_eV3 > 0.0) {
lnLambda1 = 23.0 - 0.5*log(n_e/Te_eV3);
lnLambda2 = 24.0 - 0.5*log(n_e/Te_eV2);
// smooth between the two:
factor = 2.0*fabs(Te_eV-10.0)*(Te_eV-10.0)/(1.0+4.0*(Te_eV-10.0)*(Te_eV-10.0));
lnLambda = lnLambda1*(0.5-factor)+lnLambda2*(0.5+factor);
// floor at 2 just in case, but it should not get near:
lnLambda_sq = lnLambda*lnLambda;
factor = 1.0+0.5*lnLambda+0.25*lnLambda_sq+0.125*lnLambda*lnLambda_sq + 0.0625*lnLambda_sq*lnLambda_sq;
lnLambda += 2.0/factor;
// Golant p.40 warns that it becomes invalid when an electron gyroradius is less than a Debye radius.
// That is something to worry about if B/400 > n^1/2 , so looks not a big concern.
// There is also a quantum ceiling. It will not be anywhere near. At n=1e20, 0.5eV, the ceiling is only down to 29; it requires cold dense conditions to apply.
if (lnLambda < 2.0) lnLambda = 2.0; // deal with negative inputs
} else {
lnLambda = 20.0;
};
return lnLambda;
}
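// Anticlockwise_d and Clockwise_d are rotation matrices declared elsewhere
// (presumably in device constant memory); the helpers below apply their 2x2
// xy block to the vector, and the *3 variants leave the z component unchanged.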
__device__ __forceinline__ f64_vec2 Anticlock_rotate2(const f64_vec2 arg)
{
f64_vec2 result;
result.x = Anticlockwise_d.xx*arg.x+Anticlockwise_d.xy*arg.y;
result.y = Anticlockwise_d.yx*arg.x+Anticlockwise_d.yy*arg.y;
return result;
}
__device__ __forceinline__ f64_vec2 Clockwise_rotate2(const f64_vec2 arg)
{
f64_vec2 result;
result.x = Clockwise_d.xx*arg.x+Clockwise_d.xy*arg.y;
result.y = Clockwise_d.yx*arg.x+Clockwise_d.yy*arg.y;
return result;
}
__device__ __forceinline__ f64_vec3 Anticlock_rotate3(const f64_vec3 arg)
{
f64_vec3 result;
result.x = Anticlockwise_d.xx*arg.x+Anticlockwise_d.xy*arg.y;
result.y = Anticlockwise_d.yx*arg.x+Anticlockwise_d.yy*arg.y;
result.z = arg.z;
return result;
}
__device__ __forceinline__ f64_vec3 Clockwise_rotate3(const f64_vec3 arg)
{
f64_vec3 result;
result.x = Clockwise_d.xx*arg.x+Clockwise_d.xy*arg.y;
result.y = Clockwise_d.yx*arg.x+Clockwise_d.yy*arg.y;
result.z = arg.z;
return result;
}
__device__ __forceinline__ void Estimate_Ion_Neutral_Cross_sections_d(real T, // call with T in electronVolts
real * p_sigma_in_MT,
real * p_sigma_in_visc)
{
if (T > cross_T_vals_d[9]) {
*p_sigma_in_MT = cross_s_vals_MT_ni_d[9];
*p_sigma_in_visc = cross_s_vals_viscosity_ni_d[9];
return;
}
if (T < cross_T_vals_d[0]) {
*p_sigma_in_MT = cross_s_vals_MT_ni_d[0];
*p_sigma_in_visc = cross_s_vals_viscosity_ni_d[0];
return;
}
int i = 1;
//while (T > cross_T_vals_d[i]) i++;
if (T > cross_T_vals_d[5]) {
if (T > cross_T_vals_d[7]) {
if (T > cross_T_vals_d[8])
{
i = 9; // top of interval
}
else {
i = 8;
};
}
else {
if (T > cross_T_vals_d[6]) {
i = 7;
}
else {
i = 6;
};
};
}
else {
if (T > cross_T_vals_d[3]) {
if (T > cross_T_vals_d[4]) {
i = 5;
}
else {
i = 4;
};
}
else {
if (T > cross_T_vals_d[2]) {
i = 3;
}
else {
if (T > cross_T_vals_d[1]) {
i = 2;
}
else {
i = 1;
};
};
};
};
// T lies between i-1,i
real ppn = (T - cross_T_vals_d[i - 1]) / (cross_T_vals_d[i] - cross_T_vals_d[i - 1]);
*p_sigma_in_MT = ppn * cross_s_vals_MT_ni_d[i] + (1.0 - ppn)*cross_s_vals_MT_ni_d[i - 1];
*p_sigma_in_visc = ppn * cross_s_vals_viscosity_ni_d[i] + (1.0 - ppn)*cross_s_vals_viscosity_ni_d[i - 1];
return;
}
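// Note on the lookup above (added commentary): the nested ifs reproduce the
// commented-out linear scan "while (T > cross_T_vals_d[i]) i++;" as a fixed
// branch tree over the 10-entry table, and the last two assignments are plain
// linear interpolation, i.e. sigma(T) = sigma[i-1] + ppn*(sigma[i] - sigma[i-1]).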
__device__ __forceinline__ f64 Estimate_Neutral_MT_Cross_section_d(f64 T)
{
// CALL WITH T IN eV
if (T > cross_T_vals_d[9]) return cross_s_vals_MT_ni_d[9];
if (T < cross_T_vals_d[0]) return cross_s_vals_MT_ni_d[0];
int i = 1;
//while (T > cross_T_vals_d[i]) i++;
if (T > cross_T_vals_d[5]) {
if (T > cross_T_vals_d[7]) {
if (T > cross_T_vals_d[8])
{
i = 9; // top of interval
} else {
i = 8;
};
} else {
if (T > cross_T_vals_d[6]) {
i = 7;
} else {
i = 6;
};
};
} else {
if (T > cross_T_vals_d[3]) {
if (T > cross_T_vals_d[4]) {
i = 5;
} else {
i = 4;
};
} else {
if (T > cross_T_vals_d[2]) {
i = 3;
} else {
if (T > cross_T_vals_d[1]) {
i = 2;
} else {
i = 1;
};
};
};
};
// T lies between i-1,i
real ppn = (T-cross_T_vals_d[i-1])/(cross_T_vals_d[i]-cross_T_vals_d[i-1]);
return ppn*cross_s_vals_MT_ni_d[i] + (1.0-ppn)*cross_s_vals_MT_ni_d[i-1];
}
__device__ __forceinline__ f64 Estimate_Neutral_Neutral_Viscosity_Cross_section_d(f64 T)
{
// call with T in electronVolts
if (T > cross_T_vals_d[9]) return cross_s_vals_viscosity_nn_d[9];
if (T < cross_T_vals_d[0]) return cross_s_vals_viscosity_nn_d[0];
int i = 1;
//while (T > cross_T_vals_d[i]) i++;
if (T > cross_T_vals_d[5]) {
if (T > cross_T_vals_d[7]) {
if (T > cross_T_vals_d[8])
{
i = 9; // top of interval
} else {
i = 8;
};
} else {
if (T > cross_T_vals_d[6]) {
i = 7;
} else {
i = 6;
};
};
} else {
if (T > cross_T_vals_d[3]) {
if (T > cross_T_vals_d[4]) {
i = 5;
} else {
i = 4;
};
} else {
if (T > cross_T_vals_d[2]) {
i = 3;
} else {
if (T > cross_T_vals_d[1]) {
i = 2;
} else {
i = 1;
};
};
};
};
// T lies between i-1,i
real ppn = (T-cross_T_vals_d[i-1])/(cross_T_vals_d[i]-cross_T_vals_d[i-1]);
return ppn*cross_s_vals_viscosity_nn_d[i] + (1.0-ppn)*cross_s_vals_viscosity_nn_d[i-1];
}
__device__ __forceinline__ f64 Estimate_Ion_Neutral_Viscosity_Cross_section(f64 T)
{
if (T > cross_T_vals_d[9]) return cross_s_vals_viscosity_ni_d[9];
if (T < cross_T_vals_d[0]) return cross_s_vals_viscosity_ni_d[0];
int i = 1;
//while (T > cross_T_vals_d[i]) i++;
if (T > cross_T_vals_d[5]) {
if (T > cross_T_vals_d[7]) {
if (T > cross_T_vals_d[8])
{
i = 9; // top of interval
} else {
i = 8;
};
} else {
if (T > cross_T_vals_d[6]) {
i = 7;
} else {
i = 6;
};
};
} else {
if (T > cross_T_vals_d[3]) {
if (T > cross_T_vals_d[4]) {
i = 5;
} else {
i = 4;
};
} else {
if (T > cross_T_vals_d[2]) {
i = 3;
} else {
if (T > cross_T_vals_d[1]) {
i = 2;
} else {
i = 1;
};
};
};
};
// T lies between i-1,i
real ppn = (T-cross_T_vals_d[i-1])/(cross_T_vals_d[i]-cross_T_vals_d[i-1]);
return ppn*cross_s_vals_viscosity_ni_d[i] + (1.0-ppn)*cross_s_vals_viscosity_ni_d[i-1];
}
__device__ __forceinline__ f64 Calculate_Kappa_Neutral(f64 n_i, f64 T_i, f64 n_n, f64 T_n)
{
// NOTE:
// It involves sqrt and we could easily find a way to calculate only once.
if (n_n == 0.0) return 0.0;
f64 s_in_visc, s_nn_visc;
s_in_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(T_i*one_over_kB);
s_nn_visc = Estimate_Neutral_Neutral_Viscosity_Cross_section_d(T_n*one_over_kB);
// Oh. So there's another two we have to port.
// Yet for ion eta it's so different, apparently.
f64 ionneut_thermal = sqrt(T_i/m_ion+T_n/m_n);
f64 nu_ni_visc = n_i*s_in_visc*ionneut_thermal;
f64 nu_nn_visc = n_n*s_nn_visc*sqrt(T_n/m_n);
f64 nu_nheart = 0.75*nu_ni_visc + 0.25*nu_nn_visc;
f64 kappa_n = NEUTRAL_KAPPA_FACTOR*n_n*T_n/(m_n*nu_nheart);
// NEUTRAL_KAPPA_FACTOR should be in constant.h
// e-n does not feature.
return kappa_n;
}
__device__ __forceinline__ void Get_kappa_parallels_and_nu_hearts
(real n_n,real T_n,real n_i,real T_i,real n_e,real T_e,
f64 * pkappa_neut, f64 * pnu_nheart,
f64 * pkappa_ion_par, f64 * pnu_iheart,
f64 * pkappa_e_par, f64 * pnu_eheart,
f64 * pratio)
{
f64 s_in_visc, s_nn_visc, s_en_visc;
f64 ionneut_thermal,
nu_ni_visc, nu_nn_visc, nu_nheart,
nu_in_visc, nu_en_visc, nu_ii, nu_iheart, nu_eheart,
sqrt_Te, electron_thermal, nu_eiBar;
f64 lnLambda = Get_lnLambda_ion_d(n_i,T_i);
ionneut_thermal = sqrt(T_i/m_ion+T_n/m_n);
sqrt_Te = sqrt(T_e);
s_in_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(T_i*one_over_kB);
s_nn_visc = Estimate_Neutral_Neutral_Viscosity_Cross_section_d(T_n*one_over_kB);
nu_in_visc = n_n*s_in_visc*ionneut_thermal;
nu_nn_visc = n_n*s_nn_visc*sqrt(T_n/m_n);
nu_ni_visc = n_i*s_in_visc*ionneut_thermal;
nu_ii = Nu_ii_Factor*kB_to_3halves*n_i*lnLambda/(T_i*sqrt(T_i));
nu_iheart = 0.75*nu_in_visc
+ 0.8*nu_ii-0.25*nu_in_visc*nu_ni_visc/(3.0*nu_ni_visc+nu_nn_visc);
*pkappa_ion_par = 2.5*n_i*T_i/(m_ion*(nu_iheart));
*pnu_iheart = nu_iheart;
s_en_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(T_e*one_over_kB);
electron_thermal = (sqrt_Te*over_sqrt_m_e);
lnLambda = Get_lnLambda_d(n_e,T_e);
nu_eiBar = nu_eiBarconst*kB_to_3halves*n_i*lnLambda/(T_e*sqrt_Te);
nu_en_visc = n_n*s_en_visc*electron_thermal;
nu_eheart = 1.87*nu_eiBar + nu_en_visc;
*pnu_eheart = nu_eheart;
*pkappa_e_par = 2.5*n_e*T_e/(m_e*nu_eheart);
// Store ratio for thermoelectric use:
*pratio = nu_eiBar/nu_eheart;
if (n_n == 0.0){
*pkappa_neut = 0.0;
*pnu_nheart = 0.0; // make sure this output is not left uninitialized
} else {
nu_nheart = 0.75*nu_ni_visc + 0.25*nu_nn_visc;
*pkappa_neut = NEUTRAL_KAPPA_FACTOR*n_n*T_n/(m_n*nu_nheart);
*pnu_nheart = nu_nheart;
// NEUTRAL_KAPPA_FACTOR should be in constant.h
// e-n does not feature.
};
}
__device__ __forceinline__ void RotateClockwise(f64_vec3 & v)
{
f64 temp = Clockwise_d.xx*v.x + Clockwise_d.xy*v.y;
v.y = Clockwise_d.yx*v.x + Clockwise_d.yy*v.y;
v.x = temp;
}
__device__ __forceinline__ void RotateAnticlockwise(f64_vec3 & v)
{
f64 temp = Anticlockwise_d.xx*v.x + Anticlockwise_d.xy*v.y;
v.y = Anticlockwise_d.yx*v.x + Anticlockwise_d.yy*v.y;
v.x = temp;
}
__device__ __forceinline__ f64_vec2 GetRadiusIntercept(f64_vec2 x1,f64_vec2 x2,f64 r)
{
// Where does the line through x1 and x2 meet the circle of radius r?
f64_vec2 result;
f64 den = (x2.x-x1.x)*(x2.x-x1.x) + (x2.y - x1.y)*(x2.y - x1.y) ;
f64 a = (x1.x * (x2.x-x1.x) + x1.y * (x2.y-x1.y) ) / den;
// (t + a)^2 - a^2 = ( c^2 - x1.x^2 - x1.y^2 )/den
f64 root = sqrt( (r*r- x1.x*x1.x - x1.y*x1.y)/den + a*a ) ;
f64 t1 = root - a;
f64 t2 = -root - a;
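// Derivation sketch (added for clarity; it follows from the code above): a point
// on the segment is p(t) = x1 + t*(x2 - x1), and |p(t)|^2 = r^2 gives
// t^2*den + 2*t*(x1 dot (x2 - x1)) + |x1|^2 - r^2 = 0, i.e.
// (t + a)^2 = a^2 + (r^2 - |x1|^2)/den, so the candidates are t = -a +/- root,
// which are exactly t1 and t2.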
// since this is a sufficient condition to satisfy the circle, this probably means that
// the other solution is on the other side of the circle.
// Which root is within x1, x2 ? Remember x2 would be t = 1.
if (t1 > 1.0)
{
if ((t2 < 0.0) || (t2 > 1.0))
{
// This usually means one of the points actually is on the curve.
f64 dist1 = min(fabs(t1-1.0),fabs(t1));
f64 dist2 = min(fabs(t2-1.0),fabs(t2));
if (dist1 < dist2)
{
// use t1
result.x = x1.x + t1*(x2.x-x1.x);
result.y = x1.y + t1*(x2.y-x1.y);
// printf("t1@@");
} else {
// use t2
result.x = x1.x + t2*(x2.x-x1.x);
result.y = x1.y + t2*(x2.y-x1.y);
// printf("t2@@");
};
} else {
// use t2:
result.x = x1.x + t2*(x2.x-x1.x);
result.y = x1.y + t2*(x2.y-x1.y);
// printf("t2~");
};
} else {
result.x = x1.x + t1*(x2.x-x1.x);
result.y = x1.y + t1*(x2.y-x1.y);
//printf("t1~");
};
// For some reason this is only hitting the radius to single precision.
// printf to compare difference between achieved radius and r.
//if ((result.x < -0.145) && (result.x > -0.155))
//{
// f64 achieve = result.modulus();
// printf("ach %1.12E r %1.2f t1 %1.10E \nx %1.12E y %1.12E\n",achieve,r,t1,result.x,result.y);
//}
// So what do we do?
// We could boost back but there seem to be bigger problems thereafter.
// Ideally we'd go through and compare and see, is it t1 that is a bit wrong here?
//
return result;
}
| 0c72419a37c18ca32d072fafbc6b455b522b990b.cu |
// Device routines that can be #included by the kernels file.
#include "cuda_struct.h"
#include "kernel.h"
#ifdef __CUDACC__
__device__ __forceinline__ f64 GetEzShape(f64 r) {
return 1.0 - 1.0 / (1.0 + exp(-24.0*(r - 4.32)));
// return 1.0 - 1.0 / (1.0 + exp(-16.0*(r - 4.2))); // At 4.0cm it is 96% as strong as at tooth. At 4.4 it is 4%.
}
#else
f64 inline GetEzShape_(f64 r) {
return 1.0 - 1.0 / (1.0 + exp(-16.0*(r - 4.2))); // At 4.0cm it is 96% as strong as at tooth. 4.2 50%. At 4.4 it is 4%.
}
#endif
__device__ __forceinline__ f64 Get_lnLambda_ion_d(f64 n_ion,f64 T_ion)
{
// Assume static f64 const is no good in kernel.
f64 factor, lnLambda_sq;
f64 Tion_eV3 = T_ion*T_ion*T_ion*one_over_kB_cubed;
f64 lnLambda = 23.0 - 0.5*log(n_ion/Tion_eV3);
// floor at 2.0:
lnLambda_sq = lnLambda*lnLambda;
factor = 1.0+0.5*lnLambda+0.25*lnLambda_sq+0.125*lnLambda*lnLambda_sq + 0.0625*lnLambda_sq*lnLambda_sq;
lnLambda += 2.0/factor;
return lnLambda;
}
__device__ __forceinline__ f64 Get_lnLambda_d(real n_e,real T_e)
{
real lnLambda, factor, lnLambda_sq, lnLambda1, lnLambda2;
real Te_eV = T_e*one_over_kB;
real Te_eV2 = Te_eV*Te_eV;
real Te_eV3 = Te_eV*Te_eV2;
if (n_e*Te_eV3 > 0.0) {
lnLambda1 = 23.0 - 0.5*log(n_e/Te_eV3);
lnLambda2 = 24.0 - 0.5*log(n_e/Te_eV2);
// smooth between the two:
factor = 2.0*fabs(Te_eV-10.0)*(Te_eV-10.0)/(1.0+4.0*(Te_eV-10.0)*(Te_eV-10.0));
lnLambda = lnLambda1*(0.5-factor)+lnLambda2*(0.5+factor);
// floor at 2 just in case, but it should not get near:
lnLambda_sq = lnLambda*lnLambda;
factor = 1.0+0.5*lnLambda+0.25*lnLambda_sq+0.125*lnLambda*lnLambda_sq + 0.0625*lnLambda_sq*lnLambda_sq;
lnLambda += 2.0/factor;
// Golant p.40 warns that it becomes invalid when an electron gyroradius is less than a Debye radius.
// That is something to worry about if B/400 > n^1/2 , so looks not a big concern.
// There is also a quantum ceiling. It will not be anywhere near. At n=1e20, 0.5eV, the ceiling is only down to 29; it requires cold dense conditions to apply.
if (lnLambda < 2.0) lnLambda = 2.0; // deal with negative inputs
} else {
lnLambda = 20.0;
};
return lnLambda;
}
__device__ __forceinline__ f64_vec2 Anticlock_rotate2(const f64_vec2 arg)
{
f64_vec2 result;
result.x = Anticlockwise_d.xx*arg.x+Anticlockwise_d.xy*arg.y;
result.y = Anticlockwise_d.yx*arg.x+Anticlockwise_d.yy*arg.y;
return result;
}
__device__ __forceinline__ f64_vec2 Clockwise_rotate2(const f64_vec2 arg)
{
f64_vec2 result;
result.x = Clockwise_d.xx*arg.x+Clockwise_d.xy*arg.y;
result.y = Clockwise_d.yx*arg.x+Clockwise_d.yy*arg.y;
return result;
}
__device__ __forceinline__ f64_vec3 Anticlock_rotate3(const f64_vec3 arg)
{
f64_vec3 result;
result.x = Anticlockwise_d.xx*arg.x+Anticlockwise_d.xy*arg.y;
result.y = Anticlockwise_d.yx*arg.x+Anticlockwise_d.yy*arg.y;
result.z = arg.z;
return result;
}
__device__ __forceinline__ f64_vec3 Clockwise_rotate3(const f64_vec3 arg)
{
f64_vec3 result;
result.x = Clockwise_d.xx*arg.x+Clockwise_d.xy*arg.y;
result.y = Clockwise_d.yx*arg.x+Clockwise_d.yy*arg.y;
result.z = arg.z;
return result;
}
__device__ __forceinline__ void Estimate_Ion_Neutral_Cross_sections_d(real T, // call with T in electronVolts
real * p_sigma_in_MT,
real * p_sigma_in_visc)
{
if (T > cross_T_vals_d[9]) {
*p_sigma_in_MT = cross_s_vals_MT_ni_d[9];
*p_sigma_in_visc = cross_s_vals_viscosity_ni_d[9];
return;
}
if (T < cross_T_vals_d[0]) {
*p_sigma_in_MT = cross_s_vals_MT_ni_d[0];
*p_sigma_in_visc = cross_s_vals_viscosity_ni_d[0];
return;
}
int i = 1;
//while (T > cross_T_vals_d[i]) i++;
if (T > cross_T_vals_d[5]) {
if (T > cross_T_vals_d[7]) {
if (T > cross_T_vals_d[8])
{
i = 9; // top of interval
}
else {
i = 8;
};
}
else {
if (T > cross_T_vals_d[6]) {
i = 7;
}
else {
i = 6;
};
};
}
else {
if (T > cross_T_vals_d[3]) {
if (T > cross_T_vals_d[4]) {
i = 5;
}
else {
i = 4;
};
}
else {
if (T > cross_T_vals_d[2]) {
i = 3;
}
else {
if (T > cross_T_vals_d[1]) {
i = 2;
}
else {
i = 1;
};
};
};
};
// T lies between i-1,i
real ppn = (T - cross_T_vals_d[i - 1]) / (cross_T_vals_d[i] - cross_T_vals_d[i - 1]);
*p_sigma_in_MT = ppn * cross_s_vals_MT_ni_d[i] + (1.0 - ppn)*cross_s_vals_MT_ni_d[i - 1];
*p_sigma_in_visc = ppn * cross_s_vals_viscosity_ni_d[i] + (1.0 - ppn)*cross_s_vals_viscosity_ni_d[i - 1];
return;
}
__device__ __forceinline__ f64 Estimate_Neutral_MT_Cross_section_d(f64 T)
{
// CALL WITH T IN eV
if (T > cross_T_vals_d[9]) return cross_s_vals_MT_ni_d[9];
if (T < cross_T_vals_d[0]) return cross_s_vals_MT_ni_d[0];
int i = 1;
//while (T > cross_T_vals_d[i]) i++;
if (T > cross_T_vals_d[5]) {
if (T > cross_T_vals_d[7]) {
if (T > cross_T_vals_d[8])
{
i = 9; // top of interval
} else {
i = 8;
};
} else {
if (T > cross_T_vals_d[6]) {
i = 7;
} else {
i = 6;
};
};
} else {
if (T > cross_T_vals_d[3]) {
if (T > cross_T_vals_d[4]) {
i = 5;
} else {
i = 4;
};
} else {
if (T > cross_T_vals_d[2]) {
i = 3;
} else {
if (T > cross_T_vals_d[1]) {
i = 2;
} else {
i = 1;
};
};
};
};
// T lies between i-1,i
real ppn = (T-cross_T_vals_d[i-1])/(cross_T_vals_d[i]-cross_T_vals_d[i-1]);
return ppn*cross_s_vals_MT_ni_d[i] + (1.0-ppn)*cross_s_vals_MT_ni_d[i-1];
}
__device__ __forceinline__ f64 Estimate_Neutral_Neutral_Viscosity_Cross_section_d(f64 T)
{
// call with T in electronVolts
if (T > cross_T_vals_d[9]) return cross_s_vals_viscosity_nn_d[9];
if (T < cross_T_vals_d[0]) return cross_s_vals_viscosity_nn_d[0];
int i = 1;
//while (T > cross_T_vals_d[i]) i++;
if (T > cross_T_vals_d[5]) {
if (T > cross_T_vals_d[7]) {
if (T > cross_T_vals_d[8])
{
i = 9; // top of interval
} else {
i = 8;
};
} else {
if (T > cross_T_vals_d[6]) {
i = 7;
} else {
i = 6;
};
};
} else {
if (T > cross_T_vals_d[3]) {
if (T > cross_T_vals_d[4]) {
i = 5;
} else {
i = 4;
};
} else {
if (T > cross_T_vals_d[2]) {
i = 3;
} else {
if (T > cross_T_vals_d[1]) {
i = 2;
} else {
i = 1;
};
};
};
};
// T lies between i-1,i
real ppn = (T-cross_T_vals_d[i-1])/(cross_T_vals_d[i]-cross_T_vals_d[i-1]);
return ppn*cross_s_vals_viscosity_nn_d[i] + (1.0-ppn)*cross_s_vals_viscosity_nn_d[i-1];
}
__device__ __forceinline__ f64 Estimate_Ion_Neutral_Viscosity_Cross_section(f64 T)
{
if (T > cross_T_vals_d[9]) return cross_s_vals_viscosity_ni_d[9];
if (T < cross_T_vals_d[0]) return cross_s_vals_viscosity_ni_d[0];
int i = 1;
//while (T > cross_T_vals_d[i]) i++;
if (T > cross_T_vals_d[5]) {
if (T > cross_T_vals_d[7]) {
if (T > cross_T_vals_d[8])
{
i = 9; // top of interval
} else {
i = 8;
};
} else {
if (T > cross_T_vals_d[6]) {
i = 7;
} else {
i = 6;
};
};
} else {
if (T > cross_T_vals_d[3]) {
if (T > cross_T_vals_d[4]) {
i = 5;
} else {
i = 4;
};
} else {
if (T > cross_T_vals_d[2]) {
i = 3;
} else {
if (T > cross_T_vals_d[1]) {
i = 2;
} else {
i = 1;
};
};
};
};
// T lies between i-1,i
real ppn = (T-cross_T_vals_d[i-1])/(cross_T_vals_d[i]-cross_T_vals_d[i-1]);
return ppn*cross_s_vals_viscosity_ni_d[i] + (1.0-ppn)*cross_s_vals_viscosity_ni_d[i-1];
}
__device__ __forceinline__ f64 Calculate_Kappa_Neutral(f64 n_i, f64 T_i, f64 n_n, f64 T_n)
{
// NOTE:
// It involves sqrt and we could easily find a way to calculate only once.
if (n_n == 0.0) return 0.0;
f64 s_in_visc, s_nn_visc;
s_in_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(T_i*one_over_kB);
s_nn_visc = Estimate_Neutral_Neutral_Viscosity_Cross_section_d(T_n*one_over_kB);
// Oh. So there's another two we have to port.
// Yet for ion eta it's so different, apparently.
f64 ionneut_thermal = sqrt(T_i/m_ion+T_n/m_n);
f64 nu_ni_visc = n_i*s_in_visc*ionneut_thermal;
f64 nu_nn_visc = n_n*s_nn_visc*sqrt(T_n/m_n);
f64 nu_nheart = 0.75*nu_ni_visc + 0.25*nu_nn_visc;
f64 kappa_n = NEUTRAL_KAPPA_FACTOR*n_n*T_n/(m_n*nu_nheart);
// NEUTRAL_KAPPA_FACTOR should be in constant.h
// e-n does not feature.
return kappa_n;
}
__device__ __forceinline__ void Get_kappa_parallels_and_nu_hearts
(real n_n,real T_n,real n_i,real T_i,real n_e,real T_e,
f64 * pkappa_neut, f64 * pnu_nheart,
f64 * pkappa_ion_par, f64 * pnu_iheart,
f64 * pkappa_e_par, f64 * pnu_eheart,
f64 * pratio)
{
f64 s_in_visc, s_nn_visc, s_en_visc;
f64 ionneut_thermal,
nu_ni_visc, nu_nn_visc, nu_nheart,
nu_in_visc, nu_en_visc, nu_ii, nu_iheart, nu_eheart,
sqrt_Te, electron_thermal, nu_eiBar;
f64 lnLambda = Get_lnLambda_ion_d(n_i,T_i);
ionneut_thermal = sqrt(T_i/m_ion+T_n/m_n);
sqrt_Te = sqrt(T_e);
s_in_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(T_i*one_over_kB);
s_nn_visc = Estimate_Neutral_Neutral_Viscosity_Cross_section_d(T_n*one_over_kB);
nu_in_visc = n_n*s_in_visc*ionneut_thermal;
nu_nn_visc = n_n*s_nn_visc*sqrt(T_n/m_n);
nu_ni_visc = n_i*s_in_visc*ionneut_thermal;
nu_ii = Nu_ii_Factor*kB_to_3halves*n_i*lnLambda/(T_i*sqrt(T_i));
nu_iheart = 0.75*nu_in_visc
+ 0.8*nu_ii-0.25*nu_in_visc*nu_ni_visc/(3.0*nu_ni_visc+nu_nn_visc);
*pkappa_ion_par = 2.5*n_i*T_i/(m_ion*(nu_iheart));
*pnu_iheart = nu_iheart;
s_en_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(T_e*one_over_kB);
electron_thermal = (sqrt_Te*over_sqrt_m_e);
lnLambda = Get_lnLambda_d(n_e,T_e);
nu_eiBar = nu_eiBarconst*kB_to_3halves*n_i*lnLambda/(T_e*sqrt_Te);
nu_en_visc = n_n*s_en_visc*electron_thermal;
nu_eheart = 1.87*nu_eiBar + nu_en_visc;
*pnu_eheart = nu_eheart;
*pkappa_e_par = 2.5*n_e*T_e/(m_e*nu_eheart);
// Store ratio for thermoelectric use:
*pratio = nu_eiBar/nu_eheart;
if (n_n == 0.0){
*pkappa_neut = 0.0;
*pnu_nheart = 0.0; // make sure this output is not left uninitialized
} else {
nu_nheart = 0.75*nu_ni_visc + 0.25*nu_nn_visc;
*pkappa_neut = NEUTRAL_KAPPA_FACTOR*n_n*T_n/(m_n*nu_nheart);
*pnu_nheart = nu_nheart;
// NEUTRAL_KAPPA_FACTOR should be in constant.h
// e-n does not feature.
};
}
__device__ __forceinline__ void RotateClockwise(f64_vec3 & v)
{
f64 temp = Clockwise_d.xx*v.x + Clockwise_d.xy*v.y;
v.y = Clockwise_d.yx*v.x + Clockwise_d.yy*v.y;
v.x = temp;
}
__device__ __forceinline__ void RotateAnticlockwise(f64_vec3 & v)
{
f64 temp = Anticlockwise_d.xx*v.x + Anticlockwise_d.xy*v.y;
v.y = Anticlockwise_d.yx*v.x + Anticlockwise_d.yy*v.y;
v.x = temp;
}
__device__ __forceinline__ f64_vec2 GetRadiusIntercept(f64_vec2 x1,f64_vec2 x2,f64 r)
{
// Where does the line through x1 and x2 meet the circle of radius r?
f64_vec2 result;
f64 den = (x2.x-x1.x)*(x2.x-x1.x) + (x2.y - x1.y)*(x2.y - x1.y) ;
f64 a = (x1.x * (x2.x-x1.x) + x1.y * (x2.y-x1.y) ) / den;
// (t + a)^2 - a^2 = ( c^2 - x1.x^2 - x1.y^2 )/den
f64 root = sqrt( (r*r- x1.x*x1.x - x1.y*x1.y)/den + a*a ) ;
f64 t1 = root - a;
f64 t2 = -root - a;
// since this is a sufficient condition to satisfy the circle, this probably means that
// the other solution is on the other side of the circle.
// Which root is within x1, x2 ? Remember x2 would be t = 1.
if (t1 > 1.0)
{
if ((t2 < 0.0) || (t2 > 1.0))
{
// This usually means one of the points actually is on the curve.
f64 dist1 = min(fabs(t1-1.0),fabs(t1));
f64 dist2 = min(fabs(t2-1.0),fabs(t2));
if (dist1 < dist2)
{
// use t1
result.x = x1.x + t1*(x2.x-x1.x);
result.y = x1.y + t1*(x2.y-x1.y);
// printf("t1@@");
} else {
// use t2
result.x = x1.x + t2*(x2.x-x1.x);
result.y = x1.y + t2*(x2.y-x1.y);
// printf("t2@@");
};
} else {
// use t2:
result.x = x1.x + t2*(x2.x-x1.x);
result.y = x1.y + t2*(x2.y-x1.y);
// printf("t2~");
};
} else {
result.x = x1.x + t1*(x2.x-x1.x);
result.y = x1.y + t1*(x2.y-x1.y);
//printf("t1~");
};
// For some reason this is only hitting the radius to single precision.
// printf to compare difference between achieved radius and r.
//if ((result.x < -0.145) && (result.x > -0.155))
//{
// f64 achieve = result.modulus();
// printf("ach %1.12E r %1.2f t1 %1.10E \nx %1.12E y %1.12E\n",achieve,r,t1,result.x,result.y);
//}
// So what do we do?
// We could boost back but there seem to be bigger problems thereafter.
// Ideally we'd go through and compare and see, is it t1 that is a bit wrong here?
//
return result;
}
|
93a45192fa812353911d52d394f44038a30a880c.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <algorithm>
#include <vector>
#include "_interface_defs.h"
#include "cml/cml_blas.cuh"
#include "cml/cml_linalg.cuh"
#include "cml/cml_matrix.cuh"
#include "cml/cml_vector.cuh"
#include "matrix_util.h"
#include "pogs.h"
#include "sinkhorn_knopp.cuh"
// Apply operator to h.a and h.d.
template <typename T, typename Op>
struct ApplyOp: thrust::binary_function<FunctionObj<T>, FunctionObj<T>, T> {
Op binary_op;
ApplyOp(Op binary_op) : binary_op(binary_op) { }
__device__ FunctionObj<T> operator()(FunctionObj<T> &h, T x) {
h.a = binary_op(h.a, x); h.d = binary_op(h.d, x);
return h;
}
};
// Proximal Operator Graph Solver.
template <typename T, typename M>
int Pogs(PogsData<T, M> *pogs_data) {
// Constants for adaptive-rho and over-relaxation.
const T kDeltaMin = static_cast<T>(1.05);
const T kDeltaMax = static_cast<T>(2);
const T kGamma = static_cast<T>(1.01);
const T kTau = static_cast<T>(0.8);
const T kAlpha = static_cast<T>(1.7);
const T kKappa = static_cast<T>(0.9);
int err = 0;
// Extract values from pogs_data
size_t m = pogs_data->m, n = pogs_data->n, min_dim = ::min(m, n);
T rho = pogs_data->rho;
const T kOne = static_cast<T>(1), kZero = static_cast<T>(0);
thrust::device_vector<FunctionObj<T> > f = pogs_data->f;
thrust::device_vector<FunctionObj<T> > g = pogs_data->g;
// Create cuBLAS hdl.
hipblasHandle_t hdl;
hipblasCreate(&hdl);
// Allocate data for ADMM variables.
bool compute_factors = true;
cml::vector<T> de, z, zt;
cml::vector<T> zprev = cml::vector_calloc<T>(m + n);
cml::vector<T> z12 = cml::vector_calloc<T>(m + n);
cml::vector<T> l = cml::vector_calloc<T>(m);
cml::matrix<T> A, L;
cml::matrix<T> C = cml::matrix_alloc<T>(m + n, 2);
if (pogs_data->factors != 0) {
hipMemcpy(&rho, pogs_data->factors, sizeof(T), hipMemcpyDeviceToHost);
if (rho > 0) {
compute_factors = false;
} else {
rho = pogs_data->rho;
}
de = cml::vector_view_array(pogs_data->factors + 1, m + n);
z = cml::vector_view_array(pogs_data->factors + 1 + m + n, m + n);
zt = cml::vector_view_array(pogs_data->factors + 1 + 2 * (m + n), m + n);
L = cml::matrix_view_array(pogs_data->factors + 1 + 3 * (m + n), min_dim,
min_dim);
A = cml::matrix_view_array(pogs_data->factors + 1 + 3 * (m + n) +
min_dim * min_dim, m, n);
} else {
de = cml::vector_calloc<T>(m + n);
z = cml::vector_calloc<T>(m + n);
zt = cml::vector_calloc<T>(m + n);
L = cml::matrix_alloc<T>(min_dim, min_dim);
A = cml::matrix_alloc<T>(m, n);
}
if (de.data == 0 || z.data == 0 || zt.data == 0 || zprev.data == 0 ||
z12.data == 0 || l.data == 0 || A.data == 0 || L.data == 0 || C.data == 0)
err = 1;
// Create views for x and y components.
cml::matrix<T> Cx = cml::matrix_submatrix(&C, 0, 0, n, 2);
cml::matrix<T> Cy = cml::matrix_submatrix(&C, n, 0, m, 2);
cml::vector<T> d = cml::vector_subvector(&de, 0, m);
cml::vector<T> e = cml::vector_subvector(&de, m, n);
cml::vector<T> x = cml::vector_subvector(&z, 0, n);
cml::vector<T> y = cml::vector_subvector(&z, n, m);
cml::vector<T> x12 = cml::vector_subvector(&z12, 0, n);
cml::vector<T> y12 = cml::vector_subvector(&z12, n, m);
cml::vector<T> cz0 = cml::matrix_column(&C, 0);
cml::vector<T> cx0 = cml::vector_subvector(&cz0, 0, n);
cml::vector<T> cy0 = cml::vector_subvector(&cz0, n, m);
cml::vector<T> cz1 = cml::matrix_column(&C, 1);
cml::vector<T> cx1 = cml::vector_subvector(&cz1, 0, n);
cml::vector<T> cy1 = cml::vector_subvector(&cz1, n, m);
if (compute_factors && !err) {
// Copy A to device (assume input row-major).
T *Acm = new T[m * n];
RowToColMajor(pogs_data->A, m, n, Acm);
err = Equilibrate(Acm, &d, &e);
cml::matrix_memcpy(&A, Acm);
delete [] Acm;
if (!err) {
// Compute A^TA or AA^T.
hipblasOperation_t op_type = m >= n ? HIPBLAS_OP_T : HIPBLAS_OP_N;
cml::blas_syrk(hdl, HIPBLAS_FILL_MODE_LOWER, op_type, kOne, &A, kZero, &L);
// Scale A.
cml::vector<T> diag_L = cml::matrix_diagonal(&L);
T mean_diag = cml::blas_asum(hdl, &diag_L) / static_cast<T>(min_dim);
T sqrt_mean_diag = sqrt(mean_diag);
cml::matrix_scale(&L, kOne / mean_diag);
cml::matrix_scale(&A, kOne / sqrt_mean_diag);
T factor = sqrt(cml::blas_nrm2(hdl, &d) * sqrt(static_cast<T>(n)) /
(cml::blas_nrm2(hdl, &e) * sqrt(static_cast<T>(m))));
cml::blas_scal(hdl, kOne / (factor * sqrt(sqrt_mean_diag)), &d);
cml::blas_scal(hdl, factor / sqrt(sqrt_mean_diag), &e);
// Compute cholesky decomposition of (I + A^TA) or (I + AA^T)
cml::vector_add_constant(&diag_L, kOne);
cml::linalg_cholesky_decomp(hdl, &L);
}
}
// Scale f and g to account for diagonal scaling e and d.
if (!err) {
thrust::transform(f.begin(), f.end(), thrust::device_pointer_cast(d.data),
f.begin(), ApplyOp<T, thrust::divides<T> >(thrust::divides<T>()));
thrust::transform(g.begin(), g.end(), thrust::device_pointer_cast(e.data),
g.begin(), ApplyOp<T, thrust::multiplies<T> >(thrust::multiplies<T>()));
}
// Signal start of execution.
if (!pogs_data->quiet)
Printf(" # res_pri eps_pri res_dual eps_dual"
" gap eps_gap objective\n");
// Initialize scalars.
T sqrtn_atol = std::sqrt(static_cast<T>(n)) * pogs_data->abs_tol;
T sqrtm_atol = std::sqrt(static_cast<T>(m)) * pogs_data->abs_tol;
T sqrtmn_atol = std::sqrt(static_cast<T>(m + n)) * pogs_data->abs_tol;
T delta = kDeltaMin, xi = static_cast<T>(1.0);
unsigned int kd = 0, ku = 0;
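// Reading of one pass of the loop below (editorial sketch: the symbols are an
// interpretation, the operations are the ones actually coded):
// z12 = prox_{g,f}(z - zt), evaluated blockwise on the x and y parts;
// z = over-relaxed projection of (z12 + zt) onto { (x, y) : y = A x },
// using the Cholesky factor L of I + A^T A (m >= n) or I + A A^T (m < n);
// zt = zt + kAlpha*z12 + (1 - kAlpha)*zprev - z;
// rho is then rescaled adaptively from the primal/dual residuals nrm_r, nrm_s.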
for (unsigned int k = 0; k < pogs_data->max_iter && !err; ++k) {
cml::vector_memcpy(&zprev, &z);
// Evaluate Proximal Operators
cml::vector_memcpy(&cz0, &z);
cml::blas_axpy(hdl, -kOne, &zt, &cz0);
ProxEval(g, rho, cx0.data, x12.data);
ProxEval(f, rho, cy0.data, y12.data);
// Compute Gap.
T gap, nrm_r, nrm_s;
cml::blas_axpy(hdl, -kOne, &z12, &cz0);
cml::blas_dot(hdl, &cz0, &z12, &gap);
gap = fabs(gap * rho);
T obj = FuncEval(f, y12.data) + FuncEval(g, x12.data);
T eps_pri = sqrtm_atol + pogs_data->rel_tol * cml::blas_nrm2(hdl, &z12);
T eps_dual = sqrtn_atol +
pogs_data->rel_tol * rho * cml::blas_nrm2(hdl, &cz0);
T eps_gap = sqrtmn_atol + pogs_data->rel_tol * fabs(obj);
// Store dual variable
if (pogs_data->l != 0)
cml::vector_memcpy(&l, &cy0);
// Project and Update Dual Variables
if (m >= n) {
cml::blas_gemv(hdl, HIPBLAS_OP_T, kOne, &A, &cy0, kOne, &cx0);
nrm_s = rho * cml::blas_nrm2(hdl, &cx0);
cml::linalg_cholesky_svx(hdl, &L, &cx0);
cml::vector_memcpy(&cy0, &y);
cml::vector_memcpy(&cz1, &z12);
cml::blas_gemm(hdl, HIPBLAS_OP_N, HIPBLAS_OP_N, -kOne, &A, &Cx, kOne, &Cy);
nrm_r = cml::blas_nrm2(hdl, &cy1);
cml::vector_memcpy(&y, &cy0);
cml::blas_axpy(hdl, -kOne, &cx0, &x);
} else {
cml::vector_memcpy(&z, &z12);
cml::blas_gemv(hdl, HIPBLAS_OP_N, kOne, &A, &x, -kOne, &y);
nrm_r = cml::blas_nrm2(hdl, &y);
cml::linalg_cholesky_svx(hdl, &L, &y);
cml::vector_memcpy(&cy1, &y);
cml::vector_memcpy(&cx1, &x12);
cml::blas_scal(hdl, -kOne, &cy0);
cml::blas_gemm(hdl, HIPBLAS_OP_T, HIPBLAS_OP_N, -kOne, &A, &Cy, kOne, &Cx);
nrm_s = rho * cml::blas_nrm2(hdl, &cx0);
cml::vector_memcpy(&x, &cx1);
cml::blas_axpy(hdl, kOne, &y12, &y);
}
// Apply over relaxation.
cml::blas_scal(hdl, kAlpha, &z);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &z);
// Update dual variable.
cml::blas_axpy(hdl, kAlpha, &z12, &zt);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &zt);
cml::blas_axpy(hdl, -kOne, &z, &zt);
// Evaluate stopping criteria.
bool converged = nrm_r < eps_pri && nrm_s < eps_dual && gap < eps_gap;
if (!pogs_data->quiet && (k % 10 == 0 || converged))
Printf("%4d : %.3e %.3e %.3e %.3e %.3e %.3e %.3e\n",
k, nrm_r, eps_pri, nrm_s, eps_dual, gap, eps_gap, obj);
if (converged)
break;
// Rescale rho.
if (pogs_data->adaptive_rho) {
if (nrm_s < xi * eps_dual && nrm_r > xi * eps_pri && kTau * k > kd) {
rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = ::min(kGamma * delta, kDeltaMax);
ku = k;
} else if (nrm_s > xi * eps_dual && nrm_r < xi * eps_pri &&
kTau * k > ku) {
rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = ::min(kGamma * delta, kDeltaMax);
kd = k;
} else if (nrm_s < xi * eps_dual && nrm_r < xi * eps_pri) {
xi *= kKappa;
} else {
delta = ::max(delta / kGamma, kDeltaMin);
}
}
}
pogs_data->optval = FuncEval(f, y12.data) + FuncEval(g, x12.data);
// Scale x, y and l for output.
cml::vector_div(&y12, &d);
cml::vector_mul(&x12, &e);
cml::vector_mul(&l, &d);
cml::blas_scal(hdl, rho, &l);
// Copy results to output.
if (pogs_data->y != 0 && !err)
cml::vector_memcpy(pogs_data->y, &y12);
if (pogs_data->x != 0 && !err)
cml::vector_memcpy(pogs_data->x, &x12);
if (pogs_data->l != 0 && !err)
cml::vector_memcpy(pogs_data->l, &l);
// Store rho and free memory.
if (pogs_data->factors != 0 && !err) {
hipMemcpy(pogs_data->factors, &rho, sizeof(T), hipMemcpyHostToDevice);
} else {
cml::vector_free(&de);
cml::vector_free(&z);
cml::vector_free(&zt);
cml::matrix_free(&L);
cml::matrix_free(&A);
}
cml::matrix_free(&C);
cml::vector_free(&z12);
cml::vector_free(&zprev);
cml::vector_free(&l);
hipblasDestroy(hdl);
return err;
}
template <>
int AllocFactors(PogsData<double, double*> *pogs_data) {
size_t m = pogs_data->m, n = pogs_data->n;
size_t flen = 1 + 3 * (m + n) + ::min(m, n) * ::min(m, n) + m * n;
hipError_t err = hipMalloc(&pogs_data->factors, flen * sizeof(double));
if (err == hipSuccess) {
hipMemset(pogs_data->factors, 0, flen * sizeof(double));
return 0;
} else {
return 1;
}
}
template <>
int AllocFactors(PogsData<float, float*> *pogs_data) {
size_t m = pogs_data->m, n = pogs_data->n;
size_t flen = 1 + 3 * (m + n) + ::min(m, n) * ::min(m, n) + m * n;
hipError_t err = hipMalloc(&pogs_data->factors, flen * sizeof(float));
if (err == hipSuccess) {
hipMemset(pogs_data->factors, 0, flen * sizeof(float));
return 0;
} else {
return 1;
}
}
template <>
void FreeFactors(PogsData<double, double*> *pogs_data) {
hipFree(pogs_data->factors);
}
template <>
void FreeFactors(PogsData<float, float*> *pogs_data) {
hipFree(pogs_data->factors);
}
template int Pogs<double>(PogsData<double, double*> *);
template int Pogs<float>(PogsData<float, float*> *);
| 93a45192fa812353911d52d394f44038a30a880c.cu | #include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <algorithm>
#include <vector>
#include "_interface_defs.h"
#include "cml/cml_blas.cuh"
#include "cml/cml_linalg.cuh"
#include "cml/cml_matrix.cuh"
#include "cml/cml_vector.cuh"
#include "matrix_util.h"
#include "pogs.h"
#include "sinkhorn_knopp.cuh"
// Apply operator to h.a and h.d.
template <typename T, typename Op>
struct ApplyOp: thrust::binary_function<FunctionObj<T>, FunctionObj<T>, T> {
Op binary_op;
ApplyOp(Op binary_op) : binary_op(binary_op) { }
__device__ FunctionObj<T> operator()(FunctionObj<T> &h, T x) {
h.a = binary_op(h.a, x); h.d = binary_op(h.d, x);
return h;
}
};
// Proximal Operator Graph Solver.
template <typename T, typename M>
int Pogs(PogsData<T, M> *pogs_data) {
// Constants for adaptive-rho and over-relaxation.
const T kDeltaMin = static_cast<T>(1.05);
const T kDeltaMax = static_cast<T>(2);
const T kGamma = static_cast<T>(1.01);
const T kTau = static_cast<T>(0.8);
const T kAlpha = static_cast<T>(1.7);
const T kKappa = static_cast<T>(0.9);
int err = 0;
// Extract values from pogs_data
size_t m = pogs_data->m, n = pogs_data->n, min_dim = std::min(m, n);
T rho = pogs_data->rho;
const T kOne = static_cast<T>(1), kZero = static_cast<T>(0);
thrust::device_vector<FunctionObj<T> > f = pogs_data->f;
thrust::device_vector<FunctionObj<T> > g = pogs_data->g;
// Create cuBLAS hdl.
cublasHandle_t hdl;
cublasCreate(&hdl);
// Allocate data for ADMM variables.
bool compute_factors = true;
cml::vector<T> de, z, zt;
cml::vector<T> zprev = cml::vector_calloc<T>(m + n);
cml::vector<T> z12 = cml::vector_calloc<T>(m + n);
cml::vector<T> l = cml::vector_calloc<T>(m);
cml::matrix<T> A, L;
cml::matrix<T> C = cml::matrix_alloc<T>(m + n, 2);
if (pogs_data->factors != 0) {
cudaMemcpy(&rho, pogs_data->factors, sizeof(T), cudaMemcpyDeviceToHost);
if (rho > 0) {
compute_factors = false;
} else {
rho = pogs_data->rho;
}
de = cml::vector_view_array(pogs_data->factors + 1, m + n);
z = cml::vector_view_array(pogs_data->factors + 1 + m + n, m + n);
zt = cml::vector_view_array(pogs_data->factors + 1 + 2 * (m + n), m + n);
L = cml::matrix_view_array(pogs_data->factors + 1 + 3 * (m + n), min_dim,
min_dim);
A = cml::matrix_view_array(pogs_data->factors + 1 + 3 * (m + n) +
min_dim * min_dim, m, n);
} else {
de = cml::vector_calloc<T>(m + n);
z = cml::vector_calloc<T>(m + n);
zt = cml::vector_calloc<T>(m + n);
L = cml::matrix_alloc<T>(min_dim, min_dim);
A = cml::matrix_alloc<T>(m, n);
}
if (de.data == 0 || z.data == 0 || zt.data == 0 || zprev.data == 0 ||
z12.data == 0 || l.data == 0 || A.data == 0 || L.data == 0 || C.data == 0)
err = 1;
// Create views for x and y components.
cml::matrix<T> Cx = cml::matrix_submatrix(&C, 0, 0, n, 2);
cml::matrix<T> Cy = cml::matrix_submatrix(&C, n, 0, m, 2);
cml::vector<T> d = cml::vector_subvector(&de, 0, m);
cml::vector<T> e = cml::vector_subvector(&de, m, n);
cml::vector<T> x = cml::vector_subvector(&z, 0, n);
cml::vector<T> y = cml::vector_subvector(&z, n, m);
cml::vector<T> x12 = cml::vector_subvector(&z12, 0, n);
cml::vector<T> y12 = cml::vector_subvector(&z12, n, m);
cml::vector<T> cz0 = cml::matrix_column(&C, 0);
cml::vector<T> cx0 = cml::vector_subvector(&cz0, 0, n);
cml::vector<T> cy0 = cml::vector_subvector(&cz0, n, m);
cml::vector<T> cz1 = cml::matrix_column(&C, 1);
cml::vector<T> cx1 = cml::vector_subvector(&cz1, 0, n);
cml::vector<T> cy1 = cml::vector_subvector(&cz1, n, m);
if (compute_factors && !err) {
// Copy A to device (assume input row-major).
T *Acm = new T[m * n];
RowToColMajor(pogs_data->A, m, n, Acm);
err = Equilibrate(Acm, &d, &e);
cml::matrix_memcpy(&A, Acm);
delete [] Acm;
if (!err) {
// Compute A^TA or AA^T.
cublasOperation_t op_type = m >= n ? CUBLAS_OP_T : CUBLAS_OP_N;
cml::blas_syrk(hdl, CUBLAS_FILL_MODE_LOWER, op_type, kOne, &A, kZero, &L);
// Scale A.
cml::vector<T> diag_L = cml::matrix_diagonal(&L);
T mean_diag = cml::blas_asum(hdl, &diag_L) / static_cast<T>(min_dim);
T sqrt_mean_diag = sqrt(mean_diag);
cml::matrix_scale(&L, kOne / mean_diag);
cml::matrix_scale(&A, kOne / sqrt_mean_diag);
T factor = sqrt(cml::blas_nrm2(hdl, &d) * sqrt(static_cast<T>(n)) /
(cml::blas_nrm2(hdl, &e) * sqrt(static_cast<T>(m))));
cml::blas_scal(hdl, kOne / (factor * sqrt(sqrt_mean_diag)), &d);
cml::blas_scal(hdl, factor / sqrt(sqrt_mean_diag), &e);
// Compute cholesky decomposition of (I + A^TA) or (I + AA^T)
cml::vector_add_constant(&diag_L, kOne);
cml::linalg_cholesky_decomp(hdl, &L);
}
}
// Scale f and g to account for diagonal scaling e and d.
if (!err) {
thrust::transform(f.begin(), f.end(), thrust::device_pointer_cast(d.data),
f.begin(), ApplyOp<T, thrust::divides<T> >(thrust::divides<T>()));
thrust::transform(g.begin(), g.end(), thrust::device_pointer_cast(e.data),
g.begin(), ApplyOp<T, thrust::multiplies<T> >(thrust::multiplies<T>()));
}
// Signal start of execution.
if (!pogs_data->quiet)
Printf(" # res_pri eps_pri res_dual eps_dual"
" gap eps_gap objective\n");
// Initialize scalars.
T sqrtn_atol = std::sqrt(static_cast<T>(n)) * pogs_data->abs_tol;
T sqrtm_atol = std::sqrt(static_cast<T>(m)) * pogs_data->abs_tol;
T sqrtmn_atol = std::sqrt(static_cast<T>(m + n)) * pogs_data->abs_tol;
T delta = kDeltaMin, xi = static_cast<T>(1.0);
unsigned int kd = 0, ku = 0;
for (unsigned int k = 0; k < pogs_data->max_iter && !err; ++k) {
cml::vector_memcpy(&zprev, &z);
// Evaluate Proximal Operators
cml::vector_memcpy(&cz0, &z);
cml::blas_axpy(hdl, -kOne, &zt, &cz0);
ProxEval(g, rho, cx0.data, x12.data);
ProxEval(f, rho, cy0.data, y12.data);
// Compute Gap.
T gap, nrm_r, nrm_s;
cml::blas_axpy(hdl, -kOne, &z12, &cz0);
cml::blas_dot(hdl, &cz0, &z12, &gap);
gap = fabs(gap * rho);
T obj = FuncEval(f, y12.data) + FuncEval(g, x12.data);
T eps_pri = sqrtm_atol + pogs_data->rel_tol * cml::blas_nrm2(hdl, &z12);
T eps_dual = sqrtn_atol +
pogs_data->rel_tol * rho * cml::blas_nrm2(hdl, &cz0);
T eps_gap = sqrtmn_atol + pogs_data->rel_tol * fabs(obj);
// Store dual variable
if (pogs_data->l != 0)
cml::vector_memcpy(&l, &cy0);
// Project and Update Dual Variables
if (m >= n) {
cml::blas_gemv(hdl, CUBLAS_OP_T, kOne, &A, &cy0, kOne, &cx0);
nrm_s = rho * cml::blas_nrm2(hdl, &cx0);
cml::linalg_cholesky_svx(hdl, &L, &cx0);
cml::vector_memcpy(&cy0, &y);
cml::vector_memcpy(&cz1, &z12);
cml::blas_gemm(hdl, CUBLAS_OP_N, CUBLAS_OP_N, -kOne, &A, &Cx, kOne, &Cy);
nrm_r = cml::blas_nrm2(hdl, &cy1);
cml::vector_memcpy(&y, &cy0);
cml::blas_axpy(hdl, -kOne, &cx0, &x);
} else {
cml::vector_memcpy(&z, &z12);
cml::blas_gemv(hdl, CUBLAS_OP_N, kOne, &A, &x, -kOne, &y);
nrm_r = cml::blas_nrm2(hdl, &y);
cml::linalg_cholesky_svx(hdl, &L, &y);
cml::vector_memcpy(&cy1, &y);
cml::vector_memcpy(&cx1, &x12);
cml::blas_scal(hdl, -kOne, &cy0);
cml::blas_gemm(hdl, CUBLAS_OP_T, CUBLAS_OP_N, -kOne, &A, &Cy, kOne, &Cx);
nrm_s = rho * cml::blas_nrm2(hdl, &cx0);
cml::vector_memcpy(&x, &cx1);
cml::blas_axpy(hdl, kOne, &y12, &y);
}
// Apply over relaxation.
cml::blas_scal(hdl, kAlpha, &z);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &z);
// Update dual variable.
cml::blas_axpy(hdl, kAlpha, &z12, &zt);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &zt);
cml::blas_axpy(hdl, -kOne, &z, &zt);
// Evaluate stopping criteria.
bool converged = nrm_r < eps_pri && nrm_s < eps_dual && gap < eps_gap;
if (!pogs_data->quiet && (k % 10 == 0 || converged))
Printf("%4d : %.3e %.3e %.3e %.3e %.3e %.3e %.3e\n",
k, nrm_r, eps_pri, nrm_s, eps_dual, gap, eps_gap, obj);
if (converged)
break;
// Rescale rho.
if (pogs_data->adaptive_rho) {
if (nrm_s < xi * eps_dual && nrm_r > xi * eps_pri && kTau * k > kd) {
rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = std::min(kGamma * delta, kDeltaMax);
ku = k;
} else if (nrm_s > xi * eps_dual && nrm_r < xi * eps_pri &&
kTau * k > ku) {
rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = std::min(kGamma * delta, kDeltaMax);
kd = k;
} else if (nrm_s < xi * eps_dual && nrm_r < xi * eps_pri) {
xi *= kKappa;
} else {
delta = std::max(delta / kGamma, kDeltaMin);
}
}
}
pogs_data->optval = FuncEval(f, y12.data) + FuncEval(g, x12.data);
// Scale x, y and l for output.
cml::vector_div(&y12, &d);
cml::vector_mul(&x12, &e);
cml::vector_mul(&l, &d);
cml::blas_scal(hdl, rho, &l);
// Copy results to output.
if (pogs_data->y != 0 && !err)
cml::vector_memcpy(pogs_data->y, &y12);
if (pogs_data->x != 0 && !err)
cml::vector_memcpy(pogs_data->x, &x12);
if (pogs_data->l != 0 && !err)
cml::vector_memcpy(pogs_data->l, &l);
// Store rho and free memory.
if (pogs_data->factors != 0 && !err) {
cudaMemcpy(pogs_data->factors, &rho, sizeof(T), cudaMemcpyHostToDevice);
} else {
cml::vector_free(&de);
cml::vector_free(&z);
cml::vector_free(&zt);
cml::matrix_free(&L);
cml::matrix_free(&A);
}
cml::matrix_free(&C);
cml::vector_free(&z12);
cml::vector_free(&zprev);
cml::vector_free(&l);
cublasDestroy(hdl);
return err;
}
template <>
int AllocFactors(PogsData<double, double*> *pogs_data) {
size_t m = pogs_data->m, n = pogs_data->n;
size_t flen = 1 + 3 * (m + n) + std::min(m, n) * std::min(m, n) + m * n;
cudaError_t err = cudaMalloc(&pogs_data->factors, flen * sizeof(double));
if (err == cudaSuccess) {
cudaMemset(pogs_data->factors, 0, flen * sizeof(double));
return 0;
} else {
return 1;
}
}
template <>
int AllocFactors(PogsData<float, float*> *pogs_data) {
size_t m = pogs_data->m, n = pogs_data->n;
size_t flen = 1 + 3 * (m + n) + std::min(m, n) * std::min(m, n) + m * n;
cudaError_t err = cudaMalloc(&pogs_data->factors, flen * sizeof(float));
if (err == cudaSuccess) {
cudaMemset(pogs_data->factors, 0, flen * sizeof(float));
return 0;
} else {
return 1;
}
}
template <>
void FreeFactors(PogsData<double, double*> *pogs_data) {
cudaFree(pogs_data->factors);
}
template <>
void FreeFactors(PogsData<float, float*> *pogs_data) {
cudaFree(pogs_data->factors);
}
template int Pogs<double>(PogsData<double, double*> *);
template int Pogs<float>(PogsData<float, float*> *);
|
b9a3937fdbd4ea6d3a80feac7f8775c268089c4a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/cumlHandle.hpp"
#include <cuml/common/logger.hpp>
#include <cuml/neighbors/knn.hpp>
#include "ml_mg_utils.h"
#include "label/classlabels.h"
#include "selection/knn.h"
#include <hip/hip_runtime.h>
#include "cuda_utils.h"
#include <sstream>
#include <vector>
namespace ML {
void brute_force_knn(cumlHandle &handle, std::vector<float *> &input,
std::vector<int> &sizes, int D, float *search_items, int n,
int64_t *res_I, float *res_D, int k, bool rowMajorIndex,
bool rowMajorQuery) {
ASSERT(input.size() == sizes.size(),
"input and sizes vectors must be the same size");
std::vector<hipStream_t> int_streams = handle.getImpl().getInternalStreams();
MLCommon::Selection::brute_force_knn(
input, sizes, D, search_items, n, res_I, res_D, k,
handle.getImpl().getDeviceAllocator(), handle.getImpl().getStream(),
int_streams.data(), handle.getImpl().getNumInternalStreams(), rowMajorIndex,
rowMajorQuery);
}
void knn_classify(cumlHandle &handle, int *out, int64_t *knn_indices,
std::vector<int *> &y, size_t n_samples, int k) {
auto d_alloc = handle.getDeviceAllocator();
hipStream_t stream = handle.getStream();
std::vector<int *> uniq_labels(y.size());
std::vector<int> n_unique(y.size());
for (int i = 0; i < y.size(); i++) {
MLCommon::Label::getUniqueLabels(y[i], n_samples, &(uniq_labels[i]),
&(n_unique[i]), stream, d_alloc);
}
MLCommon::Selection::knn_classify(out, knn_indices, y, n_samples, k,
uniq_labels, n_unique, d_alloc, stream);
}
void knn_regress(cumlHandle &handle, float *out, int64_t *knn_indices,
std::vector<float *> &y, size_t n_samples, int k) {
MLCommon::Selection::knn_regress(out, knn_indices, y, n_samples, k,
handle.getStream());
}
void knn_class_proba(cumlHandle &handle, std::vector<float *> &out,
int64_t *knn_indices, std::vector<int *> &y,
size_t n_samples, int k) {
auto d_alloc = handle.getDeviceAllocator();
hipStream_t stream = handle.getStream();
std::vector<int *> uniq_labels(y.size());
std::vector<int> n_unique(y.size());
for (int i = 0; i < y.size(); i++) {
MLCommon::Label::getUniqueLabels(y[i], n_samples, &(uniq_labels[i]),
&(n_unique[i]), stream, d_alloc);
}
MLCommon::Selection::class_probs(out, knn_indices, y, n_samples, k,
uniq_labels, n_unique, d_alloc, stream);
}
kNN::kNN(const cumlHandle &handle, int D, int verbosity)
: D(D), total_n(0), indices(0) {
ML::Logger::get().setLevel(verbosity);
this->handle = const_cast<cumlHandle *>(&handle);
sizes = nullptr;
ptrs = nullptr;
}
kNN::~kNN() {
if (this->indices > 0) {
reset();
}
}
void kNN::reset() {
if (this->indices > 0) {
this->indices = 0;
this->total_n = 0;
free(this->ptrs); // allocated with malloc in fit(), so free() rather than delete[]
free(this->sizes);
}
}
/**
* Fit a kNN model by creating separate indices for the given input chunks,
* which may reside on different devices.
* @param input vector of device pointers, one per input chunk
* @param sizes vector giving the number of rows in each input chunk
* @param rowMajor is the input in row major layout?
*/
void kNN::fit(std::vector<float *> &input, std::vector<int> &sizes,
bool rowMajor) {
this->rowMajorIndex = rowMajor;
int N = input.size();
CUML_LOG_DEBUG("N=%d", N);
reset();
this->indices = N;
this->ptrs = (float **)malloc(N * sizeof(float *));
this->sizes = (int *)malloc(N * sizeof(int));
for (int i = 0; i < N; i++) {
this->ptrs[i] = input[i];
this->sizes[i] = sizes[i];
}
}
/**
* Search the kNN for the k-nearest neighbors of a set of query vectors
* @param search_items set of vectors to query for neighbors
* @param n number of items in search_items
* @param res_I pointer to device memory for returning k nearest indices
* @param res_D pointer to device memory for returning k nearest distances
* @param k number of neighbors to query
* @param rowMajor is the query array in row major layout?
*/
void kNN::search(float *search_items, int n, int64_t *res_I, float *res_D,
int k, bool rowMajor) {
ASSERT(this->indices > 0, "Cannot search before model has been trained.");
std::vector<hipStream_t> int_streams =
handle->getImpl().getInternalStreams();
MLCommon::Selection::brute_force_knn(
ptrs, sizes, indices, D, search_items, n, res_I, res_D, k,
handle->getImpl().getDeviceAllocator(), handle->getImpl().getStream(),
int_streams.data(), handle->getImpl().getNumInternalStreams(),
this->rowMajorIndex, rowMajor);
}
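// Illustrative use of the class above (editorial sketch: the handle, device
// pointers and sizes are assumed to exist already; names are placeholders):
//
// ML::kNN index(handle, D, /* verbosity */ 0);
// std::vector<float *> parts = {d_part0, d_part1}; // device pointers
// std::vector<int> part_rows = {n0, n1}; // rows per part
// index.fit(parts, part_rows, true); // row-major index data
// index.search(d_queries, n_queries, d_out_idx, d_out_dist, k, true);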
}; // namespace ML
/**
* @brief Flat C API function to perform a brute force knn on
* a series of input arrays and combine the results into a single
* output array for indexes and distances.
*
* @param handle the cuml handle to use
* @param input an array of pointers to the input arrays
* @param sizes an array of sizes of input arrays
* @param n_params array size of input and sizes
* @param D the dimensionality of the arrays
* @param search_items array of items to search of dimensionality D
* @param n number of rows in search_items
* @param res_I the resulting index array of size n * k
* @param res_D the resulting distance array of size n * k
* @param k the number of nearest neighbors to return
* @param rowMajorIndex is the index array in row major layout?
* @param rowMajorQuery is the query array in row major layout?
*/
extern "C" cumlError_t knn_search(const cumlHandle_t handle, float **input,
int *sizes, int n_params, int D,
float *search_items, int n, int64_t *res_I,
float *res_D, int k, bool rowMajorIndex,
bool rowMajorQuery) {
cumlError_t status;
ML::cumlHandle *handle_ptr;
std::tie(handle_ptr, status) = ML::handleMap.lookupHandlePointer(handle);
std::vector<hipStream_t> int_streams =
handle_ptr->getImpl().getInternalStreams();
// Fill the pre-sized vectors by index; push_back here would append after
// n_params default-constructed entries.
std::vector<float *> input_vec(n_params);
std::vector<int> sizes_vec(n_params);
for (int i = 0; i < n_params; i++) {
input_vec[i] = input[i];
sizes_vec[i] = sizes[i];
}
if (status == CUML_SUCCESS) {
try {
MLCommon::Selection::brute_force_knn(
input_vec, sizes_vec, D, search_items, n, res_I, res_D, k,
handle_ptr->getImpl().getDeviceAllocator(),
handle_ptr->getImpl().getStream(), int_streams.data(),
handle_ptr->getImpl().getNumInternalStreams(), rowMajorIndex,
rowMajorQuery);
} catch (...) {
status = CUML_ERROR_UNKNOWN;
}
}
return status;
}
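// Illustrative call of the flat C API above (editorial sketch: the cumlHandle_t
// and the device buffers are assumed to have been created and filled elsewhere):
//
// float *parts[2] = {d_part0, d_part1}; // device pointers to index chunks
// int part_rows[2] = {n0, n1};
// cumlError_t e = knn_search(handle, parts, part_rows, 2, D, d_queries,
// n_queries, d_out_idx, d_out_dist, k, true, true);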
| b9a3937fdbd4ea6d3a80feac7f8775c268089c4a.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/cumlHandle.hpp"
#include <cuml/common/logger.hpp>
#include <cuml/neighbors/knn.hpp>
#include "ml_mg_utils.h"
#include "label/classlabels.h"
#include "selection/knn.h"
#include <cuda_runtime.h>
#include "cuda_utils.h"
#include <sstream>
#include <vector>
namespace ML {
void brute_force_knn(cumlHandle &handle, std::vector<float *> &input,
std::vector<int> &sizes, int D, float *search_items, int n,
int64_t *res_I, float *res_D, int k, bool rowMajorIndex,
bool rowMajorQuery) {
ASSERT(input.size() == sizes.size(),
"input and sizes vectors must be the same size");
std::vector<cudaStream_t> int_streams = handle.getImpl().getInternalStreams();
MLCommon::Selection::brute_force_knn(
input, sizes, D, search_items, n, res_I, res_D, k,
handle.getImpl().getDeviceAllocator(), handle.getImpl().getStream(),
int_streams.data(), handle.getImpl().getNumInternalStreams(), rowMajorIndex,
rowMajorQuery);
}
void knn_classify(cumlHandle &handle, int *out, int64_t *knn_indices,
std::vector<int *> &y, size_t n_samples, int k) {
auto d_alloc = handle.getDeviceAllocator();
cudaStream_t stream = handle.getStream();
std::vector<int *> uniq_labels(y.size());
std::vector<int> n_unique(y.size());
for (int i = 0; i < y.size(); i++) {
MLCommon::Label::getUniqueLabels(y[i], n_samples, &(uniq_labels[i]),
&(n_unique[i]), stream, d_alloc);
}
MLCommon::Selection::knn_classify(out, knn_indices, y, n_samples, k,
uniq_labels, n_unique, d_alloc, stream);
}
void knn_regress(cumlHandle &handle, float *out, int64_t *knn_indices,
std::vector<float *> &y, size_t n_samples, int k) {
MLCommon::Selection::knn_regress(out, knn_indices, y, n_samples, k,
handle.getStream());
}
void knn_class_proba(cumlHandle &handle, std::vector<float *> &out,
int64_t *knn_indices, std::vector<int *> &y,
size_t n_samples, int k) {
auto d_alloc = handle.getDeviceAllocator();
cudaStream_t stream = handle.getStream();
std::vector<int *> uniq_labels(y.size());
std::vector<int> n_unique(y.size());
for (int i = 0; i < y.size(); i++) {
MLCommon::Label::getUniqueLabels(y[i], n_samples, &(uniq_labels[i]),
&(n_unique[i]), stream, d_alloc);
}
MLCommon::Selection::class_probs(out, knn_indices, y, n_samples, k,
uniq_labels, n_unique, d_alloc, stream);
}
kNN::kNN(const cumlHandle &handle, int D, int verbosity)
: D(D), total_n(0), indices(0) {
ML::Logger::get().setLevel(verbosity);
this->handle = const_cast<cumlHandle *>(&handle);
sizes = nullptr;
ptrs = nullptr;
}
kNN::~kNN() {
if (this->indices > 0) {
reset();
}
}
void kNN::reset() {
if (this->indices > 0) {
this->indices = 0;
this->total_n = 0;
free(this->ptrs); // allocated with malloc in fit(), so free() rather than delete[]
free(this->sizes);
}
}
/**
* Fit a kNN model by creating separate indices for the given input chunks,
* which may reside on different devices.
* @param input vector of device pointers, one per input chunk
* @param sizes vector giving the number of rows in each input chunk
* @param rowMajor is the input in row major layout?
*/
void kNN::fit(std::vector<float *> &input, std::vector<int> &sizes,
bool rowMajor) {
this->rowMajorIndex = rowMajor;
int N = input.size();
CUML_LOG_DEBUG("N=%d", N);
reset();
this->indices = N;
this->ptrs = (float **)malloc(N * sizeof(float *));
this->sizes = (int *)malloc(N * sizeof(int));
for (int i = 0; i < N; i++) {
this->ptrs[i] = input[i];
this->sizes[i] = sizes[i];
}
}
/**
* Search the kNN for the k-nearest neighbors of a set of query vectors
* @param search_items set of vectors to query for neighbors
* @param n number of items in search_items
* @param res_I pointer to device memory for returning k nearest indices
* @param res_D pointer to device memory for returning k nearest distances
* @param k number of neighbors to query
* @param rowMajor is the query array in row major layout?
*/
void kNN::search(float *search_items, int n, int64_t *res_I, float *res_D,
int k, bool rowMajor) {
ASSERT(this->indices > 0, "Cannot search before model has been trained.");
std::vector<cudaStream_t> int_streams =
handle->getImpl().getInternalStreams();
MLCommon::Selection::brute_force_knn(
ptrs, sizes, indices, D, search_items, n, res_I, res_D, k,
handle->getImpl().getDeviceAllocator(), handle->getImpl().getStream(),
int_streams.data(), handle->getImpl().getNumInternalStreams(),
this->rowMajorIndex, rowMajor);
}
}; // namespace ML
/**
* @brief Flat C API function to perform a brute force knn on
* a series of input arrays and combine the results into a single
* output array for indexes and distances.
*
* @param handle the cuml handle to use
* @param input an array of pointers to the input arrays
* @param sizes an array of sizes of input arrays
* @param n_params array size of input and sizes
* @param D the dimensionality of the arrays
* @param search_items array of items to search of dimensionality D
* @param n number of rows in search_items
* @param res_I the resulting index array of size n * k
* @param res_D the resulting distance array of size n * k
* @param k the number of nearest neighbors to return
* @param rowMajorIndex is the index array in row major layout?
* @param rowMajorQuery is the query array in row major layout?
*/
extern "C" cumlError_t knn_search(const cumlHandle_t handle, float **input,
int *sizes, int n_params, int D,
float *search_items, int n, int64_t *res_I,
float *res_D, int k, bool rowMajorIndex,
bool rowMajorQuery) {
cumlError_t status;
ML::cumlHandle *handle_ptr;
std::tie(handle_ptr, status) = ML::handleMap.lookupHandlePointer(handle);
std::vector<cudaStream_t> int_streams =
handle_ptr->getImpl().getInternalStreams();
// Fill the pre-sized vectors by index; push_back here would append after
// n_params default-constructed entries.
std::vector<float *> input_vec(n_params);
std::vector<int> sizes_vec(n_params);
for (int i = 0; i < n_params; i++) {
input_vec[i] = input[i];
sizes_vec[i] = sizes[i];
}
if (status == CUML_SUCCESS) {
try {
MLCommon::Selection::brute_force_knn(
input_vec, sizes_vec, D, search_items, n, res_I, res_D, k,
handle_ptr->getImpl().getDeviceAllocator(),
handle_ptr->getImpl().getStream(), int_streams.data(),
handle_ptr->getImpl().getNumInternalStreams(), rowMajorIndex,
rowMajorQuery);
} catch (...) {
status = CUML_ERROR_UNKNOWN;
}
}
return status;
}
|
ab09fe10a2bc3dfa520cbd76cc99ca10fc37e73d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
//code to run on the GPU
__global__ void add(float a, float b, float *c) {
*c = a + b;
}
int main(int n_args, char *args[])
{
float *dev_c;
hipMalloc((void**)&dev_c, sizeof(float));
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 1,2,dev_c); //launch add on GPU
float c;
hipMemcpy(&c,dev_c,sizeof(float),hipMemcpyDeviceToHost);
cout<<"c = "<<c<<endl;
hipFree(dev_c); // release the device allocation before exiting
return 0;
}
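// Mapping note (added commentary): this launch corresponds to the CUDA form
// "add<<<1, 1>>>(1, 2, dev_c)". hipLaunchKernelGGL takes the kernel, the grid
// and block dim3s, then the dynamic shared-memory size and the stream (both 0
// here because the triple-chevron form omitted them), followed by the kernel
// arguments.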
| ab09fe10a2bc3dfa520cbd76cc99ca10fc37e73d.cu | #include <cuda_runtime.h>
#include <iostream>
using namespace std;
//code to run on the GPU
__global__ void add(float a, float b, float *c) {
*c = a + b;
}
int main(int n_args, char *args[])
{
float *dev_c;
cudaMalloc((void**)&dev_c, sizeof(float));
add<<<1,1>>>(1,2,dev_c); //launch add on GPU
float c;
cudaMemcpy(&c,dev_c,sizeof(float),cudaMemcpyDeviceToHost);
cout<<"c = "<<c<<endl;
cudaFree(dev_c); // release the device allocation before exiting
return 0;
}
|
a8fe1f563095b48107ed371185d06ba4eb56b68a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduceUnrolling16.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *g_idata = NULL;
hipMalloc(&g_idata, XSIZE*YSIZE);
int *g_odata = NULL;
hipMalloc(&g_odata, XSIZE*YSIZE);
unsigned int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((reduceUnrolling16), dim3(gridBlock), dim3(threadBlock), 0, 0, g_idata, g_odata, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((reduceUnrolling16), dim3(gridBlock), dim3(threadBlock), 0, 0, g_idata, g_odata, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((reduceUnrolling16), dim3(gridBlock), dim3(threadBlock), 0, 0, g_idata, g_odata, n);
}
auto end = steady_clock::now();
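// Note (added observation): there is no hipDeviceSynchronize() between the
// timed launches and this timestamp, so the interval measures how long the
// 1000 launches take to be enqueued rather than guaranteed kernel completion;
// synchronizing before reading the clock would time execution instead.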
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a8fe1f563095b48107ed371185d06ba4eb56b68a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduceUnrolling16.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *g_idata = NULL;
cudaMalloc(&g_idata, XSIZE*YSIZE);
int *g_odata = NULL;
cudaMalloc(&g_odata, XSIZE*YSIZE);
unsigned int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
reduceUnrolling16<<<gridBlock,threadBlock>>>(g_idata,g_odata,n);
cudaDeviceSynchronize();
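// 10 untimed warm-up launches before the measured loop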
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
reduceUnrolling16<<<gridBlock,threadBlock>>>(g_idata,g_odata,n);
}
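// timed region: 1000 launches measured with steady_clock, reported in microseconds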
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
reduceUnrolling16<<<gridBlock,threadBlock>>>(g_idata,g_odata,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e0579298245e2716ec0cee43003067c75e364774.hip | // !!! This is a file automatically generated by hipify!!!
/**
* (C) Copyright 2020, 2021 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "forward_backward_pass.h"
#include "rpu.h"
#include "rpucuda.h"
#include "utility_functions.h"
#include <iostream>
//#include <random>
#include <chrono>
#include <cmath>
#include <memory>
namespace RPU {
/********************************************************************************
* RPUCudaSimple<T>
*********************************************************************************/
template <typename T> void RPUCudaSimple<T>::initialize(CudaContext *c) {
context_ = c;
dev_weights_ = RPU::make_unique<CudaArray<T>>(c, this->x_size_ * this->d_size_);
dev_weights_buffer_ = nullptr;
dev_fb_weights_ = nullptr;
dev_delta_weights_extern_ = nullptr;
dev_x_vector_bias_ = RPU::make_unique<CudaArray<T>>(c, this->x_size_);
dev_x_vector_bias_->setConst(1.0);
dev_d_vector_ = RPU::make_unique<CudaArray<T>>(c, this->d_size_);
dev_x_vector_ = RPU::make_unique<CudaArray<T>>(c, this->x_size_);
dev_x_matrix_bias_ = nullptr;
dev_x_matrix_bias_size_ = 0;
dev_temp_tensor_ = nullptr;
context_->synchronize();
}
template <typename T>
RPUCudaSimple<T>::RPUCudaSimple(CudaContext *c, int x_size, int d_size)
: RPUSimple<T>(x_size, d_size) {
this->initialize(c);
DEBUG_CALL(this->disp(););
DEBUG_OUT("RPUCudaSimple constructed");
}
template <typename T>
RPUCudaSimple<T>::RPUCudaSimple(hipStream_t s, int x_size, int d_size)
: RPUSimple<T>(x_size, d_size) {
shared_context_ = RPU::make_unique<CudaContext>(s);
this->initialize(&*shared_context_);
DEBUG_CALL(this->disp(););
DEBUG_OUT("RPUCudaSimple constructed (on shared stream)");
}
template <typename T> void RPUCudaSimple<T>::initFrom(const RPUSimple<T> &rpu) {
// this is private and called from constructors below
this->setWeights(rpu.getWeightsPtr()[0]);
}
template <typename T>
RPUCudaSimple<T>::RPUCudaSimple(CudaContext *c, RPUSimple<T> &o) : RPUSimple<T>(o) {
this->initialize(c);
initFrom(o);
DEBUG_OUT("RPUCudaSimple constructed from RPUSimple");
}
template <typename T>
RPUCudaSimple<T>::RPUCudaSimple(hipStream_t s, RPUSimple<T> &o) : RPUSimple<T>(o) {
// we are using the copy constructor of the base class RPUSimple
shared_context_ = RPU::make_unique<CudaContext>(s);
this->initialize(&*shared_context_);
initFrom(o);
DEBUG_OUT("RPUCudaSimple constructed from RPUSimple on shared stream");
}
template <typename T> RPUCudaSimple<T>::~RPUCudaSimple() {
// no need to care about the shared_pointers
}
// copy constructor
template <typename T>
RPUCudaSimple<T>::RPUCudaSimple(const RPUCudaSimple<T> &other) : RPUSimple<T>(other) {
other.context_->synchronizeContext();
this->initialize(other.context_); // private
// note: CUDA event/stream logic should be unique. It is only copied if already shared
shared_context_ = other.shared_context_;
if (other.dev_weights_) {
dev_weights_->assign(*other.dev_weights_);
}
// note: cannot copy external shared weight pointer... user needs to
// ensure that again setShared is called after copying
shared_weights_if_ = false;
if (other.dev_weights_buffer_) {
getWeightsBufferCuda(); // to initialize
dev_weights_buffer_->assign(*other.dev_weights_buffer_);
}
if (other.dev_fb_weights_) {
dev_fb_weights_ = RPU::make_unique<CudaArray<T>>(context_, this->x_size_ * this->d_size_);
dev_fb_weights_->assign(*other.dev_fb_weights_);
}
// cannot copy external weight pointer... user needs to call it again
dev_delta_weights_extern_ = nullptr;
if (other.fb_wmodifier_cuda_) {
// no copy... just new..
fb_wmodifier_cuda_ =
RPU::make_unique<WeightModifierCuda<T>>(context_, this->x_size_, this->d_size_);
}
if (other.wdrifter_cuda_) {
wdrifter_cuda_ = RPU::make_unique<WeightDrifterCuda<T>>(*other.wdrifter_cuda_);
}
// no copy
wclipper_cuda_ = nullptr;
dev_x_vector_->assign(*other.dev_x_vector_);
dev_d_vector_->assign(*other.dev_d_vector_);
dev_x_vector_bias_->assign(*other.dev_x_vector_bias_);
// do not care to copy the matrix/tensor/rnd buffers (will init automatically)
context_->synchronizeContext();
DEBUG_CALL(this->disp(););
DEBUG_OUT("RPUCuda_Simple copy constructed.");
}
// copy assignment
template <typename T> RPUCudaSimple<T> &RPUCudaSimple<T>::operator=(const RPUCudaSimple<T> &other) {
RPUCudaSimple<T> tmp(other);
swap(*this, tmp);
return *this;
}
// move constructor
template <typename T> RPUCudaSimple<T>::RPUCudaSimple(RPUCudaSimple<T> &&other) {
*this = std::move(other);
}
// move assignment
template <typename T> RPUCudaSimple<T> &RPUCudaSimple<T>::operator=(RPUCudaSimple<T> &&other) {
RPUSimple<T>::operator=(std::move(other));
context_ = other.context_;
other.context_ = nullptr;
shared_context_ = other.shared_context_;
other.shared_context_ = nullptr;
dev_weights_ = std::move(other.dev_weights_);
dev_weights_buffer_ = std::move(other.dev_weights_buffer_);
dev_fb_weights_ = std::move(other.dev_fb_weights_);
dev_delta_weights_extern_ = other.dev_delta_weights_extern_;
other.dev_delta_weights_extern_ = nullptr;
dev_x_vector_ = std::move(other.dev_x_vector_);
dev_d_vector_ = std::move(other.dev_d_vector_);
dev_x_vector_bias_ = std::move(other.dev_x_vector_bias_);
dev_x_matrix_bias_ = std::move(other.dev_x_matrix_bias_);
dev_x_matrix_bias_size_ = other.dev_x_matrix_bias_size_;
other.dev_x_matrix_bias_size_ = 0;
dev_temp_tensor_ = std::move(other.dev_temp_tensor_);
rnd_diffusion_context_ = std::move(other.rnd_diffusion_context_);
dev_diffusion_nrnd_ = std::move(other.dev_diffusion_nrnd_);
wdrifter_cuda_ = std::move(other.wdrifter_cuda_);
shared_weights_if_ = other.shared_weights_if_;
return *this;
}
/***************************************************************************************/
template <typename T> void RPUCudaSimple<T>::copyWeightsToHost(T *weightsptr) const {
// copies the weights to the host and returns weight pointer, without changing the simple weights
DEBUG_OUT("RPUCuda: Get weights.");
T **transposed_weights = Array_2D_Get<T>(this->x_size_, this->d_size_);
context_->synchronizeDevice();
dev_weights_->copyTo(transposed_weights[0]);
for (int i = 0; i < this->d_size_; ++i) {
for (int j = 0; j < this->x_size_; ++j) {
weightsptr[j + this->x_size_ * i] = transposed_weights[j][i];
}
}
Array_2D_Free<T>(transposed_weights);
}
template <typename T> T **RPUCudaSimple<T>::copyWeightsToHost() {
// copies the weights to the host by using the simple weights and returns weight pointer
this->copyWeightsToHost(RPUSimple<T>::getWeightsPtr()[0]);
return RPUSimple<T>::getWeightsPtr();
}
template <typename T> T **RPUCudaSimple<T>::getWeights() { return this->copyWeightsToHost(); }
template <typename T> void RPUCudaSimple<T>::getWeights(T *weightsptr) const {
this->copyWeightsToHost(weightsptr);
}
template <typename T> void RPUCudaSimple<T>::setWeights(const T *host_source) {
// expects row order weights as source and sets the device weights
RPUSimple<T>::setWeights(host_source);
dev_weights_->assignTranspose(
RPUSimple<T>::getWeightsPtr()[0], this->getDSize(), this->getXSize());
}
template <typename T> void RPUCudaSimple<T>::setSharedWeights(T *device_source) {
if (device_source != dev_weights_->getData()) {
if (!this->shared_weights_if_) {
context_->synchronizeDevice();
dev_weights_->copyTo(device_source);
}
dev_weights_->synchronize();
dev_weights_->setShared(device_source);
this->shared_weights_if_ = true;
}
}
template <typename T>
void RPUCudaSimple<T>::getAndResetWeightUpdate(T *prev_weight_and_dw_out, T scale) {
RPU::math::elemsubcopy<T>(
context_, dev_weights_->getData(), prev_weight_and_dw_out, dev_weights_->getSize(), scale);
}
template <typename T> void RPUCudaSimple<T>::applyWeightUpdate(T *dw_and_current_weight_out) {
RPU::math::elemaddcopy<T>(
context_, dev_weights_->getData(), dw_and_current_weight_out, dev_weights_->getSize());
}
template <typename T> void RPUCudaSimple<T>::setWeightsUniformRandom(T min_value, T max_value) {
DEBUG_OUT("RPUCudaSimple weights init [" << min_value << "," << max_value << "]");
RPUSimple<T>::setWeightsUniformRandom(min_value, max_value);
this->setWeights(this->weights_[0]); // copies to CPU twice. Ignore
}
template <typename T> void RPUCudaSimple<T>::printToStream(std::stringstream &ss) const {
if (sizeof(T) == 4) {
ss << "RPUCudaSimple<float>(" << this->d_size_ << "," << this->x_size_ << ")\n";
} else {
ss << "RPUCudaSimple<double>(" << this->d_size_ << "," << this->x_size_ << ")\n";
}
};
/*********************************************************************************/
/*MATRIX/BIAS updates etc*********************************************************/
template <typename T> T *RPUCudaSimple<T>::getWeightsBufferCuda() {
if (dev_weights_buffer_ == nullptr) {
dev_weights_buffer_ =
RPU::make_unique<CudaArray<T>>(this->context_, this->x_size_ * this->d_size_);
}
return dev_weights_buffer_->getData();
};
template <typename T> void RPUCudaSimple<T>::copyWeightsFromBuffer() {
RPU::math::copy<T>(
this->context_, this->x_size_ * this->d_size_, getWeightsBufferCuda(), 1,
dev_weights_->getData(), 1);
}
template <typename T> void RPUCudaSimple<T>::copyWeightsToBuffer() {
RPU::math::copy<T>(
this->context_, this->x_size_ * this->d_size_, dev_weights_->getData(), 1,
getWeightsBufferCuda(), 1);
}
template <typename T> T *RPUCudaSimple<T>::getMatrixBiasBuffer(int m_batch) {
if (m_batch != dev_x_matrix_bias_size_) {
DEBUG_OUT("Get new buffer size " << m_batch);
dev_x_matrix_bias_ = nullptr;
dev_x_matrix_bias_size_ = m_batch;
dev_x_matrix_bias_ =
RPU::make_unique<CudaArray<T>>(this->context_, this->x_size_ * dev_x_matrix_bias_size_);
}
return dev_x_matrix_bias_->getData();
}
template <typename T>
T *RPUCudaSimple<T>::copyToMatrixBiasBuffer(
const T *X_input_without_bias, int m_batch, bool x_trans) {
T *bias_buffer = this->getMatrixBiasBuffer(m_batch);
RPU::math::makeBias<T>(
this->context_, bias_buffer, X_input_without_bias, this->x_size_, m_batch, x_trans);
return bias_buffer;
};
template <typename T>
void RPUCudaSimple<T>::copyFromMatrixBiasBuffer(
T *X_input_without_bias, int m_batch, bool x_trans) {
if ((m_batch != dev_x_matrix_bias_size_) || (dev_x_matrix_bias_ == nullptr)) {
RPU_FATAL("Buffer size mismatch. This should never happen!")
}
RPU::math::copyWithoutBias<T>(
this->context_, X_input_without_bias, dev_x_matrix_bias_->getData(), this->x_size_, m_batch,
x_trans);
};
template <typename T>
T *RPUCudaSimple<T>::copyToVectorBiasBuffer(const T *x_input_without_bias, int x_inc) {
RPU::math::copy<T>(
context_, this->x_size_ - 1, x_input_without_bias, x_inc, dev_x_vector_bias_->getData(), 1);
// last value of dev_x_vector_bias_ is set to 1 at the initialization (and never changed)!!
return dev_x_vector_bias_->getData();
};
template <typename T>
void RPUCudaSimple<T>::copyFromVectorBiasBuffer(T *x_output_without_bias, int x_inc) {
RPU::math::copy<T>(
context_, this->x_size_ - 1, dev_x_vector_bias_->getData(), 1, x_output_without_bias, x_inc);
};
/*********************************************************************************/
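// getTensorBuffer hands out pointers into one shared device scratch array: the x part (x_size*dim3*m_batch) is followed by the d part (d_size*dim3*m_batch); the array is reallocated only when it is too small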
template <typename T>
void RPUCudaSimple<T>::getTensorBuffer(T **x_tensor, T **d_tensor, int m_batch, int dim3) {
int x_size = this->getXSize();
int d_size = this->getDSize();
int n = (x_size + d_size) * dim3 * m_batch;
if ((dev_temp_tensor_ == nullptr) || (dev_temp_tensor_->getSize() < n)) {
dev_temp_tensor_ = RPU::make_unique<CudaArray<T>>(context_, n);
}
*x_tensor = dev_temp_tensor_->getData();
*d_tensor = *x_tensor + (x_size)*dim3 * m_batch;
}
template <typename T>
void RPUCudaSimple<T>::permute132(
T *out_tensor, const T *in_tensor, int dim1, int dim2, int dim3, bool bias2) {
RPU::math::permute132<T>(context_, out_tensor, in_tensor, dim1, dim2, dim3, bias2);
}
/*********************************************************************************/
template <typename T>
void RPUCudaSimple<T>::copyIndexedInput(
T *out_tensor,
const T *in_tensor,
const int total_input_size,
const int *indices,
const int size,
const int m_batch,
const int dim3,
const bool trans,
const int m_batch_slice,
const int *batch_indices) {
bool batch_slice = m_batch_slice > 0;
if (batch_slice) {
if (trans) {
IndexReaderSliceInputIterator<true, T> in_iter(
in_tensor, indices, total_input_size / dim3, size, m_batch, dim3, m_batch_slice,
batch_indices);
RPU::math::copyWithIterator(context_, out_tensor, in_iter, m_batch_slice * size * dim3);
} else {
IndexReaderSliceInputIterator<false, T> in_iter(
in_tensor, indices, total_input_size / dim3, size, m_batch, dim3, m_batch_slice,
batch_indices);
RPU::math::copyWithIterator(context_, out_tensor, in_iter, m_batch_slice * size * dim3);
}
} else {
if (trans) {
IndexReaderTransInputIterator<T> in_iter(
in_tensor, indices, total_input_size / dim3, m_batch, size * m_batch, m_batch * dim3);
RPU::math::copyWithIterator(context_, out_tensor, in_iter, m_batch * size * dim3);
} else {
IndexReaderInputIterator<T> in_iter(
in_tensor, indices, total_input_size / dim3, size * m_batch);
RPU::math::copyWithIterator(context_, out_tensor, in_iter, m_batch * size * dim3);
}
}
}
template <typename T>
void RPUCudaSimple<T>::copyIndexedOutput(
T *out_tensor,
const T *in_tensor,
const int total_output_size,
const int *indices,
const int size,
const int m_batch,
const int dim3,
const bool trans,
const int m_batch_slice,
const int *batch_indices) {
// CAUTION: NO zeroing. Needs to be done from outside.
bool batch_slice = m_batch_slice > 0;
if (batch_slice) {
if (trans) {
IndexReaderSliceOutputIterator<true, T> out_iter(
out_tensor, indices, total_output_size / dim3, size, m_batch, dim3, m_batch_slice,
batch_indices);
RPU::math::copyWithIterator(context_, out_iter, in_tensor, m_batch_slice * size * dim3);
} else {
IndexReaderSliceOutputIterator<false, T> out_iter(
out_tensor, indices, total_output_size / dim3, size, m_batch, dim3, m_batch_slice,
batch_indices);
RPU::math::copyWithIterator(context_, out_iter, in_tensor, m_batch_slice * size * dim3);
}
} else {
if (trans) {
IndexReaderTransOutputIterator<T> out_iter(
out_tensor, indices, total_output_size / dim3, m_batch, size * m_batch, m_batch * dim3);
RPU::math::copyWithIterator(context_, out_iter, in_tensor, size * m_batch * dim3);
} else {
IndexReaderOutputIterator<T> out_iter(
out_tensor, indices, total_output_size / dim3, size * m_batch);
RPU::math::copyWithIterator(context_, out_iter, in_tensor, size * m_batch * dim3);
}
}
}
template <typename T>
void RPUCudaSimple<T>::copySliceInput(
T *out_tensor,
const T *in_tensor,
const int size,
const int m_batch,
const int dim3,
const bool trans,
const int m_batch_slice,
const int *batch_indices) {
if (trans) {
SliceInputIterator<true, T> in_iter(
in_tensor, size, m_batch, dim3, m_batch_slice, batch_indices);
RPU::math::copyWithIterator(context_, out_tensor, in_iter, m_batch_slice * size * dim3);
} else {
SliceInputIterator<false, T> in_iter(
in_tensor, size, m_batch, dim3, m_batch_slice, batch_indices);
RPU::math::copyWithIterator(context_, out_tensor, in_iter, m_batch_slice * size * dim3);
}
}
template <typename T>
void RPUCudaSimple<T>::copySliceOutput(
T *out_tensor,
const T *in_tensor,
const int size,
const int m_batch,
const int dim3,
const bool trans,
const int m_batch_slice,
const int *batch_indices) {
if (trans) {
SliceOutputIterator<true, T> out_iter(
out_tensor, size, m_batch, dim3, m_batch_slice, batch_indices);
RPU::math::copyWithIterator(context_, out_iter, in_tensor, m_batch_slice * size * dim3);
} else {
SliceOutputIterator<false, T> out_iter(
out_tensor, size, m_batch, dim3, m_batch_slice, batch_indices);
RPU::math::copyWithIterator(context_, out_iter, in_tensor, m_batch_slice * size * dim3);
}
}
/*********************************************************************************/
template <typename T>
void RPUCudaSimple<T>::forwardMatrix(
const T *X_input, T *D_output, int m_batch, bool x_trans, bool d_trans, bool is_test) {
RPU::detail::forwardMatrix(
this->context_, getFBWeightsCuda(is_test), X_input, this->x_size_, x_trans, D_output,
this->d_size_, d_trans, m_batch, this->getFwdAlpha());
};
/*********************************************************************************/
template <typename T>
void RPUCudaSimple<T>::backwardMatrix(
const T *D_input, T *X_output, int m_batch, bool d_trans, bool x_trans) {
RPU::detail::backwardMatrix(
this->context_, getFBWeightsCuda(false), D_input, this->d_size_, d_trans, X_output,
this->x_size_, x_trans, m_batch, this->getBwdAlpha());
};
/*********************************************************************************/
template <typename T> inline T *RPUCudaSimple<T>::getUpWeightsCuda() {
// This is called from the Update routines to check which weight
// is used for calculation. If dw is defined, then it will use the
// DW mode, meaning that it will write into delta_weights the DW
// and keep the weights. For HW RPU models that might include
// first using W and then writing the difference to dW
//
// it also handles the delayed weights
if (this->use_delayed_update_) {
return getWeightsBufferCuda();
} else {
return (dev_delta_weights_extern_ != nullptr) ? dev_delta_weights_extern_
: dev_weights_->getData();
}
}
/*********************************************************************************/
template <typename T>
void RPUCudaSimple<T>::updateMatrix(
const T *X_input, const T *D_input, int m_batch, bool x_trans, bool d_trans) {
this->last_update_m_batch_ = m_batch;
RPU::math::gemm<T>(
context_, d_trans, !x_trans,
this->d_size_, // M
this->x_size_, // N
m_batch, // K
-this->getAlphaLearningRate(), D_input, d_trans ? m_batch : this->d_size_, X_input,
x_trans ? m_batch : this->x_size_, this->getUpBeta(), this->getUpWeightsCuda(),
this->d_size_);
}
/*********************************************************************************/
/* vector forward/backward/update */
template <typename T>
void RPUCudaSimple<T>::forwardVector(
const T *x_input, T *d_output, int x_inc, int d_inc, bool is_test) {
RPU::math::gemv<T>(
context_, false, this->d_size_, this->x_size_, this->getFwdAlpha(), getFBWeightsCuda(is_test),
this->d_size_, // because of *column* major format !
x_input, x_inc, (T)0.0, d_output, d_inc);
// NOTE: no synchronization is done here. Assume all inputs are on the same stream
// if not, synchronization of contexts has to be done externally.
};
template <typename T>
void RPUCudaSimple<T>::backwardVector(const T *d_input, T *x_output, int d_inc, int x_inc) {
RPU::math::gemv<T>(
context_,
true, // transpose
this->d_size_, this->x_size_, this->getBwdAlpha(), getFBWeightsCuda(false), this->d_size_,
d_input, d_inc, (T)0.0, x_output, x_inc);
};
template <typename T>
void RPUCudaSimple<T>::updateVector(const T *x_input, const T *d_input, int x_inc, int d_inc) {
if (!this->getDeltaWeights()) {
RPU::math::ger<T>(
context_, this->d_size_, this->x_size_, -this->getAlphaLearningRate(), d_input, d_inc,
x_input, x_inc, getUpWeightsCuda(), this->d_size_);
} else {
if (x_inc == 1 && d_inc == 1) {
RPUCudaSimple<T>::updateMatrix(x_input, d_input, 1, false, false);
} else {
RPU_FATAL("Update_Vector for delta weights and xd_inc>1 is not implemented.");
}
}
}
/*********************************************************************************/
template <typename T> void RPUCudaSimple<T>::decayWeights(T alpha, bool bias_no_decay) {
T lifetime = this->getPar().lifetime;
T decay_rate = (lifetime > 1) ? (1.0 / lifetime) : 0.0;
T decay_scale = 1.0 - alpha * decay_rate;
if (decay_scale > 0.0 && decay_scale < 1.0) {
int size = this->x_size_ * this->d_size_;
// we have d_size major, ie col-major. Thus bias is just at the end
RPU::math::scal<T>(
context_, bias_no_decay ? MAX(size - this->d_size_, 0) : size, decay_scale,
dev_weights_->getData(), 1);
}
}
template <typename T> void RPUCudaSimple<T>::decayWeights(bool bias_no_decay) {
RPUCudaSimple<T>::decayWeights(1.0, bias_no_decay);
}
/*********************************************************************************/
template <typename T> void RPUCudaSimple<T>::driftWeights(T time_since_last_call) {
if (wdrifter_cuda_ == nullptr) {
auto wd =
WeightDrifter<T>(this->x_size_ * this->d_size_, this->getPar().drift); // forces simple
wdrifter_cuda_ =
RPU::make_unique<WeightDrifterCuda<T>>(this->context_, wd, this->x_size_, this->d_size_);
context_->synchronize();
}
wdrifter_cuda_->apply(dev_weights_->getData(), time_since_last_call);
}
/*********************************************************************************/
template <typename T> void RPUCudaSimple<T>::clipWeights(T clip) {
if (clip >= 0) {
RPU::math::aclip<T>(context_, dev_weights_->getData(), dev_weights_->getSize(), clip);
}
}
template <typename T> void RPUCudaSimple<T>::clipWeights(const WeightClipParameter &wclpar) {
if (!wclipper_cuda_) {
wclipper_cuda_ =
make_unique<WeightClipperCuda<T>>(this->context_, this->x_size_, this->d_size_);
}
wclipper_cuda_->apply(dev_weights_->getData(), wclpar);
}
/*********************************************************************************/
template <typename T> void RPUCudaSimple<T>::diffuseWeights() {
T diffusion = this->getPar().diffusion;
if (diffusion <= 0.0) {
return;
}
if (rnd_diffusion_context_ == nullptr) {
// first time: init
rnd_diffusion_context_ = RPU::make_unique<CudaContext>(context_->getGPUId());
dev_diffusion_nrnd_ = RPU::make_unique<CudaArray<float>>(
&*rnd_diffusion_context_, (dev_weights_->getSize() + 31) / 32 * 32);
rnd_diffusion_context_->setRandomSeed(0);
rnd_diffusion_context_->randNormal(
dev_diffusion_nrnd_->getData(), dev_diffusion_nrnd_->getSize());
}
context_->recordWaitEvent(rnd_diffusion_context_->getStream());
RPU::math::elemaddscale<T>(
context_, dev_weights_->getData(), dev_weights_->getSize(), dev_diffusion_nrnd_->getData(),
diffusion);
rnd_diffusion_context_->recordWaitEvent(context_->getStream());
rnd_diffusion_context_->randNormal(
dev_diffusion_nrnd_->getData(), dev_diffusion_nrnd_->getSize());
}
/***********************************************************************/
template <typename T> void RPUCudaSimple<T>::setDeltaWeights(T *dw_extern) {
ENFORCE_NO_DELAYED_UPDATE;
dev_delta_weights_extern_ = dw_extern;
}
template <typename T> T *RPUCudaSimple<T>::getFBWeightsCuda(bool is_test) const {
bool use_fb = dev_fb_weights_ &&
(!is_test || (fb_wmodifier_cuda_ && fb_wmodifier_cuda_->enableDuringTest()));
return use_fb ? dev_fb_weights_->getData() : dev_weights_->getData();
}
template <typename T> void RPUCudaSimple<T>::modifyFBWeights(const WeightModifierParameter &wmpar) {
ENFORCE_NO_DELAYED_UPDATE; // will get confused with the buffer
if (dev_fb_weights_ == nullptr) {
dev_fb_weights_ = RPU::make_unique<CudaArray<T>>(context_, this->x_size_ * this->d_size_);
fb_wmodifier_cuda_ =
RPU::make_unique<WeightModifierCuda<T>>(context_, this->x_size_, this->d_size_);
context_->synchronize();
}
// modify FB weights
fb_wmodifier_cuda_->apply(dev_fb_weights_->getData(), dev_weights_->getDataConst(), wmpar);
}
/*********************************************************************************/
template <typename T> void RPUCudaSimple<T>::printWeights(int x_count, int d_count) {
this->copyWeightsToHost(this->weights_[0]);
RPUSimple<T>::printWeights(x_count, d_count);
}
template class RPUCudaSimple<float>;
#ifdef RPU_USE_DOUBLE
template class RPUCudaSimple<double>;
#endif
} // namespace RPU
| e0579298245e2716ec0cee43003067c75e364774.cu | /**
* (C) Copyright 2020, 2021 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "forward_backward_pass.h"
#include "rpu.h"
#include "rpucuda.h"
#include "utility_functions.h"
#include <iostream>
//#include <random>
#include <chrono>
#include <cmath>
#include <memory>
namespace RPU {
/********************************************************************************
* RPUCudaSimple<T>
*********************************************************************************/
template <typename T> void RPUCudaSimple<T>::initialize(CudaContext *c) {
context_ = c;
dev_weights_ = RPU::make_unique<CudaArray<T>>(c, this->x_size_ * this->d_size_);
dev_weights_buffer_ = nullptr;
dev_fb_weights_ = nullptr;
dev_delta_weights_extern_ = nullptr;
dev_x_vector_bias_ = RPU::make_unique<CudaArray<T>>(c, this->x_size_);
dev_x_vector_bias_->setConst(1.0);
dev_d_vector_ = RPU::make_unique<CudaArray<T>>(c, this->d_size_);
dev_x_vector_ = RPU::make_unique<CudaArray<T>>(c, this->x_size_);
dev_x_matrix_bias_ = nullptr;
dev_x_matrix_bias_size_ = 0;
dev_temp_tensor_ = nullptr;
context_->synchronize();
}
template <typename T>
RPUCudaSimple<T>::RPUCudaSimple(CudaContext *c, int x_size, int d_size)
: RPUSimple<T>(x_size, d_size) {
this->initialize(c);
DEBUG_CALL(this->disp(););
DEBUG_OUT("RPUCudaSimple constructed");
}
template <typename T>
RPUCudaSimple<T>::RPUCudaSimple(cudaStream_t s, int x_size, int d_size)
: RPUSimple<T>(x_size, d_size) {
shared_context_ = RPU::make_unique<CudaContext>(s);
this->initialize(&*shared_context_);
DEBUG_CALL(this->disp(););
DEBUG_OUT("RPUCudaSimple constructed (on shared stream)");
}
template <typename T> void RPUCudaSimple<T>::initFrom(const RPUSimple<T> &rpu) {
// this is private and called from constructors below
this->setWeights(rpu.getWeightsPtr()[0]);
}
template <typename T>
RPUCudaSimple<T>::RPUCudaSimple(CudaContext *c, RPUSimple<T> &o) : RPUSimple<T>(o) {
this->initialize(c);
initFrom(o);
DEBUG_OUT("RPUCudaSimple constructed from RPUSimple");
}
template <typename T>
RPUCudaSimple<T>::RPUCudaSimple(cudaStream_t s, RPUSimple<T> &o) : RPUSimple<T>(o) {
// we are using the copy constructor of the base class RPUSimple
shared_context_ = RPU::make_unique<CudaContext>(s);
this->initialize(&*shared_context_);
initFrom(o);
DEBUG_OUT("RPUCudaSimple constructed from RPUSimple on shared stream");
}
template <typename T> RPUCudaSimple<T>::~RPUCudaSimple() {
// no need to care about the shared_pointers
}
// copy constructor
template <typename T>
RPUCudaSimple<T>::RPUCudaSimple(const RPUCudaSimple<T> &other) : RPUSimple<T>(other) {
other.context_->synchronizeContext();
this->initialize(other.context_); // private
// note: CUDA event/stream logic should be unique. It is only copied if already shared
shared_context_ = other.shared_context_;
if (other.dev_weights_) {
dev_weights_->assign(*other.dev_weights_);
}
// note: cannot copy external shared weight pointer... user needs to
// ensure that again setShared is called after copying
shared_weights_if_ = false;
if (other.dev_weights_buffer_) {
getWeightsBufferCuda(); // to initialize
dev_weights_buffer_->assign(*other.dev_weights_buffer_);
}
if (other.dev_fb_weights_) {
dev_fb_weights_ = RPU::make_unique<CudaArray<T>>(context_, this->x_size_ * this->d_size_);
dev_fb_weights_->assign(*other.dev_fb_weights_);
}
// cannot copy external weight pointer... user needs to call it again
dev_delta_weights_extern_ = nullptr;
if (other.fb_wmodifier_cuda_) {
// no copy... just new..
fb_wmodifier_cuda_ =
RPU::make_unique<WeightModifierCuda<T>>(context_, this->x_size_, this->d_size_);
}
if (other.wdrifter_cuda_) {
wdrifter_cuda_ = RPU::make_unique<WeightDrifterCuda<T>>(*other.wdrifter_cuda_);
}
// no copy
wclipper_cuda_ = nullptr;
dev_x_vector_->assign(*other.dev_x_vector_);
dev_d_vector_->assign(*other.dev_d_vector_);
dev_x_vector_bias_->assign(*other.dev_x_vector_bias_);
// do not care to copy the matrix/tensor/rnd buffers (will init automatically)
context_->synchronizeContext();
DEBUG_CALL(this->disp(););
DEBUG_OUT("RPUCuda_Simple copy constructed.");
}
// copy assignment
template <typename T> RPUCudaSimple<T> &RPUCudaSimple<T>::operator=(const RPUCudaSimple<T> &other) {
RPUCudaSimple<T> tmp(other);
swap(*this, tmp);
return *this;
}
// move constructor
template <typename T> RPUCudaSimple<T>::RPUCudaSimple(RPUCudaSimple<T> &&other) {
*this = std::move(other);
}
// move assignment
template <typename T> RPUCudaSimple<T> &RPUCudaSimple<T>::operator=(RPUCudaSimple<T> &&other) {
RPUSimple<T>::operator=(std::move(other));
context_ = other.context_;
other.context_ = nullptr;
shared_context_ = other.shared_context_;
other.shared_context_ = nullptr;
dev_weights_ = std::move(other.dev_weights_);
dev_weights_buffer_ = std::move(other.dev_weights_buffer_);
dev_fb_weights_ = std::move(other.dev_fb_weights_);
dev_delta_weights_extern_ = other.dev_delta_weights_extern_;
other.dev_delta_weights_extern_ = nullptr;
dev_x_vector_ = std::move(other.dev_x_vector_);
dev_d_vector_ = std::move(other.dev_d_vector_);
dev_x_vector_bias_ = std::move(other.dev_x_vector_bias_);
dev_x_matrix_bias_ = std::move(other.dev_x_matrix_bias_);
dev_x_matrix_bias_size_ = other.dev_x_matrix_bias_size_;
other.dev_x_matrix_bias_size_ = 0;
dev_temp_tensor_ = std::move(other.dev_temp_tensor_);
rnd_diffusion_context_ = std::move(other.rnd_diffusion_context_);
dev_diffusion_nrnd_ = std::move(other.dev_diffusion_nrnd_);
wdrifter_cuda_ = std::move(other.wdrifter_cuda_);
shared_weights_if_ = other.shared_weights_if_;
return *this;
}
/***************************************************************************************/
template <typename T> void RPUCudaSimple<T>::copyWeightsToHost(T *weightsptr) const {
// copies the weights to the host and returns weight pointer, without changing the simple weights
DEBUG_OUT("RPUCuda: Get weights.");
T **transposed_weights = Array_2D_Get<T>(this->x_size_, this->d_size_);
context_->synchronizeDevice();
dev_weights_->copyTo(transposed_weights[0]);
for (int i = 0; i < this->d_size_; ++i) {
for (int j = 0; j < this->x_size_; ++j) {
weightsptr[j + this->x_size_ * i] = transposed_weights[j][i];
}
}
Array_2D_Free<T>(transposed_weights);
}
template <typename T> T **RPUCudaSimple<T>::copyWeightsToHost() {
// copies the weights to the host by using the simple weights and returns weight pointer
this->copyWeightsToHost(RPUSimple<T>::getWeightsPtr()[0]);
return RPUSimple<T>::getWeightsPtr();
}
template <typename T> T **RPUCudaSimple<T>::getWeights() { return this->copyWeightsToHost(); }
template <typename T> void RPUCudaSimple<T>::getWeights(T *weightsptr) const {
this->copyWeightsToHost(weightsptr);
}
template <typename T> void RPUCudaSimple<T>::setWeights(const T *host_source) {
// expects row order weights as source and sets the device weights
RPUSimple<T>::setWeights(host_source);
dev_weights_->assignTranspose(
RPUSimple<T>::getWeightsPtr()[0], this->getDSize(), this->getXSize());
}
template <typename T> void RPUCudaSimple<T>::setSharedWeights(T *device_source) {
if (device_source != dev_weights_->getData()) {
if (!this->shared_weights_if_) {
context_->synchronizeDevice();
dev_weights_->copyTo(device_source);
}
dev_weights_->synchronize();
dev_weights_->setShared(device_source);
this->shared_weights_if_ = true;
}
}
template <typename T>
void RPUCudaSimple<T>::getAndResetWeightUpdate(T *prev_weight_and_dw_out, T scale) {
RPU::math::elemsubcopy<T>(
context_, dev_weights_->getData(), prev_weight_and_dw_out, dev_weights_->getSize(), scale);
}
template <typename T> void RPUCudaSimple<T>::applyWeightUpdate(T *dw_and_current_weight_out) {
RPU::math::elemaddcopy<T>(
context_, dev_weights_->getData(), dw_and_current_weight_out, dev_weights_->getSize());
}
template <typename T> void RPUCudaSimple<T>::setWeightsUniformRandom(T min_value, T max_value) {
DEBUG_OUT("RPUCudaSimple weights init [" << min_value << "," << max_value << "]");
RPUSimple<T>::setWeightsUniformRandom(min_value, max_value);
this->setWeights(this->weights_[0]); // copies to CPU twice. Ignore
}
template <typename T> void RPUCudaSimple<T>::printToStream(std::stringstream &ss) const {
if (sizeof(T) == 4) {
ss << "RPUCudaSimple<float>(" << this->d_size_ << "," << this->x_size_ << ")\n";
} else {
ss << "RPUCudaSimple<double>(" << this->d_size_ << "," << this->x_size_ << ")\n";
}
};
/*********************************************************************************/
/*MATRIX/BIAS updates etc*********************************************************/
template <typename T> T *RPUCudaSimple<T>::getWeightsBufferCuda() {
if (dev_weights_buffer_ == nullptr) {
dev_weights_buffer_ =
RPU::make_unique<CudaArray<T>>(this->context_, this->x_size_ * this->d_size_);
}
return dev_weights_buffer_->getData();
};
template <typename T> void RPUCudaSimple<T>::copyWeightsFromBuffer() {
RPU::math::copy<T>(
this->context_, this->x_size_ * this->d_size_, getWeightsBufferCuda(), 1,
dev_weights_->getData(), 1);
}
template <typename T> void RPUCudaSimple<T>::copyWeightsToBuffer() {
RPU::math::copy<T>(
this->context_, this->x_size_ * this->d_size_, dev_weights_->getData(), 1,
getWeightsBufferCuda(), 1);
}
template <typename T> T *RPUCudaSimple<T>::getMatrixBiasBuffer(int m_batch) {
if (m_batch != dev_x_matrix_bias_size_) {
DEBUG_OUT("Get new buffer size " << m_batch);
dev_x_matrix_bias_ = nullptr;
dev_x_matrix_bias_size_ = m_batch;
dev_x_matrix_bias_ =
RPU::make_unique<CudaArray<T>>(this->context_, this->x_size_ * dev_x_matrix_bias_size_);
}
return dev_x_matrix_bias_->getData();
}
template <typename T>
T *RPUCudaSimple<T>::copyToMatrixBiasBuffer(
const T *X_input_without_bias, int m_batch, bool x_trans) {
T *bias_buffer = this->getMatrixBiasBuffer(m_batch);
RPU::math::makeBias<T>(
this->context_, bias_buffer, X_input_without_bias, this->x_size_, m_batch, x_trans);
return bias_buffer;
};
template <typename T>
void RPUCudaSimple<T>::copyFromMatrixBiasBuffer(
T *X_input_without_bias, int m_batch, bool x_trans) {
if ((m_batch != dev_x_matrix_bias_size_) || (dev_x_matrix_bias_ == nullptr)) {
RPU_FATAL("Buffer size mismatch. This should never happen!")
}
RPU::math::copyWithoutBias<T>(
this->context_, X_input_without_bias, dev_x_matrix_bias_->getData(), this->x_size_, m_batch,
x_trans);
};
template <typename T>
T *RPUCudaSimple<T>::copyToVectorBiasBuffer(const T *x_input_without_bias, int x_inc) {
RPU::math::copy<T>(
context_, this->x_size_ - 1, x_input_without_bias, x_inc, dev_x_vector_bias_->getData(), 1);
// last value of dev_x_vector_bias_ is set to 1 at the initialization (and never changed)!!
return dev_x_vector_bias_->getData();
};
template <typename T>
void RPUCudaSimple<T>::copyFromVectorBiasBuffer(T *x_output_without_bias, int x_inc) {
RPU::math::copy<T>(
context_, this->x_size_ - 1, dev_x_vector_bias_->getData(), 1, x_output_without_bias, x_inc);
};
/*********************************************************************************/
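// getTensorBuffer hands out pointers into one shared device scratch array: the x part (x_size*dim3*m_batch) is followed by the d part (d_size*dim3*m_batch); the array is reallocated only when it is too small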
template <typename T>
void RPUCudaSimple<T>::getTensorBuffer(T **x_tensor, T **d_tensor, int m_batch, int dim3) {
int x_size = this->getXSize();
int d_size = this->getDSize();
int n = (x_size + d_size) * dim3 * m_batch;
if ((dev_temp_tensor_ == nullptr) || (dev_temp_tensor_->getSize() < n)) {
dev_temp_tensor_ = RPU::make_unique<CudaArray<T>>(context_, n);
}
*x_tensor = dev_temp_tensor_->getData();
*d_tensor = *x_tensor + (x_size)*dim3 * m_batch;
}
template <typename T>
void RPUCudaSimple<T>::permute132(
T *out_tensor, const T *in_tensor, int dim1, int dim2, int dim3, bool bias2) {
RPU::math::permute132<T>(context_, out_tensor, in_tensor, dim1, dim2, dim3, bias2);
}
/*********************************************************************************/
template <typename T>
void RPUCudaSimple<T>::copyIndexedInput(
T *out_tensor,
const T *in_tensor,
const int total_input_size,
const int *indices,
const int size,
const int m_batch,
const int dim3,
const bool trans,
const int m_batch_slice,
const int *batch_indices) {
bool batch_slice = m_batch_slice > 0;
if (batch_slice) {
if (trans) {
IndexReaderSliceInputIterator<true, T> in_iter(
in_tensor, indices, total_input_size / dim3, size, m_batch, dim3, m_batch_slice,
batch_indices);
RPU::math::copyWithIterator(context_, out_tensor, in_iter, m_batch_slice * size * dim3);
} else {
IndexReaderSliceInputIterator<false, T> in_iter(
in_tensor, indices, total_input_size / dim3, size, m_batch, dim3, m_batch_slice,
batch_indices);
RPU::math::copyWithIterator(context_, out_tensor, in_iter, m_batch_slice * size * dim3);
}
} else {
if (trans) {
IndexReaderTransInputIterator<T> in_iter(
in_tensor, indices, total_input_size / dim3, m_batch, size * m_batch, m_batch * dim3);
RPU::math::copyWithIterator(context_, out_tensor, in_iter, m_batch * size * dim3);
} else {
IndexReaderInputIterator<T> in_iter(
in_tensor, indices, total_input_size / dim3, size * m_batch);
RPU::math::copyWithIterator(context_, out_tensor, in_iter, m_batch * size * dim3);
}
}
}
template <typename T>
void RPUCudaSimple<T>::copyIndexedOutput(
T *out_tensor,
const T *in_tensor,
const int total_output_size,
const int *indices,
const int size,
const int m_batch,
const int dim3,
const bool trans,
const int m_batch_slice,
const int *batch_indices) {
// CAUTION: NO zeroing. Needs to be done from outside.
bool batch_slice = m_batch_slice > 0;
if (batch_slice) {
if (trans) {
IndexReaderSliceOutputIterator<true, T> out_iter(
out_tensor, indices, total_output_size / dim3, size, m_batch, dim3, m_batch_slice,
batch_indices);
RPU::math::copyWithIterator(context_, out_iter, in_tensor, m_batch_slice * size * dim3);
} else {
IndexReaderSliceOutputIterator<false, T> out_iter(
out_tensor, indices, total_output_size / dim3, size, m_batch, dim3, m_batch_slice,
batch_indices);
RPU::math::copyWithIterator(context_, out_iter, in_tensor, m_batch_slice * size * dim3);
}
} else {
if (trans) {
IndexReaderTransOutputIterator<T> out_iter(
out_tensor, indices, total_output_size / dim3, m_batch, size * m_batch, m_batch * dim3);
RPU::math::copyWithIterator(context_, out_iter, in_tensor, size * m_batch * dim3);
} else {
IndexReaderOutputIterator<T> out_iter(
out_tensor, indices, total_output_size / dim3, size * m_batch);
RPU::math::copyWithIterator(context_, out_iter, in_tensor, size * m_batch * dim3);
}
}
}
template <typename T>
void RPUCudaSimple<T>::copySliceInput(
T *out_tensor,
const T *in_tensor,
const int size,
const int m_batch,
const int dim3,
const bool trans,
const int m_batch_slice,
const int *batch_indices) {
if (trans) {
SliceInputIterator<true, T> in_iter(
in_tensor, size, m_batch, dim3, m_batch_slice, batch_indices);
RPU::math::copyWithIterator(context_, out_tensor, in_iter, m_batch_slice * size * dim3);
} else {
SliceInputIterator<false, T> in_iter(
in_tensor, size, m_batch, dim3, m_batch_slice, batch_indices);
RPU::math::copyWithIterator(context_, out_tensor, in_iter, m_batch_slice * size * dim3);
}
}
template <typename T>
void RPUCudaSimple<T>::copySliceOutput(
T *out_tensor,
const T *in_tensor,
const int size,
const int m_batch,
const int dim3,
const bool trans,
const int m_batch_slice,
const int *batch_indices) {
if (trans) {
SliceOutputIterator<true, T> out_iter(
out_tensor, size, m_batch, dim3, m_batch_slice, batch_indices);
RPU::math::copyWithIterator(context_, out_iter, in_tensor, m_batch_slice * size * dim3);
} else {
SliceOutputIterator<false, T> out_iter(
out_tensor, size, m_batch, dim3, m_batch_slice, batch_indices);
RPU::math::copyWithIterator(context_, out_iter, in_tensor, m_batch_slice * size * dim3);
}
}
/*********************************************************************************/
template <typename T>
void RPUCudaSimple<T>::forwardMatrix(
const T *X_input, T *D_output, int m_batch, bool x_trans, bool d_trans, bool is_test) {
RPU::detail::forwardMatrix(
this->context_, getFBWeightsCuda(is_test), X_input, this->x_size_, x_trans, D_output,
this->d_size_, d_trans, m_batch, this->getFwdAlpha());
};
/*********************************************************************************/
template <typename T>
void RPUCudaSimple<T>::backwardMatrix(
const T *D_input, T *X_output, int m_batch, bool d_trans, bool x_trans) {
RPU::detail::backwardMatrix(
this->context_, getFBWeightsCuda(false), D_input, this->d_size_, d_trans, X_output,
this->x_size_, x_trans, m_batch, this->getBwdAlpha());
};
/*********************************************************************************/
template <typename T> inline T *RPUCudaSimple<T>::getUpWeightsCuda() {
// This is called from the Update routines to check which weight
// is used for calculation. If dw is defined, then it will use the
// DW mode, meaning that it will write into delta_weights the DW
// and keep the weights. For HW RPU models that might include
// first using W and then writing the difference to dW
//
// it also handles the delayed weights
if (this->use_delayed_update_) {
return getWeightsBufferCuda();
} else {
return (dev_delta_weights_extern_ != nullptr) ? dev_delta_weights_extern_
: dev_weights_->getData();
}
}
/*********************************************************************************/
template <typename T>
void RPUCudaSimple<T>::updateMatrix(
const T *X_input, const T *D_input, int m_batch, bool x_trans, bool d_trans) {
this->last_update_m_batch_ = m_batch;
RPU::math::gemm<T>(
context_, d_trans, !x_trans,
this->d_size_, // M
this->x_size_, // N
m_batch, // K
-this->getAlphaLearningRate(), D_input, d_trans ? m_batch : this->d_size_, X_input,
x_trans ? m_batch : this->x_size_, this->getUpBeta(), this->getUpWeightsCuda(),
this->d_size_);
}
/*********************************************************************************/
/* vector forward/backward/update */
template <typename T>
void RPUCudaSimple<T>::forwardVector(
const T *x_input, T *d_output, int x_inc, int d_inc, bool is_test) {
RPU::math::gemv<T>(
context_, false, this->d_size_, this->x_size_, this->getFwdAlpha(), getFBWeightsCuda(is_test),
this->d_size_, // because of *column* major format !
x_input, x_inc, (T)0.0, d_output, d_inc);
// NOTE: no synchronization is done here. Assume all inputs are on the same stream
// if not, synchronization of contexts has to be done externally.
};
template <typename T>
void RPUCudaSimple<T>::backwardVector(const T *d_input, T *x_output, int d_inc, int x_inc) {
RPU::math::gemv<T>(
context_,
true, // transpose
this->d_size_, this->x_size_, this->getBwdAlpha(), getFBWeightsCuda(false), this->d_size_,
d_input, d_inc, (T)0.0, x_output, x_inc);
};
template <typename T>
void RPUCudaSimple<T>::updateVector(const T *x_input, const T *d_input, int x_inc, int d_inc) {
if (!this->getDeltaWeights()) {
RPU::math::ger<T>(
context_, this->d_size_, this->x_size_, -this->getAlphaLearningRate(), d_input, d_inc,
x_input, x_inc, getUpWeightsCuda(), this->d_size_);
} else {
if (x_inc == 1 && d_inc == 1) {
RPUCudaSimple<T>::updateMatrix(x_input, d_input, 1, false, false);
} else {
RPU_FATAL("Update_Vector for delta weights and xd_inc>1 is not implemented.");
}
}
}
/*********************************************************************************/
template <typename T> void RPUCudaSimple<T>::decayWeights(T alpha, bool bias_no_decay) {
T lifetime = this->getPar().lifetime;
T decay_rate = (lifetime > 1) ? (1.0 / lifetime) : 0.0;
T decay_scale = 1.0 - alpha * decay_rate;
if (decay_scale > 0.0 && decay_scale < 1.0) {
int size = this->x_size_ * this->d_size_;
// we have d_size major, ie col-major. Thus bias is just at the end
RPU::math::scal<T>(
context_, bias_no_decay ? MAX(size - this->d_size_, 0) : size, decay_scale,
dev_weights_->getData(), 1);
}
}
template <typename T> void RPUCudaSimple<T>::decayWeights(bool bias_no_decay) {
RPUCudaSimple<T>::decayWeights(1.0, bias_no_decay);
}
/*********************************************************************************/
template <typename T> void RPUCudaSimple<T>::driftWeights(T time_since_last_call) {
if (wdrifter_cuda_ == nullptr) {
auto wd =
WeightDrifter<T>(this->x_size_ * this->d_size_, this->getPar().drift); // forces simple
wdrifter_cuda_ =
RPU::make_unique<WeightDrifterCuda<T>>(this->context_, wd, this->x_size_, this->d_size_);
context_->synchronize();
}
wdrifter_cuda_->apply(dev_weights_->getData(), time_since_last_call);
}
/*********************************************************************************/
template <typename T> void RPUCudaSimple<T>::clipWeights(T clip) {
if (clip >= 0) {
RPU::math::aclip<T>(context_, dev_weights_->getData(), dev_weights_->getSize(), clip);
}
}
template <typename T> void RPUCudaSimple<T>::clipWeights(const WeightClipParameter &wclpar) {
if (!wclipper_cuda_) {
wclipper_cuda_ =
make_unique<WeightClipperCuda<T>>(this->context_, this->x_size_, this->d_size_);
}
wclipper_cuda_->apply(dev_weights_->getData(), wclpar);
}
/*********************************************************************************/
template <typename T> void RPUCudaSimple<T>::diffuseWeights() {
T diffusion = this->getPar().diffusion;
if (diffusion <= 0.0) {
return;
}
if (rnd_diffusion_context_ == nullptr) {
// first time: init
rnd_diffusion_context_ = RPU::make_unique<CudaContext>(context_->getGPUId());
dev_diffusion_nrnd_ = RPU::make_unique<CudaArray<float>>(
&*rnd_diffusion_context_, (dev_weights_->getSize() + 31) / 32 * 32);
rnd_diffusion_context_->setRandomSeed(0);
rnd_diffusion_context_->randNormal(
dev_diffusion_nrnd_->getData(), dev_diffusion_nrnd_->getSize());
}
context_->recordWaitEvent(rnd_diffusion_context_->getStream());
RPU::math::elemaddscale<T>(
context_, dev_weights_->getData(), dev_weights_->getSize(), dev_diffusion_nrnd_->getData(),
diffusion);
rnd_diffusion_context_->recordWaitEvent(context_->getStream());
rnd_diffusion_context_->randNormal(
dev_diffusion_nrnd_->getData(), dev_diffusion_nrnd_->getSize());
}
/***********************************************************************/
template <typename T> void RPUCudaSimple<T>::setDeltaWeights(T *dw_extern) {
ENFORCE_NO_DELAYED_UPDATE;
dev_delta_weights_extern_ = dw_extern;
}
template <typename T> T *RPUCudaSimple<T>::getFBWeightsCuda(bool is_test) const {
bool use_fb = dev_fb_weights_ &&
(!is_test || (fb_wmodifier_cuda_ && fb_wmodifier_cuda_->enableDuringTest()));
return use_fb ? dev_fb_weights_->getData() : dev_weights_->getData();
}
template <typename T> void RPUCudaSimple<T>::modifyFBWeights(const WeightModifierParameter &wmpar) {
ENFORCE_NO_DELAYED_UPDATE; // will get confused with the buffer
if (dev_fb_weights_ == nullptr) {
dev_fb_weights_ = RPU::make_unique<CudaArray<T>>(context_, this->x_size_ * this->d_size_);
fb_wmodifier_cuda_ =
RPU::make_unique<WeightModifierCuda<T>>(context_, this->x_size_, this->d_size_);
context_->synchronize();
}
// modify FB weights
fb_wmodifier_cuda_->apply(dev_fb_weights_->getData(), dev_weights_->getDataConst(), wmpar);
}
/*********************************************************************************/
template <typename T> void RPUCudaSimple<T>::printWeights(int x_count, int d_count) {
this->copyWeightsToHost(this->weights_[0]);
RPUSimple<T>::printWeights(x_count, d_count);
}
template class RPUCudaSimple<float>;
#ifdef RPU_USE_DOUBLE
template class RPUCudaSimple<double>;
#endif
} // namespace RPU
|
bac4af7f050eb222eb4a73854b973f5334435b0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_utils.h"
#include "macros.hpp"
#include <ATen/ExpandUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPUtils.h>
#include <c10/core/ScalarType.h>
#include <stdio.h>
#include <torch/extension.h>
/*
return the index of the current point in the idxList
-1 outside
>= 0 inside
*/
template <typename indice_t>
__device__ void is_inside(const int topK, const indice_t *__restrict__ idxList,
const indice_t curr_Idx, int *curK) {
for (size_t i = 0; i < topK; i++) {
// a pixel is inside the splat if idxList contains point index
if (idxList[i] == curr_Idx) {
*curK = i;
return;
}
// a pixel definitely isn't inside a splat if it's not occupied by any point
if (idxList[i] == -1) {
*curK = -1;
return;
}
}
*curK = -1;
return;
}
/* compute pixel color after removing a point from a merged pixel */
// TODO curPointList probably not necessary, since rhoList and WsList will be
// zero at curPointList[k] == -1
template <typename scalar_t, typename indice_t>
__device__ void after_removal(const int numColors, const int topK,
const int curK, const scalar_t depthThres,
const scalar_t *depthList,
const indice_t *curPointList, // topK
const uint8_t *curIsBehind, // topK
const scalar_t *wsList, // topKx3
const scalar_t *rhoList, // topKx1
const scalar_t *curPixel, // numColors
scalar_t *newColors, // numColors
scalar_t *newDepth) {
// initialize color with 0.0
for (int c = 0; c < numColors; c++)
newColors[c] = 0.0;
// initialize depth with the farthest so far
*newDepth = depthList[topK - 1];
scalar_t sumRho = 0.0;
int numVisible = 0;
for (int k = 0; k < topK; k++) {
if (curIsBehind[k] == 0)
++numVisible;
}
// if it's the only visible point, then removing it will reveal the
// color below
assert(numVisible >= 0);
if (numVisible == 1)
{
sumRho = 0.0;
// CHECK: should be the second?
const scalar_t curDepth = depthList[1];
for(int k = curK + 1; k < topK; k++)
{
// as soon as idxList is -1 or depth > currentDepth+threshold
// stop accumulating colors
if (curPointList[k] == -1)
break;
if ((depthList[k] - curDepth) > depthThres)
break;
for (int c = 0; c < numColors; c++)
newColors[c] += wsList[k * numColors + c] * rhoList[k];
sumRho += rhoList[k];
if (depthList[k] < *newDepth)
*newDepth = depthList[k];
}
for (int c = 0; c < numColors; c++)
newColors[c] /= (sumRho + 1e-8);
return;
}
// not the only point visible:
// removing current point involves reweighting rhos
for (size_t k = 0; k < numVisible; k++)
{
if (k == curK)
continue;
for (size_t c = 0; c < numColors; c++)
newColors[c] += wsList[k * numColors + c] * rhoList[k];
sumRho += rhoList[k];
if (depthList[k] < *newDepth)
*newDepth = depthList[k];
}
for (size_t c = 0; c < numColors; c++)
newColors[c] /= (sumRho + 1e-8);
assert(sumRho > 0);
return;
}
/* compute pixel color after moving a point to a merged pixel */
template <typename scalar_t>
__device__ void
after_addition(const int numColors, const int topK, const scalar_t rho,
const scalar_t *ws, const scalar_t pointDepth,
const scalar_t depthThres, const scalar_t *depthList,
const uint8_t *curIsBehind, // topK
const scalar_t *wsList, // topKx3
const scalar_t *rhoList, // topKx1
const scalar_t *curPixel, // numColors
scalar_t *newColors, // numColors
scalar_t *newDepth) {
scalar_t sumRho = rho;
for (size_t k = 0; k < topK; k++) {
if (curIsBehind[k] > 0 ||
(depthList[k] - depthThres) > pointDepth) { // || (depthList[k] - depthThres) > pointDepth
break;
}
sumRho += rhoList[k];
}
if (sumRho == 0) {
sumRho += 1e-5;
}
for (size_t c = 0; c < numColors; c++)
newColors[c] = rho / sumRho * ws[c];
for (size_t k = 0; k < topK; k++) {
for (size_t c = 0; c < numColors; c++) {
if (curIsBehind[k] > 0 ||
(depthList[k] - depthThres) >
pointDepth) { // || (depthList[k] - depthThres) > pointDepth
break;
}
newColors[c] += rhoList[k] / sumRho * wsList[k * numColors + c];
}
}
*newDepth = min(depthList[0], pointDepth);
}
/*
compute pixel color after moving a point closer to the screen
*/
template <typename scalar_t>
__device__ void after_drawing_closer(const int numColors, const int topK,
const int curK,
const scalar_t *wsList, // topKx3
const scalar_t *rhoList, // topKx1
const scalar_t *depthList, // topK
const uint8_t *isBehind, // topK
scalar_t *newColors, scalar_t *newDepth) {
scalar_t curRho = rhoList[curK];
const scalar_t *curW = wsList + curK * numColors;
scalar_t pointDepth = depthList[curK];
scalar_t sumRho = curRho;
for (size_t k = 0; k < topK; k++) {
if (isBehind[k] > 0) {
break;
}
sumRho += rhoList[k];
}
// should at least have curRho
assert(sumRho > 0);
for (size_t c = 0; c < numColors; c++) {
newColors[c] = curRho / sumRho * curW[c];
}
for (size_t k = 0; k < topK; k++) {
for (size_t c = 0; c < numColors; c++) {
if (isBehind[k] > 0) {
break;
}
newColors[c] += rhoList[k] / sumRho * wsList[k * numColors + c];
}
}
*newDepth = min(depthList[0], pointDepth);
}
/* push v away from zero by a small eps so later divisions by v stay finite */
template <typename scalar_t> __device__ scalar_t eps_guard(scalar_t v) {
const scalar_t eps = 0.01;
// every path returns: negative values are shifted by -eps, non-negative ones by +eps
return (v < 0) ? v - eps : v + eps;
}
/*
a point is not "bad", i.e. don't need to be moved, when it's colorGrads is
zero within its effective extent (pointIdxMap include pointID && rhoMap > 0)
*/
template <typename scalar_t, typename indice_t>
__global__ void whitelist_points(
const int imgHeight, const int imgWidth, const int topK, const int PN,
const int batchSize, const int WDim,
const scalar_t *__restrict__ colorGrads, // BxHxWx3 gradient from output
const indice_t *__restrict__ pointIdxMap, // BxHxWxtopK
const uint8_t *__restrict__ isBehind, // BxHxWxtopK
const indice_t *__restrict__ boundingBoxes, // BxNx4 xmin ymin xmax ymax
uint8_t *whitelist_mask // BxNx1
) {
const int numPixels = imgHeight * imgWidth;
const scalar_t eps = 1e-9;
// loop all points
for (int b = blockIdx.x; b < batchSize; b += gridDim.x) {
for (indice_t p = threadIdx.x + blockDim.x * blockIdx.y; p < PN;
p += blockDim.x * gridDim.y) {
const size_t curPointIdx = b * PN + p;
const indice_t *curBB = boundingBoxes + curPointIdx * 4;
const indice_t xmin = curBB[0];
const indice_t ymin = curBB[1];
const indice_t xmax = curBB[2];
const indice_t ymax = curBB[3];
// search within the bounding box
bool isGood = true;
bool inExtent = false;
for (size_t h = ymin; h < ymax; h++) {
for (size_t w = xmin; w < xmax; w++) {
const indice_t curPixelIdx = b * numPixels + h * imgWidth + w;
scalar_t colorGrad = 0.0;
for (size_t c = 0; c < WDim; c++) {
colorGrad += abs(colorGrads[curPixelIdx * WDim + c]);
}
// temporary flag for current pixel
bool _isGood = true;
bool _inExtent = false;
for (size_t k = 0; k < topK; k++) {
// inside the extent and is shown
if (pointIdxMap[curPixelIdx * topK + k] == p) {
_inExtent = true;
// is bad if some pixel inside the splat radius is not shown
// (isBehind) or colorGrad > threshold
_isGood =
!(isBehind[curPixelIdx * topK + k] > 0 || (colorGrad) > eps);
}
}
// there is one pixel in extent
inExtent = inExtent | _inExtent;
// as long as one pixel is not good, this point is not good
isGood = _isGood & isGood;
}
}
// if all pixels are not in extent, then this point is bad
whitelist_mask[curPointIdx] = inExtent & isGood;
}
}
}
/* accumulate per-point gradients dIdp (screen-space x/y) and dIdz (depth) from the per-pixel color gradients */
template <typename scalar_t, typename indice_t>
__global__ void visibility_backward_kernel(
const int batchSize, const int imgHeight, const int imgWidth,
const int localHeight, const int localWidth, const int topK, const int PN,
const int projDim, const int WDim, const scalar_t focalL,
const scalar_t mergeT, const bool considerZ,
const scalar_t *__restrict__ colorGrads, // BxHxWxC gradient from output
const indice_t *__restrict__ pointIdxMap, // BxHxWxtopK
const scalar_t *__restrict__ rhoMap, // BxHxWxtopK
const scalar_t *__restrict__ wsMap, // BxHxWxtopKxC
const scalar_t *__restrict__ depthMap, // BxHxWxtopK
const uint8_t *__restrict__ isBehind, // BxHxWxtopK
const scalar_t *__restrict__ pixelValues, // BxHxWxC
const indice_t *__restrict__ boundingBoxes, // BxNx4 xmin ymin xmax ymax
const scalar_t *__restrict__ projPoints, // BxNx[2or3], xy1
const scalar_t *__restrict__ pointColors, // BxNxC
const scalar_t *__restrict__ depthValues, // BxNx1
const scalar_t *__restrict__ rhoValues, // BxNx1
scalar_t *__restrict__ dIdp, // BxNx2 gradients for screenX and screenY
scalar_t *__restrict__ dIdz) // BxNx1 gradients for z
{
// const scalar_t mergeT = scalar_t(mergeThres);
// const scalar_t focalL = scalar_t(focalLength);
const int numPixels = imgHeight * imgWidth;
// loop all points
for (int b = blockIdx.x; b < batchSize; b += gridDim.x) {
for (indice_t p = threadIdx.x + blockDim.x * blockIdx.y; p < PN;
p += blockDim.x * gridDim.y) {
const indice_t curPointIdx = b * PN + p;
// skip point (gradient=0) if mask == 1 (i.e. point is good)
scalar_t xmin = scalar_t(boundingBoxes[curPointIdx * 4]);
scalar_t ymin = scalar_t(boundingBoxes[curPointIdx * 4 + 1]);
// scalar_t xmax = scalar_t(boundingBoxes[curPointIdx * 4 + 2]);
// scalar_t ymax = scalar_t(boundingBoxes[curPointIdx * 4 + 3]);
const scalar_t *curPointColor = pointColors + curPointIdx * WDim;
const scalar_t *curProjValues = projPoints + curPointIdx * projDim;
scalar_t *dIdx = dIdp + curPointIdx * projDim;
scalar_t *dIdy = dIdp + curPointIdx * projDim + 1;
scalar_t *curdIdz = dIdz + curPointIdx;
const scalar_t rhov = rhoValues[curPointIdx];
const int bH =
min(max(0, int(curProjValues[1] - localHeight / 2)), imgHeight);
const int eH =
max(min(imgHeight, int(curProjValues[1] + localHeight / 2 + 1)), 0);
const int bW =
min(max(0, int(curProjValues[0] - localWidth / 2)), imgWidth);
const int eW =
max(min(imgWidth, int(curProjValues[0] + localWidth / 2 + 1)), 0);
// loop all pixels
for (size_t i = bH; i < eH; i++) {
for (size_t j = bW; j < eW; j++) {
const indice_t curPixelIdx = (b * numPixels + i * imgWidth + j);
const scalar_t *curColorGrad = colorGrads + curPixelIdx * WDim;
const scalar_t *curWs = wsMap + curPixelIdx * topK * WDim;
const scalar_t *curRhos = rhoMap + curPixelIdx * topK;
// const indice_t curClosest = pointIdxMap[curPixelIdx * topK];
// const indice_t curClosestIdx = b * PN + curClosest;
const indice_t *curIdxList = pointIdxMap + curPixelIdx * topK;
const scalar_t *curPixelValues = pixelValues + curPixelIdx * WDim;
const scalar_t *curDepthList = depthMap + curPixelIdx * topK;
// const scalar_t curClosestDepth = depthMap[curPixelIdx * topK];
const uint8_t *curIsBehind = isBehind + curPixelIdx * topK;
const scalar_t curPointDepth = depthValues[curPointIdx];
// is this pixel inside the splat?
int curK;
is_inside(topK, curIdxList, curPointIdx, &curK);
scalar_t didxv = 0.0;
scalar_t didyv = 0.0;
scalar_t didzv = 0.0;
scalar_t dldI = 0.0;
scalar_t newColors[10];
scalar_t newDepth;
// outside
if (curK < 0)
{
after_addition(WDim, topK, rhov, curPointColor, curPointDepth,
mergeT, curDepthList, curIsBehind, curWs, curRhos,
curPixelValues, newColors, &newDepth);
for (size_t c = 0; c < WDim; c++)
dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c];
if (dldI < 0.0)
{
// another point at pixel i,j is in front of the current point by
// a threshold, need to change z, otherwise moving to that
// direction won't change the color value
if (curPointDepth - newDepth > mergeT)
{
if (!considerZ)
continue;
scalar_t dx = (scalar_t(j) - curProjValues[0]);
scalar_t dy = (scalar_t(i) - curProjValues[1]);
scalar_t dx_3d = (scalar_t(j) - curProjValues[0]) / focalL /
imgWidth * 2 * curPointDepth;
scalar_t dy_3d = (scalar_t(i) - curProjValues[1]) / focalL /
imgHeight * 2 * curPointDepth;
assert(newDepth < curPointDepth);
scalar_t dz_3d = newDepth - curPointDepth;
scalar_t distance2_3d =
eps_guard(dx_3d * dx_3d + dy_3d * dy_3d + dz_3d * dz_3d);
scalar_t distance2 = eps_guard(dx * dx + dy * dy);
didzv = dldI / distance2_3d * dz_3d;
// should rescale to screen space
didxv = dldI / distance2 * dx;
didyv = dldI / distance2 * dy;
assert(!isnan(didxv));
assert(!isnan(didyv));
}
else // don't need to change z
{
scalar_t dx = (scalar_t(j) - curProjValues[0]);
scalar_t dy = (scalar_t(i) - curProjValues[1]);
scalar_t distance2 = eps_guard(dx * dx + dy * dy);
// dIdx
didxv = dldI / distance2 * dx;
// dIdy
didyv = dldI / distance2 * dy;
assert(!isnan(didxv));
assert(!isnan(didyv));
}
}
}
// pixel inside splat
else // i.e. curK >= 0
{
// is the current point shown?
if (curIsBehind[curK] < 1) // yes
{
// dIdx dIdy and dIdz-
after_removal(WDim, topK, curK, mergeT, curDepthList, curIdxList,
curIsBehind, curWs, curRhos, curPixelValues,
newColors, &newDepth);
for (size_t c = 0; c < WDim; c++)
dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c];
if (dldI < 0.0)
{
// dIdp = (dIdp+) + (dIdp-)
scalar_t dx = (scalar_t(j) - curProjValues[0]);
scalar_t dy = (scalar_t(i) - curProjValues[1]);
scalar_t distance = sqrt(eps_guard(dx * dx + dy * dy));
scalar_t rx = curProjValues[0] - xmin;
scalar_t ry = curProjValues[1] - ymin;
assert(rx > 0);
assert(ry > 0);
scalar_t r = max(rx, ry);
didxv = dldI * dx / eps_guard((r + distance) * distance) +
dldI * dx / eps_guard((distance - r) * distance);
didyv = dldI * dy / eps_guard((r + distance) * distance) +
dldI * dy / eps_guard((distance - r) * distance);
assert(!isnan(didxv));
assert(!isnan(didyv));
}
} // endif (curRhos[curK] > 0)
// point is not visible:
else // i.e. curIsBehind[curK] >= 1
{
if (!considerZ)
continue;
// this point is occluded by other points, moving closer will
// change the color
after_drawing_closer(WDim, topK, curK, curWs, curRhos,
curDepthList, curIsBehind, newColors,
&newDepth);
for (size_t c = 0; c < WDim; c++) {
dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c];
}
if (dldI < 0.0) {
didzv = dldI / eps_guard(newDepth - curPointDepth);
}
} // endif on top
} // endif inside
(*curdIdz) += didzv;
(*dIdx) += didxv;
(*dIdy) += didyv;
} // imWidth
} // imHeight
} // point
} // batch
}
// dIdp BxNx2 dx dy, dIdz BxNx1
std::vector<at::Tensor>
visibility_backward_cuda(const double focalLength, const double mergeThres,
const bool considerZ, const int localHeight,
const int localWidth,
const at::Tensor &colorGrads, // BxHxWxWDim
const at::Tensor &pointIdxMap, // BxHxWxtopK
const at::Tensor &rhoMap, // BxHxWxtopK
const at::Tensor &wsMap, // BxHxWxtopKxWDim
const at::Tensor &depthMap, // BxHxWxtopK
const at::Tensor &isBehind, // BxHxWxtopK
const at::Tensor &pixelValues, // BxHxWxWDim
const at::Tensor &boundingBoxes, // BxNx4
const at::Tensor &projPoints, // BxNx[2or3]
const at::Tensor &pointColors, // BxNxWDim
const at::Tensor &depthValues, // BxNx1
const at::Tensor &rhoValues, // BxNx1
at::Tensor &dIdp, at::Tensor &dIdz) {
const int batchSize = pointIdxMap.size(0);
const int imgHeight = pointIdxMap.size(1);
const int imgWidth = pointIdxMap.size(2);
const int topK = pointIdxMap.size(3);
const int PN = projPoints.size(1);
const int WDim = pointColors.size(2);
CHECK(projPoints.size(2) == 2 || projPoints.size(2) == 3);
const int projDim = projPoints.size(2);
CHECK_EQ(pointColors.size(1), PN);
CHECK(colorGrads.size(-1) == wsMap.size(-1) &&
wsMap.size(-1) == pixelValues.size(-1) &&
pixelValues.size(-1) == pointColors.size(-1));
std::vector<at::Tensor> outputs;
unsigned int n_threads, n_blocks;
n_threads = opt_n_threads(PN);
n_blocks = min(32, (PN * batchSize + n_threads - 1) / n_threads);
// initialize with zeros
dIdp.zero_();
dIdz.zero_();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
std::vector<at::Tensor> output;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
colorGrads.type(), "visibility_backward_kernel", ([&] {
hipLaunchKernelGGL(( visibility_backward_kernel<scalar_t, int64_t>)
, dim3(dim3(batchSize, n_blocks, 1)), dim3(n_threads), 0, stream,
batchSize, imgHeight, imgWidth, localHeight, localWidth, topK,
PN, projDim, WDim, focalLength, mergeThres, considerZ,
colorGrads.data<scalar_t>(), // BxHxWx3
pointIdxMap.data<int64_t>(), // BxHxWxtopK
rhoMap.data<scalar_t>(), // BxHxWxtopK
wsMap.data<scalar_t>(), // BxHxWxtopKx3
depthMap.data<scalar_t>(), // BxHxWxtopK
isBehind.data<uint8_t>(), // BxHxWxtopK
pixelValues.data<scalar_t>(), // BxHxWx3
boundingBoxes.toType(pointIdxMap.scalar_type())
.data<int64_t>(), // BxNx4 xmin ymin xmax ymax
projPoints.data<scalar_t>(), // BxNx[2or3], xy1
pointColors.data<scalar_t>(), // BxNx3
depthValues.data<scalar_t>(), // BxNx1
rhoValues.data<scalar_t>(), // BxNx1
dIdp.data<scalar_t>(), // BxNx2 gradients for projX,Y
dIdz.data<scalar_t>() // BxNx1
); // BxHxWx8
}));
output.push_back(dIdp);
output.push_back(dIdz);
hipError_t err = hipDeviceSynchronize();
if (err != hipSuccess) {
    printf("visibility_backward_cuda kernel failed: %s\n",
           hipGetErrorString(err));
exit(-1);
}
return output;
} | bac4af7f050eb222eb4a73854b973f5334435b0d.cu | #include "cuda_utils.h"
#include "macros.hpp"
#include <ATen/ExpandUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAUtils.h>
#include <c10/core/ScalarType.h>
#include <stdio.h>
#include <torch/extension.h>
/*
return the index of the current point in the idxList
-1 outside
>= 0 inside
*/
template <typename indice_t>
__device__ void is_inside(const int topK, const indice_t *__restrict__ idxList,
const indice_t curr_Idx, int *curK) {
for (size_t i = 0; i < topK; i++) {
// a pixel is inside the splat if idxList contains point index
if (idxList[i] == curr_Idx) {
*curK = i;
return;
}
// a pixel definitely isn't inside a splat if it's not occupied by any point
if (idxList[i] == -1) {
*curK = -1;
return;
}
}
*curK = -1;
return;
}
/* compute pixel color after removing a point from a merged pixel */
// TODO curPointList is probably not necessary, since rhoList and WsList will
// be zero where curPointList[k] == -1
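/* Roughly, the removal below re-normalizes the rho-weighted blend over the
   remaining visible points S (or, when the removed point was the only visible
   one, over the points revealed within the depth-merge threshold):
       C'_c = sum_{k in S} rho_k * w_{k,c} / (sum_{k in S} rho_k + 1e-8)
   e.g. with two visible points and rho = {0.6, 0.4}, removing the first leaves
   C'_c = w_{2,c}, since the single remaining weight normalizes to 1. */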
template <typename scalar_t, typename indice_t>
__device__ void after_removal(const int numColors, const int topK,
const int curK, const scalar_t depthThres,
const scalar_t *depthList,
const indice_t *curPointList, // topK
const uint8_t *curIsBehind, // topK
const scalar_t *wsList, // topKx3
const scalar_t *rhoList, // topKx1
const scalar_t *curPixel, // numColors
scalar_t *newColors, // numColors
scalar_t *newDepth) {
// initialize color with 0.0
for (int c = 0; c < numColors; c++)
newColors[c] = 0.0;
// initialize depth with the farthest so far
*newDepth = depthList[topK - 1];
scalar_t sumRho = 0.0;
int numVisible = 0;
for (int k = 0; k < topK; k++) {
if (curIsBehind[k] == 0)
++numVisible;
}
// if it's the only visible point, then removing it will reveal the
// color below
assert(numVisible >= 0);
if (numVisible == 1)
{
sumRho = 0.0;
// CHECK: should be the second?
const scalar_t curDepth = depthList[1];
for(int k = curK + 1; k < topK; k++)
{
// as soon as idxList is -1 or depth > currentDepth+threshold
// stop accumulating colors
if (curPointList[k] == -1)
break;
if ((depthList[k] - curDepth) > depthThres)
break;
for (int c = 0; c < numColors; c++)
newColors[c] += wsList[k * numColors + c] * rhoList[k];
sumRho += rhoList[k];
if (depthList[k] < *newDepth)
*newDepth = depthList[k];
}
for (int c = 0; c < numColors; c++)
newColors[c] /= (sumRho + 1e-8);
return;
}
// not the only point visible:
// removing current point involves reweighting rhos
for (size_t k = 0; k < numVisible; k++)
{
if (k == curK)
continue;
for (size_t c = 0; c < numColors; c++)
newColors[c] += wsList[k * numColors + c] * rhoList[k];
sumRho += rhoList[k];
if (depthList[k] < *newDepth)
*newDepth = depthList[k];
}
for (size_t c = 0; c < numColors; c++)
newColors[c] /= (sumRho + 1e-8);
assert(sumRho > 0);
return;
}
/* compute pixel color after moving a point to a merged pixel */
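/* The addition blends the candidate point (rho, ws) with the points currently
   visible in front of it (within the depth-merge threshold):
       C'_c = (rho * ws_c + sum_k rho_k * w_{k,c}) / (rho + sum_k rho_k)
   e.g. one visible point with rho_0 = 1, w_0 = 0.2 plus a new point with
   rho = 1, ws = 0.8 gives C' = 0.5, and the pixel depth becomes
   min(depthList[0], pointDepth). */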
template <typename scalar_t>
__device__ void
after_addition(const int numColors, const int topK, const scalar_t rho,
const scalar_t *ws, const scalar_t pointDepth,
const scalar_t depthThres, const scalar_t *depthList,
const uint8_t *curIsBehind, // topK
const scalar_t *wsList, // topKx3
const scalar_t *rhoList, // topKx1
const scalar_t *curPixel, // numColors
scalar_t *newColors, // numColors
scalar_t *newDepth) {
scalar_t sumRho = rho;
for (size_t k = 0; k < topK; k++) {
if (curIsBehind[k] > 0 ||
(depthList[k] - depthThres) > pointDepth) { // || (depthList[k] - depthThres) > pointDepth
break;
}
sumRho += rhoList[k];
}
if (sumRho == 0) {
sumRho += 1e-5;
}
for (size_t c = 0; c < numColors; c++)
newColors[c] = rho / sumRho * ws[c];
for (size_t k = 0; k < topK; k++) {
for (size_t c = 0; c < numColors; c++) {
if (curIsBehind[k] > 0 ||
(depthList[k] - depthThres) >
pointDepth) { // || (depthList[k] - depthThres) > pointDepth
break;
}
newColors[c] += rhoList[k] / sumRho * wsList[k * numColors + c];
}
}
*newDepth = min(depthList[0], pointDepth);
}
/*
compute pixel color after moving a point closer to the screen
*/
template <typename scalar_t>
__device__ void after_drawing_closer(const int numColors, const int topK,
const int curK,
const scalar_t *wsList, // topKx3
const scalar_t *rhoList, // topKx1
const scalar_t *depthList, // topK
const uint8_t *isBehind, // topK
scalar_t *newColors, scalar_t *newDepth) {
scalar_t curRho = rhoList[curK];
const scalar_t *curW = wsList + curK * numColors;
scalar_t pointDepth = depthList[curK];
scalar_t sumRho = curRho;
for (size_t k = 0; k < topK; k++) {
if (isBehind[k] > 0) {
break;
}
sumRho += rhoList[k];
}
// should at least have curRho
assert(sumRho > 0);
for (size_t c = 0; c < numColors; c++) {
newColors[c] = curRho / sumRho * curW[c];
}
for (size_t k = 0; k < topK; k++) {
for (size_t c = 0; c < numColors; c++) {
if (isBehind[k] > 0) {
break;
}
newColors[c] += rhoList[k] / sumRho * wsList[k * numColors + c];
}
}
*newDepth = min(depthList[0], pointDepth);
}
template <typename scalar_t> __device__ scalar_t eps_guard(scalar_t v) {
const scalar_t eps = 0.01;
if (v < 0) {
return v - eps;
}
if (v >= 0) {
return v + eps;
}
// return v;
}
/*
a point is not "bad", i.e. does not need to be moved, when its colorGrads are
zero within its effective extent (pointIdxMap includes the point's ID && rhoMap > 0)
*/
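/* Concretely: a point stays whitelisted only if it appears in the top-K list
   of at least one pixel of its bounding box and, at every pixel where it
   appears, it is not occluded (isBehind == 0) and the summed absolute color
   gradient is below eps; otherwise it is considered "bad" and should move. */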
template <typename scalar_t, typename indice_t>
__global__ void whitelist_points(
const int imgHeight, const int imgWidth, const int topK, const int PN,
const int batchSize, const int WDim,
const scalar_t *__restrict__ colorGrads, // BxHxWx3 gradient from output
const indice_t *__restrict__ pointIdxMap, // BxHxWxtopK
const uint8_t *__restrict__ isBehind, // BxHxWxtopK
const indice_t *__restrict__ boundingBoxes, // BxNx4 xmin ymin xmax ymax
uint8_t *whitelist_mask // BxNx1
) {
const int numPixels = imgHeight * imgWidth;
const scalar_t eps = 1e-9;
// loop all points
for (int b = blockIdx.x; b < batchSize; b += gridDim.x) {
for (indice_t p = threadIdx.x + blockDim.x * blockIdx.y; p < PN;
p += blockDim.x * gridDim.y) {
const size_t curPointIdx = b * PN + p;
const indice_t *curBB = boundingBoxes + curPointIdx * 4;
const indice_t xmin = curBB[0];
const indice_t ymin = curBB[1];
const indice_t xmax = curBB[2];
const indice_t ymax = curBB[3];
// search within the bounding box
bool isGood = true;
bool inExtent = false;
for (size_t h = ymin; h < ymax; h++) {
for (size_t w = xmin; w < xmax; w++) {
const indice_t curPixelIdx = b * numPixels + h * imgWidth + w;
scalar_t colorGrad = 0.0;
for (size_t c = 0; c < WDim; c++) {
colorGrad += abs(colorGrads[curPixelIdx * WDim + c]);
}
// temporary flag for current pixel
bool _isGood = true;
bool _inExtent = false;
for (size_t k = 0; k < topK; k++) {
// inside the extent and is shown
if (pointIdxMap[curPixelIdx * topK + k] == p) {
_inExtent = true;
// is bad if some pixel inside the splat radius is not shown
// (isBehind) or colorGrad > threshold
_isGood =
!(isBehind[curPixelIdx * topK + k] > 0 || (colorGrad) > eps);
}
}
// there is one pixel in extent
inExtent = inExtent | _inExtent;
// as long as one pixel is not good, this point is not good
isGood = _isGood & isGood;
}
}
// if all pixels are not in extent, then this point is bad
whitelist_mask[curPointIdx] = inExtent & isGood;
}
}
}
/* Backward pass for the visibility term: for every point, accumulate the
   gradient of the image loss w.r.t. its screen-space position (dIdp) and its
   depth (dIdz) by probing, pixel by pixel inside a local window, how the
   rendered color would change if the point were added, removed, or drawn
   closer. */
template <typename scalar_t, typename indice_t>
__global__ void visibility_backward_kernel(
const int batchSize, const int imgHeight, const int imgWidth,
const int localHeight, const int localWidth, const int topK, const int PN,
const int projDim, const int WDim, const scalar_t focalL,
const scalar_t mergeT, const bool considerZ,
const scalar_t *__restrict__ colorGrads, // BxHxWxC gradient from output
const indice_t *__restrict__ pointIdxMap, // BxHxWxtopK
const scalar_t *__restrict__ rhoMap, // BxHxWxtopK
const scalar_t *__restrict__ wsMap, // BxHxWxtopKxC
const scalar_t *__restrict__ depthMap, // BxHxWxtopK
const uint8_t *__restrict__ isBehind, // BxHxWxtopK
const scalar_t *__restrict__ pixelValues, // BxHxWxC
const indice_t *__restrict__ boundingBoxes, // BxNx4 xmin ymin xmax ymax
const scalar_t *__restrict__ projPoints, // BxNx[2or3], xy1
const scalar_t *__restrict__ pointColors, // BxNxC
const scalar_t *__restrict__ depthValues, // BxNx1
const scalar_t *__restrict__ rhoValues, // BxNx1
scalar_t *__restrict__ dIdp, // BxNx2 gradients for screenX and screenY
scalar_t *__restrict__ dIdz) // BxNx1 gradients for z
{
// const scalar_t mergeT = scalar_t(mergeThres);
// const scalar_t focalL = scalar_t(focalLength);
const int numPixels = imgHeight * imgWidth;
// loop all points
for (int b = blockIdx.x; b < batchSize; b += gridDim.x) {
for (indice_t p = threadIdx.x + blockDim.x * blockIdx.y; p < PN;
p += blockDim.x * gridDim.y) {
const indice_t curPointIdx = b * PN + p;
// skip point (gradient=0) if mask == 1 (i.e. point is good)
scalar_t xmin = scalar_t(boundingBoxes[curPointIdx * 4]);
scalar_t ymin = scalar_t(boundingBoxes[curPointIdx * 4 + 1]);
// scalar_t xmax = scalar_t(boundingBoxes[curPointIdx * 4 + 2]);
// scalar_t ymax = scalar_t(boundingBoxes[curPointIdx * 4 + 3]);
const scalar_t *curPointColor = pointColors + curPointIdx * WDim;
const scalar_t *curProjValues = projPoints + curPointIdx * projDim;
scalar_t *dIdx = dIdp + curPointIdx * projDim;
scalar_t *dIdy = dIdp + curPointIdx * projDim + 1;
scalar_t *curdIdz = dIdz + curPointIdx;
const scalar_t rhov = rhoValues[curPointIdx];
const int bH =
min(max(0, int(curProjValues[1] - localHeight / 2)), imgHeight);
const int eH =
max(min(imgHeight, int(curProjValues[1] + localHeight / 2 + 1)), 0);
const int bW =
min(max(0, int(curProjValues[0] - localWidth / 2)), imgWidth);
const int eW =
max(min(imgWidth, int(curProjValues[0] + localWidth / 2 + 1)), 0);
// loop all pixels
for (size_t i = bH; i < eH; i++) {
for (size_t j = bW; j < eW; j++) {
const indice_t curPixelIdx = (b * numPixels + i * imgWidth + j);
const scalar_t *curColorGrad = colorGrads + curPixelIdx * WDim;
const scalar_t *curWs = wsMap + curPixelIdx * topK * WDim;
const scalar_t *curRhos = rhoMap + curPixelIdx * topK;
// const indice_t curClosest = pointIdxMap[curPixelIdx * topK];
// const indice_t curClosestIdx = b * PN + curClosest;
const indice_t *curIdxList = pointIdxMap + curPixelIdx * topK;
const scalar_t *curPixelValues = pixelValues + curPixelIdx * WDim;
const scalar_t *curDepthList = depthMap + curPixelIdx * topK;
// const scalar_t curClosestDepth = depthMap[curPixelIdx * topK];
const uint8_t *curIsBehind = isBehind + curPixelIdx * topK;
const scalar_t curPointDepth = depthValues[curPointIdx];
// is this pixel inside the splat?
int curK;
is_inside(topK, curIdxList, curPointIdx, &curK);
scalar_t didxv = 0.0;
scalar_t didyv = 0.0;
scalar_t didzv = 0.0;
scalar_t dldI = 0.0;
scalar_t newColors[10];
scalar_t newDepth;
// outside
if (curK < 0)
{
after_addition(WDim, topK, rhov, curPointColor, curPointDepth,
mergeT, curDepthList, curIsBehind, curWs, curRhos,
curPixelValues, newColors, &newDepth);
for (size_t c = 0; c < WDim; c++)
dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c];
if (dldI < 0.0)
{
// another point at pixel i,j is in front of the current point by
// a threshold, need to change z, otherwise moving to that
// direction won't change the color value
if (curPointDepth - newDepth > mergeT)
{
if (!considerZ)
continue;
scalar_t dx = (scalar_t(j) - curProjValues[0]);
scalar_t dy = (scalar_t(i) - curProjValues[1]);
scalar_t dx_3d = (scalar_t(j) - curProjValues[0]) / focalL /
imgWidth * 2 * curPointDepth;
scalar_t dy_3d = (scalar_t(i) - curProjValues[1]) / focalL /
imgHeight * 2 * curPointDepth;
assert(newDepth < curPointDepth);
scalar_t dz_3d = newDepth - curPointDepth;
scalar_t distance2_3d =
eps_guard(dx_3d * dx_3d + dy_3d * dy_3d + dz_3d * dz_3d);
scalar_t distance2 = eps_guard(dx * dx + dy * dy);
didzv = dldI / distance2_3d * dz_3d;
// should rescale to screen space
didxv = dldI / distance2 * dx;
didyv = dldI / distance2 * dy;
assert(!isnan(didxv));
assert(!isnan(didyv));
}
else // don't need to change z
{
scalar_t dx = (scalar_t(j) - curProjValues[0]);
scalar_t dy = (scalar_t(i) - curProjValues[1]);
scalar_t distance2 = eps_guard(dx * dx + dy * dy);
// dIdx
didxv = dldI / distance2 * dx;
// dIdy
didyv = dldI / distance2 * dy;
assert(!isnan(didxv));
assert(!isnan(didyv));
}
}
}
// pixel inside splat
else // i.e. curK >= 0
{
// is the current point shown?
if (curIsBehind[curK] < 1) // yes
{
// dIdx dIdy and dIdz-
after_removal(WDim, topK, curK, mergeT, curDepthList, curIdxList,
curIsBehind, curWs, curRhos, curPixelValues,
newColors, &newDepth);
for (size_t c = 0; c < WDim; c++)
dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c];
if (dldI < 0.0)
{
// dIdp = (dIdp+) + (dIdp-)
scalar_t dx = (scalar_t(j) - curProjValues[0]);
scalar_t dy = (scalar_t(i) - curProjValues[1]);
scalar_t distance = sqrt(eps_guard(dx * dx + dy * dy));
scalar_t rx = curProjValues[0] - xmin;
scalar_t ry = curProjValues[1] - ymin;
assert(rx > 0);
assert(ry > 0);
scalar_t r = max(rx, ry);
didxv = dldI * dx / eps_guard((r + distance) * distance) +
dldI * dx / eps_guard((distance - r) * distance);
didyv = dldI * dy / eps_guard((r + distance) * distance) +
dldI * dy / eps_guard((distance - r) * distance);
assert(!isnan(didxv));
assert(!isnan(didyv));
}
} // endif (curRhos[curK] > 0)
// point is not visible:
else // i.e. curIsBehind[curK] >= 1
{
if (!considerZ)
continue;
// this point is occluded by other points, moving closer will
// change the color
after_drawing_closer(WDim, topK, curK, curWs, curRhos,
curDepthList, curIsBehind, newColors,
&newDepth);
for (size_t c = 0; c < WDim; c++) {
dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c];
}
if (dldI < 0.0) {
didzv = dldI / eps_guard(newDepth - curPointDepth);
}
} // endif on top
} // endif inside
(*curdIdz) += didzv;
(*dIdx) += didxv;
(*dIdy) += didyv;
} // imWidth
} // imHeight
} // point
} // batch
}
// dIdp BxNx2 dx dy, dIdz BxNx1
std::vector<at::Tensor>
visibility_backward_cuda(const double focalLength, const double mergeThres,
const bool considerZ, const int localHeight,
const int localWidth,
const at::Tensor &colorGrads, // BxHxWxWDim
const at::Tensor &pointIdxMap, // BxHxWxtopK
const at::Tensor &rhoMap, // BxHxWxtopK
const at::Tensor &wsMap, // BxHxWxtopKxWDim
const at::Tensor &depthMap, // BxHxWxtopK
const at::Tensor &isBehind, // BxHxWxtopK
const at::Tensor &pixelValues, // BxHxWxWDim
const at::Tensor &boundingBoxes, // BxNx4
const at::Tensor &projPoints, // BxNx[2or3]
const at::Tensor &pointColors, // BxNxWDim
const at::Tensor &depthValues, // BxNx1
const at::Tensor &rhoValues, // BxNx1
at::Tensor &dIdp, at::Tensor &dIdz) {
const int batchSize = pointIdxMap.size(0);
const int imgHeight = pointIdxMap.size(1);
const int imgWidth = pointIdxMap.size(2);
const int topK = pointIdxMap.size(3);
const int PN = projPoints.size(1);
const int WDim = pointColors.size(2);
CHECK(projPoints.size(2) == 2 || projPoints.size(2) == 3);
const int projDim = projPoints.size(2);
CHECK_EQ(pointColors.size(1), PN);
CHECK(colorGrads.size(-1) == wsMap.size(-1) &&
wsMap.size(-1) == pixelValues.size(-1) &&
pixelValues.size(-1) == pointColors.size(-1));
std::vector<at::Tensor> outputs;
unsigned int n_threads, n_blocks;
n_threads = opt_n_threads(PN);
n_blocks = min(32, (PN * batchSize + n_threads - 1) / n_threads);
// initialize with zeros
dIdp.zero_();
dIdz.zero_();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
std::vector<at::Tensor> output;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
colorGrads.type(), "visibility_backward_kernel", ([&] {
visibility_backward_kernel<scalar_t, int64_t>
<<<dim3(batchSize, n_blocks, 1), n_threads, 0, stream>>>(
batchSize, imgHeight, imgWidth, localHeight, localWidth, topK,
PN, projDim, WDim, focalLength, mergeThres, considerZ,
colorGrads.data<scalar_t>(), // BxHxWx3
pointIdxMap.data<int64_t>(), // BxHxWxtopK
rhoMap.data<scalar_t>(), // BxHxWxtopK
wsMap.data<scalar_t>(), // BxHxWxtopKx3
depthMap.data<scalar_t>(), // BxHxWxtopK
isBehind.data<uint8_t>(), // BxHxWxtopK
pixelValues.data<scalar_t>(), // BxHxWx3
boundingBoxes.toType(pointIdxMap.scalar_type())
.data<int64_t>(), // BxNx4 xmin ymin xmax ymax
projPoints.data<scalar_t>(), // BxNx[2or3], xy1
pointColors.data<scalar_t>(), // BxNx3
depthValues.data<scalar_t>(), // BxNx1
rhoValues.data<scalar_t>(), // BxNx1
dIdp.data<scalar_t>(), // BxNx2 gradients for projX,Y
dIdz.data<scalar_t>() // BxNx1
); // BxHxWx8
}));
output.push_back(dIdp);
output.push_back(dIdz);
cudaError_t err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
    printf("visibility_backward_cuda kernel failed: %s\n",
           cudaGetErrorString(err));
exit(-1);
}
return output;
} |
3a59749f0632abc17ab58a98c9da7af328e9af0e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "convert.h"
#include "stencilMVM.h"
#include <cutil.h>
float* getImage(uint width, uint height, float* devImage) {
int imageSize = width * height * sizeof(float);
float* result = (float*)malloc(imageSize);
CUDA_SAFE_CALL(hipMemcpy(result, devImage, imageSize, hipMemcpyDeviceToHost));
return result;
}
int* getImage(uint width, uint height, int* devImage) {
int imageSize = width * height * sizeof(int);
int* result = (int*)malloc(imageSize);
CUDA_SAFE_CALL(hipMemcpy(result, devImage, imageSize, hipMemcpyDeviceToHost));
return result;
}
void writeTextImage(const char* filename, uint width, uint height, float* image) {
FILE* fp = fopen(filename, "w");
for(int row = 0; row < height; row++) {
for(int col = 0; col < width; col++) {
fprintf(fp, "%f ", image[row * width + col]);
}
fprintf(fp, "\n");
}
fclose(fp);
}
void writeTextImage(const char* filename, uint width, uint height, int* image) {
FILE* fp = fopen(filename, "w");
for(int row = 0; row < height; row++) {
for(int col = 0; col < width; col++) {
fprintf(fp, "%d ", image[row * width + col]);
}
fprintf(fp, "\n");
}
fclose(fp);
}
int main(int argc, char** argv) {
hipInit(0);
chooseLargestGPU(true);
char* filename = "colors.ppm";
unsigned int width;
unsigned int height;
unsigned int* devRgbU;
loadPPM_rgbU(filename, &width, &height, &devRgbU);
printf("Width: %i, height: %i\n", width, height);
float* devL;
float* devA;
float* devB;
float* devGrey;
rgbUtoLab3F(width, height, 2.5, devRgbU, &devL, &devA, &devB);
rgbUtoGreyF(width, height, devRgbU, &devGrey);
normalizeLab(width, height, devL, devA, devB);
float* L = getImage(width, height, devL);
float* a = getImage(width, height, devA);
float* b = getImage(width, height, devB);
float* grey = getImage(width, height, devGrey);
writeTextImage("L.txt", width, height, L);
writeTextImage("a.txt", width, height, a);
writeTextImage("b.txt", width, height, b);
writeTextImage("grey.txt", width, height, grey);
int border = 2;
float* devBorderedGrey;
mirrorImage(width, height, border, devGrey, &devBorderedGrey);
int borderWidth = 2 * border + width;
int borderHeight = 2 * border + height;
float* borderedGrey = getImage(borderWidth, borderHeight, devBorderedGrey);
writeTextImage("borderedGrey.txt", borderWidth, borderHeight, borderedGrey);
cutSavePGMf("borderedGrey.pgm", borderedGrey, borderWidth, borderHeight);
cutSavePGMf("grey.pgm", grey, width, height);
int* devQuantizedBorderedGrey;
quantizeImage(borderWidth, borderHeight, 25, devBorderedGrey, &devQuantizedBorderedGrey);
int* quantizedBorderedGrey = getImage(borderWidth, borderHeight, devQuantizedBorderedGrey);
writeTextImage("quantizedBorderedGrey.txt", borderWidth, borderHeight, quantizedBorderedGrey);
}
| 3a59749f0632abc17ab58a98c9da7af328e9af0e.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include "convert.h"
#include "stencilMVM.h"
#include <cutil.h>
float* getImage(uint width, uint height, float* devImage) {
int imageSize = width * height * sizeof(float);
float* result = (float*)malloc(imageSize);
CUDA_SAFE_CALL(cudaMemcpy(result, devImage, imageSize, cudaMemcpyDeviceToHost));
return result;
}
int* getImage(uint width, uint height, int* devImage) {
int imageSize = width * height * sizeof(int);
int* result = (int*)malloc(imageSize);
CUDA_SAFE_CALL(cudaMemcpy(result, devImage, imageSize, cudaMemcpyDeviceToHost));
return result;
}
void writeTextImage(const char* filename, uint width, uint height, float* image) {
FILE* fp = fopen(filename, "w");
for(int row = 0; row < height; row++) {
for(int col = 0; col < width; col++) {
fprintf(fp, "%f ", image[row * width + col]);
}
fprintf(fp, "\n");
}
fclose(fp);
}
void writeTextImage(const char* filename, uint width, uint height, int* image) {
FILE* fp = fopen(filename, "w");
for(int row = 0; row < height; row++) {
for(int col = 0; col < width; col++) {
fprintf(fp, "%d ", image[row * width + col]);
}
fprintf(fp, "\n");
}
fclose(fp);
}
int main(int argc, char** argv) {
cuInit(0);
chooseLargestGPU(true);
char* filename = "colors.ppm";
unsigned int width;
unsigned int height;
unsigned int* devRgbU;
loadPPM_rgbU(filename, &width, &height, &devRgbU);
printf("Width: %i, height: %i\n", width, height);
float* devL;
float* devA;
float* devB;
float* devGrey;
rgbUtoLab3F(width, height, 2.5, devRgbU, &devL, &devA, &devB);
rgbUtoGreyF(width, height, devRgbU, &devGrey);
normalizeLab(width, height, devL, devA, devB);
float* L = getImage(width, height, devL);
float* a = getImage(width, height, devA);
float* b = getImage(width, height, devB);
float* grey = getImage(width, height, devGrey);
writeTextImage("L.txt", width, height, L);
writeTextImage("a.txt", width, height, a);
writeTextImage("b.txt", width, height, b);
writeTextImage("grey.txt", width, height, grey);
int border = 2;
float* devBorderedGrey;
mirrorImage(width, height, border, devGrey, &devBorderedGrey);
int borderWidth = 2 * border + width;
int borderHeight = 2 * border + height;
float* borderedGrey = getImage(borderWidth, borderHeight, devBorderedGrey);
writeTextImage("borderedGrey.txt", borderWidth, borderHeight, borderedGrey);
cutSavePGMf("borderedGrey.pgm", borderedGrey, borderWidth, borderHeight);
cutSavePGMf("grey.pgm", grey, width, height);
int* devQuantizedBorderedGrey;
quantizeImage(borderWidth, borderHeight, 25, devBorderedGrey, &devQuantizedBorderedGrey);
int* quantizedBorderedGrey = getImage(borderWidth, borderHeight, devQuantizedBorderedGrey);
writeTextImage("quantizedBorderedGrey.txt", borderWidth, borderHeight, quantizedBorderedGrey);
}
|
5d656b0847114510ffa9c04d8d7ad670ce5ba878.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "__fillToInds3D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float A = 2;
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
int ldb = 1;
int rdb = 1;
int *I = NULL;
hipMalloc(&I, XSIZE*YSIZE);
int nrows = 1;
int *J = NULL;
hipMalloc(&J, XSIZE*YSIZE);
int ncols = 1;
int *K = NULL;
hipMalloc(&K, XSIZE*YSIZE);
int nk = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((__fillToInds3D), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,ldb,rdb,I,nrows,J,ncols,K,nk);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((__fillToInds3D), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,ldb,rdb,I,nrows,J,ncols,K,nk);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((__fillToInds3D), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,ldb,rdb,I,nrows,J,ncols,K,nk);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5d656b0847114510ffa9c04d8d7ad670ce5ba878.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "__fillToInds3D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float A = 2;
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
int ldb = 1;
int rdb = 1;
int *I = NULL;
cudaMalloc(&I, XSIZE*YSIZE);
int nrows = 1;
int *J = NULL;
cudaMalloc(&J, XSIZE*YSIZE);
int ncols = 1;
int *K = NULL;
cudaMalloc(&K, XSIZE*YSIZE);
int nk = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
__fillToInds3D<<<gridBlock,threadBlock>>>(A,B,ldb,rdb,I,nrows,J,ncols,K,nk);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
__fillToInds3D<<<gridBlock,threadBlock>>>(A,B,ldb,rdb,I,nrows,J,ncols,K,nk);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
__fillToInds3D<<<gridBlock,threadBlock>>>(A,B,ldb,rdb,I,nrows,J,ncols,K,nk);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e8fa306e5e70315c3765864a71d733a47fbdd2ed.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "normal_eqs_flow_weighted_GPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_CO = NULL;
hipMalloc(&d_CO, XSIZE*YSIZE);
const float2 *d_flow_compact = NULL;
hipMalloc(&d_flow_compact, XSIZE*YSIZE);
const float *d_Zbuffer_flow_compact = NULL;
hipMalloc(&d_Zbuffer_flow_compact, XSIZE*YSIZE);
const int *d_ind_flow_Zbuffer = NULL;
hipMalloc(&d_ind_flow_Zbuffer, XSIZE*YSIZE);
float fx = 1;
float fy = 1;
float ox = 1;
float oy = 1;
int n_rows = 1;
int n_cols = 1;
const int *d_n_values_flow = NULL;
hipMalloc(&d_n_values_flow, XSIZE*YSIZE);
const int *d_start_ind_flow = NULL;
hipMalloc(&d_start_ind_flow, XSIZE*YSIZE);
const float *d_abs_res_scales = NULL;
hipMalloc(&d_abs_res_scales, XSIZE*YSIZE);
float w_flow = 1;
float w_ar_flow = 1;
const float *d_dTR = NULL;
hipMalloc(&d_dTR, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((normal_eqs_flow_weighted_GPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_CO,d_flow_compact,d_Zbuffer_flow_compact,d_ind_flow_Zbuffer,fx,fy,ox,oy,n_rows,n_cols,d_n_values_flow,d_start_ind_flow,d_abs_res_scales,w_flow,w_ar_flow,d_dTR);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((normal_eqs_flow_weighted_GPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_CO,d_flow_compact,d_Zbuffer_flow_compact,d_ind_flow_Zbuffer,fx,fy,ox,oy,n_rows,n_cols,d_n_values_flow,d_start_ind_flow,d_abs_res_scales,w_flow,w_ar_flow,d_dTR);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((normal_eqs_flow_weighted_GPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_CO,d_flow_compact,d_Zbuffer_flow_compact,d_ind_flow_Zbuffer,fx,fy,ox,oy,n_rows,n_cols,d_n_values_flow,d_start_ind_flow,d_abs_res_scales,w_flow,w_ar_flow,d_dTR);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e8fa306e5e70315c3765864a71d733a47fbdd2ed.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "normal_eqs_flow_weighted_GPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_CO = NULL;
cudaMalloc(&d_CO, XSIZE*YSIZE);
const float2 *d_flow_compact = NULL;
cudaMalloc(&d_flow_compact, XSIZE*YSIZE);
const float *d_Zbuffer_flow_compact = NULL;
cudaMalloc(&d_Zbuffer_flow_compact, XSIZE*YSIZE);
const int *d_ind_flow_Zbuffer = NULL;
cudaMalloc(&d_ind_flow_Zbuffer, XSIZE*YSIZE);
float fx = 1;
float fy = 1;
float ox = 1;
float oy = 1;
int n_rows = 1;
int n_cols = 1;
const int *d_n_values_flow = NULL;
cudaMalloc(&d_n_values_flow, XSIZE*YSIZE);
const int *d_start_ind_flow = NULL;
cudaMalloc(&d_start_ind_flow, XSIZE*YSIZE);
const float *d_abs_res_scales = NULL;
cudaMalloc(&d_abs_res_scales, XSIZE*YSIZE);
float w_flow = 1;
float w_ar_flow = 1;
const float *d_dTR = NULL;
cudaMalloc(&d_dTR, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
normal_eqs_flow_weighted_GPU<<<gridBlock,threadBlock>>>(d_CO,d_flow_compact,d_Zbuffer_flow_compact,d_ind_flow_Zbuffer,fx,fy,ox,oy,n_rows,n_cols,d_n_values_flow,d_start_ind_flow,d_abs_res_scales,w_flow,w_ar_flow,d_dTR);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
normal_eqs_flow_weighted_GPU<<<gridBlock,threadBlock>>>(d_CO,d_flow_compact,d_Zbuffer_flow_compact,d_ind_flow_Zbuffer,fx,fy,ox,oy,n_rows,n_cols,d_n_values_flow,d_start_ind_flow,d_abs_res_scales,w_flow,w_ar_flow,d_dTR);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
normal_eqs_flow_weighted_GPU<<<gridBlock,threadBlock>>>(d_CO,d_flow_compact,d_Zbuffer_flow_compact,d_ind_flow_Zbuffer,fx,fy,ox,oy,n_rows,n_cols,d_n_values_flow,d_start_ind_flow,d_abs_res_scales,w_flow,w_ar_flow,d_dTR);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9eb78c2c422261fbc066c31c9d855f59a7f35105.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2020 GISBDW. All rights reserved.
#include "bnb_gpu.hpp"
// clang-format on
#include <hip/hip_runtime_api.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <algorithm>
#include <boost/graph/iteration_macros.hpp>
#include <memory>
namespace td {
namespace {
auto constexpr kThreads = 64;
auto constexpr kBlocks = 2048;
auto constexpr kBruteForceLen = 6;
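// Flood-fills connected components over the CSR graph (offsets / out_edges).
// Convention used throughout this file: component_belong_info[v] == -2 means
// "not yet assigned", -1 means "already eliminated", and values >= 0 are
// component ids. Returns the number of components found.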
__device__ int GetComponents(int8_t* component_belong_info,
int n,
int const* const offsets,
int const* const out_edges) {
int ncomponent = 0;
for (int v = 0; v < n; ++v) {
if (component_belong_info[v] == -2) {
component_belong_info[v] = ncomponent;
bool new_vertex_found = true;
while (new_vertex_found) {
new_vertex_found = false;
for (int w = 0; w < n; ++w)
if (component_belong_info[w] == ncomponent) {
for (int i = offsets[w]; i < offsets[w + 1]; ++i) {
if (component_belong_info[out_edges[i]] == -2) {
component_belong_info[out_edges[i]] = ncomponent;
new_vertex_found = true;
}
}
}
}
ncomponent++;
}
}
return ncomponent;
}
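// Replays the elimination of the prefix my_perm[0..perm_len): each time a
// vertex is eliminated, the depth counter of every vertex in its current
// component (including itself) is incremented, the eliminated prefix is marked
// -1, and the remaining graph is re-partitioned into components. Returns the
// component count after the last elimination.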
__device__ int EliminatePermutation(int8_t* component_belong_info,
int8_t* component_depth_info,
int8_t* my_perm,
int perm_len,
int n,
int const* const offsets,
int const* const out_edges) {
int ncomponent;
for (int i = 0; i < n; ++i) {
component_belong_info[i] = -2;
component_depth_info[i] = 0;
}
for (int i = 0; i < perm_len; ++i) {
auto belongs_to = component_belong_info[my_perm[i]];
for (int j = 0; j < n; ++j)
if (component_belong_info[j] == belongs_to)
++component_depth_info[j];
for (int j = 0; j < n; ++j)
component_belong_info[j] = -2;
for (int j = 0; j <= i; ++j)
component_belong_info[my_perm[j]] = -1;
ncomponent = GetComponents(component_belong_info, n, offsets, out_edges);
}
return ncomponent;
}
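// Per-component lower bound on the reachable treedepth. The formula appears to
// invert the edge bound m <= n*(d-1) - d*(d-1)/2 satisfied by any graph with
// n vertices, m edges and treedepth d, giving
//     d >= ceil(0.5 + n - sqrt(0.25 + n*n - n - 2*m)),
// and then adds the elimination depth already accumulated above the component.
// Worked example: a component that is K4 (n = 4, m = 6) at depth 0 yields
// ceil(0.5 + 4 - sqrt(0.25 + 16 - 4 - 12)) = 4, exactly the treedepth of K4.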
__device__ int LowerBound(int8_t* component_belong_info,
int8_t* component_depth_info,
int ncomponent,
int n,
int const* const offsets,
int const* const out_edges) {
int lower_bound = 0;
int current_bound;
for (int i = 0; i < ncomponent; ++i) {
int nverts = 0;
int nedges = 0;
int vert = -1;
for (int v = 0; v < n; ++v) {
if (component_belong_info[v] != i)
continue;
++nverts;
for (int j = offsets[v]; j < offsets[v + 1]; ++j)
if (component_belong_info[out_edges[j]] == i)
++nedges;
vert = v;
}
nedges >>= 1;
current_bound =
::ceil(0.5 + nverts -
std::sqrt(0.25 + nverts * nverts - nverts - 2 * nedges)) +
component_depth_info[vert];
if (current_bound > lower_bound)
lower_bound = current_bound;
}
return lower_bound;
}
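// Finishes a partial permutation with an explicit (stack-free) depth-first
// search: position cur_perm_len is advanced through the vertices not yet in
// the permutation, every extension is pruned with LowerBound() against
// *best_td, complete eliminations update *best_td via atomicMin(), and slots
// are reset to -1 on backtrack.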
__device__ void FinishPermutation(int8_t* const component_belong_info,
int8_t* const component_depth_info,
int8_t* const perm,
int perm_len,
int n,
int const* const offsets,
int const* const out_edges,
int* best_td) {
auto cur_perm_len = perm_len;
int ncomponent =
EliminatePermutation(component_belong_info, component_depth_info, perm,
cur_perm_len, n, offsets, out_edges);
if (LowerBound(component_belong_info, component_depth_info, ncomponent, n,
offsets, out_edges) >= *best_td)
return;
auto* taken = new int8_t[n + 1];
for (int i = 0; i <= n; ++i)
taken[i] = false;
for (int i = 0; i < perm_len; ++i)
taken[perm[i]] = true;
while (cur_perm_len >= perm_len) {
if (ncomponent + cur_perm_len == n) {
int max_td = 0;
for (int i = 0; i < n; ++i)
max_td = max(
max_td, component_depth_info[i] + (component_belong_info[i] != -1));
atomicMin(best_td, max_td);
--cur_perm_len;
}
do {
if (perm[cur_perm_len] != -1) {
taken[perm[cur_perm_len]] = false;
ncomponent =
EliminatePermutation(component_belong_info, component_depth_info,
perm, cur_perm_len, n, offsets, out_edges);
}
do {
++perm[cur_perm_len];
} while (perm[cur_perm_len] < n && taken[perm[cur_perm_len]] &&
component_belong_info[perm[cur_perm_len]] != 0);
if (perm[cur_perm_len] >= n)
break;
ncomponent =
EliminatePermutation(component_belong_info, component_depth_info,
perm, cur_perm_len + 1, n, offsets, out_edges);
if (LowerBound(component_belong_info, component_depth_info, ncomponent, n,
offsets, out_edges) < *best_td)
break;
} while (true);
if (perm[cur_perm_len] >= n) {
perm[cur_perm_len] = -1;
--cur_perm_len;
continue;
}
taken[perm[cur_perm_len]] = true;
++cur_perm_len;
}
delete[] taken;
}
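// One thread per partial permutation read from `in`. Dynamic shared memory
// holds three int8_t arrays of blockDim.x * n bytes each (permutation,
// per-vertex depth, per-vertex component id). Permutations whose LowerBound()
// already reaches *best_td are discarded; those whose remaining components are
// single vertices finalize a treedepth and update *best_td via atomicMin();
// otherwise the permutation is either finished by brute force (once the whole
// block is within kBruteForceLen of completion) or extended by one vertex of
// component 0, each child being pushed onto the global stack `buf` through
// atomicAdd(stack_head, 1).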
__global__ void GenerateKernel(int* const in,
int* const buf,
int* stack_head,
int const n,
int const* const offsets,
int const* const out_edges,
int* best_td) {
extern __shared__ int8_t buf_shared[];
int8_t* perm = &buf_shared[threadIdx.x * n];
for (int i = 0; i < n; ++i)
buf_shared[i * blockDim.x + threadIdx.x] = -1;
__syncthreads();
for (int i = threadIdx.x; i < blockDim.x * n; i += blockDim.x)
buf_shared[i] = in[i + blockIdx.x * blockDim.x * n];
__syncthreads();
int perm_len = -1;
while (perm_len < n && perm[++perm_len] != -1) {
}
int8_t* component_depth_info = &buf_shared[threadIdx.x * n + blockDim.x * n];
int8_t* component_belong_info =
&buf_shared[threadIdx.x * n + 2 * blockDim.x * n];
for (int i = 0; i < n; ++i) {
component_belong_info[i] = 0;
component_depth_info[i] = 0;
}
int ncomponent = 0;
for (int i = 0; i < perm_len; ++i) {
ncomponent = 0;
auto belongs_to = component_belong_info[perm[i]];
for (int j = 0; j < n; ++j)
if (component_belong_info[j] == belongs_to)
++component_depth_info[j];
for (int j = 0; j < n; ++j)
component_belong_info[j] = -2;
for (int j = 0; j <= i; ++j)
component_belong_info[perm[j]] = -1;
ncomponent = GetComponents(component_belong_info, n, offsets, out_edges);
}
if (LowerBound(component_belong_info, component_depth_info, ncomponent, n,
offsets, out_edges) >= *best_td)
return;
if (ncomponent + perm_len == n) {
int max_td = 0;
for (int i = 0; i < n; ++i)
max_td = max(max_td,
component_depth_info[i] + (component_belong_info[i] != -1));
atomicMin(best_td, max_td);
return;
}
  // try to insert new vertices into the permutation
if (__syncthreads_and(perm_len > n - kBruteForceLen))
return FinishPermutation(component_belong_info, component_depth_info, perm,
perm_len, n, offsets, out_edges, best_td);
for (int i = 0; i < n; ++i) {
int j;
for (j = 0; j < n && perm[j] != -1; ++j) {
if (i == perm[j])
break;
}
if (j < n && perm[j] == -1 && component_belong_info[i] == 0) {
perm[j] = i;
auto new_perm_index = atomicAdd(stack_head, 1);
for (int k = 0; k < n; ++k) {
buf[new_perm_index * n + k] = perm[k];
}
perm[j] = -1;
}
}
}
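// Drains permutations from near the top of the global stack: each thread picks
// the entry at an offset derived from stack_head, blockIdx and threadIdx,
// copies it into shared memory, clears the consumed slot back to -1 and
// completes it with FinishPermutation(); the host decrements stack_head by the
// number of permutations consumed.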
__global__ void BruteForceKernel(int* const buf,
int stack_head,
int const n,
int const* const offsets,
int const* const out_edges,
int* best_td) {
auto my_perm_index = stack_head - blockIdx.x * blockDim.x - 1;
extern __shared__ int8_t buf_shared[];
int8_t* perm = &buf_shared[threadIdx.x * n];
int8_t* component_depth_info = &buf_shared[threadIdx.x * n + blockDim.x * n];
int8_t* component_belong_info =
&buf_shared[threadIdx.x * n + 2 * blockDim.x * n];
for (int i = 0; i < n; ++i)
buf_shared[i * blockDim.x + threadIdx.x] = -1;
__syncthreads();
for (int i = threadIdx.x; i < blockDim.x * n; i += blockDim.x) {
buf_shared[i] = buf[my_perm_index * n + i];
buf[my_perm_index * n + i] = -1;
}
__syncthreads();
int perm_len = -1;
while (perm_len < n && perm[++perm_len] != -1) {
}
return FinishPermutation(component_belong_info, component_depth_info, perm,
perm_len, n, offsets, out_edges, best_td);
}
} // namespace
int BnBGPU::Run(BoostGraph const& g, std::size_t heur_td) {
auto n = boost::num_vertices(g);
std::size_t global_mem;
hipMemGetInfo(&global_mem, nullptr);
global_mem *= 0.9;
global_mem /= sizeof(int);
thrust::device_vector<int> buf((global_mem / n) * n, -1);
for (int i = 0; i < n; ++i)
buf[i * n] = i;
thrust::device_vector<int> temporary_buf(kThreads * kBlocks * n, -1);
thrust::device_vector<int> stack_head(1, n);
thrust::device_vector<int> best_td(1, heur_td);
thrust::device_vector<int> offsets(n + 1, 0);
thrust::device_vector<int> out_edge(2 * boost::num_edges(g), 0);
int offset = 0;
for (int v = 0; v < n; ++v) {
offsets[v] = offset;
BGL_FORALL_ADJ_T(v, neigh, g, BnBGPU::BoostGraph) {
out_edge[offset++] = neigh;
}
}
offsets[n] = 2 * boost::num_edges(g);
int perms = 0;
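  // Roughly: GenerateKernel keeps expanding partial permutations breadth-first
  // while the device stack `buf` (sized from ~90% of free GPU memory) still
  // has room for another wave of children; once it gets close to full,
  // BruteForceKernel finishes permutations from the top of the stack until
  // generation can resume. `perms` only counts how many permutations were
  // dispatched.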
while (stack_head[0] > 0) {
while (stack_head[0] > 0 &&
kThreads * kBlocks * n < buf.size() / n - stack_head[0]) {
int sh = stack_head[0];
// std::cout << sh << std::endl;
if (sh < kThreads) {
thrust::copy(thrust::device, buf.begin(),
buf.begin() + stack_head[0] * n, temporary_buf.begin());
thrust::fill(thrust::device, buf.begin(),
buf.begin() + stack_head[0] * n, -1);
stack_head[0] = 0;
hipLaunchKernelGGL(( GenerateKernel), dim3(1), dim3(sh), 3 * sh * n * sizeof(int8_t), 0,
thrust::raw_pointer_cast(temporary_buf.data()),
thrust::raw_pointer_cast(buf.data()),
thrust::raw_pointer_cast(stack_head.data()), n,
thrust::raw_pointer_cast(offsets.data()),
thrust::raw_pointer_cast(out_edge.data()),
thrust::raw_pointer_cast(best_td.data()));
perms += sh;
} else if (sh < kBlocks * kThreads) {
thrust::copy(thrust::device, buf.begin(),
buf.begin() + stack_head[0] * n, temporary_buf.begin());
thrust::fill(thrust::device, buf.begin(),
buf.begin() + stack_head[0] * n, -1);
stack_head[0] = 0;
hipLaunchKernelGGL(( GenerateKernel), dim3(sh / kThreads), dim3(kThreads),
3 * kThreads * n * sizeof(int8_t), 0,
thrust::raw_pointer_cast(temporary_buf.data()),
thrust::raw_pointer_cast(buf.data()),
thrust::raw_pointer_cast(stack_head.data()), n,
thrust::raw_pointer_cast(offsets.data()),
thrust::raw_pointer_cast(out_edge.data()),
thrust::raw_pointer_cast(best_td.data()));
perms += (sh / kThreads) * kThreads;
} else {
thrust::copy(thrust::device,
buf.begin() + stack_head[0] * n - kThreads * kBlocks * n,
buf.begin() + stack_head[0] * n, temporary_buf.begin());
thrust::fill(thrust::device,
buf.begin() + stack_head[0] * n - kThreads * kBlocks * n,
buf.begin() + stack_head[0] * n, -1);
stack_head[0] -= kThreads * kBlocks;
hipLaunchKernelGGL(( GenerateKernel), dim3(kBlocks), dim3(kThreads),
3 * kThreads * n * sizeof(int8_t), 0,
thrust::raw_pointer_cast(temporary_buf.data()),
thrust::raw_pointer_cast(buf.data()),
thrust::raw_pointer_cast(stack_head.data()), n,
thrust::raw_pointer_cast(offsets.data()),
thrust::raw_pointer_cast(out_edge.data()),
thrust::raw_pointer_cast(best_td.data()));
perms += kBlocks * kThreads;
}
auto err = hipStreamSynchronize(0);
if (err)
std::cout << hipGetErrorString(err) << std::endl;
}
while (stack_head[0] > 0 &&
kThreads * kBlocks * n > buf.size() / n - stack_head[0]) {
int sh = stack_head[0];
// std::cout << sh << std::endl;
if (sh < kThreads) {
hipLaunchKernelGGL(( BruteForceKernel), dim3(1), dim3(sh), 3 * sh * n * sizeof(int8_t), 0,
thrust::raw_pointer_cast(buf.data()), sh, n,
thrust::raw_pointer_cast(offsets.data()),
thrust::raw_pointer_cast(out_edge.data()),
thrust::raw_pointer_cast(best_td.data()));
perms += sh;
stack_head[0] -= sh;
} else if (sh < kBlocks * kThreads) {
hipLaunchKernelGGL(( BruteForceKernel), dim3(sh / kThreads), dim3(kThreads),
3 * kThreads * n * sizeof(int8_t), 0,
thrust::raw_pointer_cast(buf.data()), sh, n,
thrust::raw_pointer_cast(offsets.data()),
thrust::raw_pointer_cast(out_edge.data()),
thrust::raw_pointer_cast(best_td.data()));
perms += (sh / kThreads) * kThreads;
stack_head[0] -= (sh / kThreads) * kThreads;
} else {
hipLaunchKernelGGL(( BruteForceKernel), dim3(kBlocks), dim3(kThreads),
3 * kThreads * n * sizeof(int8_t), 0,
thrust::raw_pointer_cast(buf.data()), sh, n,
thrust::raw_pointer_cast(offsets.data()),
thrust::raw_pointer_cast(out_edge.data()),
thrust::raw_pointer_cast(best_td.data()));
perms += kBlocks * kThreads;
stack_head[0] -= kBlocks * kThreads;
}
auto err = hipStreamSynchronize(0);
if (err)
std::cout << hipGetErrorString(err) << std::endl;
}
}
return best_td[0];
}
} // namespace td
| 9eb78c2c422261fbc066c31c9d855f59a7f35105.cu | // Copyright 2020 GISBDW. All rights reserved.
#include "bnb_gpu.hpp"
// clang-format on
#include <cuda_runtime_api.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <algorithm>
#include <boost/graph/iteration_macros.hpp>
#include <memory>
namespace td {
namespace {
auto constexpr kThreads = 64;
auto constexpr kBlocks = 2048;
auto constexpr kBruteForceLen = 6;
__device__ int GetComponents(int8_t* component_belong_info,
int n,
int const* const offsets,
int const* const out_edges) {
int ncomponent = 0;
for (int v = 0; v < n; ++v) {
if (component_belong_info[v] == -2) {
component_belong_info[v] = ncomponent;
bool new_vertex_found = true;
while (new_vertex_found) {
new_vertex_found = false;
for (int w = 0; w < n; ++w)
if (component_belong_info[w] == ncomponent) {
for (int i = offsets[w]; i < offsets[w + 1]; ++i) {
if (component_belong_info[out_edges[i]] == -2) {
component_belong_info[out_edges[i]] = ncomponent;
new_vertex_found = true;
}
}
}
}
ncomponent++;
}
}
return ncomponent;
}
__device__ int EliminatePermutation(int8_t* component_belong_info,
int8_t* component_depth_info,
int8_t* my_perm,
int perm_len,
int n,
int const* const offsets,
int const* const out_edges) {
int ncomponent;
for (int i = 0; i < n; ++i) {
component_belong_info[i] = -2;
component_depth_info[i] = 0;
}
for (int i = 0; i < perm_len; ++i) {
auto belongs_to = component_belong_info[my_perm[i]];
for (int j = 0; j < n; ++j)
if (component_belong_info[j] == belongs_to)
++component_depth_info[j];
for (int j = 0; j < n; ++j)
component_belong_info[j] = -2;
for (int j = 0; j <= i; ++j)
component_belong_info[my_perm[j]] = -1;
ncomponent = GetComponents(component_belong_info, n, offsets, out_edges);
}
return ncomponent;
}
__device__ int LowerBound(int8_t* component_belong_info,
int8_t* component_depth_info,
int ncomponent,
int n,
int const* const offsets,
int const* const out_edges) {
int lower_bound = 0;
int current_bound;
for (int i = 0; i < ncomponent; ++i) {
int nverts = 0;
int nedges = 0;
int vert = -1;
for (int v = 0; v < n; ++v) {
if (component_belong_info[v] != i)
continue;
++nverts;
for (int j = offsets[v]; j < offsets[v + 1]; ++j)
if (component_belong_info[out_edges[j]] == i)
++nedges;
vert = v;
}
nedges >>= 1;
current_bound =
std::ceil(0.5 + nverts -
std::sqrt(0.25 + nverts * nverts - nverts - 2 * nedges)) +
component_depth_info[vert];
if (current_bound > lower_bound)
lower_bound = current_bound;
}
return lower_bound;
}
__device__ void FinishPermutation(int8_t* const component_belong_info,
int8_t* const component_depth_info,
int8_t* const perm,
int perm_len,
int n,
int const* const offsets,
int const* const out_edges,
int* best_td) {
auto cur_perm_len = perm_len;
int ncomponent =
EliminatePermutation(component_belong_info, component_depth_info, perm,
cur_perm_len, n, offsets, out_edges);
if (LowerBound(component_belong_info, component_depth_info, ncomponent, n,
offsets, out_edges) >= *best_td)
return;
auto* taken = new int8_t[n + 1];
for (int i = 0; i <= n; ++i)
taken[i] = false;
for (int i = 0; i < perm_len; ++i)
taken[perm[i]] = true;
while (cur_perm_len >= perm_len) {
if (ncomponent + cur_perm_len == n) {
int max_td = 0;
for (int i = 0; i < n; ++i)
max_td = max(
max_td, component_depth_info[i] + (component_belong_info[i] != -1));
atomicMin(best_td, max_td);
--cur_perm_len;
}
do {
if (perm[cur_perm_len] != -1) {
taken[perm[cur_perm_len]] = false;
ncomponent =
EliminatePermutation(component_belong_info, component_depth_info,
perm, cur_perm_len, n, offsets, out_edges);
}
do {
++perm[cur_perm_len];
} while (perm[cur_perm_len] < n && taken[perm[cur_perm_len]] &&
component_belong_info[perm[cur_perm_len]] != 0);
if (perm[cur_perm_len] >= n)
break;
ncomponent =
EliminatePermutation(component_belong_info, component_depth_info,
perm, cur_perm_len + 1, n, offsets, out_edges);
if (LowerBound(component_belong_info, component_depth_info, ncomponent, n,
offsets, out_edges) < *best_td)
break;
} while (true);
if (perm[cur_perm_len] >= n) {
perm[cur_perm_len] = -1;
--cur_perm_len;
continue;
}
taken[perm[cur_perm_len]] = true;
++cur_perm_len;
}
delete[] taken;
}
__global__ void GenerateKernel(int* const in,
int* const buf,
int* stack_head,
int const n,
int const* const offsets,
int const* const out_edges,
int* best_td) {
extern __shared__ int8_t buf_shared[];
int8_t* perm = &buf_shared[threadIdx.x * n];
for (int i = 0; i < n; ++i)
buf_shared[i * blockDim.x + threadIdx.x] = -1;
__syncthreads();
for (int i = threadIdx.x; i < blockDim.x * n; i += blockDim.x)
buf_shared[i] = in[i + blockIdx.x * blockDim.x * n];
__syncthreads();
int perm_len = -1;
while (perm_len < n && perm[++perm_len] != -1) {
}
int8_t* component_depth_info = &buf_shared[threadIdx.x * n + blockDim.x * n];
int8_t* component_belong_info =
&buf_shared[threadIdx.x * n + 2 * blockDim.x * n];
for (int i = 0; i < n; ++i) {
component_belong_info[i] = 0;
component_depth_info[i] = 0;
}
int ncomponent = 0;
for (int i = 0; i < perm_len; ++i) {
ncomponent = 0;
auto belongs_to = component_belong_info[perm[i]];
for (int j = 0; j < n; ++j)
if (component_belong_info[j] == belongs_to)
++component_depth_info[j];
for (int j = 0; j < n; ++j)
component_belong_info[j] = -2;
for (int j = 0; j <= i; ++j)
component_belong_info[perm[j]] = -1;
ncomponent = GetComponents(component_belong_info, n, offsets, out_edges);
}
if (LowerBound(component_belong_info, component_depth_info, ncomponent, n,
offsets, out_edges) >= *best_td)
return;
if (ncomponent + perm_len == n) {
int max_td = 0;
for (int i = 0; i < n; ++i)
max_td = max(max_td,
component_depth_info[i] + (component_belong_info[i] != -1));
atomicMin(best_td, max_td);
return;
}
  // try to insert new vertices
if (__syncthreads_and(perm_len > n - kBruteForceLen))
return FinishPermutation(component_belong_info, component_depth_info, perm,
perm_len, n, offsets, out_edges, best_td);
for (int i = 0; i < n; ++i) {
int j;
for (j = 0; j < n && perm[j] != -1; ++j) {
if (i == perm[j])
break;
}
if (j < n && perm[j] == -1 && component_belong_info[i] == 0) {
perm[j] = i;
auto new_perm_index = atomicAdd(stack_head, 1);
for (int k = 0; k < n; ++k) {
buf[new_perm_index * n + k] = perm[k];
}
perm[j] = -1;
}
}
}
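// Pops one partial permutation per thread from the top of the stack and
// completes it exhaustively with FinishPermutation.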
__global__ void BruteForceKernel(int* const buf,
int stack_head,
int const n,
int const* const offsets,
int const* const out_edges,
int* best_td) {
auto my_perm_index = stack_head - blockIdx.x * blockDim.x - 1;
extern __shared__ int8_t buf_shared[];
int8_t* perm = &buf_shared[threadIdx.x * n];
int8_t* component_depth_info = &buf_shared[threadIdx.x * n + blockDim.x * n];
int8_t* component_belong_info =
&buf_shared[threadIdx.x * n + 2 * blockDim.x * n];
for (int i = 0; i < n; ++i)
buf_shared[i * blockDim.x + threadIdx.x] = -1;
__syncthreads();
for (int i = threadIdx.x; i < blockDim.x * n; i += blockDim.x) {
buf_shared[i] = buf[my_perm_index * n + i];
buf[my_perm_index * n + i] = -1;
}
__syncthreads();
int perm_len = -1;
while (perm_len < n && perm[++perm_len] != -1) {
}
return FinishPermutation(component_belong_info, component_depth_info, perm,
perm_len, n, offsets, out_edges, best_td);
}
} // namespace
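// Host-side driver: keeps a stack of partial permutations in device memory and
// alternates between GenerateKernel (expansion while the buffer has room) and
// BruteForceKernel (completion when it is nearly full), returning the best
// treedepth found.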
int BnBGPU::Run(BoostGraph const& g, std::size_t heur_td) {
auto n = boost::num_vertices(g);
std::size_t global_mem;
cudaMemGetInfo(&global_mem, nullptr);
global_mem *= 0.9;
global_mem /= sizeof(int);
thrust::device_vector<int> buf((global_mem / n) * n, -1);
for (int i = 0; i < n; ++i)
buf[i * n] = i;
thrust::device_vector<int> temporary_buf(kThreads * kBlocks * n, -1);
thrust::device_vector<int> stack_head(1, n);
thrust::device_vector<int> best_td(1, heur_td);
thrust::device_vector<int> offsets(n + 1, 0);
thrust::device_vector<int> out_edge(2 * boost::num_edges(g), 0);
int offset = 0;
for (int v = 0; v < n; ++v) {
offsets[v] = offset;
BGL_FORALL_ADJ_T(v, neigh, g, BnBGPU::BoostGraph) {
out_edge[offset++] = neigh;
}
}
offsets[n] = 2 * boost::num_edges(g);
int perms = 0;
while (stack_head[0] > 0) {
while (stack_head[0] > 0 &&
kThreads * kBlocks * n < buf.size() / n - stack_head[0]) {
int sh = stack_head[0];
// std::cout << sh << std::endl;
if (sh < kThreads) {
thrust::copy(thrust::device, buf.begin(),
buf.begin() + stack_head[0] * n, temporary_buf.begin());
thrust::fill(thrust::device, buf.begin(),
buf.begin() + stack_head[0] * n, -1);
stack_head[0] = 0;
GenerateKernel<<<1, sh, 3 * sh * n * sizeof(int8_t)>>>(
thrust::raw_pointer_cast(temporary_buf.data()),
thrust::raw_pointer_cast(buf.data()),
thrust::raw_pointer_cast(stack_head.data()), n,
thrust::raw_pointer_cast(offsets.data()),
thrust::raw_pointer_cast(out_edge.data()),
thrust::raw_pointer_cast(best_td.data()));
perms += sh;
} else if (sh < kBlocks * kThreads) {
thrust::copy(thrust::device, buf.begin(),
buf.begin() + stack_head[0] * n, temporary_buf.begin());
thrust::fill(thrust::device, buf.begin(),
buf.begin() + stack_head[0] * n, -1);
stack_head[0] = 0;
GenerateKernel<<<sh / kThreads, kThreads,
3 * kThreads * n * sizeof(int8_t)>>>(
thrust::raw_pointer_cast(temporary_buf.data()),
thrust::raw_pointer_cast(buf.data()),
thrust::raw_pointer_cast(stack_head.data()), n,
thrust::raw_pointer_cast(offsets.data()),
thrust::raw_pointer_cast(out_edge.data()),
thrust::raw_pointer_cast(best_td.data()));
perms += (sh / kThreads) * kThreads;
} else {
thrust::copy(thrust::device,
buf.begin() + stack_head[0] * n - kThreads * kBlocks * n,
buf.begin() + stack_head[0] * n, temporary_buf.begin());
thrust::fill(thrust::device,
buf.begin() + stack_head[0] * n - kThreads * kBlocks * n,
buf.begin() + stack_head[0] * n, -1);
stack_head[0] -= kThreads * kBlocks;
GenerateKernel<<<kBlocks, kThreads,
3 * kThreads * n * sizeof(int8_t)>>>(
thrust::raw_pointer_cast(temporary_buf.data()),
thrust::raw_pointer_cast(buf.data()),
thrust::raw_pointer_cast(stack_head.data()), n,
thrust::raw_pointer_cast(offsets.data()),
thrust::raw_pointer_cast(out_edge.data()),
thrust::raw_pointer_cast(best_td.data()));
perms += kBlocks * kThreads;
}
auto err = cudaStreamSynchronize(0);
if (err)
std::cout << cudaGetErrorString(err) << std::endl;
}
while (stack_head[0] > 0 &&
kThreads * kBlocks * n > buf.size() / n - stack_head[0]) {
int sh = stack_head[0];
// std::cout << sh << std::endl;
if (sh < kThreads) {
BruteForceKernel<<<1, sh, 3 * sh * n * sizeof(int8_t)>>>(
thrust::raw_pointer_cast(buf.data()), sh, n,
thrust::raw_pointer_cast(offsets.data()),
thrust::raw_pointer_cast(out_edge.data()),
thrust::raw_pointer_cast(best_td.data()));
perms += sh;
stack_head[0] -= sh;
} else if (sh < kBlocks * kThreads) {
BruteForceKernel<<<sh / kThreads, kThreads,
3 * kThreads * n * sizeof(int8_t)>>>(
thrust::raw_pointer_cast(buf.data()), sh, n,
thrust::raw_pointer_cast(offsets.data()),
thrust::raw_pointer_cast(out_edge.data()),
thrust::raw_pointer_cast(best_td.data()));
perms += (sh / kThreads) * kThreads;
stack_head[0] -= (sh / kThreads) * kThreads;
} else {
BruteForceKernel<<<kBlocks, kThreads,
3 * kThreads * n * sizeof(int8_t)>>>(
thrust::raw_pointer_cast(buf.data()), sh, n,
thrust::raw_pointer_cast(offsets.data()),
thrust::raw_pointer_cast(out_edge.data()),
thrust::raw_pointer_cast(best_td.data()));
perms += kBlocks * kThreads;
stack_head[0] -= kBlocks * kThreads;
}
auto err = cudaStreamSynchronize(0);
if (err)
std::cout << cudaGetErrorString(err) << std::endl;
}
}
return best_td[0];
}
} // namespace td
|
f77868148c20f2deddd6224600a3b7c0f65b0c2e.hip | // !!! This is a file automatically generated by hipify!!!
#include <matazure/tensor>
#include <image_utility.hpp>
using namespace matazure;
typedef pointb<3> rgb;
int main(int argc, char *argv[]) {
//load the image
if (argc < 2){
printf("please input a 3 channel(rbg) image path");
return -1;
}
auto ts_rgb = read_rgb_image(argv[1]);
//choose whether to use CUDA
#ifdef USE_ROCM
auto gts_rgb = mem_clone(ts_rgb, device_tag{});
#else
auto &gts_rgb = ts_rgb;
#endif
//normalize the image pixels
auto glts_rgb_shift_zero = gts_rgb - rgb::all(128);
auto glts_rgb_stride = stride(glts_rgb_shift_zero, 2);
auto glts_rgb_normalized = tensor_cast<pointf<3>>(glts_rgb_stride) / pointf<3>::all(128.0f);
//nothing has actually been computed up to this point; this step fuses the operations above, writes the result into memory, and avoids extra memory overhead
auto gts_rgb_normalized = glts_rgb_normalized.persist();
#ifdef USE_ROCM
hip::device_synchronize();
auto ts_rgb_normalized = mem_clone(gts_rgb_normalized, host_tag{});
#else
auto &ts_rgb_normalized = gts_rgb_normalized;
#endif
//define the image data for the three channels
tensor<float, 2> ts_red(ts_rgb_normalized.shape());
tensor<float, 2> ts_green(ts_rgb_normalized.shape());
tensor<float, 2> ts_blue(ts_rgb_normalized.shape());
//the zip operation returns tuples whose elements are references to the corresponding elements of the three channels above
auto ts_zip_rgb = zip(ts_red, ts_green, ts_blue);
//allow the tuple elements and point<byte, 3> to be converted to each other
auto ts_zip_point = point_view(ts_zip_rgb);
//copy the result into ts_red, ts_green and ts_blue, since the elements of ts_zip_point are references into these three channels
copy(ts_rgb_normalized, ts_zip_point);
//save the raw data
auto output_red_path = argc < 3 ? "red.raw_data" : argv[2];
auto output_green_path = argc < 4 ? "green.raw_data" : argv[3];
auto output_blue_path = argc < 5 ? "blue.raw_data" : argv[4];
io::write_raw_data(output_red_path, ts_red);
io::write_raw_data(output_green_path, ts_green);
io::write_raw_data(output_blue_path, ts_blue);
return 0;
}
| f77868148c20f2deddd6224600a3b7c0f65b0c2e.cu | #include <matazure/tensor>
#include <image_utility.hpp>
using namespace matazure;
typedef pointb<3> rgb;
int main(int argc, char *argv[]) {
//load the image
if (argc < 2){
printf("please input a 3 channel(rbg) image path");
return -1;
}
auto ts_rgb = read_rgb_image(argv[1]);
//choose whether to use CUDA
#ifdef USE_CUDA
auto gts_rgb = mem_clone(ts_rgb, device_tag{});
#else
auto &gts_rgb = ts_rgb;
#endif
//normalize the image pixels
auto glts_rgb_shift_zero = gts_rgb - rgb::all(128);
auto glts_rgb_stride = stride(glts_rgb_shift_zero, 2);
auto glts_rgb_normalized = tensor_cast<pointf<3>>(glts_rgb_stride) / pointf<3>::all(128.0f);
//nothing has actually been computed up to this point; this step fuses the operations above, writes the result into memory, and avoids extra memory overhead
auto gts_rgb_normalized = glts_rgb_normalized.persist();
#ifdef USE_CUDA
cuda::device_synchronize();
auto ts_rgb_normalized = mem_clone(gts_rgb_normalized, host_tag{});
#else
auto &ts_rgb_normalized = gts_rgb_normalized;
#endif
//define the image data for the three channels
tensor<float, 2> ts_red(ts_rgb_normalized.shape());
tensor<float, 2> ts_green(ts_rgb_normalized.shape());
tensor<float, 2> ts_blue(ts_rgb_normalized.shape());
//the zip operation returns tuples whose elements are references to the corresponding elements of the three channels above
auto ts_zip_rgb = zip(ts_red, ts_green, ts_blue);
//allow the tuple elements and point<byte, 3> to be converted to each other
auto ts_zip_point = point_view(ts_zip_rgb);
//copy the result into ts_red, ts_green and ts_blue, since the elements of ts_zip_point are references into these three channels
copy(ts_rgb_normalized, ts_zip_point);
//save the raw data
auto output_red_path = argc < 3 ? "red.raw_data" : argv[2];
auto output_green_path = argc < 4 ? "green.raw_data" : argv[3];
auto output_blue_path = argc < 5 ? "blue.raw_data" : argv[4];
io::write_raw_data(output_red_path, ts_red);
io::write_raw_data(output_green_path, ts_green);
io::write_raw_data(output_blue_path, ts_blue);
return 0;
}
|
c903d70a8ba19b93b115f85f47e6e3dd6fafe6c1.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Xiaomi Corporation (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*/
#include <cstdlib>
#include "k2/csrc/array_ops.h"
#include "k2/csrc/benchmark/benchmark.h"
namespace k2 {
template <typename T>
static BenchmarkStat BenchmarkExclusiveSum(int32_t dim,
DeviceType device_type) {
ContextPtr context;
if (device_type == kCpu) {
context = GetCpuContext();
} else {
K2_CHECK_EQ(device_type, kCuda);
context = GetCudaContext();
}
  int32_t num_iter = std::min(500, 1000000 / dim);
Array1<T> src = RandUniformArray1<T>(context, dim, -1000, 1000, GetSeed());
BenchmarkStat stat;
stat.op_name = "ExclusiveSum";
stat.num_iter = num_iter;
stat.problem_size = dim;
stat.dtype_name = TraitsOf(DtypeOf<T>::dtype).Name();
stat.device_type = device_type;
// there are overloads of ExclusiveSum, so we use an explicit conversion here.
stat.eplased_per_iter =
BenchmarkOp(num_iter, context,
(Array1<T>(*)(const Array1<T> &))(&ExclusiveSum<T>), src);
stat.eplased_per_iter *= 1e6; // from seconds to microseconds
return stat;
}
static BenchmarkStat BenchmarkRowSplitsToRowIds(int32_t dim,
DeviceType device_type) {
ContextPtr context;
if (device_type == kCpu) {
context = GetCpuContext();
} else {
K2_CHECK_EQ(device_type, kCuda);
context = GetCudaContext();
}
  int32_t num_iter = std::min(500, 1000000 / dim);
Array1<int32_t> sizes =
RandUniformArray1<int32_t>(context, dim, 0, 1000, GetSeed());
Array1<int32_t> row_splits = ExclusiveSum(sizes);
Array1<int32_t> row_ids(context, row_splits.Back());
BenchmarkStat stat;
stat.op_name = "RowSplitsToRowIds_" + std::to_string(row_ids.Dim());
stat.num_iter = num_iter;
stat.problem_size = dim;
stat.dtype_name = TraitsOf(DtypeOf<int32_t>::dtype).Name();
stat.device_type = device_type;
// there are overloads of RowSplitsToRowIds,
// so we use an explicit conversion here.
stat.eplased_per_iter =
BenchmarkOp(num_iter, context,
(void (*)(const Array1<int32_t> &, Array1<int32_t> *))(
&RowSplitsToRowIds),
row_splits, &row_ids);
stat.eplased_per_iter *= 1e6; // from seconds to microseconds
return stat;
}
static BenchmarkStat BenchmarkRowIdsToRowSplits(int32_t dim,
DeviceType device_type) {
ContextPtr context;
if (device_type == kCpu) {
context = GetCpuContext();
} else {
K2_CHECK_EQ(device_type, kCuda);
context = GetCudaContext();
}
  int32_t num_iter = std::min(500, 1000000 / dim);
Array1<int32_t> sizes =
RandUniformArray1<int32_t>(context, dim, 0, 1000, GetSeed());
Array1<int32_t> row_splits = ExclusiveSum(sizes);
Array1<int32_t> row_ids(context, row_splits.Back());
RowSplitsToRowIds(row_splits, &row_ids);
BenchmarkStat stat;
stat.op_name = "RowIdsToRowSplits";
stat.num_iter = num_iter;
stat.problem_size = dim;
stat.dtype_name = TraitsOf(DtypeOf<int32_t>::dtype).Name();
stat.device_type = device_type;
// there are overloads of RowIdsToRowSplits,
// so we use an explicit conversion here.
stat.eplased_per_iter =
BenchmarkOp(num_iter, context,
(void (*)(const Array1<int32_t> &, Array1<int32_t> *))(
&RowIdsToRowSplits),
row_ids, &row_splits);
stat.eplased_per_iter *= 1e6; // from seconds to microseconds
return stat;
}
template <typename T>
static void RegisterBenchmarkExclusiveSum(DeviceType device_type) {
std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000,
5000, 10000, 100000};
for (auto s : problems_sizes) {
std::string name = GenerateBenchmarkName<T>("ExclusiveSum", device_type) +
"_" + std::to_string(s);
RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat {
return BenchmarkExclusiveSum<T>(s, device_type);
});
}
}
static void RegisterBenchmarkRowSplitsToRowIds(DeviceType device_type) {
std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000,
5000, 10000, 100000};
for (auto s : problems_sizes) {
std::string name =
GenerateBenchmarkName<int32_t>("RowSplitsToRowIds", device_type) + "_" +
std::to_string(s);
RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat {
return BenchmarkRowSplitsToRowIds(s, device_type);
});
}
}
static void RegisterBenchmarkRowIdsToRowSplits(DeviceType device_type) {
std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000,
5000, 10000, 100000};
for (auto s : problems_sizes) {
std::string name =
GenerateBenchmarkName<int32_t>("RowIdsToRowSplits", device_type) + "_" +
std::to_string(s);
RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat {
return BenchmarkRowIdsToRowSplits(s, device_type);
});
}
}
static void RunArrayOpsBenchmark() {
PrintEnvironmentInfo();
RegisterBenchmarkExclusiveSum<int32_t>(kCpu);
RegisterBenchmarkExclusiveSum<int32_t>(kCuda);
RegisterBenchmarkRowSplitsToRowIds(kCpu);
RegisterBenchmarkRowSplitsToRowIds(kCuda);
RegisterBenchmarkRowIdsToRowSplits(kCpu);
RegisterBenchmarkRowIdsToRowSplits(kCuda);
// Users can set a regular expression via environment
// variable `K2_BENCHMARK_FILTER` such that only benchmarks
// with name matching the pattern are candidates to run.
const char *filter = std::getenv("K2_BENCHMARK_FILTER");
if (filter != nullptr) FilterRegisteredBenchmarks(filter);
std::vector<BenchmarkRun> results = RunBechmarks();
std::cout << BenchmarkRun::GetFieldsName() << "\n";
for (const auto &r : results) {
std::cout << r << "\n";
}
}
} // namespace k2
int main() {
k2::RunArrayOpsBenchmark();
return 0;
}
| c903d70a8ba19b93b115f85f47e6e3dd6fafe6c1.cu | /**
* Copyright (c) 2020 Xiaomi Corporation (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*/
#include <cstdlib>
#include "k2/csrc/array_ops.h"
#include "k2/csrc/benchmark/benchmark.h"
namespace k2 {
template <typename T>
static BenchmarkStat BenchmarkExclusiveSum(int32_t dim,
DeviceType device_type) {
ContextPtr context;
if (device_type == kCpu) {
context = GetCpuContext();
} else {
K2_CHECK_EQ(device_type, kCuda);
context = GetCudaContext();
}
int32_t num_iter = std::min(500, 1000000 / dim);
Array1<T> src = RandUniformArray1<T>(context, dim, -1000, 1000, GetSeed());
BenchmarkStat stat;
stat.op_name = "ExclusiveSum";
stat.num_iter = num_iter;
stat.problem_size = dim;
stat.dtype_name = TraitsOf(DtypeOf<T>::dtype).Name();
stat.device_type = device_type;
// there are overloads of ExclusiveSum, so we use an explicit conversion here.
stat.eplased_per_iter =
BenchmarkOp(num_iter, context,
(Array1<T>(*)(const Array1<T> &))(&ExclusiveSum<T>), src);
stat.eplased_per_iter *= 1e6; // from seconds to microseconds
return stat;
}
static BenchmarkStat BenchmarkRowSplitsToRowIds(int32_t dim,
DeviceType device_type) {
ContextPtr context;
if (device_type == kCpu) {
context = GetCpuContext();
} else {
K2_CHECK_EQ(device_type, kCuda);
context = GetCudaContext();
}
int32_t num_iter = std::min(500, 1000000 / dim);
Array1<int32_t> sizes =
RandUniformArray1<int32_t>(context, dim, 0, 1000, GetSeed());
Array1<int32_t> row_splits = ExclusiveSum(sizes);
Array1<int32_t> row_ids(context, row_splits.Back());
BenchmarkStat stat;
stat.op_name = "RowSplitsToRowIds_" + std::to_string(row_ids.Dim());
stat.num_iter = num_iter;
stat.problem_size = dim;
stat.dtype_name = TraitsOf(DtypeOf<int32_t>::dtype).Name();
stat.device_type = device_type;
// there are overloads of RowSplitsToRowIds,
// so we use an explicit conversion here.
stat.eplased_per_iter =
BenchmarkOp(num_iter, context,
(void (*)(const Array1<int32_t> &, Array1<int32_t> *))(
&RowSplitsToRowIds),
row_splits, &row_ids);
stat.eplased_per_iter *= 1e6; // from seconds to microseconds
return stat;
}
static BenchmarkStat BenchmarkRowIdsToRowSplits(int32_t dim,
DeviceType device_type) {
ContextPtr context;
if (device_type == kCpu) {
context = GetCpuContext();
} else {
K2_CHECK_EQ(device_type, kCuda);
context = GetCudaContext();
}
int32_t num_iter = std::min(500, 1000000 / dim);
Array1<int32_t> sizes =
RandUniformArray1<int32_t>(context, dim, 0, 1000, GetSeed());
Array1<int32_t> row_splits = ExclusiveSum(sizes);
Array1<int32_t> row_ids(context, row_splits.Back());
RowSplitsToRowIds(row_splits, &row_ids);
BenchmarkStat stat;
stat.op_name = "RowIdsToRowSplits";
stat.num_iter = num_iter;
stat.problem_size = dim;
stat.dtype_name = TraitsOf(DtypeOf<int32_t>::dtype).Name();
stat.device_type = device_type;
// there are overloads of RowIdsToRowSplits,
// so we use an explicit conversion here.
stat.eplased_per_iter =
BenchmarkOp(num_iter, context,
(void (*)(const Array1<int32_t> &, Array1<int32_t> *))(
&RowIdsToRowSplits),
row_ids, &row_splits);
stat.eplased_per_iter *= 1e6; // from seconds to microseconds
return stat;
}
template <typename T>
static void RegisterBenchmarkExclusiveSum(DeviceType device_type) {
std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000,
5000, 10000, 100000};
for (auto s : problems_sizes) {
std::string name = GenerateBenchmarkName<T>("ExclusiveSum", device_type) +
"_" + std::to_string(s);
RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat {
return BenchmarkExclusiveSum<T>(s, device_type);
});
}
}
static void RegisterBenchmarkRowSplitsToRowIds(DeviceType device_type) {
std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000,
5000, 10000, 100000};
for (auto s : problems_sizes) {
std::string name =
GenerateBenchmarkName<int32_t>("RowSplitsToRowIds", device_type) + "_" +
std::to_string(s);
RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat {
return BenchmarkRowSplitsToRowIds(s, device_type);
});
}
}
static void RegisterBenchmarkRowIdsToRowSplits(DeviceType device_type) {
std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000,
5000, 10000, 100000};
for (auto s : problems_sizes) {
std::string name =
GenerateBenchmarkName<int32_t>("RowIdsToRowSplits", device_type) + "_" +
std::to_string(s);
RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat {
return BenchmarkRowIdsToRowSplits(s, device_type);
});
}
}
static void RunArrayOpsBenchmark() {
PrintEnvironmentInfo();
RegisterBenchmarkExclusiveSum<int32_t>(kCpu);
RegisterBenchmarkExclusiveSum<int32_t>(kCuda);
RegisterBenchmarkRowSplitsToRowIds(kCpu);
RegisterBenchmarkRowSplitsToRowIds(kCuda);
RegisterBenchmarkRowIdsToRowSplits(kCpu);
RegisterBenchmarkRowIdsToRowSplits(kCuda);
// Users can set a regular expression via environment
// variable `K2_BENCHMARK_FILTER` such that only benchmarks
// with name matching the pattern are candidates to run.
const char *filter = std::getenv("K2_BENCHMARK_FILTER");
if (filter != nullptr) FilterRegisteredBenchmarks(filter);
std::vector<BenchmarkRun> results = RunBechmarks();
std::cout << BenchmarkRun::GetFieldsName() << "\n";
for (const auto &r : results) {
std::cout << r << "\n";
}
}
} // namespace k2
int main() {
k2::RunArrayOpsBenchmark();
return 0;
}
|
4cdb08306f8dfe996fc822deaf3ef4b0e3b71c51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/layers/octree_full_voxel_layer.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
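// Gathers octree node features into a dense voxel grid: each output voxel
// reads the channel value of the octree entry selected by the xyz-to-key
// lookup table; the backward kernel writes gradients back through the same
// mapping.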
template <typename Dtype>
__global__ void octree2voxel_forward(Dtype* top_data,
const Dtype* bottom_data, const int channel, const int bottom_h,
const unsigned* xyz2key, const int voxel_num, const int nthreads) {
CUDA_KERNEL_LOOP(i, nthreads) {
int k = i % voxel_num;
int t = i / voxel_num;
int c = t % channel;
int n = t / channel;
top_data[i] = bottom_data[c * bottom_h + n * voxel_num + xyz2key[k]];
}
}
template <typename Dtype>
__global__ void octree2voxel_backward(const Dtype* top_diff,
Dtype* bottom_diff, const int channel, const int bottom_h,
const unsigned* xyz2key, const int voxel_num, const int nthreads) {
CUDA_KERNEL_LOOP(i, nthreads) {
int k = i % voxel_num;
int t = i / voxel_num;
int c = t % channel;
int n = t / channel;
bottom_diff[c * bottom_h + n * voxel_num + xyz2key[k]] = top_diff[i];
}
}
template <typename Dtype>
void Octree2FullVoxelLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// calc mapping
if (index_mapper_.count() == 0) build_mapping(curr_depth_);
int voxel_num = 1 << 3 * curr_depth_;
int channel = bottom[0]->shape(1);
int bottom_h = bottom[0]->shape(2);
int nthreads = batch_size_ * channel * voxel_num;
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const unsigned* xyz_to_key = index_mapper_.gpu_data();
hipLaunchKernelGGL(( octree2voxel_forward<Dtype>) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
top_data, bottom_data, channel, bottom_h, index_mapper_.gpu_data(), voxel_num, nthreads);
}
template <typename Dtype>
void Octree2FullVoxelLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
int voxel_num = 1 << 3 * curr_depth_;
int channel = bottom[0]->shape(1);
int bottom_h = bottom[0]->shape(2);
int nthreads = batch_size_ * channel * voxel_num;
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const unsigned* xyz_to_key = index_mapper_.gpu_data();
hipLaunchKernelGGL(( octree2voxel_backward<Dtype>) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
top_diff, bottom_diff, channel, bottom_h, index_mapper_.gpu_data(), voxel_num, nthreads);
}
INSTANTIATE_LAYER_GPU_FUNCS(Octree2FullVoxelLayer);
} | 4cdb08306f8dfe996fc822deaf3ef4b0e3b71c51.cu | #include "caffe/layers/octree_full_voxel_layer.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
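// Gathers octree node features into a dense voxel grid: each output voxel
// reads the channel value of the octree entry selected by the xyz-to-key
// lookup table; the backward kernel writes gradients back through the same
// mapping.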
template <typename Dtype>
__global__ void octree2voxel_forward(Dtype* top_data,
const Dtype* bottom_data, const int channel, const int bottom_h,
const unsigned* xyz2key, const int voxel_num, const int nthreads) {
CUDA_KERNEL_LOOP(i, nthreads) {
int k = i % voxel_num;
int t = i / voxel_num;
int c = t % channel;
int n = t / channel;
top_data[i] = bottom_data[c * bottom_h + n * voxel_num + xyz2key[k]];
}
}
template <typename Dtype>
__global__ void octree2voxel_backward(const Dtype* top_diff,
Dtype* bottom_diff, const int channel, const int bottom_h,
const unsigned* xyz2key, const int voxel_num, const int nthreads) {
CUDA_KERNEL_LOOP(i, nthreads) {
int k = i % voxel_num;
int t = i / voxel_num;
int c = t % channel;
int n = t / channel;
bottom_diff[c * bottom_h + n * voxel_num + xyz2key[k]] = top_diff[i];
}
}
template <typename Dtype>
void Octree2FullVoxelLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// calc mapping
if (index_mapper_.count() == 0) build_mapping(curr_depth_);
int voxel_num = 1 << 3 * curr_depth_;
int channel = bottom[0]->shape(1);
int bottom_h = bottom[0]->shape(2);
int nthreads = batch_size_ * channel * voxel_num;
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const unsigned* xyz_to_key = index_mapper_.gpu_data();
octree2voxel_forward<Dtype> <<< CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >>>(
top_data, bottom_data, channel, bottom_h, index_mapper_.gpu_data(), voxel_num, nthreads);
}
template <typename Dtype>
void Octree2FullVoxelLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
int voxel_num = 1 << 3 * curr_depth_;
int channel = bottom[0]->shape(1);
int bottom_h = bottom[0]->shape(2);
int nthreads = batch_size_ * channel * voxel_num;
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const unsigned* xyz_to_key = index_mapper_.gpu_data();
octree2voxel_backward<Dtype> <<< CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >>>(
top_diff, bottom_diff, channel, bottom_h, index_mapper_.gpu_data(), voxel_num, nthreads);
}
INSTANTIATE_LAYER_GPU_FUNCS(Octree2FullVoxelLayer);
} |
f516a70cb7572d1fef66c9378bc6c37eb1fc2b15.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define DIM 1000
#define CUDA_CHECK( err ) (cuda_checker(err, __FILE__, __LINE__))
static void cuda_checker( hipError_t err, const char *file, int line ) {
if (err != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
struct cppComplex {
float r;
float i;
__host__ __device__ cppComplex( float a, float b ) : r(a), i(b) {}
__host__ __device__ float magnitude2( void ) {
return r * r + i * i;
}
__host__ __device__ cppComplex operator*( const cppComplex& a ) {
return cppComplex(r * a.r - i * a.i, i * a.r + r * a.i);
}
__host__ __device__ cppComplex operator+( const cppComplex& a ) {
return cppComplex(r + a.r, i + a.i);
}
};
__host__ __device__ int julia( int x, int y ) {
const float scale = 1.5;
float jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
float jy = scale * (float)(DIM / 2 - y) / (DIM / 2);
cppComplex c(-0.8, 0.156);
cppComplex a(jx, jy);
int i = 0;
for(i = 0; i < 200; i++){
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
void julia_set_cpu() {
unsigned char *pixels = new unsigned char[DIM * DIM];
for (int x = 0; x < DIM; ++x) {
for (int y = 0; y < DIM; ++y) {
pixels[x + y * DIM] = 255 * julia(x, y);
}
}
FILE *f = fopen("julia_cpu.ppm", "wb");
fprintf(f, "P6\n%i %i 255\n", DIM, DIM);
for (int y = 0; y < DIM; y++) {
for (int x = 0; x < DIM; x++) {
fputc(pixels[(y * DIM + x)], f);
fputc(0, f);
fputc(0, f);
}
}
fclose(f);
delete [] pixels;
}
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
/*Begin the GPU part*/
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
__global__ void kernel( unsigned char *ptr ) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x; // flattened pixel index
while (tid < DIM * DIM) {
ptr[tid] = 255 * julia(tid % DIM, tid / DIM);
tid += blockDim.x * gridDim.x;
}
}
void julia_set_gpu() {
unsigned char *pixels = new unsigned char[DIM * DIM]; // for host
unsigned char *dev_bitmap; // for device
CUDA_CHECK(hipMalloc((void**)&dev_bitmap, DIM * DIM * sizeof(unsigned char))); // allocate memory on gpu
CUDA_CHECK(hipMemcpy(dev_bitmap, pixels, DIM * DIM * sizeof(unsigned char), hipMemcpyHostToDevice)); // copy memory to device
int threads_per_block = 1024;
int blocks_per_grid = (DIM * DIM + threads_per_block - 1) / threads_per_block;
hipLaunchKernelGGL(( kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, dev_bitmap); // execute
CUDA_CHECK(hipMemcpy(pixels, dev_bitmap, DIM * DIM * sizeof(unsigned char), hipMemcpyDeviceToHost)); // copy memory to host
// write to file
FILE *f = fopen("julia_gpu.ppm", "wb");
fprintf(f, "P6\n%i %i 255\n", DIM, DIM);
for (int y = 0; y < DIM; y++) {
for (int x = 0; x < DIM; x++) {
fputc(pixels[(y * DIM + x)], f); // 0 .. 255
fputc(0, f);
fputc(0, f);
}
}
fclose(f);
// free memory
CUDA_CHECK(hipFree(dev_bitmap));
delete [] pixels;
}
int main( void ) {
float time;
hipEvent_t start, stop;
// record cpu execution time
CUDA_CHECK(hipEventCreate(&start));
CUDA_CHECK(hipEventCreate(&stop));
CUDA_CHECK(hipEventRecord(start, 0));
julia_set_cpu();
CUDA_CHECK(hipEventRecord(stop, 0));
CUDA_CHECK(hipEventSynchronize(stop));
CUDA_CHECK(hipEventElapsedTime(&time, start, stop));
printf("Time to generate using CPU: %3.1f ms \n", time);
// record gpu execution time
CUDA_CHECK(hipEventCreate(&start));
CUDA_CHECK(hipEventCreate(&stop));
CUDA_CHECK(hipEventRecord(start, 0));
julia_set_gpu();
CUDA_CHECK(hipEventRecord(stop, 0));
CUDA_CHECK(hipEventSynchronize(stop));
CUDA_CHECK(hipEventElapsedTime(&time, start, stop));
printf("Time to generate using GPU: %3.1f ms \n", time);
// flush buffer
hipDeviceReset();
} | f516a70cb7572d1fef66c9378bc6c37eb1fc2b15.cu | #include <stdio.h>
#include <stdlib.h>
#define DIM 1000
#define CUDA_CHECK( err ) (cuda_checker(err, __FILE__, __LINE__))
static void cuda_checker( cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
struct cppComplex {
float r;
float i;
__host__ __device__ cppComplex( float a, float b ) : r(a), i(b) {}
__host__ __device__ float magnitude2( void ) {
return r * r + i * i;
}
__host__ __device__ cppComplex operator*( const cppComplex& a ) {
return cppComplex(r * a.r - i * a.i, i * a.r + r * a.i);
}
__host__ __device__ cppComplex operator+( const cppComplex& a ) {
return cppComplex(r + a.r, i + a.i);
}
};
__host__ __device__ int julia( int x, int y ) {
const float scale = 1.5;
float jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
float jy = scale * (float)(DIM / 2 - y) / (DIM / 2);
cppComplex c(-0.8, 0.156);
cppComplex a(jx, jy);
int i = 0;
for(i = 0; i < 200; i++){
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
void julia_set_cpu() {
unsigned char *pixels = new unsigned char[DIM * DIM];
for (int x = 0; x < DIM; ++x) {
for (int y = 0; y < DIM; ++y) {
pixels[x + y * DIM] = 255 * julia(x, y);
}
}
FILE *f = fopen("julia_cpu.ppm", "wb");
fprintf(f, "P6\n%i %i 255\n", DIM, DIM);
for (int y = 0; y < DIM; y++) {
for (int x = 0; x < DIM; x++) {
fputc(pixels[(y * DIM + x)], f);
fputc(0, f);
fputc(0, f);
}
}
fclose(f);
delete [] pixels;
}
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
/*Begin the GPU part*/
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
__global__ void kernel( unsigned char *ptr ) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x; // flattened pixel index
while (tid < DIM * DIM) {
ptr[tid] = 255 * julia(tid % DIM, tid / DIM);
tid += blockDim.x * gridDim.x;
}
}
void julia_set_gpu() {
unsigned char *pixels = new unsigned char[DIM * DIM]; // for host
unsigned char *dev_bitmap; // for device
CUDA_CHECK(cudaMalloc((void**)&dev_bitmap, DIM * DIM * sizeof(unsigned char))); // allocate memory on gpu
CUDA_CHECK(cudaMemcpy(dev_bitmap, pixels, DIM * DIM * sizeof(unsigned char), cudaMemcpyHostToDevice)); // copy memory to device
int threads_per_block = 1024;
int blocks_per_grid = (DIM * DIM + threads_per_block - 1) / threads_per_block;
kernel<<<blocks_per_grid, threads_per_block>>>(dev_bitmap); // execute
CUDA_CHECK(cudaMemcpy(pixels, dev_bitmap, DIM * DIM * sizeof(unsigned char), cudaMemcpyDeviceToHost)); // copy memory to host
// write to file
FILE *f = fopen("julia_gpu.ppm", "wb");
fprintf(f, "P6\n%i %i 255\n", DIM, DIM);
for (int y = 0; y < DIM; y++) {
for (int x = 0; x < DIM; x++) {
fputc(pixels[(y * DIM + x)], f); // 0 .. 255
fputc(0, f);
fputc(0, f);
}
}
fclose(f);
// free memory
CUDA_CHECK(cudaFree(dev_bitmap));
delete [] pixels;
}
int main( void ) {
float time;
cudaEvent_t start, stop;
// record cpu execution time
CUDA_CHECK(cudaEventCreate(&start));
CUDA_CHECK(cudaEventCreate(&stop));
CUDA_CHECK(cudaEventRecord(start, 0));
julia_set_cpu();
CUDA_CHECK(cudaEventRecord(stop, 0));
CUDA_CHECK(cudaEventSynchronize(stop));
CUDA_CHECK(cudaEventElapsedTime(&time, start, stop));
printf("Time to generate using CPU: %3.1f ms \n", time);
// record gpu execution time
CUDA_CHECK(cudaEventCreate(&start));
CUDA_CHECK(cudaEventCreate(&stop));
CUDA_CHECK(cudaEventRecord(start, 0));
julia_set_gpu();
CUDA_CHECK(cudaEventRecord(stop, 0));
CUDA_CHECK(cudaEventSynchronize(stop));
CUDA_CHECK(cudaEventElapsedTime(&time, start, stop));
printf("Time to generate using GPU: %3.1f ms \n", time);
// flush buffer
cudaDeviceReset();
} |
1580680750b4c9f4d9da9a56dce605834134e42e.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 1
#define TW 4
#define TC 16
#define C 160
#define N 96
#define H 28
#define W 28
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
exit(-1);
}
}
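// Machine-generated direct convolution kernel (3x3 filter on a padded 28x28
// input): each block stages a padded input tile and a slice of the weights in
// shared memory, and each thread accumulates eight outputs in registers
// (compute_local).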
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[8];
__shared__ float pad_temp_shared[1920];
__shared__ float kernel_shared[1152];
float pad_temp_shared_local[64];
float kernel_shared_local[48];
compute_local[(0)] = 0.000000e+00f;
compute_local[(4)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(5)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(6)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
compute_local[(7)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 10; ++rc_outer) {
__syncthreads();
pad_temp_shared[(((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)))] = (((((1 <= ((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 35) % 120) / 30))) && (((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 35) % 120) / 30)) < 29)) && (1 <= ((((int)threadIdx.x) * 35) % 30))) && (((((int)threadIdx.x) * 35) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + (((((int)threadIdx.x) * 35) / 120) * 784)) + (((int)blockIdx.y) * 56)) + ((((((int)threadIdx.x) * 35) % 120) / 30) * 28)) + ((((int)threadIdx.x) * 35) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 1))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 1) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 1) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 1) % 30))) && ((((((int)threadIdx.x) * 35) + 1) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 1) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 1) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 1) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 2))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 2) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 2) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 2) % 30))) && ((((((int)threadIdx.x) * 35) + 2) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 2) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 2) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 2) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 3))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 3) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 3) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 3) % 30))) && ((((((int)threadIdx.x) * 35) + 3) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 3) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 3) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 3) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 4))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 4) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 4) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 4) % 30))) && ((((((int)threadIdx.x) * 35) + 4) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 4) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 4) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 4) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 5))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 5) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 5) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 5) % 30))) && ((((((int)threadIdx.x) * 35) + 5) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 5) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 5) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 5) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 6))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 6) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 6) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 6) % 30))) && ((((((int)threadIdx.x) * 35) + 6) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 6) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 6) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 6) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 7))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 7) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 7) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 7) % 30))) && ((((((int)threadIdx.x) * 35) + 7) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 7) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 7) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 7) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 8))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 8) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 8) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 8) % 30))) && ((((((int)threadIdx.x) * 35) + 8) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 8) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 8) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 8) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 9))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 9) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 9) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 9) % 30))) && ((((((int)threadIdx.x) * 35) + 9) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 9) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 9) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 9) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 10))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 10) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 10) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 10) % 30))) && ((((((int)threadIdx.x) * 35) + 10) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 10) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 10) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 10) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 11))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 11) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 11) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 11) % 30))) && ((((((int)threadIdx.x) * 35) + 11) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 11) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 11) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 11) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 12))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 12) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 12) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 12) % 30))) && ((((((int)threadIdx.x) * 35) + 12) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 12) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 12) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 12) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 13))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 13) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 13) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 13) % 30))) && ((((((int)threadIdx.x) * 35) + 13) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 13) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 13) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 13) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 14))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 14) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 14) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 14) % 30))) && ((((((int)threadIdx.x) * 35) + 14) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 14) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 14) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 14) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 15))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 15) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 15) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 15) % 30))) && ((((((int)threadIdx.x) * 35) + 15) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 15) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 15) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 15) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 16))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 16) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 16) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 16) % 30))) && ((((((int)threadIdx.x) * 35) + 16) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 16) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 16) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 16) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 17))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 17) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 17) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 17) % 30))) && ((((((int)threadIdx.x) * 35) + 17) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 17) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 17) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 17) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 18))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 18) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 18) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 18) % 30))) && ((((((int)threadIdx.x) * 35) + 18) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 18) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 18) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 18) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 19))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 19) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 19) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 19) % 30))) && ((((((int)threadIdx.x) * 35) + 19) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 19) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 19) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 19) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 20))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 20) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 20) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 20) % 30))) && ((((((int)threadIdx.x) * 35) + 20) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 20) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 20) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 20) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 21))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 21) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 21) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 21) % 30))) && ((((((int)threadIdx.x) * 35) + 21) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 21) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 21) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 21) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 22))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 22) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 22) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 22) % 30))) && ((((((int)threadIdx.x) * 35) + 22) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 22) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 22) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 22) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 23))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 23) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 23) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 23) % 30))) && ((((((int)threadIdx.x) * 35) + 23) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 23) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 23) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 23) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 24))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 24) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 24) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 24) % 30))) && ((((((int)threadIdx.x) * 35) + 24) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 24) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 24) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 24) % 30)) - 29))] : 0.000000e+00f);
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 25) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 25) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1895) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 25))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 25) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 25) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 25) % 30))) && ((((((int)threadIdx.x) * 35) + 25) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 25) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 25) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 25) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 26) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 26) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1894) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 26))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 26) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 26) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 26) % 30))) && ((((((int)threadIdx.x) * 35) + 26) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 26) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 26) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 26) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 27) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 27) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1893) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 27))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 27) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 27) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 27) % 30))) && ((((((int)threadIdx.x) * 35) + 27) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 27) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 27) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 27) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 28) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 28) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1892) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 28))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 28) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 28) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 28) % 30))) && ((((((int)threadIdx.x) * 35) + 28) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 28) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 28) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 28) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 29) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 29) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1891) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 29))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 29) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 29) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 29) % 30))) && ((((((int)threadIdx.x) * 35) + 29) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 29) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 29) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 29) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 30) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 35) / 30)) < 63) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1890) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 30))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 30) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 30) % 120) / 30)) < 29)) && (1 <= ((((int)threadIdx.x) * 35) % 30))) && (((((int)threadIdx.x) * 35) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 30) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 30) % 120) / 30) * 28)) + ((((int)threadIdx.x) * 35) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 31) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 31) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1889) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 31))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 31) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 31) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 1) % 30))) && ((((((int)threadIdx.x) * 35) + 1) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 31) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 31) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 1) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 32) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 32) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1888) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 32))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 32) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 32) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 2) % 30))) && ((((((int)threadIdx.x) * 35) + 2) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 32) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 32) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 2) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 33) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 33) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1887) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 33))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 33) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 33) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 3) % 30))) && ((((((int)threadIdx.x) * 35) + 3) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 33) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 33) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 3) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 34) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 34) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1886) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 34))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 34) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 34) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 4) % 30))) && ((((((int)threadIdx.x) * 35) + 4) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 34) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 34) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 4) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
kernel_shared[(((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + (((((int)threadIdx.x) * 7) / 48) * 1440)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 1))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + (((((int)threadIdx.x) * 7) / 48) * 1440)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 2))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + (((((int)threadIdx.x) * 7) / 48) * 1440)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 3))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 4))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 5))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 6))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 7))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 8))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 9))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 10))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 11))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 12))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 13))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 14))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)) + 2))];
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 8) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 128) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 379) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1137) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 15))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 8) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 128) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 379) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1136) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 16))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 8) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 128) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 379) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1135) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 17))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)) + 2))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 8) {
if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 126) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 378) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1134) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 18))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 8) {
if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 126) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 378) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1133) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 19))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 8) {
if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 126) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 378) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1132) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 20))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)) + 2))];
}
}
}
}
}
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 2; ++rc_inner_outer) {
for (int ry_inner_outer = 0; ry_inner_outer < 3; ++ry_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 1))];
pad_temp_shared_local[(2)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 2))];
pad_temp_shared_local[(3)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 3))];
pad_temp_shared_local[(4)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 30))];
pad_temp_shared_local[(5)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 31))];
pad_temp_shared_local[(6)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 32))];
pad_temp_shared_local[(7)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 33))];
pad_temp_shared_local[(8)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 120))];
pad_temp_shared_local[(9)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 121))];
pad_temp_shared_local[(10)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 122))];
pad_temp_shared_local[(11)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 123))];
pad_temp_shared_local[(12)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 150))];
pad_temp_shared_local[(13)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 151))];
pad_temp_shared_local[(14)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 152))];
pad_temp_shared_local[(15)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 153))];
pad_temp_shared_local[(16)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 240))];
pad_temp_shared_local[(17)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 241))];
pad_temp_shared_local[(18)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 242))];
pad_temp_shared_local[(19)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 243))];
pad_temp_shared_local[(20)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 270))];
pad_temp_shared_local[(21)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 271))];
pad_temp_shared_local[(22)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 272))];
pad_temp_shared_local[(23)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 273))];
pad_temp_shared_local[(24)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 360))];
pad_temp_shared_local[(25)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 361))];
pad_temp_shared_local[(26)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 362))];
pad_temp_shared_local[(27)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 363))];
pad_temp_shared_local[(28)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 390))];
pad_temp_shared_local[(29)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 391))];
pad_temp_shared_local[(30)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 392))];
pad_temp_shared_local[(31)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 393))];
pad_temp_shared_local[(32)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 480))];
pad_temp_shared_local[(33)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 481))];
pad_temp_shared_local[(34)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 482))];
pad_temp_shared_local[(35)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 483))];
pad_temp_shared_local[(36)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 510))];
pad_temp_shared_local[(37)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 511))];
pad_temp_shared_local[(38)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 512))];
pad_temp_shared_local[(39)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 513))];
pad_temp_shared_local[(40)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 600))];
pad_temp_shared_local[(41)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 601))];
pad_temp_shared_local[(42)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 602))];
pad_temp_shared_local[(43)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 603))];
pad_temp_shared_local[(44)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 630))];
pad_temp_shared_local[(45)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 631))];
pad_temp_shared_local[(46)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 632))];
pad_temp_shared_local[(47)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 633))];
pad_temp_shared_local[(48)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 720))];
pad_temp_shared_local[(49)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 721))];
pad_temp_shared_local[(50)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 722))];
pad_temp_shared_local[(51)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 723))];
pad_temp_shared_local[(52)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 750))];
pad_temp_shared_local[(53)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 751))];
pad_temp_shared_local[(54)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 752))];
pad_temp_shared_local[(55)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 753))];
pad_temp_shared_local[(56)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 840))];
pad_temp_shared_local[(57)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 841))];
pad_temp_shared_local[(58)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 842))];
pad_temp_shared_local[(59)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 843))];
pad_temp_shared_local[(60)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 870))];
pad_temp_shared_local[(61)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 871))];
pad_temp_shared_local[(62)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 872))];
pad_temp_shared_local[(63)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 873))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)))];
kernel_shared_local[(24)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 576))];
kernel_shared_local[(1)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 1))];
kernel_shared_local[(25)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 577))];
kernel_shared_local[(2)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 2))];
kernel_shared_local[(26)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 578))];
kernel_shared_local[(3)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 9))];
kernel_shared_local[(27)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 585))];
kernel_shared_local[(4)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 10))];
kernel_shared_local[(28)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 586))];
kernel_shared_local[(5)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 11))];
kernel_shared_local[(29)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 587))];
kernel_shared_local[(6)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 18))];
kernel_shared_local[(30)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 594))];
kernel_shared_local[(7)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 19))];
kernel_shared_local[(31)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 595))];
kernel_shared_local[(8)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 20))];
kernel_shared_local[(32)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 596))];
kernel_shared_local[(9)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 27))];
kernel_shared_local[(33)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 603))];
kernel_shared_local[(10)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 28))];
kernel_shared_local[(34)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 604))];
kernel_shared_local[(11)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 29))];
kernel_shared_local[(35)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 605))];
kernel_shared_local[(12)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 36))];
kernel_shared_local[(36)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 612))];
kernel_shared_local[(13)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 37))];
kernel_shared_local[(37)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 613))];
kernel_shared_local[(14)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 38))];
kernel_shared_local[(38)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 614))];
kernel_shared_local[(15)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 45))];
kernel_shared_local[(39)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 621))];
kernel_shared_local[(16)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 46))];
kernel_shared_local[(40)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 622))];
kernel_shared_local[(17)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 47))];
kernel_shared_local[(41)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 623))];
kernel_shared_local[(18)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 54))];
kernel_shared_local[(42)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 630))];
kernel_shared_local[(19)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 55))];
kernel_shared_local[(43)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 631))];
kernel_shared_local[(20)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 56))];
kernel_shared_local[(44)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 632))];
kernel_shared_local[(21)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 63))];
kernel_shared_local[(45)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 639))];
kernel_shared_local[(22)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 64))];
kernel_shared_local[(46)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 640))];
kernel_shared_local[(23)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 65))];
kernel_shared_local[(47)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 641))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(24)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(24)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(24)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(24)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(25)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(25)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(25)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(25)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(26)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(26)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(2)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(26)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(26)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(27)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(3)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(27)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(3)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(27)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(3)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(27)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(28)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(4)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(28)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(4)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(28)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(4)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(28)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(5)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(29)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(5)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(29)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(5)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(29)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(5)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(29)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(6)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(30)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(6)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(30)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(6)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(30)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(6)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(30)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(7)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(31)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(7)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(31)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(7)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(31)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(7)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(31)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(8)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(32)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(8)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(32)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(8)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(32)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(8)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(32)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(9)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(33)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(9)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(33)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(9)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(33)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(9)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(33)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(10)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(34)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(10)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(34)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(10)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(34)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(10)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(34)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(11)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(35)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(11)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(35)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(11)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(35)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(11)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(35)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(32)] * kernel_shared_local[(12)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(32)] * kernel_shared_local[(36)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(12)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(36)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(36)] * kernel_shared_local[(12)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(36)] * kernel_shared_local[(36)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(12)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(36)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(13)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(37)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(13)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(37)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(13)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(37)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(13)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(37)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(14)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(38)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(35)] * kernel_shared_local[(14)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(35)] * kernel_shared_local[(38)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(14)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(38)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(39)] * kernel_shared_local[(14)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(39)] * kernel_shared_local[(38)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(40)] * kernel_shared_local[(15)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(40)] * kernel_shared_local[(39)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(15)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(39)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(44)] * kernel_shared_local[(15)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(44)] * kernel_shared_local[(39)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(15)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(39)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(16)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(40)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(16)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(40)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(16)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(40)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(16)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(40)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(17)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(41)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(43)] * kernel_shared_local[(17)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(43)] * kernel_shared_local[(41)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(17)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(41)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(47)] * kernel_shared_local[(17)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(47)] * kernel_shared_local[(41)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(48)] * kernel_shared_local[(18)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(48)] * kernel_shared_local[(42)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(49)] * kernel_shared_local[(18)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(49)] * kernel_shared_local[(42)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(52)] * kernel_shared_local[(18)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(52)] * kernel_shared_local[(42)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(18)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(42)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(49)] * kernel_shared_local[(19)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(49)] * kernel_shared_local[(43)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(19)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(43)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(19)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(43)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(19)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(43)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(20)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(44)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(51)] * kernel_shared_local[(20)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(51)] * kernel_shared_local[(44)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(20)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(44)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(55)] * kernel_shared_local[(20)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(55)] * kernel_shared_local[(44)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(56)] * kernel_shared_local[(21)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(56)] * kernel_shared_local[(45)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(21)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(45)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(60)] * kernel_shared_local[(21)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(60)] * kernel_shared_local[(45)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(21)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(45)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(22)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(46)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(22)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(46)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(22)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(46)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(22)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(46)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(23)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(47)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(59)] * kernel_shared_local[(23)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(59)] * kernel_shared_local[(47)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(23)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(47)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(63)] * kernel_shared_local[(23)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(63)] * kernel_shared_local[(47)]));
}
}
}
compute[(((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)))] = compute_local[(0)];
compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3136))] = compute_local[(4)];
compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 1))] = compute_local[(1)];
compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3137))] = compute_local[(5)];
compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 28))] = compute_local[(2)];
compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3164))] = compute_local[(6)];
compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 29))] = compute_local[(3)];
compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3165))] = compute_local[(7)];
}
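// cuDNN-based convolution path (pad 1, stride 1, cross-correlation) selecting the
// IMPLICIT_GEMM forward algorithm; initialize() builds the tensor/filter/conv descriptors,
// queries and allocates the workspace, and uploads an all-ones R*S*C*N filter.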
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
    unsigned int kernelSize = R*S*C*N; // total number of filter weights (R*S taps per input/output channel pair)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
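// Identical setup to ConvGemm, but the workspace query and forward call select the
// non-fused Winograd algorithm (CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED).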
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
    unsigned int kernelSize = R*S*C*N; // total number of filter weights (R*S taps per input/output channel pair)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
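// Identical setup again, selecting the FFT-based forward algorithm (CUDNN_CONVOLUTION_FWD_ALGO_FFT).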
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
    unsigned int kernelSize = R*S*C*N; // total number of filter weights (R*S taps per input/output channel pair)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
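// Copies rows [h_start, h_end) of the W-wide input for TC channels (starting at c_start) into a
// padded (TH+2) x WPAD shared-memory tile. Thread groups of warp_size lanes (indexed by warp_id)
// stride over the channels; h_offset == 1 leaves the first shared row untouched so the top image
// border stays zero, and the +1 column offset provides the left zero-padding column.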
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
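// Atomically accumulates a write_h x write_w block of temp_result into the output tensor at
// (n, h_out_start, w_out_start); the nested switch dispatches to fully unrolled loops for each
// partial-tile size at the right/bottom borders.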
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 3:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 3; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 4:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 4; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
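// Tiled 3x3 convolution kernel: blockIdx.x is split into a channel-group index (tc_id) and a
// row-tile index (th_id); within a block, threads split into column tiles (tw_id) and output
// channels (lane_id = n). The block stages a TC-channel input slab in shared memory and each
// thread accumulates a TH x TW output patch for its channel n in registers (temp_result).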
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
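// L1 difference between two host buffers, used to verify the tiled kernel against the TVM reference (OpenMP reduction).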
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
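// Benchmark driver: build a random input and an all-ones kernel, run the cuDNN baselines, the
// TVM-generated kernel, and the hand-tiled conv2d kernel, then record timings and speedups.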
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
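    // Time the cuDNN baselines (GEMM, non-fused Winograd, FFT) using hip events.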
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
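    // Launch and time the TVM-generated reference kernel (default_function_kernel0).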
dim3 grid(1,14,12);
dim3 block(14,1,4);
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tvm;
hipEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
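    // Reset the output buffer, raise the kernel's dynamic shared-memory limit, and time the hand-tiled conv2d kernel.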
hipMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
hipEventRecord(event_start);
hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
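    // Append the measured times and speedups (relative to the tiled kernel) to the evaluation CSV and echo them to stdout.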
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/A100-layers-eval-modeling.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_tvm, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<
cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
| 1580680750b4c9f4d9da9a56dce605834134e42e.cu | #include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 1
#define TW 4
#define TC 16
#define C 160
#define N 96
#define H 28
#define W 28
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
exit(-1);
}
}
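// TVM-generated direct convolution kernel used as the reference baseline; its schedule
// (tiling, shared-memory staging, unrolling) is hardcoded by the code generator.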
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[8];
__shared__ float pad_temp_shared[1920];
__shared__ float kernel_shared[1152];
float pad_temp_shared_local[64];
float kernel_shared_local[48];
compute_local[(0)] = 0.000000e+00f;
compute_local[(4)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(5)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(6)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
compute_local[(7)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 10; ++rc_outer) {
__syncthreads();
pad_temp_shared[(((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)))] = (((((1 <= ((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 35) % 120) / 30))) && (((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 35) % 120) / 30)) < 29)) && (1 <= ((((int)threadIdx.x) * 35) % 30))) && (((((int)threadIdx.x) * 35) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + (((((int)threadIdx.x) * 35) / 120) * 784)) + (((int)blockIdx.y) * 56)) + ((((((int)threadIdx.x) * 35) % 120) / 30) * 28)) + ((((int)threadIdx.x) * 35) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 1))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 1) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 1) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 1) % 30))) && ((((((int)threadIdx.x) * 35) + 1) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 1) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 1) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 1) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 2))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 2) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 2) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 2) % 30))) && ((((((int)threadIdx.x) * 35) + 2) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 2) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 2) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 2) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 3))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 3) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 3) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 3) % 30))) && ((((((int)threadIdx.x) * 35) + 3) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 3) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 3) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 3) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 4))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 4) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 4) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 4) % 30))) && ((((((int)threadIdx.x) * 35) + 4) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 4) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 4) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 4) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 5))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 5) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 5) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 5) % 30))) && ((((((int)threadIdx.x) * 35) + 5) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 5) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 5) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 5) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 6))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 6) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 6) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 6) % 30))) && ((((((int)threadIdx.x) * 35) + 6) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 6) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 6) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 6) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 7))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 7) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 7) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 7) % 30))) && ((((((int)threadIdx.x) * 35) + 7) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 7) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 7) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 7) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 8))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 8) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 8) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 8) % 30))) && ((((((int)threadIdx.x) * 35) + 8) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 8) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 8) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 8) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 9))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 9) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 9) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 9) % 30))) && ((((((int)threadIdx.x) * 35) + 9) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 9) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 9) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 9) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 10))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 10) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 10) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 10) % 30))) && ((((((int)threadIdx.x) * 35) + 10) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 10) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 10) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 10) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 11))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 11) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 11) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 11) % 30))) && ((((((int)threadIdx.x) * 35) + 11) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 11) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 11) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 11) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 12))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 12) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 12) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 12) % 30))) && ((((((int)threadIdx.x) * 35) + 12) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 12) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 12) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 12) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 13))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 13) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 13) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 13) % 30))) && ((((((int)threadIdx.x) * 35) + 13) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 13) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 13) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 13) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 14))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 14) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 14) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 14) % 30))) && ((((((int)threadIdx.x) * 35) + 14) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 14) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 14) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 14) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 15))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 15) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 15) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 15) % 30))) && ((((((int)threadIdx.x) * 35) + 15) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 15) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 15) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 15) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 16))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 16) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 16) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 16) % 30))) && ((((((int)threadIdx.x) * 35) + 16) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 16) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 16) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 16) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 17))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 17) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 17) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 17) % 30))) && ((((((int)threadIdx.x) * 35) + 17) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 17) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 17) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 17) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 18))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 18) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 18) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 18) % 30))) && ((((((int)threadIdx.x) * 35) + 18) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 18) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 18) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 18) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 19))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 19) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 19) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 19) % 30))) && ((((((int)threadIdx.x) * 35) + 19) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 19) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 19) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 19) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 20))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 20) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 20) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 20) % 30))) && ((((((int)threadIdx.x) * 35) + 20) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 20) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 20) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 20) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 21))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 21) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 21) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 21) % 30))) && ((((((int)threadIdx.x) * 35) + 21) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 21) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 21) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 21) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 22))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 22) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 22) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 22) % 30))) && ((((((int)threadIdx.x) * 35) + 22) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 22) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 22) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 22) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 23))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 23) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 23) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 23) % 30))) && ((((((int)threadIdx.x) * 35) + 23) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 23) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 23) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 23) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 24))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 24) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 24) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 24) % 30))) && ((((((int)threadIdx.x) * 35) + 24) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 24) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 24) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 24) % 30)) - 29))] : 0.000000e+00f);
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 25) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 25) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1895) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 25))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 25) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 25) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 25) % 30))) && ((((((int)threadIdx.x) * 35) + 25) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 25) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 25) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 25) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 26) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 26) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1894) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 26))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 26) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 26) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 26) % 30))) && ((((((int)threadIdx.x) * 35) + 26) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 26) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 26) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 26) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 27) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 27) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1893) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 27))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 27) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 27) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 27) % 30))) && ((((((int)threadIdx.x) * 35) + 27) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 27) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 27) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 27) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 28) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 28) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1892) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 28))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 28) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 28) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 28) % 30))) && ((((((int)threadIdx.x) * 35) + 28) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 28) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 28) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 28) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 29) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 29) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1891) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 29))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 29) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 29) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 29) % 30))) && ((((((int)threadIdx.x) * 35) + 29) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 29) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 29) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 29) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 30) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 35) / 30)) < 63) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1890) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 30))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 30) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 30) % 120) / 30)) < 29)) && (1 <= ((((int)threadIdx.x) * 35) % 30))) && (((((int)threadIdx.x) * 35) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 30) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 30) % 120) / 30) * 28)) + ((((int)threadIdx.x) * 35) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 31) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 31) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1889) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 31))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 31) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 31) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 1) % 30))) && ((((((int)threadIdx.x) * 35) + 1) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 31) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 31) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 1) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 32) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 32) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1888) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 32))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 32) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 32) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 2) % 30))) && ((((((int)threadIdx.x) * 35) + 2) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 32) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 32) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 2) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 33) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 33) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1887) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 33))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 33) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 33) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 3) % 30))) && ((((((int)threadIdx.x) * 35) + 3) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 33) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 33) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 3) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 34) / 120)) < 16) {
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 34) / 30)) < 64) {
if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1886) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 34))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 34) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 34) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 4) % 30))) && ((((((int)threadIdx.x) * 35) + 4) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 34) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 34) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 4) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
kernel_shared[(((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + (((((int)threadIdx.x) * 7) / 48) * 1440)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 1))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + (((((int)threadIdx.x) * 7) / 48) * 1440)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 2))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + (((((int)threadIdx.x) * 7) / 48) * 1440)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 3))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 4))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 5))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 6))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 7))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 8))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 9))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 10))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 11))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 12))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 13))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 14))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)) + 2))];
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 8) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 128) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 379) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1137) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 15))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 8) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 128) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 379) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1136) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 16))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 8) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 128) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 379) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1135) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 17))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)) + 2))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 8) {
if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 126) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 378) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1134) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 18))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 8) {
if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 126) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 378) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1133) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 19))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 8) {
if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 126) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 378) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1132) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 20))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)) + 2))];
}
}
}
}
}
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 2; ++rc_inner_outer) {
for (int ry_inner_outer = 0; ry_inner_outer < 3; ++ry_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 1))];
pad_temp_shared_local[(2)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 2))];
pad_temp_shared_local[(3)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 3))];
pad_temp_shared_local[(4)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 30))];
pad_temp_shared_local[(5)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 31))];
pad_temp_shared_local[(6)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 32))];
pad_temp_shared_local[(7)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 33))];
pad_temp_shared_local[(8)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 120))];
pad_temp_shared_local[(9)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 121))];
pad_temp_shared_local[(10)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 122))];
pad_temp_shared_local[(11)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 123))];
pad_temp_shared_local[(12)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 150))];
pad_temp_shared_local[(13)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 151))];
pad_temp_shared_local[(14)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 152))];
pad_temp_shared_local[(15)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 153))];
pad_temp_shared_local[(16)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 240))];
pad_temp_shared_local[(17)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 241))];
pad_temp_shared_local[(18)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 242))];
pad_temp_shared_local[(19)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 243))];
pad_temp_shared_local[(20)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 270))];
pad_temp_shared_local[(21)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 271))];
pad_temp_shared_local[(22)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 272))];
pad_temp_shared_local[(23)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 273))];
pad_temp_shared_local[(24)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 360))];
pad_temp_shared_local[(25)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 361))];
pad_temp_shared_local[(26)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 362))];
pad_temp_shared_local[(27)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 363))];
pad_temp_shared_local[(28)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 390))];
pad_temp_shared_local[(29)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 391))];
pad_temp_shared_local[(30)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 392))];
pad_temp_shared_local[(31)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 393))];
pad_temp_shared_local[(32)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 480))];
pad_temp_shared_local[(33)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 481))];
pad_temp_shared_local[(34)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 482))];
pad_temp_shared_local[(35)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 483))];
pad_temp_shared_local[(36)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 510))];
pad_temp_shared_local[(37)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 511))];
pad_temp_shared_local[(38)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 512))];
pad_temp_shared_local[(39)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 513))];
pad_temp_shared_local[(40)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 600))];
pad_temp_shared_local[(41)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 601))];
pad_temp_shared_local[(42)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 602))];
pad_temp_shared_local[(43)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 603))];
pad_temp_shared_local[(44)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 630))];
pad_temp_shared_local[(45)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 631))];
pad_temp_shared_local[(46)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 632))];
pad_temp_shared_local[(47)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 633))];
pad_temp_shared_local[(48)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 720))];
pad_temp_shared_local[(49)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 721))];
pad_temp_shared_local[(50)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 722))];
pad_temp_shared_local[(51)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 723))];
pad_temp_shared_local[(52)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 750))];
pad_temp_shared_local[(53)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 751))];
pad_temp_shared_local[(54)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 752))];
pad_temp_shared_local[(55)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 753))];
pad_temp_shared_local[(56)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 840))];
pad_temp_shared_local[(57)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 841))];
pad_temp_shared_local[(58)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 842))];
pad_temp_shared_local[(59)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 843))];
pad_temp_shared_local[(60)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 870))];
pad_temp_shared_local[(61)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 871))];
pad_temp_shared_local[(62)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 872))];
pad_temp_shared_local[(63)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 873))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)))];
kernel_shared_local[(24)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 576))];
kernel_shared_local[(1)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 1))];
kernel_shared_local[(25)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 577))];
kernel_shared_local[(2)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 2))];
kernel_shared_local[(26)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 578))];
kernel_shared_local[(3)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 9))];
kernel_shared_local[(27)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 585))];
kernel_shared_local[(4)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 10))];
kernel_shared_local[(28)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 586))];
kernel_shared_local[(5)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 11))];
kernel_shared_local[(29)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 587))];
kernel_shared_local[(6)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 18))];
kernel_shared_local[(30)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 594))];
kernel_shared_local[(7)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 19))];
kernel_shared_local[(31)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 595))];
kernel_shared_local[(8)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 20))];
kernel_shared_local[(32)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 596))];
kernel_shared_local[(9)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 27))];
kernel_shared_local[(33)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 603))];
kernel_shared_local[(10)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 28))];
kernel_shared_local[(34)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 604))];
kernel_shared_local[(11)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 29))];
kernel_shared_local[(35)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 605))];
kernel_shared_local[(12)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 36))];
kernel_shared_local[(36)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 612))];
kernel_shared_local[(13)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 37))];
kernel_shared_local[(37)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 613))];
kernel_shared_local[(14)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 38))];
kernel_shared_local[(38)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 614))];
kernel_shared_local[(15)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 45))];
kernel_shared_local[(39)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 621))];
kernel_shared_local[(16)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 46))];
kernel_shared_local[(40)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 622))];
kernel_shared_local[(17)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 47))];
kernel_shared_local[(41)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 623))];
kernel_shared_local[(18)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 54))];
kernel_shared_local[(42)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 630))];
kernel_shared_local[(19)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 55))];
kernel_shared_local[(43)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 631))];
kernel_shared_local[(20)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 56))];
kernel_shared_local[(44)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 632))];
kernel_shared_local[(21)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 63))];
kernel_shared_local[(45)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 639))];
kernel_shared_local[(22)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 64))];
kernel_shared_local[(46)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 640))];
kernel_shared_local[(23)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 65))];
kernel_shared_local[(47)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 641))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(24)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(24)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(24)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(24)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(25)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(25)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(25)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(25)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(26)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(26)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(2)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(26)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(26)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(27)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(3)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(27)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(3)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(27)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(3)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(27)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(28)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(4)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(28)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(4)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(28)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(4)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(28)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(5)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(29)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(5)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(29)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(5)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(29)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(5)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(29)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(6)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(30)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(6)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(30)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(6)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(30)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(6)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(30)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(7)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(31)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(7)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(31)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(7)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(31)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(7)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(31)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(8)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(32)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(8)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(32)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(8)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(32)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(8)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(32)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(9)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(33)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(9)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(33)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(9)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(33)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(9)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(33)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(10)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(34)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(10)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(34)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(10)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(34)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(10)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(34)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(11)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(35)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(11)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(35)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(11)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(35)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(11)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(35)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(32)] * kernel_shared_local[(12)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(32)] * kernel_shared_local[(36)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(12)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(36)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(36)] * kernel_shared_local[(12)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(36)] * kernel_shared_local[(36)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(12)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(36)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(13)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(37)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(13)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(37)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(13)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(37)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(13)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(37)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(14)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(38)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(35)] * kernel_shared_local[(14)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(35)] * kernel_shared_local[(38)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(14)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(38)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(39)] * kernel_shared_local[(14)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(39)] * kernel_shared_local[(38)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(40)] * kernel_shared_local[(15)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(40)] * kernel_shared_local[(39)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(15)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(39)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(44)] * kernel_shared_local[(15)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(44)] * kernel_shared_local[(39)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(15)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(39)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(16)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(40)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(16)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(40)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(16)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(40)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(16)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(40)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(17)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(41)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(43)] * kernel_shared_local[(17)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(43)] * kernel_shared_local[(41)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(17)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(41)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(47)] * kernel_shared_local[(17)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(47)] * kernel_shared_local[(41)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(48)] * kernel_shared_local[(18)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(48)] * kernel_shared_local[(42)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(49)] * kernel_shared_local[(18)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(49)] * kernel_shared_local[(42)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(52)] * kernel_shared_local[(18)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(52)] * kernel_shared_local[(42)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(18)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(42)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(49)] * kernel_shared_local[(19)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(49)] * kernel_shared_local[(43)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(19)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(43)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(19)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(43)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(19)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(43)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(20)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(44)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(51)] * kernel_shared_local[(20)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(51)] * kernel_shared_local[(44)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(20)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(44)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(55)] * kernel_shared_local[(20)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(55)] * kernel_shared_local[(44)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(56)] * kernel_shared_local[(21)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(56)] * kernel_shared_local[(45)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(21)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(45)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(60)] * kernel_shared_local[(21)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(60)] * kernel_shared_local[(45)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(21)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(45)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(22)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(46)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(22)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(46)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(22)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(46)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(22)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(46)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(23)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(47)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(59)] * kernel_shared_local[(23)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(59)] * kernel_shared_local[(47)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(23)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(47)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(63)] * kernel_shared_local[(23)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(63)] * kernel_shared_local[(47)]));
}
}
}
compute[(((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)))] = compute_local[(0)];
compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3136))] = compute_local[(4)];
compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 1))] = compute_local[(1)];
compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3137))] = compute_local[(5)];
compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 28))] = compute_local[(2)];
compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3164))] = compute_local[(6)];
compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 29))] = compute_local[(3)];
compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3165))] = compute_local[(7)];
}
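// cuDNN reference implementation: sets up tensor/filter/convolution descriptors for a
// 3x3, pad-1, stride-1 convolution, fills the filter with ones, and runs
// cudnnConvolutionForward with the IMPLICIT_GEMM algorithm.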
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
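// Same cuDNN wrapper as ConvGemm, but timed with the non-fused Winograd algorithm.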
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
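// Same cuDNN wrapper as ConvGemm, but timed with the FFT algorithm.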
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
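// Stages TC channels of a zero-padded (TH+2) x WPAD input tile into shared memory.
// Each warp copies whole channels; h_offset == 1 leaves shared row 0 as the zero top
// halo when the tile starts at image row 0, and the +1 column shift keeps the left halo.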
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
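// Flushes a thread's TH x TW partial sums to global memory with atomicAdd;
// write_h/write_w clip the store for tiles that run past the image border.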
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 3:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 3; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 4:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 4; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
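// Tiled direct 3x3 convolution: blockIdx.x picks an (input-channel block, output-row tile)
// pair, threadIdx.x encodes the output-column tile (tw_id) and the output channel (n).
// Input channels are staged TC at a time in the padded shared tile, each thread
// accumulates a TH x TW patch, and results are written back via switch_write_back.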
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += fabs(x[i] - y[i]);
}
return diff;
}
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
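// Time the three cuDNN algorithms (implicit GEMM, non-fused Winograd, FFT) with CUDA events.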
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
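// Time the TVM-generated kernel with its tuned launch configuration.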
dim3 grid(1,14,12);
dim3 block(14,1,4);
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tvm;
cudaEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemset(device_out, 0, sizeof(float)*N*H*W);
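// Raise the dynamic shared-memory limit for conv2d, then time the hand-written tiled kernel.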
chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
cudaEventRecord(event_start);
conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/A100-layers-eval-modeling.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_tvm, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<
cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
0603d8f2d2e1d1c7490592fc939cc2499b4898cf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
typedef float MatrixVal;
typedef struct matrix {
MatrixVal *values;
unsigned int rows, cols;
} Matrix;
typedef struct input {
Matrix *A, *B;
} Input;
void setMatrixPosition(Matrix *matrix, unsigned int row, unsigned int col, MatrixVal value) {
matrix->values[col + matrix->cols * row] = value;
}
MatrixVal getMatrixPosition(Matrix *matrix, unsigned int row, unsigned int col) {
return matrix->values[col + matrix->cols * row];
}
void setMatrixSize(Matrix *matrix, unsigned int rows, unsigned int cols) {
matrix->values = (MatrixVal *) malloc(rows * cols * sizeof(MatrixVal));
matrix->cols = cols;
matrix->rows = rows;
}
Matrix *newMatrix() {
Matrix *matrix = (Matrix *) malloc(sizeof(Matrix));
return matrix;
}
void deleteMatrix(Matrix *matrix) {
free(matrix->values);
free(matrix);
}
Matrix *readMatrixFrom(FILE *src) {
unsigned int row, col, rows, cols;
MatrixVal value;
Matrix *matrix = newMatrix();
fscanf(src, "%u %u", &rows, &cols);
setMatrixSize(matrix, rows, cols);
for (row = 0; row < rows; row++) {
for (col = 0; col < cols; col++) {
fscanf(src, "%f", &value);
setMatrixPosition(matrix, row, col, value);
}
}
return matrix;
}
void deleteInput(Input input) {
deleteMatrix(input.A);
deleteMatrix(input.B);
}
Input readMatricesFromFiles(char *fileName1, char *fileName2) {
Input input;
FILE *file1, *file2;
file1 = fopen(fileName1, "r");
input.A = readMatrixFrom(file1);
fclose(file1);
file2 = fopen(fileName2, "r");
input.B = readMatrixFrom(file2);
fclose(file2);
return input;
}
Input readMatricesFromStdin() {
Input input;
input.A = readMatrixFrom(stdin);
input.B = readMatrixFrom(stdin);
return input;
}
void printUsage() {
printf("Usage: matrix-multiply <cuda|cpu> [file-with-matrix1 file-with-matrix2]\n");
printf("\nIf files are not passed, matrices are read from stdin.\n");
printf("Input format: n-rows n-cols entries\n");
printf("Output format: n-rows n-cols result-entries\n");
printf("Output is always to stdout\n");
}
void processUsingCuda(Input input) {
}
void processUsingCpu(Input input) {
}
int main(int argc, char **argv) {
Input input;
if (argc == 2) {
input = readMatricesFromStdin();
} else if (argc == 4) {
input = readMatricesFromFiles(argv[2], argv[3]);
} else {
printf("Error: wrong number of arguments: %d\n", argc);
printUsage();
return 1;
}
if (strcmp(argv[1], "cuda") == 0) {
processUsingCuda(input);
} else if (strcmp(argv[1], "cpu") == 0) {
processUsingCpu(input);
} else {
printf("Error: %s is not a valid form of computation\n");
printUsage();
return 2;
}
return 0;
}
| 0603d8f2d2e1d1c7490592fc939cc2499b4898cf.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
typedef float MatrixVal;
typedef struct matrix {
MatrixVal *values;
unsigned int rows, cols;
} Matrix;
typedef struct input {
Matrix *A, *B;
} Input;
void setMatrixPosition(Matrix *matrix, unsigned int row, unsigned int col, MatrixVal value) {
matrix->values[col + matrix->cols * row] = value;
}
MatrixVal getMatrixPosition(Matrix *matrix, unsigned int row, unsigned int col) {
return matrix->values[col + matrix->cols * row];
}
void setMatrixSize(Matrix *matrix, unsigned int rows, unsigned int cols) {
matrix->values = (MatrixVal *) malloc(rows * cols * sizeof(MatrixVal));
matrix->cols = cols;
matrix->rows = rows;
}
Matrix *newMatrix() {
Matrix *matrix = (Matrix *) malloc(sizeof(Matrix));
return matrix;
}
void deleteMatrix(Matrix *matrix) {
free(matrix->values);
free(matrix);
}
Matrix *readMatrixFrom(FILE *src) {
unsigned int row, col, rows, cols;
MatrixVal value;
Matrix *matrix = newMatrix();
fscanf(src, "%u %u", &rows, &cols);
setMatrixSize(matrix, rows, cols);
for (row = 0; row < rows; row++) {
for (col = 0; col < cols; col++) {
fscanf(src, "%f", &value);
setMatrixPosition(matrix, row, col, value);
}
}
return matrix;
}
void deleteInput(Input input) {
deleteMatrix(input.A);
deleteMatrix(input.B);
}
Input readMatricesFromFiles(char *fileName1, char *fileName2) {
Input input;
FILE *file1, *file2;
file1 = fopen(fileName1, "r");
input.A = readMatrixFrom(file1);
fclose(file1);
file2 = fopen(fileName2, "r");
input.B = readMatrixFrom(file2);
fclose(file2);
return input;
}
Input readMatricesFromStdin() {
Input input;
input.A = readMatrixFrom(stdin);
input.B = readMatrixFrom(stdin);
return input;
}
void printUsage() {
printf("Usage: matrix-multiply <cuda|cpu> [file-with-matrix1 file-with-matrix2]\n");
printf("\nIf files are not passed, matrices are read from stdin.\n");
printf("Input format: n-rows n-cols entries\n");
printf("Output format: n-rows n-cols result-entries\n");
printf("Output is always to stdout\n");
}
void processUsingCuda(Input input) {
}
void processUsingCpu(Input input) {
}
int main(int argc, char **argv) {
Input input;
if (argc == 2) {
input = readMatricesFromStdin();
} else if (argc == 4) {
input = readMatricesFromFiles(argv[2], argv[3]);
} else {
printf("Error: wrong number of arguments: %d\n", argc);
printUsage();
return 1;
}
if (strcmp(argv[1], "cuda") == 0) {
processUsingCuda(input);
} else if (strcmp(argv[1], "cpu") == 0) {
processUsingCpu(input);
} else {
printf("Error: %s is not a valid form of computation\n");
printUsage();
return 2;
}
return 0;
}
|
4152a2503f97e3f2e54014f486f6ba48d44fbb79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include "../common/cpu_bitmap.h"
#define INF 2e10f
#define DIM 1024
#define NSPHERES 20
struct Sphere
{
float x, y, z;
float r, g, b;
float radius;
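// Returns the depth at which a ray fired along the z-axis through pixel (ox, oy) hits
// this sphere and writes the normalized z-component of the surface normal to *n for
// shading; returns -INF if the ray misses.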
__device__ float hit(int ox, int oy, float* n)
{
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius)
{
float dz = sqrtf(radius*radius - dx*dx - dy*dy);
*n = dz/radius;
return dz + z;
}
return -INF;
}
};
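// One thread per pixel: test every sphere and keep the colour of the hit with the
// largest z, i.e. the surface closest to the viewer.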
__global__ void kernel(Sphere* spheres, unsigned char* bitmap)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int offset = x + y*blockDim.x*gridDim.x;
float ox = x - DIM/2;
float oy = y - DIM/2;
float r = 0, g = 0, b = 0;
float maxz = -INF;
for (int i = 0; i < NSPHERES; i++)
{
float n;
float z = spheres[i].hit(ox, oy, &n);
if (z > maxz)
{
r = spheres[i].r*n;
g = spheres[i].g*n;
b = spheres[i].b*n;
maxz = z;
}
}
bitmap[offset*4 + 0] = (int)(r*255);
bitmap[offset*4 + 1] = (int)(g*255);
bitmap[offset*4 + 2] = (int)(b*255);
bitmap[offset*4 + 3] = 255;
}
#define rnd(x) (x*rand() / RAND_MAX)
int main()
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
CPUBitmap bitmap(DIM, DIM);
unsigned char* dev_bitmap;
hipMalloc((void**)&dev_bitmap, bitmap.image_size());
Sphere* dev_spheres;
hipMalloc((void**)&dev_spheres, NSPHERES*sizeof(Sphere));
Sphere* spheres = (Sphere*)malloc(NSPHERES*sizeof(Sphere));
for (int i = 0; i < NSPHERES; i++)
{
spheres[i].r = rnd(1.0f);
spheres[i].g = rnd(1.0f);
spheres[i].b = rnd(1.0f);
spheres[i].x = rnd(1000.0f) - 500;
spheres[i].y = rnd(1000.0f) - 500;
spheres[i].z = rnd(1000.0f) - 500;
spheres[i].radius = rnd(100.0f) + 20;
}
hipMemcpy(dev_spheres, spheres, NSPHERES*sizeof(Sphere), hipMemcpyHostToDevice);
dim3 gridDim(DIM/16, DIM/16);
dim3 blockDim(16, 16);
hipLaunchKernelGGL(( kernel), dim3(gridDim), dim3(blockDim), 0, 0, dev_spheres, dev_bitmap);
hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Time used: %.2fms\n", elapsedTime);
hipEventDestroy(start);
hipEventDestroy(stop);
bitmap.display_and_exit();
free(spheres);
hipFree(dev_spheres);
hipFree(dev_bitmap);
return 0;
}
| 4152a2503f97e3f2e54014f486f6ba48d44fbb79.cu | #include <cmath>
#include <cstdio>
#include <cstdlib>
#include "../common/cpu_bitmap.h"
#define INF 2e10f
#define DIM 1024
#define NSPHERES 20
struct Sphere
{
float x, y, z;
float r, g, b;
float radius;
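// Returns the depth at which a ray fired along the z-axis through pixel (ox, oy) hits
// this sphere and writes the normalized z-component of the surface normal to *n for
// shading; returns -INF if the ray misses.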
__device__ float hit(int ox, int oy, float* n)
{
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius)
{
float dz = sqrtf(radius*radius - dx*dx - dy*dy);
*n = dz/radius;
return dz + z;
}
return -INF;
}
};
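// One thread per pixel: test every sphere and keep the colour of the hit with the
// largest z, i.e. the surface closest to the viewer.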
__global__ void kernel(Sphere* spheres, unsigned char* bitmap)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int offset = x + y*blockDim.x*gridDim.x;
float ox = x - DIM/2;
float oy = y - DIM/2;
float r = 0, g = 0, b = 0;
float maxz = -INF;
for (int i = 0; i < NSPHERES; i++)
{
float n;
float z = spheres[i].hit(ox, oy, &n);
if (z > maxz)
{
r = spheres[i].r*n;
g = spheres[i].g*n;
b = spheres[i].b*n;
maxz = z;
}
}
bitmap[offset*4 + 0] = (int)(r*255);
bitmap[offset*4 + 1] = (int)(g*255);
bitmap[offset*4 + 2] = (int)(b*255);
bitmap[offset*4 + 3] = 255;
}
#define rnd(x) (x*rand() / RAND_MAX)
int main()
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
CPUBitmap bitmap(DIM, DIM);
unsigned char* dev_bitmap;
cudaMalloc((void**)&dev_bitmap, bitmap.image_size());
Sphere* dev_spheres;
cudaMalloc((void**)&dev_spheres, NSPHERES*sizeof(Sphere));
Sphere* spheres = (Sphere*)malloc(NSPHERES*sizeof(Sphere));
for (int i = 0; i < NSPHERES; i++)
{
spheres[i].r = rnd(1.0f);
spheres[i].g = rnd(1.0f);
spheres[i].b = rnd(1.0f);
spheres[i].x = rnd(1000.0f) - 500;
spheres[i].y = rnd(1000.0f) - 500;
spheres[i].z = rnd(1000.0f) - 500;
spheres[i].radius = rnd(100.0f) + 20;
}
cudaMemcpy(dev_spheres, spheres, NSPHERES*sizeof(Sphere), cudaMemcpyHostToDevice);
dim3 gridDim(DIM/16, DIM/16);
dim3 blockDim(16, 16);
kernel<<<gridDim, blockDim>>>(dev_spheres, dev_bitmap);
cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Time used: %.2fms\n", elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
bitmap.display_and_exit();
free(spheres);
cudaFree(dev_spheres);
cudaFree(dev_bitmap);
return 0;
}
|
8b2ef93830d396cfeea4200288810ad2fa02a61b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/sgd_op.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace {
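// One block row (blockIdx.y) per row of the sparse gradient: threads stride across the
// row and scatter -learning_rate * grad into the matching row of the dense parameter.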
template <typename T, int block_size>
__global__ void SparseSGDFunctorKernel(const T* selected_rows,
const int64_t* rows,
const T* learning_rate, T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(
tensor_out + index, -1.0 * learning_rate[0] * selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SparseSGDFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input,
const framework::Tensor& learning_rate,
framework::Tensor* output) {
auto in_height = input.height();
auto out_dims = output->dims();
PADDLE_ENFORCE_EQ(in_height, out_dims[0]);
auto& in_value = input.value();
auto& in_rows = input.rows();
int64_t in_row_numel = in_value.numel() / in_rows.size();
PADDLE_ENFORCE_EQ(in_row_numel, output->numel() / in_height);
auto* in_data = in_value.data<T>();
auto* out_data = output->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(1, in_rows.size());
hipLaunchKernelGGL(( SparseSGDFunctorKernel<T, 256>), dim3(grid), dim3(threads), 0, context.stream(),
in_data, in_rows.data(), learning_rate.data<T>(), out_data,
in_row_numel);
}
};
template struct SparseSGDFunctor<platform::CUDADeviceContext, float>;
template struct SparseSGDFunctor<platform::CUDADeviceContext, double>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
sgd, ops::SGDOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::SGDOpKernel<paddle::platform::CUDADeviceContext, double>);
| 8b2ef93830d396cfeea4200288810ad2fa02a61b.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/sgd_op.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace {
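// One block row (blockIdx.y) per row of the sparse gradient: threads stride across the
// row and scatter -learning_rate * grad into the matching row of the dense parameter.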
template <typename T, int block_size>
__global__ void SparseSGDFunctorKernel(const T* selected_rows,
const int64_t* rows,
const T* learning_rate, T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(
tensor_out + index, -1.0 * learning_rate[0] * selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SparseSGDFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input,
const framework::Tensor& learning_rate,
framework::Tensor* output) {
auto in_height = input.height();
auto out_dims = output->dims();
PADDLE_ENFORCE_EQ(in_height, out_dims[0]);
auto& in_value = input.value();
auto& in_rows = input.rows();
int64_t in_row_numel = in_value.numel() / in_rows.size();
PADDLE_ENFORCE_EQ(in_row_numel, output->numel() / in_height);
auto* in_data = in_value.data<T>();
auto* out_data = output->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(1, in_rows.size());
SparseSGDFunctorKernel<T, 256><<<grid, threads, 0, context.stream()>>>(
in_data, in_rows.data(), learning_rate.data<T>(), out_data,
in_row_numel);
}
};
template struct SparseSGDFunctor<platform::CUDADeviceContext, float>;
template struct SparseSGDFunctor<platform::CUDADeviceContext, double>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
sgd, ops::SGDOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::SGDOpKernel<paddle::platform::CUDADeviceContext, double>);
|
3a7e83099f0a585cbda68d730505600caf077d03.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// from quantity_ext.c
__global__ void update(
int N,
double timestep,
double * centroid_values,
double * explicit_update,
double * semi_implicit_update)
{
const int k =
threadIdx.x+threadIdx.y*blockDim.x+
(blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
double denominator, x;
if (k >= N)
return;
// Divide semi_implicit update by conserved quantity
//for (k=0; k<N; k++) {
x = centroid_values[k];
if (x == 0.0) {
semi_implicit_update[k] = 0.0;
} else {
semi_implicit_update[k] /= x;
}
//}
// Explicit updates
//for (k=0; k<N; k++) {
centroid_values[k] += timestep*explicit_update[k];
//}
// Semi implicit updates
//for (k=0; k<N; k++) {
denominator = 1.0 - timestep*semi_implicit_update[k];
if (denominator <= 0.0) {
return;
} else {
//Update conserved_quantities from semi implicit updates
centroid_values[k] /= denominator;
}
//}
// Reset semi_implicit_update here ready for next time step
//memset(semi_implicit_update, 0, N*sizeof(double));
}
| 3a7e83099f0a585cbda68d730505600caf077d03.cu | // from quantity_ext.c
__global__ void update(
int N,
double timestep,
double * centroid_values,
double * explicit_update,
double * semi_implicit_update)
{
const int k =
threadIdx.x+threadIdx.y*blockDim.x+
(blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
double denominator, x;
if (k >= N)
return;
// Divide semi_implicit update by conserved quantity
//for (k=0; k<N; k++) {
x = centroid_values[k];
if (x == 0.0) {
semi_implicit_update[k] = 0.0;
} else {
semi_implicit_update[k] /= x;
}
//}
// Explicit updates
//for (k=0; k<N; k++) {
centroid_values[k] += timestep*explicit_update[k];
//}
// Semi implicit updates
//for (k=0; k<N; k++) {
denominator = 1.0 - timestep*semi_implicit_update[k];
if (denominator <= 0.0) {
return;
} else {
//Update conserved_quantities from semi implicit updates
centroid_values[k] /= denominator;
}
//}
// Reset semi_implicit_update here ready for next time step
//memset(semi_implicit_update, 0, N*sizeof(double));
}
|
3226b6c6ae568fe0b6d96e7851d270631c2a2033.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include <math_functions.h>
extern "C"
__global__ void
cpyTest(float* answer, int size, float cnst)
{
int i=0;
for(i=0;i<size;i++)
answer[i] = i * cnst;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
int sf = sizeof(float);
int size = 1000;
float scale = 2.3f;
float* d_0;
CUDA_SAFE_CALL(hipMalloc( (void**) &d_0, sf * size));
dim3 threads(1, 1);
dim3 grids(1, 1);
// Calling kernel
hipLaunchKernelGGL(( cpyTest), dim3(grids), dim3(threads), 0, 0, d_0, size, scale);
float *h_0 = (float*)malloc(sf*size);
CUDA_SAFE_CALL(hipMemcpy(h_0, d_0, sf*size, hipMemcpyDeviceToHost));
int i;
for(i=0;i<size;i++)
printf("%dth answer = %f\n", i, h_0[i]);
free(h_0);
CUT_EXIT(argc, argv);
}
| 3226b6c6ae568fe0b6d96e7851d270631c2a2033.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cutil.h>
#include <math_functions.h>
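// cpyTest fills answer[i] = i * cnst; main launches it with a 1x1 grid and a 1x1 block,
// so the loop runs on a single GPU thread.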
extern "C"
__global__ void
cpyTest(float* answer, int size, float cnst)
{
int i=0;
for(i=0;i<size;i++)
answer[i] = i * cnst;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
int sf = sizeof(float);
int size = 1000;
float scale = 2.3f;
float* d_0;
CUDA_SAFE_CALL(cudaMalloc( (void**) &d_0, sf * size));
dim3 threads(1, 1);
dim3 grids(1, 1);
// Calling kernel
cpyTest<<<grids, threads>>>(d_0, size, scale);
float *h_0 = (float*)malloc(sf*size);
CUDA_SAFE_CALL(cudaMemcpy(h_0, d_0, sf*size, cudaMemcpyDeviceToHost));
int i;
for(i=0;i<size;i++)
printf("%dth answer = %f\n", i, h_0[i]);
free(h_0);
CUT_EXIT(argc, argv);
}
|
b64efd92427c485110621676f017789287457d20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CONV_SOBEL_SIZE 3
#define CONV_GAUSSIAN_SIZE 5
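// Convolution masks in constant memory: 3x3 Sobel x/y derivative masks and a 5x5 binomial
// (Gaussian) mask whose weights are scaled by 1/255 when applied.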
__constant__ char SOBELX[CONV_SOBEL_SIZE*CONV_SOBEL_SIZE] = {-1,0,1,-2,0,2,-1,0,1};
__constant__ char SOBELY[CONV_SOBEL_SIZE*CONV_SOBEL_SIZE] = {1,2,1,0,0,0,-1,-2,-1};
__constant__ char GAUSSIAN[CONV_GAUSSIAN_SIZE*CONV_GAUSSIAN_SIZE] = {1,4,6,4,1,4,16,24,16,4,6,24,36,24,6,4,16,24,16,4,1,4,6,4,1};
__global__ void rgb_to_grayscale( unsigned char* imageInput,
unsigned char* imageOutput,
int width,
int height,
int yIndexorWidthStep,
int grayWidthStep)
{
//2D Index of current thread
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
//Only valid threads
if((xIndex<width) && (yIndex<height))
{
//Location of pixel in imageInput
const int yIndexor_tid = yIndex * yIndexorWidthStep + (3 * xIndex);
//Location of gray pixel in imageOutput
const int gray_tid = yIndex * grayWidthStep + xIndex;
const unsigned char blue = imageInput[yIndexor_tid];
const unsigned char green = imageInput[yIndexor_tid + 1];
const unsigned char red = imageInput[yIndexor_tid + 2];
//same weights as cv::COLOR_RGB2GRAY
const float gray = red * 0.299f + green * 0.587f + blue * 0.114f;
imageOutput[gray_tid] = static_cast<unsigned char>(gray);
}
}
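// Clamp an integer intensity to the valid 8-bit range [0, 255].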
__device__ unsigned char normalize(int value){
if(value < 0)
value = 0;
else
if(value > 255)
value = 255;
return (unsigned char)value;
}
// Global Sobel filter kernel
__global__ void sobel(unsigned char *imageInput,
unsigned char *imageOutput,
int width,
int height,
unsigned int maskWidth){
//2D Index of current thread
unsigned int xIndex = blockIdx.y*blockDim.y+threadIdx.y;
unsigned int yIndex = blockIdx.x*blockDim.x+threadIdx.x;
int temp_sobel_x = 0;
int temp_sobel_y = 0;
int start_xIndex = xIndex - (maskWidth/2);
int start_yIndex = yIndex - (maskWidth/2);
for(int i = 0; i < maskWidth; i++){
for(int j = 0; j < maskWidth; j++ ){
if((start_yIndex + j >=0 && start_yIndex + j < width) \
&&(start_xIndex + i >=0 && start_xIndex + i < height)){
temp_sobel_x += imageInput[(start_xIndex + i)*width+(start_yIndex + j)] * SOBELX[i*maskWidth+j];
temp_sobel_y += imageInput[(start_xIndex + i)*width+(start_yIndex + j)] * SOBELY[i*maskWidth+j];
}
}
}
temp_sobel_x = normalize(temp_sobel_x);
temp_sobel_y = normalize(temp_sobel_y);
imageOutput[xIndex*width+yIndex] = (int)sqrt((float)(temp_sobel_y*temp_sobel_y)+(temp_sobel_x*temp_sobel_x));
}
// Global Gaussian filter kernel
__global__ void gaussian(unsigned char *imageInput,
unsigned char *imageOutput,
int width,
int height,
unsigned int maskWidth){
//2D Index of current thread
unsigned int xIndex = blockIdx.y*blockDim.y+threadIdx.y;
unsigned int yIndex = blockIdx.x*blockDim.x+threadIdx.x;
float temp_gaussian = 0;
int start_xIndex = xIndex - (maskWidth/2);
int start_yIndex = yIndex - (maskWidth/2);
for(int i = 0; i < maskWidth; i++){
for(int j = 0; j < maskWidth; j++ ){
if((start_yIndex + j >=0 && start_yIndex + j < width) \
&&(start_xIndex + i >=0 && start_xIndex + i < height)){
temp_gaussian += (float)imageInput[(start_xIndex + i)*width+(start_yIndex + j)] * ((float)GAUSSIAN[i*maskWidth+j]/(float)255);
}
}
}
    imageOutput[xIndex*width+yIndex] = normalize(temp_gaussian);
}
extern "C" void cuda_grayscale(unsigned char* imageInput,
unsigned char* imageOutput,
int width,
int height,
int yIndexorWidthStep,
int grayWidthStep,
dim3 grid,
dim3 block_size)
{
hipLaunchKernelGGL(( rgb_to_grayscale) , dim3(grid), dim3(block_size) , 0, 0, (unsigned char*)imageInput,(unsigned char*)imageOutput, width, height,yIndexorWidthStep,grayWidthStep);
}
extern "C" void cuda_Sobel(unsigned char *imageInput,
unsigned char *imageOutput,
int width,
int height,
unsigned int maskWidth,
dim3 grid,
dim3 block_size)
{
hipLaunchKernelGGL(( sobel) , dim3(grid), dim3(block_size) , 0, 0, (unsigned char*)imageInput,(unsigned char*)imageOutput, width, height,maskWidth);
}
extern "C" void cuda_Gaussian(unsigned char *imageInput,
unsigned char *imageOutput,
int width,
int height,
unsigned int maskWidth,
dim3 grid,
dim3 block_size)
{
hipLaunchKernelGGL(( gaussian) , dim3(grid), dim3(block_size) , 0, 0, (unsigned char*)imageInput,(unsigned char*)imageOutput, width, height,maskWidth);
} | b64efd92427c485110621676f017789287457d20.cu | #define CONV_SOBEL_SIZE 3
#define CONV_GAUSSIAN_SIZE 5
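// Convolution masks in constant memory: 3x3 Sobel x/y derivative masks and a 5x5 binomial
// (Gaussian) mask whose weights are scaled by 1/255 when applied.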
__constant__ char SOBELX[CONV_SOBEL_SIZE*CONV_SOBEL_SIZE] = {-1,0,1,-2,0,2,-1,0,1};
__constant__ char SOBELY[CONV_SOBEL_SIZE*CONV_SOBEL_SIZE] = {1,2,1,0,0,0,-1,-2,-1};
__constant__ char GAUSSIAN[CONV_GAUSSIAN_SIZE*CONV_GAUSSIAN_SIZE] = {1,4,6,4,1,4,16,24,16,4,6,24,36,24,6,4,16,24,16,4,1,4,6,4,1};
__global__ void rgb_to_grayscale( unsigned char* imageInput,
unsigned char* imageOutput,
int width,
int height,
int yIndexorWidthStep,
int grayWidthStep)
{
//2D Index of current thread
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
//Only valid threads
if((xIndex<width) && (yIndex<height))
{
//Location of pixel in imageInput
const int yIndexor_tid = yIndex * yIndexorWidthStep + (3 * xIndex);
//Location of gray pixel in imageOutput
const int gray_tid = yIndex * grayWidthStep + xIndex;
const unsigned char blue = imageInput[yIndexor_tid];
const unsigned char green = imageInput[yIndexor_tid + 1];
const unsigned char red = imageInput[yIndexor_tid + 2];
//same weights as cv::COLOR_RGB2GRAY
const float gray = red * 0.299f + green * 0.587f + blue * 0.114f;
imageOutput[gray_tid] = static_cast<unsigned char>(gray);
}
}
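// Clamp an integer intensity to the valid 8-bit range [0, 255].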
__device__ unsigned char normalize(int value){
if(value < 0)
value = 0;
else
if(value > 255)
value = 255;
return (unsigned char)value;
}
// Global Sobel filter kernel
__global__ void sobel(unsigned char *imageInput,
unsigned char *imageOutput,
int width,
int height,
unsigned int maskWidth){
//2D Index of current thread
unsigned int xIndex = blockIdx.y*blockDim.y+threadIdx.y;
unsigned int yIndex = blockIdx.x*blockDim.x+threadIdx.x;
int temp_sobel_x = 0;
int temp_sobel_y = 0;
int start_xIndex = xIndex - (maskWidth/2);
int start_yIndex = yIndex - (maskWidth/2);
for(int i = 0; i < maskWidth; i++){
for(int j = 0; j < maskWidth; j++ ){
if((start_yIndex + j >=0 && start_yIndex + j < width) \
&&(start_xIndex + i >=0 && start_xIndex + i < height)){
temp_sobel_x += imageInput[(start_xIndex + i)*width+(start_yIndex + j)] * SOBELX[i*maskWidth+j];
temp_sobel_y += imageInput[(start_xIndex + i)*width+(start_yIndex + j)] * SOBELY[i*maskWidth+j];
}
}
}
temp_sobel_x = normalize(temp_sobel_x);
temp_sobel_y = normalize(temp_sobel_y);
imageOutput[xIndex*width+yIndex] = (int)sqrt((float)(temp_sobel_y*temp_sobel_y)+(temp_sobel_x*temp_sobel_x));
}
// Global Gaussian filter kernel
__global__ void gaussian(unsigned char *imageInput,
unsigned char *imageOutput,
int width,
int height,
unsigned int maskWidth){
//2D Index of current thread
unsigned int xIndex = blockIdx.y*blockDim.y+threadIdx.y;
unsigned int yIndex = blockIdx.x*blockDim.x+threadIdx.x;
float temp_gaussian = 0;
int start_xIndex = xIndex - (maskWidth/2);
int start_yIndex = yIndex - (maskWidth/2);
for(int i = 0; i < maskWidth; i++){
for(int j = 0; j < maskWidth; j++ ){
if((start_yIndex + j >=0 && start_yIndex + j < width) \
&&(start_xIndex + i >=0 && start_xIndex + i < height)){
temp_gaussian += (float)imageInput[(start_xIndex + i)*width+(start_yIndex + j)] * ((float)GAUSSIAN[i*maskWidth+j]/(float)255);
}
}
}
    imageOutput[xIndex*width+yIndex] = normalize(temp_gaussian);
}
extern "C" void cuda_grayscale(unsigned char* imageInput,
unsigned char* imageOutput,
int width,
int height,
int yIndexorWidthStep,
int grayWidthStep,
dim3 grid,
dim3 block_size)
{
rgb_to_grayscale <<< grid, block_size >>> ((unsigned char*)imageInput,(unsigned char*)imageOutput, width, height,yIndexorWidthStep,grayWidthStep);
}
extern "C" void cuda_Sobel(unsigned char *imageInput,
unsigned char *imageOutput,
int width,
int height,
unsigned int maskWidth,
dim3 grid,
dim3 block_size)
{
sobel <<< grid, block_size >>> ((unsigned char*)imageInput,(unsigned char*)imageOutput, width, height,maskWidth);
}
extern "C" void cuda_Gaussian(unsigned char *imageInput,
unsigned char *imageOutput,
int width,
int height,
unsigned int maskWidth,
dim3 grid,
dim3 block_size)
{
gaussian <<< grid, block_size >>> ((unsigned char*)imageInput,(unsigned char*)imageOutput, width, height,maskWidth);
} |
98c1931a9b084ad53284e45889336ac8344671f8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Add your kernel here
#define TOTAL_DATA 256
#define BLOCK_PER_THREAT 64
#define BLOCK_SIZE 32
#define RADIUS 3
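// 1D stencil: each output element is the sum of the 2*RADIUS+1 input values centred on it;
// the first RADIUS threads of each block also stage the halo cells into shared memory.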
__global__ void stensil_1d (int *in, int *out){
__shared__ int temp[BLOCK_SIZE + 2*RADIUS];
int gIdx = threadIdx.x + (blockIdx.x*blockDim.x); // Global Index
int lIdx = threadIdx.x + RADIUS; // Local Index
// Read input elements into shared memory
temp[lIdx] = in[gIdx];
if(threadIdx.x < RADIUS) {
temp[lIdx - RADIUS] = in[gIdx - RADIUS];
temp[lIdx + BLOCK_SIZE] = in[gIdx + BLOCK_SIZE];
}
    // Make sure all the threads are synchronized
__syncthreads();
// Apply the stencil
int result = 0;
for(int offset = -RADIUS; offset <= RADIUS; offset++) {
result += temp[lIdx + offset];
}
// Store the output
out[gIdx] = result;
}
// main
int main(void)
{
int *a, *b;
int *d_a, *d_b;
int size = TOTAL_DATA * sizeof(int);
int i;
// Allocate memory for the Host
a = (int *) malloc (size);
b = (int *) malloc (size);
// Allocate memory for the Device
hipMalloc ((void **) &d_a, size);
hipMalloc ((void **) &d_b, size);
// Initialize data (0 - 9)
for(i=0; i<TOTAL_DATA;i++) {
a[i] = rand() % 10;
}
	// Copy the input data to the device (destination pointer first, then source)
	hipMemcpy (d_a, a, size, hipMemcpyHostToDevice);
// Lets execute it
hipLaunchKernelGGL(( stensil_1d), dim3(TOTAL_DATA/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, d_a, d_b);
hipMemcpy (b, d_b, size, hipMemcpyDeviceToHost);
// Print the outcome
int j;
for(i=0;i<TOTAL_DATA;i++) {
printf("[%3d]\t", i);
		for(j=0;j<2*RADIUS + 1 && i+j<TOTAL_DATA;j++) printf("%d,", a[i+j]); // stay within the bounds of a[]
printf("\t--> %d\n", b[i]);
}
hipFree (d_a);
hipFree (d_b);
free (a);
free (b);
hipGetDeviceCount (&j);
printf("Total Device = %d\n", j);
return 0;
}
| 98c1931a9b084ad53284e45889336ac8344671f8.cu | #include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Add your kernel here
#define TOTAL_DATA 256
#define BLOCK_PER_THREAT 64
#define BLOCK_SIZE 32
#define RADIUS 3
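// 1D stencil: each output element is the sum of the 2*RADIUS+1 input values centred on it;
// the first RADIUS threads of each block also stage the halo cells into shared memory.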
__global__ void stensil_1d (int *in, int *out){
__shared__ int temp[BLOCK_SIZE + 2*RADIUS];
int gIdx = threadIdx.x + (blockIdx.x*blockDim.x); // Global Index
int lIdx = threadIdx.x + RADIUS; // Local Index
// Read input elements into shared memory
temp[lIdx] = in[gIdx];
if(threadIdx.x < RADIUS) {
temp[lIdx - RADIUS] = in[gIdx - RADIUS];
temp[lIdx + BLOCK_SIZE] = in[gIdx + BLOCK_SIZE];
}
    // Make sure all the threads are synchronized
__syncthreads();
// Apply the stencil
int result = 0;
for(int offset = -RADIUS; offset <= RADIUS; offset++) {
result += temp[lIdx + offset];
}
// Store the output
out[gIdx] = result;
}
// main
int main(void)
{
int *a, *b;
int *d_a, *d_b;
int size = TOTAL_DATA * sizeof(int);
int i;
// Allocate memory for the Host
a = (int *) malloc (size);
b = (int *) malloc (size);
// Allocate memory for the Device
cudaMalloc ((void **) &d_a, size);
cudaMalloc ((void **) &d_b, size);
// Initialize data (0 - 9)
for(i=0; i<TOTAL_DATA;i++) {
a[i] = rand() % 10;
}
	// Copy the input data to the device (destination pointer first, then source)
	cudaMemcpy (d_a, a, size, cudaMemcpyHostToDevice);
// Lets execute it
stensil_1d<<<TOTAL_DATA/BLOCK_SIZE, BLOCK_SIZE>>> (d_a, d_b);
cudaMemcpy (b, d_b, size, cudaMemcpyDeviceToHost);
// Print the outcome
int j;
for(i=0;i<TOTAL_DATA;i++) {
printf("[%3d]\t", i);
		for(j=0;j<2*RADIUS + 1 && i+j<TOTAL_DATA;j++) printf("%d,", a[i+j]); // stay within the bounds of a[]
printf("\t--> %d\n", b[i]);
}
cudaFree (d_a);
cudaFree (d_b);
free (a);
free (b);
cudaGetDeviceCount (&j);
printf("Total Device = %d\n", j);
return 0;
}
|
bad8de45f1353747dd5bd4e0f2feb24c143cd492.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_setAllPointsToRemove(bool *d_markers, int number_of_points)
{
int ind=blockIdx.x*blockDim.x+threadIdx.x;
if(ind<number_of_points)
{
d_markers[ind] = false;
}
} | bad8de45f1353747dd5bd4e0f2feb24c143cd492.cu | #include "includes.h"
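// One thread per point: reset every removal marker to false.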
__global__ void kernel_setAllPointsToRemove(bool *d_markers, int number_of_points)
{
int ind=blockIdx.x*blockDim.x+threadIdx.x;
if(ind<number_of_points)
{
d_markers[ind] = false;
}
} |
a6f97745b343c24dd28902850d7aea3a6863dbba.hip | // !!! This is a file automatically generated by hipify!!!
#include "ATen/ATen.h"
#include "ATen/hip/HIPContext.h"
#include "ATen/hip/HIPApplyUtils.cuh"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
namespace {
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// The number of cuda threads to use. 512 is used for backward compatibility
constexpr int ROI_CUDA_NUM_THREADS = 512;
// The maximum number of blocks to use in the default kernel call.
constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096;
/**
* @brief Compute the number of blocks needed to run N threads.
*/
inline int ROI_GET_BLOCKS(const int N) {
return ::max(
::min(
(N + ROI_CUDA_NUM_THREADS - 1) / ROI_CUDA_NUM_THREADS,
ROI_MAXIMUM_NUM_BLOCKS),
// Use at least 1 block, since CUDA does not allow empty block
1);
}
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForwardKernel(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T* w1,
T* w2,
T* w3,
T* w4,
int* x_low,
int* x_high,
int* y_low,
int* y_high,
const int /*index*/ /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
*w1 = *w2 = *w3 = *w4 = 0.;
*x_low = *x_high = *y_low = *y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
*y_low = static_cast<int>(y);
*x_low = static_cast<int>(x);
if (*y_low >= height - 1) {
*y_high = *y_low = height - 1;
y = (T)*y_low;
} else {
*y_high = *y_low + 1;
}
if (*x_low >= width - 1) {
*x_high = *x_low = width - 1;
x = (T)*x_low;
} else {
*x_high = *x_low + 1;
}
T ly = y - *y_low;
T lx = x - *x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
*w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
return;
}
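// gpu_atomic_add: the float overload maps to the hardware atomicAdd; the double overload
// emulates it with an atomicCAS loop.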
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <>
inline __device__ double gpu_atomic_add(const double val, double* address) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull;
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return val;
}
template <typename T>
__global__ void RoIAlignBackwardKernel(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
&w1,
&w2,
&w3,
&w4,
&x_low,
&x_high,
&y_low,
&y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
/*
atomicAdd(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
*/
gpu_atomic_add(
static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low);
gpu_atomic_add(
static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high);
gpu_atomic_add(
static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low);
gpu_atomic_add(
static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high);
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
} // namespace
at::Tensor ROIAlignForwardCUDA(
const at::Tensor input,
const at::Tensor rois,
int64_t pooled_height,
int64_t pooled_width,
double spatial_scale,
int64_t sampling_ratio) {
AT_ASSERT(input.is_contiguous());
AT_ASSERT(rois.is_contiguous());
AT_ASSERT(input.ndimension() == 4);
AT_ASSERT(rois.ndimension() == 2);
AT_ASSERT(rois.size(1) == 5);
auto proposals = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
// Output Tensor is (num_rois, C, pooled_height, pooled_width)
auto output = input.type().tensor({proposals, channels, pooled_height, pooled_width});
auto count = output.numel();
AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlignForwardCUDA", ([&] {
hipLaunchKernelGGL(( RoIAlignForwardKernel<scalar_t>)
, dim3(ROI_GET_BLOCKS(count)),
dim3(ROI_CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
input.data<scalar_t>(),
static_cast<scalar_t>(spatial_scale),
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.data<scalar_t>(),
output.data<scalar_t>());
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return output;
}
at::Tensor ROIAlignBackwardCUDA(
const at::Tensor rois,
const at::Tensor grad_output,
int64_t b_size,
int64_t channels,
int64_t height,
int64_t width,
int64_t pooled_height,
int64_t pooled_width,
double spatial_scale,
int64_t sampling_ratio) {
AT_ASSERT(rois.is_contiguous());
AT_ASSERT(rois.ndimension() == 2);
AT_ASSERT(rois.size(1) == 5);
auto roi_cols = rois.size(1);
AT_ASSERT(roi_cols == 4 || roi_cols == 5);
// Output Tensor is (num_rois, C, pooled_height, pooled_width)
// gradient wrt input features
auto grad_in = rois.type().tensor({b_size, channels, height, width}).zero_();
auto num_rois = rois.size(0);
auto count = grad_output.numel();
AT_DISPATCH_FLOATING_TYPES(rois.type(), "ROIAlignBackwardCUDA", ([&] {
hipLaunchKernelGGL(( RoIAlignBackwardKernel<scalar_t>)
, dim3(ROI_GET_BLOCKS(count)),
dim3(ROI_CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
grad_output.data<scalar_t>(),
num_rois,
static_cast<scalar_t>(spatial_scale),
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_in.data<scalar_t>(),
rois.data<scalar_t>());
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return grad_in;
}
| a6f97745b343c24dd28902850d7aea3a6863dbba.cu | #include "ATen/ATen.h"
#include "ATen/cuda/CUDAContext.h"
#include "ATen/cuda/CUDAApplyUtils.cuh"
#include "cuda.h"
#include "cuda_runtime.h"
namespace {
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// The number of cuda threads to use. 512 is used for backward compatibility
constexpr int ROI_CUDA_NUM_THREADS = 512;
// The maximum number of blocks to use in the default kernel call.
constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096;
/**
* @brief Compute the number of blocks needed to run N threads.
*/
inline int ROI_GET_BLOCKS(const int N) {
return std::max(
std::min(
(N + ROI_CUDA_NUM_THREADS - 1) / ROI_CUDA_NUM_THREADS,
ROI_MAXIMUM_NUM_BLOCKS),
// Use at least 1 block, since CUDA does not allow empty block
1);
}
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForwardKernel(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T* w1,
T* w2,
T* w3,
T* w4,
int* x_low,
int* x_high,
int* y_low,
int* y_high,
const int /*index*/ /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
*w1 = *w2 = *w3 = *w4 = 0.;
*x_low = *x_high = *y_low = *y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
*y_low = static_cast<int>(y);
*x_low = static_cast<int>(x);
if (*y_low >= height - 1) {
*y_high = *y_low = height - 1;
y = (T)*y_low;
} else {
*y_high = *y_low + 1;
}
if (*x_low >= width - 1) {
*x_high = *x_low = width - 1;
x = (T)*x_low;
} else {
*x_high = *x_low + 1;
}
T ly = y - *y_low;
T lx = x - *x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
*w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
return;
}
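// gpu_atomic_add: the float overload maps to the hardware atomicAdd; the double overload
// emulates it with an atomicCAS loop.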
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <>
inline __device__ double gpu_atomic_add(const double val, double* address) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull;
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return val;
}
template <typename T>
__global__ void RoIAlignBackwardKernel(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
&w1,
&w2,
&w3,
&w4,
&x_low,
&x_high,
&y_low,
&y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
/*
atomicAdd(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
*/
gpu_atomic_add(
static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low);
gpu_atomic_add(
static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high);
gpu_atomic_add(
static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low);
gpu_atomic_add(
static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high);
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
} // namespace
at::Tensor ROIAlignForwardCUDA(
const at::Tensor input,
const at::Tensor rois,
int64_t pooled_height,
int64_t pooled_width,
double spatial_scale,
int64_t sampling_ratio) {
AT_ASSERT(input.is_contiguous());
AT_ASSERT(rois.is_contiguous());
AT_ASSERT(input.ndimension() == 4);
AT_ASSERT(rois.ndimension() == 2);
AT_ASSERT(rois.size(1) == 5);
auto proposals = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
// Output Tensor is (num_rois, C, pooled_height, pooled_width)
auto output = input.type().tensor({proposals, channels, pooled_height, pooled_width});
auto count = output.numel();
AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlignForwardCUDA", ([&] {
RoIAlignForwardKernel<scalar_t>
<<<ROI_GET_BLOCKS(count),
ROI_CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
count,
input.data<scalar_t>(),
static_cast<scalar_t>(spatial_scale),
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.data<scalar_t>(),
output.data<scalar_t>());
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return output;
}
at::Tensor ROIAlignBackwardCUDA(
const at::Tensor rois,
const at::Tensor grad_output,
int64_t b_size,
int64_t channels,
int64_t height,
int64_t width,
int64_t pooled_height,
int64_t pooled_width,
double spatial_scale,
int64_t sampling_ratio) {
AT_ASSERT(rois.is_contiguous());
AT_ASSERT(rois.ndimension() == 2);
AT_ASSERT(rois.size(1) == 5);
auto roi_cols = rois.size(1);
AT_ASSERT(roi_cols == 4 || roi_cols == 5);
// Output Tensor is (num_rois, C, pooled_height, pooled_width)
// gradient wrt input features
auto grad_in = rois.type().tensor({b_size, channels, height, width}).zero_();
auto num_rois = rois.size(0);
auto count = grad_output.numel();
AT_DISPATCH_FLOATING_TYPES(rois.type(), "ROIAlignBackwardCUDA", ([&] {
RoIAlignBackwardKernel<scalar_t>
<<<ROI_GET_BLOCKS(count),
ROI_CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
count,
grad_output.data<scalar_t>(),
num_rois,
static_cast<scalar_t>(spatial_scale),
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_in.data<scalar_t>(),
rois.data<scalar_t>());
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return grad_in;
}
|
9af3b863f393f19fcf25f5a189581f35c864e91e.hip | // !!! This is a file automatically generated by hipify!!!
//
// António Rui Borges
//
// ACA 2020/2021
//
// Reference implementation
//
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "common.h"
#include <hip/hip_runtime.h>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// program configuration
//
#ifndef N
# define N (1 << 16)
#endif
static void cyclicCircConv_cpu_kernel (float *x, float *y, float *xy, unsigned int nSamp);
__global__ static void cyclicCircConv_cuda_kernel (float *x, float *y, float *xy, unsigned int nSamp);
static double get_delta_time(void);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Main program
//
int main (int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
int i;
hipDeviceProp_t deviceProp;
CHECK (hipGetDeviceProperties (&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK (hipSetDevice (dev));
  // create memory areas in host and device memory where the signals and their cyclic circular convolution will be stored
float *host_x, *host_y, *host_xy;
host_x = (float *) malloc (N * sizeof (float));
host_y = (float *) malloc (N * sizeof (float));
host_xy = (float *) malloc (N * sizeof (float));
  // initialize signal data
(void) get_delta_time ();
srand(0xACA2020);
for (i = 0; i < N; i++)
{ host_x[i] = (float) ((double) rand () / RAND_MAX - 0.5);
host_y[i] = (float) ((double) rand () / RAND_MAX - 0.5);
}
printf ("The initialization of host data took %.3e seconds\n",get_delta_time ());
// create memory areas in device memory and copy the host data to the device memory
  float *device_x, *device_y, *device_xy;
(void) get_delta_time ();
CHECK (hipMalloc ((void **) &device_x, N * sizeof (float)));
CHECK (hipMalloc ((void **) &device_y, N * sizeof (float)));
CHECK (hipMalloc ((void **) &device_xy, N * sizeof (float)));
CHECK (hipMemcpy (device_x, host_x, N * sizeof (float), hipMemcpyHostToDevice));
CHECK (hipMemcpy (device_y, host_y, N * sizeof (float), hipMemcpyHostToDevice));
printf ("The creation and transfer of %ld bytes from the host to the device took %.3e seconds\n",
(long) N * sizeof (float), get_delta_time ());
// run the computational kernel in the GPU
  // as an example, N threads are launched in total, each dealing with one convolution point
unsigned int gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ;
blockDimX = 1 << 6; // optimize!
blockDimY = 1 << 1; // optimize!
blockDimZ = 1 << 1; // optimize!
gridDimX = 1 << 5; // optimize!
gridDimY = 1 << 3; // optimize!
gridDimZ = 1 << 0; // optimize!
if ((blockDimX * blockDimY * blockDimZ * gridDimX * gridDimY * gridDimZ) != N)
{ fprintf (stderr,"Wrong launch configuration!\n");
exit (1);
}
dim3 grid (gridDimX, gridDimY, gridDimZ);
dim3 block (blockDimX, blockDimY, blockDimZ);
(void) get_delta_time ();
hipLaunchKernelGGL(( cyclicCircConv_cuda_kernel) , dim3(grid), dim3(block), 0, 0, device_x, device_y, device_xy, (unsigned int) N);
CHECK (hipDeviceSynchronize ()); // wait for kernel to finish
CHECK (hipGetLastError ()); // check for kernel errors
printf("The CUDA kernel <<<(%d,%d,%d), (%d,%d,%d)>>> took %.3e seconds to run\n",
gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, get_delta_time ());
// copy kernel result back to host side
float *modified_device_xy;
modified_device_xy = (float *) malloc (N * sizeof (float));
CHECK (hipMemcpy (modified_device_xy, device_xy, N * sizeof (float), hipMemcpyDeviceToHost));
printf ("The transfer of %ld bytes from the device to the host took %.3e seconds\n",
(long) N * sizeof (float), get_delta_time ());
// free device global memory
CHECK (hipFree (device_x));
CHECK (hipFree (device_y));
CHECK (hipFree (device_xy));
// reset device
CHECK (hipDeviceReset ());
// run the computational kernel in the CPU
(void) get_delta_time ();
cyclicCircConv_cpu_kernel (host_x, host_y, host_xy, (unsigned int) N);
printf("The cpu kernel took %.3e seconds to run (single core)\n", get_delta_time ());
// compare
for(i = 0; i < N; i++)
if ((fabsf (host_xy[i]) < 1e-3) && (fabsf (modified_device_xy[i]) > 1.01e-3))
{ printf ("Mismatch in sample point %d: cpu %.3e - gpu %.3e\n", i, host_xy[i], modified_device_xy[i]);
exit(1);
}
else if (fabsf ((host_xy[i] - modified_device_xy[i]) / host_xy[i]) >= 5e-2)
{ printf ("Mismatch in sample point %d: cpu %.3e - gpu %.3e\n", i, host_xy[i], modified_device_xy[i]);
exit(1);
}
printf ("All is well!\n");
// free host memory
free (host_x);
free (host_y);
free (host_xy);
free (modified_device_xy);
return 0;
}
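// Reference O(nSamp^2) cyclic circular convolution computed on the host, used to validate the GPU result.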
static void cyclicCircConv_cpu_kernel (float *x, float *y, float *xy, unsigned int nSamp)
{
unsigned int i, k;
float tmp;
for (i = 0; i < nSamp; i++)
{ tmp = 0.0;
for (k = 0; k < nSamp; k++)
tmp += x[k] * y[(i + k) % nSamp];
      xy[i] = tmp;   // assign, not accumulate: xy is not zero-initialised by the caller
}
}
__global__ static void cyclicCircConv_cuda_kernel (float *xx, float *yy, float *xxyy, unsigned int nSamp)
{
unsigned int k, x, y, z, idx;
float tmp;
// compute the thread number
x = (unsigned int) threadIdx.x + (unsigned int) blockDim.x * (unsigned int) blockIdx.x;
y = (unsigned int) threadIdx.y + (unsigned int) blockDim.y * (unsigned int) blockIdx.y;
z = (unsigned int) threadIdx.z + (unsigned int) blockDim.z * (unsigned int) blockIdx.z;
idx = (unsigned int) blockDim.y * (unsigned int) gridDim.y * (unsigned int) blockDim.x * (unsigned int) gridDim.x * z +
(unsigned int) blockDim.x * (unsigned int) gridDim.x * y + x;
if (idx >= nSamp)
{ printf ("Out of the data array: %u!\n", idx);
return; // safety precaution
}
tmp = 0.0;
for (k = 0; k < nSamp; k++)
tmp += xx[k] * yy[(idx + k) % nSamp];
  xxyy[idx] = tmp;   // assign, not accumulate: the output buffer is not zero-initialised
}
static double get_delta_time(void)
{
static struct timespec t0,t1;
t0 = t1;
if(clock_gettime(CLOCK_MONOTONIC,&t1) != 0)
{
perror("clock_gettime");
exit(1);
}
return (double)(t1.tv_sec - t0.tv_sec) + 1.0e-9 * (double)(t1.tv_nsec - t0.tv_nsec);
}
| 9af3b863f393f19fcf25f5a189581f35c864e91e.cu | //
// António Rui Borges
//
// ACA 2020/2021
//
// Reference implementation
//
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "common.h"
#include <cuda_runtime.h>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// program configuration
//
#ifndef N
# define N (1 << 16)
#endif
static void cyclicCircConv_cpu_kernel (float *x, float *y, float *xy, unsigned int nSamp);
__global__ static void cyclicCircConv_cuda_kernel (float *x, float *y, float *xy, unsigned int nSamp);
static double get_delta_time(void);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Main program
//
int main (int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
int i;
cudaDeviceProp deviceProp;
CHECK (cudaGetDeviceProperties (&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK (cudaSetDevice (dev));
  // create memory areas in host and device memory where the signals and their cyclic circular convolution will be stored
float *host_x, *host_y, *host_xy;
host_x = (float *) malloc (N * sizeof (float));
host_y = (float *) malloc (N * sizeof (float));
host_xy = (float *) malloc (N * sizeof (float));
  // initialize signal data
(void) get_delta_time ();
srand(0xACA2020);
for (i = 0; i < N; i++)
{ host_x[i] = (float) ((double) rand () / RAND_MAX - 0.5);
host_y[i] = (float) ((double) rand () / RAND_MAX - 0.5);
}
printf ("The initialization of host data took %.3e seconds\n",get_delta_time ());
// create memory areas in device memory and copy the host data to the device memory
  float *device_x, *device_y, *device_xy;
(void) get_delta_time ();
CHECK (cudaMalloc ((void **) &device_x, N * sizeof (float)));
CHECK (cudaMalloc ((void **) &device_y, N * sizeof (float)));
CHECK (cudaMalloc ((void **) &device_xy, N * sizeof (float)));
CHECK (cudaMemcpy (device_x, host_x, N * sizeof (float), cudaMemcpyHostToDevice));
CHECK (cudaMemcpy (device_y, host_y, N * sizeof (float), cudaMemcpyHostToDevice));
printf ("The creation and transfer of %ld bytes from the host to the device took %.3e seconds\n",
(long) N * sizeof (float), get_delta_time ());
// run the computational kernel in the GPU
  // as an example, N threads are launched in total, each dealing with one convolution point
unsigned int gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ;
blockDimX = 1 << 6; // optimize!
blockDimY = 1 << 1; // optimize!
blockDimZ = 1 << 1; // optimize!
gridDimX = 1 << 5; // optimize!
gridDimY = 1 << 3; // optimize!
gridDimZ = 1 << 0; // optimize!
if ((blockDimX * blockDimY * blockDimZ * gridDimX * gridDimY * gridDimZ) != N)
{ fprintf (stderr,"Wrong launch configuration!\n");
exit (1);
}
dim3 grid (gridDimX, gridDimY, gridDimZ);
dim3 block (blockDimX, blockDimY, blockDimZ);
(void) get_delta_time ();
cyclicCircConv_cuda_kernel <<<grid, block>>> (device_x, device_y, device_xy, (unsigned int) N);
CHECK (cudaDeviceSynchronize ()); // wait for kernel to finish
CHECK (cudaGetLastError ()); // check for kernel errors
printf("The CUDA kernel <<<(%d,%d,%d), (%d,%d,%d)>>> took %.3e seconds to run\n",
gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, get_delta_time ());
// copy kernel result back to host side
float *modified_device_xy;
modified_device_xy = (float *) malloc (N * sizeof (float));
CHECK (cudaMemcpy (modified_device_xy, device_xy, N * sizeof (float), cudaMemcpyDeviceToHost));
printf ("The transfer of %ld bytes from the device to the host took %.3e seconds\n",
(long) N * sizeof (float), get_delta_time ());
// free device global memory
CHECK (cudaFree (device_x));
CHECK (cudaFree (device_y));
CHECK (cudaFree (device_xy));
// reset device
CHECK (cudaDeviceReset ());
// run the computational kernel in the CPU
(void) get_delta_time ();
cyclicCircConv_cpu_kernel (host_x, host_y, host_xy, (unsigned int) N);
printf("The cpu kernel took %.3e seconds to run (single core)\n", get_delta_time ());
// compare
for(i = 0; i < N; i++)
if ((fabsf (host_xy[i]) < 1e-3) && (fabsf (modified_device_xy[i]) > 1.01e-3))
{ printf ("Mismatch in sample point %d: cpu %.3e - gpu %.3e\n", i, host_xy[i], modified_device_xy[i]);
exit(1);
}
else if (fabsf ((host_xy[i] - modified_device_xy[i]) / host_xy[i]) >= 5e-2)
{ printf ("Mismatch in sample point %d: cpu %.3e - gpu %.3e\n", i, host_xy[i], modified_device_xy[i]);
exit(1);
}
printf ("All is well!\n");
// free host memory
free (host_x);
free (host_y);
free (host_xy);
free (modified_device_xy);
return 0;
}
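// Reference O(nSamp^2) cyclic circular convolution computed on the host, used to validate the GPU result.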
static void cyclicCircConv_cpu_kernel (float *x, float *y, float *xy, unsigned int nSamp)
{
unsigned int i, k;
float tmp;
for (i = 0; i < nSamp; i++)
{ tmp = 0.0;
for (k = 0; k < nSamp; k++)
tmp += x[k] * y[(i + k) % nSamp];
      xy[i] = tmp;   // assign, not accumulate: xy is not zero-initialised by the caller
}
}
__global__ static void cyclicCircConv_cuda_kernel (float *xx, float *yy, float *xxyy, unsigned int nSamp)
{
unsigned int k, x, y, z, idx;
float tmp;
// compute the thread number
x = (unsigned int) threadIdx.x + (unsigned int) blockDim.x * (unsigned int) blockIdx.x;
y = (unsigned int) threadIdx.y + (unsigned int) blockDim.y * (unsigned int) blockIdx.y;
z = (unsigned int) threadIdx.z + (unsigned int) blockDim.z * (unsigned int) blockIdx.z;
idx = (unsigned int) blockDim.y * (unsigned int) gridDim.y * (unsigned int) blockDim.x * (unsigned int) gridDim.x * z +
(unsigned int) blockDim.x * (unsigned int) gridDim.x * y + x;
if (idx >= nSamp)
{ printf ("Out of the data array: %u!\n", idx);
return; // safety precaution
}
tmp = 0.0;
for (k = 0; k < nSamp; k++)
tmp += xx[k] * yy[(idx + k) % nSamp];
  xxyy[idx] = tmp;   // assign, not accumulate: the output buffer is not zero-initialised
}
static double get_delta_time(void)
{
static struct timespec t0,t1;
t0 = t1;
if(clock_gettime(CLOCK_MONOTONIC,&t1) != 0)
{
perror("clock_gettime");
exit(1);
}
return (double)(t1.tv_sec - t0.tv_sec) + 1.0e-9 * (double)(t1.tv_nsec - t0.tv_nsec);
}
|
05c683534a8ed0b2fcff55af43a29bcf4e1d8e09.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <math.h>
#include <helper_cuda.h>
#include <complex>
#include <cufinufft_eitherprec.h>
#include "../contrib/utils.h"
using namespace std;
int main(int argc, char* argv[])
{
int N1, N2, M, N, ntransf, maxbatchsize;
if (argc<4) {
fprintf(stderr,
"Usage: cufinufft2d1many_test method N1 N2 [ntransf [maxbatchsize [M [tol]]]]\n"
"Arguments:\n"
" method: One of\n"
" 1: nupts driven,\n"
" 2: sub-problem, or\n"
" 3: sub-problem with Paul's idea.\n"
" N1, N2: The size of the 2D array.\n"
" ntransf: Number of inputs (default 2 ^ 27 / (N1 * N2)).\n"
" maxbatchsize: Number of simultaneous transforms (or 0 for default).\n"
" M: The number of non-uniform points (default N1 * N2).\n"
" tol: NUFFT tolerance (default 1e-6).\n");
return 1;
}
double w;
int method;
sscanf(argv[1],"%d",&method);
sscanf(argv[2],"%lf",&w); N1 = (int)w; // so can read 1e6 right!
sscanf(argv[3],"%lf",&w); N2 = (int)w; // so can read 1e6 right!
N = N1*N2;
M = N1*N2*2;// let density always be 2
ntransf = pow(2,28)/M;
if(argc>4){
sscanf(argv[4],"%d",&ntransf);
}
maxbatchsize = 0; // default (cufinufft chooses)
if(argc>5){
sscanf(argv[5],"%d",&maxbatchsize);
}
if(argc>6){
sscanf(argv[6],"%lf",&w); M = (int)w; // so can read 1e6 right!
}
FLT tol=1e-6;
if(argc>7){
sscanf(argv[7],"%lf",&w); tol = (FLT)w; // so can read 1e6 right!
}
int iflag=1;
cout<<scientific<<setprecision(3);
int ier;
printf("#modes = %d, #inputs = %d, #NUpts = %d\n", N, ntransf, M);
FLT *x, *y;
CPX *c, *fk;
hipHostMalloc(&x, M*sizeof(FLT));
hipHostMalloc(&y, M*sizeof(FLT));
hipHostMalloc(&c, M*ntransf*sizeof(CPX));
hipHostMalloc(&fk,N1*N2*ntransf*sizeof(CPX));
FLT *d_x, *d_y;
CUCPX *d_c, *d_fk;
checkCudaErrors(hipMalloc(&d_x,M*sizeof(FLT)));
checkCudaErrors(hipMalloc(&d_y,M*sizeof(FLT)));
checkCudaErrors(hipMalloc(&d_c,M*ntransf*sizeof(CUCPX)));
checkCudaErrors(hipMalloc(&d_fk,N1*N2*ntransf*sizeof(CUCPX)));
// Making data
for (int i=0; i<M; i++) {
x[i] = M_PI*randm11();// x in [-pi,pi)
y[i] = M_PI*randm11();
}
for(int i=0; i<M*ntransf; i++){
c[i].real(randm11());
c[i].imag(randm11());
}
checkCudaErrors(hipMemcpy(d_x,x,M*sizeof(FLT),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_y,y,M*sizeof(FLT),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_c,c,M*ntransf*sizeof(CUCPX),hipMemcpyHostToDevice));
hipEvent_t start, stop;
float milliseconds = 0;
double totaltime = 0;
hipEventCreate(&start);
hipEventCreate(&stop);
// warm up CUFFT (is slow, takes around 0.2 sec... )
hipEventRecord(start);
{
int nf1=1;
hipfftHandle fftplan;
hipfftPlan1d(&fftplan,nf1,CUFFT_TYPE,1);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("[time ] dummy warmup call to CUFFT\t %.3g s\n", milliseconds/1000);
// now to the test...
CUFINUFFT_PLAN dplan;
int dim = 2;
int type = 1;
// Here we setup our own opts, for gpu_method.
cufinufft_opts opts;
ier=CUFINUFFT_DEFAULT_OPTS(type, dim, &opts);
if(ier!=0){
printf("err %d: CUFINUFFT_DEFAULT_OPTS\n", ier);
return ier;
}
opts.gpu_method=method;
int nmodes[3];
nmodes[0] = N1;
nmodes[1] = N2;
nmodes[2] = 1;
hipEventRecord(start);
ier=CUFINUFFT_MAKEPLAN(type, dim, nmodes, iflag, ntransf, tol,
maxbatchsize, &dplan, &opts);
if (ier!=0){
printf("err: cufinufft2d_plan\n");
return ier;
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
totaltime += milliseconds;
printf("[time ] cufinufft plan:\t\t %.3g s\n", milliseconds/1000);
hipEventRecord(start);
ier=CUFINUFFT_SETPTS(M, d_x, d_y, NULL, 0, NULL, NULL, NULL, dplan);
if (ier!=0){
printf("err: cufinufft_setpts\n");
return ier;
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
totaltime += milliseconds;
printf("[time ] cufinufft setNUpts:\t\t %.3g s\n", milliseconds/1000);
hipEventRecord(start);
ier=CUFINUFFT_EXECUTE(d_c, d_fk, dplan);
if (ier!=0){
printf("err: cufinufft2d1_exec\n");
return ier;
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
float exec_ms = milliseconds;
totaltime += milliseconds;
printf("[time ] cufinufft exec:\t\t %.3g s\n", milliseconds/1000);
hipEventRecord(start);
ier=CUFINUFFT_DESTROY(dplan);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
totaltime += milliseconds;
printf("[time ] cufinufft destroy:\t\t %.3g s\n", milliseconds/1000);
checkCudaErrors(hipMemcpy(fk,d_fk,N1*N2*ntransf*sizeof(CUCPX),
hipMemcpyDeviceToHost));
	int i = ntransf-1; // choose some data to check
int nt1 = (int)(0.37*N1), nt2 = (int)(0.26*N2); // choose some mode index to check
CPX Ft = CPX(0,0), J = IMA*(FLT)iflag;
for (BIGINT j=0; j<M; ++j)
Ft += c[j+i*M] * exp(J*(nt1*x[j]+nt2*y[j])); // crude direct
int it = N1/2+nt1 + N1*(N2/2+nt2); // index in complex F as 1d array
// printf("[gpu ] %dth data one mode: abs err in F[%ld,%ld] is %.3g\n",(int)i, (int)nt1,(int)nt2,abs(Ft-fk[it+i*N]));
printf("[gpu ] %dth data one mode: rel err in F[%ld,%ld] is %.3g\n",(int)i, (int)nt1,(int)nt2,abs(Ft-fk[it+i*N])/infnorm(N,fk+i*N));
printf("[totaltime] %.3g us, speed %.3g NUpts/s\n", totaltime*1000, M*ntransf/totaltime*1000);
printf("\t\t\t\t\t(exec-only thoughput: %.3g NU pts/s)\n",M*ntransf/exec_ms*1000);
hipHostFree(x);
hipHostFree(y);
hipHostFree(c);
hipHostFree(fk);
return 0;
}
| 05c683534a8ed0b2fcff55af43a29bcf4e1d8e09.cu | #include <iostream>
#include <iomanip>
#include <math.h>
#include <helper_cuda.h>
#include <complex>
#include <cufinufft_eitherprec.h>
#include "../contrib/utils.h"
using namespace std;
int main(int argc, char* argv[])
{
int N1, N2, M, N, ntransf, maxbatchsize;
if (argc<4) {
fprintf(stderr,
"Usage: cufinufft2d1many_test method N1 N2 [ntransf [maxbatchsize [M [tol]]]]\n"
"Arguments:\n"
" method: One of\n"
" 1: nupts driven,\n"
" 2: sub-problem, or\n"
" 3: sub-problem with Paul's idea.\n"
" N1, N2: The size of the 2D array.\n"
" ntransf: Number of inputs (default 2 ^ 27 / (N1 * N2)).\n"
" maxbatchsize: Number of simultaneous transforms (or 0 for default).\n"
" M: The number of non-uniform points (default N1 * N2).\n"
" tol: NUFFT tolerance (default 1e-6).\n");
return 1;
}
double w;
int method;
sscanf(argv[1],"%d",&method);
sscanf(argv[2],"%lf",&w); N1 = (int)w; // so can read 1e6 right!
sscanf(argv[3],"%lf",&w); N2 = (int)w; // so can read 1e6 right!
N = N1*N2;
M = N1*N2*2;// let density always be 2
ntransf = pow(2,28)/M;
if(argc>4){
sscanf(argv[4],"%d",&ntransf);
}
maxbatchsize = 0; // default (cufinufft chooses)
if(argc>5){
sscanf(argv[5],"%d",&maxbatchsize);
}
if(argc>6){
sscanf(argv[6],"%lf",&w); M = (int)w; // so can read 1e6 right!
}
FLT tol=1e-6;
if(argc>7){
sscanf(argv[7],"%lf",&w); tol = (FLT)w; // so can read 1e6 right!
}
int iflag=1;
cout<<scientific<<setprecision(3);
int ier;
printf("#modes = %d, #inputs = %d, #NUpts = %d\n", N, ntransf, M);
FLT *x, *y;
CPX *c, *fk;
cudaMallocHost(&x, M*sizeof(FLT));
cudaMallocHost(&y, M*sizeof(FLT));
cudaMallocHost(&c, M*ntransf*sizeof(CPX));
cudaMallocHost(&fk,N1*N2*ntransf*sizeof(CPX));
FLT *d_x, *d_y;
CUCPX *d_c, *d_fk;
checkCudaErrors(cudaMalloc(&d_x,M*sizeof(FLT)));
checkCudaErrors(cudaMalloc(&d_y,M*sizeof(FLT)));
checkCudaErrors(cudaMalloc(&d_c,M*ntransf*sizeof(CUCPX)));
checkCudaErrors(cudaMalloc(&d_fk,N1*N2*ntransf*sizeof(CUCPX)));
// Making data
for (int i=0; i<M; i++) {
x[i] = M_PI*randm11();// x in [-pi,pi)
y[i] = M_PI*randm11();
}
for(int i=0; i<M*ntransf; i++){
c[i].real(randm11());
c[i].imag(randm11());
}
checkCudaErrors(cudaMemcpy(d_x,x,M*sizeof(FLT),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_y,y,M*sizeof(FLT),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_c,c,M*ntransf*sizeof(CUCPX),cudaMemcpyHostToDevice));
cudaEvent_t start, stop;
float milliseconds = 0;
double totaltime = 0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// warm up CUFFT (is slow, takes around 0.2 sec... )
cudaEventRecord(start);
{
int nf1=1;
cufftHandle fftplan;
cufftPlan1d(&fftplan,nf1,CUFFT_TYPE,1);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("[time ] dummy warmup call to CUFFT\t %.3g s\n", milliseconds/1000);
// now to the test...
CUFINUFFT_PLAN dplan;
int dim = 2;
int type = 1;
// Here we setup our own opts, for gpu_method.
cufinufft_opts opts;
ier=CUFINUFFT_DEFAULT_OPTS(type, dim, &opts);
if(ier!=0){
printf("err %d: CUFINUFFT_DEFAULT_OPTS\n", ier);
return ier;
}
opts.gpu_method=method;
int nmodes[3];
nmodes[0] = N1;
nmodes[1] = N2;
nmodes[2] = 1;
cudaEventRecord(start);
ier=CUFINUFFT_MAKEPLAN(type, dim, nmodes, iflag, ntransf, tol,
maxbatchsize, &dplan, &opts);
if (ier!=0){
printf("err: cufinufft2d_plan\n");
return ier;
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
totaltime += milliseconds;
printf("[time ] cufinufft plan:\t\t %.3g s\n", milliseconds/1000);
cudaEventRecord(start);
ier=CUFINUFFT_SETPTS(M, d_x, d_y, NULL, 0, NULL, NULL, NULL, dplan);
if (ier!=0){
printf("err: cufinufft_setpts\n");
return ier;
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
totaltime += milliseconds;
printf("[time ] cufinufft setNUpts:\t\t %.3g s\n", milliseconds/1000);
cudaEventRecord(start);
ier=CUFINUFFT_EXECUTE(d_c, d_fk, dplan);
if (ier!=0){
printf("err: cufinufft2d1_exec\n");
return ier;
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
float exec_ms = milliseconds;
totaltime += milliseconds;
printf("[time ] cufinufft exec:\t\t %.3g s\n", milliseconds/1000);
cudaEventRecord(start);
ier=CUFINUFFT_DESTROY(dplan);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
totaltime += milliseconds;
printf("[time ] cufinufft destroy:\t\t %.3g s\n", milliseconds/1000);
checkCudaErrors(cudaMemcpy(fk,d_fk,N1*N2*ntransf*sizeof(CUCPX),
cudaMemcpyDeviceToHost));
	int i = ntransf-1; // choose some data to check
int nt1 = (int)(0.37*N1), nt2 = (int)(0.26*N2); // choose some mode index to check
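	// Accuracy check: the type-1 transform computes
	//   f[k1,k2] = sum_j c[j] * exp(i*iflag*(k1*x[j] + k2*y[j])),
	// so one output mode (nt1,nt2) is evaluated directly on the host below and
	// compared to the library result; the error is reported relative to the
	// sup norm of that transform's output vector.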
CPX Ft = CPX(0,0), J = IMA*(FLT)iflag;
for (BIGINT j=0; j<M; ++j)
Ft += c[j+i*M] * exp(J*(nt1*x[j]+nt2*y[j])); // crude direct
int it = N1/2+nt1 + N1*(N2/2+nt2); // index in complex F as 1d array
// printf("[gpu ] %dth data one mode: abs err in F[%ld,%ld] is %.3g\n",(int)i, (int)nt1,(int)nt2,abs(Ft-fk[it+i*N]));
printf("[gpu ] %dth data one mode: rel err in F[%ld,%ld] is %.3g\n",(int)i, (int)nt1,(int)nt2,abs(Ft-fk[it+i*N])/infnorm(N,fk+i*N));
printf("[totaltime] %.3g us, speed %.3g NUpts/s\n", totaltime*1000, M*ntransf/totaltime*1000);
printf("\t\t\t\t\t(exec-only thoughput: %.3g NU pts/s)\n",M*ntransf/exec_ms*1000);
cudaFreeHost(x);
cudaFreeHost(y);
cudaFreeHost(c);
cudaFreeHost(fk);
return 0;
}
|
cc904dab4a86a7ee7681912630a8d29b369d8511.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <boost/chrono.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "caffe/layers/gen_slicinfo_layer.hpp"
using namespace gSLICr;
using namespace cv;
using std::cout;
using std::endl;
namespace caffe {
// ----------------------------------------------------
//
// Image Space Conversion
//
// ----------------------------------------------------
template<typename Dtype>
__global__ void Cvt_Img_Space_device(const int nthreads, const Dtype* inimg,
Vector4f* outimg, Vector2i img_size, int color_space) {
CUDA_KERNEL_LOOP(index, nthreads) {
// input index : (n, h, w) => blob index : (n, c, h, w)
const int max_height = img_size.y;
const int max_width = img_size.x;
const int w = index % max_width;
const int h = (index / max_width) % max_height;
const int n = (index / max_width) / max_height;
const int b_index = ((n * 3 + 0) * max_height + h) * max_width + w;
const int g_index = ((n * 3 + 1) * max_height + h) * max_width + w;
const int r_index = ((n * 3 + 2) * max_height + h) * max_width + w;
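    // The input blob is mean-subtracted BGR: the constants 104/117/123 added
    // back below appear to be the usual Caffe/ImageNet channel means (an
    // assumption; they are not defined in this file), and 0.0039216f ~ 1/255
    // rescales to [0,1] before the linear RGB->XYZ matrix that follows.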
float _b = (float)(inimg[b_index] + 104)* 0.0039216f;
// printf("_b = %f \n", _b);
float _g = (float)(inimg[g_index] + 117)* 0.0039216f;
float _r = (float)(inimg[r_index] + 123)* 0.0039216f;
float x = _r*0.412453f + _g*0.357580f + _b*0.180423f;
float y = _r*0.212671f + _g*0.715160f + _b*0.072169f;
float z = _r*0.019334f + _g*0.119193f + _b*0.950227f;
switch (color_space)
{
case gSLICr::XYZ:
outimg[index].x = x;
outimg[index].y = y;
outimg[index].z = z;
break;
case gSLICr::CIELAB:
float epsilon = 0.008856f; //actual CIE standard
float kappa = 903.3f; //actual CIE standard
float Xr = 0.950456f; //reference white
float Yr = 1.0f; //reference white
float Zr = 1.088754f; //reference white
float xr = x / Xr;
float yr = y / Yr;
float zr = z / Zr;
float fx, fy, fz;
if (xr > epsilon) fx = pow(xr, 1.0f / 3.0f);
else fx = (kappa*xr + 16.0f) / 116.0f;
if (yr > epsilon) fy = pow(yr, 1.0f / 3.0f);
else fy = (kappa*yr + 16.0f) / 116.0f;
if (zr > epsilon) fz = pow(zr, 1.0f / 3.0f);
else fz = (kappa*zr + 16.0f) / 116.0f;
outimg[index].x = 116.0f*fy - 16.0f;
outimg[index].y = 500.0f*(fx - fy);
outimg[index].z = 200.0f*(fy - fz);
break;
}
}
}
template<typename Dtype>
void GenSlicinfoLayer<Dtype>::Cvt_Img_Space(const Blob<Dtype>* inimg, Blob<Vector4f>* outimg,
int color_space) {
const Dtype* inimg_ptr = inimg->gpu_data();
Vector4f* outimg_ptr = outimg->mutable_gpu_data();
const int count = outimg->count();
hipLaunchKernelGGL(( Cvt_Img_Space_device), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, inimg_ptr, outimg_ptr, img_size_, color_space);
}
// ----------------------------------------------------
//
// Cluster Center Initialisation
//
// ----------------------------------------------------
__device__ inline void init_cluster_centers_shared(const Vector4f* inimg,
spixel_info* out_spixel, Vector2i map_size,
Vector2i img_size, int spixel_size, int x, int y) {
int cluster_idx = y * map_size.x + x;
int img_x = x * spixel_size + spixel_size / 2;
int img_y = y * spixel_size + spixel_size / 2;
img_x = img_x >= img_size.x ? (x * spixel_size + img_size.x) / 2 : img_x;
img_y = img_y >= img_size.y ? (y * spixel_size + img_size.y) / 2 : img_y;
// TODO: go one step towards gradients direction
out_spixel[cluster_idx].id = cluster_idx;
out_spixel[cluster_idx].center = Vector2f((float)img_x, (float)img_y);
out_spixel[cluster_idx].color_info = inimg[img_y*img_size.x + img_x];
out_spixel[cluster_idx].no_pixels = 0;
}
__global__ void Init_Cluster_Centers_device(const int nthreads, const Vector4f* inimg,
spixel_info* out_spixel, Vector2i map_size, Vector2i img_size, int spixel_size) {
CUDA_KERNEL_LOOP(index, nthreads) { // index : N x 1 x map_size x map_size
const int x = index % map_size.x;
const int y = (index / map_size.x) % map_size.y;
const int n = (index / map_size.x) / map_size.y;
const int start_index_img_n = n*img_size.y*img_size.x;
const int start_index_map_n = n*map_size.y*map_size.x;
const Vector4f* inimg_n = &(inimg[start_index_img_n]);
spixel_info* out_spixel_n = &(out_spixel[start_index_map_n]);
init_cluster_centers_shared(inimg_n, out_spixel_n, map_size, img_size,
spixel_size, x, y);
}
}
template<typename Dtype>
void GenSlicinfoLayer<Dtype>::Init_Cluster_Centers() {
spixel_info* spixel_list = spixel_map_.mutable_gpu_data();
const Vector4f* img_ptr = cvt_img_.gpu_data();
const int count = spixel_map_.count();
hipLaunchKernelGGL(( Init_Cluster_Centers_device), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, img_ptr, spixel_list, map_size_, img_size_, spixel_size_);
}
// ----------------------------------------------------
//
// Finding the Cluster Associations
//
// ----------------------------------------------------
__device__ inline float compute_slic_distance(const Vector4f& pix, int x, int y,
const spixel_info& center_info, float weight, float normalizer_xy,
float normalizer_color) {
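	// SLIC-style distance: squared color distance plus squared spatial
	// distance, each scaled by its normalizer and mixed by the compactness
	// weight:  D = sqrt(dcolor*normalizer_color + weight*dxy*normalizer_xy)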
float dcolor = (pix.x - center_info.color_info.x)*(pix.x - center_info.color_info.x)
+ (pix.y - center_info.color_info.y)*(pix.y - center_info.color_info.y)
+ (pix.z - center_info.color_info.z)*(pix.z - center_info.color_info.z);
float dxy = (x - center_info.center.x) * (x - center_info.center.x)
+ (y - center_info.center.y) * (y - center_info.center.y);
float retval = dcolor * normalizer_color + weight * dxy * normalizer_xy;
return sqrtf(retval);
}
template<typename Dtype>
__device__ inline void find_center_association_shared(const Vector4f* inimg,
const spixel_info* in_spixel_map, Dtype* out_idx_img,
Vector2i map_size, Vector2i img_size, int spixel_size, float weight,
int n, int x, int y, float max_xy_dist, float max_color_dist) {
int idx_img = (n*img_size.y + y)*img_size.x + x;
int ctr_x = x / spixel_size;
int ctr_y = y / spixel_size;
Dtype minidx = -1;
float dist = 999999.9999f;
// search 3x3 neighborhood
for (int i = -1; i <= 1; i++) for (int j = -1; j <= 1; j++)
{
int ctr_x_check = ctr_x + j;
int ctr_y_check = ctr_y + i;
if (ctr_x_check >= 0 && ctr_y_check >= 0 && ctr_x_check < map_size.x &&
ctr_y_check < map_size.y) {
int ctr_idx = (n*map_size.y + ctr_y_check)*map_size.x + ctr_x_check;
float cdist = compute_slic_distance(inimg[idx_img], x, y, in_spixel_map[ctr_idx],
weight, max_xy_dist, max_color_dist);
if (cdist < dist) {
dist = cdist;
minidx = in_spixel_map[ctr_idx].id;
}
}
}
if (minidx >= 0) out_idx_img[idx_img] = minidx;
}
template<typename Dtype>
__global__ void Find_Center_Association_device(const int nthreads, const Vector4f* inimg,
const spixel_info* in_spixel_map, Dtype* out_idx_img, Vector2i map_size,
Vector2i img_size, int spixel_size, float weight, float max_xy_dist,
float max_color_dist) {
CUDA_KERNEL_LOOP(index, nthreads) {
// index: (n, h, w)
const int x = index % img_size.x;
const int y = (index / img_size.x) % img_size.y;
const int n = (index / img_size.x) / img_size.y;
find_center_association_shared(inimg, in_spixel_map, out_idx_img, map_size, img_size,
spixel_size, weight, n, x, y, max_xy_dist, max_color_dist);
}
}
template<typename Dtype>
void GenSlicinfoLayer<Dtype>::Find_Center_Association(Blob<Dtype>* idx_blob) {
spixel_info* spixel_list = spixel_map_.mutable_gpu_data();
const Vector4f* img_ptr = cvt_img_.gpu_data();
Dtype* idx_ptr = idx_blob->mutable_gpu_data();
const int count = cvt_img_.count();
hipLaunchKernelGGL(( Find_Center_Association_device), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, img_ptr, spixel_list, idx_ptr, map_size_, img_size_, spixel_size_,
coh_weight_, max_xy_dist_, max_color_dist_);
}
// ----------------------------------------------------
//
// Updating the cluster center
//
// ----------------------------------------------------
template<typename Dtype>
__global__ void Update_Cluster_Center_device(const Vector4f* inimg,
const Dtype* in_idx_img, spixel_info* accum_map, Vector2i map_size, Vector2i img_size,
int spixel_size, int no_blocks_per_line) {
	int local_id = threadIdx.y * blockDim.x + threadIdx.x; // linear thread index within the thread block
// blockIdx.x : image index(N), blockIdx.y : superpixel index(mapsize^2)
// blockIdx.z = block index(num_blocks_per_superpixel)
const int num_blocks_per_superpixel_ = gridDim.z;
const int BLOCK_DIM = BLOCK_SEARCH_RANGE_;
int spixel_id = blockIdx.y; // superpixel index
int spixel_index_x = spixel_id % map_size.x;
int spixel_index_y = spixel_id / map_size.x;
__shared__ Vector4f color_shared[BLOCK_DIM*BLOCK_DIM];
__shared__ Vector2f xy_shared[BLOCK_DIM*BLOCK_DIM];
__shared__ int count_shared[BLOCK_DIM*BLOCK_DIM];
__shared__ bool should_add;
color_shared[local_id] = Vector4f(0, 0, 0, 0);
xy_shared[local_id] = Vector2f(0, 0);
count_shared[local_id] = 0;
should_add = false;
__syncthreads();
// compute the relative position in the search window
int block_x = blockIdx.z % no_blocks_per_line;
int block_y = blockIdx.z / no_blocks_per_line;
int x_offset = block_x * BLOCK_DIM + threadIdx.x;
int y_offset = block_y * BLOCK_DIM + threadIdx.y;
if (x_offset < spixel_size * 3 && y_offset < spixel_size * 3)
{
// compute the start of the search window
int x_start = spixel_index_x * spixel_size - spixel_size;
int y_start = spixel_index_y * spixel_size - spixel_size;
int x_img = x_start + x_offset;
int y_img = y_start + y_offset;
if (x_img >= 0 && x_img < img_size.x && y_img >= 0 && y_img < img_size.y)
{
int img_idx = (blockIdx.x*img_size.y + y_img)*img_size.x + x_img;
if (in_idx_img[img_idx] == spixel_id)
{
color_shared[local_id] = inimg[img_idx];
xy_shared[local_id] = Vector2f(x_img, y_img);
count_shared[local_id] = 1;
should_add = true;
}
}
}
__syncthreads();
if (should_add)
{
if (local_id < 128)
{
color_shared[local_id] += color_shared[local_id + 128];
xy_shared[local_id] += xy_shared[local_id + 128];
count_shared[local_id] += count_shared[local_id + 128];
}
__syncthreads();
if (local_id < 64)
{
color_shared[local_id] += color_shared[local_id + 64];
xy_shared[local_id] += xy_shared[local_id + 64];
count_shared[local_id] += count_shared[local_id + 64];
}
__syncthreads();
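		// The remaining steps run within a single warp and rely on implicit
		// warp-synchronous execution (no __syncthreads() between the adds);
		// note that on GPUs with independent thread scheduling (Volta and
		// newer) this pattern needs __syncwarp() or volatile shared memory to
		// be strictly safe.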
if (local_id < 32)
{
color_shared[local_id] += color_shared[local_id + 32];
color_shared[local_id] += color_shared[local_id + 16];
color_shared[local_id] += color_shared[local_id + 8];
color_shared[local_id] += color_shared[local_id + 4];
color_shared[local_id] += color_shared[local_id + 2];
color_shared[local_id] += color_shared[local_id + 1];
xy_shared[local_id] += xy_shared[local_id + 32];
xy_shared[local_id] += xy_shared[local_id + 16];
xy_shared[local_id] += xy_shared[local_id + 8];
xy_shared[local_id] += xy_shared[local_id + 4];
xy_shared[local_id] += xy_shared[local_id + 2];
xy_shared[local_id] += xy_shared[local_id + 1];
count_shared[local_id] += count_shared[local_id + 32];
count_shared[local_id] += count_shared[local_id + 16];
count_shared[local_id] += count_shared[local_id + 8];
count_shared[local_id] += count_shared[local_id + 4];
count_shared[local_id] += count_shared[local_id + 2];
count_shared[local_id] += count_shared[local_id + 1];
}
}
__syncthreads();
if (local_id == 0)
{
const int n = blockIdx.x;
int accum_map_idx = ((n*num_blocks_per_superpixel_+blockIdx.z)*map_size.y +
spixel_index_y)*map_size.x + spixel_index_x;
accum_map[accum_map_idx].center = xy_shared[0];
accum_map[accum_map_idx].color_info = color_shared[0];
accum_map[accum_map_idx].no_pixels = count_shared[0];
}
}
__device__ inline void finalize_reduction_result_shared(
const spixel_info* accum_map,
spixel_info* spixel_list, Vector2i map_size,
int no_blocks_per_spixel, int x, int y, int n) {
int spixel_idx = (n*map_size.y + y)*map_size.x + x;
spixel_list[spixel_idx].center = Vector2f(0, 0);
spixel_list[spixel_idx].color_info = Vector4f(0, 0, 0, 0);
spixel_list[spixel_idx].no_pixels = 0;
for (int i = 0; i < no_blocks_per_spixel; i++)
{
int accum_list_idx = ((n*no_blocks_per_spixel + i)*map_size.y + y)*map_size.x + x;
spixel_list[spixel_idx].center += accum_map[accum_list_idx].center;
spixel_list[spixel_idx].color_info += accum_map[accum_list_idx].color_info;
spixel_list[spixel_idx].no_pixels += accum_map[accum_list_idx].no_pixels;
}
if (spixel_list[spixel_idx].no_pixels != 0)
{
spixel_list[spixel_idx].center /= (float)spixel_list[spixel_idx].no_pixels;
spixel_list[spixel_idx].color_info /= (float)spixel_list[spixel_idx].no_pixels;
}
}
__global__ void Finalize_Reduction_Result_device(const int nthreads,
const spixel_info* accum_map, spixel_info* spixel_list, Vector2i map_size,
int no_blocks_per_spixel) {
CUDA_KERNEL_LOOP(index, nthreads) {
// index from (N, smap_x, smap_y)
const int x = index % map_size.x;
const int y = (index / map_size.x) % map_size.y;
const int n = (index / map_size.x) / map_size.y;
finalize_reduction_result_shared(accum_map, spixel_list, map_size,
no_blocks_per_spixel, x, y, n);
}
}
template<typename Dtype>
void GenSlicinfoLayer<Dtype>::Update_Cluster_Center(Blob<Dtype>* idx_blob) {
spixel_info* accum_map_ptr = accum_map_.mutable_gpu_data();
spixel_info* spixel_list_ptr = spixel_map_.mutable_gpu_data();
const Vector4f* img_ptr = cvt_img_.gpu_data();
const Dtype* idx_ptr = idx_blob->gpu_data();
const int N = idx_blob->shape(0);
int no_blocks_per_line = spixel_size_ * 3 / BLOCK_SEARCH_RANGE_;
// Cannot use Caffe thread setting (use shared variable)
dim3 blockSize(BLOCK_SEARCH_RANGE_, BLOCK_SEARCH_RANGE_);
dim3 gridSize(N, map_size_.x * map_size_.y, num_block_per_superpixel_);
// BLOCK_DIM: BLOCK_SEARCH_RANGE
const int count = spixel_map_.count();
hipLaunchKernelGGL(( Update_Cluster_Center_device), dim3(gridSize), dim3(blockSize), 0, 0, img_ptr, idx_ptr,
accum_map_ptr, map_size_, img_size_, spixel_size_, no_blocks_per_line);
hipLaunchKernelGGL(( Finalize_Reduction_Result_device), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, accum_map_ptr, spixel_list_ptr, map_size_, num_block_per_superpixel_);
}
// ----------------------------------------------------
//
// Enforce connectivity
//
// ----------------------------------------------------
template<typename Dtype>
void GenSlicinfoLayer<Dtype>::Enforce_Connectivity(Blob<Dtype>* idx_blob,
Blob<Dtype>* dead_blob)
{
const int N = idx_blob->shape(0);
	// Merge superpixels that have too few pixels
// Indicate dead superpixels
for (int n = 0; n < N; n++) {
Dtype* idx_ptr = idx_blob->mutable_cpu_data() + idx_blob->offset(n, 0, 0, 0);
Dtype* dead_ptr = dead_blob->mutable_cpu_data() + dead_blob->offset(n, 0, 0, 0);
// loop superpixel index
for (int slic_yind = 0; slic_yind < map_size_.y; slic_yind++) {
for (int slic_xind = 0; slic_xind < map_size_.x; slic_xind++) {
int slic_index = slic_yind*map_size_.x + slic_xind;
const int hstart = max((slic_yind - 4) * spixel_size_, 0);
const int wstart = max((slic_xind - 4) * spixel_size_, 0);
const int hend = min((slic_yind + 4) * spixel_size_, img_size_.y);
const int wend = min((slic_xind + 4) * spixel_size_, img_size_.x);
int count = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (idx_ptr[h * img_size_.x + w] == slic_index) {
count++;
}
}
}
if (count < min_pixel_num_) {
// merge this superpixel with near other one.
dead_ptr[slic_index] = 1;
int new_slic_index;
if (slic_xind != map_size_.x - 1) {
new_slic_index = slic_index + 1;
} else if (slic_yind != map_size_.y - 1) {
new_slic_index = (slic_yind + 1)*map_size_.x + slic_xind;
} else {
new_slic_index = slic_index - 1;
}
	// reassign this superpixel's pixels to the neighboring label chosen above
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (idx_ptr[h * img_size_.x + w] == slic_index) {
idx_ptr[h * img_size_.x + w] = new_slic_index;
}
}
}
} else {
dead_ptr[slic_index] = 0;
}
}
}
}
}
template<typename Dtype>
void Visualize(Blob<Dtype>* top, int spixel_xdim, int spixel_ydim) {
// visualize... 3rd one?
int img_index = 0;
const Dtype* data = top->cpu_data();
Mat vismap(top->shape(2), top->shape(3), CV_8UC3);
for (int r = 0; r < top->shape(2); r++) {
for (int c = 0; c < top->shape(3); c++) {
int index = (img_index*top->shape(2) + r)*top->shape(3) + c;
Dtype value = data[index];
int slic_xindex = (int)value % spixel_xdim;
int slic_yindex = (int)value / spixel_xdim;
Vec3b color( slic_xindex/(float)spixel_xdim*255,
slic_yindex/(float)spixel_ydim*255, 0);
//Vec3b color( slic_xindex/(float)spixel_xdim*255,slic_xindex/(float)spixel_xdim*255,
// slic_xindex/(float)spixel_xdim*255);
vismap.at<Vec3b>(r, c) = color;
}
}
imshow("ahahaha", vismap);
waitKey(0);
}
template<typename Dtype>
void Visualize22(Blob<Dtype>* top, int spixel_xdim, int spixel_ydim, Blob<Dtype>* img) {
// visualize... 3rd one?
int img_index = 0;
const Dtype* data = top->cpu_data();
const Dtype* img_data = img->cpu_data();
Mat vismap(top->shape(2), top->shape(3), CV_8UC3);
int means[3] = {104, 117, 123};
for (int ch = 0; ch < 3; ch++) {
for (int r = 0; r < top->shape(2); r++) {
for (int c = 0; c < top->shape(3); c++) {
vismap.at<Vec3b>(r, c)[ch] = static_cast<uchar>(means[ch] +
*(img_data + ((img_index * 3 + ch)*top->shape(2) + r )*top->shape(3) + c));
}
}
}
for (int r = 1; r < top->shape(2)-1; r++) {
for (int c = 1; c < top->shape(3)-1; c++) {
int index = (img_index*top->shape(2) + r)*top->shape(3) + c;
Dtype value = data[index];
bool boundary = false;
for (int i = -1; i <= 0; i++) {
for (int j = -1; j <= 0; j++) {
if (i == 0 && j == 0) {
} else {
int tmp_index = (img_index*top->shape(2) + (r+i))
*top->shape(3) + (c+j);
Dtype val2 = data[tmp_index];
if (val2 != value) {
boundary = true;
}
}
}
}
if (boundary) {
Vec3b color(0,0,255);
vismap.at<Vec3b>(r, c) = color;
}
}
}
imshow("ahahaha2", vismap);
waitKey(0);
}
// ---------------------------------------------------
//
// Caffe forward implementation
//
// ---------------------------------------------------
template<typename Dtype>
void GenSlicinfoLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
boost::chrono::system_clock::time_point start;
start = boost::chrono::system_clock::now();
// Convert Image space from BGR to XYZ
Cvt_Img_Space(bottom[0], &cvt_img_, COLOR_CHAN_);
Init_Cluster_Centers();
Find_Center_Association(top[0]);
//Visualize(top[0], map_size_.x, map_size_.y);
for (int i = 0; i < 3; i++) { // TODO: iter_num hardcoded
Update_Cluster_Center(top[0]);
Find_Center_Association(top[0]);
}
Enforce_Connectivity(top[0], top[1]);// <- need to solve slic index problem.
//Visualize(top[0], map_size_.x, map_size_.y);
//Visualize22(top[0], map_size_.x, map_size_.y, bottom[0]);
// synchronize?
// hipDeviceSynchronize();
boost::chrono::duration<double> sec = boost::chrono::system_clock::now() - start;
//LOG(INFO) << "time : " << sec.count() << "s";
}
INSTANTIATE_LAYER_GPU_FUNCS(GenSlicinfoLayer);
}
| cc904dab4a86a7ee7681912630a8d29b369d8511.cu | #include <stdio.h>
#include <iostream>
#include <boost/chrono.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "caffe/layers/gen_slicinfo_layer.hpp"
using namespace gSLICr;
using namespace cv;
using std::cout;
using std::endl;
namespace caffe {
// ----------------------------------------------------
//
// Image Space Conversion
//
// ----------------------------------------------------
template<typename Dtype>
__global__ void Cvt_Img_Space_device(const int nthreads, const Dtype* inimg,
Vector4f* outimg, Vector2i img_size, int color_space) {
CUDA_KERNEL_LOOP(index, nthreads) {
// input index : (n, h, w) => blob index : (n, c, h, w)
const int max_height = img_size.y;
const int max_width = img_size.x;
const int w = index % max_width;
const int h = (index / max_width) % max_height;
const int n = (index / max_width) / max_height;
const int b_index = ((n * 3 + 0) * max_height + h) * max_width + w;
const int g_index = ((n * 3 + 1) * max_height + h) * max_width + w;
const int r_index = ((n * 3 + 2) * max_height + h) * max_width + w;
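    // The input blob is mean-subtracted BGR: the constants 104/117/123 added
    // back below appear to be the usual Caffe/ImageNet channel means (an
    // assumption; they are not defined in this file), and 0.0039216f ~ 1/255
    // rescales to [0,1] before the linear RGB->XYZ matrix that follows.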
float _b = (float)(inimg[b_index] + 104)* 0.0039216f;
// printf("_b = %f \n", _b);
float _g = (float)(inimg[g_index] + 117)* 0.0039216f;
float _r = (float)(inimg[r_index] + 123)* 0.0039216f;
float x = _r*0.412453f + _g*0.357580f + _b*0.180423f;
float y = _r*0.212671f + _g*0.715160f + _b*0.072169f;
float z = _r*0.019334f + _g*0.119193f + _b*0.950227f;
switch (color_space)
{
case gSLICr::XYZ:
outimg[index].x = x;
outimg[index].y = y;
outimg[index].z = z;
break;
case gSLICr::CIELAB:
float epsilon = 0.008856f; //actual CIE standard
float kappa = 903.3f; //actual CIE standard
float Xr = 0.950456f; //reference white
float Yr = 1.0f; //reference white
float Zr = 1.088754f; //reference white
float xr = x / Xr;
float yr = y / Yr;
float zr = z / Zr;
float fx, fy, fz;
if (xr > epsilon) fx = pow(xr, 1.0f / 3.0f);
else fx = (kappa*xr + 16.0f) / 116.0f;
if (yr > epsilon) fy = pow(yr, 1.0f / 3.0f);
else fy = (kappa*yr + 16.0f) / 116.0f;
if (zr > epsilon) fz = pow(zr, 1.0f / 3.0f);
else fz = (kappa*zr + 16.0f) / 116.0f;
outimg[index].x = 116.0f*fy - 16.0f;
outimg[index].y = 500.0f*(fx - fy);
outimg[index].z = 200.0f*(fy - fz);
break;
}
}
}
template<typename Dtype>
void GenSlicinfoLayer<Dtype>::Cvt_Img_Space(const Blob<Dtype>* inimg, Blob<Vector4f>* outimg,
int color_space) {
const Dtype* inimg_ptr = inimg->gpu_data();
Vector4f* outimg_ptr = outimg->mutable_gpu_data();
const int count = outimg->count();
Cvt_Img_Space_device<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, inimg_ptr, outimg_ptr, img_size_, color_space);
}
// ----------------------------------------------------
//
// Cluster Center Initialisation
//
// ----------------------------------------------------
__device__ inline void init_cluster_centers_shared(const Vector4f* inimg,
spixel_info* out_spixel, Vector2i map_size,
Vector2i img_size, int spixel_size, int x, int y) {
int cluster_idx = y * map_size.x + x;
int img_x = x * spixel_size + spixel_size / 2;
int img_y = y * spixel_size + spixel_size / 2;
img_x = img_x >= img_size.x ? (x * spixel_size + img_size.x) / 2 : img_x;
img_y = img_y >= img_size.y ? (y * spixel_size + img_size.y) / 2 : img_y;
// TODO: go one step towards gradients direction
out_spixel[cluster_idx].id = cluster_idx;
out_spixel[cluster_idx].center = Vector2f((float)img_x, (float)img_y);
out_spixel[cluster_idx].color_info = inimg[img_y*img_size.x + img_x];
out_spixel[cluster_idx].no_pixels = 0;
}
__global__ void Init_Cluster_Centers_device(const int nthreads, const Vector4f* inimg,
spixel_info* out_spixel, Vector2i map_size, Vector2i img_size, int spixel_size) {
CUDA_KERNEL_LOOP(index, nthreads) { // index : N x 1 x map_size x map_size
const int x = index % map_size.x;
const int y = (index / map_size.x) % map_size.y;
const int n = (index / map_size.x) / map_size.y;
const int start_index_img_n = n*img_size.y*img_size.x;
const int start_index_map_n = n*map_size.y*map_size.x;
const Vector4f* inimg_n = &(inimg[start_index_img_n]);
spixel_info* out_spixel_n = &(out_spixel[start_index_map_n]);
init_cluster_centers_shared(inimg_n, out_spixel_n, map_size, img_size,
spixel_size, x, y);
}
}
template<typename Dtype>
void GenSlicinfoLayer<Dtype>::Init_Cluster_Centers() {
spixel_info* spixel_list = spixel_map_.mutable_gpu_data();
const Vector4f* img_ptr = cvt_img_.gpu_data();
const int count = spixel_map_.count();
Init_Cluster_Centers_device<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, img_ptr, spixel_list, map_size_, img_size_, spixel_size_);
}
// ----------------------------------------------------
//
// Finding the Cluster Associations
//
// ----------------------------------------------------
__device__ inline float compute_slic_distance(const Vector4f& pix, int x, int y,
const spixel_info& center_info, float weight, float normalizer_xy,
float normalizer_color) {
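	// SLIC-style distance: squared color distance plus squared spatial
	// distance, each scaled by its normalizer and mixed by the compactness
	// weight:  D = sqrt(dcolor*normalizer_color + weight*dxy*normalizer_xy)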
float dcolor = (pix.x - center_info.color_info.x)*(pix.x - center_info.color_info.x)
+ (pix.y - center_info.color_info.y)*(pix.y - center_info.color_info.y)
+ (pix.z - center_info.color_info.z)*(pix.z - center_info.color_info.z);
float dxy = (x - center_info.center.x) * (x - center_info.center.x)
+ (y - center_info.center.y) * (y - center_info.center.y);
float retval = dcolor * normalizer_color + weight * dxy * normalizer_xy;
return sqrtf(retval);
}
template<typename Dtype>
__device__ inline void find_center_association_shared(const Vector4f* inimg,
const spixel_info* in_spixel_map, Dtype* out_idx_img,
Vector2i map_size, Vector2i img_size, int spixel_size, float weight,
int n, int x, int y, float max_xy_dist, float max_color_dist) {
int idx_img = (n*img_size.y + y)*img_size.x + x;
int ctr_x = x / spixel_size;
int ctr_y = y / spixel_size;
Dtype minidx = -1;
float dist = 999999.9999f;
// search 3x3 neighborhood
for (int i = -1; i <= 1; i++) for (int j = -1; j <= 1; j++)
{
int ctr_x_check = ctr_x + j;
int ctr_y_check = ctr_y + i;
if (ctr_x_check >= 0 && ctr_y_check >= 0 && ctr_x_check < map_size.x &&
ctr_y_check < map_size.y) {
int ctr_idx = (n*map_size.y + ctr_y_check)*map_size.x + ctr_x_check;
float cdist = compute_slic_distance(inimg[idx_img], x, y, in_spixel_map[ctr_idx],
weight, max_xy_dist, max_color_dist);
if (cdist < dist) {
dist = cdist;
minidx = in_spixel_map[ctr_idx].id;
}
}
}
if (minidx >= 0) out_idx_img[idx_img] = minidx;
}
template<typename Dtype>
__global__ void Find_Center_Association_device(const int nthreads, const Vector4f* inimg,
const spixel_info* in_spixel_map, Dtype* out_idx_img, Vector2i map_size,
Vector2i img_size, int spixel_size, float weight, float max_xy_dist,
float max_color_dist) {
CUDA_KERNEL_LOOP(index, nthreads) {
// index: (n, h, w)
const int x = index % img_size.x;
const int y = (index / img_size.x) % img_size.y;
const int n = (index / img_size.x) / img_size.y;
find_center_association_shared(inimg, in_spixel_map, out_idx_img, map_size, img_size,
spixel_size, weight, n, x, y, max_xy_dist, max_color_dist);
}
}
template<typename Dtype>
void GenSlicinfoLayer<Dtype>::Find_Center_Association(Blob<Dtype>* idx_blob) {
spixel_info* spixel_list = spixel_map_.mutable_gpu_data();
const Vector4f* img_ptr = cvt_img_.gpu_data();
Dtype* idx_ptr = idx_blob->mutable_gpu_data();
const int count = cvt_img_.count();
Find_Center_Association_device<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, img_ptr, spixel_list, idx_ptr, map_size_, img_size_, spixel_size_,
coh_weight_, max_xy_dist_, max_color_dist_);
}
// ----------------------------------------------------
//
// Updating the cluster center
//
// ----------------------------------------------------
template<typename Dtype>
__global__ void Update_Cluster_Center_device(const Vector4f* inimg,
const Dtype* in_idx_img, spixel_info* accum_map, Vector2i map_size, Vector2i img_size,
int spixel_size, int no_blocks_per_line) {
	int local_id = threadIdx.y * blockDim.x + threadIdx.x; // linear thread index within the thread block
// blockIdx.x : image index(N), blockIdx.y : superpixel index(mapsize^2)
// blockIdx.z = block index(num_blocks_per_superpixel)
const int num_blocks_per_superpixel_ = gridDim.z;
const int BLOCK_DIM = BLOCK_SEARCH_RANGE_;
int spixel_id = blockIdx.y; // superpixel index
int spixel_index_x = spixel_id % map_size.x;
int spixel_index_y = spixel_id / map_size.x;
__shared__ Vector4f color_shared[BLOCK_DIM*BLOCK_DIM];
__shared__ Vector2f xy_shared[BLOCK_DIM*BLOCK_DIM];
__shared__ int count_shared[BLOCK_DIM*BLOCK_DIM];
__shared__ bool should_add;
color_shared[local_id] = Vector4f(0, 0, 0, 0);
xy_shared[local_id] = Vector2f(0, 0);
count_shared[local_id] = 0;
should_add = false;
__syncthreads();
// compute the relative position in the search window
int block_x = blockIdx.z % no_blocks_per_line;
int block_y = blockIdx.z / no_blocks_per_line;
int x_offset = block_x * BLOCK_DIM + threadIdx.x;
int y_offset = block_y * BLOCK_DIM + threadIdx.y;
if (x_offset < spixel_size * 3 && y_offset < spixel_size * 3)
{
// compute the start of the search window
int x_start = spixel_index_x * spixel_size - spixel_size;
int y_start = spixel_index_y * spixel_size - spixel_size;
int x_img = x_start + x_offset;
int y_img = y_start + y_offset;
if (x_img >= 0 && x_img < img_size.x && y_img >= 0 && y_img < img_size.y)
{
int img_idx = (blockIdx.x*img_size.y + y_img)*img_size.x + x_img;
if (in_idx_img[img_idx] == spixel_id)
{
color_shared[local_id] = inimg[img_idx];
xy_shared[local_id] = Vector2f(x_img, y_img);
count_shared[local_id] = 1;
should_add = true;
}
}
}
__syncthreads();
if (should_add)
{
if (local_id < 128)
{
color_shared[local_id] += color_shared[local_id + 128];
xy_shared[local_id] += xy_shared[local_id + 128];
count_shared[local_id] += count_shared[local_id + 128];
}
__syncthreads();
if (local_id < 64)
{
color_shared[local_id] += color_shared[local_id + 64];
xy_shared[local_id] += xy_shared[local_id + 64];
count_shared[local_id] += count_shared[local_id + 64];
}
__syncthreads();
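		// The remaining steps run within a single warp and rely on implicit
		// warp-synchronous execution (no __syncthreads() between the adds);
		// note that on GPUs with independent thread scheduling (Volta and
		// newer) this pattern needs __syncwarp() or volatile shared memory to
		// be strictly safe.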
if (local_id < 32)
{
color_shared[local_id] += color_shared[local_id + 32];
color_shared[local_id] += color_shared[local_id + 16];
color_shared[local_id] += color_shared[local_id + 8];
color_shared[local_id] += color_shared[local_id + 4];
color_shared[local_id] += color_shared[local_id + 2];
color_shared[local_id] += color_shared[local_id + 1];
xy_shared[local_id] += xy_shared[local_id + 32];
xy_shared[local_id] += xy_shared[local_id + 16];
xy_shared[local_id] += xy_shared[local_id + 8];
xy_shared[local_id] += xy_shared[local_id + 4];
xy_shared[local_id] += xy_shared[local_id + 2];
xy_shared[local_id] += xy_shared[local_id + 1];
count_shared[local_id] += count_shared[local_id + 32];
count_shared[local_id] += count_shared[local_id + 16];
count_shared[local_id] += count_shared[local_id + 8];
count_shared[local_id] += count_shared[local_id + 4];
count_shared[local_id] += count_shared[local_id + 2];
count_shared[local_id] += count_shared[local_id + 1];
}
}
__syncthreads();
if (local_id == 0)
{
const int n = blockIdx.x;
int accum_map_idx = ((n*num_blocks_per_superpixel_+blockIdx.z)*map_size.y +
spixel_index_y)*map_size.x + spixel_index_x;
accum_map[accum_map_idx].center = xy_shared[0];
accum_map[accum_map_idx].color_info = color_shared[0];
accum_map[accum_map_idx].no_pixels = count_shared[0];
}
}
__device__ inline void finalize_reduction_result_shared(
const spixel_info* accum_map,
spixel_info* spixel_list, Vector2i map_size,
int no_blocks_per_spixel, int x, int y, int n) {
int spixel_idx = (n*map_size.y + y)*map_size.x + x;
spixel_list[spixel_idx].center = Vector2f(0, 0);
spixel_list[spixel_idx].color_info = Vector4f(0, 0, 0, 0);
spixel_list[spixel_idx].no_pixels = 0;
for (int i = 0; i < no_blocks_per_spixel; i++)
{
int accum_list_idx = ((n*no_blocks_per_spixel + i)*map_size.y + y)*map_size.x + x;
spixel_list[spixel_idx].center += accum_map[accum_list_idx].center;
spixel_list[spixel_idx].color_info += accum_map[accum_list_idx].color_info;
spixel_list[spixel_idx].no_pixels += accum_map[accum_list_idx].no_pixels;
}
if (spixel_list[spixel_idx].no_pixels != 0)
{
spixel_list[spixel_idx].center /= (float)spixel_list[spixel_idx].no_pixels;
spixel_list[spixel_idx].color_info /= (float)spixel_list[spixel_idx].no_pixels;
}
}
__global__ void Finalize_Reduction_Result_device(const int nthreads,
const spixel_info* accum_map, spixel_info* spixel_list, Vector2i map_size,
int no_blocks_per_spixel) {
CUDA_KERNEL_LOOP(index, nthreads) {
// index from (N, smap_x, smap_y)
const int x = index % map_size.x;
const int y = (index / map_size.x) % map_size.y;
const int n = (index / map_size.x) / map_size.y;
finalize_reduction_result_shared(accum_map, spixel_list, map_size,
no_blocks_per_spixel, x, y, n);
}
}
template<typename Dtype>
void GenSlicinfoLayer<Dtype>::Update_Cluster_Center(Blob<Dtype>* idx_blob) {
spixel_info* accum_map_ptr = accum_map_.mutable_gpu_data();
spixel_info* spixel_list_ptr = spixel_map_.mutable_gpu_data();
const Vector4f* img_ptr = cvt_img_.gpu_data();
const Dtype* idx_ptr = idx_blob->gpu_data();
const int N = idx_blob->shape(0);
int no_blocks_per_line = spixel_size_ * 3 / BLOCK_SEARCH_RANGE_;
// Cannot use Caffe thread setting (use shared variable)
dim3 blockSize(BLOCK_SEARCH_RANGE_, BLOCK_SEARCH_RANGE_);
dim3 gridSize(N, map_size_.x * map_size_.y, num_block_per_superpixel_);
// BLOCK_DIM: BLOCK_SEARCH_RANGE
const int count = spixel_map_.count();
Update_Cluster_Center_device<<<gridSize, blockSize>>>(img_ptr, idx_ptr,
accum_map_ptr, map_size_, img_size_, spixel_size_, no_blocks_per_line);
Finalize_Reduction_Result_device<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, accum_map_ptr, spixel_list_ptr, map_size_, num_block_per_superpixel_);
}
// ----------------------------------------------------
//
// Enforce connectivity
//
// ----------------------------------------------------
template<typename Dtype>
void GenSlicinfoLayer<Dtype>::Enforce_Connectivity(Blob<Dtype>* idx_blob,
Blob<Dtype>* dead_blob)
{
const int N = idx_blob->shape(0);
	// Merge superpixels that have too few pixels
// Indicate dead superpixels
for (int n = 0; n < N; n++) {
Dtype* idx_ptr = idx_blob->mutable_cpu_data() + idx_blob->offset(n, 0, 0, 0);
Dtype* dead_ptr = dead_blob->mutable_cpu_data() + dead_blob->offset(n, 0, 0, 0);
// loop superpixel index
for (int slic_yind = 0; slic_yind < map_size_.y; slic_yind++) {
for (int slic_xind = 0; slic_xind < map_size_.x; slic_xind++) {
int slic_index = slic_yind*map_size_.x + slic_xind;
const int hstart = max((slic_yind - 4) * spixel_size_, 0);
const int wstart = max((slic_xind - 4) * spixel_size_, 0);
const int hend = min((slic_yind + 4) * spixel_size_, img_size_.y);
const int wend = min((slic_xind + 4) * spixel_size_, img_size_.x);
int count = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (idx_ptr[h * img_size_.x + w] == slic_index) {
count++;
}
}
}
if (count < min_pixel_num_) {
// merge this superpixel with near other one.
dead_ptr[slic_index] = 1;
int new_slic_index;
if (slic_xind != map_size_.x - 1) {
new_slic_index = slic_index + 1;
} else if (slic_yind != map_size_.y - 1) {
new_slic_index = (slic_yind + 1)*map_size_.x + slic_xind;
} else {
new_slic_index = slic_index - 1;
}
	// reassign this superpixel's pixels to the neighboring label chosen above
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (idx_ptr[h * img_size_.x + w] == slic_index) {
idx_ptr[h * img_size_.x + w] = new_slic_index;
}
}
}
} else {
dead_ptr[slic_index] = 0;
}
}
}
}
}
template<typename Dtype>
void Visualize(Blob<Dtype>* top, int spixel_xdim, int spixel_ydim) {
// visualize... 3rd one?
int img_index = 0;
const Dtype* data = top->cpu_data();
Mat vismap(top->shape(2), top->shape(3), CV_8UC3);
for (int r = 0; r < top->shape(2); r++) {
for (int c = 0; c < top->shape(3); c++) {
int index = (img_index*top->shape(2) + r)*top->shape(3) + c;
Dtype value = data[index];
int slic_xindex = (int)value % spixel_xdim;
int slic_yindex = (int)value / spixel_xdim;
Vec3b color( slic_xindex/(float)spixel_xdim*255,
slic_yindex/(float)spixel_ydim*255, 0);
//Vec3b color( slic_xindex/(float)spixel_xdim*255,slic_xindex/(float)spixel_xdim*255,
// slic_xindex/(float)spixel_xdim*255);
vismap.at<Vec3b>(r, c) = color;
}
}
imshow("ahahaha", vismap);
waitKey(0);
}
template<typename Dtype>
void Visualize22(Blob<Dtype>* top, int spixel_xdim, int spixel_ydim, Blob<Dtype>* img) {
// visualize... 3rd one?
int img_index = 0;
const Dtype* data = top->cpu_data();
const Dtype* img_data = img->cpu_data();
Mat vismap(top->shape(2), top->shape(3), CV_8UC3);
int means[3] = {104, 117, 123};
for (int ch = 0; ch < 3; ch++) {
for (int r = 0; r < top->shape(2); r++) {
for (int c = 0; c < top->shape(3); c++) {
vismap.at<Vec3b>(r, c)[ch] = static_cast<uchar>(means[ch] +
*(img_data + ((img_index * 3 + ch)*top->shape(2) + r )*top->shape(3) + c));
}
}
}
for (int r = 1; r < top->shape(2)-1; r++) {
for (int c = 1; c < top->shape(3)-1; c++) {
int index = (img_index*top->shape(2) + r)*top->shape(3) + c;
Dtype value = data[index];
bool boundary = false;
for (int i = -1; i <= 0; i++) {
for (int j = -1; j <= 0; j++) {
if (i == 0 && j == 0) {
} else {
int tmp_index = (img_index*top->shape(2) + (r+i))
*top->shape(3) + (c+j);
Dtype val2 = data[tmp_index];
if (val2 != value) {
boundary = true;
}
}
}
}
if (boundary) {
Vec3b color(0,0,255);
vismap.at<Vec3b>(r, c) = color;
}
}
}
imshow("ahahaha2", vismap);
waitKey(0);
}
// ---------------------------------------------------
//
// Caffe forward implementation
//
// ---------------------------------------------------
template<typename Dtype>
void GenSlicinfoLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
boost::chrono::system_clock::time_point start;
start = boost::chrono::system_clock::now();
// Convert Image space from BGR to XYZ
Cvt_Img_Space(bottom[0], &cvt_img_, COLOR_CHAN_);
Init_Cluster_Centers();
Find_Center_Association(top[0]);
//Visualize(top[0], map_size_.x, map_size_.y);
for (int i = 0; i < 3; i++) { // TODO: iter_num hardcoded
Update_Cluster_Center(top[0]);
Find_Center_Association(top[0]);
}
Enforce_Connectivity(top[0], top[1]);// <- need to solve slic index problem.
//Visualize(top[0], map_size_.x, map_size_.y);
//Visualize22(top[0], map_size_.x, map_size_.y, bottom[0]);
// synchronize?
// cudaThreadSynchronize();
boost::chrono::duration<double> sec = boost::chrono::system_clock::now() - start;
//LOG(INFO) << "time : " << sec.count() << "s";
}
INSTANTIATE_LAYER_GPU_FUNCS(GenSlicinfoLayer);
}
|
3f9b7c8333158be417df93d41e7b2bece8f00795.hip | // !!! This is a file automatically generated by hipify!!!
#if defined(__HIPCC__)
#ifndef BOOST_NOINLINE
# define BOOST_NOINLINE __attribute__ ((noinline))
#endif //BOOST_NOINLINE
#endif //__HIPCC__
#include <cugip/image.hpp>
//#include <cugip/memory_view.hpp>
//#include <cugip/memory.hpp>
#include <cugip/copy.hpp>
#include <cugip/host_image_view.hpp>
#include <cugip/cellular_automata/cellular_automata.hpp>
#include <cugip/cellular_automata/async_cellular_automata.hpp>
#include <cugip/procedural_views.hpp>
#include <cugip/view_arithmetics.hpp>
#include <thrust/device_vector.h>
#include "watershed_options.hpp"
#include <cugip/timers.hpp>
using namespace cugip;
struct InitWatershed
{
CUGIP_DECL_HYBRID
Tuple<float, int32_t, float>
operator()(float aGradient, int32_t aLocalMinimum) const
{
return Tuple<float, int32_t, float>(aGradient, aLocalMinimum, aLocalMinimum > 0 ? 0 : 1.0e15);
}
};
struct ZipGradientAndLabel
{
CUGIP_DECL_HYBRID
Tuple<float, int32_t>
operator()(float aGradient, int32_t aLocalMinimum) const
{
return Tuple<float, int32_t>(aGradient, aLocalMinimum);
}
};
struct BlockConvergenceFlagView
{
template<typename TGlobalState>
CUGIP_DECL_DEVICE
void update_global(TGlobalState &aGlobalState)
{
//TODO
if (*is_signaled_ptr || *iteration_ptr > 1) {
aGlobalState.signal();
}
}
template<typename TGlobalState>
CUGIP_DECL_DEVICE
void update_global2(TGlobalState &aGlobalState)
{
update_global(aGlobalState);
}
CUGIP_DECL_DEVICE
void signal()
{
*is_signaled_ptr = true;
}
bool *is_signaled_ptr;
int *iteration_ptr;
};
template<int tIterationLimit = 10000>
struct BlockConvergenceFlag: public BlockConvergenceFlagView
{
CUGIP_DECL_DEVICE
void initialize()
{
this->is_signaled_ptr = &is_signaled;
this->iteration_ptr = &iteration;
iteration = 0;
}
CUGIP_DECL_DEVICE
void preprocess()
{
if (is_in_thread(0,0,0)) {
is_signaled = false;
++iteration;
}
}
CUGIP_DECL_DEVICE
bool is_finished()
{
return !is_signaled || iteration > tIterationLimit;
}
CUGIP_DECL_DEVICE
BlockConvergenceFlagView view()
{
return static_cast<BlockConvergenceFlagView>(*this);
}
bool is_signaled;
int iteration;
};
struct WatershedByPointer : WatershedSteepestDescentRuleBase
{
template<typename T>
using remove_reference = typename std::remove_reference<T>::type;
//TODO - global state by reference
template<typename TNeighborhood, typename TConvergenceFlag>
CUGIP_DECL_DEVICE
auto operator()(int aIteration, TNeighborhood aNeighborhood, TConvergenceFlag aConvergenceState) -> remove_reference<decltype(aNeighborhood[0])> const
{
//input, label
auto gridView = aNeighborhood.locator().view();
auto value = aNeighborhood[0];
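		// Label encoding used by this rule (as read from the code below):
		//   label > 0  : cell already carries a minimum/basin label;
		//   label == 0 : unassigned; pick the steepest-descent neighbor and
		//                store a pointer to it encoded as -(1 + linear index);
		//   label < 0  : follow that pointer and copy the pointed-to label.
		// Iterating this amounts to pointer jumping until no label changes.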
if (get<1>(value) < 0) {
auto position = index_from_linear_access_index(gridView, (-1 * get<1>(value)) - 1);
auto newValue = gridView[position];
/*int currentLabel = -1 * (1 + get_linear_access_index(gridView.dimensions(), aNeighborhood.locator().coords()));
if (aIteration > 30) {
printf("AAAAAAAAAAA %d %d, %d\n", get<1>(value), get<1>(newValue), currentLabel);
}*/
if (get<1>(value) != get<1>(newValue)) {
get<1>(value) = get<1>(newValue);
aConvergenceState.signal();
}
} else {
if (get<1>(value) == 0) {
int index = -1;
auto minValue = get<0>(value);
for (int i = 1; i < aNeighborhood.size(); ++i) {
auto current = get<0>(aNeighborhood[i]);
//printf("%d %d - %d val = %d -> %d\n", threadIdx.x, threadIdx.y, i, get<1>(aNeighborhood[0]), get<1>(aNeighborhood[i]));
if (current <= minValue && aNeighborhood.is_inside_valid_region(i)) {
index = i;
minValue = current;
}
}
if (index != -1) {
get<1>(value) = -1 * (1 + get_linear_access_index(gridView.dimensions(), aNeighborhood.view_index(index)));
}
aConvergenceState.signal();
}
}
return value;
}
};
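// Seeds the watershed: runs a connected-component cellular automaton
// (LocalMinimaConnectedComponentRule) over zipped (gradient, local-minimum)
// values, merging equivalent plateau labels through EquivalenceManager until
// the convergence flag stops being signaled; the merged labels are then
// copied into `labels` to seed the flooding stages below.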
template<int tDimension, typename TGradientView, typename TLabelView>
void initLocalMinimaLabels(
TGradientView deviceGradient,
TLabelView labels,
AggregatingTimerSet<2, int> &timer,
device_flag &convergenceFlag
)
{
typedef Tuple<float, int32_t> Value2;
typedef CellularAutomatonWithGlobalState<
Grid<Value2, tDimension>,
VonNeumannNeighborhood<tDimension>,
LocalMinimaConnectedComponentRule,
LocalMinimaEquivalenceGlobalState<int32_t>> LocalMinimaAutomaton;
auto localMinima = unaryOperatorOnLocator(deviceGradient, LocalMinimumLabel());
LocalMinimaEquivalenceGlobalState<int32_t> globalState;
thrust::device_vector<int32_t> buffer;
buffer.resize(elementCount(deviceGradient) + 1);
globalState.manager = EquivalenceManager<int32_t>(thrust::raw_pointer_cast(&buffer[0]), buffer.size());
globalState.mDeviceFlag = convergenceFlag.view();
globalState.manager.initialize();
LocalMinimaAutomaton localMinimumAutomaton;
localMinimumAutomaton.initialize(
nAryOperator(ZipGradientAndLabel(), deviceGradient, localMinima),
globalState);
int iteration = 0;
do {
auto interval = timer.start(0, iteration++);
localMinimumAutomaton.iterate(1);
} while (!globalState.is_finished());
//localMinimumAutomaton.iterate(100);
copy(getDimension(localMinimumAutomaton.getCurrentState(), IntValue<1>()), labels);
}
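// Distance-based variant: the per-cell state is (gradient, label, distance);
// seeds from initLocalMinimaLabels start at distance 0 and every other cell
// at a large sentinel (1e15, see InitWatershed). WatershedRule, defined in
// the cugip cellular-automata headers, then relaxes these distances and
// propagates labels each iteration until ConvergenceFlag is no longer raised.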
template<int tDimension>
void distanceBasedWShed(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
typedef Tuple<float, int32_t, float> Value;
device_image<float, tDimension> deviceGradient(aInput.dimensions());
device_image<int32_t, tDimension> labels(aInput.dimensions());
copy(aInput, view(deviceGradient));
device_flag convergenceFlag;
AggregatingTimerSet<2, int> timer;
initLocalMinimaLabels<tDimension>(const_view(deviceGradient), view(labels), timer, convergenceFlag);
auto wshed = nAryOperator(InitWatershed(), const_view(deviceGradient), const_view(labels));
typedef CellularAutomatonWithGlobalState<
Grid<Value, tDimension>,
MooreNeighborhood<tDimension>,
WatershedRule,
ConvergenceFlag> WatershedAutomaton;
ConvergenceFlag convergenceGlobalState;
convergenceGlobalState.mDeviceFlag = convergenceFlag.view();
WatershedAutomaton automaton;
automaton.initialize(wshed, convergenceGlobalState);
int iteration = 0;
do {
auto interval = timer.start(1, iteration++);
automaton.iterate(1);
} while (!convergenceGlobalState.is_finished());
std::cout << timer.createReport({"Local minima search", "Wshed iterations"});
auto state = automaton.getCurrentState();
copy(getDimension(state, IntValue<1>()), view(labels));
copy(const_view(labels), aOutput);
}
template<int tDimension, int tIterationLimit>
void distanceBasedWShedAsync(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
typedef Tuple<float, int32_t, float> Value;
device_image<float, tDimension> deviceGradient(aInput.dimensions());
device_image<int32_t, tDimension> labels(aInput.dimensions());
copy(aInput, view(deviceGradient));
device_flag convergenceFlag;
AggregatingTimerSet<2, int> timer;
initLocalMinimaLabels<tDimension>(const_view(deviceGradient), view(labels), timer, convergenceFlag);
auto wshed = nAryOperator(InitWatershed(), const_view(deviceGradient), const_view(labels));
typedef AsyncCellularAutomatonWithGlobalState<
Grid<Value, tDimension>,
MooreNeighborhood<tDimension>,
WatershedRule,
ConvergenceFlag,
BlockConvergenceFlag<tIterationLimit>> WatershedAutomaton;
ConvergenceFlag convergenceGlobalState;
convergenceGlobalState.mDeviceFlag = convergenceFlag.view();
WatershedAutomaton automaton;
automaton.initialize(wshed, convergenceGlobalState);
int iteration = 0;
do {
auto interval = timer.start(1, iteration++);
automaton.iterate(1);
} while (!convergenceGlobalState.is_finished());
std::cout << timer.createReport({"Local minima search", "Wshed iterations"});
auto state = automaton.getCurrentState();
copy(getDimension(state, IntValue<1>()), view(labels));
copy(const_view(labels), aOutput);
}
template<int tDimension, typename TView>
void lowerCompletion(TView aInput)
{
//lower completion
device_image<float, tDimension> tmp(aInput.dimensions());
transform_locator(aInput, view(tmp), HandlePlateauBorder());
copy(const_view(tmp), aInput);
}
template<int tDimension>
void steepestDescentWShedSimple(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
typedef Tuple<float, int32_t> Value;
typedef CellularAutomatonWithGlobalState<
Grid<Value, tDimension>,
MooreNeighborhood<tDimension>,
WatershedSteepestDescentRule,
ConvergenceFlag> WatershedAutomaton;
device_image<float, tDimension> deviceGradient(aInput.dimensions());
device_image<int32_t, tDimension> labels(aInput.dimensions());
copy(aInput, view(deviceGradient));
lowerCompletion<tDimension>(view(deviceGradient));
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), view(labels));
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), UniqueIdDeviceImageView<tDimension, int32_t>(aInput.dimensions()));
auto wshed = zipViews(const_view(deviceGradient), UniqueIdDeviceImageView<tDimension>(aInput.dimensions()));
device_flag convergenceFlag;
ConvergenceFlag convergenceGlobalState;
convergenceGlobalState.mDeviceFlag = convergenceFlag.view();
WatershedAutomaton automaton;
automaton.initialize(wshed, convergenceGlobalState);
AggregatingTimerSet<1, int> timer;
int iteration = 0;
do {
auto interval = timer.start(0, iteration++);
automaton.iterate(1);
} while (!convergenceGlobalState.is_finished());
std::cout << timer.createReport({"Wshed iterations"});
auto state = automaton.getCurrentState();
copy(getDimension(state, IntValue<1>()), view(labels));
copy(const_view(labels), aOutput);
}
template<int tDimension>
void steepestDescentWShedPointer(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
typedef Tuple<float, int32_t> Value;
typedef CellularAutomatonWithGlobalState<
Grid<Value, tDimension>,
MooreNeighborhood<tDimension>,
WatershedByPointer,
ConvergenceFlag> WatershedAutomaton;
device_image<float, tDimension> deviceGradient(aInput.dimensions());
device_image<int32_t, tDimension> labels(aInput.dimensions());
copy(aInput, view(deviceGradient));
AggregatingTimerSet<2, int> timer;
device_flag convergenceFlag;
lowerCompletion<tDimension>(view(deviceGradient));
initLocalMinimaLabels<tDimension>(const_view(deviceGradient), view(labels), timer, convergenceFlag);
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), view(labels));
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), UniqueIdDeviceImageView<tDimension, int32_t>(aInput.dimensions()));
auto wshed = zipViews(const_view(deviceGradient), const_view(labels));
ConvergenceFlag convergenceGlobalState;
convergenceGlobalState.mDeviceFlag = convergenceFlag.view();
WatershedAutomaton automaton;
automaton.initialize(wshed, convergenceGlobalState);
int iteration = 0;
do {
auto interval = timer.start(1, iteration++);
automaton.iterate(1);
} while (!convergenceGlobalState.is_finished());
std::cout << timer.createReport({"Local minima search", "Wshed iterations"});
auto state = automaton.getCurrentState();
copy(getDimension(state, IntValue<1>()), view(labels));
copy(const_view(labels), aOutput);
}
template<int tDimension>
void steepestDescentWShedSimpleAsync(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
typedef Tuple<float, int32_t> Value;
typedef AsyncCellularAutomatonWithGlobalState<
Grid<Value, tDimension>,
MooreNeighborhood<tDimension>,
WatershedSteepestDescentRule,
ConvergenceFlag,
BlockConvergenceFlag<1000>> WatershedAutomaton;
/*typedef CellularAutomatonWithGlobalState<
Grid<Value, tDimension>,
MooreNeighborhood<tDimension>,
WatershedSteepestDescentRule,
ConvergenceFlag> WatershedAutomaton;*/
device_image<float, tDimension> deviceGradient(aInput.dimensions());
device_image<int32_t, tDimension> labels(aInput.dimensions());
copy(aInput, view(deviceGradient));
lowerCompletion<tDimension>(view(deviceGradient));
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), view(labels));
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), UniqueIdDeviceImageView<tDimension, int32_t>(aInput.dimensions()));
auto wshed = zipViews(const_view(deviceGradient), UniqueIdDeviceImageView<tDimension>(aInput.dimensions()));
device_flag convergenceFlag;
ConvergenceFlag convergenceGlobalState;
convergenceGlobalState.mDeviceFlag = convergenceFlag.view();
WatershedAutomaton automaton;
automaton.initialize(wshed, convergenceGlobalState);
AggregatingTimerSet<1, int> timer;
int iteration = 0;
do {
auto interval = timer.start(0, iteration++);
automaton.iterate(1);
} while (!convergenceGlobalState.is_finished());
std::cout << timer.createReport({"Wshed iterations"});
auto state = automaton.getCurrentState();
copy(getDimension(state, IntValue<1>()), view(labels));
copy(const_view(labels), aOutput);
}
template<int tDimension>
void steepestDescentWShedGlobalState(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
typedef Tuple<float, int32_t> Value;
typedef CellularAutomatonWithGlobalState<
Grid<Value, tDimension>,
MooreNeighborhood<tDimension>,
WatershedSteepestDescentGlobalStateRule,
LocalMinimaEquivalenceGlobalState<int32_t>> WatershedAutomaton;
device_image<float, tDimension> deviceGradient(aInput.dimensions());
device_image<int32_t, tDimension> labels(aInput.dimensions());
copy(aInput, view(deviceGradient));
lowerCompletion<tDimension>(view(deviceGradient));
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), view(labels));
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), UniqueIdDeviceImageView<tDimension, int32_t>(aInput.dimensions()));
auto wshed = zipViews(const_view(deviceGradient), UniqueIdDeviceImageView<tDimension>(aInput.dimensions()));
device_flag convergenceFlag;
LocalMinimaEquivalenceGlobalState<int32_t> globalState;
thrust::device_vector<int32_t> buffer;
buffer.resize(elementCount(aInput) + 1);
globalState.manager = EquivalenceManager<int32_t>(thrust::raw_pointer_cast(&buffer[0]), buffer.size());
globalState.mDeviceFlag = convergenceFlag.view();
globalState.manager.initialize();
WatershedAutomaton automaton;
automaton.initialize(wshed, globalState);
AggregatingTimerSet<1, int> timer;
int iteration = 0;
do {
auto interval = timer.start(0, iteration++);
automaton.iterate(1);
} while (!globalState.is_finished());
std::cout << timer.createReport({"Wshed iterations"});
auto state = automaton.getCurrentState();
copy(getDimension(state, IntValue<1>()), view(labels));
copy(const_view(labels), aOutput);
}
template<int tDimension>
void steepestDescentWShedAsyncGlobalState(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
}
template<int tDimension>
void steepestDescentWShedPointers(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
}
//******************************************************************************
template<int tDimension>
void runWatershedTransformationDim(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
switch (aOptions.wshedVariant) {
case WatershedVariant::DistanceBased:
distanceBasedWShed<tDimension>(aInput, aOutput, aOptions);
break;
case WatershedVariant::DistanceBasedAsync:
distanceBasedWShedAsync<tDimension, 10000>(aInput, aOutput, aOptions);
break;
case WatershedVariant::DistanceBasedAsyncLimited:
distanceBasedWShedAsync<tDimension, 2>(aInput, aOutput, aOptions);
break;
case WatershedVariant::SteepestDescentSimple:
steepestDescentWShedSimple<tDimension>(aInput, aOutput, aOptions);
break;
case WatershedVariant::SteepestDescentSimpleAsync:
steepestDescentWShedSimpleAsync<tDimension>(aInput, aOutput, aOptions);
break;
case WatershedVariant::SteepestDescentGlobalState:
steepestDescentWShedGlobalState<tDimension>(aInput, aOutput, aOptions);
break;
case WatershedVariant::SteepestDescentPointer:
steepestDescentWShedPointer<tDimension>(aInput, aOutput, aOptions);
break;
default:
std::cerr << "Unknown watershed variant\n";
}
}
void runWatershedTransformation(
const_host_image_view<const float, 2> aInput,
host_image_view<int32_t, 2> aOutput,
const WatershedOptions &aOptions)
{
//runWatershedTransformationDim<2>(aInput, aOutput, aOptions);
}
void runWatershedTransformation(
const_host_image_view<const float, 3> aInput,
host_image_view<int32_t, 3> aOutput,
const WatershedOptions &aOptions)
{
runWatershedTransformationDim<3>(aInput, aOutput, aOptions);
}
| 3f9b7c8333158be417df93d41e7b2bece8f00795.cu | #if defined(__CUDACC__)
#ifndef BOOST_NOINLINE
# define BOOST_NOINLINE __attribute__ ((noinline))
#endif //BOOST_NOINLINE
#endif //__CUDACC__
#include <cugip/image.hpp>
//#include <cugip/memory_view.hpp>
//#include <cugip/memory.hpp>
#include <cugip/copy.hpp>
#include <cugip/host_image_view.hpp>
#include <cugip/cellular_automata/cellular_automata.hpp>
#include <cugip/cellular_automata/async_cellular_automata.hpp>
#include <cugip/procedural_views.hpp>
#include <cugip/view_arithmetics.hpp>
#include <thrust/device_vector.h>
#include "watershed_options.hpp"
#include <cugip/timers.hpp>
using namespace cugip;
struct InitWatershed
{
CUGIP_DECL_HYBRID
Tuple<float, int32_t, float>
operator()(float aGradient, int32_t aLocalMinimum) const
{
return Tuple<float, int32_t, float>(aGradient, aLocalMinimum, aLocalMinimum > 0 ? 0 : 1.0e15);
}
};
struct ZipGradientAndLabel
{
CUGIP_DECL_HYBRID
Tuple<float, int32_t>
operator()(float aGradient, int32_t aLocalMinimum) const
{
return Tuple<float, int32_t>(aGradient, aLocalMinimum);
}
};
struct BlockConvergenceFlagView
{
template<typename TGlobalState>
CUGIP_DECL_DEVICE
void update_global(TGlobalState &aGlobalState)
{
//TODO
if (*is_signaled_ptr || *iteration_ptr > 1) {
aGlobalState.signal();
}
}
template<typename TGlobalState>
CUGIP_DECL_DEVICE
void update_global2(TGlobalState &aGlobalState)
{
update_global(aGlobalState);
}
CUGIP_DECL_DEVICE
void signal()
{
*is_signaled_ptr = true;
}
bool *is_signaled_ptr;
int *iteration_ptr;
};
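// Per-block convergence state for the asynchronous automaton: a block keeps
// iterating until none of its cells signals a change or tIterationLimit local
// iterations have elapsed, and update_global() reports any unfinished work to
// the global convergence flag.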
template<int tIterationLimit = 10000>
struct BlockConvergenceFlag: public BlockConvergenceFlagView
{
CUGIP_DECL_DEVICE
void initialize()
{
this->is_signaled_ptr = &is_signaled;
this->iteration_ptr = &iteration;
iteration = 0;
}
CUGIP_DECL_DEVICE
void preprocess()
{
if (is_in_thread(0,0,0)) {
is_signaled = false;
++iteration;
}
}
CUGIP_DECL_DEVICE
bool is_finished()
{
return !is_signaled || iteration > tIterationLimit;
}
CUGIP_DECL_DEVICE
BlockConvergenceFlagView view()
{
return static_cast<BlockConvergenceFlagView>(*this);
}
bool is_signaled;
int iteration;
};
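// Steepest-descent watershed rule based on pointer jumping: a negative label
// encodes (as -(linear_index + 1)) the neighbour a cell drains to, a zero label
// means "not assigned yet" and triggers the search for the steepest-descent
// neighbour, and later iterations copy the label found at the pointed-to cell
// until nothing changes any more.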
struct WatershedByPointer : WatershedSteepestDescentRuleBase
{
template<typename T>
using remove_reference = typename std::remove_reference<T>::type;
//TODO - global state by reference
template<typename TNeighborhood, typename TConvergenceFlag>
CUGIP_DECL_DEVICE
auto operator()(int aIteration, TNeighborhood aNeighborhood, TConvergenceFlag aConvergenceState) -> remove_reference<decltype(aNeighborhood[0])> const
{
//input, label
auto gridView = aNeighborhood.locator().view();
auto value = aNeighborhood[0];
if (get<1>(value) < 0) {
auto position = index_from_linear_access_index(gridView, (-1 * get<1>(value)) - 1);
auto newValue = gridView[position];
/*int currentLabel = -1 * (1 + get_linear_access_index(gridView.dimensions(), aNeighborhood.locator().coords()));
if (aIteration > 30) {
printf("AAAAAAAAAAA %d %d, %d\n", get<1>(value), get<1>(newValue), currentLabel);
}*/
if (get<1>(value) != get<1>(newValue)) {
get<1>(value) = get<1>(newValue);
aConvergenceState.signal();
}
} else {
if (get<1>(value) == 0) {
int index = -1;
auto minValue = get<0>(value);
for (int i = 1; i < aNeighborhood.size(); ++i) {
auto current = get<0>(aNeighborhood[i]);
//printf("%d %d - %d val = %d -> %d\n", threadIdx.x, threadIdx.y, i, get<1>(aNeighborhood[0]), get<1>(aNeighborhood[i]));
if (current <= minValue && aNeighborhood.is_inside_valid_region(i)) {
index = i;
minValue = current;
}
}
if (index != -1) {
get<1>(value) = -1 * (1 + get_linear_access_index(gridView.dimensions(), aNeighborhood.view_index(index)));
}
aConvergenceState.signal();
}
}
return value;
}
};
template<int tDimension, typename TGradientView, typename TLabelView>
void initLocalMinimaLabels(
TGradientView deviceGradient,
TLabelView labels,
AggregatingTimerSet<2, int> &timer,
device_flag &convergenceFlag
)
{
typedef Tuple<float, int32_t> Value2;
typedef CellularAutomatonWithGlobalState<
Grid<Value2, tDimension>,
VonNeumannNeighborhood<tDimension>,
LocalMinimaConnectedComponentRule,
LocalMinimaEquivalenceGlobalState<int32_t>> LocalMinimaAutomaton;
auto localMinima = unaryOperatorOnLocator(deviceGradient, LocalMinimumLabel());
LocalMinimaEquivalenceGlobalState<int32_t> globalState;
thrust::device_vector<int32_t> buffer;
buffer.resize(elementCount(deviceGradient) + 1);
globalState.manager = EquivalenceManager<int32_t>(thrust::raw_pointer_cast(&buffer[0]), buffer.size());
globalState.mDeviceFlag = convergenceFlag.view();
globalState.manager.initialize();
LocalMinimaAutomaton localMinimumAutomaton;
localMinimumAutomaton.initialize(
nAryOperator(ZipGradientAndLabel(), deviceGradient, localMinima),
globalState);
int iteration = 0;
do {
auto interval = timer.start(0, iteration++);
localMinimumAutomaton.iterate(1);
} while (!globalState.is_finished());
//localMinimumAutomaton.iterate(100);
copy(getDimension(localMinimumAutomaton.getCurrentState(), IntValue<1>()), labels);
}
template<int tDimension>
void distanceBasedWShed(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
typedef Tuple<float, int32_t, float> Value;
device_image<float, tDimension> deviceGradient(aInput.dimensions());
device_image<int32_t, tDimension> labels(aInput.dimensions());
copy(aInput, view(deviceGradient));
device_flag convergenceFlag;
AggregatingTimerSet<2, int> timer;
initLocalMinimaLabels<tDimension>(const_view(deviceGradient), view(labels), timer, convergenceFlag);
auto wshed = nAryOperator(InitWatershed(), const_view(deviceGradient), const_view(labels));
typedef CellularAutomatonWithGlobalState<
Grid<Value, tDimension>,
MooreNeighborhood<tDimension>,
WatershedRule,
ConvergenceFlag> WatershedAutomaton;
ConvergenceFlag convergenceGlobalState;
convergenceGlobalState.mDeviceFlag = convergenceFlag.view();
WatershedAutomaton automaton;
automaton.initialize(wshed, convergenceGlobalState);
int iteration = 0;
do {
auto interval = timer.start(1, iteration++);
automaton.iterate(1);
} while (!convergenceGlobalState.is_finished());
std::cout << timer.createReport({"Local minima search", "Wshed iterations"});
auto state = automaton.getCurrentState();
copy(getDimension(state, IntValue<1>()), view(labels));
copy(const_view(labels), aOutput);
}
template<int tDimension, int tIterationLimit>
void distanceBasedWShedAsync(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
typedef Tuple<float, int32_t, float> Value;
device_image<float, tDimension> deviceGradient(aInput.dimensions());
device_image<int32_t, tDimension> labels(aInput.dimensions());
copy(aInput, view(deviceGradient));
device_flag convergenceFlag;
AggregatingTimerSet<2, int> timer;
initLocalMinimaLabels<tDimension>(const_view(deviceGradient), view(labels), timer, convergenceFlag);
auto wshed = nAryOperator(InitWatershed(), const_view(deviceGradient), const_view(labels));
typedef AsyncCellularAutomatonWithGlobalState<
Grid<Value, tDimension>,
MooreNeighborhood<tDimension>,
WatershedRule,
ConvergenceFlag,
BlockConvergenceFlag<tIterationLimit>> WatershedAutomaton;
ConvergenceFlag convergenceGlobalState;
convergenceGlobalState.mDeviceFlag = convergenceFlag.view();
WatershedAutomaton automaton;
automaton.initialize(wshed, convergenceGlobalState);
int iteration = 0;
do {
auto interval = timer.start(1, iteration++);
automaton.iterate(1);
} while (!convergenceGlobalState.is_finished());
std::cout << timer.createReport({"Local minima search", "Wshed iterations"});
auto state = automaton.getCurrentState();
copy(getDimension(state, IntValue<1>()), view(labels));
copy(const_view(labels), aOutput);
}
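// Lower completion of the gradient image: applies the HandlePlateauBorder
// functor to every element (through a temporary copy), the usual watershed
// preprocessing step that keeps steepest-descent propagation from stalling on
// plateaus.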
template<int tDimension, typename TView>
void lowerCompletion(TView aInput)
{
//lower completion
device_image<float, tDimension> tmp(aInput.dimensions());
transform_locator(aInput, view(tmp), HandlePlateauBorder());
copy(const_view(tmp), aInput);
}
template<int tDimension>
void steepestDescentWShedSimple(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
typedef Tuple<float, int32_t> Value;
typedef CellularAutomatonWithGlobalState<
Grid<Value, tDimension>,
MooreNeighborhood<tDimension>,
WatershedSteepestDescentRule,
ConvergenceFlag> WatershedAutomaton;
device_image<float, tDimension> deviceGradient(aInput.dimensions());
device_image<int32_t, tDimension> labels(aInput.dimensions());
copy(aInput, view(deviceGradient));
lowerCompletion<tDimension>(view(deviceGradient));
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), view(labels));
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), UniqueIdDeviceImageView<tDimension, int32_t>(aInput.dimensions()));
auto wshed = zipViews(const_view(deviceGradient), UniqueIdDeviceImageView<tDimension>(aInput.dimensions()));
device_flag convergenceFlag;
ConvergenceFlag convergenceGlobalState;
convergenceGlobalState.mDeviceFlag = convergenceFlag.view();
WatershedAutomaton automaton;
automaton.initialize(wshed, convergenceGlobalState);
AggregatingTimerSet<1, int> timer;
int iteration = 0;
do {
auto interval = timer.start(0, iteration++);
automaton.iterate(1);
} while (!convergenceGlobalState.is_finished());
std::cout << timer.createReport({"Wshed iterations"});
auto state = automaton.getCurrentState();
copy(getDimension(state, IntValue<1>()), view(labels));
copy(const_view(labels), aOutput);
}
template<int tDimension>
void steepestDescentWShedPointer(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
typedef Tuple<float, int32_t> Value;
typedef CellularAutomatonWithGlobalState<
Grid<Value, tDimension>,
MooreNeighborhood<tDimension>,
WatershedByPointer,
ConvergenceFlag> WatershedAutomaton;
device_image<float, tDimension> deviceGradient(aInput.dimensions());
device_image<int32_t, tDimension> labels(aInput.dimensions());
copy(aInput, view(deviceGradient));
AggregatingTimerSet<2, int> timer;
device_flag convergenceFlag;
lowerCompletion<tDimension>(view(deviceGradient));
initLocalMinimaLabels<tDimension>(const_view(deviceGradient), view(labels), timer, convergenceFlag);
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), view(labels));
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), UniqueIdDeviceImageView<tDimension, int32_t>(aInput.dimensions()));
auto wshed = zipViews(const_view(deviceGradient), const_view(labels));
ConvergenceFlag convergenceGlobalState;
convergenceGlobalState.mDeviceFlag = convergenceFlag.view();
WatershedAutomaton automaton;
automaton.initialize(wshed, convergenceGlobalState);
int iteration = 0;
do {
auto interval = timer.start(1, iteration++);
automaton.iterate(1);
} while (!convergenceGlobalState.is_finished());
std::cout << timer.createReport({"Local minima search", "Wshed iterations"});
auto state = automaton.getCurrentState();
copy(getDimension(state, IntValue<1>()), view(labels));
copy(const_view(labels), aOutput);
}
template<int tDimension>
void steepestDescentWShedSimpleAsync(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
typedef Tuple<float, int32_t> Value;
typedef AsyncCellularAutomatonWithGlobalState<
Grid<Value, tDimension>,
MooreNeighborhood<tDimension>,
WatershedSteepestDescentRule,
ConvergenceFlag,
BlockConvergenceFlag<1000>> WatershedAutomaton;
/*typedef CellularAutomatonWithGlobalState<
Grid<Value, tDimension>,
MooreNeighborhood<tDimension>,
WatershedSteepestDescentRule,
ConvergenceFlag> WatershedAutomaton;*/
device_image<float, tDimension> deviceGradient(aInput.dimensions());
device_image<int32_t, tDimension> labels(aInput.dimensions());
copy(aInput, view(deviceGradient));
lowerCompletion<tDimension>(view(deviceGradient));
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), view(labels));
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), UniqueIdDeviceImageView<tDimension, int32_t>(aInput.dimensions()));
auto wshed = zipViews(const_view(deviceGradient), UniqueIdDeviceImageView<tDimension>(aInput.dimensions()));
device_flag convergenceFlag;
ConvergenceFlag convergenceGlobalState;
convergenceGlobalState.mDeviceFlag = convergenceFlag.view();
WatershedAutomaton automaton;
automaton.initialize(wshed, convergenceGlobalState);
AggregatingTimerSet<1, int> timer;
int iteration = 0;
do {
auto interval = timer.start(0, iteration++);
automaton.iterate(1);
} while (!convergenceGlobalState.is_finished());
std::cout << timer.createReport({"Wshed iterations"});
auto state = automaton.getCurrentState();
copy(getDimension(state, IntValue<1>()), view(labels));
copy(const_view(labels), aOutput);
}
template<int tDimension>
void steepestDescentWShedGlobalState(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
typedef Tuple<float, int32_t> Value;
typedef CellularAutomatonWithGlobalState<
Grid<Value, tDimension>,
MooreNeighborhood<tDimension>,
WatershedSteepestDescentGlobalStateRule,
LocalMinimaEquivalenceGlobalState<int32_t>> WatershedAutomaton;
device_image<float, tDimension> deviceGradient(aInput.dimensions());
device_image<int32_t, tDimension> labels(aInput.dimensions());
copy(aInput, view(deviceGradient));
lowerCompletion<tDimension>(view(deviceGradient));
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), view(labels));
//auto wshed = nAryOperator(ZipGradientAndLabel(), const_view(deviceGradient), UniqueIdDeviceImageView<tDimension, int32_t>(aInput.dimensions()));
auto wshed = zipViews(const_view(deviceGradient), UniqueIdDeviceImageView<tDimension>(aInput.dimensions()));
device_flag convergenceFlag;
LocalMinimaEquivalenceGlobalState<int32_t> globalState;
thrust::device_vector<int32_t> buffer;
buffer.resize(elementCount(aInput) + 1);
globalState.manager = EquivalenceManager<int32_t>(thrust::raw_pointer_cast(&buffer[0]), buffer.size());
globalState.mDeviceFlag = convergenceFlag.view();
globalState.manager.initialize();
WatershedAutomaton automaton;
automaton.initialize(wshed, globalState);
AggregatingTimerSet<1, int> timer;
int iteration = 0;
do {
auto interval = timer.start(0, iteration++);
automaton.iterate(1);
} while (!globalState.is_finished());
std::cout << timer.createReport({"Wshed iterations"});
auto state = automaton.getCurrentState();
copy(getDimension(state, IntValue<1>()), view(labels));
copy(const_view(labels), aOutput);
}
template<int tDimension>
void steepestDescentWShedAsyncGlobalState(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
}
template<int tDimension>
void steepestDescentWShedPointers(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
}
//******************************************************************************
template<int tDimension>
void runWatershedTransformationDim(
const_host_image_view<const float, tDimension> aInput,
host_image_view<int32_t, tDimension> aOutput,
const WatershedOptions &aOptions)
{
switch (aOptions.wshedVariant) {
case WatershedVariant::DistanceBased:
distanceBasedWShed<tDimension>(aInput, aOutput, aOptions);
break;
case WatershedVariant::DistanceBasedAsync:
distanceBasedWShedAsync<tDimension, 10000>(aInput, aOutput, aOptions);
break;
case WatershedVariant::DistanceBasedAsyncLimited:
distanceBasedWShedAsync<tDimension, 2>(aInput, aOutput, aOptions);
break;
case WatershedVariant::SteepestDescentSimple:
steepestDescentWShedSimple<tDimension>(aInput, aOutput, aOptions);
break;
case WatershedVariant::SteepestDescentSimpleAsync:
steepestDescentWShedSimpleAsync<tDimension>(aInput, aOutput, aOptions);
break;
case WatershedVariant::SteepestDescentGlobalState:
steepestDescentWShedGlobalState<tDimension>(aInput, aOutput, aOptions);
break;
case WatershedVariant::SteepestDescentPointer:
steepestDescentWShedPointer<tDimension>(aInput, aOutput, aOptions);
break;
default:
std::cerr << "Unknown watershed variant\n";
}
}
void runWatershedTransformation(
const_host_image_view<const float, 2> aInput,
host_image_view<int32_t, 2> aOutput,
const WatershedOptions &aOptions)
{
//runWatershedTransformationDim<2>(aInput, aOutput, aOptions);
}
void runWatershedTransformation(
const_host_image_view<const float, 3> aInput,
host_image_view<int32_t, 3> aOutput,
const WatershedOptions &aOptions)
{
runWatershedTransformationDim<3>(aInput, aOutput, aOptions);
}
|
fe65844e3d636a9944f1323b96e2aa7767ee4149.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
/*
* This example demonstrates two techniques for using the cuRAND host and device
* API to generate random numbers for CUDA kernels to consume.
*/
int threads_per_block = 256;
int blocks_per_grid = 30;
/*
* host_api_kernel consumes pre-generated random values from the cuRAND host API
* to perform some dummy computation.
*/
__global__ void host_api_kernel(float *randomValues, float *out, int N)
{
int i;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int nthreads = gridDim.x * blockDim.x;
for (i = tid; i < N; i += nthreads)
{
float rand = randomValues[i];
rand = rand * 2;
out[i] = rand;
}
}
/*
* device_api_kernel uses the cuRAND device API to generate random numbers
* on-the-fly on the GPU, and then performs some dummy computation using them.
*/
__global__ void device_api_kernel(hiprandState_t *states, float *out, int N)
{
int i;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int nthreads = gridDim.x * blockDim.x;
hiprandState_t *state = states + tid;
hiprand_init(9384, tid, 0, state);
for (i = tid; i < N; i += nthreads)
{
float rand = hiprand_uniform(state);
rand = rand * 2;
out[i] = rand;
}
}
/*
 * use_host_api is an example usage of the cuRAND host API to generate random
* values to be consumed on the device.
*/
void use_host_api(int N)
{
int i;
hiprandGenerator_t randGen;
float *dRand, *dOut, *hOut;
// Create cuRAND generator (i.e. handle)
CHECK_CURAND(hiprandCreateGenerator(&randGen, HIPRAND_RNG_PSEUDO_DEFAULT));
// Allocate device memory to store the random values and output
CHECK(hipMalloc((void **)&dRand, sizeof(float) * N));
CHECK(hipMalloc((void **)&dOut, sizeof(float) * N));
hOut = (float *)malloc(sizeof(float) * N);
// Generate N random values from a uniform distribution
CHECK_CURAND(hiprandGenerateUniform(randGen, dRand, N));
// Consume the values generated by hiprandGenerateUniform
hipLaunchKernelGGL(( host_api_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, dRand, dOut, N);
// Retrieve outputs
CHECK(hipMemcpy(hOut, dOut, sizeof(float) * N, hipMemcpyDeviceToHost));
printf("Sampling of output from host API:\n");
for (i = 0; i < 10; i++)
{
printf("%2.4f\n", hOut[i]);
}
printf("...\n");
free(hOut);
CHECK(hipFree(dRand));
CHECK(hipFree(dOut));
CHECK_CURAND(hiprandDestroyGenerator(randGen));
}
/*
 * use_device_api is an example usage of the cuRAND device API to use the GPU
* to generate random values on the fly from inside a CUDA kernel.
*/
void use_device_api(int N)
{
int i;
static hiprandState_t *states = NULL;
float *dOut, *hOut;
/*
* Allocate device memory to store the output and cuRAND device state
* objects (which are analogous to handles, but on the GPU).
*/
CHECK(hipMalloc((void **)&dOut, sizeof(float) * N));
CHECK(hipMalloc((void **)&states, sizeof(hiprandState_t) *
threads_per_block * blocks_per_grid));
hOut = (float *)malloc(sizeof(float) * N);
// Execute a kernel that generates and consumes its own random numbers
hipLaunchKernelGGL(( device_api_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, states, dOut, N);
// Retrieve the results
CHECK(hipMemcpy(hOut, dOut, sizeof(float) * N, hipMemcpyDeviceToHost));
printf("Sampling of output from device API:\n");
for (i = 0; i < 10; i++)
{
printf("%2.4f\n", hOut[i]);
}
printf("...\n");
free(hOut);
CHECK(hipFree(dOut));
CHECK(hipFree(states));
}
int main(int argc, char **argv)
{
int N = 8388608;
use_host_api(N);
use_device_api(N);
return 0;
}
| fe65844e3d636a9944f1323b96e2aa7767ee4149.cu | #include "../common/common.h"
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
/*
* This example demonstrates two techniques for using the cuRAND host and device
* API to generate random numbers for CUDA kernels to consume.
*/
int threads_per_block = 256;
int blocks_per_grid = 30;
/*
* host_api_kernel consumes pre-generated random values from the cuRAND host API
* to perform some dummy computation.
*/
__global__ void host_api_kernel(float *randomValues, float *out, int N)
{
int i;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int nthreads = gridDim.x * blockDim.x;
for (i = tid; i < N; i += nthreads)
{
float rand = randomValues[i];
rand = rand * 2;
out[i] = rand;
}
}
/*
* device_api_kernel uses the cuRAND device API to generate random numbers
* on-the-fly on the GPU, and then performs some dummy computation using them.
*/
__global__ void device_api_kernel(curandState *states, float *out, int N)
{
int i;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int nthreads = gridDim.x * blockDim.x;
curandState *state = states + tid;
curand_init(9384, tid, 0, state);
for (i = tid; i < N; i += nthreads)
{
float rand = curand_uniform(state);
rand = rand * 2;
out[i] = rand;
}
}
/*
 * use_host_api is an example usage of the cuRAND host API to generate random
* values to be consumed on the device.
*/
void use_host_api(int N)
{
int i;
curandGenerator_t randGen;
float *dRand, *dOut, *hOut;
// Create cuRAND generator (i.e. handle)
CHECK_CURAND(curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_DEFAULT));
// Allocate device memory to store the random values and output
CHECK(cudaMalloc((void **)&dRand, sizeof(float) * N));
CHECK(cudaMalloc((void **)&dOut, sizeof(float) * N));
hOut = (float *)malloc(sizeof(float) * N);
// Generate N random values from a uniform distribution
CHECK_CURAND(curandGenerateUniform(randGen, dRand, N));
// Consume the values generated by curandGenerateUniform
host_api_kernel<<<blocks_per_grid, threads_per_block>>>(dRand, dOut, N);
// Retrieve outputs
CHECK(cudaMemcpy(hOut, dOut, sizeof(float) * N, cudaMemcpyDeviceToHost));
printf("Sampling of output from host API:\n");
for (i = 0; i < 10; i++)
{
printf("%2.4f\n", hOut[i]);
}
printf("...\n");
free(hOut);
CHECK(cudaFree(dRand));
CHECK(cudaFree(dOut));
CHECK_CURAND(curandDestroyGenerator(randGen));
}
/*
 * use_device_api is an example usage of the cuRAND device API to use the GPU
* to generate random values on the fly from inside a CUDA kernel.
*/
void use_device_api(int N)
{
int i;
static curandState *states = NULL;
float *dOut, *hOut;
/*
* Allocate device memory to store the output and cuRAND device state
* objects (which are analogous to handles, but on the GPU).
*/
CHECK(cudaMalloc((void **)&dOut, sizeof(float) * N));
CHECK(cudaMalloc((void **)&states, sizeof(curandState) *
threads_per_block * blocks_per_grid));
hOut = (float *)malloc(sizeof(float) * N);
// Execute a kernel that generates and consumes its own random numbers
device_api_kernel<<<blocks_per_grid, threads_per_block>>>(states, dOut, N);
// Retrieve the results
CHECK(cudaMemcpy(hOut, dOut, sizeof(float) * N, cudaMemcpyDeviceToHost));
printf("Sampling of output from device API:\n");
for (i = 0; i < 10; i++)
{
printf("%2.4f\n", hOut[i]);
}
printf("...\n");
free(hOut);
CHECK(cudaFree(dOut));
CHECK(cudaFree(states));
}
int main(int argc, char **argv)
{
int N = 8388608;
use_host_api(N);
use_device_api(N);
return 0;
}
|
8bcddc5e47eb7ed4c77e38dd73059503f41df7ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathBlas.hip"
#else
#include <ATen/hip/HIPContext.h>
#include <ATen/NamedTensorUtils.h>
#define ERROR_ONLY_FP_TYPES(func) \
THError("%s for CUDA tensors only supports floating-point types. Try converting the tensors with .float()", func);
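// Fills three device-side arrays of per-batch pointers into the strided
// batch1/batch2/result buffers; the pointer-array batched GEMM path below
// consumes these instead of the strided-batched API.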
__global__ void createBatchGemmBuffer3(const scalar_t** buffer1, const scalar_t ** buffer2, const scalar_t ** buffer3, scalar_t* data1,
scalar_t * data2, scalar_t * data3, int64_t stride1, int64_t stride2, int64_t stride3, int64_t num_batches) {
const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches) {
buffer1[idx] = data1 + idx * stride1;
buffer2[idx] = data2 + idx * stride2;
buffer3[idx] = data3 + idx * stride3;
}
}
void THCTensor_(baddbmm)(THCState *state, THCTensor *result, THCTensor *t,
THCTensor *batch1, THCTensor *batch2,
scalar_t beta, scalar_t alpha) {
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_BFLOAT16)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2));
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 3, 4, "expected 3D tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch1) == 3, 6, "expected 3D tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch2) == 3, 7, "expected 3D tensor");
THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6,
"equal number of batches expected");
THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch2, 0), 7,
"equal number of batches expected");
auto maybe_outnames = at::namedinference::compute_baddbmm_outnames(result, batch1, batch2, t);
{
at::NoNamesGuard guard;
THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6,
"wrong matrix size");
THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7,
"wrong matrix size");
THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6,
"wrong matrix size");
if (t != result) {
THCTensor_(resizeAs)(state, result, t);
if (ScalarConvert<scalar_t, double>::to(beta) != 0.0) {
THCTensor_(copy)(state, result, t);
}
}
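    // Choose a memory layout for the output: use result directly when either its
    // rows or its columns are contiguous (swapping batch1/batch2 when the
    // transposed product is computed), otherwise work on a contiguous transposed
    // clone and copy it back into result at the end.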
bool transpose_result;
char transpose_batch1, transpose_batch2;
int64_t lda, ldb, ldc;
THCTensor *result_, *batch1_, *batch2_;
if (result->stride(1) == 1 &&
(result->size(2) == 1 || result->stride(2) >= std::max<int64_t>(1, result->size(1))))
{
transpose_result = false;
result_ = result;
ldc = result_->stride(2);
}
else if (result->stride(2) == 1 &&
(result->size(1) == 1 || result->stride(1) >= std::max<int64_t>(1, result->size(2))))
{
transpose_result = true;
THCTensor *swap = batch2;
batch2 = batch1;
batch1 = swap;
result_ = result;
ldc = result_->stride(1);
}
else
{
transpose_result = false;
THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2);
result_ = THCTensor_(newClone)(state, transp_r_);
THCTensor_(free)(state, transp_r_);
THCTensor_(transpose)(state, result_, NULL, 1, 2);
ldc = result_->stride(2);
}
const int64_t m = result->size(transpose_result ? 2 : 1);
const int64_t n = result->size(transpose_result ? 1 : 2);
const int64_t k = batch1->size(transpose_result ? 1 : 2);
if (batch1->stride(transpose_result ? 2 : 1) == 1 &&
batch1->stride(transpose_result ? 1 : 2) >= std::max<int64_t>(1, m))
{
transpose_batch1 = 'n';
batch1_ = batch1;
lda = batch1_->stride(transpose_result ? 1 : 2);
}
else if (batch1->stride(transpose_result ? 1 : 2) == 1 &&
batch1->stride(transpose_result ? 2 : 1) >= std::max<int64_t>(1, k))
{
transpose_batch1 = 't';
batch1_ = batch1;
lda = batch1_->stride(transpose_result ? 2 : 1);
}
else
{
transpose_batch1 = transpose_result ? 'n' : 't';
// batch1_ is later freed if batch1_ != batch1
if (THCTensor_(isContiguous)(state, batch1)) {
batch1_ = batch1;
} else {
batch1_ = THCTensor_(newContiguous)(state, batch1);
}
lda = batch1_->stride(1);
}
if (batch2->stride(transpose_result ? 2 : 1) == 1 &&
batch2->stride(transpose_result ? 1 : 2) >= std::max<int64_t>(1, k))
{
transpose_batch2 = 'n';
batch2_ = batch2;
ldb = batch2_->stride(transpose_result ? 1 : 2);
}
else if (batch2->stride(transpose_result ? 1 : 2) == 1 &&
batch2->stride(transpose_result ? 2 : 1) >= std::max<int64_t>(1, n))
{
transpose_batch2 = 't';
batch2_ = batch2;
ldb = batch2_->stride(transpose_result ? 2 : 1);
}
else
{
transpose_batch2 = transpose_result ? 'n' : 't';
// batch2_ is later freed if batch2_ != batch2
if (THCTensor_(isContiguous)(state, batch2)) {
batch2_ = batch2;
} else {
batch2_ = THCTensor_(newContiguous)(state, batch2);
}
ldb = batch2_->stride(1);
}
int64_t num_batches = result_->size(0);
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
// Compute pointers to matrices in each batch.
#if TORCH_HIP_VERSION < 8000 && !defined __HIP_PLATFORM_HCC__
size_t matrices_size = num_batches * sizeof(scalar_t*);
// Copy pointers to device.
auto d_matrices1 = static_cast<const scalar_t**>(THCudaMalloc(state, matrices_size));
auto d_matrices2 = static_cast<const scalar_t**>(THCudaMalloc(state, matrices_size));
auto d_result_matrices = static_cast<scalar_t**>(THCudaMalloc(state, matrices_size));
const int64_t block = 512;
const int64_t grid = (num_batches + block - 1) / block;
hipLaunchKernelGGL(( createBatchGemmBuffer3), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
d_matrices1, d_matrices2, (const scalar_t**)d_result_matrices, THCTensor_(data)(state, batch1_),
THCTensor_(data)(state, batch2_), THCTensor_(data)(state, result_),
batch1_->stride(0), batch2_->stride(0), result_->stride(0), num_batches);
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_SgemmBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
d_matrices1, lda,
d_matrices2, ldb,
beta,
d_result_matrices, ldc,
num_batches);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_DgemmBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
d_matrices1, lda,
d_matrices2, ldb,
beta,
d_result_matrices, ldc,
num_batches);
#endif //THC_REAL
THCudaFree(state, d_matrices1);
THCudaFree(state, d_matrices2);
THCudaFree(state, d_result_matrices);
#else
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_SgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_DgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#endif //THC_REAL
#endif //TORCH_HIP_VERSION
#elif defined(THC_REAL_IS_HALF)
#if TORCH_HIP_VERSION < 9010 && !defined(__HIP_PLATFORM_HCC__)
// Currently no HgemmBatched in Cublas
for (int64_t i = 0; i < num_batches; ++i) {
THCudaBlas_Hgemm(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda,
THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb,
beta,
THCTensor_(data)(state, result_) + i * result_->stride(0), ldc);
}
#else
#ifndef __HIP_PLATFORM_HCC__
hipDeviceProp_t* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major >= 5){
#endif
THCudaBlas_HgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#ifndef __HIP_PLATFORM_HCC__
} else {
for (int64_t i = 0; i < num_batches; ++i) {
THCudaBlas_Hgemm(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda,
THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb,
beta,
THCTensor_(data)(state, result_) + i * result_->stride(0), ldc);
}
}
#endif
#endif //TORCH_HIP_VERSION
#elif defined(THC_REAL_IS_BFLOAT16)
#if defined(__HIP_PLATFORM_HCC__) || defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000
THCudaBlas_BgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#endif // __HIP_PLATFORM_HCC__
#endif
if (batch1_ != batch1) {
THCTensor_(free)(state, batch1_);
}
if (batch2_ != batch2) {
THCTensor_(free)(state, batch2_);
}
if (result_ != result) {
THCTensor_(freeCopyTo)(state, result_, result);
}
#if defined(THC_REAL_IS_BFLOAT16) && !(defined(__HIP_PLATFORM_HCC__) || defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000)
// To avoid "variable was set but never used" warning
[&transpose_batch1, &transpose_batch2, &lda, &ldb, &ldc]{}();
TORCH_CHECK(false, "BgemmStridedBatched is not supported with at::BFloat16 type");
#endif
}
at::namedinference::propagate_names_if_nonempty(result, maybe_outnames);
#else
ERROR_ONLY_FP_TYPES("baddbmm");
#endif
}
#endif
| 8bcddc5e47eb7ed4c77e38dd73059503f41df7ce.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathBlas.cu"
#else
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NamedTensorUtils.h>
#define ERROR_ONLY_FP_TYPES(func) \
THError("%s for CUDA tensors only supports floating-point types. Try converting the tensors with .float()", func);
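// Fills three device-side arrays of per-batch pointers into the strided
// batch1/batch2/result buffers; the pointer-array batched GEMM path below
// consumes these instead of the strided-batched API.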
__global__ void createBatchGemmBuffer3(const scalar_t** buffer1, const scalar_t ** buffer2, const scalar_t ** buffer3, scalar_t* data1,
scalar_t * data2, scalar_t * data3, int64_t stride1, int64_t stride2, int64_t stride3, int64_t num_batches) {
const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches) {
buffer1[idx] = data1 + idx * stride1;
buffer2[idx] = data2 + idx * stride2;
buffer3[idx] = data3 + idx * stride3;
}
}
void THCTensor_(baddbmm)(THCState *state, THCTensor *result, THCTensor *t,
THCTensor *batch1, THCTensor *batch2,
scalar_t beta, scalar_t alpha) {
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_BFLOAT16)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2));
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 3, 4, "expected 3D tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch1) == 3, 6, "expected 3D tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch2) == 3, 7, "expected 3D tensor");
THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6,
"equal number of batches expected");
THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch2, 0), 7,
"equal number of batches expected");
auto maybe_outnames = at::namedinference::compute_baddbmm_outnames(result, batch1, batch2, t);
{
at::NoNamesGuard guard;
THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6,
"wrong matrix size");
THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7,
"wrong matrix size");
THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6,
"wrong matrix size");
if (t != result) {
THCTensor_(resizeAs)(state, result, t);
if (ScalarConvert<scalar_t, double>::to(beta) != 0.0) {
THCTensor_(copy)(state, result, t);
}
}
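    // Choose a memory layout for the output: use result directly when either its
    // rows or its columns are contiguous (swapping batch1/batch2 when the
    // transposed product is computed), otherwise work on a contiguous transposed
    // clone and copy it back into result at the end.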
bool transpose_result;
char transpose_batch1, transpose_batch2;
int64_t lda, ldb, ldc;
THCTensor *result_, *batch1_, *batch2_;
if (result->stride(1) == 1 &&
(result->size(2) == 1 || result->stride(2) >= std::max<int64_t>(1, result->size(1))))
{
transpose_result = false;
result_ = result;
ldc = result_->stride(2);
}
else if (result->stride(2) == 1 &&
(result->size(1) == 1 || result->stride(1) >= std::max<int64_t>(1, result->size(2))))
{
transpose_result = true;
THCTensor *swap = batch2;
batch2 = batch1;
batch1 = swap;
result_ = result;
ldc = result_->stride(1);
}
else
{
transpose_result = false;
THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2);
result_ = THCTensor_(newClone)(state, transp_r_);
THCTensor_(free)(state, transp_r_);
THCTensor_(transpose)(state, result_, NULL, 1, 2);
ldc = result_->stride(2);
}
const int64_t m = result->size(transpose_result ? 2 : 1);
const int64_t n = result->size(transpose_result ? 1 : 2);
const int64_t k = batch1->size(transpose_result ? 1 : 2);
if (batch1->stride(transpose_result ? 2 : 1) == 1 &&
batch1->stride(transpose_result ? 1 : 2) >= std::max<int64_t>(1, m))
{
transpose_batch1 = 'n';
batch1_ = batch1;
lda = batch1_->stride(transpose_result ? 1 : 2);
}
else if (batch1->stride(transpose_result ? 1 : 2) == 1 &&
batch1->stride(transpose_result ? 2 : 1) >= std::max<int64_t>(1, k))
{
transpose_batch1 = 't';
batch1_ = batch1;
lda = batch1_->stride(transpose_result ? 2 : 1);
}
else
{
transpose_batch1 = transpose_result ? 'n' : 't';
// batch1_ is later freed if batch1_ != batch1
if (THCTensor_(isContiguous)(state, batch1)) {
batch1_ = batch1;
} else {
batch1_ = THCTensor_(newContiguous)(state, batch1);
}
lda = batch1_->stride(1);
}
if (batch2->stride(transpose_result ? 2 : 1) == 1 &&
batch2->stride(transpose_result ? 1 : 2) >= std::max<int64_t>(1, k))
{
transpose_batch2 = 'n';
batch2_ = batch2;
ldb = batch2_->stride(transpose_result ? 1 : 2);
}
else if (batch2->stride(transpose_result ? 1 : 2) == 1 &&
batch2->stride(transpose_result ? 2 : 1) >= std::max<int64_t>(1, n))
{
transpose_batch2 = 't';
batch2_ = batch2;
ldb = batch2_->stride(transpose_result ? 2 : 1);
}
else
{
transpose_batch2 = transpose_result ? 'n' : 't';
// batch2_ is later freed if batch2_ != batch2
if (THCTensor_(isContiguous)(state, batch2)) {
batch2_ = batch2;
} else {
batch2_ = THCTensor_(newContiguous)(state, batch2);
}
ldb = batch2_->stride(1);
}
int64_t num_batches = result_->size(0);
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
// Compute pointers to matrices in each batch.
#if CUDA_VERSION < 8000 && !defined __HIP_PLATFORM_HCC__
size_t matrices_size = num_batches * sizeof(scalar_t*);
// Copy pointers to device.
auto d_matrices1 = static_cast<const scalar_t**>(THCudaMalloc(state, matrices_size));
auto d_matrices2 = static_cast<const scalar_t**>(THCudaMalloc(state, matrices_size));
auto d_result_matrices = static_cast<scalar_t**>(THCudaMalloc(state, matrices_size));
const int64_t block = 512;
const int64_t grid = (num_batches + block - 1) / block;
createBatchGemmBuffer3<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
d_matrices1, d_matrices2, (const scalar_t**)d_result_matrices, THCTensor_(data)(state, batch1_),
THCTensor_(data)(state, batch2_), THCTensor_(data)(state, result_),
batch1_->stride(0), batch2_->stride(0), result_->stride(0), num_batches);
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_SgemmBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
d_matrices1, lda,
d_matrices2, ldb,
beta,
d_result_matrices, ldc,
num_batches);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_DgemmBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
d_matrices1, lda,
d_matrices2, ldb,
beta,
d_result_matrices, ldc,
num_batches);
#endif //THC_REAL
THCudaFree(state, d_matrices1);
THCudaFree(state, d_matrices2);
THCudaFree(state, d_result_matrices);
#else
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_SgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_DgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#endif //THC_REAL
#endif //CUDA_VERSION
#elif defined(THC_REAL_IS_HALF)
#if CUDA_VERSION < 9010 && !defined(__HIP_PLATFORM_HCC__)
// Currently no HgemmBatched in Cublas
for (int64_t i = 0; i < num_batches; ++i) {
THCudaBlas_Hgemm(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda,
THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb,
beta,
THCTensor_(data)(state, result_) + i * result_->stride(0), ldc);
}
#else
#ifndef __HIP_PLATFORM_HCC__
cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major >= 5){
#endif
THCudaBlas_HgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#ifndef __HIP_PLATFORM_HCC__
} else {
for (int64_t i = 0; i < num_batches; ++i) {
THCudaBlas_Hgemm(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda,
THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb,
beta,
THCTensor_(data)(state, result_) + i * result_->stride(0), ldc);
}
}
#endif
#endif //CUDA_VERSION
#elif defined(THC_REAL_IS_BFLOAT16)
#if defined(__HIP_PLATFORM_HCC__) || defined(CUDA_VERSION) && CUDA_VERSION >= 11000
THCudaBlas_BgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#endif // __HIP_PLATFORM_HCC__
#endif
if (batch1_ != batch1) {
THCTensor_(free)(state, batch1_);
}
if (batch2_ != batch2) {
THCTensor_(free)(state, batch2_);
}
if (result_ != result) {
THCTensor_(freeCopyTo)(state, result_, result);
}
#if defined(THC_REAL_IS_BFLOAT16) && !(defined(__HIP_PLATFORM_HCC__) || defined(CUDA_VERSION) && CUDA_VERSION >= 11000)
// To avoid "variable was set but never used" warning
[&transpose_batch1, &transpose_batch2, &lda, &ldb, &ldc]{}();
TORCH_CHECK(false, "BgemmStridedBatched is not supported with at::BFloat16 type");
#endif
}
at::namedinference::propagate_names_if_nonempty(result, maybe_outnames);
#else
ERROR_ONLY_FP_TYPES("baddbmm");
#endif
}
#endif
|
fa6206ce2a7516786ac590d2e6b3171f9dd101d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_13.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
    // the cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5110986392742,0.00130591158765005,0.778304597988111,0.778190083712180,0.000176141600174844,0.484495378655116,0.00295228963782625,0.999998329695130,1.95198204949961e-08,1.90553223501749e-05,0.999768478047086,1.00656738617877,0.999980520529342,5.74063440693430e-05,0.608088033062619,9.96205488133323,139.557924801650};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
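// Advances the ten Tusscher 2004 (epicardial) model of one cell by a single
// explicit step of size dt: computes all membrane currents, updates the ionic
// concentrations, integrates the gating variables with Rush-Larsen style
// exponentials, and writes the updated state (including the new voltage) to rDY_.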
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.2952631571165,0.000223357550203231,0.000139823866607541,0.000468830572859158,0.267957668347321,0.123807265230240,0.209206424884521,4.97611368106475,0.0181339958455722,1.93368689237664,1099.98460468133,0.000558564959599142,0.298337407980113,0.0142073923928152,0.00109951928325625,6.37440120865430e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
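//Each gate is advanced with an exponential (Rush-Larsen style) update: it relaxes towards
//its steady-state value X_INF with time constant TAU_X over the step dt.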
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
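//fCa and g use the same exponential update, but are not allowed to increase while the
//cell is depolarised (svolt > -37 mV).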
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| fa6206ce2a7516786ac590d2e6b3171f9dd101d0.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_13.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
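// One pitched row per state variable (NEQ rows of num_volumes reals); state i of cell j
// is accessed as *((real*)((char*)sv + pitch * i) + j).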
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
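// one thread per cell: each thread advances its cell num_steps explicit steps of size dt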
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5110986392742,0.00130591158765005,0.778304597988111,0.778190083712180,0.000176141600174844,0.484495378655116,0.00295228963782625,0.999998329695130,1.95198204949961e-08,1.90553223501749e-05,0.999768478047086,1.00656738617877,0.999980520529342,5.74063440693430e-05,0.608088033062619,9.96205488133323,139.557924801650};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
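//RT/F expressed in millivolts (about 26.7 mV at 310 K), used for the reversal potentials below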
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.2952631571165,0.000223357550203231,0.000139823866607541,0.000468830572859158,0.267957668347321,0.123807265230240,0.209206424884521,4.97611368106475,0.0181339958455722,1.93368689237664,1099.98460468133,0.000558564959599142,0.298337407980113,0.0142073923928152,0.00109951928325625,6.37440120865430e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
33790ddc290912f0773e4b8ec42b9d1114142cd7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/matrix/kernelparams.h>
#include <raft/linalg/cublas_wrappers.h>
#include <common/ml_benchmark.hpp>
#include <matrix/grammatrix.cuh>
#include <matrix/kernelfactory.cuh>
#include <memory>
#include <raft/random/rng.cuh>
#include <sstream>
#include <string>
#include <vector>
namespace MLCommon {
namespace Bench {
namespace Matrix {
using namespace MLCommon::Matrix;
struct GramTestParams {
int m; // m parameter of the GEMM
int k; // k parameter of the GEMM
int n; // n parameter of the GEMM
KernelParams kernel_params;
bool is_row_major;
}; // struct GramTestParams
template <typename T>
struct GramMatrix : public Fixture {
GramMatrix(const std::string& name, const GramTestParams& p)
: Fixture(name), params(p), A(0, stream), B(0, stream), C(0, stream)
{
std::vector<std::string> kernel_names{"linear", "poly", "rbf", "tanh"};
std::ostringstream oss;
oss << name << "/" << kernel_names[p.kernel_params.kernel] << "/" << p.m << "x" << p.k << "x"
<< p.n << "/" << (p.is_row_major ? "row_major" : "col_major");
this->SetName(oss.str().c_str());
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
kernel =
std::unique_ptr<GramMatrixBase<T>>(KernelFactory<T>::create(p.kernel_params, cublas_handle));
}
~GramMatrix() { CUBLAS_CHECK_NO_THROW(hipblasDestroy(cublas_handle)); }
protected:
void allocateBuffers(const ::benchmark::State& state) override
{
A.resize(params.m * params.k, stream);
B.resize(params.k * params.n, stream);
C.resize(params.m * params.n, stream);
raft::random::Rng r(123456ULL);
r.uniform(A.data(), params.m * params.k, T(-1.0), T(1.0), stream);
r.uniform(B.data(), params.k * params.n, T(-1.0), T(1.0), stream);
}
void runBenchmark(::benchmark::State& state) override
{
if (!this->kernel) { state.SkipWithError("Kernel matrix is not initialized"); }
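// each timed iteration evaluates the full m x n Gram matrix between A and B on this->stream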
loopOnState(state, [this]() {
(*this->kernel)(A.data(),
this->params.m,
this->params.k,
B.data(),
this->params.n,
C.data(),
this->params.is_row_major,
this->stream);
});
}
private:
hipblasHandle_t cublas_handle;
std::unique_ptr<GramMatrixBase<T>> kernel;
GramTestParams params;
rmm::device_uvector<T> A; // input matrix A, size [m * k]
rmm::device_uvector<T> B; // input matrix B, size [n * k]
rmm::device_uvector<T> C; // output matrix C, size [m*n]
};
static std::vector<GramTestParams> getInputs()
{
std::vector<GramTestParams> param_vec;
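// Each KernelParams initializer below is presumably {kernel type, degree, gamma, coef0},
// following the field order of cuml/matrix/kernelparams.h.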
std::vector<KernelParams> kernel_params{KernelParams{LINEAR, 3, 1, 0},
KernelParams{POLYNOMIAL, 2, 1.3, 1},
KernelParams{TANH, 2, 0.5, 2.4},
KernelParams{RBF, 2, 0.5, 0}};
struct TestSize {
int m;
int k;
int n;
};
std::vector<TestSize> data_size{{4096, 10, 1024},
{4096, 100, 1024},
{4096, 1000, 1024},
{4096, 10000, 1024},
{100000, 10, 1024},
{100000, 100, 1024},
{100000, 1000, 1024}};
param_vec.reserve(kernel_params.size() * data_size.size());
for (TestSize s : data_size) {
for (auto kernel : kernel_params) {
for (bool row_major : {false, true}) {
param_vec.push_back(GramTestParams{s.m, s.k, s.n, kernel, row_major});
}
}
}
return param_vec;
}
ML_BENCH_REGISTER(GramTestParams, GramMatrix<float>, "", getInputs());
ML_BENCH_REGISTER(GramTestParams, GramMatrix<double>, "", getInputs());
} // namespace Matrix
} // namespace Bench
} // namespace MLCommon
| 33790ddc290912f0773e4b8ec42b9d1114142cd7.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/matrix/kernelparams.h>
#include <raft/linalg/cublas_wrappers.h>
#include <common/ml_benchmark.hpp>
#include <matrix/grammatrix.cuh>
#include <matrix/kernelfactory.cuh>
#include <memory>
#include <raft/random/rng.cuh>
#include <sstream>
#include <string>
#include <vector>
namespace MLCommon {
namespace Bench {
namespace Matrix {
using namespace MLCommon::Matrix;
struct GramTestParams {
int m; // m parameter of the GEMM
int k; // k parameter of the GEMM
int n; // n parameter of the GEMM
KernelParams kernel_params;
bool is_row_major;
}; // struct GramTestParams
template <typename T>
struct GramMatrix : public Fixture {
GramMatrix(const std::string& name, const GramTestParams& p)
: Fixture(name), params(p), A(0, stream), B(0, stream), C(0, stream)
{
std::vector<std::string> kernel_names{"linear", "poly", "rbf", "tanh"};
std::ostringstream oss;
oss << name << "/" << kernel_names[p.kernel_params.kernel] << "/" << p.m << "x" << p.k << "x"
<< p.n << "/" << (p.is_row_major ? "row_major" : "col_major");
this->SetName(oss.str().c_str());
CUBLAS_CHECK(cublasCreate(&cublas_handle));
kernel =
std::unique_ptr<GramMatrixBase<T>>(KernelFactory<T>::create(p.kernel_params, cublas_handle));
}
~GramMatrix() { CUBLAS_CHECK_NO_THROW(cublasDestroy(cublas_handle)); }
protected:
void allocateBuffers(const ::benchmark::State& state) override
{
A.resize(params.m * params.k, stream);
B.resize(params.k * params.n, stream);
C.resize(params.m * params.n, stream);
raft::random::Rng r(123456ULL);
r.uniform(A.data(), params.m * params.k, T(-1.0), T(1.0), stream);
r.uniform(B.data(), params.k * params.n, T(-1.0), T(1.0), stream);
}
void runBenchmark(::benchmark::State& state) override
{
if (!this->kernel) { state.SkipWithError("Kernel matrix is not initialized"); }
loopOnState(state, [this]() {
(*this->kernel)(A.data(),
this->params.m,
this->params.k,
B.data(),
this->params.n,
C.data(),
this->params.is_row_major,
this->stream);
});
}
private:
cublasHandle_t cublas_handle;
std::unique_ptr<GramMatrixBase<T>> kernel;
GramTestParams params;
rmm::device_uvector<T> A; // input matrix A, size [m * k]
rmm::device_uvector<T> B; // input matrix B, size [n * k]
rmm::device_uvector<T> C; // output matrix C, size [m*n]
};
static std::vector<GramTestParams> getInputs()
{
std::vector<GramTestParams> param_vec;
std::vector<KernelParams> kernel_params{KernelParams{LINEAR, 3, 1, 0},
KernelParams{POLYNOMIAL, 2, 1.3, 1},
KernelParams{TANH, 2, 0.5, 2.4},
KernelParams{RBF, 2, 0.5, 0}};
struct TestSize {
int m;
int k;
int n;
};
std::vector<TestSize> data_size{{4096, 10, 1024},
{4096, 100, 1024},
{4096, 1000, 1024},
{4096, 10000, 1024},
{100000, 10, 1024},
{100000, 100, 1024},
{100000, 1000, 1024}};
param_vec.reserve(kernel_params.size() * data_size.size());
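// one benchmark case is generated for every (data size, kernel, memory layout) combination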
for (TestSize s : data_size) {
for (auto kernel : kernel_params) {
for (bool row_major : {false, true}) {
param_vec.push_back(GramTestParams{s.m, s.k, s.n, kernel, row_major});
}
}
}
return param_vec;
}
ML_BENCH_REGISTER(GramTestParams, GramMatrix<float>, "", getInputs());
ML_BENCH_REGISTER(GramTestParams, GramMatrix<double>, "", getInputs());
} // namespace Matrix
} // namespace Bench
} // namespace MLCommon
|
38f25ddf59309784dde88b5637db97ac5306a256.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA codes for Projection calculation
#ifndef __LEN_WEIGHT_CAL_CU
#define __LEN_WEIGHT_CAL_CU
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include <cutil.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cutil_math.h>
#define BLOCK_DIM_X 8
#define BLOCK_DIM_Y 8
#define BLOCK_DIM_Z 4
texture<float, 3, hipReadModeElementType> tex_voxel; // 3D texture
#define PI 3.1415926
// structure for SART3D GPU implementation
typedef struct{
int x; // corresponds to the location of image samples
int y;
int z;
float wa; // corresponds to a_{ij}
float wt; // corresponds to t_{ij}
} WeightSample;
__global__ void len_weight_cal_kernel( float*, dim3, dim3, dim3, float, float, float, float);
extern "C"
void len_weight_cal_wrapper( hipArray* d_array_voxel,
float* d_proj_cur,
int num_depth, int num_height, int num_width,
int num_proj, int num_elevation, int num_ray,
float spacing, float voxel_size, float proj_pixel_size,
float SOD){
// setup execution parameters
int blockWidth, blockHeight, blockDepth, nBlockX, nBlockY, nBlockZ;
// Setting block size
if(num_depth == 1 ) {
blockWidth = 16;
blockHeight = 16;
blockDepth = 1;
}
else {
blockWidth = BLOCK_DIM_X;
blockHeight = BLOCK_DIM_Y;
blockDepth = BLOCK_DIM_Z;
}
// compute how many blocks are needed
nBlockX = ceil((float)num_ray / (float)blockWidth);
nBlockY = ceil((float)num_elevation / (float)blockHeight);
nBlockZ = ceil((float)num_proj / (float)blockDepth);
dim3 dimGrid(nBlockX, nBlockY*nBlockZ); // 3D grid is not supported on G80
dim3 dimBlock(blockWidth, blockHeight, blockDepth);
dim3 projdim(num_ray, num_elevation, num_proj);
dim3 griddim(nBlockX, nBlockY, nBlockZ);
dim3 imagedim( num_width, num_height, num_depth);
// set texture parameters
tex_voxel.normalized = false; // access with normalized texture coordinates
tex_voxel.filterMode = hipFilterModeLinear; // linear interpolation
tex_voxel.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates at the volume border
tex_voxel.addressMode[1] = hipAddressModeClamp;
tex_voxel.addressMode[2] = hipAddressModeClamp;
// bind array to 3D texture
hipChannelFormatDesc float1Desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); // float
CUDA_SAFE_CALL(hipBindTextureToArray(tex_voxel, d_array_voxel, float1Desc));
// execute the kernel
hipLaunchKernelGGL(( len_weight_cal_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_proj_cur, projdim, griddim, imagedim,
spacing, voxel_size, proj_pixel_size, SOD);
CUDA_SAFE_CALL( hipUnbindTexture( tex_voxel ) );
// check if kernel execution generated an error
CUT_CHECK_ERROR("Kernel execution failed");
}
__global__ void
len_weight_cal_kernel( float* proj_cur, dim3 projdim, dim3 griddim, dim3 imagedim,
float spacing, float voxel_size, float proj_pixel_size, float SOD){
// 1. initialize shared memory
dim3 bidx;
uint idx, idy, idz;
bidx.x = blockIdx.x;
bidx.z = blockIdx.y / griddim.y;
bidx.y = blockIdx.y - bidx.z*griddim.y;
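// blockIdx.y packs both the y and z block coordinates (2D grid limit on G80); decode them here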
idx = bidx.x * blockDim.x + threadIdx.x;
idy = bidx.y * blockDim.y + threadIdx.y;
idz = bidx.z * blockDim.z + threadIdx.z;
// shared memory
dim3 cidx;
cidx.x = threadIdx.x;
cidx.y = threadIdx.y;
cidx.z = threadIdx.z;
// 2. apply kernel
if(idx < projdim.x && idy < projdim.y && idz < projdim.z)
{
float proj_res = 0.0f;
float len = 0.0f;
// rotation angle
float angle = 2 * PI * idz / projdim.z + PI / 2;
// source position
float xs = SOD * cosf( angle );
float ys = SOD * sinf( angle );
float zs = 0.0f;
// detector pixel position (-SDD, (idx - projdim.x / 2 ) * proj_pixel_size,
// (idy - projdim.y / 2 ) * proj_pixel_size )
// SDD should be zero here to utilize the virtual detector
// float xp = -SDD * cosf( angle ) + (1.0f * idx - projdim.x / 2 ) * proj_pixel_size * sinf( -angle );
// float yp = -SDD * sinf( angle ) + (1.0f * idx - projdim.x / 2 ) * proj_pixel_size * cosf( angle );
float xp = (1.0f * idx - projdim.x / 2 ) * proj_pixel_size * sinf( -angle );
float yp = (1.0f * idx - projdim.x / 2 ) * proj_pixel_size * cosf( angle );
float zp = (1.0f * idy - projdim.y / 2 ) * proj_pixel_size ;
// vector direction
len = sqrtf( (xp - xs) * (xp - xs) + (yp - ys) * (yp - ys) + (zp - zs) * (zp - zs) );
float dx = (xp - xs) * spacing / len;
float dy = (yp - ys) * spacing / len;
float dz = (zp - zs) * spacing / len;
// determine intersection points
// The bounding volume is specified by (-imagedim.x/2, -imagedim.y/2, -imagedim.z/2)
// and (imagedim.x/2, imagedim.y/2, imagedim.z/2)
float t_near = -100.0f;
float t_far = 100.0f;
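// slab test: clip the ray against the bounding box, keeping the entry (t_near) and exit (t_far) parameters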
// calculate t_near
if( fabsf( xp - xs ) > 1e-7f ){
if( dx < 0.0f && t_near < ( 1.0f * imagedim.x/2 - 1.0f - xs ) / ( xp - xs ) )
t_near = ( 1.0f * imagedim.x/2 - 1.0f - xs ) / ( xp - xs );
if( dx > 0.0f && t_near < ( -1.0f * imagedim.x/2 - xs ) / ( xp - xs ) )
t_near = ( -1.0f * imagedim.x/2 - xs ) / ( xp - xs );
}
if( fabsf( yp - ys ) > 1e-7f ){
if( dy < 0.0f && t_near < ( 1.0f * imagedim.y/2 - 1.0f - ys ) / ( yp - ys ) )
t_near = ( 1.0f * imagedim.y/2 - 1.0f - ys ) / ( yp - ys );
if( dy > 0.0f && t_near < ( -1.0f * imagedim.y/2 - ys ) / ( yp - ys ) )
t_near = ( -1.0f * imagedim.y/2 - ys ) / ( yp - ys );
}
if( fabsf( zp - zs ) > 1e-7f ){
if( dz < 0.0f && t_near < ( 1.0f * imagedim.z/2 - 1.0f - zs ) / ( zp - zs ) )
t_near = ( 1.0f * imagedim.z/2 - 1.0f - zs ) / ( zp - zs );
if( dz > 0.0f && t_near < ( -1.0f * imagedim.z/2 - zs ) / ( zp - zs ) )
t_near = ( -1.0f * imagedim.z/2 - zs ) / ( zp - zs );
}
// calculate t_far
if( fabsf( xp - xs ) > 1e-7f ){
if( dx < 0.0f && t_far > ( -1.0f *imagedim.x/2 - xs ) / ( xp - xs ) )
t_far = ( -1.0f * imagedim.x/2 - xs ) / ( xp - xs );
if( dx > 0.0f && t_far > ( 1.0f * imagedim.x/2 - 1.0f - xs ) / ( xp - xs ) )
t_far = ( 1.0f * imagedim.x/2 - 1.0f - xs ) / ( xp - xs );
}
if( fabsf( yp - ys ) > 1e-7f ){
if( dy < 0.0f && t_far > ( -1.0f * imagedim.y/2 - ys ) / ( yp - ys ) )
t_far = ( -1.0f * imagedim.y/2 - ys ) / ( yp - ys );
if( dy > 0.0f && t_far > ( 1.0f * imagedim.y/2 - 1.0f - ys ) / ( yp - ys ) )
t_far = ( 1.0f * imagedim.y/2 - 1.0f - ys ) / ( yp - ys );
}
if( fabsf( zp - zs ) > 1e-7f ){
if( dz < 0.0f && t_far > ( -1.0f * imagedim.z/2 - zs ) / ( zp - zs ) )
t_far = ( -1.0f * imagedim.z/2 - zs ) / ( zp - zs );
if( dz > 0.0f && t_far > ( 1.0f * imagedim.z/2 - 1.0f - zs ) / ( zp - zs ) )
t_far = ( 1.0f * imagedim.z/2 - 1.0f - zs ) / ( zp - zs );
}
if( fabsf( xp - xs ) < 1e-7f && ( xp < 0.0f || xp > 1.0f * imagedim.x / 2 - 1.0f ) ){
t_near = t_far + 1.0f;
}
if( fabsf( yp - ys ) < 1e-7f && ( yp < 0.0f || yp > 1.0f * imagedim.y / 2 - 1.0f ) ){
t_near = t_far + 1.0f;
}
if( fabsf( zp - zs ) < 1e-7f && ( zp < 0.0f || zp > 1.0f * imagedim.z / 2 - 1.0f ) ){
t_near = t_far + 1.0f;
}
if( t_near > t_far ) // no intersection
proj_res = 0.0f;
else{
len = (t_far - t_near) * len + 1.0f;
int num_steps = (int) (len / spacing );
int i;
float x, y, z;
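// march from t_near to t_far in steps of `spacing`, accumulating trilinearly interpolated
// samples; the +0.5f offsets address texel centers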
for( i = 0; i <= num_steps; i++ ){
x = xs + (xp - xs) * t_near + i * dx;
y = ys + (yp - ys) * t_near + i * dy;
z = zs + (zp - zs) * t_near + i * dz;
x = x / voxel_size + 1.0f * imagedim.x / 2;
y = y / voxel_size + 1.0f * imagedim.y / 2;
z = z / voxel_size + 1.0f * imagedim.z / 2;
proj_res += tex3D( tex_voxel, x + 0.5f, y + 0.5f, z + 0.5f) * spacing;
}
if( len - num_steps * spacing > 1e-7f ){
x = xs + (xp - xs) * t_far;
y = ys + (yp - ys) * t_far;
z = zs + (zp - zs) * t_far;
x = x / voxel_size + 1.0f * imagedim.x / 2;
y = y / voxel_size + 1.0f * imagedim.y / 2;
z = z / voxel_size + 1.0f * imagedim.z / 2;
proj_res += tex3D( tex_voxel, x + 0.5f, y + 0.5f, z + 0.5f) * (len - num_steps * spacing);
}
}
// proj_res = t_near;
// proj_res = t_far;
// proj_res = dz;
// proj_res = tex3D( tex_voxel, 1.0f * idx, 1.0f * idy, 1.0f * idz);
__syncthreads();
// store the result
uint outidx = ((idz)* projdim.y + (idy))*projdim.x + idx;
proj_cur[ outidx ] = proj_res;
}
}
#endif // __LEN_WEIGHT_CAL_CU | 38f25ddf59309784dde88b5637db97ac5306a256.cu | // CUDA codes for Projection calculation
#ifndef __LEN_WEIGHT_CAL_CU
#define __LEN_WEIGHT_CAL_CU
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include <cutil.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cutil_math.h>
#define BLOCK_DIM_X 8
#define BLOCK_DIM_Y 8
#define BLOCK_DIM_Z 4
texture<float, 3, cudaReadModeElementType> tex_voxel; // 3D texture
#define PI 3.1415926
// structure for SART3D GPU implementation
typedef struct{
int x; // corresponds to the location of image samples
int y;
int z;
float wa; // corresponds to a_{ij}
float wt; // corresponds to t_{ij}
} WeightSample;
__global__ void len_weight_cal_kernel( float*, dim3, dim3, dim3, float, float, float, float);
extern "C"
void len_weight_cal_wrapper( cudaArray* d_array_voxel,
float* d_proj_cur,
int num_depth, int num_height, int num_width,
int num_proj, int num_elevation, int num_ray,
float spacing, float voxel_size, float proj_pixel_size,
float SOD){
// setup execution parameters
int blockWidth, blockHeight, blockDepth, nBlockX, nBlockY, nBlockZ;
// Setting block size
if(num_depth == 1 ) {
blockWidth = 16;
blockHeight = 16;
blockDepth = 1;
}
else {
blockWidth = BLOCK_DIM_X;
blockHeight = BLOCK_DIM_Y;
blockDepth = BLOCK_DIM_Z;
}
// compute how many blocks are needed
nBlockX = ceil((float)num_ray / (float)blockWidth);
nBlockY = ceil((float)num_elevation / (float)blockHeight);
nBlockZ = ceil((float)num_proj / (float)blockDepth);
dim3 dimGrid(nBlockX, nBlockY*nBlockZ); // 3D grid is not supported on G80
dim3 dimBlock(blockWidth, blockHeight, blockDepth);
dim3 projdim(num_ray, num_elevation, num_proj);
dim3 griddim(nBlockX, nBlockY, nBlockZ);
dim3 imagedim( num_width, num_height, num_depth);
// set texture parameters
tex_voxel.normalized = false; // access with normalized texture coordinates
tex_voxel.filterMode = cudaFilterModeLinear; // linear interpolation
tex_voxel.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates at the volume border
tex_voxel.addressMode[1] = cudaAddressModeClamp;
tex_voxel.addressMode[2] = cudaAddressModeClamp;
// bind array to 3D texture
cudaChannelFormatDesc float1Desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); // float
CUDA_SAFE_CALL(cudaBindTextureToArray(tex_voxel, d_array_voxel, float1Desc));
// execute the kernel
len_weight_cal_kernel<<< dimGrid, dimBlock >>>( d_proj_cur, projdim, griddim, imagedim,
spacing, voxel_size, proj_pixel_size, SOD);
CUDA_SAFE_CALL( cudaUnbindTexture( tex_voxel ) );
// check if kernel execution generated an error
CUT_CHECK_ERROR("Kernel execution failed");
}
__global__ void
len_weight_cal_kernel( float* proj_cur, dim3 projdim, dim3 griddim, dim3 imagedim,
float spacing, float voxel_size, float proj_pixel_size, float SOD){
// 1. initialize shared memory
dim3 bidx;
uint idx, idy, idz;
bidx.x = blockIdx.x;
bidx.z = blockIdx.y / griddim.y;
bidx.y = blockIdx.y - bidx.z*griddim.y;
idx = bidx.x * blockDim.x + threadIdx.x;
idy = bidx.y * blockDim.y + threadIdx.y;
idz = bidx.z * blockDim.z + threadIdx.z;
// shared memory
dim3 cidx;
cidx.x = threadIdx.x;
cidx.y = threadIdx.y;
cidx.z = threadIdx.z;
// 2. apply kernel
if(idx < projdim.x && idy < projdim.y && idz < projdim.z)
{
float proj_res = 0.0f;
float len = 0.0f;
// rotation angle
float angle = 2 * PI * idz / projdim.z + PI / 2;
// source position
float xs = SOD * cosf( angle );
float ys = SOD * sinf( angle );
float zs = 0.0f;
// detector pixel position (-SDD, (idx - projdim.x / 2 ) * proj_pixel_size,
// (idy - projdim.y / 2 ) * proj_pixel_size )
// SDD should be zero here to utilize the virtual detector
// float xp = -SDD * cosf( angle ) + (1.0f * idx - projdim.x / 2 ) * proj_pixel_size * sinf( -angle );
// float yp = -SDD * sinf( angle ) + (1.0f * idx - projdim.x / 2 ) * proj_pixel_size * cosf( angle );
float xp = (1.0f * idx - projdim.x / 2 ) * proj_pixel_size * sinf( -angle );
float yp = (1.0f * idx - projdim.x / 2 ) * proj_pixel_size * cosf( angle );
float zp = (1.0f * idy - projdim.y / 2 ) * proj_pixel_size ;
// vector direction
len = sqrtf( (xp - xs) * (xp - xs) + (yp - ys) * (yp - ys) + (zp - zs) * (zp - zs) );
float dx = (xp - xs) * spacing / len;
float dy = (yp - ys) * spacing / len;
float dz = (zp - zs) * spacing / len;
// determine intersection points
// The bounding volume is specified by (-imagedim.x/2, -imagedim.y/2, -imagedim.z/2)
// and (imagedim.x/2, imagedim.y/2, imagedim.z/2)
float t_near = -100.0f;
float t_far = 100.0f;
// calculate t_near
if( fabsf( xp - xs ) > 1e-7f ){
if( dx < 0.0f && t_near < ( 1.0f * imagedim.x/2 - 1.0f - xs ) / ( xp - xs ) )
t_near = ( 1.0f * imagedim.x/2 - 1.0f - xs ) / ( xp - xs );
if( dx > 0.0f && t_near < ( -1.0f * imagedim.x/2 - xs ) / ( xp - xs ) )
t_near = ( -1.0f * imagedim.x/2 - xs ) / ( xp - xs );
}
if( fabsf( yp - ys ) > 1e-7f ){
if( dy < 0.0f && t_near < ( 1.0f * imagedim.y/2 - 1.0f - ys ) / ( yp - ys ) )
t_near = ( 1.0f * imagedim.y/2 - 1.0f - ys ) / ( yp - ys );
if( dy > 0.0f && t_near < ( -1.0f * imagedim.y/2 - ys ) / ( yp - ys ) )
t_near = ( -1.0f * imagedim.y/2 - ys ) / ( yp - ys );
}
if( fabsf( zp - zs ) > 1e-7f ){
if( dz < 0.0f && t_near < ( 1.0f * imagedim.z/2 - 1.0f - zs ) / ( zp - zs ) )
t_near = ( 1.0f * imagedim.z/2 - 1.0f - zs ) / ( zp - zs );
if( dz > 0.0f && t_near < ( -1.0f * imagedim.z/2 - zs ) / ( zp - zs ) )
t_near = ( -1.0f * imagedim.z/2 - zs ) / ( zp - zs );
}
// calculate t_far
if( fabsf( xp - xs ) > 1e-7f ){
if( dx < 0.0f && t_far > ( -1.0f *imagedim.x/2 - xs ) / ( xp - xs ) )
t_far = ( -1.0f * imagedim.x/2 - xs ) / ( xp - xs );
if( dx > 0.0f && t_far > ( 1.0f * imagedim.x/2 - 1.0f - xs ) / ( xp - xs ) )
t_far = ( 1.0f * imagedim.x/2 - 1.0f - xs ) / ( xp - xs );
}
if( fabsf( yp - ys ) > 1e-7f ){
if( dy < 0.0f && t_far > ( -1.0f * imagedim.y/2 - ys ) / ( yp - ys ) )
t_far = ( -1.0f * imagedim.y/2 - ys ) / ( yp - ys );
if( dy > 0.0f && t_far > ( 1.0f * imagedim.y/2 - 1.0f - ys ) / ( yp - ys ) )
t_far = ( 1.0f * imagedim.y/2 - 1.0f - ys ) / ( yp - ys );
}
if( fabsf( zp - zs ) > 1e-7f ){
if( dz < 0.0f && t_far > ( -1.0f * imagedim.z/2 - zs ) / ( zp - zs ) )
t_far = ( -1.0f * imagedim.z/2 - zs ) / ( zp - zs );
if( dz > 0.0f && t_far > ( 1.0f * imagedim.z/2 - 1.0f - zs ) / ( zp - zs ) )
t_far = ( 1.0f * imagedim.z/2 - 1.0f - zs ) / ( zp - zs );
}
if( fabsf( xp - xs ) < 1e-7f && ( xp < 0.0f || xp > 1.0f * imagedim.x / 2 - 1.0f ) ){
t_near = t_far + 1.0f;
}
if( fabsf( yp - ys ) < 1e-7f && ( yp < 0.0f || yp > 1.0f * imagedim.y / 2 - 1.0f ) ){
t_near = t_far + 1.0f;
}
if( fabsf( zp - zs ) < 1e-7f && ( zp < 0.0f || zp > 1.0f * imagedim.z / 2 - 1.0f ) ){
t_near = t_far + 1.0f;
}
if( t_near > t_far ) // no intersection
proj_res = 0.0f;
else{
len = (t_far - t_near) * len + 1.0f;
int num_steps = (int) (len / spacing );
int i;
float x, y, z;
for( i = 0; i <= num_steps; i++ ){
x = xs + (xp - xs) * t_near + i * dx;
y = ys + (yp - ys) * t_near + i * dy;
z = zs + (zp - zs) * t_near + i * dz;
x = x / voxel_size + 1.0f * imagedim.x / 2;
y = y / voxel_size + 1.0f * imagedim.y / 2;
z = z / voxel_size + 1.0f * imagedim.z / 2;
proj_res += tex3D( tex_voxel, x + 0.5f, y + 0.5f, z + 0.5f) * spacing;
}
if( len - num_steps * spacing > 1e-7f ){
x = xs + (xp - xs) * t_far;
y = ys + (yp - ys) * t_far;
z = zs + (zp - zs) * t_far;
x = x / voxel_size + 1.0f * imagedim.x / 2;
y = y / voxel_size + 1.0f * imagedim.y / 2;
z = z / voxel_size + 1.0f * imagedim.z / 2;
proj_res += tex3D( tex_voxel, x + 0.5f, y + 0.5f, z + 0.5f) * (len - num_steps * spacing);
}
}
// proj_res = t_near;
// proj_res = t_far;
// proj_res = dz;
// proj_res = tex3D( tex_voxel, 1.0f * idx, 1.0f * idy, 1.0f * idz);
__syncthreads();
// store the result
uint outidx = ((idz)* projdim.y + (idy))*projdim.x + idx;
proj_cur[ outidx ] = proj_res;
}
}
#endif // __LEN_WEIGHT_CAL_CU |
07da68a07dbf644fc1f8fa95f9298598e5263030.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <functional>
#include <numeric>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#endif
using namespace std;
using namespace thrust::placeholders;
unsigned long long int total_count = 0;
unsigned int total_segments = 0;
unsigned int total_max;
unsigned int process_count;
map <unsigned int, unsigned int> str_offset;
long long int totalRecs = 0;
bool fact_file_loaded = 0;
char map_check;
void* d_v = NULL;
void* s_v = NULL;
unsigned int oldCount;
queue<string> op_type;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<string> col_aliases;
void* alloced_tmp;
unsigned int alloced_sz = 0;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string,string> setMap; //map to keep track of column names and set names
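// predicate that keeps positions which produced a match; 4294967295 (UINT_MAX) marks 'no match'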
struct is_match
{
__host__ __device__
bool operator()(unsigned int x)
{
return x != 4294967295;
}
};
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return !(((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
struct l_to_ui
{
__host__ __device__
float_type operator()(const int_type x)
{
return (unsigned int)x;
}
};
struct float_to_decimal
{
__host__ __device__
float_type operator()(const float_type x)
{
return (int_type)(x*100);
}
};
struct to_zero
{
__host__ __device__
bool operator()(const int_type x)
{
if(x == -1)
return 0;
else
return 1;
}
};
struct div_long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x, const float_type y)
{
return (float_type)x/y;
}
};
struct long_to_float
{
__host__ __device__
float_type operator()(const long long int x)
{
return (((float_type)x)/100.0);
}
};
struct Uint2Sum
{
__host__ __device__ uint2 operator()(uint2& a, uint2& b)
{
//a.x += b.x;
a.y += b.y;
return a;
}
};
struct uint2_split
{
const uint2* d_res;
unsigned int * output;
uint2_split(const uint2* _d_res, unsigned int * _output):
d_res(_d_res), output(_output) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
output[i] = d_res[i].y;
}
};
// trim from start
static inline std::string &ltrim(std::string &s) {
s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun<int, int>(std::isspace))));
return s;
}
// trim from end
static inline std::string &rtrim(std::string &s) {
s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
return s;
}
// trim from both ends
static inline std::string &trim(std::string &s) {
return ltrim(rtrim(s));
}
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, unsigned int& count);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int count, unsigned int g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int count, unsigned int g_size);
void write_compressed_char(string file_name, unsigned int index, unsigned int mCount);
unsigned int largest_prm(CudaSet* a);
unsigned int max_tmp(CudaSet* a);
unsigned int curr_segment = 10000000;
size_t getFreeMem();
char zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a, unsigned int segment);
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
keep = false;
partial_load = 0;
source = 1;
text_source = 1;
grp = NULL;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
keep = false;
partial_load = 1;
source = 1;
text_source = 0;
grp = NULL;
};
CudaSet::CudaSet(unsigned int RecordCount, unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
partial_load = 0;
source = 0;
text_source = 0;
grp = NULL;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, int_type Recs, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b,Recs, op_sel, op_sel_as);
keep = false;
partial_load = 0;
source = 0;
text_source = 0;
grp = NULL;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(unsigned int colIndex, unsigned int RecordCount)
{
if (type[colIndex] == 0) {
d_columns_int[type_index[colIndex]].resize(RecordCount);
}
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(RecordCount);
else {
void* d;
hipMalloc(&d, char_size[type_index[colIndex]]*RecordCount);
d_columns_char[type_index[colIndex]] = (char*)d;
};
};
void CudaSet::decompress_char_hash(unsigned int colIndex, unsigned int segment, unsigned int i_cnt)
{
unsigned int bits_encoded, fit_count, sz, vals_count, real_count, old_count;
const unsigned int len = char_size[type_index[colIndex]];
char f1[100];
strcpy(f1, load_file_name);
strcat(f1,".");
char col_pos[3];
itoaa(cols[colIndex],col_pos);
strcat(f1,col_pos);
strcat(f1,".");
itoaa(segment,col_pos);
strcat(f1,col_pos);
FILE* f;
f = fopen (f1 , "rb" );
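// compressed char segment layout: [dict size][dict entries, len bytes each][fit_count]
// [bits_encoded][vals_count][real_count][vals_count packed 64-bit words]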
fread(&sz, 4, 1, f);
char* d_array = new char[sz*len];
fread((void*)d_array, sz*len, 1, f);
unsigned long long int* hashes = new unsigned long long int[sz];
for(unsigned int i = 0; i < sz ; i++) {
hashes[i] = MurmurHash64A(&d_array[i*len], len, hash_seed); // hash each dictionary entry to a 64-bit key
};
void* d;
hipMalloc((void **) &d, sz*int_size);
hipMemcpy( d, (void *) hashes, sz*8, hipMemcpyHostToDevice);
thrust::device_ptr<unsigned long long int> dd_int((unsigned long long int*)d);
delete[] d_array;
delete[] hashes;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
unsigned long long int* int_array = new unsigned long long int[vals_count];
fread((void*)int_array, 1, vals_count*8, f);
fclose(f);
void* d_val;
hipMalloc((void **) &d_val, vals_count*8);
hipMemcpy(d_val, (void *) int_array, vals_count*8, hipMemcpyHostToDevice);
thrust::device_ptr<unsigned long long int> mval((unsigned long long int*)d_val);
delete[] int_array;
void* d_int;
hipMalloc((void **) &d_int, real_count*4);
// convert bits to ints and then do gather
void* d_v;
hipMalloc((void **) &d_v, 8);
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
dd_v[1] = fit_count;
dd_v[0] = bits_encoded;
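// each 64-bit word appears to pack fit_count indices of bits_encoded bits each; the functor
// below expands them into one dictionary index per record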
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)d_v);
thrust::for_each(begin, begin + real_count, ff);
//thrust::device_ptr<long long int> dd_int((long long int*)d);
thrust::device_ptr<unsigned int> dd_val((unsigned int*)d_int);
if(!prm.empty()) {
if(prm_index[segment] == 'R') {
thrust::device_ptr<int_type> d_tmp = thrust::device_malloc<int_type>(real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_tmp);
if(prm_d.size() == 0) // find the largest prm segment
prm_d.resize(largest_prm(this));
hipMemcpy((void**)(thrust::raw_pointer_cast(prm_d.data())), (void**)prm[segment],
4*prm_count[segment], hipMemcpyHostToDevice);
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + prm_count[segment]);
thrust::gather(prm_d.begin(), prm_d.begin() + prm_count[segment], d_tmp, d_columns_int[i_cnt].begin() + old_count);
thrust::device_free(d_tmp);
}
else if(prm_index[segment] == 'A') {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
}
}
else {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
};
hipFree(d);
hipFree(d_val);
hipFree(d_v);
hipFree(d_int);
};
bool CudaSet::isUnique(unsigned int colIndex) // run only on already sorted columns
{
if (not_compressed)
uniqueColumns[colIndex] = 0;
if (uniqueColumns.find(colIndex) == uniqueColumns.end()) {
if(mRecCount == 1 )
uniqueColumns[colIndex] = 1;
else {
thrust::device_ptr<unsigned int> d_group = thrust::device_malloc<unsigned int>(mRecCount-1);
thrust::transform(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_int[type_index[colIndex]].begin()+1, d_group, thrust::not_equal_to<int_type>());
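// d_group[i] is set where consecutive values differ; the (sorted) column is unique
// iff all mRecCount-1 flags are set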
unsigned int grp_count = thrust::reduce(d_group, d_group+mRecCount-1);
if(grp_count == mRecCount-1)
uniqueColumns[colIndex] = 1;
else
uniqueColumns[colIndex] = 0;
};
};
return uniqueColumns[colIndex];
};
// takes a char column, hashes its strings and copies the hashes to the GPU
void CudaSet::add_hashed_strings(string field, unsigned int segment, unsigned int i_cnt)
{
unsigned int colInd2 = columnNames.find(field)->second;
CudaSet *t = varNames[setMap[field]];
if(not_compressed) { // decompressed strings on a host
unsigned int old_count;
unsigned long long int* hashes = new unsigned long long int[t->mRecCount];
for(unsigned int i = 0; i < t->mRecCount ; i++)
hashes[i] = MurmurHash64A(t->h_columns_char[t->type_index[colInd2]] + i*t->char_size[t->type_index[colInd2]], t->char_size[t->type_index[colInd2]], hash_seed);
if(!prm.empty()) {
if(prm_index[segment] == 'R') {
thrust::device_ptr<unsigned long long int> d_tmp = thrust::device_malloc<unsigned long long int>(t->mRecCount);
thrust::copy(hashes, hashes+mRecCount, d_tmp);
if(prm_d.size() == 0) // find the largest prm segment
prm_d.resize(largest_prm(this));
hipMemcpy((void**)(thrust::raw_pointer_cast(prm_d.data())), (void**)prm[segment],
4*prm_count[segment], hipMemcpyHostToDevice);
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + prm_count[segment]);
thrust::gather(prm_d.begin(), prm_d.begin() + prm_count[segment], d_tmp, d_columns_int[i_cnt].begin() + old_count);
thrust::device_free(d_tmp);
}
else if(prm_index[segment] == 'A') {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
}
}
else {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
}
}
else { // hash the dictionary
decompress_char_hash(colInd2, segment, i_cnt);
};
};
void CudaSet::resize(unsigned int addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0) {
h_columns_int[type_index[i]].resize(mRecCount);
}
else if(type[i] == 1) {
h_columns_float[type_index[i]].resize(mRecCount);
}
else {
if (h_columns_char[type_index[i]]) {
if (mRecCount > prealloc_char_size) {
prealloc_char_size = mRecCount;
h_columns_char[type_index[i]] = (char*)realloc(h_columns_char[type_index[i]], mRecCount*char_size[type_index[i]]);
};
}
else {
h_columns_char[type_index[i]] = new char[mRecCount*char_size[type_index[i]]];
};
};
};
};
void CudaSet::reserve(unsigned int Recs)
{
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0)
h_columns_int[type_index[i]].reserve(Recs);
else if(type[i] == 1)
h_columns_float[type_index[i]].reserve(Recs);
else {
h_columns_char[type_index[i]] = new char[Recs*char_size[type_index[i]]];
prealloc_char_size = Recs;
};
};
};
void CudaSet::deAllocColumnOnDevice(unsigned int colIndex)
{
if (type[colIndex] == 0 && !d_columns_int.empty()) {
d_columns_int[type_index[colIndex]].resize(0);
d_columns_int[type_index[colIndex]].shrink_to_fit();
}
else if (type[colIndex] == 1 && !d_columns_float.empty()) {
d_columns_float[type_index[colIndex]].resize(0);
d_columns_float[type_index[colIndex]].shrink_to_fit();
}
else if (type[colIndex] == 2 && d_columns_char[type_index[colIndex]] != NULL) {
hipFree(d_columns_char[type_index[colIndex]]);
d_columns_char[type_index[colIndex]] = NULL;
};
};
void CudaSet::allocOnDevice(unsigned int RecordCount)
{
for(unsigned int i=0; i < mColumnCount; i++)
allocColumnOnDevice(i, RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i <mColumnCount; i++)
deAllocColumnOnDevice(i);
if(!columnGroups.empty() && mRecCount !=0) {
hipFree(grp);
grp = NULL;
};
if(!prm.empty()) { // free the sources
string some_field;
map<string,int>::iterator it=columnNames.begin();
some_field = (*it).first;
if(setMap[some_field].compare(name)) {
CudaSet* t = varNames[setMap[some_field]];
t->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(unsigned int RecCount, unsigned int colIndex)
{
if (RecCount) {
if (type[colIndex] == 0)
d_columns_int[type_index[colIndex]].resize(mRecCount+RecCount);
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(mRecCount+RecCount);
else {
if (d_columns_char[type_index[colIndex]] != NULL)
hipFree(d_columns_char[type_index[colIndex]]);
void *d;
hipMalloc((void **) &d, (mRecCount+RecCount)*char_size[type_index[colIndex]]);
d_columns_char[type_index[colIndex]] = (char*)d;
};
};
};
void CudaSet::resizeDevice(unsigned int RecCount)
{
if (RecCount)
for(unsigned int i=0; i < mColumnCount; i++)
resizeDeviceColumn(RecCount, i);
};
bool CudaSet::onDevice(unsigned int i)
{
unsigned j = type_index[i];
if (type[i] == 0) {
if (d_columns_int.empty())
return 0;
if (d_columns_int[j].size() == 0)
return 0;
}
else if (type[i] == 1) {
if (d_columns_float.empty())
return 0;
if(d_columns_float[j].size() == 0)
return 0;
}
else if (type[i] == 2) {
if(d_columns_char.empty())
return 0;
if(d_columns_char[j] == NULL)
return 0;
};
return 1;
}
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
a->columnNames[(*it).first] = (*it).second;
for(unsigned int i=0; i < mColumnCount; i++) {
a->cols[i] = cols[i];
a->type[i] = type[i];
if(a->type[i] == 0) {
a->d_columns_int.push_back(thrust::device_vector<int_type>());
a->h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
a->type_index[i] = a->d_columns_int.size()-1;
}
else if(a->type[i] == 1) {
a->d_columns_float.push_back(thrust::device_vector<float_type>());
a->h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
a->type_index[i] = a->d_columns_float.size()-1;
a->decimal[i] = decimal[i];
}
else {
a->h_columns_char.push_back(NULL);
a->d_columns_char.push_back(NULL);
a->type_index[i] = a->d_columns_char.size()-1;
};
};
a->char_size = char_size;
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
unsigned long long int CudaSet::readSegmentsFromFile(unsigned int segNum, unsigned int colIndex)
{
char f1[100];
strcpy(f1, load_file_name);
strcat(f1,".");
char col_pos[3];
itoaa(cols[colIndex],col_pos);
strcat(f1,col_pos);
unsigned int cnt;
strcat(f1,".");
itoaa(segNum,col_pos);
strcat(f1,col_pos);
FILE* f;
f = fopen (f1 , "rb" );
if(type[colIndex] == 0) {
fread(h_columns_int[type_index[colIndex]].data(), 4, 1, f);
cnt = ((unsigned int*)(h_columns_int[type_index[colIndex]].data()))[0];
fread((unsigned int*)(h_columns_int[type_index[colIndex]].data()) + 1, (cnt+8)*8 - 4, 1, f);
}
else if(type[colIndex] == 1) {
fread(h_columns_float[type_index[colIndex]].data(), 4, 1, f);
cnt = ((unsigned int*)(h_columns_float[type_index[colIndex]].data()))[0];
fread((unsigned int*)(h_columns_float[type_index[colIndex]].data()) + 1, (cnt+8)*8 - 4, 1, f);
}
else {
decompress_char(f, colIndex, segNum);
};
fclose(f);
return 0;
};
void CudaSet::decompress_char(FILE* f, unsigned int colIndex, unsigned int segNum)
{
unsigned int bits_encoded, fit_count, sz, vals_count, real_count;
const unsigned int len = char_size[type_index[colIndex]];
fread(&sz, 4, 1, f);
char* d_array = new char[sz*len];
fread((void*)d_array, sz*len, 1, f);
void* d;
hipMalloc((void **) &d, sz*len);
hipMemcpy( d, (void *) d_array, sz*len, hipMemcpyHostToDevice);
delete[] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
unsigned long long int* int_array = new unsigned long long int[vals_count];
fread((void*)int_array, 1, vals_count*8, f);
fclose(f);
void* d_val;
hipMalloc((void **) &d_val, vals_count*8);
hipMemcpy(d_val, (void *) int_array, vals_count*8, hipMemcpyHostToDevice);
delete[] int_array;
void* d_int;
hipMalloc((void **) &d_int, real_count*4);
// convert bits to ints and then do gather
void* d_v;
hipMalloc((void **) &d_v, 8);
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
dd_v[1] = fit_count;
dd_v[0] = bits_encoded;
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)d_v);
thrust::for_each(begin, begin + real_count, ff);
//thrust::device_ptr<unsigned int> dd_r((unsigned int*)d_int);
//for(int z = 0 ; z < 3; z++)
//cout << "DD " << dd_r[z] << endl;
//void* d_char;
//hipMalloc((void **) &d_char, real_count*len);
//hipMemset(d_char, 0, real_count*len);
//str_gather(d_int, real_count, d, d_char, len);
if(str_offset.count(colIndex) == 0)
str_offset[colIndex] = 0;
//cout << "str off " << str_offset[colIndex] << endl;
if(!alloced_switch)
str_gather(d_int, real_count, d, d_columns_char[type_index[colIndex]] + str_offset[colIndex]*len, len);
else
str_gather(d_int, real_count, d, alloced_tmp, len);
if(!prm.empty()) {
str_offset[colIndex] = str_offset[colIndex] + prm_count[segNum];
}
else {
str_offset[colIndex] = str_offset[colIndex] + real_count;
};
//if(d_columns_char[type_index[colIndex]])
// hipFree(d_columns_char[type_index[colIndex]]);
//d_columns_char[type_index[colIndex]] = (char*)d_char;
mRecCount = real_count;
hipFree(d);
hipFree(d_val);
hipFree(d_v);
hipFree(d_int);
}
void CudaSet::CopyToGpu(unsigned int offset, unsigned int count)
{
if (not_compressed) {
for(unsigned int i = 0; i < mColumnCount; i++) {
switch(type[i]) {
case 0 :
thrust::copy(h_columns_int[type_index[i]].begin() + offset, h_columns_int[type_index[i]].begin() + offset + count, d_columns_int[type_index[i]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[i]].begin() + offset, h_columns_float[type_index[i]].begin() + offset + count, d_columns_float[type_index[i]].begin());
break;
default :
hipMemcpy(d_columns_char[type_index[i]], h_columns_char[type_index[i]], char_size[type_index[i]]*(offset + count), hipMemcpyHostToDevice);
};
};
}
else
for(unsigned int i = 0; i < mColumnCount; i++)
CopyColumnToGpu(i, offset, count);
};
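// brings one segment of a column onto the GPU, decompressing it when the set is compressed;
// when alloced_switch is set the data lands in the scratch buffer alloced_tmp instead of the column's own device storage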
void CudaSet::CopyColumnToGpu(unsigned int colIndex, unsigned int segment)
{
if(not_compressed) {
switch(type[colIndex]) {
case 0 :
if(!alloced_switch)
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_col);
};
break;
case 1 :
if(!alloced_switch)
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_col);
};
break;
default :
if(!alloced_switch)
hipMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, hipMemcpyHostToDevice);
else
hipMemcpy(alloced_tmp, h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, hipMemcpyHostToDevice);
};
}
else {
unsigned long long int data_offset;
if (partial_load)
data_offset = readSegmentsFromFile(segment,colIndex);
if(type[colIndex] != 2) {
if(d_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(s_v == NULL)

CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
};
if(type[colIndex] == 0) {
if(!alloced_switch) {
pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data()), h_columns_int[type_index[colIndex]].data() + data_offset, &mRecCount, d_v, s_v);
}
else {
pfor_decompress(alloced_tmp, h_columns_int[type_index[colIndex]].data() + data_offset, &mRecCount, d_v, s_v);
};
}
else if(type[colIndex] == 1) {
if(decimal[colIndex]) {
if(!alloced_switch) {
pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()) , h_columns_float[type_index[colIndex]].data() + data_offset, &mRecCount, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin(), long_to_float());
}
else {
pfor_decompress(alloced_tmp, h_columns_float[type_index[colIndex]].data() + data_offset, &mRecCount, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
}
//else // uncompressed float
//hipMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, hipMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
void CudaSet::CopyColumnToGpu(unsigned int colIndex) // copy all segments
{
if(not_compressed) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
break;
default :
hipMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, hipMemcpyHostToDevice);
};
}
else {
long long int data_offset;
unsigned int totalRecs = 0;
if(d_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
str_offset[colIndex] = 0;
for(unsigned int i = 0; i < segCount; i++) {
if (partial_load)
data_offset = readSegmentsFromFile(i,colIndex);
if(type[colIndex] == 0) {
pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data() + totalRecs), h_columns_int[type_index[colIndex]].data() + data_offset, &mRecCount, d_v, s_v);
}
else if(type[colIndex] == 1) {
if(decimal[colIndex]) {
pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs) , h_columns_float[type_index[colIndex]].data() + data_offset, &mRecCount, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin() + totalRecs, long_to_float());
}
// else uncompressed float
//hipMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, hipMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
};
totalRecs = totalRecs + mRecCount;
};
mRecCount = totalRecs;
};
}
void CudaSet::CopyColumnToGpu(unsigned int colIndex, unsigned int offset, unsigned int count)
{
if(not_compressed) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + offset + count, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + offset + count, d_columns_float[type_index[colIndex]].begin());
break;
default :
hipMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*(offset + count), hipMemcpyHostToDevice);
};
}
else {
};
}
void CudaSet::CopyColumnToHost(int colIndex, unsigned int offset, unsigned int RecCount)
{
switch(type[colIndex]) {
case 0 :
thrust::copy(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + RecCount, h_columns_int[type_index[colIndex]].begin() + offset);
break;
case 1 :
thrust::copy(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + RecCount, h_columns_float[type_index[colIndex]].begin() + offset);
break;
default :
hipMemcpy(h_columns_char[type_index[colIndex]] + offset*char_size[type_index[colIndex]], d_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*RecCount, hipMemcpyDeviceToHost);
}
}
void CudaSet::CopyColumnToHost(int colIndex)
{
CopyColumnToHost(colIndex, 0, mRecCount);
}
void CudaSet::CopyToHost(unsigned int offset, unsigned int count)
{
for(unsigned int i = 0; i < mColumnCount; i++) {
CopyColumnToHost(i, offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_float[type_index[colIndex]].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_int[type_index[colIndex]].data());
}
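// builds the group-boundary mask grp: grp[i] is set when any grouping column changes value between
// rows i and i+1 (the last row is always a boundary), so rows are expected to be ordered by the
// grouping columns; grp_count receives the number of groups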
void CudaSet::GroupBy(stack<string> columnRef, unsigned int int_col_count)
{
int grpInd, colIndex;
if(grp)
hipFree(grp);
CUDA_SAFE_CALL(hipMalloc((void **) &grp, mRecCount * sizeof(bool)));
thrust::device_ptr<bool> d_grp(grp);
thrust::sequence(d_grp, d_grp+mRecCount, 0, 0);
thrust::device_ptr<bool> d_group = thrust::device_malloc<bool>(mRecCount);
d_group[mRecCount-1] = 1;
unsigned int i_count = 0;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
columnGroups.push(columnRef.top()); // save for future references
colIndex = columnNames[columnRef.top()];
if(!onDevice(colIndex)) {
allocColumnOnDevice(colIndex,mRecCount);
CopyColumnToGpu(colIndex, mRecCount);
grpInd = 1;
}
else
grpInd = 0;
if (type[colIndex] == 0) { // int_type
thrust::transform(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_int[type_index[colIndex]].begin()+1, d_group, thrust::not_equal_to<int_type>());
}
else if (type[colIndex] == 1) { // float_type
thrust::transform(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_float[type_index[colIndex]].begin()+1, d_group, f_not_equal_to());
}
else { // Char
//str_grp(d_columns_char[type_index[colIndex]], mRecCount, d_group, char_size[type_index[colIndex]]);
//use int_type
thrust::transform(d_columns_int[int_col_count+i_count].begin(), d_columns_int[int_col_count+i_count].begin() + mRecCount - 1,
d_columns_int[int_col_count+i_count].begin()+1, d_group, thrust::not_equal_to<int_type>());
i_count++;
};
thrust::transform(d_group, d_group+mRecCount, d_grp, d_grp, thrust::logical_or<bool>());
if (grpInd == 1)
deAllocColumnOnDevice(colIndex);
};
thrust::device_free(d_group);
grp_count = thrust::count(d_grp, d_grp+mRecCount,1);
};
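// registers (or resizes) an int column named colName and copies recCount values from the raw device pointer col into it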
void CudaSet::addDeviceColumn(int_type* col, int colIndex, string colName, int_type recCount)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 0;
d_columns_int.push_back(thrust::device_vector<int_type>(recCount));
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
type_index[colIndex] = d_columns_int.size()-1;
}
else { // already exists, may need to resize it
if(d_columns_int[type_index[colIndex]].size() < recCount) {
d_columns_int[type_index[colIndex]].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[type_index[colIndex]].begin());
};
void CudaSet::addDeviceColumn(float_type* col, int colIndex, string colName, int_type recCount)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 1;
d_columns_float.push_back(thrust::device_vector<float_type>(recCount));
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
type_index[colIndex] = d_columns_float.size()-1;
}
else { // already exists, may need to resize it
if(d_columns_float[type_index[colIndex]].size() < recCount)
d_columns_float[type_index[colIndex]].resize(recCount);
};
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[type_index[colIndex]].begin());
};
void CudaSet::addHostColumn(int_type* col, int colIndex, string colName, int_type recCount, int_type old_reccount, bool one_line)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 0;
if (!one_line) {
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>(old_reccount));
type_index[colIndex] = h_columns_int.size()-1;
}
else {
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>(1));
type_index[colIndex] = h_columns_int.size()-1;
};
};
if (!one_line) {
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, h_columns_int[type_index[colIndex]].begin() + mRecCount);
}
else {
thrust::device_ptr<int_type> src(col);
(h_columns_int[type_index[colIndex]])[0] = (h_columns_int[type_index[colIndex]])[0] + src[0];
};
};
void CudaSet::addHostColumn(float_type* col, int colIndex, string colName, int_type recCount, int_type old_reccount, bool one_line)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 1;
if (!one_line) {
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>(old_reccount));
type_index[colIndex] = h_columns_float.size()-1;
}
else {
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>(1));
type_index[colIndex] = h_columns_float.size()-1;
};
};
if (!one_line) {
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, h_columns_float[type_index[colIndex]].begin() + mRecCount);
}
else {
thrust::device_ptr<float_type> src(col);
(h_columns_float[type_index[colIndex]])[0] = (h_columns_float[type_index[colIndex]])[0] + src[0];
};
};
void CudaSet::writeHeader(char* file_name, unsigned int col) {
char str[100];
char col_pos[3];
strcpy(str, file_name);
strcat(str,".");
itoaa(col,col_pos);
strcat(str,col_pos);
string ff = str;
strcat(str,".header");
fstream binary_file(str,ios::out|ios::binary|ios::app);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&total_segments, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
binary_file.close();
};
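// writes the set to disk: as delimited text when binary == 0, otherwise as compressed binary files,
// one per column per segment (a call with mRecCount == 0 only flushes the .header files)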
void CudaSet::Store(char* file_name, char* sep, unsigned int limit, bool binary )
{
if (mRecCount == 0 && binary == 1) { // write tails
for(unsigned int i = 0; i< mColumnCount; i++) {
writeHeader(file_name, cols[i]);
};
return;
};
unsigned int mCount;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else
mCount = mRecCount;
if(binary == 0) {
char buffer [33];
queue<string> op_vx;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
op_vx.push((*it).first);
curr_segment = 1000000;
FILE *file_pr = fopen(file_name, "w");
if (file_pr == NULL)
cout << "Could not open file " << file_name << endl;
if(prm.size() || source)
allocColumns(this, op_vx);
unsigned int curr_seg = 0, cnt = 0;
unsigned curr_count, sum_printed = 0;
while(sum_printed < mCount) {
// cout << "mcount " << mCount << " " << prm.size() << " " << keep << endl;
if(prm.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
// if host arrays are empty
unsigned int olRecs = mRecCount;
resize(mRecCount);
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount)
curr_count = mRecCount;
else {
curr_count = mCount - sum_printed;
};
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
string ss;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < mColumnCount; j++) {
if (type[j] == 0) {
sprintf(buffer, "%lld", (h_columns_int[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else if (type[j] == 1) {
sprintf(buffer, "%.2f", (h_columns_float[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else {
ss.assign(h_columns_char[type_index[j]] + (i*char_size[type_index[j]]), char_size[type_index[j]]);
trim(ss);
fputs(ss.c_str(), file_pr);
fputs(sep, file_pr);
};
};
if (i != mCount -1)
fputs("\n",file_pr);
};
curr_seg++;
};
fclose(file_pr);
}
else if(text_source) { //writing a binary file using a text file as a source
char str[100];
char col_pos[3];
total_count = total_count + mCount;
total_segments = total_segments + 1;
if (mCount > total_max)
total_max = mCount;
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, mCount*float_size));
for(unsigned int i = 0; i< mColumnCount; i++) {
strcpy(str, file_name);
strcat(str,".");
itoaa(cols[i],col_pos);
strcat(str,col_pos);
curr_file = str;
strcat(str,".");
itoaa(total_segments-1,col_pos);
strcat(str,col_pos);
if(type[i] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::copy(h_columns_int[type_index[i]].begin(), h_columns_int[type_index[i]].begin() + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[type_index[i]], 0, 0);
}
else if(type[i] == 1) {
if(decimal[i]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
thrust::copy(h_columns_float[type_index[i]].begin(), h_columns_float[type_index[i]].begin() + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[type_index[i]], 1, 0);
}
else { // do not compress -- float
fstream binary_file(str,ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[type_index[i]].data()),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else { //char
compress_char(str, i, mCount);
};
if(fact_file_loaded) {
writeHeader(file_name, cols[i]);
};
};
for(unsigned int i = 0; i< mColumnCount; i++)
if(type[i] == 2)
deAllocColumnOnDevice(i);
hipFree(d);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
// do it for every segment
// will add this later
};
}
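// dictionary-encodes a char column and writes it to 'file_name' with the layout
// [dictionary size][dictionary entries, len bytes each][fit_count][bits_encoded]
// [vals_count][real_count][vals_count 64-bit words of bit-packed dictionary indexes],
// which is the same layout that decompress_char reads back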
void CudaSet::compress_char(string file_name, unsigned int index, unsigned int mCount)
{
std::vector<string> v1;
std::map<string,unsigned int> dict;
std::vector<string> dict_ordered;
std::vector<unsigned int> dict_val;
map<string,unsigned int>::iterator iter;
unsigned int bits_encoded;
char* field;
unsigned int len = char_size[type_index[index]];
field = new char[len];
for (unsigned int i = 0 ; i < mCount; i++) {
strncpy(field, h_columns_char[type_index[index]] + i*len, char_size[type_index[index]]);
v1.push_back(field);
if((iter = dict.find(field)) != dict.end()) {
dict_val.push_back(iter->second);
}
else {
string f = field;
dict[f] = dict.size();
dict_val.push_back(dict.size()-1);
dict_ordered.push_back(f);
};
};
bits_encoded = (unsigned int)ceil(log2(double(dict.size()+1)));
char *cc = new char[len+1];
unsigned int sz = dict_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary);
binary_file.write((char *)&sz, 4);
for(unsigned int i = 0; i < dict_ordered.size(); i++) {
memset(&cc[0], 0, len);
strcpy(cc,dict_ordered[i].c_str());
binary_file.write(cc, len);
};
delete [] cc;
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, 8);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
void CudaSet::LoadFile(char* file_name, char* sep )
{
unsigned int count = 0;
char line[500];
char* field;
unsigned int current_column = 1;
FILE *file_ptr = fopen(file_name, "r");
if (file_ptr == NULL)
cout << "Could not open file " << file_name << endl;
unsigned int *seq = new unsigned int[mColumnCount];
thrust::sequence(seq, seq+mColumnCount,0,1);
thrust::stable_sort_by_key(cols, cols+mColumnCount, seq);
while (fgets(line, 500, file_ptr) != NULL ) {
current_column = 1;
field = strtok(line,sep);
for(unsigned int i = 0; i< mColumnCount; i++) {
while(cols[i] > current_column) {
field = strtok(NULL,sep);
current_column++;
};
if (type[seq[i]] == 0) {
if (strchr(field,'-') == NULL) {
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
}
else { // handling possible dates
strncpy(field+4,field+5,2);
strncpy(field+6,field+8,2);
field[8] = '\0';
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
};
}
else if (type[seq[i]] == 1)
(h_columns_float[type_index[seq[i]]])[count] = atoff(field);
else {
strcpy(h_columns_char[type_index[seq[i]]] + count*char_size[type_index[seq[i]]], field);
};
};
count++;
if (count == mRecCount) {
mRecCount = mRecCount + process_count;
resize(mRecCount);
};
};
fclose(file_ptr);
delete [] seq;
mRecCount = count;
};
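// reads up to process_count delimited records from file_name into the host columns;
// returns 1 once the end of the file has been reached, 0 otherwise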
int CudaSet::LoadBigFile(const char* file_name, const char* sep )
{
unsigned int count = 0;
char line[1000];
char* field;
unsigned int current_column = 1;
if (file_p == NULL)
file_p = fopen(file_name, "r");
if (file_p == NULL)
cout << "Could not open file " << file_name << endl;
if (seq == 0) {
seq = new unsigned int[mColumnCount];
thrust::sequence(seq, seq+mColumnCount,0,1);
thrust::stable_sort_by_key(cols, cols+mColumnCount, seq);
};
while (count < process_count && fgets(line, 1000, file_p) != NULL) {
current_column = 1;
field = strtok(line,sep);
for(unsigned int i = 0; i< mColumnCount; i++) {
while(cols[i] > current_column) {
field = strtok(NULL,sep);
current_column++;
};
if (type[seq[i]] == 0) {
if (strchr(field,'-') == NULL) {
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
}
else { // handling possible dates
strncpy(field+4,field+5,2);
strncpy(field+6,field+8,2);
field[8] = '\0';
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
};
}
else if (type[seq[i]] == 1)
(h_columns_float[type_index[seq[i]]])[count] = atoff(field);
else {//char
strcpy(h_columns_char[type_index[seq[i]]] + count*char_size[type_index[seq[i]]], field);
}
};
count++;
};
mRecCount = count;
if(count < process_count) {
fclose(file_p);
return 1;
}
else
return 0;
};
void CudaSet::free() {
if (seq) {
delete [] seq;
seq = 0;
};
for(unsigned int i = 0; i < mColumnCount; i++ ) {
if(type[i] == 2 && h_columns_char[type_index[i]] && prm.empty()) {
delete [] h_columns_char[type_index[i]];
h_columns_char[type_index[i]] = NULL;
};
};
if(!prm.empty()) { // free the sources
string some_field;
map<string,int>::iterator it=columnNames.begin();
some_field = (*it).first;
CudaSet* t = varNames[setMap[some_field]];
t->deAllocOnDevice();
};
delete [] type;
delete [] cols;
delete [] decimal;
if(!columnGroups.empty() && mRecCount !=0 && grp != NULL)
hipFree(grp);
for(unsigned int i = 0; i < prm.size(); i++)
delete [] prm[i];
};
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
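// compares two constants and returns a freshly allocated device array of mRecCount booleans, all set to the result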
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s) res = 1;
else res = 0;
else if (op_type == 1) // <
if(d<s) res = 1;
else res = 0;
else if (op_type == 6) // >=
if(d>=s) res = 1;
else res = 0;
else if (op_type == 5) // <=
if(d<=s) res = 1;
else res = 0;
else if (op_type == 4)// =
if(d==s) res = 1;
else res = 0;
else // !=
if(d!=s) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON) res = 1;
else res = 0;
else if (op_type == 1) // <
if ((s-d) > EPSILON) res = 1;
else res = 0;
else if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON)) res = 1;
else res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // convert the int column to float, result goes into temp
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
}
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
};
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name) // compressed data for DIM tables
{
mColumnCount = nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
unsigned int cnt;
file_p = NULL;
FILE* f;
char f1[100];
not_compressed = 0;
mRecCount = Recs;
load_file_name = file_name;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
seq = 0;
strcpy(f1, file_name);
strcat(f1,".");
char col_pos[3];
itoaa(colsRef.front(),col_pos);
strcat(f1,col_pos); // read the size of a segment
strcat(f1, ".header");
f = fopen (f1 , "rb" );
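// the .header file written by writeHeader holds total_count (8 bytes), total_segments, total_max and
// cnt_counts (4 bytes each); the five 4-byte reads below leave cnt equal to the last field, cnt_counts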
for(unsigned int j = 0; j < 5; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
//cout << "creating " << f1 << " " << cnt << endl;
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>(cnt + 9));
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>(cnt + 9));
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>(cnt + 9));
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
char_size.push_back(sizeRef.front());
type_index[i] = h_columns_char.size()-1;
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
{
mColumnCount = nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
file_p = NULL;
mRecCount = Recs;
segCount = 1;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
seq = 0;
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
char_size.push_back(sizeRef.front());
type_index[i] = h_columns_char.size()-1;
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(unsigned int RecordCount, unsigned int ColumnCount)
{
mRecCount = RecordCount;
mColumnCount = ColumnCount;
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
seq = 0;
for(unsigned int i =0; i < mColumnCount; i++) {
cols[i] = i;
};
};
void CudaSet::initialize(CudaSet* a, CudaSet* b, int_type Recs, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = Recs;
mColumnCount = op_sel.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
maxRecs = b->maxRecs;
map<string,int>::iterator it;
seq = 0;
unsigned int i = 0;
segCount = 1;
not_compressed = 1;
col_aliases = op_sel_as;
queue<string> names(op_sel);
while(!names.empty()) {
columnNames[names.front()] = i;
names.pop();
i++;
};
unsigned int index;
for(unsigned int i=0; i < mColumnCount; i++) {
if((it = a->columnNames.find(op_sel.front())) != a->columnNames.end()) {
index = it->second;
cols[i] = i;
decimal[i] = a->decimal[index];
if ((a->type)[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((a->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
type[i] = 2;
type_index[i] = h_columns_char.size()-1;
char_size.push_back(a->char_size[a->type_index[index]]);
prealloc_char_size = 0;
};
}
else {
it = b->columnNames.find(op_sel.front());
index = it->second;
cols[i] = i;
decimal[i] = b->decimal[index];
if ((b->type)[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((b->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
type[i] = 2;
type_index[i] = h_columns_char.size()-1;
char_size.push_back(b->char_size[b->type_index[index]]);
prealloc_char_size = 0;
};
}
op_sel.pop();
};
};
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 5;
else if (op_type == 1) // <
return 6;
else if (op_type == 6) // >=
return 1;
else if (op_type == 5) // <=
return 2;
else return op_type;
}
size_t getFreeMem()
{
size_t available, total;
hipMemGetInfo(&available, &total);
return available;
} ;
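// makes sure device storage exists for every column in 'fields': for a filtered set (prm not empty)
// it only (re)allocates the shared scratch buffer alloced_tmp so it can hold one segment of the widest
// column, otherwise it allocates device storage for any column not already resident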
void allocColumns(CudaSet* a, queue<string> fields)
{
if(!a->prm.empty()) {
unsigned int max_sz = max_tmp(a) ;
CudaSet* t = varNames[setMap[fields.front()]];
if(max_sz*t->maxRecs > alloced_sz) {
if(alloced_sz) {
hipFree(alloced_tmp);
};
hipMalloc((void **) &alloced_tmp, max_sz*t->maxRecs);
alloced_sz = max_sz*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(setMap.count(fields.front()) > 0) {
unsigned int idx = a->columnNames[fields.front()];
bool onDevice = 0;
if(a->type[idx] == 0) {
if(a->d_columns_int[a->type_index[idx]].size() > 0) {
onDevice = 1;
}
}
else if(a->type[idx] == 1) {
if(a->d_columns_float[a->type_index[idx]].size() > 0) {
onDevice = 1;
};
}
else {
if((a->d_columns_char[a->type_index[idx]]) != NULL) {
onDevice = 1;
};
};
if (!onDevice) {
if(a->prm.empty()) {
a->allocColumnOnDevice(idx, a->maxRecs);
}
else {
a->allocColumnOnDevice(idx, largest_prm(a));
};
};
};
fields.pop();
};
};
}
unsigned int largest_prm(CudaSet* a)
{
unsigned int maxx = 0;
for(unsigned int i = 0; i < a->prm_count.size(); i++)
if(maxx < a->prm_count[i])
maxx = a->prm_count[i];
if(maxx == 0)
maxx = a->maxRecs;
return maxx;
};
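// fills column 'field' of a with segment 'segment' of source set t: filtered segments ('R') are gathered
// through the row indexes in prm, full segments are copied whole; a->mRecCount is set to the segment's record count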
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, unsigned int& count)
{
unsigned int tindex = t->columnNames[field];
unsigned int idx = a->columnNames[field];
//find the largest possible size of a gathered segment
if(!a->onDevice(idx)) {
unsigned int max_count = 0;
for(unsigned int i = 0; i < a->prm.size(); i++)
if (a->prm_count[i] > max_count)
max_count = a->prm_count[i];
a->allocColumnOnDevice(idx, max_count);
};
unsigned int g_size = a->prm_count[segment];
if(a->prm_index[segment] == 'R') {
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a));
if(curr_segment != segment) {
hipMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[segment],
4*g_size, hipMemcpyHostToDevice);
curr_segment = segment;
};
mygather(tindex, idx, a, t, count, g_size);
}
else {
mycopy(tindex, idx, a, t, count, g_size);
};
a->mRecCount = g_size;
}
unsigned int getSegmentRecCount(CudaSet* a, unsigned int segment) {
if (segment == a->segCount-1) {
return oldCount - a->maxRecs*segment;
}
else
return a->maxRecs;
}
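// loads each column in 'fields' for the given segment onto the GPU, decompressing from the source set
// and gathering through the filter permutation when the set has one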
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, unsigned int& count)
{
set<string> uniques;
CudaSet *t;
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && setMap.count(fields.front()) > 0) {
if(!a->prm.empty()) {
t = varNames[setMap[fields.front()]];
if(a->prm_count[segment]) {
alloced_switch = 1;
//cout << "copy " << fields.front() << " " << alloced_switch << endl;
t->CopyColumnToGpu(t->columnNames[fields.front()], segment); // segment i
//cout << "gather " << fields.front() << endl;
gatherColumns(a, t, fields.front(), segment, count);
//cout << "end " << endl;
alloced_switch = 0;
}
else
a->mRecCount = 0;
}
else {
a->CopyColumnToGpu(a->columnNames[fields.front()], segment); // segment i
};
uniques.insert(fields.front());
};
fields.pop();
};
}
void setPrm(CudaSet* a, CudaSet* b, char val, unsigned int segment) {
b->prm.push_back(NULL);
b->prm_index.push_back(val);
if (val == 'A') {
b->mRecCount = b->mRecCount + getSegmentRecCount(a,segment);
b->prm_count.push_back(getSegmentRecCount(a, segment));
}
else {
b->prm_count.push_back(0);
};
}
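// gathers g_size values of column tindex from t into column idx of a at the given offset, using the row
// indexes in a->prm_d; when alloced_switch is set the source data is read from alloced_tmp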
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int offset, unsigned int g_size)
{
if(t->type[tindex] == 0) {
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_int[t->type_index[tindex]].begin(), a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_float[t->type_index[tindex]].begin(), a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
if(!alloced_switch) {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
(void*)t->d_columns_char[t->type_index[tindex]], (void*)a->d_columns_char[a->type_index[idx]], a->char_size[a->type_index[idx]] );
}
else {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
alloced_tmp, (void*)a->d_columns_char[a->type_index[idx]], a->char_size[a->type_index[idx]] );
};
}
};
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int offset, unsigned int g_size)
{
if(t->type[tindex] == 0) {
if(!alloced_switch) {
thrust::copy(t->d_columns_int[t->type_index[tindex]].begin(), t->d_columns_int[t->type_index[tindex]].begin() + g_size,
a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
if(!alloced_switch) {
thrust::copy(t->d_columns_float[t->type_index[tindex]].begin(), t->d_columns_float[t->type_index[tindex]].begin() + g_size,
a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
if(!alloced_switch) {
hipMemcpy((void**)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), (void**)t->d_columns_char[t->type_index[tindex]],
g_size*t->char_size[t->type_index[tindex]], hipMemcpyDeviceToDevice);
}
else {
hipMemcpy((void**)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), alloced_tmp,
g_size*t->char_size[t->type_index[tindex]], hipMemcpyDeviceToDevice);
};
};
};
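// copies onto the GPU the columns of 'right' that appear in c1 (plus the join column f2 for non-string joins);
// rcount receives the allocated record count and the return value is the number of records actually loaded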
unsigned int load_queue(queue<string> c1, CudaSet* right, bool str_join, string f2, unsigned int &rcount)
{
queue<string> cc;
while(!c1.empty()) {
if(right->columnNames.find(c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() || str_join) {
cc.push(c1.front());
};
};
c1.pop();
};
if(!str_join) {
cc.push(f2);
};
unsigned int cnt_r = 0;
if(!right->prm.empty()) {
allocColumns(right, cc);
rcount = std::accumulate(right->prm_count.begin(), right->prm_count.end(), 0 );
}
else
rcount = right->mRecCount;
queue<string> ct(cc);
reset_offsets();
while(!ct.empty()) {
right->allocColumnOnDevice(right->columnNames[ct.front()], rcount);
ct.pop();
};
ct = cc;
if(right->prm.empty()) {
//copy all records
while(!ct.empty()) {
right->CopyColumnToGpu(right->columnNames[ct.front()]);
ct.pop();
};
cnt_r = right->mRecCount;
}
else {
//copy and gather all records
for(unsigned int i = 0; i < right->segCount; i++) {
copyColumns(right, cc, i, cnt_r);
cnt_r = cnt_r + right->prm_count[i];
};
};
return cnt_r;
}
unsigned int max_char(CudaSet* a)
{
unsigned int mx = 0;
for(unsigned int i = 0; i < a->char_size.size(); i++)
if (a->char_size[i] > mx)
mx = a->char_size[i];
return mx;
};
unsigned int max_tmp(CudaSet* a)
{
unsigned int max_sz = 0;
for(unsigned int i = 0; i < a->mColumnCount; i++) {
if(a->type[i] == 0) {
if(int_size > max_sz)
max_sz = int_size;
}
else if(a->type[i] == 1) {
if(float_size > max_sz)
max_sz = float_size;
};
};
unsigned int m_char = max_char(a);
if(m_char > max_sz)
return m_char;
else
return max_sz;
};
void reset_offsets() {
map<unsigned int, unsigned int>::iterator iter;
for (iter = str_offset.begin(); iter != str_offset.end(); ++iter) {
iter->second = 0;
};
};
| 07da68a07dbf644fc1f8fa95f9298598e5263030.cu | /*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <functional>
#include <numeric>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#endif
using namespace std;
using namespace thrust::placeholders;
unsigned long long int total_count = 0;
unsigned int total_segments = 0;
unsigned int total_max;
unsigned int process_count;
map <unsigned int, unsigned int> str_offset;
long long int totalRecs = 0;
bool fact_file_loaded = 0;
char map_check;
void* d_v = NULL;
void* s_v = NULL;
unsigned int oldCount;
queue<string> op_type;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<string> col_aliases;
void* alloced_tmp;
unsigned int alloced_sz = 0;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string,string> setMap; //map to keep track of column names and set names
struct is_match
{
__host__ __device__
bool operator()(unsigned int x)
{
return x != 4294967295;
}
};
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return !(((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
struct l_to_ui
{
__host__ __device__
float_type operator()(const int_type x)
{
return (unsigned int)x;
}
};
struct float_to_decimal
{
__host__ __device__
float_type operator()(const float_type x)
{
return (int_type)(x*100);
}
};
struct to_zero
{
__host__ __device__
bool operator()(const int_type x)
{
if(x == -1)
return 0;
else
return 1;
}
};
struct div_long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x, const float_type y)
{
return (float_type)x/y;
}
};
struct long_to_float
{
__host__ __device__
float_type operator()(const long long int x)
{
return (((float_type)x)/100.0);
}
};
struct Uint2Sum
{
__host__ __device__ uint2 operator()(uint2& a, uint2& b)
{
//a.x += b.x;
a.y += b.y;
return a;
}
};
struct uint2_split
{
const uint2* d_res;
unsigned int * output;
uint2_split(const uint2* _d_res, unsigned int * _output):
d_res(_d_res), output(_output) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
output[i] = d_res[i].y;
}
};
// trim from start
static inline std::string <rim(std::string &s) {
s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun<int, int>(std::isspace))));
return s;
}
// trim from end
static inline std::string &rtrim(std::string &s) {
s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
return s;
}
// trim from both ends
static inline std::string &trim(std::string &s) {
return ltrim(rtrim(s));
}
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, unsigned int& count);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int count, unsigned int g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int count, unsigned int g_size);
void write_compressed_char(string file_name, unsigned int index, unsigned int mCount);
unsigned int largest_prm(CudaSet* a);
unsigned int max_tmp(CudaSet* a);
unsigned int curr_segment = 10000000;
size_t getFreeMem();
char zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a, unsigned int segment);
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
keep = false;
partial_load = 0;
source = 1;
text_source = 1;
grp = NULL;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
keep = false;
partial_load = 1;
source = 1;
text_source = 0;
grp = NULL;
};
CudaSet::CudaSet(unsigned int RecordCount, unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
partial_load = 0;
source = 0;
text_source = 0;
grp = NULL;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, int_type Recs, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b,Recs, op_sel, op_sel_as);
keep = false;
partial_load = 0;
source = 0;
text_source = 0;
grp = NULL;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(unsigned int colIndex, unsigned int RecordCount)
{
if (type[colIndex] == 0) {
d_columns_int[type_index[colIndex]].resize(RecordCount);
}
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(RecordCount);
else {
void* d;
cudaMalloc(&d, char_size[type_index[colIndex]]*RecordCount);
d_columns_char[type_index[colIndex]] = (char*)d;
};
};
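// Reads one compressed char segment from disk, hashes every dictionary entry
// with MurmurHash64A, unpacks the bit-packed value indices on the GPU and
// gathers the resulting 64-bit hashes into d_columns_int[i_cnt], appending
// after any rows already present. Used when a string column only needs to be
// joined or grouped by its hash.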
void CudaSet::decompress_char_hash(unsigned int colIndex, unsigned int segment, unsigned int i_cnt)
{
unsigned int bits_encoded, fit_count, sz, vals_count, real_count, old_count;
const unsigned int len = char_size[type_index[colIndex]];
char f1[100];
strcpy(f1, load_file_name);
strcat(f1,".");
char col_pos[3];
itoaa(cols[colIndex],col_pos);
strcat(f1,col_pos);
strcat(f1,".");
itoaa(segment,col_pos);
strcat(f1,col_pos);
FILE* f;
f = fopen (f1 , "rb" );
fread(&sz, 4, 1, f);
char* d_array = new char[sz*len];
fread((void*)d_array, sz*len, 1, f);
unsigned long long int* hashes = new unsigned long long int[sz];
for(unsigned int i = 0; i < sz ; i++) {
hashes[i] = MurmurHash64A(&d_array[i*len], len, hash_seed); // 64-bit hash of each fixed-width dictionary entry
};
void* d;
cudaMalloc((void **) &d, sz*int_size);
cudaMemcpy( d, (void *) hashes, sz*8, cudaMemcpyHostToDevice);
thrust::device_ptr<unsigned long long int> dd_int((unsigned long long int*)d);
delete[] d_array;
delete[] hashes;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
unsigned long long int* int_array = new unsigned long long int[vals_count];
fread((void*)int_array, 1, vals_count*8, f);
fclose(f);
void* d_val;
cudaMalloc((void **) &d_val, vals_count*8);
cudaMemcpy(d_val, (void *) int_array, vals_count*8, cudaMemcpyHostToDevice);
thrust::device_ptr<unsigned long long int> mval((unsigned long long int*)d_val);
delete[] int_array;
void* d_int;
cudaMalloc((void **) &d_int, real_count*4);
// convert bits to ints and then do gather
void* d_v;
cudaMalloc((void **) &d_v, 8);
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
dd_v[1] = fit_count;
dd_v[0] = bits_encoded;
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)d_v);
thrust::for_each(begin, begin + real_count, ff);
//thrust::device_ptr<long long int> dd_int((long long int*)d);
thrust::device_ptr<unsigned int> dd_val((unsigned int*)d_int);
if(!prm.empty()) {
if(prm_index[segment] == 'R') {
thrust::device_ptr<int_type> d_tmp = thrust::device_malloc<int_type>(real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_tmp);
if(prm_d.size() == 0) // find the largest prm segment
prm_d.resize(largest_prm(this));
cudaMemcpy((void**)(thrust::raw_pointer_cast(prm_d.data())), (void**)prm[segment],
4*prm_count[segment], cudaMemcpyHostToDevice);
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + prm_count[segment]);
thrust::gather(prm_d.begin(), prm_d.begin() + prm_count[segment], d_tmp, d_columns_int[i_cnt].begin() + old_count);
thrust::device_free(d_tmp);
}
else if(prm_index[segment] == 'A') {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
}
}
else {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
};
cudaFree(d);
cudaFree(d_val);
cudaFree(d_v);
cudaFree(d_int);
};
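// Checks (and caches in uniqueColumns) whether an already sorted column holds
// only distinct values by counting adjacent inequalities on the device;
// uncompressed sets are always reported as non-unique.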
bool CudaSet::isUnique(unsigned int colIndex) // run only on already sorted columns
{
if (not_compressed)
uniqueColumns[colIndex] = 0;
if (uniqueColumns.find(colIndex) == uniqueColumns.end()) {
if(mRecCount == 1 )
uniqueColumns[colIndex] = 1;
else {
thrust::device_ptr<unsigned int> d_group = thrust::device_malloc<unsigned int>(mRecCount-1);
thrust::transform(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_int[type_index[colIndex]].begin()+1, d_group, thrust::not_equal_to<int_type>());
unsigned int grp_count = thrust::reduce(d_group, d_group+mRecCount-1);
if(grp_count == mRecCount-1)
uniqueColumns[colIndex] = 1;
else
uniqueColumns[colIndex] = 0;
};
};
return uniqueColumns[colIndex];
};
// takes a char column , hashes strings, copies them to a gpu
void CudaSet::add_hashed_strings(string field, unsigned int segment, unsigned int i_cnt)
{
unsigned int colInd2 = columnNames.find(field)->second;
CudaSet *t = varNames[setMap[field]];
if(not_compressed) { // decompressed strings on a host
unsigned int old_count;
unsigned long long int* hashes = new unsigned long long int[t->mRecCount];
for(unsigned int i = 0; i < t->mRecCount ; i++)
hashes[i] = MurmurHash64A(t->h_columns_char[t->type_index[colInd2]] + i*t->char_size[t->type_index[colInd2]], t->char_size[t->type_index[colInd2]], hash_seed);
if(!prm.empty()) {
if(prm_index[segment] == 'R') {
thrust::device_ptr<unsigned long long int> d_tmp = thrust::device_malloc<unsigned long long int>(t->mRecCount);
thrust::copy(hashes, hashes+mRecCount, d_tmp);
if(prm_d.size() == 0) // find the largest prm segment
prm_d.resize(largest_prm(this));
cudaMemcpy((void**)(thrust::raw_pointer_cast(prm_d.data())), (void**)prm[segment],
4*prm_count[segment], cudaMemcpyHostToDevice);
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + prm_count[segment]);
thrust::gather(prm_d.begin(), prm_d.begin() + prm_count[segment], d_tmp, d_columns_int[i_cnt].begin() + old_count);
thrust::device_free(d_tmp);
}
else if(prm_index[segment] == 'A') {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
}
}
else {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
}
}
else { // hash the dictionary
decompress_char_hash(colInd2, segment, i_cnt);
};
};
void CudaSet::resize(unsigned int addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0) {
h_columns_int[type_index[i]].resize(mRecCount);
}
else if(type[i] == 1) {
h_columns_float[type_index[i]].resize(mRecCount);
}
else {
if (h_columns_char[type_index[i]]) {
if (mRecCount > prealloc_char_size) {
prealloc_char_size = mRecCount;
h_columns_char[type_index[i]] = (char*)realloc(h_columns_char[type_index[i]], mRecCount*char_size[type_index[i]]);
};
}
else {
h_columns_char[type_index[i]] = new char[mRecCount*char_size[type_index[i]]];
};
};
};
};
void CudaSet::reserve(unsigned int Recs)
{
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0)
h_columns_int[type_index[i]].reserve(Recs);
else if(type[i] == 1)
h_columns_float[type_index[i]].reserve(Recs);
else {
h_columns_char[type_index[i]] = new char[Recs*char_size[type_index[i]]];
prealloc_char_size = Recs;
};
};
};
void CudaSet::deAllocColumnOnDevice(unsigned int colIndex)
{
if (type[colIndex] == 0 && !d_columns_int.empty()) {
d_columns_int[type_index[colIndex]].resize(0);
d_columns_int[type_index[colIndex]].shrink_to_fit();
}
else if (type[colIndex] == 1 && !d_columns_float.empty()) {
d_columns_float[type_index[colIndex]].resize(0);
d_columns_float[type_index[colIndex]].shrink_to_fit();
}
else if (type[colIndex] == 2 && d_columns_char[type_index[colIndex]] != NULL) {
cudaFree(d_columns_char[type_index[colIndex]]);
d_columns_char[type_index[colIndex]] = NULL;
};
};
void CudaSet::allocOnDevice(unsigned int RecordCount)
{
for(unsigned int i=0; i < mColumnCount; i++)
allocColumnOnDevice(i, RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i <mColumnCount; i++)
deAllocColumnOnDevice(i);
if(!columnGroups.empty() && mRecCount !=0) {
cudaFree(grp);
grp = NULL;
};
if(!prm.empty()) { // free the sources
string some_field;
map<string,int>::iterator it=columnNames.begin();
some_field = (*it).first;
if(setMap[some_field].compare(name)) {
CudaSet* t = varNames[setMap[some_field]];
t->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(unsigned int RecCount, unsigned int colIndex)
{
if (RecCount) {
if (type[colIndex] == 0)
d_columns_int[type_index[colIndex]].resize(mRecCount+RecCount);
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(mRecCount+RecCount);
else {
if (d_columns_char[type_index[colIndex]] != NULL)
cudaFree(d_columns_char[type_index[colIndex]]);
void *d;
cudaMalloc((void **) &d, (mRecCount+RecCount)*char_size[type_index[colIndex]]);
d_columns_char[type_index[colIndex]] = (char*)d;
};
};
};
void CudaSet::resizeDevice(unsigned int RecCount)
{
if (RecCount)
for(unsigned int i=0; i < mColumnCount; i++)
resizeDeviceColumn(RecCount, i);
};
bool CudaSet::onDevice(unsigned int i)
{
unsigned j = type_index[i];
if (type[i] == 0) {
if (d_columns_int.empty())
return 0;
if (d_columns_int[j].size() == 0)
return 0;
}
else if (type[i] == 1) {
if (d_columns_float.empty())
return 0;
if(d_columns_float[j].size() == 0)
return 0;
}
else if (type[i] == 2) {
if(d_columns_char.empty())
return 0;
if(d_columns_char[j] == NULL)
return 0;
};
return 1;
}
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
a->columnNames[(*it).first] = (*it).second;
for(unsigned int i=0; i < mColumnCount; i++) {
a->cols[i] = cols[i];
a->type[i] = type[i];
if(a->type[i] == 0) {
a->d_columns_int.push_back(thrust::device_vector<int_type>());
a->h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
a->type_index[i] = a->d_columns_int.size()-1;
}
else if(a->type[i] == 1) {
a->d_columns_float.push_back(thrust::device_vector<float_type>());
a->h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
a->type_index[i] = a->d_columns_float.size()-1;
a->decimal[i] = decimal[i];
}
else {
a->h_columns_char.push_back(NULL);
a->d_columns_char.push_back(NULL);
a->type_index[i] = a->d_columns_char.size()-1;
};
};
a->char_size = char_size;
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
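// Reads segment segNum of column colIndex from "<load_file_name>.<col>.<seg>".
// Int and decimal columns are read as raw compressed blocks into the host
// vector (the first 4 bytes hold the compressed count); char columns are
// decompressed straight to the GPU by decompress_char(). Always returns 0.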
unsigned long long int CudaSet::readSegmentsFromFile(unsigned int segNum, unsigned int colIndex)
{
char f1[100];
strcpy(f1, load_file_name);
strcat(f1,".");
char col_pos[3];
itoaa(cols[colIndex],col_pos);
strcat(f1,col_pos);
unsigned int cnt;
strcat(f1,".");
itoaa(segNum,col_pos);
strcat(f1,col_pos);
FILE* f;
f = fopen (f1 , "rb" );
if(type[colIndex] == 0) {
fread(h_columns_int[type_index[colIndex]].data(), 4, 1, f);
cnt = ((unsigned int*)(h_columns_int[type_index[colIndex]].data()))[0];
fread((unsigned int*)(h_columns_int[type_index[colIndex]].data()) + 1, (cnt+8)*8 - 4, 1, f);
}
else if(type[colIndex] == 1) {
fread(h_columns_float[type_index[colIndex]].data(), 4, 1, f);
cnt = ((unsigned int*)(h_columns_float[type_index[colIndex]].data()))[0];
fread((unsigned int*)(h_columns_float[type_index[colIndex]].data()) + 1, (cnt+8)*8 - 4, 1, f);
}
else {
decompress_char(f, colIndex, segNum);
};
fclose(f);
return 0;
};
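// On-disk layout of a compressed char segment:
// [sz][sz fixed-width dictionary strings][fit_count][bits_encoded]
// [vals_count][real_count][vals_count 64-bit words of bit-packed indices].
// The indices are unpacked on the GPU and used to gather dictionary strings
// into d_columns_char (or into alloced_tmp when alloced_switch is set);
// str_offset[colIndex] advances so consecutive segments append to the buffer.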
void CudaSet::decompress_char(FILE* f, unsigned int colIndex, unsigned int segNum)
{
unsigned int bits_encoded, fit_count, sz, vals_count, real_count;
const unsigned int len = char_size[type_index[colIndex]];
fread(&sz, 4, 1, f);
char* d_array = new char[sz*len];
fread((void*)d_array, sz*len, 1, f);
void* d;
cudaMalloc((void **) &d, sz*len);
cudaMemcpy( d, (void *) d_array, sz*len, cudaMemcpyHostToDevice);
delete[] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
unsigned long long int* int_array = new unsigned long long int[vals_count];
fread((void*)int_array, 1, vals_count*8, f);
fclose(f);
void* d_val;
cudaMalloc((void **) &d_val, vals_count*8);
cudaMemcpy(d_val, (void *) int_array, vals_count*8, cudaMemcpyHostToDevice);
delete[] int_array;
void* d_int;
cudaMalloc((void **) &d_int, real_count*4);
// convert bits to ints and then do gather
void* d_v;
cudaMalloc((void **) &d_v, 8);
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
dd_v[1] = fit_count;
dd_v[0] = bits_encoded;
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)d_v);
thrust::for_each(begin, begin + real_count, ff);
//thrust::device_ptr<unsigned int> dd_r((unsigned int*)d_int);
//for(int z = 0 ; z < 3; z++)
//cout << "DD " << dd_r[z] << endl;
//void* d_char;
//cudaMalloc((void **) &d_char, real_count*len);
//cudaMemset(d_char, 0, real_count*len);
//str_gather(d_int, real_count, d, d_char, len);
if(str_offset.count(colIndex) == 0)
str_offset[colIndex] = 0;
//cout << "str off " << str_offset[colIndex] << endl;
if(!alloced_switch)
str_gather(d_int, real_count, d, d_columns_char[type_index[colIndex]] + str_offset[colIndex]*len, len);
else
str_gather(d_int, real_count, d, alloced_tmp, len);
if(!prm.empty()) {
str_offset[colIndex] = str_offset[colIndex] + prm_count[segNum];
}
else {
str_offset[colIndex] = str_offset[colIndex] + real_count;
};
//if(d_columns_char[type_index[colIndex]])
// cudaFree(d_columns_char[type_index[colIndex]]);
//d_columns_char[type_index[colIndex]] = (char*)d_char;
mRecCount = real_count;
cudaFree(d);
cudaFree(d_val);
cudaFree(d_v);
cudaFree(d_int);
}
void CudaSet::CopyToGpu(unsigned int offset, unsigned int count)
{
if (not_compressed) {
for(unsigned int i = 0; i < mColumnCount; i++) {
switch(type[i]) {
case 0 :
thrust::copy(h_columns_int[type_index[i]].begin() + offset, h_columns_int[type_index[i]].begin() + offset + count, d_columns_int[type_index[i]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[i]].begin() + offset, h_columns_float[type_index[i]].begin() + offset + count, d_columns_float[type_index[i]].begin());
break;
default :
cudaMemcpy(d_columns_char[type_index[i]], h_columns_char[type_index[i]], char_size[type_index[i]]*(offset + count), cudaMemcpyHostToDevice);
};
};
}
else
for(unsigned int i = 0; i < mColumnCount; i++)
CopyColumnToGpu(i, offset, count);
};
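// Copies a single segment of one column to the device. Uncompressed data is a
// plain host-to-device copy; compressed int/decimal data is read from disk
// (when partially loaded) and expanded with pfor_decompress, decimals being
// converted back from scaled 64-bit integers to floats.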
void CudaSet::CopyColumnToGpu(unsigned int colIndex, unsigned int segment)
{
if(not_compressed) {
switch(type[colIndex]) {
case 0 :
if(!alloced_switch)
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_col);
};
break;
case 1 :
if(!alloced_switch)
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_col);
};
break;
default :
if(!alloced_switch)
cudaMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, cudaMemcpyHostToDevice);
else
cudaMemcpy(alloced_tmp, h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, cudaMemcpyHostToDevice);
};
}
else {
unsigned long long int data_offset;
if (partial_load)
data_offset = readSegmentsFromFile(segment,colIndex);
if(type[colIndex] != 2) {
if(d_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
};
if(type[colIndex] == 0) {
if(!alloced_switch) {
pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data()), h_columns_int[type_index[colIndex]].data() + data_offset, &mRecCount, d_v, s_v);
}
else {
pfor_decompress(alloced_tmp, h_columns_int[type_index[colIndex]].data() + data_offset, &mRecCount, d_v, s_v);
};
}
else if(type[colIndex] == 1) {
if(decimal[colIndex]) {
if(!alloced_switch) {
pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()) , h_columns_float[type_index[colIndex]].data() + data_offset, &mRecCount, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin(), long_to_float());
}
else {
pfor_decompress(alloced_tmp, h_columns_float[type_index[colIndex]].data() + data_offset, &mRecCount, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
}
//else // uncompressed float
//cudaMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, cudaMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
void CudaSet::CopyColumnToGpu(unsigned int colIndex) // copy all segments
{
if(not_compressed) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
break;
default :
cudaMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, cudaMemcpyHostToDevice);
};
}
else {
long long int data_offset;
unsigned int totalRecs = 0;
if(d_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
str_offset[colIndex] = 0;
for(unsigned int i = 0; i < segCount; i++) {
if (partial_load)
data_offset = readSegmentsFromFile(i,colIndex);
if(type[colIndex] == 0) {
pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data() + totalRecs), h_columns_int[type_index[colIndex]].data() + data_offset, &mRecCount, d_v, s_v);
}
else if(type[colIndex] == 1) {
if(decimal[colIndex]) {
pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs) , h_columns_float[type_index[colIndex]].data() + data_offset, &mRecCount, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin() + totalRecs, long_to_float());
}
// else uncompressed float
//cudaMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, cudaMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
};
totalRecs = totalRecs + mRecCount;
};
mRecCount = totalRecs;
};
}
void CudaSet::CopyColumnToGpu(unsigned int colIndex, unsigned int offset, unsigned int count)
{
if(not_compressed) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + offset + count, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + offset + count, d_columns_float[type_index[colIndex]].begin());
break;
default :
cudaMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*(offset + count), cudaMemcpyHostToDevice);
};
}
else {
};
}
void CudaSet::CopyColumnToHost(int colIndex, unsigned int offset, unsigned int RecCount)
{
switch(type[colIndex]) {
case 0 :
thrust::copy(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + RecCount, h_columns_int[type_index[colIndex]].begin() + offset);
break;
case 1 :
thrust::copy(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + RecCount, h_columns_float[type_index[colIndex]].begin() + offset);
break;
default :
cudaMemcpy(h_columns_char[type_index[colIndex]] + offset*char_size[type_index[colIndex]], d_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*RecCount, cudaMemcpyDeviceToHost);
}
}
void CudaSet::CopyColumnToHost(int colIndex)
{
CopyColumnToHost(colIndex, 0, mRecCount);
}
void CudaSet::CopyToHost(unsigned int offset, unsigned int count)
{
for(unsigned int i = 0; i < mColumnCount; i++) {
CopyColumnToHost(i, offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_float[type_index[colIndex]].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_int[type_index[colIndex]].data());
}
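// Builds the grp boolean flags for a GROUP BY over the given key columns:
// grp[i] is set when row i's key differs from row i+1's in any column, i.e. it
// marks the last row of each group (the final row is always set). Char keys
// are compared through their hashed int columns starting at int_col_count.
// grp_count receives the resulting number of groups.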
void CudaSet::GroupBy(stack<string> columnRef, unsigned int int_col_count)
{
int grpInd, colIndex;
if(grp)
cudaFree(grp);
CUDA_SAFE_CALL(cudaMalloc((void **) &grp, mRecCount * sizeof(bool)));
thrust::device_ptr<bool> d_grp(grp);
thrust::sequence(d_grp, d_grp+mRecCount, 0, 0);
thrust::device_ptr<bool> d_group = thrust::device_malloc<bool>(mRecCount);
d_group[mRecCount-1] = 1;
unsigned int i_count = 0;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
columnGroups.push(columnRef.top()); // save for future references
colIndex = columnNames[columnRef.top()];
if(!onDevice(colIndex)) {
allocColumnOnDevice(colIndex,mRecCount);
CopyColumnToGpu(colIndex, mRecCount);
grpInd = 1;
}
else
grpInd = 0;
if (type[colIndex] == 0) { // int_type
thrust::transform(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_int[type_index[colIndex]].begin()+1, d_group, thrust::not_equal_to<int_type>());
}
else if (type[colIndex] == 1) { // float_type
thrust::transform(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_float[type_index[colIndex]].begin()+1, d_group, f_not_equal_to());
}
else { // Char
//str_grp(d_columns_char[type_index[colIndex]], mRecCount, d_group, char_size[type_index[colIndex]]);
//use int_type
thrust::transform(d_columns_int[int_col_count+i_count].begin(), d_columns_int[int_col_count+i_count].begin() + mRecCount - 1,
d_columns_int[int_col_count+i_count].begin()+1, d_group, thrust::not_equal_to<int_type>());
i_count++;
};
thrust::transform(d_group, d_group+mRecCount, d_grp, d_grp, thrust::logical_or<bool>());
if (grpInd == 1)
deAllocColumnOnDevice(colIndex);
};
thrust::device_free(d_group);
grp_count = thrust::count(d_grp, d_grp+mRecCount,1);
};
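// Registers a device column under colName, creating the backing device_vector
// on first use (or resizing it when too small) and copying recCount values
// from the raw device pointer into it.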
void CudaSet::addDeviceColumn(int_type* col, int colIndex, string colName, int_type recCount)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 0;
d_columns_int.push_back(thrust::device_vector<int_type>(recCount));
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
type_index[colIndex] = d_columns_int.size()-1;
}
else { // already exists, may need to resize it
if(d_columns_int[type_index[colIndex]].size() < recCount) {
d_columns_int[type_index[colIndex]].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[type_index[colIndex]].begin());
};
void CudaSet::addDeviceColumn(float_type* col, int colIndex, string colName, int_type recCount)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 1;
d_columns_float.push_back(thrust::device_vector<float_type>(recCount));
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
type_index[colIndex] = d_columns_float.size()-1;
}
else { // already exists, may need to resize it
if(d_columns_float[type_index[colIndex]].size() < recCount)
d_columns_float[type_index[colIndex]].resize(recCount);
};
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[type_index[colIndex]].begin());
};
void CudaSet::addHostColumn(int_type* col, int colIndex, string colName, int_type recCount, int_type old_reccount, bool one_line)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 0;
if (!one_line) {
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>(old_reccount));
type_index[colIndex] = h_columns_int.size()-1;
}
else {
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>(1));
type_index[colIndex] = h_columns_int.size()-1;
};
};
if (!one_line) {
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, h_columns_int[type_index[colIndex]].begin() + mRecCount);
}
else {
thrust::device_ptr<int_type> src(col);
(h_columns_int[type_index[colIndex]])[0] = (h_columns_int[type_index[colIndex]])[0] + src[0];
};
};
void CudaSet::addHostColumn(float_type* col, int colIndex, string colName, int_type recCount, int_type old_reccount, bool one_line)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 1;
if (!one_line) {
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>(old_reccount));
type_index[colIndex] = h_columns_float.size()-1;
}
else {
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>(1));
type_index[colIndex] = h_columns_float.size()-1;
};
};
if (!one_line) {
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, h_columns_float[type_index[colIndex]].begin() + mRecCount);
}
else {
thrust::device_ptr<float_type> src(col);
(h_columns_float[type_index[colIndex]])[0] = (h_columns_float[type_index[colIndex]])[0] + src[0];
};
};
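// Appends the running totals for one column file to "<file_name>.<col>.header":
// total record count, segment count, largest segment size and the per-column
// value tracked in cnt_counts.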
void CudaSet::writeHeader(char* file_name, unsigned int col) {
char str[100];
char col_pos[3];
strcpy(str, file_name);
strcat(str,".");
itoaa(col,col_pos);
strcat(str,col_pos);
string ff = str;
strcat(str,".header");
fstream binary_file(str,ios::out|ios::binary|ios::app);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&total_segments, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
binary_file.close();
};
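// Writes the set out. With binary == 0 the data is materialized segment by
// segment and printed as delimited text. With binary == 1 and a text source
// each column is compressed into "<file_name>.<col>.<segment>" files: pfor for
// ints and decimals, dictionary + bit packing for chars, raw for plain floats.
// A call with mRecCount == 0 only flushes the .header tails; writing from a
// binary source is still a stub.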
void CudaSet::Store(char* file_name, char* sep, unsigned int limit, bool binary )
{
if (mRecCount == 0 && binary == 1) { // write tails
for(unsigned int i = 0; i< mColumnCount; i++) {
writeHeader(file_name, cols[i]);
};
return;
};
unsigned int mCount;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else
mCount = mRecCount;
if(binary == 0) {
char buffer [33];
queue<string> op_vx;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
op_vx.push((*it).first);
curr_segment = 1000000;
FILE *file_pr = fopen(file_name, "w");
if (file_pr == NULL)
cout << "Could not open file " << file_name << endl;
if(prm.size() || source)
allocColumns(this, op_vx);
unsigned int curr_seg = 0, cnt = 0;
unsigned curr_count, sum_printed = 0;
while(sum_printed < mCount) {
// cout << "mcount " << mCount << " " << prm.size() << " " << keep << endl;
if(prm.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
// if host arrays are empty
unsigned int olRecs = mRecCount;
resize(mRecCount);
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount)
curr_count = mRecCount;
else {
curr_count = mCount - sum_printed;
};
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
string ss;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < mColumnCount; j++) {
if (type[j] == 0) {
sprintf(buffer, "%lld", (h_columns_int[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else if (type[j] == 1) {
sprintf(buffer, "%.2f", (h_columns_float[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else {
ss.assign(h_columns_char[type_index[j]] + (i*char_size[type_index[j]]), char_size[type_index[j]]);
trim(ss);
fputs(ss.c_str(), file_pr);
fputs(sep, file_pr);
};
};
if (i != mCount -1)
fputs("\n",file_pr);
};
curr_seg++;
};
fclose(file_pr);
}
else if(text_source) { //writing a binary file using a text file as a source
char str[100];
char col_pos[3];
total_count = total_count + mCount;
total_segments = total_segments + 1;
if (mCount > total_max)
total_max = mCount;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, mCount*float_size));
for(unsigned int i = 0; i< mColumnCount; i++) {
strcpy(str, file_name);
strcat(str,".");
itoaa(cols[i],col_pos);
strcat(str,col_pos);
curr_file = str;
strcat(str,".");
itoaa(total_segments-1,col_pos);
strcat(str,col_pos);
if(type[i] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::copy(h_columns_int[type_index[i]].begin(), h_columns_int[type_index[i]].begin() + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[type_index[i]], 0, 0);
}
else if(type[i] == 1) {
if(decimal[i]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
thrust::copy(h_columns_float[type_index[i]].begin(), h_columns_float[type_index[i]].begin() + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[type_index[i]], 1, 0);
}
else { // do not compress -- float
fstream binary_file(str,ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[type_index[i]].data()),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else { //char
compress_char(str, i, mCount);
};
if(fact_file_loaded) {
writeHeader(file_name, cols[i]);
};
};
for(unsigned int i = 0; i< mColumnCount; i++)
if(type[i] == 2)
deAllocColumnOnDevice(i);
cudaFree(d);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
// do it for every segment
// will add this later
};
}
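// Dictionary-encodes a char column segment: every distinct string gets an
// index, and the indices are bit-packed into 64-bit words (fit_count codes of
// bits_encoded bits per word) after the dictionary itself. As a rough example,
// with 5 distinct values bits_encoded = ceil(log2(6)) = 3 and fit_count =
// 64/3 = 21 codes per word.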
void CudaSet::compress_char(string file_name, unsigned int index, unsigned int mCount)
{
std::vector<string> v1;
std::map<string,unsigned int> dict;
std::vector<string> dict_ordered;
std::vector<unsigned int> dict_val;
map<string,unsigned int>::iterator iter;
unsigned int bits_encoded;
char* field;
unsigned int len = char_size[type_index[index]];
field = new char[len+1];
field[len] = '\0'; // stored strings are fixed-width and may not be null-terminated
for (unsigned int i = 0 ; i < mCount; i++) {
strncpy(field, h_columns_char[type_index[index]] + i*len, len);
v1.push_back(field);
if((iter = dict.find(field)) != dict.end()) {
dict_val.push_back(iter->second);
}
else {
string f = field;
unsigned int new_id = (unsigned int)dict.size(); // take the size before the insert to avoid unspecified evaluation order
dict[f] = new_id;
dict_val.push_back(new_id);
dict_ordered.push_back(f);
};
};
delete [] field;
bits_encoded = (unsigned int)ceil(log2(double(dict.size()+1)));
char *cc = new char[len+1];
unsigned int sz = dict_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary);
binary_file.write((char *)&sz, 4);
for(unsigned int i = 0; i < dict_ordered.size(); i++) {
memset(&cc[0], 0, len);
strcpy(cc,dict_ordered[i].c_str());
binary_file.write(cc, len);
};
delete [] cc;
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, 8);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
void CudaSet::LoadFile(char* file_name, char* sep )
{
unsigned int count = 0;
char line[500];
char* field;
unsigned int current_column = 1;
FILE *file_ptr = fopen(file_name, "r");
if (file_ptr == NULL)
cout << "Could not open file " << file_name << endl;
unsigned int *seq = new unsigned int[mColumnCount];
thrust::sequence(seq, seq+mColumnCount,0,1);
thrust::stable_sort_by_key(cols, cols+mColumnCount, seq);
while (fgets(line, 500, file_ptr) != NULL ) {
current_column = 1;
field = strtok(line,sep);
for(unsigned int i = 0; i< mColumnCount; i++) {
while(cols[i] > current_column) {
field = strtok(NULL,sep);
current_column++;
};
if (type[seq[i]] == 0) {
if (strchr(field,'-') == NULL) {
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
}
else { // handling possible dates
strncpy(field+4,field+5,2);
strncpy(field+6,field+8,2);
field[8] = '\0';
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
};
}
else if (type[seq[i]] == 1)
(h_columns_float[type_index[seq[i]]])[count] = atoff(field);
else {
strcpy(h_columns_char[type_index[seq[i]]] + count*char_size[type_index[seq[i]]], field);
};
};
count++;
if (count == mRecCount) {
mRecCount = mRecCount + process_count;
resize(mRecCount);
};
};
fclose(file_ptr);
mRecCount = count;
};
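// Streaming variant of LoadFile: reads at most process_count delimited lines
// per call into the host columns and returns 1 once the end of the file is
// reached (the file is closed), 0 when a full batch was read and more remains.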
int CudaSet::LoadBigFile(const char* file_name, const char* sep )
{
unsigned int count = 0;
char line[1000];
char* field;
unsigned int current_column = 1;
if (file_p == NULL)
file_p = fopen(file_name, "r");
if (file_p == NULL)
cout << "Could not open file " << file_name << endl;
if (seq == 0) {
seq = new unsigned int[mColumnCount];
thrust::sequence(seq, seq+mColumnCount,0,1);
thrust::stable_sort_by_key(cols, cols+mColumnCount, seq);
};
while (count < process_count && fgets(line, 1000, file_p) != NULL) {
current_column = 1;
field = strtok(line,sep);
for(unsigned int i = 0; i< mColumnCount; i++) {
while(cols[i] > current_column) {
field = strtok(NULL,sep);
current_column++;
};
if (type[seq[i]] == 0) {
if (strchr(field,'-') == NULL) {
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
}
else { // handling possible dates
strncpy(field+4,field+5,2);
strncpy(field+6,field+8,2);
field[8] = '\0';
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
};
}
else if (type[seq[i]] == 1)
(h_columns_float[type_index[seq[i]]])[count] = atoff(field);
else {//char
strcpy(h_columns_char[type_index[seq[i]]] + count*char_size[type_index[seq[i]]], field);
}
};
count++;
};
mRecCount = count;
if(count < process_count) {
fclose(file_p);
return 1;
}
else
return 0;
};
void CudaSet::free() {
if (seq)
delete [] seq;
for(unsigned int i = 0; i < mColumnCount; i++ ) {
if(type[i] == 2 && h_columns_char[type_index[i]] && prm.empty()) {
delete [] h_columns_char[type_index[i]];
h_columns_char[type_index[i]] = NULL;
};
};
if(!prm.empty()) { // free the sources
string some_field;
map<string,int>::iterator it=columnNames.begin();
some_field = (*it).first;
CudaSet* t = varNames[setMap[some_field]];
t->deAllocOnDevice();
};
delete [] type;
delete [] cols;
if(!columnGroups.empty() && mRecCount !=0 && grp != NULL)
cudaFree(grp);
for(unsigned int i = 0; i < prm.size(); i++)
delete [] prm[i];
};
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
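// Constant-vs-constant comparison: the predicate is evaluated once on the host
// and the result is broadcast into a freshly allocated device bool column.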
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s) res = 1;
else res = 0;
else if (op_type == 1) // <
if(d<s) res = 1;
else res = 0;
else if (op_type == 6) // >=
if(d>=s) res = 1;
else res = 0;
else if (op_type == 5) // <=
if(d<=s) res = 1;
else res = 0;
else if (op_type == 4)// =
if(d==s) res = 1;
else res = 0;
else // !=
if(d!=s) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON) res = 1;
else res = 0;
else if (op_type == 1) // <
if ((s-d) > EPSILON) res = 1;
else res = 0;
else if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON)) res = 1;
else res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
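// Element-wise arithmetic between columns and/or constants. op_type is one of
// "MUL", "ADD", "MINUS" (anything else divides); reverse swaps the operand
// order, which matters for MINUS and division. Each overload returns a newly
// device_malloc'ed buffer owned by the caller.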
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // in-place transformation
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
}
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
};
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
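// Variant used for compressed tables backed by files (DIM tables): each
// column's ".header" is opened and its last 4-byte value is read into cnt so
// the host buffer can be pre-sized to cnt + 9 elements for the compressed
// segments that are loaded on demand.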
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name) // compressed data for DIM tables
{
mColumnCount = nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
unsigned int cnt;
file_p = NULL;
FILE* f;
char f1[100];
not_compressed = 0;
mRecCount = Recs;
load_file_name = file_name;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
seq = 0;
strcpy(f1, file_name);
strcat(f1,".");
char col_pos[3];
itoaa(colsRef.front(),col_pos);
strcat(f1,col_pos); // read the size of a segment
strcat(f1, ".header");
f = fopen (f1 , "rb" );
for(unsigned int j = 0; j < 5; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
//cout << "creating " << f1 << " " << cnt << endl;
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>(cnt + 9));
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>(cnt + 9));
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>(cnt + 9));
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
char_size.push_back(sizeRef.front());
type_index[i] = h_columns_char.size()-1;
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
{
mColumnCount = nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
file_p = NULL;
mRecCount = Recs;
segCount = 1;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
seq = 0;
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
char_size.push_back(sizeRef.front());
type_index[i] = h_columns_char.size()-1;
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(unsigned int RecordCount, unsigned int ColumnCount)
{
mRecCount = RecordCount;
mColumnCount = ColumnCount;
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
seq = 0;
for(unsigned int i =0; i < mColumnCount; i++) {
cols[i] = i;
};
};
void CudaSet::initialize(CudaSet* a, CudaSet* b, int_type Recs, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = Recs;
mColumnCount = op_sel.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
maxRecs = b->maxRecs;
map<string,int>::iterator it;
seq = 0;
unsigned int i = 0;
segCount = 1;
not_compressed = 1;
col_aliases = op_sel_as;
queue<string> names(op_sel);
while(!names.empty()) {
columnNames[names.front()] = i;
names.pop();
i++;
};
unsigned int index;
for(unsigned int i=0; i < mColumnCount; i++) {
if((it = a->columnNames.find(op_sel.front())) != a->columnNames.end()) {
index = it->second;
cols[i] = i;
decimal[i] = a->decimal[index];
if ((a->type)[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((a->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
type[i] = 2;
type_index[i] = h_columns_char.size()-1;
char_size.push_back(a->char_size[a->type_index[index]]);
prealloc_char_size = 0;
};
}
else {
it = b->columnNames.find(op_sel.front());
index = it->second;
cols[i] = i;
decimal[i] = b->decimal[index];
if ((b->type)[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((b->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
type[i] = 2;
type_index[i] = h_columns_char.size()-1;
char_size.push_back(b->char_size[b->type_index[index]]);
prealloc_char_size = 0;
};
}
op_sel.pop();
};
};
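// Swap comparison operator codes: '>' (2) <-> '<=' (5) and '<' (1) <-> '>=' (6); all other codes pass through unchanged.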
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 5;
else if (op_type == 1) // <
return 6;
else if (op_type == 6) // >=
return 1;
else if (op_type == 5) // <=
return 2;
else return op_type;
}
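// Return the number of bytes of free global memory on the current device.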
size_t getFreeMem()
{
size_t available, total;
cudaMemGetInfo(&available, &total);
return available;
} ;
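// Make sure device storage exists for the requested columns: for filtered sets the shared
// scratch buffer is grown to fit the largest column element, otherwise each column that is
// not yet resident is allocated on the device.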
void allocColumns(CudaSet* a, queue<string> fields)
{
if(!a->prm.empty()) {
unsigned int max_sz = max_tmp(a) ;
CudaSet* t = varNames[setMap[fields.front()]];
if(max_sz*t->maxRecs > alloced_sz) {
if(alloced_sz) {
cudaFree(alloced_tmp);
};
cudaMalloc((void **) &alloced_tmp, max_sz*t->maxRecs);
alloced_sz = max_sz*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(setMap.count(fields.front()) > 0) {
unsigned int idx = a->columnNames[fields.front()];
bool onDevice = 0;
if(a->type[idx] == 0) {
if(a->d_columns_int[a->type_index[idx]].size() > 0) {
onDevice = 1;
}
}
else if(a->type[idx] == 1) {
if(a->d_columns_float[a->type_index[idx]].size() > 0) {
onDevice = 1;
};
}
else {
if((a->d_columns_char[a->type_index[idx]]) != NULL) {
onDevice = 1;
};
};
if (!onDevice) {
if(a->prm.empty()) {
a->allocColumnOnDevice(idx, a->maxRecs);
}
else {
a->allocColumnOnDevice(idx, largest_prm(a));
};
};
};
fields.pop();
};
};
}
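// Largest filter (prm) segment size recorded for the set; falls back to maxRecs when no counts exist.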
unsigned int largest_prm(CudaSet* a)
{
unsigned int maxx = 0;
for(unsigned int i = 0; i < a->prm_count.size(); i++)
if(maxx < a->prm_count[i])
maxx = a->prm_count[i];
if(maxx == 0)
maxx = a->maxRecs;
return maxx;
};
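// Materialize one segment of 'field' inside set a: gather through the segment's row
// permutation when its index type is 'R', otherwise copy the segment verbatim.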
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, unsigned int& count)
{
unsigned int tindex = t->columnNames[field];
unsigned int idx = a->columnNames[field];
//find the largest possible size of a gathered segment
if(!a->onDevice(idx)) {
unsigned int max_count = 0;
for(unsigned int i = 0; i < a->prm.size(); i++)
if (a->prm_count[i] > max_count)
max_count = a->prm_count[i];
a->allocColumnOnDevice(idx, max_count);
};
unsigned int g_size = a->prm_count[segment];
if(a->prm_index[segment] == 'R') {
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a));
if(curr_segment != segment) {
cudaMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[segment],
4*g_size, cudaMemcpyHostToDevice);
curr_segment = segment;
};
mygather(tindex, idx, a, t, count, g_size);
}
else {
mycopy(tindex, idx, a, t, count, g_size);
};
a->mRecCount = g_size;
}
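// Number of records in a segment of set a: maxRecs for every segment except the last,
// which holds the remainder of oldCount.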
unsigned int getSegmentRecCount(CudaSet* a, unsigned int segment) {
if (segment == a->segCount-1) {
return oldCount - a->maxRecs*segment;
}
else
return a->maxRecs;
}
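// Bring one segment of every distinct requested column onto the device, gathering through
// the filter permutation when the set is filtered.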
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, unsigned int& count)
{
set<string> uniques;
CudaSet *t;
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && setMap.count(fields.front()) > 0) {
if(!a->prm.empty()) {
t = varNames[setMap[fields.front()]];
if(a->prm_count[segment]) {
alloced_switch = 1;
//cout << "copy " << fields.front() << " " << alloced_switch << endl;
t->CopyColumnToGpu(t->columnNames[fields.front()], segment); // segment i
//cout << "gather " << fields.front() << endl;
gatherColumns(a, t, fields.front(), segment, count);
//cout << "end " << endl;
alloced_switch = 0;
}
else
a->mRecCount = 0;
}
else {
a->CopyColumnToGpu(a->columnNames[fields.front()], segment); // segment i
};
uniques.insert(fields.front());
};
fields.pop();
};
}
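// Register a filter entry for one segment of b: 'A' keeps every record of the corresponding
// segment of a, any other index type starts with a count of zero.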
void setPrm(CudaSet* a, CudaSet* b, char val, unsigned int segment) {
b->prm.push_back(NULL);
b->prm_index.push_back(val);
if (val == 'A') {
b->mRecCount = b->mRecCount + getSegmentRecCount(a,segment);
b->prm_count.push_back(getSegmentRecCount(a, segment));
}
else {
b->prm_count.push_back(0);
};
}
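// Gather g_size elements selected by a->prm_d from the source column of t (or from the
// shared scratch buffer when alloced_switch is set) into a's column starting at 'offset'.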
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int offset, unsigned int g_size)
{
if(t->type[tindex] == 0) {
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_int[t->type_index[tindex]].begin(), a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_float[t->type_index[tindex]].begin(), a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
if(!alloced_switch) {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
(void*)t->d_columns_char[t->type_index[tindex]], (void*)a->d_columns_char[a->type_index[idx]], a->char_size[a->type_index[idx]] );
}
else {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
alloced_tmp, (void*)a->d_columns_char[a->type_index[idx]], a->char_size[a->type_index[idx]] );
};
}
};
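// Copy g_size elements of t's column (or of the shared scratch buffer) straight into a's
// column starting at 'offset', without applying any permutation.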
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int offset, unsigned int g_size)
{
if(t->type[tindex] == 0) {
if(!alloced_switch) {
thrust::copy(t->d_columns_int[t->type_index[tindex]].begin(), t->d_columns_int[t->type_index[tindex]].begin() + g_size,
a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
if(!alloced_switch) {
thrust::copy(t->d_columns_float[t->type_index[tindex]].begin(), t->d_columns_float[t->type_index[tindex]].begin() + g_size,
a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
if(!alloced_switch) {
cudaMemcpy((void**)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), (void**)t->d_columns_char[t->type_index[tindex]],
g_size*t->char_size[t->type_index[tindex]], cudaMemcpyDeviceToDevice);
}
else {
cudaMemcpy((void**)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), alloced_tmp,
g_size*t->char_size[t->type_index[tindex]], cudaMemcpyDeviceToDevice);
};
};
};
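// Load the right-hand table's join columns onto the device across all segments and return
// the total number of records copied; for non-string joins the join key f2 is queued last.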
unsigned int load_queue(queue<string> c1, CudaSet* right, bool str_join, string f2, unsigned int &rcount)
{
queue<string> cc;
while(!c1.empty()) {
if(right->columnNames.find(c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() || str_join) {
cc.push(c1.front());
};
};
c1.pop();
};
if(!str_join) {
cc.push(f2);
};
unsigned int cnt_r = 0;
if(!right->prm.empty()) {
allocColumns(right, cc);
rcount = std::accumulate(right->prm_count.begin(), right->prm_count.end(), 0 );
}
else
rcount = right->mRecCount;
queue<string> ct(cc);
reset_offsets();
while(!ct.empty()) {
right->allocColumnOnDevice(right->columnNames[ct.front()], rcount);
ct.pop();
};
ct = cc;
if(right->prm.empty()) {
//copy all records
while(!ct.empty()) {
right->CopyColumnToGpu(right->columnNames[ct.front()]);
ct.pop();
};
cnt_r = right->mRecCount;
}
else {
//copy and gather all records
for(unsigned int i = 0; i < right->segCount; i++) {
copyColumns(right, cc, i, cnt_r);
cnt_r = cnt_r + right->prm_count[i];
};
};
return cnt_r;
}
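// Width in bytes of the widest fixed-size string column of the set.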
unsigned int max_char(CudaSet* a)
{
unsigned int max_char = 0;
for(unsigned int i = 0; i < a->char_size.size(); i++)
if (a->char_size[i] > max_char)
max_char = a->char_size[i];
return max_char;
};
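// Largest per-element size (int, float or string) among the set's columns; used to size the
// shared scratch buffer in allocColumns().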
unsigned int max_tmp(CudaSet* a)
{
unsigned int max_sz = 0;
for(unsigned int i = 0; i < a->mColumnCount; i++) {
if(a->type[i] == 0) {
if(int_size > max_sz)
max_sz = int_size;
}
else if(a->type[i] == 1) {
if(float_size > max_sz)
max_sz = float_size;
};
};
unsigned int m_char = max_char(a);
if(m_char > max_sz)
return m_char;
else
return max_sz;
};
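// Zero every cached string-segment offset.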
void reset_offsets() {
map<unsigned int, unsigned int>::iterator iter;
for (iter = str_offset.begin(); iter != str_offset.end(); ++iter) {
iter->second = 0;
};
};
|
f7a11f45451c2590fd69f33995422b6fa3df1ecd.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2020 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO LICENSEE:
*
* This source code and/or documentation ("Licensed Deliverables") are
* subject to NVIDIA intellectual property rights under U.S. and
* international Copyright laws.
*
* These Licensed Deliverables contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a form of NVIDIA software license agreement by and
* between NVIDIA and Licensee ("License Agreement") or electronically
* accepted by Licensee. Notwithstanding any terms or conditions to
* the contrary in the License Agreement, reproduction or disclosure
* of the Licensed Deliverables to any third party without the express
* written consent of NVIDIA is prohibited.
*
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THESE LICENSED DELIVERABLES.
*
* U.S. Government End Users. These Licensed Deliverables are a
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
* 1995), consisting of "commercial computer software" and "commercial
* computer software documentation" as such terms are used in 48
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the Licensed Deliverables with
* only those rights set forth herein.
*
* Any use of the Licensed Deliverables in individual and commercial
* software must include, in the user documentation and internal
* comments to the code, the above Disclaimer and U.S. Government End
* Users Notice.
*/
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include "cublas_utils.h"
using data_type = double;
int main(int argc, char *argv[]) {
hipblasHandle_t cublasH = NULL;
hipStream_t stream = NULL;
const int m = 2;
const int n = 2;
const int k = 2;
const int lda = 2;
const int ldb = 2;
const int ldc = 2;
const int batch_count = 2;
/*
* A = | 1.0 | 2.0 | 5.0 | 6.0 |
* | 3.0 | 4.0 | 7.0 | 8.0 |
*
* B = | 5.0 | 6.0 | 9.0 | 10.0 |
* | 7.0 | 8.0 | 11.0 | 12.0 |
*/
const std::vector<std::vector<data_type>> A_array = {{1.0 ,3.0, 2.0, 4.0},
{5.0, 7.0, 6.0, 8.0}};
const std::vector<std::vector<data_type>> B_array = {{5.0, 7.0, 6.0, 8.0},
{9.0, 11.0, 10.0, 12.0}};
std::vector<std::vector<data_type>> C_array(batch_count, std::vector<data_type>(m * n));
const data_type alpha = 1.0;
const data_type beta = 0.0;
data_type **d_A_array = nullptr;
data_type **d_B_array = nullptr;
data_type **d_C_array = nullptr;
std::vector<data_type *> d_A(batch_count, nullptr);
std::vector<data_type *> d_B(batch_count, nullptr);
std::vector<data_type *> d_C(batch_count, nullptr);
hipblasOperation_t transa = HIPBLAS_OP_N;
hipblasOperation_t transb = HIPBLAS_OP_N;
printf("A[0]\n");
print_matrix(m, k, A_array[0].data(), lda);
printf("=====\n");
printf("A[1]\n");
print_matrix(m, k, A_array[1].data(), lda);
printf("=====\n");
printf("B[0]\n");
print_matrix(k, n, B_array[0].data(), ldb);
printf("=====\n");
printf("B[1]\n");
print_matrix(k, n, B_array[1].data(), ldb);
printf("=====\n");
/* step 1: create cublas handle, bind a stream */
CUBLAS_CHECK(hipblasCreate(&cublasH));
CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
CUBLAS_CHECK(hipblasSetStream(cublasH, stream));
/* step 2: copy data to device */
for (int i = 0; i < batch_count; i++) {
CUDA_CHECK(
hipMalloc(reinterpret_cast<void **>(&d_A[i]), sizeof(data_type) * A_array[i].size()));
CUDA_CHECK(
hipMalloc(reinterpret_cast<void **>(&d_B[i]), sizeof(data_type) * B_array[i].size()));
CUDA_CHECK(
hipMalloc(reinterpret_cast<void **>(&d_C[i]), sizeof(data_type) * C_array[i].size()));
}
CUDA_CHECK(
hipMalloc(reinterpret_cast<void **>(&d_A_array), sizeof(data_type *) * batch_count));
CUDA_CHECK(
hipMalloc(reinterpret_cast<void **>(&d_B_array), sizeof(data_type *) * batch_count));
CUDA_CHECK(
hipMalloc(reinterpret_cast<void **>(&d_C_array), sizeof(data_type *) * batch_count));
for (int i = 0; i < batch_count; i++) {
CUDA_CHECK(hipMemcpyAsync(d_A[i], A_array[i].data(), sizeof(data_type) * A_array[i].size(),
hipMemcpyHostToDevice, stream));
CUDA_CHECK(hipMemcpyAsync(d_B[i], B_array[i].data(), sizeof(data_type) * B_array[i].size(),
hipMemcpyHostToDevice, stream));
}
CUDA_CHECK(hipMemcpyAsync(d_A_array, d_A.data(), sizeof(data_type *) * batch_count,
hipMemcpyHostToDevice, stream));
CUDA_CHECK(hipMemcpyAsync(d_B_array, d_B.data(), sizeof(data_type *) * batch_count,
hipMemcpyHostToDevice, stream));
CUDA_CHECK(hipMemcpyAsync(d_C_array, d_C.data(), sizeof(data_type *) * batch_count,
hipMemcpyHostToDevice, stream));
/* step 3: compute */
CUBLAS_CHECK(hipblasDgemmBatched(cublasH, transa, transb, m, n, k, &alpha, d_A_array, lda,
d_B_array, ldb, &beta, d_C_array, ldc, batch_count));
/* step 4: copy data to host */
for (int i = 0; i < batch_count; i++) {
CUDA_CHECK(hipMemcpyAsync(C_array[i].data(), d_C[i], sizeof(data_type) * C_array[i].size(),
hipMemcpyDeviceToHost, stream));
}
CUDA_CHECK(hipStreamSynchronize(stream));
/*
* C = | 19.0 | 22.0 | 111.0 | 122.0 |
* | 43.0 | 50.0 | 151.0 | 166.0 |
*/
printf("C[0]\n");
print_matrix(m, n, C_array[0].data(), ldc);
printf("=====\n");
printf("C[1]\n");
print_matrix(m, n, C_array[1].data(), ldc);
printf("=====\n");
/* free resources */
CUDA_CHECK(hipFree(d_A_array));
CUDA_CHECK(hipFree(d_B_array));
CUDA_CHECK(hipFree(d_C_array));
for (int i = 0; i < batch_count; i++) {
CUDA_CHECK(hipFree(d_A[i]));
CUDA_CHECK(hipFree(d_B[i]));
CUDA_CHECK(hipFree(d_C[i]));
}
CUBLAS_CHECK(hipblasDestroy(cublasH));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
| f7a11f45451c2590fd69f33995422b6fa3df1ecd.cu | /*
* Copyright 2020 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO LICENSEE:
*
* This source code and/or documentation ("Licensed Deliverables") are
* subject to NVIDIA intellectual property rights under U.S. and
* international Copyright laws.
*
* These Licensed Deliverables contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a form of NVIDIA software license agreement by and
* between NVIDIA and Licensee ("License Agreement") or electronically
* accepted by Licensee. Notwithstanding any terms or conditions to
* the contrary in the License Agreement, reproduction or disclosure
* of the Licensed Deliverables to any third party without the express
* written consent of NVIDIA is prohibited.
*
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THESE LICENSED DELIVERABLES.
*
* U.S. Government End Users. These Licensed Deliverables are a
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
* 1995), consisting of "commercial computer software" and "commercial
* computer software documentation" as such terms are used in 48
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the Licensed Deliverables with
* only those rights set forth herein.
*
* Any use of the Licensed Deliverables in individual and commercial
* software must include, in the user documentation and internal
* comments to the code, the above Disclaimer and U.S. Government End
* Users Notice.
*/
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include "cublas_utils.h"
using data_type = double;
int main(int argc, char *argv[]) {
cublasHandle_t cublasH = NULL;
cudaStream_t stream = NULL;
const int m = 2;
const int n = 2;
const int k = 2;
const int lda = 2;
const int ldb = 2;
const int ldc = 2;
const int batch_count = 2;
/*
* A = | 1.0 | 2.0 | 5.0 | 6.0 |
* | 3.0 | 4.0 | 7.0 | 8.0 |
*
* B = | 5.0 | 6.0 | 9.0 | 10.0 |
* | 7.0 | 8.0 | 11.0 | 12.0 |
*/
const std::vector<std::vector<data_type>> A_array = {{1.0 ,3.0, 2.0, 4.0},
{5.0, 7.0, 6.0, 8.0}};
const std::vector<std::vector<data_type>> B_array = {{5.0, 7.0, 6.0, 8.0},
{9.0, 11.0, 10.0, 12.0}};
std::vector<std::vector<data_type>> C_array(batch_count, std::vector<data_type>(m * n));
const data_type alpha = 1.0;
const data_type beta = 0.0;
data_type **d_A_array = nullptr;
data_type **d_B_array = nullptr;
data_type **d_C_array = nullptr;
std::vector<data_type *> d_A(batch_count, nullptr);
std::vector<data_type *> d_B(batch_count, nullptr);
std::vector<data_type *> d_C(batch_count, nullptr);
cublasOperation_t transa = CUBLAS_OP_N;
cublasOperation_t transb = CUBLAS_OP_N;
printf("A[0]\n");
print_matrix(m, k, A_array[0].data(), lda);
printf("=====\n");
printf("A[1]\n");
print_matrix(m, k, A_array[1].data(), lda);
printf("=====\n");
printf("B[0]\n");
print_matrix(k, n, B_array[0].data(), ldb);
printf("=====\n");
printf("B[1]\n");
print_matrix(k, n, B_array[1].data(), ldb);
printf("=====\n");
/* step 1: create cublas handle, bind a stream */
CUBLAS_CHECK(cublasCreate(&cublasH));
CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
CUBLAS_CHECK(cublasSetStream(cublasH, stream));
/* step 2: copy data to device */
for (int i = 0; i < batch_count; i++) {
CUDA_CHECK(
cudaMalloc(reinterpret_cast<void **>(&d_A[i]), sizeof(data_type) * A_array[i].size()));
CUDA_CHECK(
cudaMalloc(reinterpret_cast<void **>(&d_B[i]), sizeof(data_type) * B_array[i].size()));
CUDA_CHECK(
cudaMalloc(reinterpret_cast<void **>(&d_C[i]), sizeof(data_type) * C_array[i].size()));
}
CUDA_CHECK(
cudaMalloc(reinterpret_cast<void **>(&d_A_array), sizeof(data_type *) * batch_count));
CUDA_CHECK(
cudaMalloc(reinterpret_cast<void **>(&d_B_array), sizeof(data_type *) * batch_count));
CUDA_CHECK(
cudaMalloc(reinterpret_cast<void **>(&d_C_array), sizeof(data_type *) * batch_count));
for (int i = 0; i < batch_count; i++) {
CUDA_CHECK(cudaMemcpyAsync(d_A[i], A_array[i].data(), sizeof(data_type) * A_array[i].size(),
cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(d_B[i], B_array[i].data(), sizeof(data_type) * B_array[i].size(),
cudaMemcpyHostToDevice, stream));
}
CUDA_CHECK(cudaMemcpyAsync(d_A_array, d_A.data(), sizeof(data_type *) * batch_count,
cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(d_B_array, d_B.data(), sizeof(data_type *) * batch_count,
cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(d_C_array, d_C.data(), sizeof(data_type *) * batch_count,
cudaMemcpyHostToDevice, stream));
/* step 3: compute */
CUBLAS_CHECK(cublasDgemmBatched(cublasH, transa, transb, m, n, k, &alpha, d_A_array, lda,
d_B_array, ldb, &beta, d_C_array, ldc, batch_count));
/* step 4: copy data to host */
for (int i = 0; i < batch_count; i++) {
CUDA_CHECK(cudaMemcpyAsync(C_array[i].data(), d_C[i], sizeof(data_type) * C_array[i].size(),
cudaMemcpyDeviceToHost, stream));
}
CUDA_CHECK(cudaStreamSynchronize(stream));
/*
* C = | 19.0 | 22.0 | 111.0 | 122.0 |
* | 43.0 | 50.0 | 151.0 | 166.0 |
*/
printf("C[0]\n");
print_matrix(m, n, C_array[0].data(), ldc);
printf("=====\n");
printf("C[1]\n");
print_matrix(m, n, C_array[1].data(), ldc);
printf("=====\n");
/* free resources */
CUDA_CHECK(cudaFree(d_A_array));
CUDA_CHECK(cudaFree(d_B_array));
CUDA_CHECK(cudaFree(d_C_array));
for (int i = 0; i < batch_count; i++) {
CUDA_CHECK(cudaFree(d_A[i]));
CUDA_CHECK(cudaFree(d_B[i]));
CUDA_CHECK(cudaFree(d_C[i]));
}
CUBLAS_CHECK(cublasDestroy(cublasH));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
35a945b84a576093aa3503e3b1b652ac47ef05ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "CUDA.h"
#include "kernels.h"
#include "../Common/Utils.h"
#include <cstdio>
#include <algorithm>
CUDA_HOST_DEVICE int32_t fibonacci(int32_t n) {
if (n == 0) {
return 0;
}
if (n == 1) {
return 1;
}
return fibonacci(n - 1) + fibonacci(n - 2);
}
int main()
{
Utils::reportGPUUsageInfo();
Utils::queryDeviceProperties();
constexpr auto width = 800;
constexpr auto height = 800;
//juliaKernel(width, height);
//GPUTiming(addKernel, "aaa", "bbb");
//waveKernel(width, height, 100);
//sharedMemoryKernel(width, height);
rayTracingKernel(width, height);
return 0;
}
| 35a945b84a576093aa3503e3b1b652ac47ef05ab.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "CUDA.h"
#include "kernels.h"
#include "../Common/Utils.h"
#include <cstdio>
#include <algorithm>
CUDA_HOST_DEVICE int32_t fibonacci(int32_t n) {
if (n == 0) {
return 0;
}
if (n == 1) {
return 1;
}
return fibonacci(n - 1) + fibonacci(n - 2);
}
int main()
{
Utils::reportGPUUsageInfo();
Utils::queryDeviceProperties();
constexpr auto width = 800;
constexpr auto height = 800;
//juliaKernel(width, height);
//GPUTiming(addKernel, "aaa", "bbb");
//waveKernel(width, height, 100);
//sharedMemoryKernel(width, height);
rayTracingKernel(width, height);
return 0;
}
|
fce296ae6c93d4c952b53d0ebd40798995854317.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <helper_cuda.h>
#include "scan_common.h"
//All three kernels run 512 threads per workgroup
//Must be a power of two
#define THREADBLOCK_SIZE 512
#define LOOP_PERTHREAD 16
#define LOOP_PERTHREAD2 16
////////////////////////////////////////////////////////////////////////////////
// Basic scan codelets
////////////////////////////////////////////////////////////////////////////////
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size)
{
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for (uint offset = 1; offset < size; offset <<= 1)
{
__syncthreads();
uint t = s_Data[pos] + s_Data[pos - offset];
__syncthreads();
s_Data[pos] = t;
}
return s_Data[pos];
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size)
{
return scan1Inclusive(idata, s_Data, size) - idata;
}
inline __device__ uint4 scan4Inclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
//Level-0 inclusive scan
idata4.y += idata4.x;
idata4.z += idata4.y;
idata4.w += idata4.z;
//Level-1 exclusive scan
uint oval = scan1Exclusive(idata4.w, s_Data, size / 4);
idata4.x += oval;
idata4.y += oval;
idata4.z += oval;
idata4.w += oval;
return idata4;
}
//Exclusive vector scan: the array to be scanned is stored
//in local thread memory scope as uint4
inline __device__ uint4 scan4Exclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
uint4 odata4 = scan4Inclusive(idata4, s_Data, size);
odata4.x -= idata4.x;
odata4.y -= idata4.y;
odata4.z -= idata4.z;
odata4.w -= idata4.w;
return odata4;
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
__global__ void scanExclusiveShared(
uint4 *d_Dst,
uint4 *d_Src,
uint size
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load data
uint4 idata4 = d_Src[pos];
//Calculate exclusive scan
uint4 odata4 = scan4Exclusive(idata4, s_Data, size);
//Write back
d_Dst[pos] = odata4;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
__global__ void scanExclusiveShared2(
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Dst[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
d_Buf[pos] = odata;
}
}
__global__ void scanExclusiveShared3(
uint *e_Buf,
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Buf[THREADBLOCK_SIZE -1 + pos * THREADBLOCK_SIZE] + d_Dst[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
e_Buf[pos] = odata;
}
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
__global__ void uniformUpdate(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0)
{
buf = d_Buffer[blockIdx.x];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
__global__ void uniformUpdate2(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint temp = blockIdx.x/THREADBLOCK_SIZE;
if (threadIdx.x == 0)
{
buf = d_Buffer[temp];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
__global__ void diff_kernel(
uint *d_Data,
uint *d_Src,
uint pnum,
uint length,
uint size
)
{
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint p_n = pnum;
uint len = length;
uint POS = pos * LOOP_PERTHREAD;
uint i;
for(i = POS ; (i < POS + LOOP_PERTHREAD)&&(i < len-1); i++){
d_Data[i] = d_Src[(i+1)*p_n] - d_Src[i * p_n];
}
if(i == (len-1)){
d_Data[len-1] = size - d_Src[(len-1)*p_n];
}
}
__global__ void transport_kernel(
uint *d_Data,
uint *d_Src,
uint pnum,
uint length,
uint size
)
{
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint p_n = pnum;
uint len = length;
uint POS = pos * LOOP_PERTHREAD2;
uint i;
for(i = POS ; (i < POS + LOOP_PERTHREAD2)&&(i < len); i++){
d_Data[i] = d_Src[i * p_n];
}
if(i == len){
d_Data[len] = size;
}
}
__global__ void getValue_kernel(
uint *d_Data,
uint *d_Src,
uint loc
)
{
d_Data[0] = d_Src[loc-1];
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Derived as 4 * THREADBLOCK_SIZE elements per bottom-level block, times THREADBLOCK_SIZE * THREADBLOCK_SIZE blocks covered by the two upper scan levels
//Due to scanExclusiveShared<<<>>>() 1D block addressing
extern "C" const uint MAX_BATCH_ELEMENTS = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_SHORT_ARRAY_SIZE = 4;
extern "C" const uint MAX_SHORT_ARRAY_SIZE = 4 * THREADBLOCK_SIZE;
extern "C" const uint MIN_LARGE_ARRAY_SIZE = 8 * THREADBLOCK_SIZE;
extern "C" const uint MAX_LARGE_ARRAY_SIZE = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_LL_SIZE = 8 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MAX_LL_SIZE = MAX_BATCH_ELEMENTS;//4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
//Internal exclusive scan buffer
static uint *d_Buf;
static uint *e_Buf;
extern "C" void initScan(void)
{
checkCudaErrors(hipMalloc((void **)&d_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE)) * sizeof(uint)));
checkCudaErrors(hipMalloc((void **)&e_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)) * sizeof(uint)));
}
extern "C" void closeScan(void)
{
checkCudaErrors(hipFree(d_Buf));
checkCudaErrors(hipFree(e_Buf));
}
static uint factorRadix2(uint &log2L, uint L)
{
if (!L)
{
log2L = 0;
return 0;
}
else
{
for (log2L = 0; (L & 1) == 0; L >>= 1, log2L++);
return L;
}
}
static uint iDivUp(uint dividend, uint divisor)
{
return ((dividend % divisor) == 0) ? (dividend / divisor) : (dividend / divisor + 1);
}
extern "C" size_t scanExclusiveShort(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
//Check supported size range
assert((arrayLength >= MIN_SHORT_ARRAY_SIZE) && (arrayLength <= MAX_SHORT_ARRAY_SIZE));
//Check total batch size limit
assert(arrayLength <= MAX_BATCH_ELEMENTS);
//Check all threadblocks to be fully packed with data
assert(arrayLength % (4 * THREADBLOCK_SIZE) == 0);
hipLaunchKernelGGL(( scanExclusiveShared), dim3(arrayLength / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
arrayLength
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
return THREADBLOCK_SIZE;
}
extern "C" size_t scanExclusiveLarge(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert(arrayLength%MAX_SHORT_ARRAY_SIZE == 0);
//Check supported size range
assert((arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE));
//Check total batch size limit
assert(arrayLength <= MAX_BATCH_ELEMENTS);
hipLaunchKernelGGL(( scanExclusiveShared), dim3(arrayLength / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount2 = 1;//iDivUp((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
hipLaunchKernelGGL(( scanExclusiveShared2), dim3(blockCount2), dim3(THREADBLOCK_SIZE), 0, 0,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE),
array_temp
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
hipLaunchKernelGGL(( uniformUpdate), dim3((arrayLength) / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint *)d_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
return THREADBLOCK_SIZE;
}
extern "C" size_t scanExclusiveLL(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert((arrayLength%MAX_LARGE_ARRAY_SIZE) == 0);
//Check supported size range
assert((arrayLength >= MIN_LL_SIZE) && (arrayLength <= MAX_LL_SIZE));
//Check total batch size limit
assert((arrayLength) <= MAX_BATCH_ELEMENTS);
hipLaunchKernelGGL(( scanExclusiveShared), dim3(arrayLength / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
//Now the prefix sum within each thread block is done
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
const uint blockCount2 = iDivUp (arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
hipLaunchKernelGGL(( scanExclusiveShared2), dim3(blockCount2), dim3(THREADBLOCK_SIZE), 0, 0,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE),
THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
//the prefix sum of the last element of each thread block is done;
//this scan can only cover THREADBLOCK_SIZE elements at once,
//so we need one more prefix sum over those last elements.
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount3 = 1;//(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE);
hipLaunchKernelGGL(( scanExclusiveShared3), dim3(blockCount3), dim3(THREADBLOCK_SIZE), 0, 0,
(uint *)e_Buf,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE),
array_temp
//arrayLength / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)
);
getLastCudaError("scanExclusiveShared3() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
//add d_Buf to each array of d_Dst
hipLaunchKernelGGL(( uniformUpdate), dim3(arrayLength / (4 * THREADBLOCK_SIZE )), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint *)d_Buf
);
//add e_Buf to each array of d_Dst
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( uniformUpdate2), dim3(arrayLength / (4 * THREADBLOCK_SIZE )), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint *)e_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
return THREADBLOCK_SIZE;
}
extern "C" size_t diff_Part(
uint *d_Dst,
uint *d_Src,
uint diff,
uint arrayLength,
uint size
)
{
//Check total batch size limit
//assert((arrayLength) <= MAX_BATCH_ELEMENTS);
const uint blockCount = iDivUp(arrayLength , LOOP_PERTHREAD*THREADBLOCK_SIZE);
hipLaunchKernelGGL(( diff_kernel), dim3(blockCount), dim3(THREADBLOCK_SIZE), 0, 0,
d_Dst,
d_Src,
diff,
arrayLength,
size
);
getLastCudaError("diff_Part() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
return THREADBLOCK_SIZE;
}
extern "C" void transport_gpu(
uint *d_Dst,
uint *d_Src,
uint diff,
uint arrayLength,
uint size
)
{
//Check total batch size limit
//assert((arrayLength) <= MAX_BATCH_ELEMENTS);
const uint blockCount = iDivUp(arrayLength , LOOP_PERTHREAD2*THREADBLOCK_SIZE);
hipLaunchKernelGGL(( transport_kernel), dim3(blockCount), dim3(THREADBLOCK_SIZE), 0, 0,
d_Dst,
d_Src,
diff,
arrayLength,
size
);
getLastCudaError("transport_gpu() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
}
//transport input data to output per diff
extern "C" void getValue_gpu(
uint *d_Dst,
uint *d_Src,
uint loc
)
{
//Check total batch size limit
//assert((arrayLength) <= MAX_BATCH_ELEMENTS);
hipLaunchKernelGGL(( getValue_kernel), dim3(1), dim3(1), 0, 0,
d_Dst,
d_Src,
loc
);
getLastCudaError("transport_gpu() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
}
| fce296ae6c93d4c952b53d0ebd40798995854317.cu | /*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <helper_cuda.h>
#include "scan_common.h"
//All three kernels run 512 threads per workgroup
//Must be a power of two
#define THREADBLOCK_SIZE 512
#define LOOP_PERTHREAD 16
#define LOOP_PERTHREAD2 16
////////////////////////////////////////////////////////////////////////////////
// Basic ccan codelets
////////////////////////////////////////////////////////////////////////////////
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size)
{
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for (uint offset = 1; offset < size; offset <<= 1)
{
__syncthreads();
uint t = s_Data[pos] + s_Data[pos - offset];
__syncthreads();
s_Data[pos] = t;
}
return s_Data[pos];
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size)
{
return scan1Inclusive(idata, s_Data, size) - idata;
}
inline __device__ uint4 scan4Inclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
//Level-0 inclusive scan
idata4.y += idata4.x;
idata4.z += idata4.y;
idata4.w += idata4.z;
//Level-1 exclusive scan
uint oval = scan1Exclusive(idata4.w, s_Data, size / 4);
idata4.x += oval;
idata4.y += oval;
idata4.z += oval;
idata4.w += oval;
return idata4;
}
//Exclusive vector scan: the array to be scanned is stored
//in local thread memory scope as uint4
inline __device__ uint4 scan4Exclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
uint4 odata4 = scan4Inclusive(idata4, s_Data, size);
odata4.x -= idata4.x;
odata4.y -= idata4.y;
odata4.z -= idata4.z;
odata4.w -= idata4.w;
return odata4;
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
__global__ void scanExclusiveShared(
uint4 *d_Dst,
uint4 *d_Src,
uint size
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load data
uint4 idata4 = d_Src[pos];
//Calculate exclusive scan
uint4 odata4 = scan4Exclusive(idata4, s_Data, size);
//Write back
d_Dst[pos] = odata4;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
__global__ void scanExclusiveShared2(
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Dst[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
d_Buf[pos] = odata;
}
}
__global__ void scanExclusiveShared3(
uint *e_Buf,
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Buf[THREADBLOCK_SIZE -1 + pos * THREADBLOCK_SIZE] + d_Dst[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
e_Buf[pos] = odata;
}
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
__global__ void uniformUpdate(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0)
{
buf = d_Buffer[blockIdx.x];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
__global__ void uniformUpdate2(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint temp = blockIdx.x/THREADBLOCK_SIZE;
if (threadIdx.x == 0)
{
buf = d_Buffer[temp];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
__global__ void diff_kernel(
uint *d_Data,
uint *d_Src,
uint pnum,
uint length,
uint size
)
{
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint p_n = pnum;
uint len = length;
uint POS = pos * LOOP_PERTHREAD;
uint i;
for(i = POS ; (i < POS + LOOP_PERTHREAD)&&(i < len-1); i++){
d_Data[i] = d_Src[(i+1)*p_n] - d_Src[i * p_n];
}
if(i == (len-1)){
d_Data[len-1] = size - d_Src[(len-1)*p_n];
}
}
__global__ void transport_kernel(
uint *d_Data,
uint *d_Src,
uint pnum,
uint length,
uint size
)
{
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint p_n = pnum;
uint len = length;
uint POS = pos * LOOP_PERTHREAD2;
uint i;
for(i = POS ; (i < POS + LOOP_PERTHREAD2)&&(i < len); i++){
d_Data[i] = d_Src[i * p_n];
}
if(i == len){
d_Data[len] = size;
}
}
__global__ void getValue_kernel(
uint *d_Data,
uint *d_Src,
uint loc
)
{
d_Data[0] = d_Src[loc-1];
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Derived as 4 * THREADBLOCK_SIZE elements per bottom-level block, times THREADBLOCK_SIZE * THREADBLOCK_SIZE blocks covered by the two upper scan levels
//Due to scanExclusiveShared<<<>>>() 1D block addressing
extern "C" const uint MAX_BATCH_ELEMENTS = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_SHORT_ARRAY_SIZE = 4;
extern "C" const uint MAX_SHORT_ARRAY_SIZE = 4 * THREADBLOCK_SIZE;
extern "C" const uint MIN_LARGE_ARRAY_SIZE = 8 * THREADBLOCK_SIZE;
extern "C" const uint MAX_LARGE_ARRAY_SIZE = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_LL_SIZE = 8 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MAX_LL_SIZE = MAX_BATCH_ELEMENTS;//4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
//Internal exclusive scan buffer
static uint *d_Buf;
static uint *e_Buf;
extern "C" void initScan(void)
{
checkCudaErrors(cudaMalloc((void **)&d_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE)) * sizeof(uint)));
checkCudaErrors(cudaMalloc((void **)&e_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)) * sizeof(uint)));
}
extern "C" void closeScan(void)
{
checkCudaErrors(cudaFree(d_Buf));
checkCudaErrors(cudaFree(e_Buf));
}
static uint factorRadix2(uint &log2L, uint L)
{
if (!L)
{
log2L = 0;
return 0;
}
else
{
for (log2L = 0; (L & 1) == 0; L >>= 1, log2L++);
return L;
}
}
static uint iDivUp(uint dividend, uint divisor)
{
return ((dividend % divisor) == 0) ? (dividend / divisor) : (dividend / divisor + 1);
}
extern "C" size_t scanExclusiveShort(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
//Check supported size range
assert((arrayLength >= MIN_SHORT_ARRAY_SIZE) && (arrayLength <= MAX_SHORT_ARRAY_SIZE));
//Check total batch size limit
assert(arrayLength <= MAX_BATCH_ELEMENTS);
//Check all threadblocks to be fully packed with data
assert(arrayLength % (4 * THREADBLOCK_SIZE) == 0);
scanExclusiveShared<<<arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
arrayLength
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
return THREADBLOCK_SIZE;
}
extern "C" size_t scanExclusiveLarge(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert(arrayLength%MAX_SHORT_ARRAY_SIZE == 0);
//Check supported size range
assert((arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE));
//Check total batch size limit
assert(arrayLength <= MAX_BATCH_ELEMENTS);
scanExclusiveShared<<<arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount2 = 1;//iDivUp((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
scanExclusiveShared2<<< blockCount2, THREADBLOCK_SIZE>>>(
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE),
array_temp
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
uniformUpdate<<<(arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint *)d_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
return THREADBLOCK_SIZE;
}
extern "C" size_t scanExclusiveLL(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert((arrayLength%MAX_LARGE_ARRAY_SIZE) == 0);
//Check supported size range
assert((arrayLength >= MIN_LL_SIZE) && (arrayLength <= MAX_LL_SIZE));
//Check total batch size limit
assert((arrayLength) <= MAX_BATCH_ELEMENTS);
scanExclusiveShared<<<arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
//Now the prefix sum within each thread block is done
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
const uint blockCount2 = iDivUp (arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
scanExclusiveShared2<<< blockCount2, THREADBLOCK_SIZE>>>(
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE),
THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
//the prefix sum of the last element of each thread block is done;
//this scan can only cover THREADBLOCK_SIZE elements at once,
//so we need one more prefix sum over those last elements.
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount3 = 1;//(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE);
scanExclusiveShared3<<< blockCount3, THREADBLOCK_SIZE>>>(
(uint *)e_Buf,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE),
array_temp
//arrayLength / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)
);
getLastCudaError("scanExclusiveShared3() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
//add d_Buf to each array of d_Dst
uniformUpdate<<<arrayLength / (4 * THREADBLOCK_SIZE ), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint *)d_Buf
);
//add e_Buf to each array of d_Dst
checkCudaErrors(cudaDeviceSynchronize());
uniformUpdate2<<<arrayLength / (4 * THREADBLOCK_SIZE ), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint *)e_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
return THREADBLOCK_SIZE;
}
extern "C" size_t diff_Part(
uint *d_Dst,
uint *d_Src,
uint diff,
uint arrayLength,
uint size
)
{
//Check total batch size limit
//assert((arrayLength) <= MAX_BATCH_ELEMENTS);
const uint blockCount = iDivUp(arrayLength , LOOP_PERTHREAD*THREADBLOCK_SIZE);
diff_kernel<<<blockCount, THREADBLOCK_SIZE>>>(
d_Dst,
d_Src,
diff,
arrayLength,
size
);
getLastCudaError("diff_Part() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
return THREADBLOCK_SIZE;
}
extern "C" void transport_gpu(
uint *d_Dst,
uint *d_Src,
uint diff,
uint arrayLength,
uint size
)
{
//Check total batch size limit
//assert((arrayLength) <= MAX_BATCH_ELEMENTS);
const uint blockCount = iDivUp(arrayLength , LOOP_PERTHREAD2*THREADBLOCK_SIZE);
transport_kernel<<<blockCount, THREADBLOCK_SIZE>>>(
d_Dst,
d_Src,
diff,
arrayLength,
size
);
getLastCudaError("transport_gpu() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
}
//transport input data to output per diff
extern "C" void getValue_gpu(
uint *d_Dst,
uint *d_Src,
uint loc
)
{
//Check total batch size limit
//assert((arrayLength) <= MAX_BATCH_ELEMENTS);
getValue_kernel<<<1, 1>>>(
d_Dst,
d_Src,
loc
);
getLastCudaError("transport_gpu() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
}
|
b1e2d429b32e95f09ea842ca05c2eb7e2c7f4934.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "beam_search_impl.h"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "cub/util_type.cuh"
namespace onnxruntime {
namespace contrib {
namespace cuda {
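// Initialize beam scores: the first beam of every batch gets 0 and the remaining beams get
// -1e9, so only beam 0 contributes candidates at the first generation step.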
__global__ void InitKernel(float* beam_scores,
int num_beams,
int total_elements) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < total_elements) {
int beam_index = index % num_beams;
beam_scores[index] = beam_index > 0 ? static_cast<float>(-1e9) : 0.0f;
}
}
void LaunchInitKernel(
float* beam_scores,
int batch_size,
int num_beams,
hipStream_t stream) {
int total_elements = batch_size * num_beams;
constexpr int blockSize = 256;
const int gridSize = (total_elements + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( InitKernel), dim3(gridSize), dim3(blockSize), 0, stream, beam_scores, num_beams, total_elements);
}
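// Split flat top-k indices over (num_beams * vocab_size) into a beam index and a token id.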
__global__ void NextTokenKernel(const int64_t* next_token_indices,
int32_t* next_indices,
int32_t* next_tokens,
int vocab_size,
int total_elements) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < total_elements) {
next_indices[index] = next_token_indices[index] / vocab_size;
next_tokens[index] = next_token_indices[index] % vocab_size;
}
}
void LaunchNextTokenKernel(const int64_t* next_token_indices,
int32_t* next_indices,
int32_t* next_tokens,
int batch_size,
int top_k,
int vocab_size,
hipStream_t stream) {
int total_elements = batch_size * top_k;
constexpr int blockSize = 256;
const int gridSize = (total_elements + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( NextTokenKernel), dim3(gridSize), dim3(blockSize), 0, stream, next_token_indices, next_indices, next_tokens, vocab_size, total_elements);
}
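// One pass over next_token_scores that applies the repetition penalty, no-repeat-ngram
// blocking, vocab and prefix-vocab masks, and the min-length demotion of demote_token_id.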
template <typename T>
__global__ void LogitsProcessKernel(
T* next_token_scores,
const int* vocab_mask,
const int* prefix_vocab_mask,
int num_beams,
int vocab_size,
int total_elements,
int demote_token_id,
int32_t* sequences,
int max_sequence_length,
int current_sequence_length,
float repetition_penalty,
int no_repeat_ngram_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < total_elements) {
int batch_beam_index = index / vocab_size;
int word_id = index % vocab_size;
// RepetitionPenaltyLogitsProcessor
if (repetition_penalty != 1.0f) {
int32_t* current_sequence = sequences + batch_beam_index * max_sequence_length;
bool found = false;
for (int i = 0; i < current_sequence_length; i++) {
if (current_sequence[i] == word_id) {
found = true;
break;
}
}
if (found) {
float score = (float)next_token_scores[index];
next_token_scores[index] = (T)(score < 0 ? score * repetition_penalty : score / repetition_penalty);
}
}
// NoRepeatNGramLogitsProcessor
if (no_repeat_ngram_size > 0 && current_sequence_length >= no_repeat_ngram_size) {
int32_t* current_sequence = sequences + batch_beam_index * max_sequence_length;
bool found = false;
for (int i = no_repeat_ngram_size - 1; i < current_sequence_length; i++) {
if (current_sequence[i] == word_id) { // last token of n-gram matched
found = true;
for (int j = 0; j < no_repeat_ngram_size - 1; j++) { // match the remaining N-1 tokens
if (current_sequence[i - j - 1] != current_sequence[current_sequence_length - 1 - j]) {
found = false;
break;
}
}
if (found) {
break;
}
}
}
if (found) {
next_token_scores[index] = cub::FpLimits<T>::Lowest();
return;
}
}
// VocabMaskLogitsProcessor
if (vocab_mask != nullptr && vocab_mask[word_id] == 0) {
next_token_scores[index] = cub::FpLimits<T>::Lowest();
return;
}
// PrefixVocabMaskLogitsProcessor
int batch_id = batch_beam_index / num_beams;
if (prefix_vocab_mask != nullptr && prefix_vocab_mask[batch_id * vocab_size + word_id] == 0) {
next_token_scores[index] = cub::FpLimits<T>::Lowest();
return;
}
// MinLengthLogitsProcessor
if (word_id == demote_token_id) {
next_token_scores[index] = cub::FpLimits<T>::Lowest();
}
}
}
template <typename T>
void LaunchLogitsProcessKernel(
T* next_token_scores,
const int* vocab_mask,
const int* prefix_vocab_mask,
int batch_size,
int num_beams,
int vocab_size,
int demote_token_id,
int32_t* sequences,
int max_sequence_length,
int current_sequence_length,
float repetition_penalty,
int no_repeat_ngram_size,
hipStream_t stream) {
int total_elements = batch_size * num_beams * vocab_size;
constexpr int blockSize = 256;
const int gridSize = (total_elements + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( LogitsProcessKernel<T>), dim3(gridSize), dim3(blockSize), 0, stream, next_token_scores, vocab_mask, prefix_vocab_mask, num_beams, vocab_size, total_elements, demote_token_id,
sequences, max_sequence_length, current_sequence_length, repetition_penalty, no_repeat_ngram_size);
}
// Instantiation
template void LaunchLogitsProcessKernel(
float* next_token_scores,
const int* vocab_mask,
const int* prefix_vocab_mask,
int batch_size,
int num_beams,
int vocab_size,
int demote_token_id,
int32_t* sequences,
int max_sequence_length,
int current_sequence_length,
float repetition_penalty,
int no_repeat_ngram_size,
hipStream_t stream);
template void LaunchLogitsProcessKernel(
half* next_token_scores,
const int* vocab_mask,
const int* prefix_vocab_mask,
int batch_size,
int num_beams,
int vocab_size,
int demote_token_id,
int32_t* sequences,
int max_sequence_length,
int current_sequence_length,
float repetition_penalty,
int no_repeat_ngram_size,
hipStream_t stream);
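// AddProbsKernel: add each beam's cumulative log-probability to every vocabulary score of that beam.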
__global__ void AddProbsKernel(float* log_probs,
float* cum_log_probs,
const int vocab_size,
const int total_elements) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int batch_beam_index = index / vocab_size;
if (index < total_elements)
log_probs[index] += cum_log_probs[batch_beam_index];
}
template <typename T>
void LaunchAddProbsKernel(T* log_probs,
T* cum_log_probs,
const int batch_size,
const int num_beams,
const int vocab_size,
hipStream_t stream) {
int total_elements = batch_size * num_beams * vocab_size;
constexpr int blockSize = 256;
const int gridSize = (total_elements + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( AddProbsKernel), dim3(gridSize), dim3(blockSize), 0, stream, log_probs, cum_log_probs, vocab_size, total_elements);
}
template void LaunchAddProbsKernel(
float* log_probs,
float* cum_log_probs,
const int batch_size,
const int num_beams,
const int vocab_size,
hipStream_t stream);
template <typename T>
__global__ void UpdateInputsKernel(const T* old_mask_data,
T* mask_data,
int32_t* next_positions,
int batch_beam_size,
int current_length) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < batch_beam_size * current_length) {
// Update attention mask.
int i = index / current_length;
int j = index % current_length;
mask_data[index] = (j < current_length - 1) ? old_mask_data[i * (current_length - 1) + j] : static_cast<T>(1);
// Update sequence length (or next positions).
if (index < batch_beam_size) {
next_positions[index]++;
}
}
}
void LaunchUpdateKernel(const int32_t* old_mask_data,
int32_t* mask_data,
int32_t* next_positions,
int batch_beam_size,
int current_length,
hipStream_t stream) {
assert(current_length > 0);
int total_elements = batch_beam_size * current_length;
constexpr int blockSize = 256;
const int gridSize = (total_elements + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( UpdateInputsKernel<int32_t>), dim3(gridSize), dim3(blockSize), 0, stream, old_mask_data, mask_data, next_positions, batch_beam_size, current_length);
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime | b1e2d429b32e95f09ea842ca05c2eb7e2c7f4934.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "beam_search_impl.h"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "cub/util_type.cuh"
namespace onnxruntime {
namespace contrib {
namespace cuda {
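// InitKernel: set beam scores so that, within each batch, beam 0 starts at 0 and every other beam starts at -1e9.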
__global__ void InitKernel(float* beam_scores,
int num_beams,
int total_elements) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < total_elements) {
int beam_index = index % num_beams;
beam_scores[index] = beam_index > 0 ? static_cast<float>(-1e9) : 0.0f;
}
}
void LaunchInitKernel(
float* beam_scores,
int batch_size,
int num_beams,
cudaStream_t stream) {
int total_elements = batch_size * num_beams;
constexpr int blockSize = 256;
const int gridSize = (total_elements + blockSize - 1) / blockSize;
InitKernel<<<gridSize, blockSize, 0, stream>>>(beam_scores, num_beams, total_elements);
}
__global__ void NextTokenKernel(const int64_t* next_token_indices,
int32_t* next_indices,
int32_t* next_tokens,
int vocab_size,
int total_elements) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < total_elements) {
next_indices[index] = next_token_indices[index] / vocab_size;
next_tokens[index] = next_token_indices[index] % vocab_size;
}
}
void LaunchNextTokenKernel(const int64_t* next_token_indices,
int32_t* next_indices,
int32_t* next_tokens,
int batch_size,
int top_k,
int vocab_size,
cudaStream_t stream) {
int total_elements = batch_size * top_k;
constexpr int blockSize = 256;
const int gridSize = (total_elements + blockSize - 1) / blockSize;
NextTokenKernel<<<gridSize, blockSize, 0, stream>>>(next_token_indices, next_indices, next_tokens, vocab_size, total_elements);
}
template <typename T>
__global__ void LogitsProcessKernel(
T* next_token_scores,
const int* vocab_mask,
const int* prefix_vocab_mask,
int num_beams,
int vocab_size,
int total_elements,
int demote_token_id,
int32_t* sequences,
int max_sequence_length,
int current_sequence_length,
float repetition_penalty,
int no_repeat_ngram_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < total_elements) {
int batch_beam_index = index / vocab_size;
int word_id = index % vocab_size;
// RepetitionPenaltyLogitsProcessor
if (repetition_penalty != 1.0f) {
int32_t* current_sequence = sequences + batch_beam_index * max_sequence_length;
bool found = false;
for (int i = 0; i < current_sequence_length; i++) {
if (current_sequence[i] == word_id) {
found = true;
break;
}
}
if (found) {
float score = (float)next_token_scores[index];
next_token_scores[index] = (T)(score < 0 ? score * repetition_penalty : score / repetition_penalty);
}
}
// NoRepeatNGramLogitsProcessor
if (no_repeat_ngram_size > 0 && current_sequence_length >= no_repeat_ngram_size) {
int32_t* current_sequence = sequences + batch_beam_index * max_sequence_length;
bool found = false;
for (int i = no_repeat_ngram_size - 1; i < current_sequence_length; i++) {
if (current_sequence[i] == word_id) { // last token of n-gram matched
found = true;
for (int j = 0; j < no_repeat_ngram_size - 1; j++) { // match the remaining N-1 tokens
if (current_sequence[i - j - 1] != current_sequence[current_sequence_length - 1 - j]) {
found = false;
break;
}
}
if (found) {
break;
}
}
}
if (found) {
next_token_scores[index] = cub::FpLimits<T>::Lowest();
return;
}
}
// VocabMaskLogitsProcessor
if (vocab_mask != nullptr && vocab_mask[word_id] == 0) {
next_token_scores[index] = cub::FpLimits<T>::Lowest();
return;
}
// PrefixVocabMaskLogitsProcessor
int batch_id = batch_beam_index / num_beams;
if (prefix_vocab_mask != nullptr && prefix_vocab_mask[batch_id * vocab_size + word_id] == 0) {
next_token_scores[index] = cub::FpLimits<T>::Lowest();
return;
}
// MinLengthLogitsProcessor
if (word_id == demote_token_id) {
next_token_scores[index] = cub::FpLimits<T>::Lowest();
}
}
}
template <typename T>
void LaunchLogitsProcessKernel(
T* next_token_scores,
const int* vocab_mask,
const int* prefix_vocab_mask,
int batch_size,
int num_beams,
int vocab_size,
int demote_token_id,
int32_t* sequences,
int max_sequence_length,
int current_sequence_length,
float repetition_penalty,
int no_repeat_ngram_size,
cudaStream_t stream) {
int total_elements = batch_size * num_beams * vocab_size;
constexpr int blockSize = 256;
const int gridSize = (total_elements + blockSize - 1) / blockSize;
LogitsProcessKernel<T><<<gridSize, blockSize, 0, stream>>>(next_token_scores, vocab_mask, prefix_vocab_mask, num_beams, vocab_size, total_elements, demote_token_id,
sequences, max_sequence_length, current_sequence_length, repetition_penalty, no_repeat_ngram_size);
}
// Instantiation
template void LaunchLogitsProcessKernel(
float* next_token_scores,
const int* vocab_mask,
const int* prefix_vocab_mask,
int batch_size,
int num_beams,
int vocab_size,
int demote_token_id,
int32_t* sequences,
int max_sequence_length,
int current_sequence_length,
float repetition_penalty,
int no_repeat_ngram_size,
cudaStream_t stream);
template void LaunchLogitsProcessKernel(
half* next_token_scores,
const int* vocab_mask,
const int* prefix_vocab_mask,
int batch_size,
int num_beams,
int vocab_size,
int demote_token_id,
int32_t* sequences,
int max_sequence_length,
int current_sequence_length,
float repetition_penalty,
int no_repeat_ngram_size,
cudaStream_t stream);
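// AddProbsKernel: add each beam's cumulative log-probability to every vocabulary score of that beam.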
__global__ void AddProbsKernel(float* log_probs,
float* cum_log_probs,
const int vocab_size,
const int total_elements) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int batch_beam_index = index / vocab_size;
if (index < total_elements)
log_probs[index] += cum_log_probs[batch_beam_index];
}
template <typename T>
void LaunchAddProbsKernel(T* log_probs,
T* cum_log_probs,
const int batch_size,
const int num_beams,
const int vocab_size,
cudaStream_t stream) {
int total_elements = batch_size * num_beams * vocab_size;
constexpr int blockSize = 256;
const int gridSize = (total_elements + blockSize - 1) / blockSize;
AddProbsKernel<<<gridSize, blockSize, 0, stream>>>(log_probs, cum_log_probs, vocab_size, total_elements);
}
template void LaunchAddProbsKernel(
float* log_probs,
float* cum_log_probs,
const int batch_size,
const int num_beams,
const int vocab_size,
cudaStream_t stream);
template <typename T>
__global__ void UpdateInputsKernel(const T* old_mask_data,
T* mask_data,
int32_t* next_positions,
int batch_beam_size,
int current_length) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < batch_beam_size * current_length) {
// Update attention mask.
int i = index / current_length;
int j = index % current_length;
mask_data[index] = (j < current_length - 1) ? old_mask_data[i * (current_length - 1) + j] : static_cast<T>(1);
// Update sequence length (or next positions).
if (index < batch_beam_size) {
next_positions[index]++;
}
}
}
void LaunchUpdateKernel(const int32_t* old_mask_data,
int32_t* mask_data,
int32_t* next_positions,
int batch_beam_size,
int current_length,
cudaStream_t stream) {
assert(current_length > 0);
int total_elements = batch_beam_size * current_length;
constexpr int blockSize = 256;
const int gridSize = (total_elements + blockSize - 1) / blockSize;
UpdateInputsKernel<int32_t><<<gridSize, blockSize, 0, stream>>>(old_mask_data, mask_data, next_positions, batch_beam_size, current_length);
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime |
4d692451d71e6df68ff9869e3ed851c378d34f52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
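// _drop32: zero elements whose mask value falls below the dropout rate and multiply the survivors by scale (inverted dropout).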
__global__ void _drop32(int n, float *x, float *xmask, float dropout, float scale) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
if (xmask[i] < dropout) x[i] = 0;
else x[i] *= scale;
i += blockDim.x * gridDim.x;
}
} | 4d692451d71e6df68ff9869e3ed851c378d34f52.cu | #include "includes.h"
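// _drop32: zero elements whose mask value falls below the dropout rate and multiply the survivors by scale (inverted dropout).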
__global__ void _drop32(int n, float *x, float *xmask, float dropout, float scale) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
if (xmask[i] < dropout) x[i] = 0;
else x[i] *= scale;
i += blockDim.x * gridDim.x;
}
} |
f4abc6b9f2e8eb975a1d321ae989c77ae0546e48.hip | // !!! This is a file automatically generated by hipify!!!
#include "dark_cuda.h"
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "gru_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
//#ifdef OPENCV
//#include <opencv2/highgui/highgui_c.h>
//#endif
#include "http_stream.h"
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
typedef struct time_benchmark_layers {
float time;
int layer_id, layer_type;
} time_benchmark_layers;
int time_comparator(const void *pa, const void *pb)
{
time_benchmark_layers a = *(time_benchmark_layers *)pa;
time_benchmark_layers b = *(time_benchmark_layers *)pb;
float diff = a.time - b.time;
if (diff < 0) return 1;
else if (diff > 0) return -1;
return 0;
}
void forward_network_gpu(network net, network_state state)
{
static time_benchmark_layers *avg_time_per_layer = NULL;
static time_benchmark_layers *sorted_avg_time_per_layer = NULL;
double start_time, end_time;
if (net.benchmark_layers) {
if (!avg_time_per_layer) {
avg_time_per_layer = (time_benchmark_layers *)calloc(net.n, sizeof(time_benchmark_layers));
sorted_avg_time_per_layer = (time_benchmark_layers *)calloc(net.n, sizeof(time_benchmark_layers));
}
hipDeviceSynchronize();
}
//printf("\n");
state.workspace = net.workspace;
int i;
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu && state.train){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
if (net.benchmark_layers) {
start_time = get_time_point();
}
l.forward_gpu(l, state);
if (net.benchmark_layers) {
CHECK_CUDA(hipDeviceSynchronize());
end_time = get_time_point();
const double took_time = (end_time - start_time) / 1000;
const double alpha = 0.9;
if (avg_time_per_layer[i].time == 0) {
avg_time_per_layer[i].layer_id = i;
avg_time_per_layer[i].layer_type = l.type;
avg_time_per_layer[i].time = took_time;
}
else avg_time_per_layer[i].time = avg_time_per_layer[i].time * alpha + took_time * (1 - alpha);
sorted_avg_time_per_layer[i] = avg_time_per_layer[i];
printf("\n fw-layer %d - type: %d - %lf ms - avg_time %lf ms \n", i, l.type, took_time, avg_time_per_layer[i].time);
}
if(net.wait_stream)
hipStreamSynchronize(get_cuda_stream());
state.input = l.output_gpu;
//hipDeviceSynchronize();
/*
cuda_pull_array(l.output_gpu, l.output, l.outputs);
hipStreamSynchronize(get_cuda_stream());
float avg_val = 0;
int k;
for (k = 0; k < l.outputs; ++k) avg_val += l.output[k];
printf(" i: %d - avg_val = %f \n", i, avg_val / l.outputs);
*/
/*
cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
if (l.out_w >= 0 && l.out_h >= 1 && l.c >= 3) {
int j;
for (j = 0; j < l.out_c; ++j) {
image img = make_image(l.out_w, l.out_h, 3);
memcpy(img.data, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
memcpy(img.data + l.out_w*l.out_h * 1, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
memcpy(img.data + l.out_w*l.out_h * 2, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
char buff[256];
sprintf(buff, "layer-%d slice-%d", i, j);
show_image(img, buff);
save_image(img, buff);
}
cvWaitKey(0); // wait press-key in console
cvDestroyAllWindows();
}
*/
}
if (net.benchmark_layers) {
printf("\n\nSorted by time (forward):\n");
qsort(sorted_avg_time_per_layer, net.n, sizeof(time_benchmark_layers), time_comparator);
for (i = 0; i < net.n; ++i) {
//printf("layer %d - type: %d - avg_time %lf ms \n", avg_time_per_layer[i].layer_id, avg_time_per_layer[i].layer_type, avg_time_per_layer[i].time);
printf("%d - fw-sort-layer %d - type: %d - avg_time %lf ms \n", i, sorted_avg_time_per_layer[i].layer_id, sorted_avg_time_per_layer[i].layer_type, sorted_avg_time_per_layer[i].time);
}
}
//hipStreamSynchronize(get_cuda_stream()); // sync CUDA-functions
//hipDeviceSynchronize();
}
void backward_network_gpu(network net, network_state state)
{
static time_benchmark_layers *avg_time_per_layer = NULL;
static time_benchmark_layers *sorted_avg_time_per_layer = NULL;
double start_time, end_time;
if (net.benchmark_layers) {
if (!avg_time_per_layer) {
avg_time_per_layer = (time_benchmark_layers *)calloc(net.n, sizeof(time_benchmark_layers));
sorted_avg_time_per_layer = (time_benchmark_layers *)calloc(net.n, sizeof(time_benchmark_layers));
}
hipDeviceSynchronize();
}
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
for(i = net.n-1; i >= 0; --i){
state.index = i;
layer l = net.layers[i];
if (l.stopbackward == 1) break;
if (l.stopbackward > get_current_iteration(net)) break;
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
layer prev = net.layers[i-1];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
if (net.optimized_memory && !prev.keep_delta_gpu) {
state.delta = net.state_delta_gpu;
}
}
if (l.onlyforward) continue;
if (net.benchmark_layers) {
start_time = get_time_point();
}
l.backward_gpu(l, state);
if (net.benchmark_layers) {
CHECK_CUDA(hipDeviceSynchronize());
end_time = get_time_point();
const double took_time = (end_time - start_time) / 1000;
const double alpha = 0.9;
if (avg_time_per_layer[i].time == 0) {
avg_time_per_layer[i].layer_id = i;
avg_time_per_layer[i].layer_type = l.type;
avg_time_per_layer[i].time = took_time;
}
else avg_time_per_layer[i].time = avg_time_per_layer[i].time * alpha + took_time * (1 - alpha);
sorted_avg_time_per_layer[i] = avg_time_per_layer[i];
printf("\n bw-layer %d - type: %d - %lf ms - avg_time %lf ms \n", i, l.type, took_time, avg_time_per_layer[i].time);
}
if (i != 0) {
layer prev = net.layers[i - 1];
if (net.optimized_memory && state.delta && !prev.keep_delta_gpu) {
if (prev.delta_gpu != state.delta) simple_copy_ongpu(prev.outputs*prev.batch, state.delta, prev.delta_gpu);
fill_ongpu(prev.outputs*prev.batch, 0, net.state_delta_gpu, 1);
}
}
/*
if(i != 0)
{
layer l = net.layers[i - 1];
int state_delta_nan_inf = is_nan_or_inf(state.delta, l.outputs * l.batch);
int state_input_nan_inf = is_nan_or_inf(state.input, l.outputs * l.batch);
printf("\n i - %d is_nan_or_inf(s.delta) = %d \n", i, state_delta_nan_inf);
printf(" i - %d is_nan_or_inf(s.input) = %d \n", i, state_input_nan_inf);
if (state_delta_nan_inf || state_input_nan_inf) { printf(" found "); getchar(); }
}
*/
}
if (net.adversarial && net.attention)
{
int img_size = net.w * net.h * net.c;
float *original_input_cpu = (float *)xcalloc(img_size, sizeof(float));
float *original_delta_cpu = (float *)xcalloc(img_size, sizeof(float));
cuda_pull_array(original_input, original_input_cpu, img_size);
cuda_pull_array(original_delta, original_delta_cpu, img_size);
image attention_img = make_attention_image(img_size, original_delta_cpu, original_input_cpu, net.w, net.h, net.c);
show_image(attention_img, "attention_img");
resize_window_cv("attention_img", 500, 500);
free_image(attention_img);
free(original_input_cpu);
free(original_delta_cpu);
}
if (net.adversarial) {
int x_size = get_network_input_size(net)*net.batch;
printf(" x_size = %d, original_delta = %p, original_input = %p, net.learning_rate = %f \n",
x_size, original_delta, original_input, net.learning_rate);
axpy_ongpu(x_size, net.learning_rate, original_delta, 1, original_input, 1);
constrain_min_max_ongpu(x_size, 0, 1, original_input, 1);
}
if (net.benchmark_layers) {
printf("\n\nSorted by time (backward):\n");
qsort(sorted_avg_time_per_layer, net.n, sizeof(time_benchmark_layers), time_comparator);
for (i = 0; i < net.n; ++i) {
//printf("layer %d - type: %d - avg_time %lf ms \n", avg_time_per_layer[i].layer_id, avg_time_per_layer[i].layer_type, avg_time_per_layer[i].time);
printf("%d - bw-sort-layer %d - type: %d - avg_time %lf ms \n", i, sorted_avg_time_per_layer[i].layer_id, sorted_avg_time_per_layer[i].layer_type, sorted_avg_time_per_layer[i].time);
}
}
}
void update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
const int iteration_num = (*net.seen) / (net.batch * net.subdivisions);
int i;
int update_batch = net.batch*net.subdivisions * get_sequence_value(net);
float rate = get_current_rate(net);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);
if (iteration_num > (net.max_batches * 1 / 2)) l.deform = 0;
if (l.burnin_update && (l.burnin_update*net.burn_in > iteration_num)) continue;
if (l.train_only_bn) continue;
if(l.update_gpu && l.dont_update < iteration_num){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay, net.loss_scale);
}
}
}
void forward_backward_network_gpu(network net, float *x, float *y)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
*net.input_gpu = cuda_make_array(x, x_size);
*net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
if (net.adversarial) {
state.delta = cuda_make_array(NULL, x_size);
}
state.truth = *net.truth_gpu;
state.train = 1;
#if defined(CUDNN_HALF) && defined(CUDNN)
int i;
for (i = 0; i < net.n; ++i) {
layer l = net.layers[i];
if (net.cudnn_half){
if (l.type == CONVOLUTIONAL && l.weights_gpu && l.weights_gpu16) {
assert((l.nweights) > 0);
cuda_convert_f32_to_f16(l.weights_gpu, l.nweights, l.weights_gpu16);
}
else if (l.type == CRNN && l.input_layer->weights_gpu && l.input_layer->weights_gpu16) {
assert((l.input_layer->c*l.input_layer->n*l.input_layer->size*l.input_layer->size) > 0);
cuda_convert_f32_to_f16(l.input_layer->weights_gpu, l.input_layer->nweights, l.input_layer->weights_gpu16);
cuda_convert_f32_to_f16(l.self_layer->weights_gpu, l.self_layer->nweights, l.self_layer->weights_gpu16);
cuda_convert_f32_to_f16(l.output_layer->weights_gpu, l.output_layer->nweights, l.output_layer->weights_gpu16);
}
else if (l.type == CONV_LSTM && l.wf->weights_gpu && l.wf->weights_gpu16) {
assert((l.wf->c * l.wf->n * l.wf->size * l.wf->size) > 0);
if (l.peephole) {
cuda_convert_f32_to_f16(l.vf->weights_gpu, l.vf->nweights, l.vf->weights_gpu16);
cuda_convert_f32_to_f16(l.vi->weights_gpu, l.vi->nweights, l.vi->weights_gpu16);
cuda_convert_f32_to_f16(l.vo->weights_gpu, l.vo->nweights, l.vo->weights_gpu16);
}
cuda_convert_f32_to_f16(l.wf->weights_gpu, l.wf->nweights, l.wf->weights_gpu16);
if (!l.bottleneck) {
cuda_convert_f32_to_f16(l.wi->weights_gpu, l.wi->nweights, l.wi->weights_gpu16);
cuda_convert_f32_to_f16(l.wg->weights_gpu, l.wg->nweights, l.wg->weights_gpu16);
cuda_convert_f32_to_f16(l.wo->weights_gpu, l.wo->nweights, l.wo->weights_gpu16);
}
cuda_convert_f32_to_f16(l.uf->weights_gpu, l.uf->nweights, l.uf->weights_gpu16);
cuda_convert_f32_to_f16(l.ui->weights_gpu, l.ui->nweights, l.ui->weights_gpu16);
cuda_convert_f32_to_f16(l.ug->weights_gpu, l.ug->nweights, l.ug->weights_gpu16);
cuda_convert_f32_to_f16(l.uo->weights_gpu, l.uo->nweights, l.uo->weights_gpu16);
}
}
}
#endif
forward_network_gpu(net, state);
//hipStreamSynchronize(get_cuda_stream());
backward_network_gpu(net, state);
if (net.adversarial) {
cuda_free(state.delta);
cuda_pull_array(*net.input_gpu, x, x_size);
}
if(*(state.net.total_bbox) > 0)
fprintf(stderr, " total_bbox = %d, rewritten_bbox = %f %% \n", *(state.net.total_bbox), 100 * (float)*(state.net.rewritten_bbox) / *(state.net.total_bbox));
}
float train_network_datum_gpu(network net, float *x, float *y)
{
*net.seen += net.batch;
if (net.adversarial_lr && rand_int(0, 1) == 1 && get_current_iteration(net) > net.burn_in) {
net.adversarial = 1;
float lr_old = net.learning_rate;
float scale = (get_current_iteration(net) / ((float)net.max_batches));
//scale = sin(scale * M_PI);
net.learning_rate = net.adversarial_lr * scale;
layer l = net.layers[net.n - 1];
int y_size = get_network_output_size(net)*net.batch;
if (net.layers[net.n - 1].truths) y_size = net.layers[net.n - 1].truths*net.batch;
float *truth_cpu = (float *)xcalloc(y_size, sizeof(float));
const int img_size = net.w*net.h*net.c;
float *old_input = (float *)xcalloc(img_size*net.batch, sizeof(float));
memcpy(old_input, x, img_size*net.batch * sizeof(float));
printf("\n adversarial training, adversarial_lr = %f \n", net.adversarial_lr * scale);
forward_backward_network_gpu(net, x, truth_cpu);
int b;
for (b = 0; b < net.batch; ++b) {
if (b % 2 == 1 && net.contrastive) {
//printf(" b = %d old img, ", b);
memcpy(x + img_size*b, old_input + img_size*b, img_size * sizeof(float));
}
}
image im;
im.w = net.w;
im.h = net.h;
im.c = net.c;
im.data = x;
show_image(im, "adversarial data augmentation");
resize_window_cv("adversarial data augmentation", 500, 500);
wait_key_cv(1);
free(old_input);
free(truth_cpu);
net.learning_rate = lr_old;
net.adversarial = 0;
}
forward_backward_network_gpu(net, x, y);
float error = get_network_cost(net);
//if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
const int sequence = get_sequence_value(net);
//if (((*net.seen) / net.batch) % (net.subdivisions*sequence) == 0) update_network_gpu(net);
return error;
}
typedef struct {
network net;
data d;
float *err;
} train_args;
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
pthread_t train_network_in_thread(network net, data d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
void pull_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void push_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay, net.loss_scale);
}
}
void merge_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.nweights, 1, l.weights, 1, base.weights, 1);
if (l.scales) {
axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1);
}
}
void scale_weights(layer l, float s)
{
if (l.type == CONVOLUTIONAL) {
scal_cpu(l.n, s, l.biases, 1);
scal_cpu(l.nweights, s, l.weights, 1);
if (l.scales) {
scal_cpu(l.n, s, l.scales, 1);
}
} else if(l.type == CONNECTED) {
scal_cpu(l.outputs, s, l.biases, 1);
scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
}
}
void pull_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.biases_gpu, l.biases, l.n);
cuda_pull_array(l.weights_gpu, l.weights, l.nweights);
if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void push_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weights_gpu, l.weights, l.nweights);
if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, l.biases, l.outputs);
cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void distribute_weights(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, base.biases, l.n);
cuda_push_array(l.weights_gpu, base.weights, l.nweights);
if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, base.biases, l.outputs);
cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
}
}
void merge_updates(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.nweights, 1, l.weight_updates, 1, base.weight_updates, 1);
if (l.scale_updates) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
}
}
void distribute_updates(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.nweights);
if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
}
}
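// Average layer j's weights across all replicas and push the averaged weights back to every replica.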
void sync_layer(network *nets, int n, int j)
{
//printf("Syncing layer %d\n", j);
int i;
network net = nets[0];
layer base = net.layers[j];
cuda_set_device(net.gpu_index);
pull_weights(base);
for (i = 1; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
//printf("Done syncing layer %d\n", j);
}
typedef struct{
network *nets;
int n;
int j;
} sync_args;
void *sync_layer_thread(void *ptr)
{
sync_args args = *(sync_args*)ptr;
sync_layer(args.nets, args.n, args.j);
free(ptr);
return 0;
}
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
pthread_t thread;
sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
ptr->nets = nets;
ptr->n = n;
ptr->j = j;
if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
return thread;
}
void sync_nets(network *nets, int n, int interval)
{
int j;
int layers = nets[0].n;
pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
*nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
for (j = 0; j < n; ++j){
*nets[j].seen = *nets[0].seen;
}
for (j = 0; j < layers; ++j) {
threads[j] = sync_layer_in_thread(nets, n, j);
}
for (j = 0; j < layers; ++j) {
pthread_join(threads[j], 0);
}
free(threads);
}
float train_networks(network *nets, int n, data d, int interval)
{
int i;
#ifdef _DEBUG
int batch = nets[0].batch;
int subdivisions = nets[0].subdivisions;
assert(batch * subdivisions * n == d.X.rows);
#endif
pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
float *errors = (float *) calloc(n, sizeof(float));
float sum = 0;
for(i = 0; i < n; ++i){
data p = get_data_part(d, i, n);
threads[i] = train_network_in_thread(nets[i], p, errors + i);
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
//printf("%f\n", errors[i]);
sum += errors[i];
}
//hipDeviceSynchronize();
*nets[0].cur_iteration += (n - 1);
*nets[0].seen = nets[0].batch * nets[0].subdivisions * get_current_iteration(nets[0]); // remove this line once both seen & cur_iteration are saved to the weights file
if (get_current_iteration(nets[0]) % interval == 0)
{
printf("Syncing... ");
fflush(stdout);
sync_nets(nets, n, interval);
printf("Done!\n");
}
//hipDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
}
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
float *get_network_output_gpu(network net)
{
int i;
for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
float *network_predict_gpu(network net, float *input)
{
if (net.gpu_index != cuda_get_device())
cuda_set_device(net.gpu_index);
int size = get_network_input_size(net) * net.batch;
network_state state;
state.index = 0;
state.net = net;
//state.input = cuda_make_array(input, size); // memory will be allocated in the parse_network_cfg_custom()
state.input = net.input_state_gpu;
memcpy(net.input_pinned_cpu, input, size * sizeof(float));
cuda_push_array(state.input, net.input_pinned_cpu, size);
state.truth = 0;
state.train = 0;
state.delta = 0;
forward_network_gpu(net, state);
float *out = get_network_output_gpu(net);
//cuda_free(state.input); // will be freed in the free_network()
return out;
}
| f4abc6b9f2e8eb975a1d321ae989c77ae0546e48.cu | #include "dark_cuda.h"
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "gru_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
//#ifdef OPENCV
//#include <opencv2/highgui/highgui_c.h>
//#endif
#include "http_stream.h"
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
typedef struct time_benchmark_layers {
float time;
int layer_id, layer_type;
} time_benchmark_layers;
int time_comparator(const void *pa, const void *pb)
{
time_benchmark_layers a = *(time_benchmark_layers *)pa;
time_benchmark_layers b = *(time_benchmark_layers *)pb;
float diff = a.time - b.time;
if (diff < 0) return 1;
else if (diff > 0) return -1;
return 0;
}
void forward_network_gpu(network net, network_state state)
{
static time_benchmark_layers *avg_time_per_layer = NULL;
static time_benchmark_layers *sorted_avg_time_per_layer = NULL;
double start_time, end_time;
if (net.benchmark_layers) {
if (!avg_time_per_layer) {
avg_time_per_layer = (time_benchmark_layers *)calloc(net.n, sizeof(time_benchmark_layers));
sorted_avg_time_per_layer = (time_benchmark_layers *)calloc(net.n, sizeof(time_benchmark_layers));
}
cudaDeviceSynchronize();
}
//printf("\n");
state.workspace = net.workspace;
int i;
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu && state.train){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
if (net.benchmark_layers) {
start_time = get_time_point();
}
l.forward_gpu(l, state);
if (net.benchmark_layers) {
CHECK_CUDA(cudaDeviceSynchronize());
end_time = get_time_point();
const double took_time = (end_time - start_time) / 1000;
const double alpha = 0.9;
if (avg_time_per_layer[i].time == 0) {
avg_time_per_layer[i].layer_id = i;
avg_time_per_layer[i].layer_type = l.type;
avg_time_per_layer[i].time = took_time;
}
else avg_time_per_layer[i].time = avg_time_per_layer[i].time * alpha + took_time * (1 - alpha);
sorted_avg_time_per_layer[i] = avg_time_per_layer[i];
printf("\n fw-layer %d - type: %d - %lf ms - avg_time %lf ms \n", i, l.type, took_time, avg_time_per_layer[i].time);
}
if(net.wait_stream)
cudaStreamSynchronize(get_cuda_stream());
state.input = l.output_gpu;
//cudaDeviceSynchronize();
/*
cuda_pull_array(l.output_gpu, l.output, l.outputs);
cudaStreamSynchronize(get_cuda_stream());
float avg_val = 0;
int k;
for (k = 0; k < l.outputs; ++k) avg_val += l.output[k];
printf(" i: %d - avg_val = %f \n", i, avg_val / l.outputs);
*/
/*
cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
if (l.out_w >= 0 && l.out_h >= 1 && l.c >= 3) {
int j;
for (j = 0; j < l.out_c; ++j) {
image img = make_image(l.out_w, l.out_h, 3);
memcpy(img.data, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
memcpy(img.data + l.out_w*l.out_h * 1, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
memcpy(img.data + l.out_w*l.out_h * 2, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
char buff[256];
sprintf(buff, "layer-%d slice-%d", i, j);
show_image(img, buff);
save_image(img, buff);
}
cvWaitKey(0); // wait press-key in console
cvDestroyAllWindows();
}
*/
}
if (net.benchmark_layers) {
printf("\n\nSorted by time (forward):\n");
qsort(sorted_avg_time_per_layer, net.n, sizeof(time_benchmark_layers), time_comparator);
for (i = 0; i < net.n; ++i) {
//printf("layer %d - type: %d - avg_time %lf ms \n", avg_time_per_layer[i].layer_id, avg_time_per_layer[i].layer_type, avg_time_per_layer[i].time);
printf("%d - fw-sort-layer %d - type: %d - avg_time %lf ms \n", i, sorted_avg_time_per_layer[i].layer_id, sorted_avg_time_per_layer[i].layer_type, sorted_avg_time_per_layer[i].time);
}
}
//cudaStreamSynchronize(get_cuda_stream()); // sync CUDA-functions
//cudaDeviceSynchronize();
}
void backward_network_gpu(network net, network_state state)
{
static time_benchmark_layers *avg_time_per_layer = NULL;
static time_benchmark_layers *sorted_avg_time_per_layer = NULL;
double start_time, end_time;
if (net.benchmark_layers) {
if (!avg_time_per_layer) {
avg_time_per_layer = (time_benchmark_layers *)calloc(net.n, sizeof(time_benchmark_layers));
sorted_avg_time_per_layer = (time_benchmark_layers *)calloc(net.n, sizeof(time_benchmark_layers));
}
cudaDeviceSynchronize();
}
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
for(i = net.n-1; i >= 0; --i){
state.index = i;
layer l = net.layers[i];
if (l.stopbackward == 1) break;
if (l.stopbackward > get_current_iteration(net)) break;
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
layer prev = net.layers[i-1];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
if (net.optimized_memory && !prev.keep_delta_gpu) {
state.delta = net.state_delta_gpu;
}
}
if (l.onlyforward) continue;
if (net.benchmark_layers) {
start_time = get_time_point();
}
l.backward_gpu(l, state);
if (net.benchmark_layers) {
CHECK_CUDA(cudaDeviceSynchronize());
end_time = get_time_point();
const double took_time = (end_time - start_time) / 1000;
const double alpha = 0.9;
if (avg_time_per_layer[i].time == 0) {
avg_time_per_layer[i].layer_id = i;
avg_time_per_layer[i].layer_type = l.type;
avg_time_per_layer[i].time = took_time;
}
else avg_time_per_layer[i].time = avg_time_per_layer[i].time * alpha + took_time * (1 - alpha);
sorted_avg_time_per_layer[i] = avg_time_per_layer[i];
printf("\n bw-layer %d - type: %d - %lf ms - avg_time %lf ms \n", i, l.type, took_time, avg_time_per_layer[i].time);
}
if (i != 0) {
layer prev = net.layers[i - 1];
if (net.optimized_memory && state.delta && !prev.keep_delta_gpu) {
if (prev.delta_gpu != state.delta) simple_copy_ongpu(prev.outputs*prev.batch, state.delta, prev.delta_gpu);
fill_ongpu(prev.outputs*prev.batch, 0, net.state_delta_gpu, 1);
}
}
/*
if(i != 0)
{
layer l = net.layers[i - 1];
int state_delta_nan_inf = is_nan_or_inf(state.delta, l.outputs * l.batch);
int state_input_nan_inf = is_nan_or_inf(state.input, l.outputs * l.batch);
printf("\n i - %d is_nan_or_inf(s.delta) = %d \n", i, state_delta_nan_inf);
printf(" i - %d is_nan_or_inf(s.input) = %d \n", i, state_input_nan_inf);
if (state_delta_nan_inf || state_input_nan_inf) { printf(" found "); getchar(); }
}
*/
}
if (net.adversarial && net.attention)
{
int img_size = net.w * net.h * net.c;
float *original_input_cpu = (float *)xcalloc(img_size, sizeof(float));
float *original_delta_cpu = (float *)xcalloc(img_size, sizeof(float));
cuda_pull_array(original_input, original_input_cpu, img_size);
cuda_pull_array(original_delta, original_delta_cpu, img_size);
image attention_img = make_attention_image(img_size, original_delta_cpu, original_input_cpu, net.w, net.h, net.c);
show_image(attention_img, "attention_img");
resize_window_cv("attention_img", 500, 500);
free_image(attention_img);
free(original_input_cpu);
free(original_delta_cpu);
}
if (net.adversarial) {
int x_size = get_network_input_size(net)*net.batch;
printf(" x_size = %d, original_delta = %p, original_input = %p, net.learning_rate = %f \n",
x_size, original_delta, original_input, net.learning_rate);
axpy_ongpu(x_size, net.learning_rate, original_delta, 1, original_input, 1);
constrain_min_max_ongpu(x_size, 0, 1, original_input, 1);
}
if (net.benchmark_layers) {
printf("\n\nSorted by time (backward):\n");
qsort(sorted_avg_time_per_layer, net.n, sizeof(time_benchmark_layers), time_comparator);
for (i = 0; i < net.n; ++i) {
//printf("layer %d - type: %d - avg_time %lf ms \n", avg_time_per_layer[i].layer_id, avg_time_per_layer[i].layer_type, avg_time_per_layer[i].time);
printf("%d - bw-sort-layer %d - type: %d - avg_time %lf ms \n", i, sorted_avg_time_per_layer[i].layer_id, sorted_avg_time_per_layer[i].layer_type, sorted_avg_time_per_layer[i].time);
}
}
}
void update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
const int iteration_num = (*net.seen) / (net.batch * net.subdivisions);
int i;
int update_batch = net.batch*net.subdivisions * get_sequence_value(net);
float rate = get_current_rate(net);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);
if (iteration_num > (net.max_batches * 1 / 2)) l.deform = 0;
if (l.burnin_update && (l.burnin_update*net.burn_in > iteration_num)) continue;
if (l.train_only_bn) continue;
if(l.update_gpu && l.dont_update < iteration_num){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay, net.loss_scale);
}
}
}
void forward_backward_network_gpu(network net, float *x, float *y)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
*net.input_gpu = cuda_make_array(x, x_size);
*net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
if (net.adversarial) {
state.delta = cuda_make_array(NULL, x_size);
}
state.truth = *net.truth_gpu;
state.train = 1;
#if defined(CUDNN_HALF) && defined(CUDNN)
int i;
for (i = 0; i < net.n; ++i) {
layer l = net.layers[i];
if (net.cudnn_half){
if (l.type == CONVOLUTIONAL && l.weights_gpu && l.weights_gpu16) {
assert((l.nweights) > 0);
cuda_convert_f32_to_f16(l.weights_gpu, l.nweights, l.weights_gpu16);
}
else if (l.type == CRNN && l.input_layer->weights_gpu && l.input_layer->weights_gpu16) {
assert((l.input_layer->c*l.input_layer->n*l.input_layer->size*l.input_layer->size) > 0);
cuda_convert_f32_to_f16(l.input_layer->weights_gpu, l.input_layer->nweights, l.input_layer->weights_gpu16);
cuda_convert_f32_to_f16(l.self_layer->weights_gpu, l.self_layer->nweights, l.self_layer->weights_gpu16);
cuda_convert_f32_to_f16(l.output_layer->weights_gpu, l.output_layer->nweights, l.output_layer->weights_gpu16);
}
else if (l.type == CONV_LSTM && l.wf->weights_gpu && l.wf->weights_gpu16) {
assert((l.wf->c * l.wf->n * l.wf->size * l.wf->size) > 0);
if (l.peephole) {
cuda_convert_f32_to_f16(l.vf->weights_gpu, l.vf->nweights, l.vf->weights_gpu16);
cuda_convert_f32_to_f16(l.vi->weights_gpu, l.vi->nweights, l.vi->weights_gpu16);
cuda_convert_f32_to_f16(l.vo->weights_gpu, l.vo->nweights, l.vo->weights_gpu16);
}
cuda_convert_f32_to_f16(l.wf->weights_gpu, l.wf->nweights, l.wf->weights_gpu16);
if (!l.bottleneck) {
cuda_convert_f32_to_f16(l.wi->weights_gpu, l.wi->nweights, l.wi->weights_gpu16);
cuda_convert_f32_to_f16(l.wg->weights_gpu, l.wg->nweights, l.wg->weights_gpu16);
cuda_convert_f32_to_f16(l.wo->weights_gpu, l.wo->nweights, l.wo->weights_gpu16);
}
cuda_convert_f32_to_f16(l.uf->weights_gpu, l.uf->nweights, l.uf->weights_gpu16);
cuda_convert_f32_to_f16(l.ui->weights_gpu, l.ui->nweights, l.ui->weights_gpu16);
cuda_convert_f32_to_f16(l.ug->weights_gpu, l.ug->nweights, l.ug->weights_gpu16);
cuda_convert_f32_to_f16(l.uo->weights_gpu, l.uo->nweights, l.uo->weights_gpu16);
}
}
}
#endif
forward_network_gpu(net, state);
//cudaStreamSynchronize(get_cuda_stream());
backward_network_gpu(net, state);
if (net.adversarial) {
cuda_free(state.delta);
cuda_pull_array(*net.input_gpu, x, x_size);
}
if(*(state.net.total_bbox) > 0)
fprintf(stderr, " total_bbox = %d, rewritten_bbox = %f %% \n", *(state.net.total_bbox), 100 * (float)*(state.net.rewritten_bbox) / *(state.net.total_bbox));
}
float train_network_datum_gpu(network net, float *x, float *y)
{
*net.seen += net.batch;
if (net.adversarial_lr && rand_int(0, 1) == 1 && get_current_iteration(net) > net.burn_in) {
net.adversarial = 1;
float lr_old = net.learning_rate;
float scale = (get_current_iteration(net) / ((float)net.max_batches));
//scale = sin(scale * M_PI);
net.learning_rate = net.adversarial_lr * scale;
layer l = net.layers[net.n - 1];
int y_size = get_network_output_size(net)*net.batch;
if (net.layers[net.n - 1].truths) y_size = net.layers[net.n - 1].truths*net.batch;
float *truth_cpu = (float *)xcalloc(y_size, sizeof(float));
const int img_size = net.w*net.h*net.c;
float *old_input = (float *)xcalloc(img_size*net.batch, sizeof(float));
memcpy(old_input, x, img_size*net.batch * sizeof(float));
printf("\n adversarial training, adversarial_lr = %f \n", net.adversarial_lr * scale);
forward_backward_network_gpu(net, x, truth_cpu);
int b;
for (b = 0; b < net.batch; ++b) {
if (b % 2 == 1 && net.contrastive) {
//printf(" b = %d old img, ", b);
memcpy(x + img_size*b, old_input + img_size*b, img_size * sizeof(float));
}
}
image im;
im.w = net.w;
im.h = net.h;
im.c = net.c;
im.data = x;
show_image(im, "adversarial data augmentation");
resize_window_cv("adversarial data augmentation", 500, 500);
wait_key_cv(1);
free(old_input);
free(truth_cpu);
net.learning_rate = lr_old;
net.adversarial = 0;
}
forward_backward_network_gpu(net, x, y);
float error = get_network_cost(net);
//if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
const int sequence = get_sequence_value(net);
//if (((*net.seen) / net.batch) % (net.subdivisions*sequence) == 0) update_network_gpu(net);
return error;
}
typedef struct {
network net;
data d;
float *err;
} train_args;
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
pthread_t train_network_in_thread(network net, data d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
void pull_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void push_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay, net.loss_scale);
}
}
void merge_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.nweights, 1, l.weights, 1, base.weights, 1);
if (l.scales) {
axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1);
}
}
void scale_weights(layer l, float s)
{
if (l.type == CONVOLUTIONAL) {
scal_cpu(l.n, s, l.biases, 1);
scal_cpu(l.nweights, s, l.weights, 1);
if (l.scales) {
scal_cpu(l.n, s, l.scales, 1);
}
} else if(l.type == CONNECTED) {
scal_cpu(l.outputs, s, l.biases, 1);
scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
}
}
void pull_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.biases_gpu, l.biases, l.n);
cuda_pull_array(l.weights_gpu, l.weights, l.nweights);
if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void push_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weights_gpu, l.weights, l.nweights);
if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, l.biases, l.outputs);
cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void distribute_weights(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, base.biases, l.n);
cuda_push_array(l.weights_gpu, base.weights, l.nweights);
if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, base.biases, l.outputs);
cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
}
}
void merge_updates(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.nweights, 1, l.weight_updates, 1, base.weight_updates, 1);
if (l.scale_updates) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
}
}
void distribute_updates(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.nweights);
if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
}
}
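// Average layer j's weights across all replicas and push the averaged weights back to every replica.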
void sync_layer(network *nets, int n, int j)
{
//printf("Syncing layer %d\n", j);
int i;
network net = nets[0];
layer base = net.layers[j];
cuda_set_device(net.gpu_index);
pull_weights(base);
for (i = 1; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
//printf("Done syncing layer %d\n", j);
}
typedef struct{
network *nets;
int n;
int j;
} sync_args;
void *sync_layer_thread(void *ptr)
{
sync_args args = *(sync_args*)ptr;
sync_layer(args.nets, args.n, args.j);
free(ptr);
return 0;
}
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
pthread_t thread;
sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
ptr->nets = nets;
ptr->n = n;
ptr->j = j;
if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
return thread;
}
void sync_nets(network *nets, int n, int interval)
{
int j;
int layers = nets[0].n;
pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
*nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
for (j = 0; j < n; ++j){
*nets[j].seen = *nets[0].seen;
}
for (j = 0; j < layers; ++j) {
threads[j] = sync_layer_in_thread(nets, n, j);
}
for (j = 0; j < layers; ++j) {
pthread_join(threads[j], 0);
}
free(threads);
}
float train_networks(network *nets, int n, data d, int interval)
{
int i;
#ifdef _DEBUG
int batch = nets[0].batch;
int subdivisions = nets[0].subdivisions;
assert(batch * subdivisions * n == d.X.rows);
#endif
pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
float *errors = (float *) calloc(n, sizeof(float));
float sum = 0;
for(i = 0; i < n; ++i){
data p = get_data_part(d, i, n);
threads[i] = train_network_in_thread(nets[i], p, errors + i);
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
//printf("%f\n", errors[i]);
sum += errors[i];
}
//cudaDeviceSynchronize();
*nets[0].cur_iteration += (n - 1);
*nets[0].seen = nets[0].batch * nets[0].subdivisions * get_current_iteration(nets[0]); // remove this line once both seen & cur_iteration are saved to the weights file
if (get_current_iteration(nets[0]) % interval == 0)
{
printf("Syncing... ");
fflush(stdout);
sync_nets(nets, n, interval);
printf("Done!\n");
}
//cudaDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
}
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
float *get_network_output_gpu(network net)
{
int i;
for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
float *network_predict_gpu(network net, float *input)
{
if (net.gpu_index != cuda_get_device())
cuda_set_device(net.gpu_index);
int size = get_network_input_size(net) * net.batch;
network_state state;
state.index = 0;
state.net = net;
//state.input = cuda_make_array(input, size); // memory will be allocated in the parse_network_cfg_custom()
state.input = net.input_state_gpu;
memcpy(net.input_pinned_cpu, input, size * sizeof(float));
cuda_push_array(state.input, net.input_pinned_cpu, size);
state.truth = 0;
state.train = 0;
state.delta = 0;
forward_network_gpu(net, state);
float *out = get_network_output_gpu(net);
//cuda_free(state.input); // will be freed in the free_network()
return out;
}
|
a4f4426ae3d37e1c4a926eeee36739e1477a8ae5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// cudamatrix/bd-cu-kernels.cu
// In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers
#include <cfloat>
#include "chj-cu-kernels-ansi.h"
#include <cmath>
//static is not supported
template<typename Real>
__device__
inline Real LogAdd(Real x, Real y) {
Real diff;
if (x < y) {
diff = x - y;
x = y;
} else {
diff = y - x;
}
// diff is negative. x is now the larger one.
if (diff >= -15.9424) {
Real res;
res = x + log1pf(expf(diff));
return res;
} else {
return x; // return the larger one.
}
}
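// Elementwise A = B + C; if either operand equals the NOT_USE_VALUE sentinel, the sentinel is propagated to A.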
template<typename Real>
__global__
static void _chj_cuda_mat_add_mat(MatrixDim A_dim,Real * A,Real *B,Real *C,Real NOT_USE_VALUE){
int i = blockIdx.x * blockDim.x + threadIdx.x; // row index
int j = blockIdx.y * blockDim.y + threadIdx.y; // column index
int index = i * A_dim.stride + j;
if (i < A_dim.rows && j < A_dim.cols) {
if(B[index] == NOT_USE_VALUE || C[index] == NOT_USE_VALUE ){
A[index] = NOT_USE_VALUE;
}else{
A[index] = B[index] + C[index];
}
}
}//_chj_cuda_mat_add_mat
template<typename Real>
__global__
static void _chj_cuda_ctc_loss_fun(MatrixDim A_dim,Real * A,MatrixDim B_dim,Real *B,MatrixDim C_dim,Real *C,Real p,Real NOT_USE_VALUE){
int i = blockIdx.x * blockDim.x + threadIdx.x; // row index
int j = blockIdx.y * blockDim.y + threadIdx.y; // column index
int index = i * A_dim.stride + j;
//B_dim not A_dim
if (i < B_dim.rows && j < A_dim.cols) {
Real a=NOT_USE_VALUE;
int n=C[j*C_dim.stride];
for(int ii=1;ii<=n;ii++){
int idb=i*B_dim.stride+ C[ j*C_dim.stride + ii]; //***
if(B[idb]!=NOT_USE_VALUE){
if(a==NOT_USE_VALUE)a=B[idb];
else a=LogAdd(a,B[idb]);
}
}
if(a!=NOT_USE_VALUE){
A[index] -= expf(a-p); //7.29
}
}
}//_chj_cuda_ctc_loss_fun
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
void chj_cudaF_mat_add_mat(dim3 Gr,dim3 Bl,MatrixDim A_dim,float *A,float *B,float *C,float NOT_USE_VALUE){
hipLaunchKernelGGL(( _chj_cuda_mat_add_mat), dim3(Gr),dim3(Bl), 0, 0, A_dim,A,B,C,NOT_USE_VALUE);
}
void chj_cudaF_ctc_loss_fun(dim3 Gr,dim3 Bl,MatrixDim A_dim,float *A,MatrixDim B_dim,float *B,MatrixDim C_dim,float *C,float p,float NOT_USE_VALUE){
hipLaunchKernelGGL(( _chj_cuda_ctc_loss_fun), dim3(Gr),dim3(Bl), 0, 0, A_dim,A,B_dim,B,C_dim,C,p,NOT_USE_VALUE);
}
| a4f4426ae3d37e1c4a926eeee36739e1477a8ae5.cu | // cudamatrix/bd-cu-kernels.cu
// In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers
#include <cfloat>
#include "chj-cu-kernels-ansi.h"
#include <cmath>
//static is not supported
template<typename Real>
__device__
inline Real LogAdd(Real x, Real y) {
Real diff;
if (x < y) {
diff = x - y;
x = y;
} else {
diff = y - x;
}
// diff is negative. x is now the larger one.
if (diff >= -15.9424) {
Real res;
res = x + log1pf(expf(diff));
return res;
} else {
return x; // return the larger one.
}
}
template<typename Real>
__global__
static void _chj_cuda_mat_add_mat(MatrixDim A_dim,Real * A,Real *B,Real *C,Real NOT_USE_VALUE){
int i = blockIdx.x * blockDim.x + threadIdx.x; // row index
int j = blockIdx.y * blockDim.y + threadIdx.y; // column index
int index = i * A_dim.stride + j;
if (i < A_dim.rows && j < A_dim.cols) {
if(B[index] == NOT_USE_VALUE || C[index] == NOT_USE_VALUE ){
A[index] = NOT_USE_VALUE;
}else{
A[index] = B[index] + C[index];
}
}
}//_chj_cuda_mat_add_mat
template<typename Real>
__global__
static void _chj_cuda_ctc_loss_fun(MatrixDim A_dim,Real * A,MatrixDim B_dim,Real *B,MatrixDim C_dim,Real *C,Real p,Real NOT_USE_VALUE){
int i = blockIdx.x * blockDim.x + threadIdx.x; // row index
int j = blockIdx.y * blockDim.y + threadIdx.y; // column index
int index = i * A_dim.stride + j;
 //B_dim not A_dim - this is because my program requires it
if (i < B_dim.rows && j < A_dim.cols) {
Real a=NOT_USE_VALUE;
int n=C[j*C_dim.stride];
for(int ii=1;ii<=n;ii++){
int idb=i*B_dim.stride+ C[ j*C_dim.stride + ii]; //***
if(B[idb]!=NOT_USE_VALUE){
if(a==NOT_USE_VALUE)a=B[idb];
else a=LogAdd(a,B[idb]);
}
}
if(a!=NOT_USE_VALUE){
A[index] -= expf(a-p); //7.29
}
}
}//_chj_cuda_ctc_loss_fun
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
void chj_cudaF_mat_add_mat(dim3 Gr,dim3 Bl,MatrixDim A_dim,float *A,float *B,float *C,float NOT_USE_VALUE){
_chj_cuda_mat_add_mat<<<Gr,Bl>>>(A_dim,A,B,C,NOT_USE_VALUE);
}
void chj_cudaF_ctc_loss_fun(dim3 Gr,dim3 Bl,MatrixDim A_dim,float *A,MatrixDim B_dim,float *B,MatrixDim C_dim,float *C,float p,float NOT_USE_VALUE){
_chj_cuda_ctc_loss_fun<<<Gr,Bl>>>(A_dim,A,B_dim,B,C_dim,C,p,NOT_USE_VALUE);
}
|
2a7556a2f394762a7b848ebbc74d607f0b0786b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TILE_DIM 1024
template<typename T>
__device__ void sum(const T* matrix, T* result,
const int rows, const int cols) {
__shared__ T tile[TILE_DIM];
int index = threadIdx.x;
int length = rows * cols;
int partLength = (length + TILE_DIM - 1) / TILE_DIM;
T sum = 0;
for (int i = 0; i < partLength; i++) {
int valueIndex = i * TILE_DIM + index;
if (valueIndex < length) {
T value = matrix[valueIndex];
sum += value;
}
}
tile[index] = sum;
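    // Interleaved tree reduction in shared memory: the stride d doubles each
    // pass, and thread 0 ends up holding the full sum in its local `sum`.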
for (int d = 1; d < TILE_DIM && d < length; d <<= 1) {
__syncthreads();
if (index % (d << 1) == 0) {
int valueIndex = index + d;
if (valueIndex < TILE_DIM) {
T value = tile[valueIndex];
sum += value;
tile[index] = sum;
}
}
}
if (index == 0) {
result[0] = sum;
}
}
| 2a7556a2f394762a7b848ebbc74d607f0b0786b5.cu | #define TILE_DIM 1024
template<typename T>
__device__ void sum(const T* matrix, T* result,
const int rows, const int cols) {
__shared__ T tile[TILE_DIM];
int index = threadIdx.x;
int length = rows * cols;
int partLength = (length + TILE_DIM - 1) / TILE_DIM;
T sum = 0;
for (int i = 0; i < partLength; i++) {
int valueIndex = i * TILE_DIM + index;
if (valueIndex < length) {
T value = matrix[valueIndex];
sum += value;
}
}
tile[index] = sum;
for (int d = 1; d < TILE_DIM && d < length; d <<= 1) {
__syncthreads();
if (index % (d << 1) == 0) {
int valueIndex = index + d;
if (valueIndex < TILE_DIM) {
T value = tile[valueIndex];
sum += value;
tile[index] = sum;
}
}
}
if (index == 0) {
result[0] = sum;
}
}
|
cacbb8daa8c372f413cf0139e4d9fd38c8ff93f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/defines.h>
#include <backend.hpp>
#include <dispatch.hpp>
#include <Param.hpp>
#include <debug_cuda.hpp>
#include <math.hpp>
#include <convolve.hpp>
namespace cuda
{
namespace kernel
{
static const int THREADS_X = 16;
static const int THREADS_Y = 16;
// below shared MAX_*_LEN's are calculated based on
// a maximum shared memory configuration of 48KB per block
// considering complex types as well
static const int MAX_SCONV_FILTER_LEN = 31;
// we shall declare the maximum size required of above all three cases
// and re-use the same constant memory locations for every case
__constant__ char sFilter[2*THREADS_Y*(2*(MAX_SCONV_FILTER_LEN-1)+THREADS_X)*sizeof(double)];
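// with THREADS_Y=16, MAX_SCONV_FILTER_LEN=31 and double as the widest element type,
// sFilter occupies 2*16*(2*30+16)*8 = 19456 bytes of constant memory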
template<typename T, typename accType, int conv_dim, bool expand, int fLen>
__global__
void convolve2_separable(Param<T> out, CParam<T> signal, int nBBS0, int nBBS1)
{
const int smem_len = (conv_dim==0 ?
(THREADS_X+2*(fLen-1))* THREADS_Y:
(THREADS_Y+2*(fLen-1))* THREADS_X);
__shared__ T shrdMem[smem_len];
const int radius = fLen-1;
const int padding = 2*radius;
const int s0 = signal.strides[0];
const int s1 = signal.strides[1];
const int d0 = signal.dims[0];
const int d1 = signal.dims[1];
const int shrdLen = THREADS_X + (conv_dim==0 ? padding : 0);
unsigned b2 = blockIdx.x/nBBS0;
unsigned b3 = blockIdx.y/nBBS1;
T *dst = (T *)out.ptr + (b2*out.strides[2] + b3*out.strides[3]);
const T *src = (const T *)signal.ptr + (b2*signal.strides[2] + b3*signal.strides[3]);
const accType *impulse = (const accType *)sFilter;
int lx = threadIdx.x;
int ly = threadIdx.y;
int ox = THREADS_X * (blockIdx.x-b2*nBBS0) + lx;
int oy = THREADS_Y * (blockIdx.y-b3*nBBS1) + ly;
int gx = ox;
int gy = oy;
// below if-else statement is based on template parameter
if (conv_dim==0) {
gx += (expand ? 0 : fLen>>1);
int endX = ((fLen-1)<<1) + THREADS_X;
#pragma unroll
for(int lx = threadIdx.x, glb_x = gx; lx<endX; lx += THREADS_X, glb_x += THREADS_X) {
int i = glb_x - radius;
int j = gy;
bool is_i = i>=0 && i<d0;
bool is_j = j>=0 && j<d1;
shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0));
}
} else if (conv_dim==1) {
gy += (expand ? 0 : fLen>>1);
int endY = ((fLen-1)<<1) + THREADS_Y;
#pragma unroll
for(int ly = threadIdx.y, glb_y = gy; ly<endY; ly += THREADS_Y, glb_y += THREADS_Y) {
int i = gx;
int j = glb_y - radius;
bool is_i = i>=0 && i<d0;
bool is_j = j>=0 && j<d1;
shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0));
}
}
__syncthreads();
if (ox<out.dims[0] && oy<out.dims[1]) {
// below conditional statement is based on template parameter
int i = (conv_dim==0 ? lx : ly) + radius;
accType accum = scalar<accType>(0);
#pragma unroll
for(int f=0; f<fLen; ++f) {
accType f_val = impulse[f];
// below conditional statement is based on template parameter
int s_idx = (conv_dim==0 ? (ly*shrdLen+(i-f)) : ((i-f)*shrdLen+lx));
T s_val = shrdMem[s_idx];
accum = accum + s_val*f_val;
}
dst[oy*out.strides[1]+ox] = (T)accum;
}
}
template<typename T, typename aT, int cDim, bool expand, int f>
void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig, int nBBS0, int nBBS1)
{
CUDA_LAUNCH((convolve2_separable<T, aT, cDim, expand, f>), blks, thrds, out, sig, nBBS0, nBBS1);
}
template<typename T, typename accType, int conv_dim, bool expand>
void convolve2(Param<T> out, CParam<T> signal, CParam<accType> filter)
{
int fLen = filter.dims[0] * filter.dims[1] * filter.dims[2] * filter.dims[3];
if(fLen > kernel::MAX_SCONV_FILTER_LEN) {
// call upon fft
CUDA_NOT_SUPPORTED();
}
dim3 threads(THREADS_X, THREADS_Y);
int blk_x = divup(out.dims[0], threads.x);
int blk_y = divup(out.dims[1], threads.y);
dim3 blocks(blk_x*signal.dims[2], blk_y*signal.dims[3]);
// FIX ME: if the filter array is strided, direct copy of symbols
// might cause issues
CUDA_CHECK(hipMemcpyToSymbolAsync(kernel::sFilter, filter.ptr, fLen*sizeof(accType), 0,
hipMemcpyDeviceToDevice, cuda::getStream(cuda::getActiveDeviceId())));
switch(fLen) {
case 2: conv2Helper<T, accType, conv_dim, expand, 2>(blocks, threads, out, signal, blk_x, blk_y); break;
case 3: conv2Helper<T, accType, conv_dim, expand, 3>(blocks, threads, out, signal, blk_x, blk_y); break;
case 4: conv2Helper<T, accType, conv_dim, expand, 4>(blocks, threads, out, signal, blk_x, blk_y); break;
case 5: conv2Helper<T, accType, conv_dim, expand, 5>(blocks, threads, out, signal, blk_x, blk_y); break;
case 6: conv2Helper<T, accType, conv_dim, expand, 6>(blocks, threads, out, signal, blk_x, blk_y); break;
case 7: conv2Helper<T, accType, conv_dim, expand, 7>(blocks, threads, out, signal, blk_x, blk_y); break;
case 8: conv2Helper<T, accType, conv_dim, expand, 8>(blocks, threads, out, signal, blk_x, blk_y); break;
case 9: conv2Helper<T, accType, conv_dim, expand, 9>(blocks, threads, out, signal, blk_x, blk_y); break;
case 10: conv2Helper<T, accType, conv_dim, expand, 10>(blocks, threads, out, signal, blk_x, blk_y); break;
case 11: conv2Helper<T, accType, conv_dim, expand, 11>(blocks, threads, out, signal, blk_x, blk_y); break;
case 12: conv2Helper<T, accType, conv_dim, expand, 12>(blocks, threads, out, signal, blk_x, blk_y); break;
case 13: conv2Helper<T, accType, conv_dim, expand, 13>(blocks, threads, out, signal, blk_x, blk_y); break;
case 14: conv2Helper<T, accType, conv_dim, expand, 14>(blocks, threads, out, signal, blk_x, blk_y); break;
case 15: conv2Helper<T, accType, conv_dim, expand, 15>(blocks, threads, out, signal, blk_x, blk_y); break;
case 16: conv2Helper<T, accType, conv_dim, expand, 16>(blocks, threads, out, signal, blk_x, blk_y); break;
case 17: conv2Helper<T, accType, conv_dim, expand, 17>(blocks, threads, out, signal, blk_x, blk_y); break;
case 18: conv2Helper<T, accType, conv_dim, expand, 18>(blocks, threads, out, signal, blk_x, blk_y); break;
case 19: conv2Helper<T, accType, conv_dim, expand, 19>(blocks, threads, out, signal, blk_x, blk_y); break;
case 20: conv2Helper<T, accType, conv_dim, expand, 20>(blocks, threads, out, signal, blk_x, blk_y); break;
case 21: conv2Helper<T, accType, conv_dim, expand, 21>(blocks, threads, out, signal, blk_x, blk_y); break;
case 22: conv2Helper<T, accType, conv_dim, expand, 22>(blocks, threads, out, signal, blk_x, blk_y); break;
case 23: conv2Helper<T, accType, conv_dim, expand, 23>(blocks, threads, out, signal, blk_x, blk_y); break;
case 24: conv2Helper<T, accType, conv_dim, expand, 24>(blocks, threads, out, signal, blk_x, blk_y); break;
case 25: conv2Helper<T, accType, conv_dim, expand, 25>(blocks, threads, out, signal, blk_x, blk_y); break;
case 26: conv2Helper<T, accType, conv_dim, expand, 26>(blocks, threads, out, signal, blk_x, blk_y); break;
case 27: conv2Helper<T, accType, conv_dim, expand, 27>(blocks, threads, out, signal, blk_x, blk_y); break;
case 28: conv2Helper<T, accType, conv_dim, expand, 28>(blocks, threads, out, signal, blk_x, blk_y); break;
case 29: conv2Helper<T, accType, conv_dim, expand, 29>(blocks, threads, out, signal, blk_x, blk_y); break;
case 30: conv2Helper<T, accType, conv_dim, expand, 30>(blocks, threads, out, signal, blk_x, blk_y); break;
case 31: conv2Helper<T, accType, conv_dim, expand, 31>(blocks, threads, out, signal, blk_x, blk_y); break;
default: CUDA_NOT_SUPPORTED();
}
POST_LAUNCH_CHECK();
}
#define INSTANTIATE(T, accType) \
template void convolve2<T, accType, 0, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \
template void convolve2<T, accType, 0, false>(Param<T> out, CParam<T> signal, CParam<accType> filter); \
template void convolve2<T, accType, 1, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \
template void convolve2<T, accType, 1, false>(Param<T> out, CParam<T> signal, CParam<accType> filter); \
INSTANTIATE(cdouble, cdouble)
INSTANTIATE(cfloat , cfloat)
INSTANTIATE(double , double)
INSTANTIATE(float , float)
INSTANTIATE(uint , float)
INSTANTIATE(int , float)
INSTANTIATE(uchar , float)
INSTANTIATE(char , float)
}
}
| cacbb8daa8c372f413cf0139e4d9fd38c8ff93f1.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/defines.h>
#include <backend.hpp>
#include <dispatch.hpp>
#include <Param.hpp>
#include <debug_cuda.hpp>
#include <math.hpp>
#include <convolve.hpp>
namespace cuda
{
namespace kernel
{
static const int THREADS_X = 16;
static const int THREADS_Y = 16;
// below shared MAX_*_LEN's are calculated based on
// a maximum shared memory configuration of 48KB per block
// considering complex types as well
static const int MAX_SCONV_FILTER_LEN = 31;
// we shall declare the maximum size required of above all three cases
// and re-use the same constant memory locations for every case
__constant__ char sFilter[2*THREADS_Y*(2*(MAX_SCONV_FILTER_LEN-1)+THREADS_X)*sizeof(double)];
template<typename T, typename accType, int conv_dim, bool expand, int fLen>
__global__
void convolve2_separable(Param<T> out, CParam<T> signal, int nBBS0, int nBBS1)
{
const int smem_len = (conv_dim==0 ?
(THREADS_X+2*(fLen-1))* THREADS_Y:
(THREADS_Y+2*(fLen-1))* THREADS_X);
__shared__ T shrdMem[smem_len];
const int radius = fLen-1;
const int padding = 2*radius;
const int s0 = signal.strides[0];
const int s1 = signal.strides[1];
const int d0 = signal.dims[0];
const int d1 = signal.dims[1];
const int shrdLen = THREADS_X + (conv_dim==0 ? padding : 0);
unsigned b2 = blockIdx.x/nBBS0;
unsigned b3 = blockIdx.y/nBBS1;
T *dst = (T *)out.ptr + (b2*out.strides[2] + b3*out.strides[3]);
const T *src = (const T *)signal.ptr + (b2*signal.strides[2] + b3*signal.strides[3]);
const accType *impulse = (const accType *)sFilter;
int lx = threadIdx.x;
int ly = threadIdx.y;
int ox = THREADS_X * (blockIdx.x-b2*nBBS0) + lx;
int oy = THREADS_Y * (blockIdx.y-b3*nBBS1) + ly;
int gx = ox;
int gy = oy;
// below if-else statement is based on template parameter
if (conv_dim==0) {
gx += (expand ? 0 : fLen>>1);
int endX = ((fLen-1)<<1) + THREADS_X;
#pragma unroll
for(int lx = threadIdx.x, glb_x = gx; lx<endX; lx += THREADS_X, glb_x += THREADS_X) {
int i = glb_x - radius;
int j = gy;
bool is_i = i>=0 && i<d0;
bool is_j = j>=0 && j<d1;
shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0));
}
} else if (conv_dim==1) {
gy += (expand ? 0 : fLen>>1);
int endY = ((fLen-1)<<1) + THREADS_Y;
#pragma unroll
for(int ly = threadIdx.y, glb_y = gy; ly<endY; ly += THREADS_Y, glb_y += THREADS_Y) {
int i = gx;
int j = glb_y - radius;
bool is_i = i>=0 && i<d0;
bool is_j = j>=0 && j<d1;
shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0));
}
}
__syncthreads();
if (ox<out.dims[0] && oy<out.dims[1]) {
// below conditional statement is based on template parameter
int i = (conv_dim==0 ? lx : ly) + radius;
accType accum = scalar<accType>(0);
#pragma unroll
for(int f=0; f<fLen; ++f) {
accType f_val = impulse[f];
// below conditional statement is based on template parameter
int s_idx = (conv_dim==0 ? (ly*shrdLen+(i-f)) : ((i-f)*shrdLen+lx));
T s_val = shrdMem[s_idx];
accum = accum + s_val*f_val;
}
dst[oy*out.strides[1]+ox] = (T)accum;
}
}
template<typename T, typename aT, int cDim, bool expand, int f>
void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig, int nBBS0, int nBBS1)
{
CUDA_LAUNCH((convolve2_separable<T, aT, cDim, expand, f>), blks, thrds, out, sig, nBBS0, nBBS1);
}
template<typename T, typename accType, int conv_dim, bool expand>
void convolve2(Param<T> out, CParam<T> signal, CParam<accType> filter)
{
int fLen = filter.dims[0] * filter.dims[1] * filter.dims[2] * filter.dims[3];
if(fLen > kernel::MAX_SCONV_FILTER_LEN) {
// call upon fft
CUDA_NOT_SUPPORTED();
}
dim3 threads(THREADS_X, THREADS_Y);
int blk_x = divup(out.dims[0], threads.x);
int blk_y = divup(out.dims[1], threads.y);
dim3 blocks(blk_x*signal.dims[2], blk_y*signal.dims[3]);
// FIX ME: if the filter array is strided, direct copy of symbols
// might cause issues
CUDA_CHECK(cudaMemcpyToSymbolAsync(kernel::sFilter, filter.ptr, fLen*sizeof(accType), 0,
cudaMemcpyDeviceToDevice, cuda::getStream(cuda::getActiveDeviceId())));
switch(fLen) {
case 2: conv2Helper<T, accType, conv_dim, expand, 2>(blocks, threads, out, signal, blk_x, blk_y); break;
case 3: conv2Helper<T, accType, conv_dim, expand, 3>(blocks, threads, out, signal, blk_x, blk_y); break;
case 4: conv2Helper<T, accType, conv_dim, expand, 4>(blocks, threads, out, signal, blk_x, blk_y); break;
case 5: conv2Helper<T, accType, conv_dim, expand, 5>(blocks, threads, out, signal, blk_x, blk_y); break;
case 6: conv2Helper<T, accType, conv_dim, expand, 6>(blocks, threads, out, signal, blk_x, blk_y); break;
case 7: conv2Helper<T, accType, conv_dim, expand, 7>(blocks, threads, out, signal, blk_x, blk_y); break;
case 8: conv2Helper<T, accType, conv_dim, expand, 8>(blocks, threads, out, signal, blk_x, blk_y); break;
case 9: conv2Helper<T, accType, conv_dim, expand, 9>(blocks, threads, out, signal, blk_x, blk_y); break;
case 10: conv2Helper<T, accType, conv_dim, expand, 10>(blocks, threads, out, signal, blk_x, blk_y); break;
case 11: conv2Helper<T, accType, conv_dim, expand, 11>(blocks, threads, out, signal, blk_x, blk_y); break;
case 12: conv2Helper<T, accType, conv_dim, expand, 12>(blocks, threads, out, signal, blk_x, blk_y); break;
case 13: conv2Helper<T, accType, conv_dim, expand, 13>(blocks, threads, out, signal, blk_x, blk_y); break;
case 14: conv2Helper<T, accType, conv_dim, expand, 14>(blocks, threads, out, signal, blk_x, blk_y); break;
case 15: conv2Helper<T, accType, conv_dim, expand, 15>(blocks, threads, out, signal, blk_x, blk_y); break;
case 16: conv2Helper<T, accType, conv_dim, expand, 16>(blocks, threads, out, signal, blk_x, blk_y); break;
case 17: conv2Helper<T, accType, conv_dim, expand, 17>(blocks, threads, out, signal, blk_x, blk_y); break;
case 18: conv2Helper<T, accType, conv_dim, expand, 18>(blocks, threads, out, signal, blk_x, blk_y); break;
case 19: conv2Helper<T, accType, conv_dim, expand, 19>(blocks, threads, out, signal, blk_x, blk_y); break;
case 20: conv2Helper<T, accType, conv_dim, expand, 20>(blocks, threads, out, signal, blk_x, blk_y); break;
case 21: conv2Helper<T, accType, conv_dim, expand, 21>(blocks, threads, out, signal, blk_x, blk_y); break;
case 22: conv2Helper<T, accType, conv_dim, expand, 22>(blocks, threads, out, signal, blk_x, blk_y); break;
case 23: conv2Helper<T, accType, conv_dim, expand, 23>(blocks, threads, out, signal, blk_x, blk_y); break;
case 24: conv2Helper<T, accType, conv_dim, expand, 24>(blocks, threads, out, signal, blk_x, blk_y); break;
case 25: conv2Helper<T, accType, conv_dim, expand, 25>(blocks, threads, out, signal, blk_x, blk_y); break;
case 26: conv2Helper<T, accType, conv_dim, expand, 26>(blocks, threads, out, signal, blk_x, blk_y); break;
case 27: conv2Helper<T, accType, conv_dim, expand, 27>(blocks, threads, out, signal, blk_x, blk_y); break;
case 28: conv2Helper<T, accType, conv_dim, expand, 28>(blocks, threads, out, signal, blk_x, blk_y); break;
case 29: conv2Helper<T, accType, conv_dim, expand, 29>(blocks, threads, out, signal, blk_x, blk_y); break;
case 30: conv2Helper<T, accType, conv_dim, expand, 30>(blocks, threads, out, signal, blk_x, blk_y); break;
case 31: conv2Helper<T, accType, conv_dim, expand, 31>(blocks, threads, out, signal, blk_x, blk_y); break;
default: CUDA_NOT_SUPPORTED();
}
POST_LAUNCH_CHECK();
}
#define INSTANTIATE(T, accType) \
template void convolve2<T, accType, 0, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \
template void convolve2<T, accType, 0, false>(Param<T> out, CParam<T> signal, CParam<accType> filter); \
template void convolve2<T, accType, 1, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \
template void convolve2<T, accType, 1, false>(Param<T> out, CParam<T> signal, CParam<accType> filter); \
INSTANTIATE(cdouble, cdouble)
INSTANTIATE(cfloat , cfloat)
INSTANTIATE(double , double)
INSTANTIATE(float , float)
INSTANTIATE(uint , float)
INSTANTIATE(int , float)
INSTANTIATE(uchar , float)
INSTANTIATE(char , float)
}
}
|
41156eaee4c98ca4f9c37a493ad22b72733f7ffb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Prerequisites.cuh"
#include "Angles.cuh"
#include "Helper.cuh"
#include "Relion.cuh"
namespace gtom
{
//template<uint TpB> __global__ void Project2Dto2DKernel(cudaTex t_volumeRe, cudaTex t_volumeIm, uint dimvolume, tcomplex* d_proj, uint dimproj, uint rmax, uint rmax2);
template<uint ndims, bool decentered, bool squareinterpweights> __global__ void Backproject3DtoNDKernel(tcomplex* d_volumeft,
tfloat* d_volumeweights,
uint dimvolume,
tcomplex* d_projft,
tfloat* d_projweights,
uint dimproj,
size_t elementsproj,
glm::mat3* d_rotations,
int* d_ivolume,
glm::mat2 magnification,
float ewalddiameterinv,
uint rmax,
int rmax2);
void d_rlnBackproject(tcomplex* d_volumeft, tfloat* d_volumeweights, int3 dimsvolume, tcomplex* d_projft, tfloat* d_projweights, int3 dimsproj, uint rmax, tfloat3* h_angles, int* h_ivolume, float4 magnification, float ewaldradius, float supersample, bool outputdecentered, bool squareinterpweights, uint batch)
{
glm::mat3* d_matrices;
int* d_ivolume = NULL;
{
glm::mat3* h_matrices = (glm::mat3*)malloc(sizeof(glm::mat3) * batch);
for (int i = 0; i < batch; i++)
h_matrices[i] = glm::transpose(Matrix3Euler(h_angles[i])) * Matrix3Scale(supersample);
d_matrices = (glm::mat3*)CudaMallocFromHostArray(h_matrices, sizeof(glm::mat3) * batch);
free(h_matrices);
if (h_ivolume != NULL)
d_ivolume = (int*)CudaMallocFromHostArray(h_ivolume, sizeof(int) * batch);
}
d_rlnBackproject(d_volumeft, d_volumeweights, dimsvolume, d_projft, d_projweights, dimsproj, rmax, d_matrices, d_ivolume, magnification, ewaldradius * supersample, outputdecentered, squareinterpweights, batch);
{
hipFree(d_matrices);
if (d_ivolume != NULL)
hipFree(d_ivolume);
}
}
void d_rlnBackproject(tcomplex* d_volumeft, tfloat* d_volumeweights, int3 dimsvolume, tcomplex* d_projft, tfloat* d_projweights, int3 dimsproj, uint rmax, glm::mat3* d_matrices, int* d_ivolume, float4 magnification, float ewaldradiussuper, bool outputdecentered, bool squareinterpweights, uint batch)
{
uint ndimsvolume = DimensionCount(dimsvolume);
uint ndimsproj = DimensionCount(dimsproj);
if (ndimsvolume < ndimsproj)
throw;
rmax = tmin(rmax, dimsproj.x / 2);
glm::mat2 m_magnification;
m_magnification[0][0] = magnification.x;
m_magnification[0][1] = magnification.y;
m_magnification[1][0] = magnification.z;
m_magnification[1][1] = magnification.w;
float ewalddiameterinv = ewaldradiussuper == 0 ? 0 : 1.0f / (2.0f * ewaldradiussuper);
if (ndimsvolume == 3)
{
dim3 grid = dim3(1, batch, 1);
uint elements = ElementsFFT(dimsproj);
if (squareinterpweights)
{
if (ndimsproj == 2)
{
if (outputdecentered)
Backproject3DtoNDKernel<2, true, true> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
else
Backproject3DtoNDKernel<2, false, true> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
}
else if (ndimsproj == 3)
{
if (outputdecentered)
Backproject3DtoNDKernel<3, true, true> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
else
Backproject3DtoNDKernel<3, false, true> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
}
}
else
{
if (ndimsproj == 2)
{
if (outputdecentered)
Backproject3DtoNDKernel<2, true, false> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
else
Backproject3DtoNDKernel<2, false, false> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
}
else if (ndimsproj == 3)
{
if (outputdecentered)
Backproject3DtoNDKernel<3, true, false> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
else
Backproject3DtoNDKernel<3, false, false> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
}
}
}
else
{
/*hipMemcpyToSymbol(c_backmatrices, d_matrices, batch * sizeof(glm::mat3), 0, hipMemcpyDeviceToDevice);
dim3 grid = dim3(1, batch, 1);
uint elements = ElementsFFT(dimsproj);
uint TpB = 1 << tmin(7, tmax(7, (uint)(log(elements / 4.0) / log(2.0))));
if (TpB == 32)
Project2Dto2DKernel<32> << <grid, 32 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else if (TpB == 64)
Project2Dto2DKernel<64> << <grid, 64 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else if (TpB == 128)
Project2Dto2DKernel<128> << <grid, 128 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else if (TpB == 256)
Project2Dto2DKernel<256> << <grid, 256 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else
throw;*/
}
}
template<uint ndims, bool decentered, bool squareinterpweights> __global__ void Backproject3DtoNDKernel(tcomplex* d_volumeft,
tfloat* d_volumeweights,
uint dimvolume,
tcomplex* d_projft,
tfloat* d_projweights,
uint dimproj,
size_t elementsproj,
glm::mat3* d_rotations,
int* d_ivolume,
glm::mat2 magnification,
float ewalddiameterinv,
uint rmax,
int rmax2)
{
if (d_projft != NULL)
d_projft += elementsproj * blockIdx.y;
if (d_projweights != NULL)
d_projweights += elementsproj * blockIdx.y;
if (d_ivolume != NULL)
{
int ivolume = d_ivolume[blockIdx.y];
if (d_volumeft != NULL)
d_volumeft += ElementsFFT1(dimvolume) * dimvolume * dimvolume * ivolume;
if (d_volumeweights != NULL)
d_volumeweights += ElementsFFT1(dimvolume) * dimvolume * dimvolume * ivolume;
}
uint slice = ndims == 3 ? ElementsFFT1(dimproj) * dimproj : 1;
uint dimft = ElementsFFT1(dimproj);
uint dimvolumeft = ElementsFFT1(dimvolume);
glm::mat3 rotation = d_rotations[blockIdx.y];
for (uint id = threadIdx.x; id < elementsproj; id += blockDim.x)
{
uint idx = id % dimft;
uint idy = (ndims == 3 ? id % slice : id) / dimft;
uint idz = ndims == 3 ? id / slice : 0;
int x = idx;
int y = idy <= dimproj / 2 ? idy : (int)idy - (int)dimproj;
int z = ndims == 3 ? (idz <= dimproj / 2 ? idz : (int)idz - (int)dimproj) : 0;
if (ndims == 3)
{
if (x == 0 && y < 0 && z < 0)
continue;
}
else
{
if (x == 0 && y < 0)
continue;
}
glm::vec2 posmag = glm::vec2(x, y);
if (ndims == 2)
posmag = magnification * posmag;
glm::vec3 pos = glm::vec3(posmag.x, posmag.y, z);
if (ndims == 2)
pos.z = ewalddiameterinv * (x * x + y * y);
int r2 = ndims == 3 ? (z * z + y * y + x * x) : (pos.x * pos.x + pos.y * pos.y + pos.z * pos.z);
if (r2 >= rmax2)
continue;
pos = rotation * pos;
// Trilinear interpolation
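            // each Fourier sample is scattered onto the 8 surrounding voxels with
            // trilinear weights; atomicAdd is needed because many threads and
            // projections can hit the same voxel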
short x0 = (short)floor(pos.x + 1e-5f);
pos.x -= x0;
short x1 = x0 + 1;
short y0 = (short)floor(pos.y);
pos.y -= y0;
short y1 = y0 + 1;
short z0 = (short)floor(pos.z);
pos.z -= z0;
short z1 = z0 + 1;
short3 positions[8];
positions[0] = make_short3(x0, y0, z0);
positions[1] = make_short3(x1, y0, z0);
positions[2] = make_short3(x0, y1, z0);
positions[3] = make_short3(x1, y1, z0);
positions[4] = make_short3(x0, y0, z1);
positions[5] = make_short3(x1, y0, z1);
positions[6] = make_short3(x0, y1, z1);
positions[7] = make_short3(x1, y1, z1);
float c0 = 1.0f - pos.z;
float c1 = pos.z;
half c00 = __float2half((1.0f - pos.y) * c0);
half c10 = __float2half(pos.y * c0);
half c01 = __float2half((1.0f - pos.y) * c1);
half c11 = __float2half(pos.y * c1);
half interpw[8];
interpw[0] = __float2half((1.0f - pos.x) * __half2float(c00));
interpw[1] = __float2half(pos.x * __half2float(c00));
interpw[2] = __float2half((1.0f - pos.x) * __half2float(c10));
interpw[3] = __float2half(pos.x * __half2float(c10));
interpw[4] = __float2half((1.0f - pos.x) * __half2float(c01));
interpw[5] = __float2half(pos.x * __half2float(c01));
interpw[6] = __float2half((1.0f - pos.x) * __half2float(c11));
interpw[7] = __float2half(pos.x * __half2float(c11));
tcomplex val = make_float2(1, 0);
if (d_projft != NULL)
val = d_projft[id];
tfloat weight = 1;
if (d_projweights != NULL)
weight = d_projweights[id];
for (uint i = 0; i < 8; i++)
{
tcomplex valsym = val;
short3 position = positions[i];
if (positions[i].x < 0)
{
position.x *= -1;
position.y *= -1;
position.z *= -1;
valsym.y *= -1;
}
if (decentered)
{
position.y = position.y < 0 ? position.y + dimvolume : position.y;
position.z = position.z < 0 ? position.z + dimvolume : position.z;
}
else
{
position.y += dimvolume / 2;
position.z += dimvolume / 2;
}
position.x = tmin(dimvolume / 2, position.x);
position.y = tmax(0, tmin(dimvolume - 1, position.y));
position.z = tmax(0, tmin(dimvolume - 1, position.z));
float interpweight = __half2float(interpw[i]);
if (squareinterpweights)
interpweight *= interpweight;
if (d_volumeft != NULL)
{
atomicAdd((tfloat*)(d_volumeft + (position.z * dimvolume + position.y) * dimvolumeft + position.x), interpweight * valsym.x);
atomicAdd((tfloat*)(d_volumeft + (position.z * dimvolume + position.y) * dimvolumeft + position.x) + 1, interpweight * valsym.y);
}
if (d_volumeweights != NULL)
atomicAdd((tfloat*)(d_volumeweights + (position.z * dimvolume + position.y) * dimvolumeft + position.x), interpweight * weight);
if (positions[i].x == 0 && (positions[i].y != 0 || positions[i].z != 0))
{
position = positions[i];
position.x *= -1;
position.y *= -1;
position.z *= -1;
if (decentered)
{
position.y = position.y < 0 ? position.y + dimvolume : position.y;
position.z = position.z < 0 ? position.z + dimvolume : position.z;
}
else
{
position.y += dimvolume / 2;
position.z += dimvolume / 2;
}
position.x = tmin(dimvolume / 2, position.x);
position.y = tmax(0, tmin(dimvolume - 1, position.y));
position.z = tmax(0, tmin(dimvolume - 1, position.z));
if (d_volumeft != NULL)
{
atomicAdd((tfloat*)(d_volumeft + (position.z * dimvolume + position.y) * dimvolumeft + position.x), interpweight * valsym.x);
atomicAdd((tfloat*)(d_volumeft + (position.z * dimvolume + position.y) * dimvolumeft + position.x) + 1, interpweight * (-valsym.y));
}
if (d_volumeweights != NULL)
atomicAdd((tfloat*)(d_volumeweights + (position.z * dimvolume + position.y) * dimvolumeft + position.x), interpweight * weight);
}
}
}
}
}
| 41156eaee4c98ca4f9c37a493ad22b72733f7ffb.cu | #include "Prerequisites.cuh"
#include "Angles.cuh"
#include "Helper.cuh"
#include "Relion.cuh"
namespace gtom
{
//template<uint TpB> __global__ void Project2Dto2DKernel(cudaTex t_volumeRe, cudaTex t_volumeIm, uint dimvolume, tcomplex* d_proj, uint dimproj, uint rmax, uint rmax2);
template<uint ndims, bool decentered, bool squareinterpweights> __global__ void Backproject3DtoNDKernel(tcomplex* d_volumeft,
tfloat* d_volumeweights,
uint dimvolume,
tcomplex* d_projft,
tfloat* d_projweights,
uint dimproj,
size_t elementsproj,
glm::mat3* d_rotations,
int* d_ivolume,
glm::mat2 magnification,
float ewalddiameterinv,
uint rmax,
int rmax2);
void d_rlnBackproject(tcomplex* d_volumeft, tfloat* d_volumeweights, int3 dimsvolume, tcomplex* d_projft, tfloat* d_projweights, int3 dimsproj, uint rmax, tfloat3* h_angles, int* h_ivolume, float4 magnification, float ewaldradius, float supersample, bool outputdecentered, bool squareinterpweights, uint batch)
{
glm::mat3* d_matrices;
int* d_ivolume = NULL;
{
glm::mat3* h_matrices = (glm::mat3*)malloc(sizeof(glm::mat3) * batch);
for (int i = 0; i < batch; i++)
h_matrices[i] = glm::transpose(Matrix3Euler(h_angles[i])) * Matrix3Scale(supersample);
d_matrices = (glm::mat3*)CudaMallocFromHostArray(h_matrices, sizeof(glm::mat3) * batch);
free(h_matrices);
if (h_ivolume != NULL)
d_ivolume = (int*)CudaMallocFromHostArray(h_ivolume, sizeof(int) * batch);
}
d_rlnBackproject(d_volumeft, d_volumeweights, dimsvolume, d_projft, d_projweights, dimsproj, rmax, d_matrices, d_ivolume, magnification, ewaldradius * supersample, outputdecentered, squareinterpweights, batch);
{
cudaFree(d_matrices);
if (d_ivolume != NULL)
cudaFree(d_ivolume);
}
}
void d_rlnBackproject(tcomplex* d_volumeft, tfloat* d_volumeweights, int3 dimsvolume, tcomplex* d_projft, tfloat* d_projweights, int3 dimsproj, uint rmax, glm::mat3* d_matrices, int* d_ivolume, float4 magnification, float ewaldradiussuper, bool outputdecentered, bool squareinterpweights, uint batch)
{
uint ndimsvolume = DimensionCount(dimsvolume);
uint ndimsproj = DimensionCount(dimsproj);
if (ndimsvolume < ndimsproj)
throw;
rmax = tmin(rmax, dimsproj.x / 2);
glm::mat2 m_magnification;
m_magnification[0][0] = magnification.x;
m_magnification[0][1] = magnification.y;
m_magnification[1][0] = magnification.z;
m_magnification[1][1] = magnification.w;
float ewalddiameterinv = ewaldradiussuper == 0 ? 0 : 1.0f / (2.0f * ewaldradiussuper);
if (ndimsvolume == 3)
{
dim3 grid = dim3(1, batch, 1);
uint elements = ElementsFFT(dimsproj);
if (squareinterpweights)
{
if (ndimsproj == 2)
{
if (outputdecentered)
Backproject3DtoNDKernel<2, true, true> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
else
Backproject3DtoNDKernel<2, false, true> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
}
else if (ndimsproj == 3)
{
if (outputdecentered)
Backproject3DtoNDKernel<3, true, true> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
else
Backproject3DtoNDKernel<3, false, true> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
}
}
else
{
if (ndimsproj == 2)
{
if (outputdecentered)
Backproject3DtoNDKernel<2, true, false> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
else
Backproject3DtoNDKernel<2, false, false> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
}
else if (ndimsproj == 3)
{
if (outputdecentered)
Backproject3DtoNDKernel<3, true, false> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
else
Backproject3DtoNDKernel<3, false, false> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, ewalddiameterinv, rmax, rmax * rmax);
}
}
}
else
{
/*cudaMemcpyToSymbol(c_backmatrices, d_matrices, batch * sizeof(glm::mat3), 0, cudaMemcpyDeviceToDevice);
dim3 grid = dim3(1, batch, 1);
uint elements = ElementsFFT(dimsproj);
uint TpB = 1 << tmin(7, tmax(7, (uint)(log(elements / 4.0) / log(2.0))));
if (TpB == 32)
Project2Dto2DKernel<32> << <grid, 32 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else if (TpB == 64)
Project2Dto2DKernel<64> << <grid, 64 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else if (TpB == 128)
Project2Dto2DKernel<128> << <grid, 128 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else if (TpB == 256)
Project2Dto2DKernel<256> << <grid, 256 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else
throw;*/
}
}
template<uint ndims, bool decentered, bool squareinterpweights> __global__ void Backproject3DtoNDKernel(tcomplex* d_volumeft,
tfloat* d_volumeweights,
uint dimvolume,
tcomplex* d_projft,
tfloat* d_projweights,
uint dimproj,
size_t elementsproj,
glm::mat3* d_rotations,
int* d_ivolume,
glm::mat2 magnification,
float ewalddiameterinv,
uint rmax,
int rmax2)
{
if (d_projft != NULL)
d_projft += elementsproj * blockIdx.y;
if (d_projweights != NULL)
d_projweights += elementsproj * blockIdx.y;
if (d_ivolume != NULL)
{
int ivolume = d_ivolume[blockIdx.y];
if (d_volumeft != NULL)
d_volumeft += ElementsFFT1(dimvolume) * dimvolume * dimvolume * ivolume;
if (d_volumeweights != NULL)
d_volumeweights += ElementsFFT1(dimvolume) * dimvolume * dimvolume * ivolume;
}
uint slice = ndims == 3 ? ElementsFFT1(dimproj) * dimproj : 1;
uint dimft = ElementsFFT1(dimproj);
uint dimvolumeft = ElementsFFT1(dimvolume);
glm::mat3 rotation = d_rotations[blockIdx.y];
for (uint id = threadIdx.x; id < elementsproj; id += blockDim.x)
{
uint idx = id % dimft;
uint idy = (ndims == 3 ? id % slice : id) / dimft;
uint idz = ndims == 3 ? id / slice : 0;
int x = idx;
int y = idy <= dimproj / 2 ? idy : (int)idy - (int)dimproj;
int z = ndims == 3 ? (idz <= dimproj / 2 ? idz : (int)idz - (int)dimproj) : 0;
if (ndims == 3)
{
if (x == 0 && y < 0 && z < 0)
continue;
}
else
{
if (x == 0 && y < 0)
continue;
}
glm::vec2 posmag = glm::vec2(x, y);
if (ndims == 2)
posmag = magnification * posmag;
glm::vec3 pos = glm::vec3(posmag.x, posmag.y, z);
if (ndims == 2)
pos.z = ewalddiameterinv * (x * x + y * y);
int r2 = ndims == 3 ? (z * z + y * y + x * x) : (pos.x * pos.x + pos.y * pos.y + pos.z * pos.z);
if (r2 >= rmax2)
continue;
pos = rotation * pos;
// Trilinear interpolation
short x0 = (short)floor(pos.x + 1e-5f);
pos.x -= x0;
short x1 = x0 + 1;
short y0 = (short)floor(pos.y);
pos.y -= y0;
short y1 = y0 + 1;
short z0 = (short)floor(pos.z);
pos.z -= z0;
short z1 = z0 + 1;
short3 positions[8];
positions[0] = make_short3(x0, y0, z0);
positions[1] = make_short3(x1, y0, z0);
positions[2] = make_short3(x0, y1, z0);
positions[3] = make_short3(x1, y1, z0);
positions[4] = make_short3(x0, y0, z1);
positions[5] = make_short3(x1, y0, z1);
positions[6] = make_short3(x0, y1, z1);
positions[7] = make_short3(x1, y1, z1);
float c0 = 1.0f - pos.z;
float c1 = pos.z;
half c00 = __float2half((1.0f - pos.y) * c0);
half c10 = __float2half(pos.y * c0);
half c01 = __float2half((1.0f - pos.y) * c1);
half c11 = __float2half(pos.y * c1);
half interpw[8];
interpw[0] = __float2half((1.0f - pos.x) * __half2float(c00));
interpw[1] = __float2half(pos.x * __half2float(c00));
interpw[2] = __float2half((1.0f - pos.x) * __half2float(c10));
interpw[3] = __float2half(pos.x * __half2float(c10));
interpw[4] = __float2half((1.0f - pos.x) * __half2float(c01));
interpw[5] = __float2half(pos.x * __half2float(c01));
interpw[6] = __float2half((1.0f - pos.x) * __half2float(c11));
interpw[7] = __float2half(pos.x * __half2float(c11));
tcomplex val = make_float2(1, 0);
if (d_projft != NULL)
val = d_projft[id];
tfloat weight = 1;
if (d_projweights != NULL)
weight = d_projweights[id];
for (uint i = 0; i < 8; i++)
{
tcomplex valsym = val;
short3 position = positions[i];
if (positions[i].x < 0)
{
position.x *= -1;
position.y *= -1;
position.z *= -1;
valsym.y *= -1;
}
if (decentered)
{
position.y = position.y < 0 ? position.y + dimvolume : position.y;
position.z = position.z < 0 ? position.z + dimvolume : position.z;
}
else
{
position.y += dimvolume / 2;
position.z += dimvolume / 2;
}
position.x = tmin(dimvolume / 2, position.x);
position.y = tmax(0, tmin(dimvolume - 1, position.y));
position.z = tmax(0, tmin(dimvolume - 1, position.z));
float interpweight = __half2float(interpw[i]);
if (squareinterpweights)
interpweight *= interpweight;
if (d_volumeft != NULL)
{
atomicAdd((tfloat*)(d_volumeft + (position.z * dimvolume + position.y) * dimvolumeft + position.x), interpweight * valsym.x);
atomicAdd((tfloat*)(d_volumeft + (position.z * dimvolume + position.y) * dimvolumeft + position.x) + 1, interpweight * valsym.y);
}
if (d_volumeweights != NULL)
atomicAdd((tfloat*)(d_volumeweights + (position.z * dimvolume + position.y) * dimvolumeft + position.x), interpweight * weight);
if (positions[i].x == 0 && (positions[i].y != 0 || positions[i].z != 0))
{
position = positions[i];
position.x *= -1;
position.y *= -1;
position.z *= -1;
if (decentered)
{
position.y = position.y < 0 ? position.y + dimvolume : position.y;
position.z = position.z < 0 ? position.z + dimvolume : position.z;
}
else
{
position.y += dimvolume / 2;
position.z += dimvolume / 2;
}
position.x = tmin(dimvolume / 2, position.x);
position.y = tmax(0, tmin(dimvolume - 1, position.y));
position.z = tmax(0, tmin(dimvolume - 1, position.z));
if (d_volumeft != NULL)
{
atomicAdd((tfloat*)(d_volumeft + (position.z * dimvolume + position.y) * dimvolumeft + position.x), interpweight * valsym.x);
atomicAdd((tfloat*)(d_volumeft + (position.z * dimvolume + position.y) * dimvolumeft + position.x) + 1, interpweight * (-valsym.y));
}
if (d_volumeweights != NULL)
atomicAdd((tfloat*)(d_volumeweights + (position.z * dimvolume + position.y) * dimvolumeft + position.x), interpweight * weight);
}
}
}
}
}
|
0eff6baa4b651715ac75412543f4042106b609f2.hip | // !!! This is a file automatically generated by hipify!!!
/**
* bicg.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "../common/polybenchUtilFuncts.h"
//Error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size. */
#define NX 4096
#define NY 4096
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *r)
{
int i, j;
for (i = 0; i < NX; i++)
{
r[i] = i * M_PI;
for (j = 0; j < NY; j++)
{
A[i*NY + j] = ((DATA_TYPE) i*j) / NX;
}
}
for (i = 0; i < NY; i++)
{
p[i] = i * M_PI;
}
}
void compareResults(DATA_TYPE* s, DATA_TYPE* s_outputFromGpu, DATA_TYPE* q, DATA_TYPE* q_outputFromGpu)
{
int i,fail;
fail = 0;
// Compare s with s_cuda
for (i=0; i<NX; i++)
{
if (percentDiff(q[i], q_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
for (i=0; i<NY; i++)
{
if (percentDiff(s[i], s_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
//Distributed (split) from initial loop and permuted into reverse order to allow parallelism...
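//Computes s = A^T * r: one thread per column j, reducing over the NX rows of A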
__global__ void bicg_kernel1(DATA_TYPE *A, DATA_TYPE *r, DATA_TYPE *s)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < NY)
{
s[j] = 0.0f;
int i;
for(i = 0; i < NX; i++)
{
s[j] += A[i * NY + j] * r[i];
}
}
}
//Distributed (split) from initial loop to allow parallelism
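//Computes q = A * p: one thread per row i, reducing over the NY columns of A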
__global__ void bicg_kernel2(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *q)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NX)
{
q[i] = 0.0f;
int j;
for(j=0; j < NY; j++)
{
q[i] += A[i * NY + j] * p[j];
}
}
}
void bicg_cpu(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q)
{
int i,j;
for (i = 0; i < NY; i++)
{
s[i] = 0.0;
}
for (i = 0; i < NX; i++)
{
q[i] = 0.0;
for (j = 0; j < NY; j++)
{
s[j] = s[j] + r[i] * A[i*NY + j];
q[i] = q[i] + A[i*NY + j] * p[j];
}
}
}
void bicgCuda(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q,
DATA_TYPE* s_outputFromGpu, DATA_TYPE* q_outputFromGpu)
{
double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *q_gpu;
DATA_TYPE *p_gpu;
DATA_TYPE *r_gpu;
DATA_TYPE *s_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
hipMalloc((void **)&r_gpu, sizeof(DATA_TYPE) * NX);
hipMalloc((void **)&s_gpu, sizeof(DATA_TYPE) * NY);
hipMalloc((void **)&p_gpu, sizeof(DATA_TYPE) * NY);
hipMalloc((void **)&q_gpu, sizeof(DATA_TYPE) * NX);
hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, hipMemcpyHostToDevice);
hipMemcpy(r_gpu, r, sizeof(DATA_TYPE) * NX, hipMemcpyHostToDevice);
hipMemcpy(s_gpu, s, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
hipMemcpy(p_gpu, p, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
hipMemcpy(q_gpu, q, sizeof(DATA_TYPE) * NX, hipMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);
dim3 grid2((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1);
t_start = rtclock();
hipLaunchKernelGGL(( bicg_kernel1), dim3(grid1), dim3(block) , 0, 0, A_gpu, r_gpu, s_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( bicg_kernel2), dim3(grid2), dim3(block) , 0, 0, A_gpu, p_gpu, q_gpu);
hipDeviceSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
hipMemcpy(s_outputFromGpu, s_gpu, sizeof(DATA_TYPE) * NY, hipMemcpyDeviceToHost);
hipMemcpy(q_outputFromGpu, q_gpu, sizeof(DATA_TYPE) * NX, hipMemcpyDeviceToHost);
hipFree(A_gpu);
hipFree(r_gpu);
hipFree(s_gpu);
hipFree(p_gpu);
hipFree(q_gpu);
}
int main(int argc, char** argv)
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* r;
DATA_TYPE* s;
DATA_TYPE* p;
DATA_TYPE* q;
DATA_TYPE* s_outputFromGpu;
DATA_TYPE* q_outputFromGpu;
A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
r = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
s = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
p = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
q = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
s_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
q_outputFromGpu = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
init_array(A, p, r);
GPU_argv_init();
bicgCuda(A, r, s, p, q, s_outputFromGpu, q_outputFromGpu);
t_start = rtclock();
bicg_cpu(A, r, s, p, q);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(s, s_outputFromGpu, q, q_outputFromGpu);
free(A);
free(r);
free(s);
free(p);
free(q);
free(s_outputFromGpu);
free(q_outputFromGpu);
return 0;
}
| 0eff6baa4b651715ac75412543f4042106b609f2.cu | /**
* bicg.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <cuda.h>
#include "../common/polybenchUtilFuncts.h"
//Error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size. */
#define NX 4096
#define NY 4096
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *r)
{
int i, j;
for (i = 0; i < NX; i++)
{
r[i] = i * M_PI;
for (j = 0; j < NY; j++)
{
A[i*NY + j] = ((DATA_TYPE) i*j) / NX;
}
}
for (i = 0; i < NY; i++)
{
p[i] = i * M_PI;
}
}
void compareResults(DATA_TYPE* s, DATA_TYPE* s_outputFromGpu, DATA_TYPE* q, DATA_TYPE* q_outputFromGpu)
{
int i,fail;
fail = 0;
// Compare s with s_cuda
for (i=0; i<NX; i++)
{
if (percentDiff(q[i], q_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
for (i=0; i<NY; i++)
{
if (percentDiff(s[i], s_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
//Distributed (split) from initial loop and permuted into reverse order to allow parallelism...
__global__ void bicg_kernel1(DATA_TYPE *A, DATA_TYPE *r, DATA_TYPE *s)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < NY)
{
s[j] = 0.0f;
int i;
for(i = 0; i < NX; i++)
{
s[j] += A[i * NY + j] * r[i];
}
}
}
//Distributed (split) from initial loop to allow parallelism
__global__ void bicg_kernel2(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *q)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NX)
{
q[i] = 0.0f;
int j;
for(j=0; j < NY; j++)
{
q[i] += A[i * NY + j] * p[j];
}
}
}
void bicg_cpu(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q)
{
int i,j;
for (i = 0; i < NY; i++)
{
s[i] = 0.0;
}
for (i = 0; i < NX; i++)
{
q[i] = 0.0;
for (j = 0; j < NY; j++)
{
s[j] = s[j] + r[i] * A[i*NY + j];
q[i] = q[i] + A[i*NY + j] * p[j];
}
}
}
void bicgCuda(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q,
DATA_TYPE* s_outputFromGpu, DATA_TYPE* q_outputFromGpu)
{
double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *q_gpu;
DATA_TYPE *p_gpu;
DATA_TYPE *r_gpu;
DATA_TYPE *s_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
cudaMalloc((void **)&r_gpu, sizeof(DATA_TYPE) * NX);
cudaMalloc((void **)&s_gpu, sizeof(DATA_TYPE) * NY);
cudaMalloc((void **)&p_gpu, sizeof(DATA_TYPE) * NY);
cudaMalloc((void **)&q_gpu, sizeof(DATA_TYPE) * NX);
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyHostToDevice);
cudaMemcpy(r_gpu, r, sizeof(DATA_TYPE) * NX, cudaMemcpyHostToDevice);
cudaMemcpy(s_gpu, s, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
cudaMemcpy(p_gpu, p, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
cudaMemcpy(q_gpu, q, sizeof(DATA_TYPE) * NX, cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);
dim3 grid2((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1);
t_start = rtclock();
bicg_kernel1<<< grid1, block >>>(A_gpu, r_gpu, s_gpu);
cudaThreadSynchronize();
bicg_kernel2<<< grid2, block >>>(A_gpu, p_gpu, q_gpu);
cudaThreadSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
cudaMemcpy(s_outputFromGpu, s_gpu, sizeof(DATA_TYPE) * NY, cudaMemcpyDeviceToHost);
cudaMemcpy(q_outputFromGpu, q_gpu, sizeof(DATA_TYPE) * NX, cudaMemcpyDeviceToHost);
cudaFree(A_gpu);
cudaFree(r_gpu);
cudaFree(s_gpu);
cudaFree(p_gpu);
cudaFree(q_gpu);
}
int main(int argc, char** argv)
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* r;
DATA_TYPE* s;
DATA_TYPE* p;
DATA_TYPE* q;
DATA_TYPE* s_outputFromGpu;
DATA_TYPE* q_outputFromGpu;
A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
r = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
s = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
p = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
q = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
s_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
q_outputFromGpu = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
init_array(A, p, r);
GPU_argv_init();
bicgCuda(A, r, s, p, q, s_outputFromGpu, q_outputFromGpu);
t_start = rtclock();
bicg_cpu(A, r, s, p, q);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(s, s_outputFromGpu, q, q_outputFromGpu);
free(A);
free(r);
free(s);
free(p);
free(q);
free(s_outputFromGpu);
free(q_outputFromGpu);
return 0;
}
|
45357e7eaf34098388d40cf642f431279d8241ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_a;
int xdim0_update_halo_kernel5_plus_4_a_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_a;
int ydim0_update_halo_kernel5_plus_4_a_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_a;
int xdim1_update_halo_kernel5_plus_4_a_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_a;
int ydim1_update_halo_kernel5_plus_4_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_4_a*(y)+xdim0_update_halo_kernel5_plus_4_a*ydim0_update_halo_kernel5_plus_4_a*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_4_a*(y)+xdim1_update_halo_kernel5_plus_4_a*ydim1_update_halo_kernel5_plus_4_a*(z))
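//each OPS_ACCn macro flattens a relative (x,y,z) stencil offset into the 1D index of argument n's dat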
//user function
__device__
inline void update_halo_kernel5_plus_4_a_gpu(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = vol_flux_z[OPS_ACC0(0,4,0)];
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = mass_flux_z[OPS_ACC1(0,4,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_4_a(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel5_plus_4_a + idx_z * 1*1 * xdim0_update_halo_kernel5_plus_4_a * ydim0_update_halo_kernel5_plus_4_a;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel5_plus_4_a + idx_z * 1*1 * xdim1_update_halo_kernel5_plus_4_a * ydim1_update_halo_kernel5_plus_4_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_a_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_4_a_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,83)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(83,"update_halo_kernel5_plus_4_a");
OPS_kernels[83].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_a_h || ydim0 != ydim0_update_halo_kernel5_plus_4_a_h || xdim1 != xdim1_update_halo_kernel5_plus_4_a_h || ydim1 != ydim1_update_halo_kernel5_plus_4_a_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel5_plus_4_a, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_plus_4_a_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel5_plus_4_a, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_plus_4_a_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel5_plus_4_a, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_plus_4_a_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel5_plus_4_a, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_plus_4_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
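// base0/base1 accumulate the byte offset of the first element of the local
// iteration range: start[0] elements along x, then start[1] whole rows, then
// start[2] whole planes, each scaled by the element size (dat0/dat1) and the
// stencil stride of that dimension.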
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[83].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_4_a), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[83].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[83].mpi_time += t2-t1;
OPS_kernels[83].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[83].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 83;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 83;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_4_a_execute;
if (OPS_diags > 1) {
ops_timing_realloc(83,"update_halo_kernel5_plus_4_a");
}
ops_enqueue_kernel(desc);
}
#endif
| 45357e7eaf34098388d40cf642f431279d8241ad.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_a;
int xdim0_update_halo_kernel5_plus_4_a_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_a;
int ydim0_update_halo_kernel5_plus_4_a_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_a;
int xdim1_update_halo_kernel5_plus_4_a_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_a;
int ydim1_update_halo_kernel5_plus_4_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_4_a*(y)+xdim0_update_halo_kernel5_plus_4_a*ydim0_update_halo_kernel5_plus_4_a*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_4_a*(y)+xdim1_update_halo_kernel5_plus_4_a*ydim1_update_halo_kernel5_plus_4_a*(z))
//user function
__device__
inline void update_halo_kernel5_plus_4_a_gpu(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = vol_flux_z[OPS_ACC0(0,4,0)];
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = mass_flux_z[OPS_ACC1(0,4,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_4_a(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel5_plus_4_a + idx_z * 1*1 * xdim0_update_halo_kernel5_plus_4_a * ydim0_update_halo_kernel5_plus_4_a;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel5_plus_4_a + idx_z * 1*1 * xdim1_update_halo_kernel5_plus_4_a * ydim1_update_halo_kernel5_plus_4_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_a_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_4_a_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,83)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(83,"update_halo_kernel5_plus_4_a");
OPS_kernels[83].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_a_h || ydim0 != ydim0_update_halo_kernel5_plus_4_a_h || xdim1 != xdim1_update_halo_kernel5_plus_4_a_h || ydim1 != ydim1_update_halo_kernel5_plus_4_a_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel5_plus_4_a, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_plus_4_a_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel5_plus_4_a, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_plus_4_a_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel5_plus_4_a, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_plus_4_a_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel5_plus_4_a, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_plus_4_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[83].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel5_plus_4_a<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[83].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[83].mpi_time += t2-t1;
OPS_kernels[83].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[83].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 83;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 83;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_4_a_execute;
if (OPS_diags > 1) {
ops_timing_realloc(83,"update_halo_kernel5_plus_4_a");
}
ops_enqueue_kernel(desc);
}
#endif
|
eb9d4addf3c34620c10353713fe1fb8ba78bb18c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define n 1024*1024
__global__ void kernel(int a,int *x, int *y)
{
int i=threadIdx.x+blockIdx.x*blockDim.x;
y[i]=a*x[i]+y[i];
}
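// Sizing note: the launch in main uses 256 blocks of 1024 threads = 262144
// threads, which matches n / sizeof(int) elements when int is 4 bytes, so the
// kernel needs no explicit bounds check for this particular n.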
int main(void)
{
float elapsedTime = 0.0f;
hipEvent_t start, stop;
hipError_t err=hipSuccess;
int *host_a,*host_b;
host_b = (int *) malloc(n);
host_a = (int *) malloc(n);
int *dev_array_a,*dev_array_b;
hipMalloc((void **)&dev_array_a, n);
hipMalloc((void **)&dev_array_b, n);
hipMemset(dev_array_b, 0, n); // zero-initialise y, which the kernel reads before writing
for (int i = 0; i < n/sizeof(int); i++) {
host_a[i] = i;
}
hipMemcpy(dev_array_a, host_a, n, hipMemcpyHostToDevice);
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
hipLaunchKernelGGL(( kernel), dim3(256),dim3(1024), 0, 0, 2,dev_array_a,dev_array_b);
hipEventRecord(stop,0);
hipEventSynchronize(stop); //Wait till the event is executed.
hipMemcpy(host_b, dev_array_b, n, hipMemcpyDeviceToHost);
hipEventElapsedTime(&elapsedTime,start,stop);
printf("Time for kernel to exexute:%fms\n",elapsedTime);
printf("Arithmetic Performance = %5f Gflops/s\n\n", n * 1e-6/elapsedTime);
hipEventDestroy(start);
hipEventDestroy(stop);
err=hipGetLastError();
if(err!=hipSuccess) {
fprintf(stderr,"Error executing the kernel - %s\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
| eb9d4addf3c34620c10353713fe1fb8ba78bb18c.cu | #include <stdio.h>
#define n 1024*1024
__global__ void kernel(int a,int *x, int *y)
{
int i=threadIdx.x+blockIdx.x*blockDim.x;
y[i]=a*x[i]+y[i];
}
int main(void)
{
float elapsedTime = 0.0f;
cudaEvent_t start, stop;
cudaError_t err=cudaSuccess;
int *host_a,*host_b;
host_b = (int *) malloc(n);
host_a = (int *) malloc(n);
int *dev_array_a,*dev_array_b;
cudaMalloc((void **)&dev_array_a, n);
cudaMalloc((void **)&dev_array_b, n);
cudaMemset(dev_array_b, 0, n); // zero-initialise y, which the kernel reads before writing
for (int i = 0; i < n/sizeof(int); i++) {
host_a[i] = i;
}
cudaMemcpy(dev_array_a, host_a, n, cudaMemcpyHostToDevice);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
kernel<<<256,1024>>>(2,dev_array_a,dev_array_b);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop); //Wait till the event is executed.
cudaMemcpy(host_b, dev_array_b, n, cudaMemcpyDeviceToHost);
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("Time for kernel to exexute:%fms\n",elapsedTime);
printf("Arithmetic Performance = %5f Gflops/s\n\n", n * 1e-6/elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
err=cudaGetLastError();
if(err!=cudaSuccess) {
fprintf(stderr,"Error executing the kernel - %s\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
|
d763b5d72236a32dcb003149adc02c9a8b3340af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/PReLU.cu"
#else
void THNN_(PReLU_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight)
{
THCTensor_(resizeAs)(state, output, input);
int64_t nOutputPlane = THCTensor_(numel)(state, weight);
weight = THCTensor_(newContiguous)(state, weight);
real *w = THCTensor_(data)(state, weight);
if (nOutputPlane == 1)
{
THC_pointwiseApply2<real, real>(state, output, input, PReLUUpdateOutput<real>(w));
}
else
{
int ndim = THCTensor_(nDimensionLegacyAll)(state, input);
input = THCTensor_(newContiguous)(state, input);
int n = THCTensor_(nElement)(state, input);
if (input->size(ndim > 1) != nOutputPlane)
THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size(ndim > 1));
int mapSize = 1;
for (int d = 2; d < ndim; d++) {
mapSize *= input->size(d);
}
int nElemsPerSample = nOutputPlane * mapSize;
hipLaunchKernelGGL(( preluForward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
w,
n, nElemsPerSample, mapSize
);
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
}
THCTensor_(free)(state, weight);
}
void THNN_(PReLU_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight)
{
THCUNN_check_nElement(state, input, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
int64_t nOutputPlane = THCTensor_(numel)(state, weight);
weight = THCTensor_(newContiguous)(state, weight);
real *w = THCTensor_(data)(state, weight);
if (nOutputPlane == 1)
{
THC_pointwiseApply3<real, real, real>(state, gradInput, gradOutput, input, PReLUUpdateGradInput<real>(w));
}
else
{
int ndim = THCTensor_(nDimensionLegacyAll)(state, input);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int n = THCTensor_(nElement)(state, input);
if (input->size(ndim > 1) != nOutputPlane)
THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size(ndim > 1));
int mapSize = 1;
for (int d = 2; d < ndim; d++) {
mapSize *= input->size(d);
}
int nElemsPerSample = nOutputPlane * mapSize;
hipLaunchKernelGGL(( preluBackward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, input),
w,
THCTensor_(data)(state, gradOutput),
n, nElemsPerSample, mapSize
);
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
THCTensor_(free)(state, weight);
}
void THNN_(PReLU_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
THCTensor *gradWeight,
accreal scale_)
{
real scale = ScalarConvert<accreal, real>::to(scale_);
THCUNN_check_nElement(state, input, gradOutput);
int64_t nOutputPlane = THCTensor_(numel)(state, weight);
// use grad input for temporary storage, then call updateGradInput again
if (nOutputPlane == 1)
{
THC_pointwiseApply3<real, real, real>(state, gradInput, input, gradOutput, PReLUAccGradParametersShared<real>());
// introduces a sync point
real sum = ScalarConvert<accreal, real>::to(THCTensor_(sumall)(state, gradInput));
real w = THCTensor_(get1d)(state, gradWeight, 0);
THCTensor_(set1d)(state, gradWeight, 0, w + sum * scale);
// restore gradInput
THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight);
}
else
{
int ndim = THCTensor_(nDimensionLegacyAll)(state, input);
if (ndim == 1)
{
THC_pointwiseApply3<real, real, real>(state, gradWeight, input, gradOutput, PReLUAccGradParameters1to1<real>(scale));
}
else
{
THC_pointwiseApply3<real, real, real>(state, gradInput, input, gradOutput, PReLUAccGradParameters<real>(scale));
THCTensor *gradWeightBuf = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, gradWeightBuf, gradWeight);
if (ndim == 2)
{
THCTensor_(sum)(state, gradWeightBuf, gradInput, 0, 1);
THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf);
}
else
{
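// For inputs with spatial dimensions, the per-element gradient contributions
// staged in gradInput are reduced to one value per output plane: view the
// buffer as (batch, nOutputPlane, spatial), sum over the spatial axis, then
// over the batch axis, and accumulate the result into gradWeight scaled by
// `scale`.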
THCTensor *sumbuf = THCTensor_(new)(state);
THCTensor *buffer = THCTensor_(newContiguous)(state, gradInput);
int64_t size3 = 1;
for (int d = 2; d < ndim; d++) {
size3 *= input->size(d);
}
THCTensor_(resize3d)(state, buffer, input->size(0), nOutputPlane, size3);
THCTensor_(resize2d)(state, sumbuf, input->size(0), nOutputPlane);
THCTensor_(sum)(state, sumbuf, buffer, 2, 1);
THCTensor_(sum)(state, gradWeightBuf, sumbuf, 0, 1);
THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf);
THCTensor_(free)(state, buffer);
THCTensor_(free)(state, sumbuf);
}
THCTensor_(free)(state, gradWeightBuf);
// restore gradInput
THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight);
}
}
}
#endif
| d763b5d72236a32dcb003149adc02c9a8b3340af.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/PReLU.cu"
#else
void THNN_(PReLU_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight)
{
THCTensor_(resizeAs)(state, output, input);
int64_t nOutputPlane = THCTensor_(numel)(state, weight);
weight = THCTensor_(newContiguous)(state, weight);
real *w = THCTensor_(data)(state, weight);
if (nOutputPlane == 1)
{
THC_pointwiseApply2<real, real>(state, output, input, PReLUUpdateOutput<real>(w));
}
else
{
int ndim = THCTensor_(nDimensionLegacyAll)(state, input);
input = THCTensor_(newContiguous)(state, input);
int n = THCTensor_(nElement)(state, input);
if (input->size(ndim > 1) != nOutputPlane)
THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size(ndim > 1));
int mapSize = 1;
for (int d = 2; d < ndim; d++) {
mapSize *= input->size(d);
}
int nElemsPerSample = nOutputPlane * mapSize;
preluForward<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
w,
n, nElemsPerSample, mapSize
);
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
}
THCTensor_(free)(state, weight);
}
void THNN_(PReLU_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight)
{
THCUNN_check_nElement(state, input, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
int64_t nOutputPlane = THCTensor_(numel)(state, weight);
weight = THCTensor_(newContiguous)(state, weight);
real *w = THCTensor_(data)(state, weight);
if (nOutputPlane == 1)
{
THC_pointwiseApply3<real, real, real>(state, gradInput, gradOutput, input, PReLUUpdateGradInput<real>(w));
}
else
{
int ndim = THCTensor_(nDimensionLegacyAll)(state, input);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int n = THCTensor_(nElement)(state, input);
if (input->size(ndim > 1) != nOutputPlane)
THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size(ndim > 1));
int mapSize = 1;
for (int d = 2; d < ndim; d++) {
mapSize *= input->size(d);
}
int nElemsPerSample = nOutputPlane * mapSize;
preluBackward<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, input),
w,
THCTensor_(data)(state, gradOutput),
n, nElemsPerSample, mapSize
);
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
THCTensor_(free)(state, weight);
}
void THNN_(PReLU_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
THCTensor *gradWeight,
accreal scale_)
{
real scale = ScalarConvert<accreal, real>::to(scale_);
THCUNN_check_nElement(state, input, gradOutput);
int64_t nOutputPlane = THCTensor_(numel)(state, weight);
// use grad input for temporary storage, then call updateGradInput again
if (nOutputPlane == 1)
{
THC_pointwiseApply3<real, real, real>(state, gradInput, input, gradOutput, PReLUAccGradParametersShared<real>());
// introduces a sync point
real sum = ScalarConvert<accreal, real>::to(THCTensor_(sumall)(state, gradInput));
real w = THCTensor_(get1d)(state, gradWeight, 0);
THCTensor_(set1d)(state, gradWeight, 0, w + sum * scale);
// restore gradInput
THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight);
}
else
{
int ndim = THCTensor_(nDimensionLegacyAll)(state, input);
if (ndim == 1)
{
THC_pointwiseApply3<real, real, real>(state, gradWeight, input, gradOutput, PReLUAccGradParameters1to1<real>(scale));
}
else
{
THC_pointwiseApply3<real, real, real>(state, gradInput, input, gradOutput, PReLUAccGradParameters<real>(scale));
THCTensor *gradWeightBuf = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, gradWeightBuf, gradWeight);
if (ndim == 2)
{
THCTensor_(sum)(state, gradWeightBuf, gradInput, 0, 1);
THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf);
}
else
{
THCTensor *sumbuf = THCTensor_(new)(state);
THCTensor *buffer = THCTensor_(newContiguous)(state, gradInput);
int64_t size3 = 1;
for (int d = 2; d < ndim; d++) {
size3 *= input->size(d);
}
THCTensor_(resize3d)(state, buffer, input->size(0), nOutputPlane, size3);
THCTensor_(resize2d)(state, sumbuf, input->size(0), nOutputPlane);
THCTensor_(sum)(state, sumbuf, buffer, 2, 1);
THCTensor_(sum)(state, gradWeightBuf, sumbuf, 0, 1);
THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf);
THCTensor_(free)(state, buffer);
THCTensor_(free)(state, sumbuf);
}
THCTensor_(free)(state, gradWeightBuf);
// restore gradInput
THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight);
}
}
}
#endif
|
89a0c98bec8b0313cc63636925d87cb67f0d2406.hip | // !!! This is a file automatically generated by hipify!!!
/*
* reference: CUDA Samples deviceQuery.cpp
* Compiling: $ nvcc gpu_info_check.cu
*
*/
// CUDA-C includes
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "./utils/book.h"
int main( void ) {
int count;
HANDLE_ERROR( hipGetDeviceCount( &count ) ); // get the number of devices
int driverVersion = 0, runtimeVersion = 0;
for (int i=0; i< count; i++) {
hipSetDevice(i); // set the device to query
hipDeviceProp_t deviceProp;
HANDLE_ERROR( hipGetDeviceProperties( &deviceProp, i ) );
printf( " --- General Information for device %d ---\n", i );
printf("Device %d: \"%s\"\n", i, deviceProp.name);
hipDriverGetVersion(&driverVersion);
hipRuntimeGetVersion(&runtimeVersion);
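// Driver/runtime versions are reported as single integers (e.g. 10020); the
// printf below decodes them as major = value/1000 and minor = (value%100)/10,
// so 10020 prints as 10.2.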
printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor);
char msg[256];
sprintf(msg, " Total amount of global memory: %.0f MBytes (%llu bytes)\n",
(float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
printf("%s", msg);
printf( "Compute capability: %d.%d\n", deviceProp.major, deviceProp.minor );
printf( "Clock rate: %d\n", deviceProp.clockRate );
printf( "Device copy overlap: " );
if (deviceProp.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (deviceProp.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", deviceProp.totalGlobalMem );
printf( "Total constant Mem: %ld\n", deviceProp.totalConstMem );
printf( "Max mem pitch: %ld\n", deviceProp.memPitch );
printf( "32-bits register per block", deviceProp.regsPerBlock);
printf( "Texture Alignment: %ld\n", deviceProp.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Shared mem per mp: %ld\n", deviceProp.sharedMemPerBlock );
printf( "Registers per mp: %d\n", deviceProp.regsPerBlock );
printf( "Threads in warp: %d\n", deviceProp.warpSize );
printf( "Max threads per block: %d\n",
deviceProp.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
deviceProp.maxGridSize[0], deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2] );
printf( "Memory Bus Width in bits: %d\n",deviceProp.memoryBusWidth);
printf( "\n" );
}
}
| 89a0c98bec8b0313cc63636925d87cb67f0d2406.cu | /*
* reference: CUDA Samples deviceQuery.cpp
* Compiling: $ nvcc gpu_info_check.cu
*
*/
// CUDA-C includes
#include <cuda.h>
#include <cuda_runtime.h>
#include "./utils/book.h"
int main( void ) {
int count;
HANDLE_ERROR( cudaGetDeviceCount( &count ) ); // get the number of devices
int driverVersion = 0, runtimeVersion = 0;
for (int i=0; i< count; i++) {
cudaSetDevice(i); // set the device to query
cudaDeviceProp deviceProp;
HANDLE_ERROR( cudaGetDeviceProperties( &deviceProp, i ) );
printf( " --- General Information for device %d ---\n", i );
printf("Device %d: \"%s\"\n", i, deviceProp.name);
cudaDriverGetVersion(&driverVersion);
cudaRuntimeGetVersion(&runtimeVersion);
printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor);
char msg[256];
sprintf(msg, " Total amount of global memory: %.0f MBytes (%llu bytes)\n",
(float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
printf("%s", msg);
printf( "Compute capability: %d.%d\n", deviceProp.major, deviceProp.minor );
printf( "Clock rate: %d\n", deviceProp.clockRate );
printf( "Device copy overlap: " );
if (deviceProp.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (deviceProp.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", deviceProp.totalGlobalMem );
printf( "Total constant Mem: %ld\n", deviceProp.totalConstMem );
printf( "Max mem pitch: %ld\n", deviceProp.memPitch );
printf( "32-bits register per block", deviceProp.regsPerBlock);
printf( "Texture Alignment: %ld\n", deviceProp.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Shared mem per mp: %ld\n", deviceProp.sharedMemPerBlock );
printf( "Registers per mp: %d\n", deviceProp.regsPerBlock );
printf( "Threads in warp: %d\n", deviceProp.warpSize );
printf( "Max threads per block: %d\n",
deviceProp.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
deviceProp.maxGridSize[0], deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2] );
printf( "Memory Bus Width in bits: %d\n",deviceProp.memoryBusWidth);
printf( "\n" );
}
}
|
0db30b2c55cf3c3eb4b7009454810755bc7fe6b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
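// A concrete picture of the AoS -> SoA split that separateChannels performs
// below (the alpha byte is dropped):
//   AoS input : R0 G0 B0 A0  R1 G1 B1 A1  R2 G2 B2 A2 ...
//   SoA output: [R0 R1 R2 ...]  [G0 G1 G2 ...]  [B0 B1 B2 ...]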
//****************************************************************************
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <stdio.h>
#include "./utils.h"
#define SHARED
#define FILTERWIDTH 9
#define NTHREADS 32
#ifdef SHARED
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth) {
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
// --- copy filter to shared memory --- //
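// Design note: every thread in the block applies all filterWidth*filterWidth
// weights, so staging the filter once in shared memory replaces many redundant
// global-memory reads. Only threads with threadIdx.x and threadIdx.y below
// FILTERWIDTH take part in the copy; the __syncthreads() below guarantees the
// staged weights are visible to the whole block before the blur loop runs.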
__shared__ float shared_filter[FILTERWIDTH * FILTERWIDTH];
if (threadIdx.x < FILTERWIDTH && threadIdx.y < FILTERWIDTH) {
int idx = threadIdx.x * FILTERWIDTH + threadIdx.y;
shared_filter[idx] = filter[idx];
}
__syncthreads();
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) {
return;
}
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
float new_pixel_value = 0.f;
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
int image_r = min(max(thread_2D_pos.y + filter_r, 0), numRows - 1);
int image_c = min(max(thread_2D_pos.x + filter_c, 0), numCols - 1);
float pixel_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = shared_filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
new_pixel_value += pixel_value * filter_value;
}
}
outputChannel[thread_1D_pos] = static_cast<unsigned char>(new_pixel_value);
}
#else
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth) {
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) {
return;
}
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
float new_pixel_value = 0.f;
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
int image_r = min(max(thread_2D_pos.y + filter_r, 0), numRows - 1);
int image_c = min(max(thread_2D_pos.x + filter_c, 0), numCols - 1);
float pixel_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
new_pixel_value += pixel_value * filter_value;
}
}
outputChannel[thread_1D_pos] = static_cast<unsigned char>(new_pixel_value);
}
#endif // SHARED
// This kernel takes in an image represented as a uchar4 and splits
// it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel) {
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) {
return;
}
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
uchar4 pixel = inputImageRGBA[thread_1D_pos];
redChannel[thread_1D_pos] = pixel.x;
greenChannel[thread_1D_pos] = pixel.y;
blueChannel[thread_1D_pos] = pixel.z;
}
// This kernel takes in three color channels and recombines them
// into one image. The alpha channel is set to 255 to represent
// that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols) {
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
// make sure we don't try and access memory outside the image
// by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) {
return;
}
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
// Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage,
const size_t numColsImage,
const float* const h_filter,
const size_t filterWidth) {
// allocate memory for the three different channels
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
// filter
size_t sizeInBytes = sizeof(float) * filterWidth * filterWidth;
checkCudaErrors(hipMalloc(&d_filter, sizeInBytes));
// Copy the filter on the host (h_filter) to the
// memory we just allocated on the GPU.
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeInBytes, hipMemcpyHostToDevice));
}
void gaussian_blur(const uchar4 * const h_inputImageRGBA,
uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA,
const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth) {
// Set block size
const dim3 blockSize(NTHREADS, NTHREADS, 1);
// Compute correct grid size (i.e., number of blocks per kernel launch)
const dim3 gridSize(ceil(static_cast<float>(numCols)/NTHREADS),
ceil(static_cast<float>(numRows)/NTHREADS), 1);
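// Each thread handles one pixel; the ceil() guarantees enough NTHREADSxNTHREADS
// blocks to cover images whose width or height is not a multiple of NTHREADS,
// and the kernels' bounds checks discard the surplus threads.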
// Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols,
d_red, d_green, d_blue);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Call the convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred,
numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred,
numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred,
numRows, numCols,
d_filter, filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Recombine your results
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
// Free all the memory that we allocated
void cleanupCu() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
| 0db30b2c55cf3c3eb4b7009454810755bc7fe6b3.cu | //****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <stdio.h>
#include "./utils.h"
#define SHARED
#define FILTERWIDTH 9
#define NTHREADS 32
#ifdef SHARED
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth) {
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
// --- copy filter to shared memory --- //
__shared__ float shared_filter[FILTERWIDTH * FILTERWIDTH];
if (threadIdx.x < FILTERWIDTH && threadIdx.y < FILTERWIDTH) {
int idx = threadIdx.x * FILTERWIDTH + threadIdx.y;
shared_filter[idx] = filter[idx];
}
__syncthreads();
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) {
return;
}
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
float new_pixel_value = 0.f;
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
int image_r = min(max(thread_2D_pos.y + filter_r, 0), numRows - 1);
int image_c = min(max(thread_2D_pos.x + filter_c, 0), numCols - 1);
float pixel_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = shared_filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
new_pixel_value += pixel_value * filter_value;
}
}
outputChannel[thread_1D_pos] = static_cast<unsigned char>(new_pixel_value);
}
#else
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth) {
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) {
return;
}
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
float new_pixel_value = 0.f;
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
int image_r = min(max(thread_2D_pos.y + filter_r, 0), numRows - 1);
int image_c = min(max(thread_2D_pos.x + filter_c, 0), numCols - 1);
float pixel_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
new_pixel_value += pixel_value * filter_value;
}
}
outputChannel[thread_1D_pos] = static_cast<unsigned char>(new_pixel_value);
}
#endif // SHARED
// This kernel takes in an image represented as a uchar4 and splits
// it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel) {
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) {
return;
}
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
uchar4 pixel = inputImageRGBA[thread_1D_pos];
redChannel[thread_1D_pos] = pixel.x;
greenChannel[thread_1D_pos] = pixel.y;
blueChannel[thread_1D_pos] = pixel.z;
}
// This kernel takes in three color channels and recombines them
// into one image. The alpha channel is set to 255 to represent
// that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols) {
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
// make sure we don't try and access memory outside the image
// by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) {
return;
}
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
// Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage,
const size_t numColsImage,
const float* const h_filter,
const size_t filterWidth) {
// allocate memory for the three different channels
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
// filter
size_t sizeInBytes = sizeof(float) * filterWidth * filterWidth;
checkCudaErrors(cudaMalloc(&d_filter, sizeInBytes));
// Copy the filter on the host (h_filter) to the
// memory we just allocated on the GPU.
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeInBytes, cudaMemcpyHostToDevice));
}
void gaussian_blur(const uchar4 * const h_inputImageRGBA,
uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA,
const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth) {
// Set block size
const dim3 blockSize(NTHREADS, NTHREADS, 1);
// Compute correct grid size (i.e., number of blocks per kernel launch)
const dim3 gridSize(ceil(static_cast<float>(numCols)/NTHREADS),
ceil(static_cast<float>(numRows)/NTHREADS), 1);
// Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols,
d_red, d_green, d_blue);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Call the convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred,
numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred,
numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred,
numRows, numCols,
d_filter, filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Recombine your results
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
// Free all the memory that we allocated
void cleanupCu() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
f8d9651e7c828eecfd23a13f9473e3dfd59ad081.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include "../distance.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int32_t *d_mss, *d_mss_offsets, *d_ts, *d_ss, *d_tlen, *d_toffsets, *d_slen, *d_soffsets, *d_params, *d_3d_cost_matrix;
int num_templates, num_streams, num_params_sets, h_ts_length, h_ss_length, h_mss_length;
__global__ void wlcss_cuda_kernel(int32_t *d_mss, int32_t *d_mss_offsets, int32_t *d_ts, int32_t *d_ss, int32_t *d_tlen, int32_t *d_toffsets, int32_t *d_slen, int32_t *d_soffsets, int32_t *d_params, int32_t *d_3d_cost_matrix){
int32_t params_idx = threadIdx.x;
int32_t template_idx = blockIdx.x;
int32_t stream_idx = blockIdx.y;
int32_t t_len = d_tlen[template_idx];
int32_t s_len = d_slen[stream_idx];
int32_t t_offset = d_toffsets[template_idx];
int32_t s_offset = d_soffsets[stream_idx];
int32_t d_mss_offset = d_mss_offsets[params_idx*gridDim.x*gridDim.y+template_idx*gridDim.y+stream_idx];
int32_t *mss = &d_mss[d_mss_offset];
int32_t *tmp_window = new int32_t[(t_len + 2)]();
int32_t *t = &d_ts[t_offset];
int32_t *s = &d_ss[s_offset];
int32_t reward = d_params[params_idx*3];
int32_t penalty = d_params[params_idx*3+1];
int32_t accepteddist = d_params[params_idx*3+2];
int32_t tmp = 0;
for(int32_t j=0;j<s_len;j++){
for(int32_t i=0;i<t_len;i++){
int32_t distance = d_3d_cost_matrix[s[j]*26 + t[i]];;
if (distance <= accepteddist){
tmp = tmp_window[i]+reward;
} else{
tmp = max(tmp_window[i]-penalty*distance,
max(tmp_window[i+1]-penalty*distance,
tmp_window[t_len+1]-penalty*distance));
}
tmp_window[i] = tmp_window[t_len+1];
tmp_window[t_len+1] = tmp;
}
tmp_window[t_len] = tmp_window[t_len+1];
mss[j] = tmp_window[t_len+1];
tmp_window[t_len+1] = 0;
}
delete [] tmp_window;
}
extern "C"{
void wlcss_cuda_init(int32_t *h_mss, int32_t *h_mss_offsets,
int32_t *h_ss, int32_t *h_slen, int32_t *h_soffsets,
int32_t *h_tlen, int32_t *h_toffsets,
int32_t *h_params, int num_ts, int num_ss, int num_ps, int h_ts_len, int h_ss_len, int h_mss_len){
num_templates = num_ts;
num_streams = num_ss;
num_params_sets = num_ps;
h_ts_length = h_ts_len;
h_ss_length = h_ss_len;
h_mss_length = h_mss_len;
//Allocate memory for cost matrix
gpuErrchk( hipMalloc((void **) &d_3d_cost_matrix, 676 * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_3d_cost_matrix, h_3d_cost_matrix, 676 * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for streams array
gpuErrchk( hipMalloc((void **) &d_ss, h_ss_length * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_ss, h_ss, h_ss_length * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for streams lengths
gpuErrchk( hipMalloc((void **) &d_slen, num_streams * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_slen, h_slen, num_streams * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for streams offsets
gpuErrchk( hipMalloc((void **) &d_soffsets, num_streams * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_soffsets, h_soffsets, num_streams * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for templates array
gpuErrchk( hipMalloc((void **) &d_ts, h_ts_length * sizeof(int32_t)) );
// Allocate memory for templates lengths
gpuErrchk( hipMalloc((void **) &d_tlen, num_templates * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_tlen, h_tlen, num_templates * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for templates offsets
gpuErrchk( hipMalloc((void **) &d_toffsets, num_templates * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_toffsets, h_toffsets, num_templates * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for params array
gpuErrchk( hipMalloc((void **) &d_params, num_params_sets * 3 * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_params, h_params, num_params_sets * 3 * sizeof(int32_t), hipMemcpyHostToDevice) );
// Allocate memory for matching scores
gpuErrchk( hipMalloc((void **) &d_mss, h_mss_length * sizeof(int32_t)) );
// Allocate memory for matching scores offsets
gpuErrchk( hipMalloc((void **) &d_mss_offsets, num_streams*num_templates*num_params_sets * sizeof(int32_t)) );
gpuErrchk( hipMemcpy(d_mss_offsets, h_mss_offsets, num_streams*num_templates*num_params_sets * sizeof(int32_t), hipMemcpyHostToDevice) );
}
void wlcss_cuda(int32_t *h_ts, int32_t *h_mss){
gpuErrchk( hipMemcpy(d_ts, h_ts, h_ts_length * sizeof(int32_t), hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_mss, h_mss, h_mss_length * sizeof(int32_t), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( wlcss_cuda_kernel), dim3(dim3(num_templates, num_streams)), dim3(num_params_sets), 0, 0, d_mss, d_mss_offsets, d_ts, d_ss, d_tlen, d_toffsets, d_slen, d_soffsets, d_params, d_3d_cost_matrix);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
gpuErrchk( hipMemcpy(h_mss, d_mss, h_mss_length * sizeof(int32_t), hipMemcpyDeviceToHost) );
}
void wlcss_freemem(){
hipFree(d_ts);
hipFree(d_tlen);
hipFree(d_toffsets);
hipFree(d_ss);
hipFree(d_slen);
hipFree(d_soffsets);
hipFree(d_mss);
hipFree(d_mss_offsets);
hipFree(d_params);
hipFree(d_3d_cost_matrix);
}
}
| f8d9651e7c828eecfd23a13f9473e3dfd59ad081.cu | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include "../distance.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int32_t *d_mss, *d_mss_offsets, *d_ts, *d_ss, *d_tlen, *d_toffsets, *d_slen, *d_soffsets, *d_params, *d_3d_cost_matrix;
int num_templates, num_streams, num_params_sets, h_ts_length, h_ss_length, h_mss_length;
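// The matching-score recurrence below is evaluated with a rolling buffer (tmp_window, length t_len + 2)
// instead of a full t_len x s_len cost matrix; after consuming stream sample j the running score is
// stored in mss[j].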
__global__ void wlcss_cuda_kernel(int32_t *d_mss, int32_t *d_mss_offsets, int32_t *d_ts, int32_t *d_ss, int32_t *d_tlen, int32_t *d_toffsets, int32_t *d_slen, int32_t *d_soffsets, int32_t *d_params, int32_t *d_3d_cost_matrix){
int32_t params_idx = threadIdx.x;
int32_t template_idx = blockIdx.x;
int32_t stream_idx = blockIdx.y;
int32_t t_len = d_tlen[template_idx];
int32_t s_len = d_slen[stream_idx];
int32_t t_offset = d_toffsets[template_idx];
int32_t s_offset = d_soffsets[stream_idx];
int32_t d_mss_offset = d_mss_offsets[params_idx*gridDim.x*gridDim.y+template_idx*gridDim.y+stream_idx];
int32_t *mss = &d_mss[d_mss_offset];
int32_t *tmp_window = new int32_t[(t_len + 2)]();
int32_t *t = &d_ts[t_offset];
int32_t *s = &d_ss[s_offset];
int32_t reward = d_params[params_idx*3];
int32_t penalty = d_params[params_idx*3+1];
int32_t accepteddist = d_params[params_idx*3+2];
int32_t tmp = 0;
for(int32_t j=0;j<s_len;j++){
for(int32_t i=0;i<t_len;i++){
int32_t distance = d_3d_cost_matrix[s[j]*26 + t[i]];
if (distance <= accepteddist){
tmp = tmp_window[i]+reward;
} else{
tmp = max(tmp_window[i]-penalty*distance,
max(tmp_window[i+1]-penalty*distance,
tmp_window[t_len+1]-penalty*distance));
}
tmp_window[i] = tmp_window[t_len+1];
tmp_window[t_len+1] = tmp;
}
tmp_window[t_len] = tmp_window[t_len+1];
mss[j] = tmp_window[t_len+1];
tmp_window[t_len+1] = 0;
}
delete [] tmp_window;
}
extern "C"{
void wlcss_cuda_init(int32_t *h_mss, int32_t *h_mss_offsets,
int32_t *h_ss, int32_t *h_slen, int32_t *h_soffsets,
int32_t *h_tlen, int32_t *h_toffsets,
int32_t *h_params, int num_ts, int num_ss, int num_ps, int h_ts_len, int h_ss_len, int h_mss_len){
num_templates = num_ts;
num_streams = num_ss;
num_params_sets = num_ps;
h_ts_length = h_ts_len;
h_ss_length = h_ss_len;
h_mss_length = h_mss_len;
//Allocate memory for cost matrix
gpuErrchk( cudaMalloc((void **) &d_3d_cost_matrix, 676 * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_3d_cost_matrix, h_3d_cost_matrix, 676 * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for streams array
gpuErrchk( cudaMalloc((void **) &d_ss, h_ss_length * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_ss, h_ss, h_ss_length * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for streams lengths
gpuErrchk( cudaMalloc((void **) &d_slen, num_streams * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_slen, h_slen, num_streams * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for streams offsets
gpuErrchk( cudaMalloc((void **) &d_soffsets, num_streams * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_soffsets, h_soffsets, num_streams * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for templates array
gpuErrchk( cudaMalloc((void **) &d_ts, h_ts_length * sizeof(int32_t)) );
// Allocate memory for templates lengths
gpuErrchk( cudaMalloc((void **) &d_tlen, num_templates * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_tlen, h_tlen, num_templates * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for templates offsets
gpuErrchk( cudaMalloc((void **) &d_toffsets, num_templates * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_toffsets, h_toffsets, num_templates * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for params array
gpuErrchk( cudaMalloc((void **) &d_params, num_params_sets * 3 * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_params, h_params, num_params_sets * 3 * sizeof(int32_t), cudaMemcpyHostToDevice) );
// Allocate memory for matching scores
gpuErrchk( cudaMalloc((void **) &d_mss, h_mss_length * sizeof(int32_t)) );
// Allocate memory for matching scores offsets
gpuErrchk( cudaMalloc((void **) &d_mss_offsets, num_streams*num_templates*num_params_sets * sizeof(int32_t)) );
gpuErrchk( cudaMemcpy(d_mss_offsets, h_mss_offsets, num_streams*num_templates*num_params_sets * sizeof(int32_t), cudaMemcpyHostToDevice) );
}
void wlcss_cuda(int32_t *h_ts, int32_t *h_mss){
gpuErrchk( cudaMemcpy(d_ts, h_ts, h_ts_length * sizeof(int32_t), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_mss, h_mss, h_mss_length * sizeof(int32_t), cudaMemcpyHostToDevice) );
wlcss_cuda_kernel<<<dim3(num_templates, num_streams), num_params_sets>>>(d_mss, d_mss_offsets, d_ts, d_ss, d_tlen, d_toffsets, d_slen, d_soffsets, d_params, d_3d_cost_matrix);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
gpuErrchk( cudaMemcpy(h_mss, d_mss, h_mss_length * sizeof(int32_t), cudaMemcpyDeviceToHost) );
}
void wlcss_freemem(){
cudaFree(d_ts);
cudaFree(d_tlen);
cudaFree(d_toffsets);
cudaFree(d_ss);
cudaFree(d_slen);
cudaFree(d_soffsets);
cudaFree(d_mss);
cudaFree(d_mss_offsets);
cudaFree(d_params);
cudaFree(d_3d_cost_matrix);
}
}
|
766c6c53b0678e74e413163704a0ccfe5560d33d.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright (c) 2020 by Contributors
* \file array/cuda/csr_mm.cu
* \brief SpSpMM/SpGEMM C APIs and definitions.
*/
#include <dgl/array.h>
#include <dgl/runtime/device_api.h>
#include "functor.cuh"
#include "./cusparse_dispatcher.cuh"
#include "../../runtime/cuda/cuda_common.h"
namespace dgl {
using namespace dgl::runtime;
namespace aten {
namespace cusparse {
#if __CUDACC_VER_MAJOR__ == 11
/*! \brief Cusparse implementation of SpGEMM on Csr format for CUDA 11.0+ */
template <typename DType, typename IdType>
std::pair<CSRMatrix, NDArray> CusparseSpgemm(
const CSRMatrix& A,
const NDArray A_weights_array,
const CSRMatrix& B,
const NDArray B_weights_array) {
// We use Spgemm (SpSpMM) to perform following operation:
// C = A x B, where A, B and C are sparse matrices in csr format.
const int nnzA = A.indices->shape[0];
const int nnzB = B.indices->shape[0];
const DType alpha = 1.0;
const DType beta = 0.0;
auto transA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
auto transB = HIPSPARSE_OPERATION_NON_TRANSPOSE;
// device
auto ctx = A.indptr->ctx;
auto device = runtime::DeviceAPI::Get(ctx);
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
const DType* A_weights = A_weights_array.Ptr<DType>();
const DType* B_weights = B_weights_array.Ptr<DType>();
// allocate cusparse handle if needed
if (!thr_entry->cusparse_handle) {
CUSPARSE_CALL(hipsparseCreate(&(thr_entry->cusparse_handle)));
}
CUSPARSE_CALL(hipsparseSetStream(thr_entry->cusparse_handle, thr_entry->stream));
// all one data array
hipsparseSpMatDescr_t matA, matB, matC;
IdArray dC_csrOffsets = IdArray::Empty({A.num_rows+1}, A.indptr->dtype, A.indptr->ctx);
IdType* dC_csrOffsets_data = dC_csrOffsets.Ptr<IdType>();
constexpr auto idtype = cusparse_idtype<IdType>::value;
constexpr auto dtype = cuda_dtype<DType>::value;
// Create sparse matrix A, B and C in CSR format
CUSPARSE_CALL(hipsparseCreateCsr(&matA,
A.num_rows, A.num_cols, nnzA,
A.indptr.Ptr<DType>(),
A.indices.Ptr<DType>(),
const_cast<DType*>(A_weights), // hipsparseCreateCsr only accepts non-const pointers
idtype, idtype, HIPSPARSE_INDEX_BASE_ZERO, dtype));
CUSPARSE_CALL(hipsparseCreateCsr(&matB,
B.num_rows, B.num_cols, nnzB,
B.indptr.Ptr<DType>(),
B.indices.Ptr<DType>(),
const_cast<DType*>(B_weights), // hipsparseCreateCsr only accepts non-const pointers
idtype, idtype, HIPSPARSE_INDEX_BASE_ZERO, dtype));
CUSPARSE_CALL(hipsparseCreateCsr(&matC,
A.num_rows, B.num_cols, 0,
nullptr, nullptr, nullptr, idtype, idtype,
HIPSPARSE_INDEX_BASE_ZERO, dtype));
// SpGEMM Computation
hipsparseSpGEMMDescr_t spgemmDesc;
CUSPARSE_CALL(hipsparseSpGEMM_createDescr(&spgemmDesc));
size_t workspace_size1 = 0, workspace_size2 = 0;
// ask bufferSize1 bytes for external memory
CUSPARSE_CALL(hipsparseSpGEMM_workEstimation(
thr_entry->cusparse_handle, transA, transB,
&alpha, matA, matB, &beta, matC, dtype,
HIPSPARSE_SPGEMM_DEFAULT, spgemmDesc, &workspace_size1,
NULL));
void* workspace1 = (device->AllocWorkspace(ctx, workspace_size1));
// inspect the matrices A and B to understand the memory requirement
// for the next step
CUSPARSE_CALL(hipsparseSpGEMM_workEstimation(
thr_entry->cusparse_handle, transA, transB,
&alpha, matA, matB, &beta, matC, dtype,
HIPSPARSE_SPGEMM_DEFAULT, spgemmDesc, &workspace_size1,
workspace1));
// ask bufferSize2 bytes for external memory
CUSPARSE_CALL(hipsparseSpGEMM_compute(thr_entry->cusparse_handle,
transA, transB, &alpha, matA, matB, &beta, matC,
dtype, HIPSPARSE_SPGEMM_DEFAULT, spgemmDesc, &workspace_size2,
NULL));
void* workspace2 = device->AllocWorkspace(ctx, workspace_size2);
// compute the intermediate product of A * B
CUSPARSE_CALL(hipsparseSpGEMM_compute(thr_entry->cusparse_handle,
transA, transB, &alpha, matA, matB, &beta, matC,
dtype, HIPSPARSE_SPGEMM_DEFAULT, spgemmDesc, &workspace_size2,
workspace2));
// get matrix C non-zero entries C_nnz1
int64_t C_num_rows1, C_num_cols1, C_nnz1;
CUSPARSE_CALL(hipsparseSpMatGetSize(matC, &C_num_rows1, &C_num_cols1, &C_nnz1));
IdArray dC_columns = IdArray::Empty({C_nnz1}, A.indptr->dtype, A.indptr->ctx);
NDArray dC_weights = NDArray::Empty({C_nnz1}, A_weights_array->dtype, A.indptr->ctx);
IdType* dC_columns_data = dC_columns.Ptr<IdType>();
DType* dC_weights_data = dC_weights.Ptr<DType>();
// update matC with the new pointers
CUSPARSE_CALL(hipsparseCsrSetPointers(matC, dC_csrOffsets_data,
dC_columns_data, dC_weights_data));
// copy the final products to the matrix C
CUSPARSE_CALL(hipsparseSpGEMM_copy(thr_entry->cusparse_handle,
transA, transB, &alpha, matA, matB, &beta, matC,
dtype, HIPSPARSE_SPGEMM_DEFAULT, spgemmDesc));
device->FreeWorkspace(ctx, workspace1);
device->FreeWorkspace(ctx, workspace2);
// destroy matrix/vector descriptors
CUSPARSE_CALL(hipsparseSpGEMM_destroyDescr(spgemmDesc));
CUSPARSE_CALL(hipsparseDestroySpMat(matA));
CUSPARSE_CALL(hipsparseDestroySpMat(matB));
CUSPARSE_CALL(hipsparseDestroySpMat(matC));
return {CSRMatrix(A.num_rows, B.num_cols, dC_csrOffsets, dC_columns), dC_weights};
}
#else // __CUDACC_VER_MAJOR__ != 11
/*! \brief Cusparse implementation of SpGEMM on Csr format for older CUDA versions */
template <typename DType, typename IdType>
std::pair<CSRMatrix, NDArray> CusparseSpgemm(
const CSRMatrix& A,
const NDArray A_weights_array,
const CSRMatrix& B,
const NDArray B_weights_array) {
int nnzC;
csrgemm2Info_t info = nullptr;
size_t workspace_size;
const DType alpha = 1.;
const int nnzA = A.indices->shape[0];
const int nnzB = B.indices->shape[0];
const int m = A.num_rows;
const int n = A.num_cols;
const int k = B.num_cols;
auto ctx = A.indptr->ctx;
auto device = runtime::DeviceAPI::Get(ctx);
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
auto idtype = A.indptr->dtype;
auto dtype = A_weights_array->dtype;
const DType* A_weights = A_weights_array.Ptr<DType>();
const DType* B_weights = B_weights_array.Ptr<DType>();
if (!thr_entry->cusparse_handle) {
CUSPARSE_CALL(hipsparseCreate(&(thr_entry->cusparse_handle)));
}
CUSPARSE_CALL(hipsparseSetStream(thr_entry->cusparse_handle, thr_entry->stream));
CUSPARSE_CALL(hipsparseSetPointerMode(
thr_entry->cusparse_handle, HIPSPARSE_POINTER_MODE_HOST));
CUSPARSE_CALL(hipsparseCreateCsrgemm2Info(&info));
hipsparseMatDescr_t matA, matB, matC, matD;
CUSPARSE_CALL(hipsparseCreateMatDescr(&matA));
CUSPARSE_CALL(hipsparseCreateMatDescr(&matB));
CUSPARSE_CALL(hipsparseCreateMatDescr(&matC));
CUSPARSE_CALL(hipsparseCreateMatDescr(&matD)); // needed even if D is null
CUSPARSE_CALL(CSRGEMM<DType>::bufferSizeExt(thr_entry->cusparse_handle,
m, n, k, &alpha,
matA, nnzA, A.indptr.Ptr<IdType>(), A.indices.Ptr<IdType>(),
matB, nnzB, B.indptr.Ptr<IdType>(), B.indices.Ptr<IdType>(),
nullptr,
matD, 0, nullptr, nullptr,
info,
&workspace_size));
void *workspace = device->AllocWorkspace(ctx, workspace_size);
IdArray C_indptr = IdArray::Empty({m + 1}, idtype, ctx);
CUSPARSE_CALL(CSRGEMM<DType>::nnz(thr_entry->cusparse_handle,
m, n, k,
matA, nnzA, A.indptr.Ptr<IdType>(), A.indices.Ptr<IdType>(),
matB, nnzB, B.indptr.Ptr<IdType>(), B.indices.Ptr<IdType>(),
matD, 0, nullptr, nullptr,
matC, C_indptr.Ptr<IdType>(), &nnzC, info, workspace));
IdArray C_indices = IdArray::Empty({nnzC}, idtype, ctx);
NDArray C_weights = NDArray::Empty({nnzC}, dtype, ctx);
CUSPARSE_CALL(CSRGEMM<DType>::compute(thr_entry->cusparse_handle,
m, n, k, &alpha,
matA, nnzA, A_weights, A.indptr.Ptr<IdType>(), A.indices.Ptr<IdType>(),
matB, nnzB, B_weights, B.indptr.Ptr<IdType>(), B.indices.Ptr<IdType>(),
nullptr,
matD, 0, nullptr, nullptr, nullptr,
matC, C_weights.Ptr<DType>(), C_indptr.Ptr<IdType>(), C_indices.Ptr<IdType>(),
info, workspace));
device->FreeWorkspace(ctx, workspace);
CUSPARSE_CALL(hipsparseDestroyCsrgemm2Info(info));
CUSPARSE_CALL(hipsparseDestroyMatDescr(matA));
CUSPARSE_CALL(hipsparseDestroyMatDescr(matB));
CUSPARSE_CALL(hipsparseDestroyMatDescr(matC));
CUSPARSE_CALL(hipsparseDestroyMatDescr(matD));
return {CSRMatrix(m, k, C_indptr, C_indices), C_weights};
}
#endif // __CUDACC_VER_MAJOR__ == 11
} // namespace cusparse
template <int XPU, typename IdType, typename DType>
std::pair<CSRMatrix, NDArray> CSRMM(
const CSRMatrix& A,
NDArray A_weights,
const CSRMatrix& B,
NDArray B_weights) {
auto ctx = A.indptr->ctx;
auto device = runtime::DeviceAPI::Get(ctx);
CSRMatrix newA, newB;
bool cast = false;
// Cast 64 bit indices to 32 bit.
if (A.indptr->dtype.bits == 64) {
newA = CSRMatrix(
A.num_rows, A.num_cols,
AsNumBits(A.indptr, 32), AsNumBits(A.indices, 32), AsNumBits(A.data, 32));
newB = CSRMatrix(
B.num_rows, B.num_cols,
AsNumBits(B.indptr, 32), AsNumBits(B.indices, 32), AsNumBits(B.data, 32));
cast = true;
}
// Reorder weights if A or B has edge IDs
NDArray newA_weights, newB_weights;
if (CSRHasData(A))
newA_weights = IndexSelect(A_weights, A.data);
if (CSRHasData(B))
newB_weights = IndexSelect(B_weights, B.data);
auto result = cusparse::CusparseSpgemm<DType, int32_t>(
cast ? newA : A, CSRHasData(A) ? newA_weights : A_weights,
cast ? newB : B, CSRHasData(B) ? newB_weights : B_weights);
// Cast 32 bit indices back to 64 bit if necessary
if (cast) {
CSRMatrix C = result.first;
return {
CSRMatrix(C.num_rows, C.num_cols, AsNumBits(C.indptr, 64), AsNumBits(C.indices, 64)),
result.second};
} else {
return result;
}
}
template std::pair<CSRMatrix, NDArray> CSRMM<kDLGPU, int32_t, float>(
const CSRMatrix&, NDArray, const CSRMatrix&, NDArray);
template std::pair<CSRMatrix, NDArray> CSRMM<kDLGPU, int64_t, float>(
const CSRMatrix&, NDArray, const CSRMatrix&, NDArray);
template std::pair<CSRMatrix, NDArray> CSRMM<kDLGPU, int32_t, double>(
const CSRMatrix&, NDArray, const CSRMatrix&, NDArray);
template std::pair<CSRMatrix, NDArray> CSRMM<kDLGPU, int64_t, double>(
const CSRMatrix&, NDArray, const CSRMatrix&, NDArray);
} // namespace aten
} // namespace dgl
| 766c6c53b0678e74e413163704a0ccfe5560d33d.cu | /*!
* Copyright (c) 2020 by Contributors
* \file array/cuda/csr_mm.cu
* \brief SpSpMM/SpGEMM C APIs and definitions.
*/
#include <dgl/array.h>
#include <dgl/runtime/device_api.h>
#include "./functor.cuh"
#include "./cusparse_dispatcher.cuh"
#include "../../runtime/cuda/cuda_common.h"
namespace dgl {
using namespace dgl::runtime;
namespace aten {
namespace cusparse {
#if __CUDACC_VER_MAJOR__ == 11
/*! \brief Cusparse implementation of SpGEMM on Csr format for CUDA 11.0+ */
template <typename DType, typename IdType>
std::pair<CSRMatrix, NDArray> CusparseSpgemm(
const CSRMatrix& A,
const NDArray A_weights_array,
const CSRMatrix& B,
const NDArray B_weights_array) {
// We use Spgemm (SpSpMM) to perform following operation:
// C = A x B, where A, B and C are sparse matrices in csr format.
const int nnzA = A.indices->shape[0];
const int nnzB = B.indices->shape[0];
const DType alpha = 1.0;
const DType beta = 0.0;
auto transA = CUSPARSE_OPERATION_NON_TRANSPOSE;
auto transB = CUSPARSE_OPERATION_NON_TRANSPOSE;
// device
auto ctx = A.indptr->ctx;
auto device = runtime::DeviceAPI::Get(ctx);
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
const DType* A_weights = A_weights_array.Ptr<DType>();
const DType* B_weights = B_weights_array.Ptr<DType>();
// allocate cusparse handle if needed
if (!thr_entry->cusparse_handle) {
CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle)));
}
CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, thr_entry->stream));
// all one data array
cusparseSpMatDescr_t matA, matB, matC;
IdArray dC_csrOffsets = IdArray::Empty({A.num_rows+1}, A.indptr->dtype, A.indptr->ctx);
IdType* dC_csrOffsets_data = dC_csrOffsets.Ptr<IdType>();
constexpr auto idtype = cusparse_idtype<IdType>::value;
constexpr auto dtype = cuda_dtype<DType>::value;
// Create sparse matrix A, B and C in CSR format
CUSPARSE_CALL(cusparseCreateCsr(&matA,
A.num_rows, A.num_cols, nnzA,
A.indptr.Ptr<DType>(),
A.indices.Ptr<DType>(),
const_cast<DType*>(A_weights), // cusparseCreateCsr only accepts non-const pointers
idtype, idtype, CUSPARSE_INDEX_BASE_ZERO, dtype));
CUSPARSE_CALL(cusparseCreateCsr(&matB,
B.num_rows, B.num_cols, nnzB,
B.indptr.Ptr<DType>(),
B.indices.Ptr<DType>(),
const_cast<DType*>(B_weights), // cusparseCreateCsr only accepts non-const pointers
idtype, idtype, CUSPARSE_INDEX_BASE_ZERO, dtype));
CUSPARSE_CALL(cusparseCreateCsr(&matC,
A.num_rows, B.num_cols, 0,
nullptr, nullptr, nullptr, idtype, idtype,
CUSPARSE_INDEX_BASE_ZERO, dtype));
// SpGEMM Computation
cusparseSpGEMMDescr_t spgemmDesc;
CUSPARSE_CALL(cusparseSpGEMM_createDescr(&spgemmDesc));
size_t workspace_size1 = 0, workspace_size2 = 0;
// ask bufferSize1 bytes for external memory
CUSPARSE_CALL(cusparseSpGEMM_workEstimation(
thr_entry->cusparse_handle, transA, transB,
&alpha, matA, matB, &beta, matC, dtype,
CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &workspace_size1,
NULL));
void* workspace1 = (device->AllocWorkspace(ctx, workspace_size1));
// inspect the matrices A and B to understand the memory requirement
// for the next step
CUSPARSE_CALL(cusparseSpGEMM_workEstimation(
thr_entry->cusparse_handle, transA, transB,
&alpha, matA, matB, &beta, matC, dtype,
CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &workspace_size1,
workspace1));
// ask bufferSize2 bytes for external memory
CUSPARSE_CALL(cusparseSpGEMM_compute(thr_entry->cusparse_handle,
transA, transB, &alpha, matA, matB, &beta, matC,
dtype, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &workspace_size2,
NULL));
void* workspace2 = device->AllocWorkspace(ctx, workspace_size2);
// compute the intermediate product of A * B
CUSPARSE_CALL(cusparseSpGEMM_compute(thr_entry->cusparse_handle,
transA, transB, &alpha, matA, matB, &beta, matC,
dtype, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &workspace_size2,
workspace2));
// get matrix C non-zero entries C_nnz1
int64_t C_num_rows1, C_num_cols1, C_nnz1;
CUSPARSE_CALL(cusparseSpMatGetSize(matC, &C_num_rows1, &C_num_cols1, &C_nnz1));
IdArray dC_columns = IdArray::Empty({C_nnz1}, A.indptr->dtype, A.indptr->ctx);
NDArray dC_weights = NDArray::Empty({C_nnz1}, A_weights_array->dtype, A.indptr->ctx);
IdType* dC_columns_data = dC_columns.Ptr<IdType>();
DType* dC_weights_data = dC_weights.Ptr<DType>();
// update matC with the new pointers
CUSPARSE_CALL(cusparseCsrSetPointers(matC, dC_csrOffsets_data,
dC_columns_data, dC_weights_data));
// copy the final products to the matrix C
CUSPARSE_CALL(cusparseSpGEMM_copy(thr_entry->cusparse_handle,
transA, transB, &alpha, matA, matB, &beta, matC,
dtype, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc));
device->FreeWorkspace(ctx, workspace1);
device->FreeWorkspace(ctx, workspace2);
// destroy matrix/vector descriptors
CUSPARSE_CALL(cusparseSpGEMM_destroyDescr(spgemmDesc));
CUSPARSE_CALL(cusparseDestroySpMat(matA));
CUSPARSE_CALL(cusparseDestroySpMat(matB));
CUSPARSE_CALL(cusparseDestroySpMat(matC));
return {CSRMatrix(A.num_rows, B.num_cols, dC_csrOffsets, dC_columns), dC_weights};
}
#else // __CUDACC_VER_MAJOR__ != 11
/*! \brief Cusparse implementation of SpGEMM on Csr format for older CUDA versions */
template <typename DType, typename IdType>
std::pair<CSRMatrix, NDArray> CusparseSpgemm(
const CSRMatrix& A,
const NDArray A_weights_array,
const CSRMatrix& B,
const NDArray B_weights_array) {
int nnzC;
csrgemm2Info_t info = nullptr;
size_t workspace_size;
const DType alpha = 1.;
const int nnzA = A.indices->shape[0];
const int nnzB = B.indices->shape[0];
const int m = A.num_rows;
const int n = A.num_cols;
const int k = B.num_cols;
auto ctx = A.indptr->ctx;
auto device = runtime::DeviceAPI::Get(ctx);
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
auto idtype = A.indptr->dtype;
auto dtype = A_weights_array->dtype;
const DType* A_weights = A_weights_array.Ptr<DType>();
const DType* B_weights = B_weights_array.Ptr<DType>();
if (!thr_entry->cusparse_handle) {
CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle)));
}
CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, thr_entry->stream));
CUSPARSE_CALL(cusparseSetPointerMode(
thr_entry->cusparse_handle, CUSPARSE_POINTER_MODE_HOST));
CUSPARSE_CALL(cusparseCreateCsrgemm2Info(&info));
cusparseMatDescr_t matA, matB, matC, matD;
CUSPARSE_CALL(cusparseCreateMatDescr(&matA));
CUSPARSE_CALL(cusparseCreateMatDescr(&matB));
CUSPARSE_CALL(cusparseCreateMatDescr(&matC));
CUSPARSE_CALL(cusparseCreateMatDescr(&matD)); // needed even if D is null
CUSPARSE_CALL(CSRGEMM<DType>::bufferSizeExt(thr_entry->cusparse_handle,
m, n, k, &alpha,
matA, nnzA, A.indptr.Ptr<IdType>(), A.indices.Ptr<IdType>(),
matB, nnzB, B.indptr.Ptr<IdType>(), B.indices.Ptr<IdType>(),
nullptr,
matD, 0, nullptr, nullptr,
info,
&workspace_size));
void *workspace = device->AllocWorkspace(ctx, workspace_size);
IdArray C_indptr = IdArray::Empty({m + 1}, idtype, ctx);
CUSPARSE_CALL(CSRGEMM<DType>::nnz(thr_entry->cusparse_handle,
m, n, k,
matA, nnzA, A.indptr.Ptr<IdType>(), A.indices.Ptr<IdType>(),
matB, nnzB, B.indptr.Ptr<IdType>(), B.indices.Ptr<IdType>(),
matD, 0, nullptr, nullptr,
matC, C_indptr.Ptr<IdType>(), &nnzC, info, workspace));
IdArray C_indices = IdArray::Empty({nnzC}, idtype, ctx);
NDArray C_weights = NDArray::Empty({nnzC}, dtype, ctx);
CUSPARSE_CALL(CSRGEMM<DType>::compute(thr_entry->cusparse_handle,
m, n, k, &alpha,
matA, nnzA, A_weights, A.indptr.Ptr<IdType>(), A.indices.Ptr<IdType>(),
matB, nnzB, B_weights, B.indptr.Ptr<IdType>(), B.indices.Ptr<IdType>(),
nullptr,
matD, 0, nullptr, nullptr, nullptr,
matC, C_weights.Ptr<DType>(), C_indptr.Ptr<IdType>(), C_indices.Ptr<IdType>(),
info, workspace));
device->FreeWorkspace(ctx, workspace);
CUSPARSE_CALL(cusparseDestroyCsrgemm2Info(info));
CUSPARSE_CALL(cusparseDestroyMatDescr(matA));
CUSPARSE_CALL(cusparseDestroyMatDescr(matB));
CUSPARSE_CALL(cusparseDestroyMatDescr(matC));
CUSPARSE_CALL(cusparseDestroyMatDescr(matD));
return {CSRMatrix(m, k, C_indptr, C_indices), C_weights};
}
#endif // __CUDACC_VER_MAJOR__ == 11
} // namespace cusparse
template <int XPU, typename IdType, typename DType>
std::pair<CSRMatrix, NDArray> CSRMM(
const CSRMatrix& A,
NDArray A_weights,
const CSRMatrix& B,
NDArray B_weights) {
auto ctx = A.indptr->ctx;
auto device = runtime::DeviceAPI::Get(ctx);
CSRMatrix newA, newB;
bool cast = false;
// Cast 64 bit indices to 32 bit.
if (A.indptr->dtype.bits == 64) {
newA = CSRMatrix(
A.num_rows, A.num_cols,
AsNumBits(A.indptr, 32), AsNumBits(A.indices, 32), AsNumBits(A.data, 32));
newB = CSRMatrix(
B.num_rows, B.num_cols,
AsNumBits(B.indptr, 32), AsNumBits(B.indices, 32), AsNumBits(B.data, 32));
cast = true;
}
// Reorder weights if A or B has edge IDs
NDArray newA_weights, newB_weights;
if (CSRHasData(A))
newA_weights = IndexSelect(A_weights, A.data);
if (CSRHasData(B))
newB_weights = IndexSelect(B_weights, B.data);
auto result = cusparse::CusparseSpgemm<DType, int32_t>(
cast ? newA : A, CSRHasData(A) ? newA_weights : A_weights,
cast ? newB : B, CSRHasData(B) ? newB_weights : B_weights);
// Cast 32 bit indices back to 64 bit if necessary
if (cast) {
CSRMatrix C = result.first;
return {
CSRMatrix(C.num_rows, C.num_cols, AsNumBits(C.indptr, 64), AsNumBits(C.indices, 64)),
result.second};
} else {
return result;
}
}
template std::pair<CSRMatrix, NDArray> CSRMM<kDLGPU, int32_t, float>(
const CSRMatrix&, NDArray, const CSRMatrix&, NDArray);
template std::pair<CSRMatrix, NDArray> CSRMM<kDLGPU, int64_t, float>(
const CSRMatrix&, NDArray, const CSRMatrix&, NDArray);
template std::pair<CSRMatrix, NDArray> CSRMM<kDLGPU, int32_t, double>(
const CSRMatrix&, NDArray, const CSRMatrix&, NDArray);
template std::pair<CSRMatrix, NDArray> CSRMM<kDLGPU, int64_t, double>(
const CSRMatrix&, NDArray, const CSRMatrix&, NDArray);
} // namespace aten
} // namespace dgl
|
d752b365d3cae087e0377634d890ccc05a2c01f1.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void atan2_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "atan2_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return ::atan2(a, b);
});
});
}
void bitwise_and_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(bool a, bool b) {
return a && b;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_and_cuda", [&]() {
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a & b;
});
});
}
}
void bitwise_or_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(bool a, bool b) {
return a || b;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_or_cuda", [&]() {
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a | b;
});
});
}
}
void bitwise_xor_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Bool) {
// Boolean type does not work with ^ (bitwise XOR) in C++. bitwise_xor wraps this operation for both Boolean and
// integral types.
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(bool a, bool b) {
return a != b;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_xor_cuda", [&]() {
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a ^ b;
});
});
}
}
void logical_and_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "logical_and_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a && b;
});
});
}
void logical_or_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "logical_or_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a || b;
});
});
}
void logical_xor_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "logical_xor_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return bool(a) != bool(b);
});
});
}
void smooth_l1_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "smooth_l1_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
auto z = fabs(a - b);
return z < scalar_t(1.) ? scalar_t(0.5) * z * z : z - scalar_t(0.5);
});
});
}
void sigmoid_backward_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "sigmoid_backward_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * (scalar_t(1.) - b) * b;
});
});
}
void tanh_backward_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "tanh_backward_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * (scalar_t(1.) - b * b);
});
});
}
void mse_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "mse_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
auto diff = a - b;
return diff * diff;
});
});
}
REGISTER_DISPATCH(atan2_stub, &atan2_kernel_cuda);
REGISTER_DISPATCH(bitwise_and_stub, &bitwise_and_kernel_cuda);
REGISTER_DISPATCH(bitwise_or_stub, &bitwise_or_kernel_cuda);
REGISTER_DISPATCH(bitwise_xor_stub, &bitwise_xor_kernel_cuda);
REGISTER_DISPATCH(logical_and_stub, &logical_and_kernel_cuda);
REGISTER_DISPATCH(logical_or_stub, &logical_or_kernel_cuda);
REGISTER_DISPATCH(logical_xor_stub, &logical_xor_kernel_cuda);
REGISTER_DISPATCH(smooth_l1_stub, &smooth_l1_kernel_cuda);
REGISTER_DISPATCH(sigmoid_backward_stub, &sigmoid_backward_kernel_cuda);
REGISTER_DISPATCH(tanh_backward_stub, &tanh_backward_kernel_cuda);
REGISTER_DISPATCH(mse_stub, &mse_kernel_cuda);
}} // namespace at::native
| d752b365d3cae087e0377634d890ccc05a2c01f1.cu | #include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void atan2_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "atan2_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return ::atan2(a, b);
});
});
}
void bitwise_and_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(bool a, bool b) {
return a && b;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_and_cuda", [&]() {
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a & b;
});
});
}
}
void bitwise_or_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(bool a, bool b) {
return a || b;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_or_cuda", [&]() {
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a | b;
});
});
}
}
void bitwise_xor_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Bool) {
// Boolean type does not work with ^ (bitwise XOR) in C++. bitwise_xor wraps this operation for both Boolean and
// integral types.
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(bool a, bool b) {
return a != b;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_xor_cuda", [&]() {
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a ^ b;
});
});
}
}
void logical_and_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "logical_and_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a && b;
});
});
}
void logical_or_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "logical_or_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a || b;
});
});
}
void logical_xor_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "logical_xor_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return bool(a) != bool(b);
});
});
}
void smooth_l1_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "smooth_l1_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
auto z = fabs(a - b);
return z < scalar_t(1.) ? scalar_t(0.5) * z * z : z - scalar_t(0.5);
});
});
}
void sigmoid_backward_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "sigmoid_backward_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * (scalar_t(1.) - b) * b;
});
});
}
void tanh_backward_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "tanh_backward_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * (scalar_t(1.) - b * b);
});
});
}
void mse_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "mse_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
auto diff = a - b;
return diff * diff;
});
});
}
REGISTER_DISPATCH(atan2_stub, &atan2_kernel_cuda);
REGISTER_DISPATCH(bitwise_and_stub, &bitwise_and_kernel_cuda);
REGISTER_DISPATCH(bitwise_or_stub, &bitwise_or_kernel_cuda);
REGISTER_DISPATCH(bitwise_xor_stub, &bitwise_xor_kernel_cuda);
REGISTER_DISPATCH(logical_and_stub, &logical_and_kernel_cuda);
REGISTER_DISPATCH(logical_or_stub, &logical_or_kernel_cuda);
REGISTER_DISPATCH(logical_xor_stub, &logical_xor_kernel_cuda);
REGISTER_DISPATCH(smooth_l1_stub, &smooth_l1_kernel_cuda);
REGISTER_DISPATCH(sigmoid_backward_stub, &sigmoid_backward_kernel_cuda);
REGISTER_DISPATCH(tanh_backward_stub, &tanh_backward_kernel_cuda);
REGISTER_DISPATCH(mse_stub, &mse_kernel_cuda);
}} // namespace at::native
|
5182a3f6a250dc142a35bb503111a0ebc506ad20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*$Id: computeHash.cu 2016-02-05 19:42:18 (author: Reza Mokhtari)$*/
#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <time.h>
#include <sys/time.h>
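// hashKernel: each thread hashes one contiguous chunk of the input, delimited by
// indices[tid]..indices[tid + 1], consuming 4 bytes per iteration and writing its partial
// hash to hashOutput[tid]; the host sums the partials into the final hash value.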
__global__ void hashKernel(char* input, int size, int* indices, int* hashOutput)
{
int index = blockIdx.x * blockDim.x + threadIdx.x; /* assuming 1D grid and block */
//Each thread hashes the data from indices[index] to indices[index + 1]
int start = indices[index];
int end = indices[index + 1];
unsigned hash = 2166136261;
unsigned FNVMultiple = 16777619;
for(int i = start; i < end; i += sizeof(int))
{
int arrayValue = *((int*) (input + i));
hash += arrayValue;
hash = hash ^ (arrayValue); /* xor the entire 32 bits */
hash -= arrayValue;
hash = hash * FNVMultiple; /* multiply by the magic number */
hash *= (arrayValue == 0)? 1 : arrayValue;
}
hashOutput[index] = hash;
}
int main(int argc, char** argv)
{
int fd;
char *fdata;
struct stat finfo;
char *fname;
if (argc < 2)
{
printf("USAGE: %s <inputfilename>\n", argv[0]);
exit(1);
}
fname = argv[1];
fd = open(fname, O_RDONLY);
fstat(fd, &finfo);
printf("Allocating %lluMB for the input file.\n", ((long long unsigned int)finfo.st_size) / (1 << 20));
fdata = (char *) malloc(finfo.st_size);
size_t successRead = read (fd, fdata, finfo.st_size);
size_t fileSize = (size_t) finfo.st_size;
if(successRead != fileSize)
{
printf("Not all of the file is read, terminating...\n"); /* happens when input data is too large. Not going to handle this for now */
exit(1);
}
//setting fixed number of threads, do not modify.
dim3 grid(8, 1, 1);
dim3 block(512, 1, 1);
int numThreads = grid.x * block.x;
int* indices = (int*) malloc((numThreads + 1) * sizeof(int));
//calculating indices. Each index shows the point from which a thread starts hashing the input data
int inputChunkSize = (fileSize + (numThreads - 1)) / numThreads;
//change the inputChunkSize to a value that can be dividable by 4
inputChunkSize -= (inputChunkSize % 4);
for(int i = 0; i < numThreads ; i ++)
indices[i] = i * inputChunkSize; /* last thread(s) might go out of boundary, but gonna be handled in the kernel */
//Setting the (last + 1) index
indices[numThreads] = (int) fileSize;
int* d_indices;
hipMalloc((void**) &d_indices, (numThreads + 1) * sizeof(int)); /* +1: the kernel also reads the end sentinel indices[numThreads] */
hipMemcpy(d_indices, indices, (numThreads + 1) * sizeof(int), hipMemcpyHostToDevice);
char* d_input;
hipMalloc((void**) &d_input, fileSize);
hipMemcpy(d_input, fdata, fileSize, hipMemcpyHostToDevice);
//Each thread will store its hash value in an element of this array.
int* d_hashOutput;
hipMalloc((void**) &d_hashOutput, numThreads * sizeof(int));
hipMemset(d_hashOutput, 0, numThreads * sizeof(int));
struct timeval partial_start, partial_end;
time_t sec, ms, diff;
gettimeofday(&partial_start, NULL);
hipLaunchKernelGGL(( hashKernel), dim3(grid), dim3(block), 0, 0, d_input, fileSize, d_indices, d_hashOutput);
hipDeviceSynchronize();
hipError_t errR = hipGetLastError();
if(errR != hipSuccess)
{
printf("Kernel returned an error, terminating...\n");
exit(1);
}
gettimeofday(&partial_end, NULL);
sec = partial_end.tv_sec - partial_start.tv_sec;
ms = partial_end.tv_usec - partial_start.tv_usec;
diff = sec * 1000000 + ms;
printf("\n%10s:\t\t%0.1fms\n", "Kernel elapsed time", (double)((double)diff/1000.0));
int* hashOutput = (int*) malloc(numThreads * sizeof(int));
hipMemcpy(hashOutput, d_hashOutput, numThreads * sizeof(int), hipMemcpyDeviceToHost);
//Summing up the threads' hash values to form one final hash value.
int finalHashValue = 0;
for(int i = 0; i < numThreads; i ++)
finalHashValue += hashOutput[i];
printf("Final hash value: %d\n", finalHashValue);
return 0;
}
| 5182a3f6a250dc142a35bb503111a0ebc506ad20.cu | /*$Id: computeHash.cu 2016-02-05 19:42:18 (author: Reza Mokhtari)$*/
#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <time.h>
#include <sys/time.h>
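// The per-word mixing starts from the FNV offset basis (2166136261) and multiplies by the FNV
// prime (16777619), combined with add/xor/subtract steps on each 32-bit word of the chunk.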
__global__ void hashKernel(char* input, int size, int* indices, int* hashOutput)
{
int index = blockIdx.x * blockDim.x + threadIdx.x; /* assuming 1D grid and block */
//Each thread hashes the data from indices[index] to indices[index + 1]
int start = indices[index];
int end = indices[index + 1];
unsigned hash = 2166136261;
unsigned FNVMultiple = 16777619;
for(int i = start; i < end; i += sizeof(int))
{
int arrayValue = *((int*) (input + i));
hash += arrayValue;
hash = hash ^ (arrayValue); /* xor the entire 32 bits */
hash -= arrayValue;
hash = hash * FNVMultiple; /* multiply by the magic number */
hash *= (arrayValue == 0)? 1 : arrayValue;
}
hashOutput[index] = hash;
}
int main(int argc, char** argv)
{
int fd;
char *fdata;
struct stat finfo;
char *fname;
if (argc < 2)
{
printf("USAGE: %s <inputfilename>\n", argv[0]);
exit(1);
}
fname = argv[1];
fd = open(fname, O_RDONLY);
fstat(fd, &finfo);
printf("Allocating %lluMB for the input file.\n", ((long long unsigned int)finfo.st_size) / (1 << 20));
fdata = (char *) malloc(finfo.st_size);
size_t successRead = read (fd, fdata, finfo.st_size);
size_t fileSize = (size_t) finfo.st_size;
if(successRead != fileSize)
{
printf("Not all of the file is read, terminating...\n"); /* happens when input data is too large. Not going to handle this for now */
exit(1);
}
//setting fixed number of threads, do not modify.
dim3 grid(8, 1, 1);
dim3 block(512, 1, 1);
int numThreads = grid.x * block.x;
int* indices = (int*) malloc((numThreads + 1) * sizeof(int));
//calculating indices. Each index shows the point from which a thread starts hashing the input data
int inputChunkSize = (fileSize + (numThreads - 1)) / numThreads;
//change the inputChunkSize to a value that can be dividable by 4
inputChunkSize -= (inputChunkSize % 4);
for(int i = 0; i < numThreads ; i ++)
indices[i] = i * inputChunkSize; /* last thread(s) might go out of boundary, but gonna be handled in the kernel */
//Setting the (last + 1) index
indices[numThreads] = (int) fileSize;
int* d_indices;
cudaMalloc((void**) &d_indices, (numThreads + 1) * sizeof(int)); /* +1: the kernel also reads the end sentinel indices[numThreads] */
cudaMemcpy(d_indices, indices, (numThreads + 1) * sizeof(int), cudaMemcpyHostToDevice);
char* d_input;
cudaMalloc((void**) &d_input, fileSize);
cudaMemcpy(d_input, fdata, fileSize, cudaMemcpyHostToDevice);
//Each thread will store its hash value in an element of this array.
int* d_hashOutput;
cudaMalloc((void**) &d_hashOutput, numThreads * sizeof(int));
cudaMemset(d_hashOutput, 0, numThreads * sizeof(int));
struct timeval partial_start, partial_end;
time_t sec, ms, diff;
gettimeofday(&partial_start, NULL);
hashKernel<<<grid, block>>>(d_input, fileSize, d_indices, d_hashOutput);
cudaThreadSynchronize();
cudaError_t errR = cudaGetLastError();
if(errR != cudaSuccess)
{
printf("Kernel returned an error, terminating...\n");
exit(1);
}
gettimeofday(&partial_end, NULL);
sec = partial_end.tv_sec - partial_start.tv_sec;
ms = partial_end.tv_usec - partial_start.tv_usec;
diff = sec * 1000000 + ms;
printf("\n%10s:\t\t%0.1fms\n", "Kernel elapsed time", (double)((double)diff/1000.0));
int* hashOutput = (int*) malloc(numThreads * sizeof(int));
cudaMemcpy(hashOutput, d_hashOutput, numThreads * sizeof(int), cudaMemcpyDeviceToHost);
//Summing up the threads' hash values to form one final hash value.
int finalHashValue = 0;
for(int i = 0; i < numThreads; i ++)
finalHashValue += hashOutput[i];
printf("Final hash value: %d\n", finalHashValue);
return 0;
}
|
74f93dbc46cf0d2281d3ff5f38f7dc936f78c1ec.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <time.h>
#include <stdbool.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "linalg.h"
#include "matrix.h"
#include "scan.h"
#include "utility.h"
#include "utility_cuda.cuh"
__device__ int next_pivot_row = 0;
//global variable declarations
int max_degree = 0;
int module = 0;
//----------------------------------------------------------------------------------------------------------------
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
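// find_pivot: threads scan rows r..row-1 in parallel and record, via atomicMin on the device-global
// next_pivot_row, the first row whose entry in column pivot_col is non-zero. Callers reset
// next_pivot_row to `row` before each launch.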
__global__ void find_pivot(int *matrix, int row, int col, int r, int pivot_col) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int thread_row = r + tid;
if (thread_row >= row)
return;
if (matrix[thread_row*col + pivot_col] != 0) {
atomicMin(&next_pivot_row, thread_row);
}
}
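// submatrix_reduction_by_cell: one thread per cell below the pivot row; each thread eliminates its
// element m[idy][idx] by subtracting s * m[pivot_row][idx] (mod module), with s = inv * m[idy][pivot_col].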
__global__ void submatrix_reduction_by_cell(int *matrix, int row, int col, int module, int inv, int pivot_col, int pivot_row) {
int starting_row = pivot_row + 1;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y + starting_row;
if (idx < pivot_col && idy < row && idy > pivot_row) { //stop threads before pivot_col so the value still needed to compute s is not overwritten
int div = matrix[idy*col + pivot_col];
if (div != 0) {
int s = mul_mod_GPU(inv, div, module);
int a = mul_mod_GPU(s, matrix[pivot_row*col + idx], module);
matrix[idy*col + idx] = sub_mod_GPU(matrix[idy*col + idx], a, module);
}
}
}
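// gaussian_reduction_by_cell: device-side driver written to run as a single thread. It walks the pivot
// columns right to left, swaps the pivot row into place, launches submatrix_reduction_by_cell as a child
// grid (dynamic parallelism) and then zeroes the pivot column below the pivot.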
__global__ void gaussian_reduction_by_cell(int *matrix, int row, int col, int module) {
int pivot_row = 0, r = 0, rows_found = 0;
int inv;
for (int pivot_col = col - 1; pivot_col >= 0; pivot_col--) {
r = rows_found;
while (r < row && matrix[r*col + pivot_col] == 0) { //m[r][pivot_col]
r++;
}
// either r is the first row with a non-zero element in column pivot_col, or no such row exists
if (r < row) { //a non-zero value was found
if (r != rows_found) {
swap_rows_GPU(matrix, row, col, rows_found, r); //move the row just found into its correct position
}
pivot_row = rows_found;
rows_found++;
inv = invers_GPU(matrix[pivot_row*col + pivot_col], module); //inverse of the element in m[r][pivot_col]
//kernel for cell-wise reduction
int block_dim = 16;
dim3 threads(block_dim, block_dim, 1);
int number_of_rows = row - rows_found;
int grid_y = number_of_rows / block_dim + 1;
int grid_x = col / block_dim + 1;
dim3 blocks(grid_x, grid_y, 1);
hipLaunchKernelGGL(( submatrix_reduction_by_cell), dim3(blocks), dim3(threads), 0, 0, matrix, row, col, module, inv, pivot_col, pivot_row);
hipDeviceSynchronize();
//the whole pivot column (pivot_col) below the pivot must be zeroed explicitly
for (int x = pivot_row + 1; x < row; x++) {
matrix[x*col + pivot_col] = 0;
}
}
}
}
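// submatrix_reduction_by_row: the block first stages the pivot row (up to pivot_col) in shared memory,
// then each thread reduces one entire row below the pivot against it.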
__global__ void submatrix_reduction_by_row(int *matrix, int row, int col, int module, int start, int pivot_col, int inv, int pivot_row, int cell_per_thread) {
extern __shared__ int smem[];
if ((threadIdx.x * cell_per_thread) <= pivot_col) {
int row_offset = pivot_row * col;
int thread_offset = threadIdx.x * cell_per_thread;
//stage the pivot row in shared memory: each thread copies a slice of "cell_per_thread" cells.
for (int i = 0; i<cell_per_thread; i++) {
if (thread_offset + i <= pivot_col) {
smem[thread_offset + i] = matrix[row_offset + thread_offset + i];
}
}
}
__syncthreads();
int x = 0, y = 0;
int row_index = (pivot_row + 1) + (blockDim.x * blockIdx.x + threadIdx.x);
if (row_index >= start && row_index < row) {
int row_linear_index = row_index * col + pivot_col;
if (matrix[row_linear_index] != 0) {
y = mul_mod_GPU(inv, matrix[row_linear_index], module);
for (int k = 0; k < pivot_col + 1; k++) {
//a = mul_mod_GPU(s,matrix[pivot_riga*col+k],module);
x = mul_mod_GPU(y, smem[k], module);
matrix[row_index*col + k] = sub_mod_GPU(matrix[row_index*col + k], x, module);
}
}
}
}
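// gaussian_reduction_by_row: device-side driver launched with a single thread from gauss_CUDA; for each
// pivot column it launches find_pivot, swap_rows and submatrix_reduction_by_row as child kernels, sizing
// the child grids from the number of rows/columns still to be processed.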
__global__ void gaussian_reduction_by_row(int *matrix, int row, int col, int module) {
int pivot_row = 0, r = 0, rows_found = 0;
int inv, block_dim, threads_per_block, block_x_axis;
for (int pivot_col = col - 1; pivot_col >= 0; pivot_col--) {
r = rows_found;
block_dim = 256;
int row_to_check = row - rows_found;
threads_per_block = (row_to_check < block_dim ? row_to_check : block_dim);
dim3 t_find(threads_per_block);
if (threads_per_block == block_dim && row_to_check != block_dim) {
block_x_axis = (row_to_check / block_dim) + 1;
}
else {
block_x_axis = 1;
}
dim3 b_find(block_x_axis);
next_pivot_row = row;
find_pivot << <b_find, t_find >> > (matrix, row, col, r, pivot_col);
hipDeviceSynchronize();
r = next_pivot_row;
if (r < row) {
if (r != rows_found) {
block_dim = 256;
threads_per_block = (col < block_dim ? col : block_dim);
dim3 t_swap(threads_per_block);
if (threads_per_block == block_dim && col != block_dim) {
block_x_axis = (col / block_dim) + 1;
}
else {
block_x_axis = 1;
}
dim3 b_swap(block_x_axis);
//move the row just found into its correct position
swap_rows << <b_swap, t_swap >> > (matrix, row, col, rows_found, r);
hipDeviceSynchronize();
}
pivot_row = rows_found;
rows_found++;
inv = invers_GPU(matrix[pivot_row*col + pivot_col], module); //inverse of the element in m[r][pivot_col]
int block_dim = 1024;
//kernel for row-wise reduction
int numero_righe = row - rows_found;
int t = (numero_righe < block_dim ? numero_righe : block_dim);
int b = 1;
if (t == block_dim && numero_righe != block_dim) {
b = numero_righe / block_dim + 1;
}
dim3 threads(t);
dim3 blocks(b);
int pivot_length = pivot_col + 1;
int cell_per_thread = (t >= pivot_length) ? 1 : (pivot_length / t) + 1;
int shared_mem = pivot_length * sizeof(int);
hipLaunchKernelGGL(( submatrix_reduction_by_row), dim3(blocks), dim3(threads), shared_mem , 0, matrix, row, col, module, rows_found, pivot_col, inv, pivot_row, cell_per_thread);
hipDeviceSynchronize();
}
}
}
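// submatrix_reduction_by_block: tiled reduction. Each block caches a blockDim.x-wide slice of the pivot
// row plus a thread_height-tall slice of the pivot column in shared memory, then every thread updates up
// to thread_height cells of its column below the pivot row.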
__global__ void submatrix_reduction_by_block(int *matrix, int row, int col, int module, int pivot_col, int inv, int pivot_row, int thread_height, int block_dim) {
extern __shared__ int smem[];
int *smem_pivot_row = (int*)smem;
int *smem_pivot_col = (int*)&smem_pivot_row[block_dim];
int x = 0, y = 0, interation = 0;
int col_index = blockIdx.x * blockDim.x + threadIdx.x; //indice della colonna della matrice originale per il thread corrente
//-------------
//initialize shared memory with the pivot row
if (col_index < col) { //guard: the last block may run past the end of the row
smem_pivot_row[threadIdx.x] = matrix[pivot_row * col + col_index]; //each thread copies a single row element into shared memory, one thread per row cell
}
//------------
//initialize shared memory with the pivot column
//number of pivot-column cells each thread has to copy
int cell_to_copy = 1;
if (thread_height > blockDim.x) {
cell_to_copy = thread_height / blockDim.x + 1;
}
int base_row = (pivot_row + 1) + blockIdx.y * thread_height;
int index = 0;
//copia della porzione di colonna in smem
for (int i = 0; i<cell_to_copy; i++) {
index = (threadIdx.x * cell_to_copy) + i;
if (base_row + index < row && index < thread_height) {
smem_pivot_col[index] = matrix[(base_row + index) * col + pivot_col];
}
}
//synchronize all threads in the block so that shared memory is consistent
__syncthreads();
if (col_index < pivot_col) {
//number of rows this thread has to iterate over, handling the case where the matrix size is not a multiple of thread_height
int reached_row = (pivot_row + 1) + ((blockIdx.y + 1) * thread_height); //row reached by the current thread
if (reached_row > row) {
interation = thread_height - (reached_row - row); //sizes do not line up
}
else {
interation = thread_height; //regular case
}
}
int row_offset = (pivot_row + 1) + (blockIdx.y * thread_height);
for (int i = 0; i<interation; i++) {
int pivot_element = smem_pivot_col[i];
if (pivot_element != 0) {
y = mul_mod_GPU(inv, pivot_element, module); //all threads working on the same row compute the same value
x = mul_mod_GPU(y, smem_pivot_row[threadIdx.x], module);
matrix[row_offset * col + col_index] = sub_mod_GPU(matrix[row_offset * col + col_index], x, module);
}
row_offset++;
}
}
}
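// gaussian_reduction_by_block: driver variant that reduces the submatrix in tiles
// (submatrix_reduction_by_block) and clears the pivot column with a separate reset_pivot_col child kernel.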
__global__ void gaussian_reduction_by_block(int *matrix, int row, int col, int module) {
int pivot_row = 0, r = 0, rows_found = 0;
int inv;
int block_dim = 0;
int threads_per_block = 0;
int block_x_axis, block_y_axis = 0;
for (int pivot_col = col - 1; pivot_col >= 0; pivot_col--) {
r = rows_found;
///////////////////////////FIND PIVOT///////////////////////////////////////////////
block_dim = 256; //base 256
int row_to_check = row - rows_found;
threads_per_block = (row_to_check < block_dim ? row_to_check : block_dim);
dim3 t_find(threads_per_block);
if (threads_per_block == block_dim && row_to_check != block_dim) {
block_x_axis = (row_to_check / block_dim) + 1;
}
else {
block_x_axis = 1;
}
dim3 b_find(block_x_axis);
next_pivot_row = row;
find_pivot << <b_find, t_find >> >(matrix, row, col, r, pivot_col);
hipDeviceSynchronize();
r = next_pivot_row;
/////////////////////////////////////////////////////////////////////////////////
if (r < row) {
if (r != rows_found) {
////////////////////////SWAP ROWS////////////////////////////////////////////////////////
block_dim = 256; //base 256
threads_per_block = (col < block_dim ? col : block_dim);
dim3 t_swap(threads_per_block);
if (threads_per_block == block_dim && col != block_dim) {
block_x_axis = (col / block_dim) + 1;
}
else {
block_x_axis = 1;
}
dim3 b_swap(block_x_axis);
//move the row just found into its correct position
swap_rows << <b_swap, t_swap >> >(matrix, row, col, rows_found, r);
hipDeviceSynchronize();
////////////////////////////////////////////////////////////////////////////////////////
}
pivot_row = rows_found;
rows_found++;
inv = invers_GPU(matrix[pivot_row*col + pivot_col], module);
////////////////////////////////////////REDUCTION BY BLOCK////////////////////////////////////
block_dim = 128; //base 128
int col_to_reduce = pivot_col;
threads_per_block = (col_to_reduce < block_dim ? col_to_reduce : block_dim);
dim3 threads(threads_per_block);
if (threads_per_block == block_dim && col_to_reduce != block_dim) {
block_x_axis = (col_to_reduce / block_dim) + 1;
}
else {
block_x_axis = 1;
}
int thread_height = 32; //base 256
int row_to_reduce = row - rows_found;
block_y_axis = (row_to_reduce / thread_height) + 1;
dim3 blocks(block_x_axis, block_y_axis);
int shared = (block_dim * sizeof(int)) + (thread_height * sizeof(int));
hipLaunchKernelGGL(( submatrix_reduction_by_block) , dim3(blocks), dim3(threads), shared, 0, matrix, row, col, module, pivot_col, inv, pivot_row, thread_height, block_dim);
hipDeviceSynchronize();
//////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////RESET PIVOT COL////////////////////////////////////////
thread_height = 50; //base 100
block_dim = 32; //base 32
row_to_reduce = row - pivot_row;
threads_per_block = (row_to_reduce < thread_height ? 1 : block_dim);
block_x_axis = (threads_per_block == block_dim && row_to_reduce != block_dim) ? (row_to_reduce / (thread_height*block_dim) + 1) : 1;
dim3 t(threads_per_block);
dim3 b(block_x_axis);
hipLaunchKernelGGL(( reset_pivot_col) , dim3(b), dim3(t) , 0, 0, matrix, row, col, pivot_row, pivot_col, thread_height, block_dim);
hipDeviceSynchronize();
//////////////////////////////////////////////////////////////////////////////////////
}
}
}
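// gauss_CUDA: host wrapper. Copies the matrix to the device, launches the single-thread
// gaussian_reduction_by_row driver (which itself launches child kernels, so the toolchain must support
// device-side kernel launches / dynamic parallelism), synchronizes, copies the reduced matrix back and
// returns the elapsed time.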
double gauss_CUDA(int *m, int row, int col, int module) {
int matrix_length = row * col;
int matrix_length_bytes = matrix_length * sizeof(int);
clock_t start, end;
double elapsed = 0.0;
int *m_d;
gpuErrchk(hipMalloc((void **)&m_d, matrix_length_bytes));
gpuErrchk(hipMemcpy(m_d, m, matrix_length_bytes, hipMemcpyHostToDevice));
start = clock();
hipLaunchKernelGGL(( gaussian_reduction_by_row), dim3(1), dim3(1), 0, 0, m_d, row, col, module);
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipMemcpy(m, m_d, matrix_length_bytes, hipMemcpyDeviceToHost));
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
gpuErrchk(hipFree(m_d));
return elapsed;
}
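// resolve_system: outer solver loop. Each round expands the polynomial system (moltiplica_matrice) up to
// the first missing degree, reduces it with the GPU Gaussian elimination, drops null rows and recomputes
// the degree profile; it stops once every target degree is represented (target_degree == 0).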
void resolve_system(int **matrix, int * row, int col, struct map map, int *degree, int **monomi, int numero_variabili, int n_loops, int expansion, FILE *output_file) {
///////////////////////////// SUPPORT STRUCTURE INITIALIZATION ////////////////////////////
clock_t start, end;
double elapsed;
//create the array that will hold the degrees reached in each round
int **m_deg_array = (int **)malloc(sizeof(int*));
m_deg_array[0] = (int *)calloc(max_degree + 1, sizeof(int));
int n_round = 0;
int *m_deg = m_deg_array[0];
int missing_degree = max_degree;
fprintf(output_file, "Inizio computazione, metodo standard\n");
matrix_degree(*matrix, *row, col, m_deg, monomi, numero_variabili);
///////////////////////////////////////////////////////////////////////////////////////
int stop = 0;
while (stop != 1) {
n_round++;
fprintf(output_file, "\n -Eseguo moltiplicazione, ");
fflush(stdout);
start = clock();
//find missing degree to multiply matrix
for (int i = max_degree; i>0; i--) {
if (m_deg[i] == 0) {
missing_degree = i;
break;
}
}
/////////////// SYSTEM EXPANSION ////////////////////////////////
moltiplica_matrice(matrix, row, col, map, degree, monomi, numero_variabili, missing_degree, max_degree);
//////////////////////////////////////////////////////////////////
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "numero righe: %d (%f sec)", *row, elapsed);
fprintf(output_file, "\n -Eseguo Gauss, ");
fflush(stdout);
///////////////////// SYSTEM REDUCTION //////////////////////////////
elapsed = gauss_CUDA(*matrix, *row, col, module);
//remove the null rows of the matrix
eliminate_null_rows(matrix, row, col);
m_deg_array = (int **)realloc(m_deg_array, sizeof(int*)*(n_round + 1));
m_deg_array[n_round] = (int *)calloc(max_degree + 1, sizeof(int));
m_deg = m_deg_array[n_round];
/////////////////////////////////////////////////////////////////////
//////////////// MISSING DEGREE COMPUTATION /////////////////////////////
fprintf(output_file, "numero righe: %d (%f sec)\n", *row, elapsed);
matrix_degree(*matrix, *row, col, m_deg, monomi, numero_variabili);
print_matrix_degree(m_deg, output_file, max_degree);
if (target_degree(m_deg, max_degree) == 0)
stop = 1;
}
for (int i = 0; i < n_round + 1; i++)
free(m_deg_array[i]);
free(m_deg_array);
}
int main(int argc, char *argv[]) {
/////////////////////// INIZIALIZZAZIONE ////////////////////////////////////////////
FILE *input_file = NULL, *output_file = NULL;
for (int parsed = 1; parsed < argc; parsed++) {
if (parsed < argc && !strcmp(argv[parsed], "--input")) {
parsed++;
input_file = fopen(argv[parsed], "r");
if (!input_file) {
perror("Errore nell'apertura del file di input");
return (-1);
}
}
else if (parsed < argc && !strcmp(argv[parsed], "--output")) {
parsed++;
output_file = fopen(argv[parsed], "w");
if (!output_file) {
perror("Errore nell'apertura del file di output");
return (-1);
}
}
}
if (!input_file)
input_file = stdin;
if (!output_file)
output_file = stdout;
int row, col, numero_variabili, tipo_ordinamento;
int *matrix;
char *variabili;
row = col = numero_variabili = 0;
int(*ord) (void*, const void *, const void *);
struct map smap;
clock_t start, end;
double elapsed = 0.0;
start = clock();
//////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////// ALLOCAZIONE STRUTTURE DI SUPPORTO ///////////////////////////////////////////
allocation(&matrix, &row, &col, &numero_variabili, &variabili, &tipo_ordinamento, &module, &max_degree, input_file);
if (order(&ord, tipo_ordinamento) != 0) {
fprintf(stderr, "Ordinamento inesistente!!!\n\nTERMINAZIONE PROGRAMMA");
return 0;
}
int * degree = (int *)calloc(max_degree + 1, sizeof(int));
int numero_monomi = col;
int **monomi;
//crea il vettore con tutti i possibili monomi avendo num_var variabili e max_degree come massimo grado
monomi = monomial_computation(numero_variabili, max_degree, numero_monomi);
//ordina il vettore dei monomi secondo un determinato ordinamento, ordinamento intercambiabile
qsort_s(monomi, numero_monomi, sizeof(int*), ord, &numero_variabili);
//inizializzazione matrice (lettura dati input)
if (parse(numero_variabili, variabili, matrix, row, monomi, col, module, ord, input_file) == -1) {
fprintf(stderr, "Errore di input !!!\n\nTERMINAZIONE PROGRAMMA"); //se l'input è in formato scorretto abort del programma
return 0;
}
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "\nInizializzazione in %f sec\n", elapsed);
start = clock();
setup_struct_map(&smap, monomi, numero_monomi, numero_variabili, max_degree, ord);
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "\nMappa creata in %f sec, %d x %d \n\n", elapsed, col, col);
/////////////////////////// RISOLUZIONE SISTEMA ////////////////////////////////////////////////
start = clock();
//inizializzazione vettore dei gradi dei polinomi
init_degree_vector(degree, numero_variabili, max_degree);
int n_loops = 30, expansion = 1;
//eseguo moltiplicazione e riduzione di Gauss finche non trovo soluzione
resolve_system(&matrix, &row, col, smap, degree, monomi, numero_variabili, n_loops, expansion, output_file);
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "\nTarget raggiunto, soluzione trovata in %f sec\n\n", elapsed);
/////////////////////////////////////////////////////////////////////////////////////////////
print_incognite(matrix, row, col, numero_variabili, monomi, output_file);
free(matrix);
free(degree);
hipDeviceReset();
return 0;
}
| 74f93dbc46cf0d2281d3ff5f38f7dc936f78c1ec.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <time.h>
#include <stdbool.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "linalg.h"
#include "matrix.h"
#include "scan.h"
#include "utility.h"
#include "utility_cuda.cuh"
__device__ int next_pivot_row = 0;
//dichiarazione variabili globali
int max_degree = 0;
int module = 0;
//----------------------------------------------------------------------------------------------------------------
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void find_pivot(int *matrix, int row, int col, int r, int pivot_col) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int thread_row = r + tid;
if (thread_row >= row)
return;
if (matrix[thread_row*col + pivot_col] != 0) {
atomicMin(&next_pivot_row, thread_row);
}
}
__global__ void submatrix_reduction_by_cell(int *matrix, int row, int col, int module, int inv, int pivot_col, int pivot_row) {
int starting_row = pivot_row + 1;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y + starting_row;
if (idx < pivot_col && idy < row && idy > pivot_row) { //fermo i thread prima di pivot_colonna per impedire di sovrascrivere il dato necessario per s
int div = matrix[idy*col + pivot_col];
if (div != 0) {
int s = mul_mod_GPU(inv, div, module);
int a = mul_mod_GPU(s, matrix[pivot_row*col + idx], module);
matrix[idy*col + idx] = sub_mod_GPU(matrix[idy*col + idx], a, module);
}
}
}
__global__ void gaussian_reduction_by_cell(int *matrix, int row, int col, int module) {
int pivot_row = 0, r = 0, rows_found = 0;
int inv;
for (int pivot_col = col - 1; pivot_col >= 0; pivot_col--) {
r = rows_found;
while (r < row && matrix[r*col + pivot_col] == 0) { //m[r][pivot_colonna]
r++;
}
// ho trovato la prima riga con elemento non nullo in posizione r e pivot_colonna oppure non esiste nessuna riga con elemento non nullo in posizione pivot_colonna
if (r < row) { //significa che ho trovato un valore non nullo
if (r != rows_found) {
swap_rows_GPU(matrix, row, col, rows_found, r); //sposto la riga appena trovata nella posizone corretta
}
pivot_row = rows_found;
rows_found++;
inv = invers_GPU(matrix[pivot_row*col + pivot_col], module); //inverso dell'elemento in m[r][pivot_colonna]
//kernel per riduzione celle
int block_dim = 16;
dim3 threads(block_dim, block_dim, 1);
int number_of_rows = row - rows_found;
int grid_y = number_of_rows / block_dim + 1;
int grid_x = col / block_dim + 1;
dim3 blocks(grid_x, grid_y, 1);
submatrix_reduction_by_cell<<<blocks, threads>>>(matrix, row, col, module, inv, pivot_col, pivot_row);
cudaDeviceSynchronize();
//necessario azzerare tutta la colonna (pivot_colonna)
for (int x = pivot_row + 1; x < row; x++) {
matrix[x*col + pivot_col] = 0;
}
}
}
}
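// Row-wise reduction: the whole block stages the pivot row (columns 0..pivot_col) in shared memory,
// then each thread rescales it and subtracts it from one row below the pivot, working modulo `module`.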
__global__ void submatrix_reduction_by_row(int *matrix, int row, int col, int module, int start, int pivot_col, int inv, int pivot_row, int cell_per_thread) {
extern __shared__ int smem[];
if ((threadIdx.x * cell_per_thread) <= pivot_col) {
int row_offset = pivot_row * col;
int thread_offset = threadIdx.x * cell_per_thread;
//allocazione della smem con la riga di pivot, ogni thread copia una porzione di riga pari a "cell_per_thread".
for (int i = 0; i<cell_per_thread; i++) {
if (thread_offset + i <= pivot_col) {
smem[thread_offset + i] = matrix[row_offset + thread_offset + i];
}
}
}
__syncthreads();
int x = 0, y = 0;
int row_index = (pivot_row + 1) + (blockDim.x * blockIdx.x + threadIdx.x);
if (row_index >= start && row_index < row) {
int row_linear_index = row_index * col + pivot_col;
if (matrix[row_linear_index] != 0) {
y = mul_mod_GPU(inv, matrix[row_linear_index], module);
for (int k = 0; k < pivot_col + 1; k++) {
//a = mul_mod_GPU(s,matrix[pivot_riga*col+k],module);
x = mul_mod_GPU(y, smem[k], module);
matrix[row_index*col + k] = sub_mod_GPU(matrix[row_index*col + k], x, module);
}
}
}
}
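// Driver kernel (meant to be launched with a single thread): for each pivot column it finds the pivot
// row, swaps it into place and reduces the rows below it by launching find_pivot, swap_rows and
// submatrix_reduction_by_row as child kernels (dynamic parallelism).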
__global__ void gaussian_reduction_by_row(int *matrix, int row, int col, int module) {
int pivot_row = 0, r = 0, rows_found = 0;
int inv, block_dim, threads_per_block, block_x_axis;
for (int pivot_col = col - 1; pivot_col >= 0; pivot_col--) {
r = rows_found;
block_dim = 256;
int row_to_check = row - rows_found;
threads_per_block = (row_to_check < block_dim ? row_to_check : block_dim);
dim3 t_find(threads_per_block);
if (threads_per_block == block_dim && row_to_check != block_dim) {
block_x_axis = (row_to_check / block_dim) + 1;
}
else {
block_x_axis = 1;
}
dim3 b_find(block_x_axis);
next_pivot_row = row;
find_pivot << <b_find, t_find >> > (matrix, row, col, r, pivot_col);
cudaDeviceSynchronize();
r = next_pivot_row;
if (r < row) {
if (r != rows_found) {
block_dim = 256;
threads_per_block = (col < block_dim ? col : block_dim);
dim3 t_swap(threads_per_block);
if (threads_per_block == block_dim && col != block_dim) {
block_x_axis = (col / block_dim) + 1;
}
else {
block_x_axis = 1;
}
dim3 b_swap(block_x_axis);
//sposto la riga appena trovata nella posizone corretta
swap_rows << <b_swap, t_swap >> > (matrix, row, col, rows_found, r);
cudaDeviceSynchronize();
}
pivot_row = rows_found;
rows_found++;
inv = invers_GPU(matrix[pivot_row*col + pivot_col], module); //inverso dell'elemento in m[r][pivot_colonna]
int block_dim = 1024;
//kernel per riduzione righe
int numero_righe = row - rows_found;
int t = (numero_righe < block_dim ? numero_righe : block_dim);
int b = 1;
if (t == block_dim && numero_righe != block_dim) {
b = numero_righe / block_dim + 1;
}
dim3 threads(t);
dim3 blocks(b);
int pivot_length = pivot_col + 1;
int cell_per_thread = (t >= pivot_length) ? 1 : (pivot_length / t) + 1;
int shared_mem = pivot_length * sizeof(int);
submatrix_reduction_by_row<<<blocks, threads, shared_mem >>>(matrix, row, col, module, rows_found, pivot_col, inv, pivot_row, cell_per_thread);
cudaDeviceSynchronize();
}
}
}
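// Tiled reduction: each block caches blockDim.x entries of the pivot row and thread_height entries
// of the pivot column in shared memory, then updates a thread_height x blockDim.x tile of the
// submatrix below the pivot row, modulo `module`.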
__global__ void submatrix_reduction_by_block(int *matrix, int row, int col, int module, int pivot_col, int inv, int pivot_row, int thread_height, int block_dim) {
extern __shared__ int smem[];
int *smem_pivot_row = (int*)smem;
int *smem_pivot_col = (int*)&smem_pivot_row[block_dim];
int x = 0, y = 0, interation = 0;
int col_index = blockIdx.x * blockDim.x + threadIdx.x; //indice della colonna della matrice originale per il thread corrente
//-------------
//inizzializzazione smem per pivot riga
smem_pivot_row[threadIdx.x] = matrix[pivot_row * col + col_index]; //ogni thread copia un solo elemento nella riga in shared, un thread per cella di riga
//------------
//inizializzazione smem per pivot colonna
//calcolo del numero di celle (colonna_pivot) che ogni thred deve copiare
int cell_to_copy = 1;
if (thread_height > blockDim.x) {
cell_to_copy = thread_height / blockDim.x + 1;
}
int base_row = (pivot_row + 1) + blockIdx.y * thread_height;
int index = 0;
//copia della porzione di colonna in smem
for (int i = 0; i<cell_to_copy; i++) {
index = (threadIdx.x * cell_to_copy) + i;
if (base_row + index < row && index < thread_height) {
smem_pivot_col[index] = matrix[(base_row + index) * col + pivot_col];
}
}
//sincronizza tutti i thread del blocco in modo tale che la smem sia consistente
__syncthreads();
if (col_index < pivot_col) {
//calcolo del numero di righe sulle quali deve iterare il thread, caso in cui la dimensione della matrice non collima con thread_height
int reached_row = (pivot_row + 1) + ((blockIdx.y + 1) * thread_height); //riga raggiunta dal thread corrente
if (reached_row > row) {
interation = thread_height - (reached_row - row); //dimensione non collima
}
else {
interation = thread_height; //caso normale
}
int row_offset = (pivot_row + 1) + (blockIdx.y * thread_height);
for (int i = 0; i<interation; i++) {
int pivot_element = smem_pivot_col[i];
if (pivot_element != 0) {
y = mul_mod_GPU(inv, pivot_element, module); //tutti i thread sulla stessa riga calcolano lo stesso risultato
x = mul_mod_GPU(y, smem_pivot_row[threadIdx.x], module);
matrix[row_offset * col + col_index] = sub_mod_GPU(matrix[row_offset * col + col_index], x, module);
}
row_offset++;
}
}
}
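// Driver variant that uses the tiled reduction above; the pivot column below the pivot row is
// zeroed afterwards by the separate reset_pivot_col kernel.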
__global__ void gaussian_reduction_by_block(int *matrix, int row, int col, int module) {
int pivot_row = 0, r = 0, rows_found = 0;
int inv;
int block_dim = 0;
int threads_per_block = 0;
int block_x_axis, block_y_axis = 0;
for (int pivot_col = col - 1; pivot_col >= 0; pivot_col--) {
r = rows_found;
///////////////////////////FIND PIVOT///////////////////////////////////////////////
block_dim = 256; //base 256
int row_to_check = row - rows_found;
threads_per_block = (row_to_check < block_dim ? row_to_check : block_dim);
dim3 t_find(threads_per_block);
if (threads_per_block == block_dim && row_to_check != block_dim) {
block_x_axis = (row_to_check / block_dim) + 1;
}
else {
block_x_axis = 1;
}
dim3 b_find(block_x_axis);
next_pivot_row = row;
find_pivot << <b_find, t_find >> >(matrix, row, col, r, pivot_col);
cudaDeviceSynchronize();
r = next_pivot_row;
/////////////////////////////////////////////////////////////////////////////////
if (r < row) {
if (r != rows_found) {
////////////////////////SWAP ROWS////////////////////////////////////////////////////////
block_dim = 256; //base 256
threads_per_block = (col < block_dim ? col : block_dim);
dim3 t_swap(threads_per_block);
if (threads_per_block == block_dim && col != block_dim) {
block_x_axis = (col / block_dim) + 1;
}
else {
block_x_axis = 1;
}
dim3 b_swap(block_x_axis);
//sposto la riga appena trovata nella posizone corretta
swap_rows << <b_swap, t_swap >> >(matrix, row, col, rows_found, r);
cudaDeviceSynchronize();
////////////////////////////////////////////////////////////////////////////////////////
}
pivot_row = rows_found;
rows_found++;
inv = invers_GPU(matrix[pivot_row*col + pivot_col], module);
////////////////////////////////////////REDUCTION BY BLOCK////////////////////////////////////
block_dim = 128; //base 128
int col_to_reduce = pivot_col;
threads_per_block = (col_to_reduce < block_dim ? col_to_reduce : block_dim);
dim3 threads(threads_per_block);
if (threads_per_block == block_dim && col_to_reduce != block_dim) {
block_x_axis = (col_to_reduce / block_dim) + 1;
}
else {
block_x_axis = 1;
}
int thread_height = 32; //base 256
int row_to_reduce = row - rows_found;
block_y_axis = (row_to_reduce / thread_height) + 1;
dim3 blocks(block_x_axis, block_y_axis);
int shared = (block_dim * sizeof(int)) + (thread_height * sizeof(int));
submatrix_reduction_by_block <<<blocks, threads, shared>>>(matrix, row, col, module, pivot_col, inv, pivot_row, thread_height, block_dim);
cudaDeviceSynchronize();
//////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////RESET PIVOT COL////////////////////////////////////////
thread_height = 50; //base 100
block_dim = 32; //base 32
row_to_reduce = row - pivot_row;
threads_per_block = (row_to_reduce < thread_height ? 1 : block_dim);
block_x_axis = (threads_per_block == block_dim && row_to_reduce != block_dim) ? (row_to_reduce / (thread_height*block_dim) + 1) : 1;
dim3 t(threads_per_block);
dim3 b(block_x_axis);
reset_pivot_col <<<b, t >>>(matrix, row, col, pivot_row, pivot_col, thread_height, block_dim);
cudaDeviceSynchronize();
//////////////////////////////////////////////////////////////////////////////////////
}
}
}
double gauss_CUDA(int *m, int row, int col, int module) {
int matrix_length = row * col;
int matrix_length_bytes = matrix_length * sizeof(int);
clock_t start, end;
double elapsed = 0.0;
int *m_d;
gpuErrchk(cudaMalloc((void **)&m_d, matrix_length_bytes));
gpuErrchk(cudaMemcpy(m_d, m, matrix_length_bytes, cudaMemcpyHostToDevice));
start = clock();
gaussian_reduction_by_row<<<1, 1>>>(m_d, row, col, module);
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(m, m_d, matrix_length_bytes, cudaMemcpyDeviceToHost));
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
gpuErrchk(cudaFree(m_d));
return elapsed;
}
void resolve_system(int **matrix, int * row, int col, struct map map, int *degree, int **monomi, int numero_variabili, int n_loops, int expansion, FILE *output_file) {
///////////////////////////// INIZIALIZZAZIONE STRUTTURE DI SUPPORTO ////////////////////////////
clock_t start, end;
double elapsed;
//creo l'array che conterrā i gradi dei vari round
int **m_deg_array = (int **)malloc(sizeof(int*));
m_deg_array[0] = (int *)calloc(max_degree + 1, sizeof(int));
int n_round = 0;
int *m_deg = m_deg_array[0];
int missing_degree = max_degree;
fprintf(output_file, "Inizio computazione, metodo standard\n");
matrix_degree(*matrix, *row, col, m_deg, monomi, numero_variabili);
///////////////////////////////////////////////////////////////////////////////////////
int stop = 0;
while (stop != 1) {
n_round++;
fprintf(output_file, "\n -Eseguo moltiplicazione, ");
fflush(stdout);
start = clock();
//find missing degree to multiply matrix
for (int i = max_degree; i>0; i--) {
if (m_deg[i] == 0) {
missing_degree = i;
break;
}
}
/////////////// ESPANSIONE SISTEMA ////////////////////////////////
moltiplica_matrice(matrix, row, col, map, degree, monomi, numero_variabili, missing_degree, max_degree);
//////////////////////////////////////////////////////////////////
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "numero righe: %d (%f sec)", *row, elapsed);
fprintf(output_file, "\n -Eseguo Gauss, ");
fflush(stdout);
///////////////////// RIDUZIONE SISTEMA //////////////////////////////
elapsed = gauss_CUDA(*matrix, *row, col, module);
//elimino le righe nulle della matrice
eliminate_null_rows(matrix, row, col);
m_deg_array = (int **)realloc(m_deg_array, sizeof(int*)*(n_round + 1));
m_deg_array[n_round] = (int *)calloc(max_degree + 1, sizeof(int));
m_deg = m_deg_array[n_round];
/////////////////////////////////////////////////////////////////////
//////////////// CALCOLO GRADI MANCANTI /////////////////////////////
fprintf(output_file, "numero righe: %d (%f sec)\n", *row, elapsed);
matrix_degree(*matrix, *row, col, m_deg, monomi, numero_variabili);
print_matrix_degree(m_deg, output_file, max_degree);
if (target_degree(m_deg, max_degree) == 0)
stop = 1;
}
for (int i = 0; i < n_round + 1; i++)
free(m_deg_array[i]);
free(m_deg_array);
}
int main(int argc, char *argv[]) {
/////////////////////// INIZIALIZZAZIONE ////////////////////////////////////////////
FILE *input_file = NULL, *output_file = NULL;
for (int parsed = 1; parsed < argc; parsed++) {
if (parsed < argc && !strcmp(argv[parsed], "--input")) {
parsed++;
input_file = fopen(argv[parsed], "r");
if (!input_file) {
perror("Errore nell'apertura del file di input");
return (-1);
}
}
else if (parsed < argc && !strcmp(argv[parsed], "--output")) {
parsed++;
output_file = fopen(argv[parsed], "w");
if (!output_file) {
perror("Errore nell'apertura del file di output");
return (-1);
}
}
}
if (!input_file)
input_file = stdin;
if (!output_file)
output_file = stdout;
int row, col, numero_variabili, tipo_ordinamento;
int *matrix;
char *variabili;
row = col = numero_variabili = 0;
int(*ord) (void*, const void *, const void *);
struct map smap;
clock_t start, end;
double elapsed = 0.0;
start = clock();
//////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////// ALLOCAZIONE STRUTTURE DI SUPPORTO ///////////////////////////////////////////
allocation(&matrix, &row, &col, &numero_variabili, &variabili, &tipo_ordinamento, &module, &max_degree, input_file);
if (order(&ord, tipo_ordinamento) != 0) {
fprintf(stderr, "Ordinamento inesistente!!!\n\nTERMINAZIONE PROGRAMMA");
return 0;
}
int * degree = (int *)calloc(max_degree + 1, sizeof(int));
int numero_monomi = col;
int **monomi;
//crea il vettore con tutti i possibili monomi avendo num_var variabili e max_degree come massimo grado
monomi = monomial_computation(numero_variabili, max_degree, numero_monomi);
//ordina il vettore dei monomi secondo un determinato ordinamento, ordinamento intercambiabile
qsort_s(monomi, numero_monomi, sizeof(int*), ord, &numero_variabili);
//inizializzazione matrice (lettura dati input)
if (parse(numero_variabili, variabili, matrix, row, monomi, col, module, ord, input_file) == -1) {
fprintf(stderr, "Errore di input !!!\n\nTERMINAZIONE PROGRAMMA"); //se l'input è in formato scorretto abort del programma
return 0;
}
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "\nInizializzazione in %f sec\n", elapsed);
start = clock();
setup_struct_map(&smap, monomi, numero_monomi, numero_variabili, max_degree, ord);
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "\nMappa creata in %f sec, %d x %d \n\n", elapsed, col, col);
/////////////////////////// RISOLUZIONE SISTEMA ////////////////////////////////////////////////
start = clock();
//inizializzazione vettore dei gradi dei polinomi
init_degree_vector(degree, numero_variabili, max_degree);
int n_loops = 30, expansion = 1;
//eseguo moltiplicazione e riduzione di Gauss finche non trovo soluzione
resolve_system(&matrix, &row, col, smap, degree, monomi, numero_variabili, n_loops, expansion, output_file);
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "\nTarget raggiunto, soluzione trovata in %f sec\n\n", elapsed);
/////////////////////////////////////////////////////////////////////////////////////////////
print_incognite(matrix, row, col, numero_variabili, monomi, output_file);
free(matrix);
free(degree);
cudaDeviceReset();
return 0;
}
|
89c348536b12f0b5c1bbac3e376f463144c9fea7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Thread2D.h"
#include "Thread1D.h"
#include "cudas.h"
#include "Indices_GPU.h"
#include "RipplingMath.h"
using namespace gpu;
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
__global__ void rippling(uchar4* tabPixelsGM , uint w , uint h , float t)
{
RipplingMath ripplingMath = RipplingMath(w , h, t);
const int TID = Thread2D::tid();
const int NB_THREAD = Thread2D::nbThread();
const int WH = w * h;
int pixelI, pixelJ;
int s = TID;
while(s<WH){
Indices::toIJ(s, w, &pixelI, &pixelJ);
ripplingMath.colorIJ(&tabPixelsGM[s], pixelI, pixelJ);
s+= NB_THREAD;
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 89c348536b12f0b5c1bbac3e376f463144c9fea7.cu | #include "Thread2D.h"
#include "Thread1D.h"
#include "cudas.h"
#include "Indices_GPU.h"
#include "RipplingMath.h"
using namespace gpu;
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
__global__ void rippling(uchar4* tabPixelsGM , uint w , uint h , float t)
{
RipplingMath ripplingMath = RipplingMath(w , h, t);
const int TID = Thread2D::tid();
const int NB_THREAD = Thread2D::nbThread();
const int WH = w * h;
int pixelI, pixelJ;
int s = TID;
while(s<WH){
Indices::toIJ(s, w, &pixelI, &pixelJ);
ripplingMath.colorIJ(&tabPixelsGM[s], pixelI, pixelJ);
s+= NB_THREAD;
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
0be6fca04a0a1366fb55787ebee85cd35fc3e004.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kMultByRowVector.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat = NULL;
hipMalloc(&mat, XSIZE*YSIZE);
float *vec = NULL;
hipMalloc(&vec, XSIZE*YSIZE);
float *tgtMat = NULL;
hipMalloc(&tgtMat, XSIZE*YSIZE);
unsigned int width = 1;
unsigned int height = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kMultByRowVector), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,vec,tgtMat,width,height);
hipDeviceSynchronize();
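// warm-up: a few untimed launches before measuring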
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kMultByRowVector), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,vec,tgtMat,width,height);
}
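// timed region: 1000 back-to-back launches (note: no device synchronization before stopping the clock)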
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kMultByRowVector), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,vec,tgtMat,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0be6fca04a0a1366fb55787ebee85cd35fc3e004.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kMultByRowVector.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat = NULL;
cudaMalloc(&mat, XSIZE*YSIZE);
float *vec = NULL;
cudaMalloc(&vec, XSIZE*YSIZE);
float *tgtMat = NULL;
cudaMalloc(&tgtMat, XSIZE*YSIZE);
unsigned int width = 1;
unsigned int height = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kMultByRowVector<<<gridBlock,threadBlock>>>(mat,vec,tgtMat,width,height);
cudaDeviceSynchronize();
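// warm-up: a few untimed launches before measuring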
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kMultByRowVector<<<gridBlock,threadBlock>>>(mat,vec,tgtMat,width,height);
}
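// timed region: 1000 back-to-back launches (note: no device synchronization before stopping the clock)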
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kMultByRowVector<<<gridBlock,threadBlock>>>(mat,vec,tgtMat,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f6ce27904f68852c725bf56ddc39be4c15d4cdd2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright by Contributors 2017
*/
#include <dmlc/parameter.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <xgboost/predictor.h>
#include <xgboost/tree_model.h>
#include <xgboost/tree_updater.h>
#include <memory>
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../common/host_device_vector.h"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
/*! \brief prediction parameters */
struct GPUPredictionParam : public dmlc::Parameter<GPUPredictionParam> {
int gpu_id;
int n_gpus;
bool silent;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUPredictionParam) {
DMLC_DECLARE_FIELD(gpu_id).set_default(0).describe(
"Device ordinal for GPU prediction.");
DMLC_DECLARE_FIELD(n_gpus).set_default(1).describe(
"Number of devices to use for prediction (NOT IMPLEMENTED).");
DMLC_DECLARE_FIELD(silent).set_default(false).describe(
"Do not print information during trainig.");
}
};
DMLC_REGISTER_PARAMETER(GPUPredictionParam);
template <typename IterT>
void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
thrust::transform(begin_itr, end_itr, begin_itr,
[=] __device__(size_t elem) { return elem + amount; });
}
/**
* \struct DeviceMatrix
*
* \brief A csr representation of the input matrix allocated on the device.
*/
struct DeviceMatrix {
DMatrix* p_mat; // Pointer to the original matrix on the host
dh::BulkAllocator<dh::MemoryType::kDevice> ba;
dh::DVec<size_t> row_ptr;
dh::DVec<Entry> data;
thrust::device_vector<float> predictions;
DeviceMatrix(DMatrix* dmat, int device_idx, bool silent) : p_mat(dmat) {
dh::safe_cuda(hipSetDevice(device_idx));
const auto& info = dmat->Info();
ba.Allocate(device_idx, silent, &row_ptr, info.num_row_ + 1, &data,
info.num_nonzero_);
size_t data_offset = 0;
for (const auto &batch : dmat->GetRowBatches()) {
const auto& offset_vec = batch.offset.HostVector();
const auto& data_vec = batch.data.HostVector();
// Copy row ptr
dh::safe_cuda(hipMemcpy(
row_ptr.Data() + batch.base_rowid, offset_vec.data(),
sizeof(size_t) * offset_vec.size(), hipMemcpyHostToDevice));
if (batch.base_rowid > 0) {
auto begin_itr = row_ptr.tbegin() + batch.base_rowid;
auto end_itr = begin_itr + batch.Size() + 1;
IncrementOffset(begin_itr, end_itr, batch.base_rowid);
}
dh::safe_cuda(hipMemcpy(data.Data() + data_offset, data_vec.data(),
sizeof(Entry) * data_vec.size(),
hipMemcpyHostToDevice));
// Copy data
data_offset += batch.data.Size();
}
}
};
/**
* \struct DevicePredictionNode
*
* \brief Packed 16 byte representation of a tree node for use in device
* prediction
*/
struct DevicePredictionNode {
XGBOOST_DEVICE DevicePredictionNode()
: fidx(-1), left_child_idx(-1), right_child_idx(-1) {}
union NodeValue {
float leaf_weight;
float fvalue;
};
int fidx;
int left_child_idx;
int right_child_idx;
NodeValue val;
DevicePredictionNode(const RegTree::Node& n) { // NOLINT
this->left_child_idx = n.LeftChild();
this->right_child_idx = n.RightChild();
this->fidx = n.SplitIndex();
if (n.DefaultLeft()) {
fidx |= (1U << 31);
}
if (n.IsLeaf()) {
this->val.leaf_weight = n.LeafValue();
} else {
this->val.fvalue = n.SplitCond();
}
}
XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; }
XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); }
XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; }
XGBOOST_DEVICE int MissingIdx() const {
if (MissingLeft()) {
return this->left_child_idx;
} else {
return this->right_child_idx;
}
}
XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; }
XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; }
};
struct ElementLoader {
bool use_shared;
size_t* d_row_ptr;
Entry* d_data;
int num_features;
float* smem;
__device__ ElementLoader(bool use_shared, size_t* row_ptr,
Entry* entry, int num_features,
float* smem, int num_rows)
: use_shared(use_shared),
d_row_ptr(row_ptr),
d_data(entry),
num_features(num_features),
smem(smem) {
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = d_row_ptr[global_idx];
bst_uint elem_end = d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = d_data[elem_idx];
smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetFvalue(int ridx, int fidx) {
if (use_shared) {
return smem[threadIdx.x * num_features + fidx];
} else {
// Binary search
auto begin_ptr = d_data + d_row_ptr[ridx];
auto end_ptr = d_data + d_row_ptr[ridx + 1];
Entry* previous_middle = nullptr;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
}
};
__device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree,
ElementLoader* loader) {
DevicePredictionNode n = tree[0];
while (!n.IsLeaf()) {
float fvalue = loader->GetFvalue(ridx, n.GetFidx());
// Missing value
if (isnan(fvalue)) {
n = tree[n.MissingIdx()];
} else {
if (fvalue < n.GetFvalue()) {
n = tree[n.left_child_idx];
} else {
n = tree[n.right_child_idx];
}
}
}
return n.GetWeight();
}
template <int BLOCK_THREADS>
__global__ void PredictKernel(const DevicePredictionNode* d_nodes,
float* d_out_predictions, size_t* d_tree_segments,
int* d_tree_group, size_t* d_row_ptr,
Entry* d_data, size_t tree_begin,
size_t tree_end, size_t num_features,
size_t num_rows, bool use_shared, int num_group) {
extern __shared__ float smem[];
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem,
num_rows);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const DevicePredictionNode* d_tree =
d_nodes + d_tree_segments[tree_idx - tree_begin];
sum += GetLeafWeight(global_idx, d_tree, &loader);
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const DevicePredictionNode* d_tree =
d_nodes + d_tree_segments[tree_idx - tree_begin];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, &loader);
}
}
}
class GPUPredictor : public xgboost::Predictor {
protected:
struct DevicePredictionCacheEntry {
std::shared_ptr<DMatrix> data;
HostDeviceVector<bst_float> predictions;
};
private:
void DevicePredictInternal(DMatrix* dmat,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
if (tree_end - tree_begin == 0) {
return;
}
std::shared_ptr<DeviceMatrix> device_matrix;
// Matrix is not in host cache, create a temporary matrix
if (this->cache_.find(dmat) == this->cache_.end()) {
device_matrix = std::shared_ptr<DeviceMatrix>(
new DeviceMatrix(dmat, param.gpu_id, param.silent));
} else {
// Create this matrix on device if doesn't exist
if (this->device_matrix_cache_.find(dmat) ==
this->device_matrix_cache_.end()) {
this->device_matrix_cache_.emplace(
dmat, std::shared_ptr<DeviceMatrix>(
new DeviceMatrix(dmat, param.gpu_id, param.silent)));
}
device_matrix = device_matrix_cache_.find(dmat)->second;
}
dh::safe_cuda(hipSetDevice(param.gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
thrust::host_vector<size_t> h_tree_segments;
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees[tree_idx]->GetNodes().size();
h_tree_segments.push_back(sum);
}
thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees[tree_idx]->GetNodes();
std::copy(src_nodes.begin(), src_nodes.end(),
h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
}
nodes.resize(h_nodes.size());
dh::safe_cuda(hipMemcpy(dh::Raw(nodes), h_nodes.data(),
sizeof(DevicePredictionNode) * h_nodes.size(),
hipMemcpyHostToDevice));
tree_segments.resize(h_tree_segments.size());
dh::safe_cuda(hipMemcpy(dh::Raw(tree_segments), h_tree_segments.data(),
sizeof(size_t) * h_tree_segments.size(),
hipMemcpyHostToDevice));
tree_group.resize(model.tree_info.size());
dh::safe_cuda(hipMemcpy(dh::Raw(tree_group), model.tree_info.data(),
sizeof(int) * model.tree_info.size(),
hipMemcpyHostToDevice));
device_matrix->predictions.resize(out_preds->Size());
auto& predictions = device_matrix->predictions;
out_preds->GatherTo(predictions.data(),
predictions.data() + predictions.size());
dh::safe_cuda(hipSetDevice(param.gpu_id));
const int BLOCK_THREADS = 128;
const int GRID_SIZE = static_cast<int>(
dh::DivRoundUp(device_matrix->row_ptr.Size() - 1, BLOCK_THREADS));
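// Cache each row's features in shared memory only if a full block's worth fits within the device
// limit; otherwise ElementLoader falls back to a binary search over the row in global memory.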
int shared_memory_bytes = static_cast<int>(
sizeof(float) * device_matrix->p_mat->Info().num_col_ * BLOCK_THREADS);
bool use_shared = true;
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
use_shared = false;
}
hipLaunchKernelGGL(( PredictKernel<BLOCK_THREADS>)
, dim3(GRID_SIZE), dim3(BLOCK_THREADS), shared_memory_bytes, 0,
dh::Raw(nodes), dh::Raw(device_matrix->predictions),
dh::Raw(tree_segments), dh::Raw(tree_group),
device_matrix->row_ptr.Data(), device_matrix->data.Data(),
tree_begin, tree_end, device_matrix->p_mat->Info().num_col_,
device_matrix->p_mat->Info().num_row_, use_shared,
model.param.num_output_group);
dh::safe_cuda(hipDeviceSynchronize());
out_preds->ScatterFrom(predictions.data(),
predictions.data() + predictions.size());
}
public:
GPUPredictor() : cpu_predictor(Predictor::Create("cpu_predictor")) {}
void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, int tree_begin,
unsigned ntree_limit = 0) override {
if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
return;
}
this->InitOutPredictions(dmat->Info(), out_preds, model);
int tree_end = ntree_limit * model.param.num_output_group;
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
tree_end = static_cast<unsigned>(model.trees.size());
}
DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n = model.param.num_output_group * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->Reshard(devices);
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(out_preds->Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.base_margin);
}
}
bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit) {
if (ntree_limit == 0 ||
ntree_limit * model.param.num_output_group >= model.trees.size()) {
auto it = cache_.find(dmat);
if (it != cache_.end()) {
const HostDeviceVector<bst_float>& y = it->second.predictions;
if (y.Size() != 0) {
out_preds->Reshard(devices);
out_preds->Resize(y.Size());
out_preds->Copy(y);
return true;
}
}
}
return false;
}
void UpdatePredictionCache(
const gbm::GBTreeModel& model,
std::vector<std::unique_ptr<TreeUpdater>>* updaters,
int num_new_trees) override {
auto old_ntree = model.trees.size() - num_new_trees;
// update cache entry
for (auto& kv : cache_) {
PredictionCacheEntry& e = kv.second;
DMatrix* dmat = kv.first;
HostDeviceVector<bst_float>& predictions = e.predictions;
if (predictions.Size() == 0) {
this->InitOutPredictions(dmat->Info(), &predictions, model);
}
if (model.param.num_output_group == 1 && updaters->size() > 0 &&
num_new_trees == 1 &&
updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) {
// do nothing
} else {
DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size());
}
}
}
void PredictInstance(const SparsePage::Inst& inst,
std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit,
unsigned root_index) override {
cpu_predictor->PredictInstance(inst, out_preds, model, root_index);
}
void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model,
unsigned ntree_limit) override {
cpu_predictor->PredictLeaf(p_fmat, out_preds, model, ntree_limit);
}
void PredictContribution(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
bool approximate, int condition,
unsigned condition_feature) override {
cpu_predictor->PredictContribution(p_fmat, out_contribs, model, ntree_limit,
approximate, condition,
condition_feature);
}
void PredictInteractionContributions(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned ntree_limit,
bool approximate) override {
cpu_predictor->PredictInteractionContributions(p_fmat, out_contribs, model,
ntree_limit, approximate);
}
void Init(const std::vector<std::pair<std::string, std::string>>& cfg,
const std::vector<std::shared_ptr<DMatrix>>& cache) override {
Predictor::Init(cfg, cache);
cpu_predictor->Init(cfg, cache);
param.InitAllowUnknown(cfg);
devices = GPUSet::All(param.n_gpus).Normalised(param.gpu_id);
max_shared_memory_bytes = dh::MaxSharedMemory(param.gpu_id);
}
private:
GPUPredictionParam param;
std::unique_ptr<Predictor> cpu_predictor;
std::unordered_map<DMatrix*, std::shared_ptr<DeviceMatrix>>
device_matrix_cache_;
thrust::device_vector<DevicePredictionNode> nodes;
thrust::device_vector<size_t> tree_segments;
thrust::device_vector<int> tree_group;
thrust::device_vector<bst_float> preds;
GPUSet devices;
size_t max_shared_memory_bytes;
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([]() { return new GPUPredictor(); });
} // namespace predictor
} // namespace xgboost
| f6ce27904f68852c725bf56ddc39be4c15d4cdd2.cu | /*!
* Copyright by Contributors 2017
*/
#include <dmlc/parameter.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <xgboost/predictor.h>
#include <xgboost/tree_model.h>
#include <xgboost/tree_updater.h>
#include <memory>
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../common/host_device_vector.h"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
/*! \brief prediction parameters */
struct GPUPredictionParam : public dmlc::Parameter<GPUPredictionParam> {
int gpu_id;
int n_gpus;
bool silent;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUPredictionParam) {
DMLC_DECLARE_FIELD(gpu_id).set_default(0).describe(
"Device ordinal for GPU prediction.");
DMLC_DECLARE_FIELD(n_gpus).set_default(1).describe(
"Number of devices to use for prediction (NOT IMPLEMENTED).");
DMLC_DECLARE_FIELD(silent).set_default(false).describe(
"Do not print information during trainig.");
}
};
DMLC_REGISTER_PARAMETER(GPUPredictionParam);
template <typename IterT>
void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
thrust::transform(begin_itr, end_itr, begin_itr,
[=] __device__(size_t elem) { return elem + amount; });
}
/**
* \struct DeviceMatrix
*
* \brief A csr representation of the input matrix allocated on the device.
*/
struct DeviceMatrix {
DMatrix* p_mat; // Pointer to the original matrix on the host
dh::BulkAllocator<dh::MemoryType::kDevice> ba;
dh::DVec<size_t> row_ptr;
dh::DVec<Entry> data;
thrust::device_vector<float> predictions;
DeviceMatrix(DMatrix* dmat, int device_idx, bool silent) : p_mat(dmat) {
dh::safe_cuda(cudaSetDevice(device_idx));
const auto& info = dmat->Info();
ba.Allocate(device_idx, silent, &row_ptr, info.num_row_ + 1, &data,
info.num_nonzero_);
size_t data_offset = 0;
for (const auto &batch : dmat->GetRowBatches()) {
const auto& offset_vec = batch.offset.HostVector();
const auto& data_vec = batch.data.HostVector();
// Copy row ptr
dh::safe_cuda(cudaMemcpy(
row_ptr.Data() + batch.base_rowid, offset_vec.data(),
sizeof(size_t) * offset_vec.size(), cudaMemcpyHostToDevice));
if (batch.base_rowid > 0) {
auto begin_itr = row_ptr.tbegin() + batch.base_rowid;
auto end_itr = begin_itr + batch.Size() + 1;
IncrementOffset(begin_itr, end_itr, batch.base_rowid);
}
dh::safe_cuda(cudaMemcpy(data.Data() + data_offset, data_vec.data(),
sizeof(Entry) * data_vec.size(),
cudaMemcpyHostToDevice));
// Copy data
data_offset += batch.data.Size();
}
}
};
/**
* \struct DevicePredictionNode
*
* \brief Packed 16 byte representation of a tree node for use in device
* prediction
*/
struct DevicePredictionNode {
XGBOOST_DEVICE DevicePredictionNode()
: fidx(-1), left_child_idx(-1), right_child_idx(-1) {}
union NodeValue {
float leaf_weight;
float fvalue;
};
int fidx;
int left_child_idx;
int right_child_idx;
NodeValue val;
DevicePredictionNode(const RegTree::Node& n) { // NOLINT
this->left_child_idx = n.LeftChild();
this->right_child_idx = n.RightChild();
this->fidx = n.SplitIndex();
if (n.DefaultLeft()) {
fidx |= (1U << 31);
}
if (n.IsLeaf()) {
this->val.leaf_weight = n.LeafValue();
} else {
this->val.fvalue = n.SplitCond();
}
}
XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; }
XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); }
XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; }
XGBOOST_DEVICE int MissingIdx() const {
if (MissingLeft()) {
return this->left_child_idx;
} else {
return this->right_child_idx;
}
}
XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; }
XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; }
};
struct ElementLoader {
bool use_shared;
size_t* d_row_ptr;
Entry* d_data;
int num_features;
float* smem;
__device__ ElementLoader(bool use_shared, size_t* row_ptr,
Entry* entry, int num_features,
float* smem, int num_rows)
: use_shared(use_shared),
d_row_ptr(row_ptr),
d_data(entry),
num_features(num_features),
smem(smem) {
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = d_row_ptr[global_idx];
bst_uint elem_end = d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = d_data[elem_idx];
smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetFvalue(int ridx, int fidx) {
if (use_shared) {
return smem[threadIdx.x * num_features + fidx];
} else {
// Binary search
auto begin_ptr = d_data + d_row_ptr[ridx];
auto end_ptr = d_data + d_row_ptr[ridx + 1];
Entry* previous_middle = nullptr;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
}
};
__device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree,
ElementLoader* loader) {
DevicePredictionNode n = tree[0];
while (!n.IsLeaf()) {
float fvalue = loader->GetFvalue(ridx, n.GetFidx());
// Missing value
if (isnan(fvalue)) {
n = tree[n.MissingIdx()];
} else {
if (fvalue < n.GetFvalue()) {
n = tree[n.left_child_idx];
} else {
n = tree[n.right_child_idx];
}
}
}
return n.GetWeight();
}
template <int BLOCK_THREADS>
__global__ void PredictKernel(const DevicePredictionNode* d_nodes,
float* d_out_predictions, size_t* d_tree_segments,
int* d_tree_group, size_t* d_row_ptr,
Entry* d_data, size_t tree_begin,
size_t tree_end, size_t num_features,
size_t num_rows, bool use_shared, int num_group) {
extern __shared__ float smem[];
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem,
num_rows);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const DevicePredictionNode* d_tree =
d_nodes + d_tree_segments[tree_idx - tree_begin];
sum += GetLeafWeight(global_idx, d_tree, &loader);
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const DevicePredictionNode* d_tree =
d_nodes + d_tree_segments[tree_idx - tree_begin];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, &loader);
}
}
}
class GPUPredictor : public xgboost::Predictor {
protected:
struct DevicePredictionCacheEntry {
std::shared_ptr<DMatrix> data;
HostDeviceVector<bst_float> predictions;
};
private:
void DevicePredictInternal(DMatrix* dmat,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
if (tree_end - tree_begin == 0) {
return;
}
std::shared_ptr<DeviceMatrix> device_matrix;
// Matrix is not in host cache, create a temporary matrix
if (this->cache_.find(dmat) == this->cache_.end()) {
device_matrix = std::shared_ptr<DeviceMatrix>(
new DeviceMatrix(dmat, param.gpu_id, param.silent));
} else {
// Create this matrix on device if doesn't exist
if (this->device_matrix_cache_.find(dmat) ==
this->device_matrix_cache_.end()) {
this->device_matrix_cache_.emplace(
dmat, std::shared_ptr<DeviceMatrix>(
new DeviceMatrix(dmat, param.gpu_id, param.silent)));
}
device_matrix = device_matrix_cache_.find(dmat)->second;
}
dh::safe_cuda(cudaSetDevice(param.gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
thrust::host_vector<size_t> h_tree_segments;
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees[tree_idx]->GetNodes().size();
h_tree_segments.push_back(sum);
}
thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees[tree_idx]->GetNodes();
std::copy(src_nodes.begin(), src_nodes.end(),
h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
}
nodes.resize(h_nodes.size());
dh::safe_cuda(cudaMemcpy(dh::Raw(nodes), h_nodes.data(),
sizeof(DevicePredictionNode) * h_nodes.size(),
cudaMemcpyHostToDevice));
tree_segments.resize(h_tree_segments.size());
dh::safe_cuda(cudaMemcpy(dh::Raw(tree_segments), h_tree_segments.data(),
sizeof(size_t) * h_tree_segments.size(),
cudaMemcpyHostToDevice));
tree_group.resize(model.tree_info.size());
dh::safe_cuda(cudaMemcpy(dh::Raw(tree_group), model.tree_info.data(),
sizeof(int) * model.tree_info.size(),
cudaMemcpyHostToDevice));
device_matrix->predictions.resize(out_preds->Size());
auto& predictions = device_matrix->predictions;
out_preds->GatherTo(predictions.data(),
predictions.data() + predictions.size());
dh::safe_cuda(cudaSetDevice(param.gpu_id));
const int BLOCK_THREADS = 128;
const int GRID_SIZE = static_cast<int>(
dh::DivRoundUp(device_matrix->row_ptr.Size() - 1, BLOCK_THREADS));
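// Cache each row's features in shared memory only if a full block's worth fits within the device
// limit; otherwise ElementLoader falls back to a binary search over the row in global memory.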
int shared_memory_bytes = static_cast<int>(
sizeof(float) * device_matrix->p_mat->Info().num_col_ * BLOCK_THREADS);
bool use_shared = true;
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
use_shared = false;
}
PredictKernel<BLOCK_THREADS>
<<<GRID_SIZE, BLOCK_THREADS, shared_memory_bytes>>>(
dh::Raw(nodes), dh::Raw(device_matrix->predictions),
dh::Raw(tree_segments), dh::Raw(tree_group),
device_matrix->row_ptr.Data(), device_matrix->data.Data(),
tree_begin, tree_end, device_matrix->p_mat->Info().num_col_,
device_matrix->p_mat->Info().num_row_, use_shared,
model.param.num_output_group);
dh::safe_cuda(cudaDeviceSynchronize());
out_preds->ScatterFrom(predictions.data(),
predictions.data() + predictions.size());
}
public:
GPUPredictor() : cpu_predictor(Predictor::Create("cpu_predictor")) {}
void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, int tree_begin,
unsigned ntree_limit = 0) override {
if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
return;
}
this->InitOutPredictions(dmat->Info(), out_preds, model);
int tree_end = ntree_limit * model.param.num_output_group;
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
tree_end = static_cast<unsigned>(model.trees.size());
}
DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n = model.param.num_output_group * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->Reshard(devices);
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(out_preds->Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.base_margin);
}
}
bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit) {
if (ntree_limit == 0 ||
ntree_limit * model.param.num_output_group >= model.trees.size()) {
auto it = cache_.find(dmat);
if (it != cache_.end()) {
const HostDeviceVector<bst_float>& y = it->second.predictions;
if (y.Size() != 0) {
out_preds->Reshard(devices);
out_preds->Resize(y.Size());
out_preds->Copy(y);
return true;
}
}
}
return false;
}
void UpdatePredictionCache(
const gbm::GBTreeModel& model,
std::vector<std::unique_ptr<TreeUpdater>>* updaters,
int num_new_trees) override {
auto old_ntree = model.trees.size() - num_new_trees;
// update cache entry
for (auto& kv : cache_) {
PredictionCacheEntry& e = kv.second;
DMatrix* dmat = kv.first;
HostDeviceVector<bst_float>& predictions = e.predictions;
if (predictions.Size() == 0) {
this->InitOutPredictions(dmat->Info(), &predictions, model);
}
if (model.param.num_output_group == 1 && updaters->size() > 0 &&
num_new_trees == 1 &&
updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) {
// do nothing
} else {
DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size());
}
}
}
void PredictInstance(const SparsePage::Inst& inst,
std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit,
unsigned root_index) override {
cpu_predictor->PredictInstance(inst, out_preds, model, root_index);
}
void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model,
unsigned ntree_limit) override {
cpu_predictor->PredictLeaf(p_fmat, out_preds, model, ntree_limit);
}
void PredictContribution(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
bool approximate, int condition,
unsigned condition_feature) override {
cpu_predictor->PredictContribution(p_fmat, out_contribs, model, ntree_limit,
approximate, condition,
condition_feature);
}
void PredictInteractionContributions(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned ntree_limit,
bool approximate) override {
cpu_predictor->PredictInteractionContributions(p_fmat, out_contribs, model,
ntree_limit, approximate);
}
void Init(const std::vector<std::pair<std::string, std::string>>& cfg,
const std::vector<std::shared_ptr<DMatrix>>& cache) override {
Predictor::Init(cfg, cache);
cpu_predictor->Init(cfg, cache);
param.InitAllowUnknown(cfg);
devices = GPUSet::All(param.n_gpus).Normalised(param.gpu_id);
max_shared_memory_bytes = dh::MaxSharedMemory(param.gpu_id);
}
private:
GPUPredictionParam param;
std::unique_ptr<Predictor> cpu_predictor;
std::unordered_map<DMatrix*, std::shared_ptr<DeviceMatrix>>
device_matrix_cache_;
thrust::device_vector<DevicePredictionNode> nodes;
thrust::device_vector<size_t> tree_segments;
thrust::device_vector<int> tree_group;
thrust::device_vector<bst_float> preds;
GPUSet devices;
size_t max_shared_memory_bytes;
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([]() { return new GPUPredictor(); });
} // namespace predictor
} // namespace xgboost
|
c3855add53d18f4df4cd6619db12ba4a3615c488.hip | // !!! This is a file automatically generated by hipify!!!
// C++ headers
#include <algorithm>
#include <numeric>
// CUDA runtime
#include <hip/hip_runtime.h>
// CMSSW headers
#include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "PixelRecHitGPUKernel.h"
#include "gpuPixelRecHits.h"
namespace {
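// Fills hitsLayerStart[0..10] with the index of the first hit of each layer (10 layers plus an end
// marker), looked up from the per-module hit start array.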
__global__ void setHitsLayerStart(uint32_t const* __restrict__ hitsModuleStart,
pixelCPEforGPU::ParamsOnGPU const* cpeParams,
uint32_t* hitsLayerStart) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
assert(0 == hitsModuleStart[0]);
if (i < 11) {
hitsLayerStart[i] = hitsModuleStart[cpeParams->layerGeometry().layerStart[i]];
#ifdef GPU_DEBUG
printf("LayerStart %d %d: %d\n", i, cpeParams->layerGeometry().layerStart[i], hitsLayerStart[i]);
#endif
}
}
} // namespace
namespace pixelgpudetails {
TrackingRecHit2DGPU PixelRecHitGPUKernel::makeHitsAsync(SiPixelDigisCUDA const& digis_d,
SiPixelClustersCUDA const& clusters_d,
BeamSpotCUDA const& bs_d,
pixelCPEforGPU::ParamsOnGPU const* cpeParams,
hipStream_t stream) const {
auto nHits = clusters_d.nClusters();
TrackingRecHit2DGPU hits_d(nHits, cpeParams, clusters_d.clusModuleStart(), stream);
int threadsPerBlock = 128;
int blocks = digis_d.nModules(); // active modules (with digis)
#ifdef GPU_DEBUG
std::cout << "launching getHits kernel for " << blocks << " blocks" << std::endl;
#endif
// protect from empty events
if (blocks) {
hipLaunchKernelGGL(( gpuPixelRecHits::getHits), dim3(blocks), dim3(threadsPerBlock), 0, stream,
cpeParams, bs_d.data(), digis_d.view(), digis_d.nDigis(), clusters_d.view(), hits_d.view());
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
cudaCheck(hipDeviceSynchronize());
#endif
}
// assuming full warp of threads is better than a smaller number...
if (nHits) {
hipLaunchKernelGGL(( setHitsLayerStart), dim3(1), dim3(32), 0, stream, clusters_d.clusModuleStart(), cpeParams, hits_d.hitsLayerStart());
cudaCheck(hipGetLastError());
cms::cuda::fillManyFromVector(
hits_d.phiBinner(), 10, hits_d.iphi(), hits_d.hitsLayerStart(), nHits, 256, hits_d.phiBinnerStorage(), stream);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
cudaCheck(hipDeviceSynchronize());
#endif
}
return hits_d;
}
} // namespace pixelgpudetails
| c3855add53d18f4df4cd6619db12ba4a3615c488.cu | // C++ headers
#include <algorithm>
#include <numeric>
// CUDA runtime
#include <cuda_runtime.h>
// CMSSW headers
#include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "PixelRecHitGPUKernel.h"
#include "gpuPixelRecHits.h"
namespace {
__global__ void setHitsLayerStart(uint32_t const* __restrict__ hitsModuleStart,
pixelCPEforGPU::ParamsOnGPU const* cpeParams,
uint32_t* hitsLayerStart) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
assert(0 == hitsModuleStart[0]);
if (i < 11) {
hitsLayerStart[i] = hitsModuleStart[cpeParams->layerGeometry().layerStart[i]];
#ifdef GPU_DEBUG
printf("LayerStart %d %d: %d\n", i, cpeParams->layerGeometry().layerStart[i], hitsLayerStart[i]);
#endif
}
}
} // namespace
namespace pixelgpudetails {
TrackingRecHit2DGPU PixelRecHitGPUKernel::makeHitsAsync(SiPixelDigisCUDA const& digis_d,
SiPixelClustersCUDA const& clusters_d,
BeamSpotCUDA const& bs_d,
pixelCPEforGPU::ParamsOnGPU const* cpeParams,
cudaStream_t stream) const {
auto nHits = clusters_d.nClusters();
TrackingRecHit2DGPU hits_d(nHits, cpeParams, clusters_d.clusModuleStart(), stream);
int threadsPerBlock = 128;
int blocks = digis_d.nModules(); // active modules (with digis)
#ifdef GPU_DEBUG
std::cout << "launching getHits kernel for " << blocks << " blocks" << std::endl;
#endif
// protect from empty events
if (blocks) {
gpuPixelRecHits::getHits<<<blocks, threadsPerBlock, 0, stream>>>(
cpeParams, bs_d.data(), digis_d.view(), digis_d.nDigis(), clusters_d.view(), hits_d.view());
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaCheck(cudaDeviceSynchronize());
#endif
}
// assuming full warp of threads is better than a smaller number...
if (nHits) {
setHitsLayerStart<<<1, 32, 0, stream>>>(clusters_d.clusModuleStart(), cpeParams, hits_d.hitsLayerStart());
cudaCheck(cudaGetLastError());
cms::cuda::fillManyFromVector(
hits_d.phiBinner(), 10, hits_d.iphi(), hits_d.hitsLayerStart(), nHits, 256, hits_d.phiBinnerStorage(), stream);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaCheck(cudaDeviceSynchronize());
#endif
}
return hits_d;
}
} // namespace pixelgpudetails
|
7301f0e3ca14fc41c2d08d692aa99000dbac5d05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/zlarfx.cu, normal z -> d, Thu Oct 8 23:05:34 2020
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
/******************************************************************************/
__global__
void magma_dlarfx_kernel( int m, double *v, double *tau,
double *c, int ldc, double *xnorm,
double *T, int it )
{
if ( !MAGMA_D_EQUAL(*tau, MAGMA_D_ZERO) ) {
const int tx = threadIdx.x;
//double *dc = c + (blockIdx.x-it-1) * ldc;
double *dc = c + (blockIdx.x) * ldc;
__shared__ double sum[ BLOCK_SIZE ];
double lsum;
/* NOTE HERE C is the C at position C(i, 0)
* if blockIdx.x < it it performs the V(i:n,i)' * V(i:n,1:i-1)' used for computing T
* if blockIdx.x > it it performs w := v**H * C */
lsum = MAGMA_D_ZERO;
for (int j = tx; j < m; j += BLOCK_SIZE) {
if (j == 0) {
lsum += MAGMA_D_MUL( MAGMA_D_ONE, dc[j] );
v[j] = MAGMA_D_ONE;
}
else
lsum += MAGMA_D_MUL( MAGMA_D_CONJ( v[j] ), dc[j] );
}
sum[tx] = lsum;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
double z__1 = - MAGMA_D_CONJ(*tau) * sum[0];
if (blockIdx.x > it) {
for (int j = m-tx-1; j >= 0; j -= BLOCK_SIZE)
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
/*
if (tx == 0) {
double temp = MAGMA_D_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
*/
}
else
{
if (blockIdx.x == it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_D_CONJ(z__1);
}
}
else if (blockIdx.x <= it)// in case tau is zero put the corresponding column of T to zero
{
*(T+blockIdx.x) = MAGMA_D_ZERO;
}
}
/******************************************************************************/
extern "C"
__global__
void magma_dtrmv_kernel(const double *T, int ldt, double *t)
{
const int tx = threadIdx.x;
T += tx;
__shared__ double tlocal[ BLOCK_SIZE ];
double res = MAGMA_D_MAKE(0., 0.);
tlocal[tx] = t[tx];
__syncthreads();
#pragma unroll
for (int j=0; j < blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[tx] = res;
}
/******************************************************************************/
extern "C"
__global__
void magma_dtrmv_kernel2(const double *T, int ldt, double *t,
double *y, double *tau)
{
const int tx = threadIdx.x;
T += blockIdx.x;
__shared__ double sum[ 128 ];
sum[tx] = T[tx*ldt]*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx == 0) {
y[blockIdx.x] = sum[0];
if (blockIdx.x == 0)
y[gridDim.x] = tau[0];
}
}
/******************************************************************************/
extern "C"
__global__
void magma_dtrmv_tkernel(double *T, int ldt, double *t, double *y)
{
const int tx = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ double sum[ 128 ];
sum[tx] = MAGMA_D_CONJ(T[tx])*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx == 0)
y[blockIdx.x] = sum[0];
}
/******************************************************************************/
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
are adjusted to hold the norms of v(2:m,2:n). This differs from
LAPACK's dlarf routine.
*/
extern "C" void
magma_dlarfx_gpu(
magma_int_t m, magma_int_t n,
magmaDouble_ptr v,
magmaDouble_ptr tau,
magmaDouble_ptr C, magma_int_t ldc,
magmaDouble_ptr xnorm,
magmaDouble_ptr dT, magma_int_t iter,
magmaDouble_ptr work,
magma_queue_t queue )
{
magma_int_t N = n + iter + 1;
if (iter == 0) {
hipLaunchKernelGGL(( magma_dlarfx_kernel)
, dim3(N), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m, v, tau, C, ldc, xnorm, dT+iter*N, iter );
}
else {
hipLaunchKernelGGL(( magma_dlarfx_kernel)
, dim3(N), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m, v, tau, C, ldc, xnorm, work, iter );
}
if (iter > 0) {
//magma_dtrmv_kernel
// <<< 1, iter, 0, queue->cuda_stream() >>>
// ( dT, N, dT+iter*N);
hipLaunchKernelGGL(( magma_dtrmv_kernel2)
, dim3(iter), dim3(iter), 0, queue->cuda_stream() ,
dT, N, work, dT+iter*N, tau );
}
}
| 7301f0e3ca14fc41c2d08d692aa99000dbac5d05.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/zlarfx.cu, normal z -> d, Thu Oct 8 23:05:34 2020
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
/******************************************************************************/
__global__
void magma_dlarfx_kernel( int m, double *v, double *tau,
double *c, int ldc, double *xnorm,
double *T, int it )
{
if ( !MAGMA_D_EQUAL(*tau, MAGMA_D_ZERO) ) {
const int tx = threadIdx.x;
//double *dc = c + (blockIdx.x-it-1) * ldc;
double *dc = c + (blockIdx.x) * ldc;
__shared__ double sum[ BLOCK_SIZE ];
double lsum;
/* NOTE HERE C is the C at position C(i, 0)
* if blockIdx.x < it it performs the V(i:n,i)' * V(i:n,1:i-1)' used for computing T
* if blockIdx.x > it it performs w := v**H * C */
lsum = MAGMA_D_ZERO;
for (int j = tx; j < m; j += BLOCK_SIZE) {
if (j == 0) {
lsum += MAGMA_D_MUL( MAGMA_D_ONE, dc[j] );
v[j] = MAGMA_D_ONE;
}
else
lsum += MAGMA_D_MUL( MAGMA_D_CONJ( v[j] ), dc[j] );
}
sum[tx] = lsum;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
double z__1 = - MAGMA_D_CONJ(*tau) * sum[0];
if (blockIdx.x > it) {
for (int j = m-tx-1; j >= 0; j -= BLOCK_SIZE)
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
/*
if (tx == 0) {
double temp = MAGMA_D_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
*/
}
else
{
if (blockIdx.x == it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_D_CONJ(z__1);
}
}
else if (blockIdx.x <= it)// in case tau is zero put the corresponding column of T to zero
{
*(T+blockIdx.x) = MAGMA_D_ZERO;
}
}
/******************************************************************************/
extern "C"
__global__
void magma_dtrmv_kernel(const double *T, int ldt, double *t)
{
const int tx = threadIdx.x;
T += tx;
__shared__ double tlocal[ BLOCK_SIZE ];
double res = MAGMA_D_MAKE(0., 0.);
tlocal[tx] = t[tx];
__syncthreads();
#pragma unroll
for (int j=0; j < blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[tx] = res;
}
/******************************************************************************/
extern "C"
__global__
void magma_dtrmv_kernel2(const double *T, int ldt, double *t,
double *y, double *tau)
{
const int tx = threadIdx.x;
T += blockIdx.x;
__shared__ double sum[ 128 ];
sum[tx] = T[tx*ldt]*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx == 0) {
y[blockIdx.x] = sum[0];
if (blockIdx.x == 0)
y[gridDim.x] = tau[0];
}
}
/******************************************************************************/
extern "C"
__global__
void magma_dtrmv_tkernel(double *T, int ldt, double *t, double *y)
{
const int tx = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ double sum[ 128 ];
sum[tx] = MAGMA_D_CONJ(T[tx])*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx == 0)
y[blockIdx.x] = sum[0];
}
/******************************************************************************/
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
are adjusted to hold the norms of v(2:m,2:n). This differs from
LAPACK's dlarf routine.
*/
extern "C" void
magma_dlarfx_gpu(
magma_int_t m, magma_int_t n,
magmaDouble_ptr v,
magmaDouble_ptr tau,
magmaDouble_ptr C, magma_int_t ldc,
magmaDouble_ptr xnorm,
magmaDouble_ptr dT, magma_int_t iter,
magmaDouble_ptr work,
magma_queue_t queue )
{
magma_int_t N = n + iter + 1;
if (iter == 0) {
magma_dlarfx_kernel
<<< N, BLOCK_SIZE, 0, queue->cuda_stream() >>>
( m, v, tau, C, ldc, xnorm, dT+iter*N, iter );
}
else {
magma_dlarfx_kernel
<<< N, BLOCK_SIZE, 0, queue->cuda_stream() >>>
( m, v, tau, C, ldc, xnorm, work, iter );
}
if (iter > 0) {
//magma_dtrmv_kernel
// <<< 1, iter, 0, queue->cuda_stream() >>>
// ( dT, N, dT+iter*N);
magma_dtrmv_kernel2
<<< iter, iter, 0, queue->cuda_stream() >>>
( dT, N, work, dT+iter*N, tau );
}
}
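/******************************************************************************/
/* Illustrative CPU reference, not part of MAGMA: the kernels above apply the
   elementary reflector H = I - tau*v*v**T to C from the left, i.e. for every
   column C(:,j) they form w = v**T * C(:,j) and then update C(:,j) -= tau*w*v.
   This sketch assumes real data, column-major storage with leading dimension
   ldc, and that v[0] already holds one (the kernel forces this on the fly).
   The helper name is an illustration-only assumption. */
static inline void
dlarfx_cpu_reference(int m, int n, const double *v, double tau,
                     double *C, int ldc)
{
    for (int j = 0; j < n; ++j) {
        double w = 0.;
        for (int i = 0; i < m; ++i)        /* w = v**T * C(:,j) */
            w += v[i] * C[i + j*ldc];
        const double z = -tau * w;
        for (int i = 0; i < m; ++i)        /* C(:,j) += z * v   */
            C[i + j*ldc] += z * v[i];
    }
}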
|
642d7982996eff85a20536c3601a4adcd40397f5.hip | // !!! This is a file automatically generated by hipify!!!
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __HIPCC__
#define __HIPCC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
typedef unsigned int uint;
//--------------------------------------------------------------------------------------
// Struct Defines
//--------------------------------------------------------------------------------------
typedef struct{
uint g_iLevel;
uint g_iLevelMask;
uint g_iWidth;
uint g_iHeight;
uint g_iField;
} settings_t;
#define DATA_TYPE float2
#define BITONIC_BLOCK_SIZE 1024
//#define BITONIC_BLOCK_SIZE 128
#define TRANSPOSE_BLOCK_SIZE 8
//#define TRANSPOSE_BLOCK_SIZE 8
//--------------------------------------------------------------------------------------
// Bitonic Sort Compute Shader
//--------------------------------------------------------------------------------------
__device__ bool compare_l(DATA_TYPE A, DATA_TYPE B,const uint element) { return (element == 0)? (A.x<B.x) : (A.y<B.y);}
__device__ bool compare_g(DATA_TYPE A, DATA_TYPE B,const uint element) { return (element == 0)? (A.x>B.x) : (A.y>B.y);}
__device__ bool compare_le(DATA_TYPE A, DATA_TYPE B,const uint element) { return (element == 0)? (A.x<=B.x) : (A.y<=B.y);}
__device__ bool compare_ge(DATA_TYPE A, DATA_TYPE B,const uint element) { return (element == 0)? (A.x>=B.x) : (A.y>=B.y);}
extern "C" __global__
void BitonicSort( const settings_t settings,
DATA_TYPE *Data )
{
__shared__ DATA_TYPE shared_data[BITONIC_BLOCK_SIZE];
const uint X = blockIdx.x * blockDim.x + threadIdx.x;
const uint GI = threadIdx.x;
// Load shared data
shared_data[GI] = Data[X];
__syncthreads();
// Sort the shared data
for (unsigned int j = settings.g_iLevel >> 1 ; j > 0 ; j >>= 1)
{
bool b1 = compare_le(shared_data[GI & (~j)], shared_data[GI | j], settings.g_iField);
bool b2 = ((settings.g_iLevelMask & X) != 0);
DATA_TYPE result = ( b1 == b2 )? shared_data[GI ^ j] : shared_data[GI];
__syncthreads();
shared_data[GI] = result;
__syncthreads();
}
// Store shared data
Data[X] = shared_data[GI];
}
//--------------------------------------------------------------------------------------
// Matrix Transpose Compute Shader
//--------------------------------------------------------------------------------------
extern "C" __global__
void MatrixTranspose( const settings_t settings,
const DATA_TYPE *Input,
DATA_TYPE *Output)
{
__shared__ DATA_TYPE transpose_shared_data[TRANSPOSE_BLOCK_SIZE * TRANSPOSE_BLOCK_SIZE];
const uint Input_X = blockIdx.x * blockDim.x + threadIdx.x;
const uint Input_Y = blockIdx.y * blockDim.y + threadIdx.y;
const uint GI = threadIdx.y * TRANSPOSE_BLOCK_SIZE + threadIdx.x;
transpose_shared_data[GI] = Input[Input_Y * settings.g_iWidth + Input_X];
__syncthreads();
uint X = Input_Y - threadIdx.y + threadIdx.x;
uint Y = Input_X - threadIdx.x + threadIdx.y;
uint OIndex = Y * settings.g_iHeight + X;
uint SIndex = threadIdx.x * TRANSPOSE_BLOCK_SIZE + threadIdx.y;
Output[OIndex] = transpose_shared_data[SIndex];
}
| 642d7982996eff85a20536c3601a4adcd40397f5.cu | //Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
typedef unsigned int uint;
//--------------------------------------------------------------------------------------
// Struct Defines
//--------------------------------------------------------------------------------------
typedef struct{
uint g_iLevel;
uint g_iLevelMask;
uint g_iWidth;
uint g_iHeight;
uint g_iField;
} settings_t;
#define DATA_TYPE float2
#define BITONIC_BLOCK_SIZE 1024
//#define BITONIC_BLOCK_SIZE 128
#define TRANSPOSE_BLOCK_SIZE 8
//#define TRANSPOSE_BLOCK_SIZE 8
//--------------------------------------------------------------------------------------
// Bitonic Sort Compute Shader
//--------------------------------------------------------------------------------------
__device__ bool compare_l(DATA_TYPE A, DATA_TYPE B,const uint element) { return (element == 0)? (A.x<B.x) : (A.y<B.y);}
__device__ bool compare_g(DATA_TYPE A, DATA_TYPE B,const uint element) { return (element == 0)? (A.x>B.x) : (A.y>B.y);}
__device__ bool compare_le(DATA_TYPE A, DATA_TYPE B,const uint element) { return (element == 0)? (A.x<=B.x) : (A.y<=B.y);}
__device__ bool compare_ge(DATA_TYPE A, DATA_TYPE B,const uint element) { return (element == 0)? (A.x>=B.x) : (A.y>=B.y);}
extern "C" __global__
void BitonicSort( const settings_t settings,
DATA_TYPE *Data )
{
__shared__ DATA_TYPE shared_data[BITONIC_BLOCK_SIZE];
const uint X = blockIdx.x * blockDim.x + threadIdx.x;
const uint GI = threadIdx.x;
// Load shared data
shared_data[GI] = Data[X];
__syncthreads();
// Sort the shared data
for (unsigned int j = settings.g_iLevel >> 1 ; j > 0 ; j >>= 1)
{
bool b1 = compare_le(shared_data[GI & (~j)], shared_data[GI | j], settings.g_iField);
bool b2 = ((settings.g_iLevelMask & X) != 0);
DATA_TYPE result = ( b1 == b2 )? shared_data[GI ^ j] : shared_data[GI];
__syncthreads();
shared_data[GI] = result;
__syncthreads();
}
// Store shared data
Data[X] = shared_data[GI];
}
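//--------------------------------------------------------------------------------------
// Illustrative host-side sketch (not part of the original source)
//--------------------------------------------------------------------------------------
// Shows one plausible way to drive the g_iLevel / g_iLevelMask constants of the
// BitonicSort kernel above: each pass sorts every BITONIC_BLOCK_SIZE-element chunk
// of d_data in place, with the sort direction alternating per chunk, as the classic
// compute-shader sample does before its transpose phases. The helper name, the
// assumption that numElements is a multiple of BITONIC_BLOCK_SIZE, and the unused
// width/height fields are illustration-only choices.
inline void BitonicSortChunksSketch(DATA_TYPE* d_data, uint numElements, uint field)
{
    settings_t s;
    s.g_iWidth = 0;
    s.g_iHeight = 0;           // not read by BitonicSort
    s.g_iField = field;        // 0: sort on .x, 1: sort on .y
    const uint blocks = numElements / BITONIC_BLOCK_SIZE;
    for (uint level = 2; level <= BITONIC_BLOCK_SIZE; level <<= 1)
    {
        s.g_iLevel = level;
        s.g_iLevelMask = level;
        BitonicSort<<<blocks, BITONIC_BLOCK_SIZE>>>(s, d_data);
    }
    cudaDeviceSynchronize();
}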
//--------------------------------------------------------------------------------------
// Matrix Transpose Compute Shader
//--------------------------------------------------------------------------------------
extern "C" __global__
void MatrixTranspose( const settings_t settings,
const DATA_TYPE *Input,
DATA_TYPE *Output)
{
__shared__ DATA_TYPE transpose_shared_data[TRANSPOSE_BLOCK_SIZE * TRANSPOSE_BLOCK_SIZE];
const uint Input_X = blockIdx.x * blockDim.x + threadIdx.x;
const uint Input_Y = blockIdx.y * blockDim.y + threadIdx.y;
const uint GI = threadIdx.y * TRANSPOSE_BLOCK_SIZE + threadIdx.x;
transpose_shared_data[GI] = Input[Input_Y * settings.g_iWidth + Input_X];
__syncthreads();
uint X = Input_Y - threadIdx.y + threadIdx.x;
uint Y = Input_X - threadIdx.x + threadIdx.y;
uint OIndex = Y * settings.g_iHeight + X;
uint SIndex = threadIdx.x * TRANSPOSE_BLOCK_SIZE + threadIdx.y;
Output[OIndex] = transpose_shared_data[SIndex];
}
|
397f3f7d29897d148fe37b652d0e0fb68e291662.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void update_vb(float *d_verts_ptr, int vertex_count, float timeElapsed)
{
const unsigned long long int threadId = blockIdx.x * blockDim.x + threadIdx.x;
if (threadId < vertex_count * 4)
{
float valx = d_verts_ptr[threadId * 4 + 0];
float valy = d_verts_ptr[threadId * 4 + 1];
float valz = d_verts_ptr[threadId * 4 + 2];
d_verts_ptr[threadId * 4 + 0] = valx * timeElapsed;
d_verts_ptr[threadId * 4 + 1] = valy * timeElapsed;
d_verts_ptr[threadId * 4 + 2] = valz * timeElapsed;
}
} | 397f3f7d29897d148fe37b652d0e0fb68e291662.cu | #include "includes.h"
__global__ void update_vb(float *d_verts_ptr, int vertex_count, float timeElapsed)
{
const unsigned long long int threadId = blockIdx.x * blockDim.x + threadIdx.x;
if (threadId < vertex_count * 4)
{
float valx = d_verts_ptr[threadId * 4 + 0];
float valy = d_verts_ptr[threadId * 4 + 1];
float valz = d_verts_ptr[threadId * 4 + 2];
d_verts_ptr[threadId * 4 + 0] = valx * timeElapsed;
d_verts_ptr[threadId * 4 + 1] = valy * timeElapsed;
d_verts_ptr[threadId * 4 + 2] = valz * timeElapsed;
}
} |
03a5da5972e06d8d746e03b20606c1dc427053a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
#include <math.h>
__global__ void tanh_float(int n,int idx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = tanh(dy[i]);
}
} | 03a5da5972e06d8d746e03b20606c1dc427053a5.cu | extern "C"
#include <math.h>
__global__ void tanh_float(int n,int idx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = tanh(dy[i]);
}
} |
f753c64cf045fb1bcc729b20330e3985a07da954.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// GroupNorm op in Caffe2 for GPU
// Written by Kaiming He
// Improved by Xiaomeng Yang
// see https://arxiv.org/abs/1803.08494
// This is a stand-alone op: Y = gamma * (X - mu) / sig + beta
// ------------------------------------------------------------------
#include "caffe2/operators/group_norm_op.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math_utils.h"
namespace caffe2 {
namespace {
template <typename T>
using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T>
__global__ void ComputeFusedParamsCUDAKernel(
const int N,
const int G,
const int D,
const T* mu,
const T* rsig,
const T* gamma,
const T* beta,
T* scale,
T* bias) {
const int outer_size = N * G;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
const int g = i % G;
#if __CUDA_ARCH__ >= 350
const T mu_val = __ldg(mu + i);
const T rsig_val = __ldg(rsig + i);
#else
const T mu_val = mu[i];
const T rsig_val = rsig[i];
#endif
for (int j = threadIdx.x; j < D; j += blockDim.x) {
const int index = i * D + j;
const int i_gamma = g * D + j;
#if __CUDA_ARCH__ >= 350
const T scale_val = __ldg(gamma + i_gamma) * rsig_val;
scale[index] = scale_val;
bias[index] = __ldg(beta + i_gamma) - scale_val * mu_val;
#else
const T scale_val = gamma[i_gamma] * rsig_val;
scale[index] = scale_val;
bias[index] = beta[i_gamma] - scale_val * mu_val;
#endif
}
}
}
template <typename T, StorageOrder kOrder>
__global__ void GroupNormForwardCUDAKernel(
const int N,
const int C,
const int HxW,
const T* X,
const T* scale,
const T* bias,
T* Y);
template <>
__global__ void GroupNormForwardCUDAKernel<float, StorageOrder::NCHW>(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int outer_size = N * C;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
#if __CUDA_ARCH__ >= 350
const float scale_val = __ldg(scale + i);
const float bias_val = __ldg(bias + i);
#else
const float scale_val = scale[i];
const float bias_val = bias[i];
#endif
for (int j = threadIdx.x; j < HxW; j += blockDim.x) {
const int index = i * HxW + j;
#if __CUDA_ARCH__ >= 350
Y[index] = __ldg(X + index) * scale_val + bias_val;
#else
Y[index] = X[index] * scale_val + bias_val;
#endif
}
}
}
template <>
__global__ void GroupNormForwardCUDAKernel<float, StorageOrder::NHWC>(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int outer_size = N * HxW;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
const int n = i / HxW;
for (int j = threadIdx.x; j < C; j += blockDim.x) {
const int index = i * C + j;
const int i_scale = n * C + j;
#if __CUDA_ARCH__ >= 350
Y[index] =
__ldg(X + index) * __ldg(scale + i_scale) + __ldg(bias + i_scale);
#else
Y[index] = X[index] * scale[i_scale] + bias[i_scale];
#endif
}
}
}
template <typename T, StorageOrder kOrder>
__global__ void ComputeInternalGradientsCUDAKernel(
const int N,
const int G,
const int D,
const int HxW,
const T* dY,
const T* X,
const T* gamma,
T* ds,
T* db) {
const int outer_size = N * G;
const int inner_size = D * HxW;
__shared__ typename BlockReduce<T>::TempStorage ds_storage;
__shared__ typename BlockReduce<T>::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T ds_val = 0;
T db_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int i_gamma = i % G * D + j / HxW;
const int index = kOrder == StorageOrder::NCHW
? i * inner_size + j
: (i / G * HxW + j % HxW) * G * D + i_gamma;
#if __CUDA_ARCH__ >= 350
ds_val += __ldg(gamma + i_gamma) * __ldg(dY + index) * __ldg(X + index);
db_val += __ldg(gamma + i_gamma) * __ldg(dY + index);
#else
ds_val += gamma[i_gamma] * dY[index] * X[index];
db_val += gamma[i_gamma] * dY[index];
#endif
}
ds_val = BlockReduce<T>(ds_storage).Reduce(ds_val, hipcub::Sum());
db_val = BlockReduce<T>(db_storage).Reduce(db_val, hipcub::Sum());
if (threadIdx.x == 0) {
ds[i] = ds_val;
db[i] = db_val;
}
__syncthreads();
}
}
// Math:
// Y = gamma * (X - mu) * rsig + beta
// let s = gamma * rsig
// let b = beta - mu * gamma * rsig
// Y = s * X + b
// let n = D * HxW
// dL/dX = dL/dY * dY/dX = dL/dY * (d(s * X)/dX + db/dX)
// d(s * X)/dX = s + X * ds/dX = s + gamma * X * drsig/dX
// db/dX = -u * drsig/dX - rsig * dmu/dX
// drsig/dX = -rsig^3 * (X - mu) / n
// dmu/dX = 1 / n
template <typename T, StorageOrder kOrder>
__global__ void GroupNormBackwardCUDAKernel(
const int size,
const int G,
const int D,
const int HxW,
const T* dY,
const T* X,
const T* mu,
const T* rsig,
const T* gamma,
const T* ds,
const T* db,
T* dX) {
const int C = G * D;
const T denom = T(1) / static_cast<T>(D * HxW);
CUDA_1D_KERNEL_LOOP(i, size) {
const int i_mu = kOrder == StorageOrder::NCHW
? i / (D * HxW)
: i / (C * HxW) * G + (i / D % G);
const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C;
#if __CUDA_ARCH__ >= 350
const T u = (__ldg(db + i_mu) * __ldg(mu + i_mu) - __ldg(ds + i_mu)) *
(__ldg(X + i) - __ldg(mu + i_mu)) *
math::utils::Cube<T>(__ldg(rsig + i_mu));
const T v = __ldg(db + i_mu) * __ldg(rsig + i_mu);
dX[i] = __ldg(gamma + i_gamma) * __ldg(dY + i) * __ldg(rsig + i_mu) +
(u - v) * denom;
#else
const T u = (db[i_mu] * mu[i_mu] - ds[i_mu]) * (X[i] - mu[i_mu]) *
math::utils::Cube<T>(rsig[i_mu]);
const T v = db[i_mu] * rsig[i_mu];
dX[i] = gamma[i_gamma] * dY[i] * rsig[i_mu] + (u - v) * denom;
#endif
}
}
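// Illustrative single-element CPU reference, not part of Caffe2: mirrors the
// arithmetic of GroupNormBackwardCUDAKernel above (and the derivation in the
// preceding "Math:" comment) for one element, with denom = 1 / (D * HxW) passed
// in precomputed. Intended purely as a reading aid / test-oracle sketch; the
// name and float-only signature are assumptions.
inline float GroupNormBackwardRefElement(
    float dY,
    float X,
    float mu,
    float rsig,
    float gamma,
    float ds,
    float db,
    float denom) {
  const float u = (db * mu - ds) * (X - mu) * rsig * rsig * rsig;
  const float v = db * rsig;
  return gamma * dY * rsig + (u - v) * denom;
}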
template <typename T, StorageOrder kOrder>
__global__ void GammaBetaBackwardCUDAKernel(
const int N,
const int G,
const int D,
const int HxW,
const T* dY,
const T* X,
const T* mu,
const T* rsig,
T* dgamma,
T* dbeta) {
const int outer_size = G * D;
const int inner_size = N * HxW;
__shared__ typename BlockReduce<T>::TempStorage dg_storage;
__shared__ typename BlockReduce<T>::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T dg_val = 0;
T db_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int n = j / HxW;
const int index = kOrder == StorageOrder::NCHW
? (n * outer_size + i) * HxW + j % HxW
: j * outer_size + i;
const int i_mu = n * G + i / D;
#if __CUDA_ARCH__ >= 350
dg_val += __ldg(dY + index) * (__ldg(X + index) - __ldg(mu + i_mu)) *
__ldg(rsig + i_mu);
db_val += __ldg(dY + index);
#else
dg_val += dY[index] * (X[index] - mu[i_mu]) * rsig[i_mu];
db_val += dY[index];
#endif
}
dg_val = BlockReduce<T>(dg_storage).Reduce(dg_val, hipcub::Sum());
db_val = BlockReduce<T>(db_storage).Reduce(db_val, hipcub::Sum());
if (threadIdx.x == 0) {
dgamma[i] = dg_val;
dbeta[i] = db_val;
}
__syncthreads();
}
}
} // namespace
template <>
void GroupNormOp<float, CUDAContext>::ComputeFusedParams(
const int N,
const int G,
const int D,
const float* mu,
const float* rsig,
const float* gamma,
const float* beta,
float* scale,
float* bias) {
hipLaunchKernelGGL(( ComputeFusedParamsCUDAKernel<float>)
, dim3(::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, G, D, mu, rsig, gamma, beta, scale, bias);
}
template <>
void GroupNormOp<float, CUDAContext>::GroupNormForwardNCHW(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
hipLaunchKernelGGL(( GroupNormForwardCUDAKernel<float, StorageOrder::NCHW>)
, dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, C, HxW, X, scale, bias, Y);
}
template <>
void GroupNormOp<float, CUDAContext>::GroupNormForwardNHWC(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
hipLaunchKernelGGL(( GroupNormForwardCUDAKernel<float, StorageOrder::NHWC>)
, dim3(::min(N * HxW, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, C, HxW, X, scale, bias, Y);
}
// Math:
// let: s = gamma * rsig
// let: b = beta - mu * gamma * rsig
// then: Y = s * X + b
template <>
bool GroupNormGradientOp<float, CUDAContext>::RunOnDeviceImpl(
const int N,
const int G,
const int D,
const int HxW,
const float* dY_data,
const float* X_data,
const float* mu_data,
const float* rsig_data,
const float* gamma_data,
float* dX_data,
float* dgamma_data,
float* dbeta_data) {
const int size = N * G * D * HxW;
const int C = G * D;
ds_.Resize(N, G);
db_.Resize(N, G);
float* ds_data = ds_.mutable_data<float>();
float* db_data = db_.mutable_data<float>();
if (order_ == StorageOrder::NCHW) {
// Computes dL/ds and dL/db.
// dL/ds = Sum(dL/dY * gamma * X)
// dL/db = Sum(dL/dY * gamma)
hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<float, StorageOrder::NCHW>)
, dim3(::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, G, D, HxW, dY_data, X_data, gamma_data, ds_data, db_data);
// Computes dL/dX.
hipLaunchKernelGGL(( GroupNormBackwardCUDAKernel<float, StorageOrder::NCHW>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
size,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
gamma_data,
ds_data,
db_data,
dX_data);
// Computes dL/dgamma and dL/dbeta.
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<float, StorageOrder::NCHW>)
, dim3(::min(C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
dgamma_data,
dbeta_data);
} else {
// Computes dL/ds and dL/db.
// dL/ds = Sum(dL/dY * gamma * X)
// dL/db = Sum(dL/dY * gamma)
hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<float, StorageOrder::NHWC>)
, dim3(::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, G, D, HxW, dY_data, X_data, gamma_data, ds_data, db_data);
// Computes dL/dX.
hipLaunchKernelGGL(( GroupNormBackwardCUDAKernel<float, StorageOrder::NHWC>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
size,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
gamma_data,
ds_data,
db_data,
dX_data);
// Computes dL/dgamma and dL/dbeta.
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<float, StorageOrder::NHWC>)
, dim3(::min(C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
dgamma_data,
dbeta_data);
}
return true;
}
REGISTER_CUDA_OPERATOR(GroupNorm, GroupNormOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
GroupNormGradient,
GroupNormGradientOp<float, CUDAContext>);
} // namespace caffe2
| f753c64cf045fb1bcc729b20330e3985a07da954.cu | // ------------------------------------------------------------------
// GroupNorm op in Caffe2 for GPU
// Written by Kaiming He
// Improved by Xiaomeng Yang
// see https://arxiv.org/abs/1803.08494
// This is a stand-alone op: Y = gamma * (X - mu) / sig + beta
// ------------------------------------------------------------------
#include "caffe2/operators/group_norm_op.h"
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math_utils.h"
namespace caffe2 {
namespace {
template <typename T>
using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T>
__global__ void ComputeFusedParamsCUDAKernel(
const int N,
const int G,
const int D,
const T* mu,
const T* rsig,
const T* gamma,
const T* beta,
T* scale,
T* bias) {
const int outer_size = N * G;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
const int g = i % G;
#if __CUDA_ARCH__ >= 350
const T mu_val = __ldg(mu + i);
const T rsig_val = __ldg(rsig + i);
#else
const T mu_val = mu[i];
const T rsig_val = rsig[i];
#endif
for (int j = threadIdx.x; j < D; j += blockDim.x) {
const int index = i * D + j;
const int i_gamma = g * D + j;
#if __CUDA_ARCH__ >= 350
const T scale_val = __ldg(gamma + i_gamma) * rsig_val;
scale[index] = scale_val;
bias[index] = __ldg(beta + i_gamma) - scale_val * mu_val;
#else
const T scale_val = gamma[i_gamma] * rsig_val;
scale[index] = scale_val;
bias[index] = beta[i_gamma] - scale_val * mu_val;
#endif
}
}
}
template <typename T, StorageOrder kOrder>
__global__ void GroupNormForwardCUDAKernel(
const int N,
const int C,
const int HxW,
const T* X,
const T* scale,
const T* bias,
T* Y);
template <>
__global__ void GroupNormForwardCUDAKernel<float, StorageOrder::NCHW>(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int outer_size = N * C;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
#if __CUDA_ARCH__ >= 350
const float scale_val = __ldg(scale + i);
const float bias_val = __ldg(bias + i);
#else
const float scale_val = scale[i];
const float bias_val = bias[i];
#endif
for (int j = threadIdx.x; j < HxW; j += blockDim.x) {
const int index = i * HxW + j;
#if __CUDA_ARCH__ >= 350
Y[index] = __ldg(X + index) * scale_val + bias_val;
#else
Y[index] = X[index] * scale_val + bias_val;
#endif
}
}
}
template <>
__global__ void GroupNormForwardCUDAKernel<float, StorageOrder::NHWC>(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int outer_size = N * HxW;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
const int n = i / HxW;
for (int j = threadIdx.x; j < C; j += blockDim.x) {
const int index = i * C + j;
const int i_scale = n * C + j;
#if __CUDA_ARCH__ >= 350
Y[index] =
__ldg(X + index) * __ldg(scale + i_scale) + __ldg(bias + i_scale);
#else
Y[index] = X[index] * scale[i_scale] + bias[i_scale];
#endif
}
}
}
template <typename T, StorageOrder kOrder>
__global__ void ComputeInternalGradientsCUDAKernel(
const int N,
const int G,
const int D,
const int HxW,
const T* dY,
const T* X,
const T* gamma,
T* ds,
T* db) {
const int outer_size = N * G;
const int inner_size = D * HxW;
__shared__ typename BlockReduce<T>::TempStorage ds_storage;
__shared__ typename BlockReduce<T>::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T ds_val = 0;
T db_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int i_gamma = i % G * D + j / HxW;
const int index = kOrder == StorageOrder::NCHW
? i * inner_size + j
: (i / G * HxW + j % HxW) * G * D + i_gamma;
#if __CUDA_ARCH__ >= 350
ds_val += __ldg(gamma + i_gamma) * __ldg(dY + index) * __ldg(X + index);
db_val += __ldg(gamma + i_gamma) * __ldg(dY + index);
#else
ds_val += gamma[i_gamma] * dY[index] * X[index];
db_val += gamma[i_gamma] * dY[index];
#endif
}
ds_val = BlockReduce<T>(ds_storage).Reduce(ds_val, cub::Sum());
db_val = BlockReduce<T>(db_storage).Reduce(db_val, cub::Sum());
if (threadIdx.x == 0) {
ds[i] = ds_val;
db[i] = db_val;
}
__syncthreads();
}
}
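// Illustrative CPU reference, not part of Caffe2: spells out the two per-(n, g)
// sums that ComputeInternalGradientsCUDAKernel above accumulates with
// cub::BlockReduce, for the NCHW layout only:
//   ds(n, g) = sum over the group of gamma * dY * X
//   db(n, g) = sum over the group of gamma * dY
// The function name and float-only signature are assumptions for illustration.
inline void ComputeInternalGradientsRefNCHW(
    const int N,
    const int G,
    const int D,
    const int HxW,
    const float* dY,
    const float* X,
    const float* gamma,
    float* ds,
    float* db) {
  const int inner_size = D * HxW;
  for (int i = 0; i < N * G; ++i) {
    float ds_val = 0.0f;
    float db_val = 0.0f;
    for (int j = 0; j < inner_size; ++j) {
      const int i_gamma = i % G * D + j / HxW;
      const int index = i * inner_size + j;
      ds_val += gamma[i_gamma] * dY[index] * X[index];
      db_val += gamma[i_gamma] * dY[index];
    }
    ds[i] = ds_val;
    db[i] = db_val;
  }
}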
// Math:
// Y = gamma * (X - mu) * rsig + beta
// let s = gamma * rsig
// let b = beta - mu * gamma * rsig
// Y = s * X + b
// let n = D * HxW
// dL/dX = dL/dY * dY/dX = dL/dY * (d(s * X)/dX + db/dX)
// d(s * X)/dX = s + X * ds/dX = s + gamma * X * drsig/dX
// db/dX = -u * drsig/dX - rsig * dmu/dX
// drsig/dX = -rsig^3 * (X - mu) / n
// dmu/dX = 1 / n
template <typename T, StorageOrder kOrder>
__global__ void GroupNormBackwardCUDAKernel(
const int size,
const int G,
const int D,
const int HxW,
const T* dY,
const T* X,
const T* mu,
const T* rsig,
const T* gamma,
const T* ds,
const T* db,
T* dX) {
const int C = G * D;
const T denom = T(1) / static_cast<T>(D * HxW);
CUDA_1D_KERNEL_LOOP(i, size) {
const int i_mu = kOrder == StorageOrder::NCHW
? i / (D * HxW)
: i / (C * HxW) * G + (i / D % G);
const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C;
#if __CUDA_ARCH__ >= 350
const T u = (__ldg(db + i_mu) * __ldg(mu + i_mu) - __ldg(ds + i_mu)) *
(__ldg(X + i) - __ldg(mu + i_mu)) *
math::utils::Cube<T>(__ldg(rsig + i_mu));
const T v = __ldg(db + i_mu) * __ldg(rsig + i_mu);
dX[i] = __ldg(gamma + i_gamma) * __ldg(dY + i) * __ldg(rsig + i_mu) +
(u - v) * denom;
#else
const T u = (db[i_mu] * mu[i_mu] - ds[i_mu]) * (X[i] - mu[i_mu]) *
math::utils::Cube<T>(rsig[i_mu]);
const T v = db[i_mu] * rsig[i_mu];
dX[i] = gamma[i_gamma] * dY[i] * rsig[i_mu] + (u - v) * denom;
#endif
}
}
template <typename T, StorageOrder kOrder>
__global__ void GammaBetaBackwardCUDAKernel(
const int N,
const int G,
const int D,
const int HxW,
const T* dY,
const T* X,
const T* mu,
const T* rsig,
T* dgamma,
T* dbeta) {
const int outer_size = G * D;
const int inner_size = N * HxW;
__shared__ typename BlockReduce<T>::TempStorage dg_storage;
__shared__ typename BlockReduce<T>::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T dg_val = 0;
T db_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int n = j / HxW;
const int index = kOrder == StorageOrder::NCHW
? (n * outer_size + i) * HxW + j % HxW
: j * outer_size + i;
const int i_mu = n * G + i / D;
#if __CUDA_ARCH__ >= 350
dg_val += __ldg(dY + index) * (__ldg(X + index) - __ldg(mu + i_mu)) *
__ldg(rsig + i_mu);
db_val += __ldg(dY + index);
#else
dg_val += dY[index] * (X[index] - mu[i_mu]) * rsig[i_mu];
db_val += dY[index];
#endif
}
dg_val = BlockReduce<T>(dg_storage).Reduce(dg_val, cub::Sum());
db_val = BlockReduce<T>(db_storage).Reduce(db_val, cub::Sum());
if (threadIdx.x == 0) {
dgamma[i] = dg_val;
dbeta[i] = db_val;
}
__syncthreads();
}
}
} // namespace
template <>
void GroupNormOp<float, CUDAContext>::ComputeFusedParams(
const int N,
const int G,
const int D,
const float* mu,
const float* rsig,
const float* gamma,
const float* beta,
float* scale,
float* bias) {
ComputeFusedParamsCUDAKernel<float>
<<<std::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, G, D, mu, rsig, gamma, beta, scale, bias);
}
template <>
void GroupNormOp<float, CUDAContext>::GroupNormForwardNCHW(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
GroupNormForwardCUDAKernel<float, StorageOrder::NCHW>
<<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, C, HxW, X, scale, bias, Y);
}
template <>
void GroupNormOp<float, CUDAContext>::GroupNormForwardNHWC(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
GroupNormForwardCUDAKernel<float, StorageOrder::NHWC>
<<<std::min(N * HxW, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, C, HxW, X, scale, bias, Y);
}
// Math:
// let: s = gamma * rsig
// let: b = beta - mu * gamma * rsig
// then: Y = s * X + b
template <>
bool GroupNormGradientOp<float, CUDAContext>::RunOnDeviceImpl(
const int N,
const int G,
const int D,
const int HxW,
const float* dY_data,
const float* X_data,
const float* mu_data,
const float* rsig_data,
const float* gamma_data,
float* dX_data,
float* dgamma_data,
float* dbeta_data) {
const int size = N * G * D * HxW;
const int C = G * D;
ds_.Resize(N, G);
db_.Resize(N, G);
float* ds_data = ds_.mutable_data<float>();
float* db_data = db_.mutable_data<float>();
if (order_ == StorageOrder::NCHW) {
// Computes dL/ds and dL/db.
// dL/ds = Sum(dL/dY * gamma * X)
// dL/db = Sum(dL/dY * gamma)
ComputeInternalGradientsCUDAKernel<float, StorageOrder::NCHW>
<<<std::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, G, D, HxW, dY_data, X_data, gamma_data, ds_data, db_data);
// Computes dL/dX.
GroupNormBackwardCUDAKernel<float, StorageOrder::NCHW>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
size,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
gamma_data,
ds_data,
db_data,
dX_data);
// Computes dL/dgamma and dL/dbeta.
GammaBetaBackwardCUDAKernel<float, StorageOrder::NCHW>
<<<std::min(C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
dgamma_data,
dbeta_data);
} else {
// Computes dL/ds and dL/db.
// dL/ds = Sum(dL/dY * gamma * X)
// dL/db = Sum(dL/dY * gamma)
ComputeInternalGradientsCUDAKernel<float, StorageOrder::NHWC>
<<<std::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, G, D, HxW, dY_data, X_data, gamma_data, ds_data, db_data);
// Computes dL/dX.
GroupNormBackwardCUDAKernel<float, StorageOrder::NHWC>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
size,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
gamma_data,
ds_data,
db_data,
dX_data);
// Computes dL/dgamma and dL/dbeta.
GammaBetaBackwardCUDAKernel<float, StorageOrder::NHWC>
<<<std::min(C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
dgamma_data,
dbeta_data);
}
return true;
}
REGISTER_CUDA_OPERATOR(GroupNorm, GroupNormOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
GroupNormGradient,
GroupNormGradientOp<float, CUDAContext>);
} // namespace caffe2
|
7b3d0db99439dfc3c0be1fe148f907a7764cbc0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "resize.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
__forceinline__ __device__ float3 get(uchar3* src, int x,int y,int w,int h){
if(x < 0 || x>=w || y<0 || y>=h) return make_float3(0.,0.,0.);
uchar3 temp = src[y*w + x];
return make_float3(float(temp.x)/255.,
float(temp.y)/255.,
float(temp.z)/255);
}
__global__ void resizeNormKernel(uchar3* src,float *dst,int dstW, int dstH,int srcW,int srcH,
float scaleX, float scaleY,float shiftX, float shiftY) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int x = idx % dstW;
const int y = idx / dstW;
if (x >= dstW || y >= dstH)
return;
float w = (x - shiftX + 0.5) * scaleX - 0.5;
float h = (y - shiftY + 0.5) * scaleY - 0.5;
int h_low = (int)h;
int w_low = (int)w;
int h_high = h_low + 1;
int w_high = w_low + 1;
float lh = h - h_low;
float lw = w - w_low;
float hh = 1 - lh, hw = 1 - lw;
float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
float3 v1 = get(src,w_low,h_low,srcW,srcH);
float3 v2 = get(src,w_high,h_low,srcW,srcH);
float3 v3 = get(src,w_low,h_high,srcW,srcH);
float3 v4 = get(src,w_high,h_high,srcW,srcH);
int stride = dstW*dstH;
dst[y*dstW + x] = w1 *v1.x + w2 * v2.x + w3 *v3.x + w4 * v4.x ;
dst[stride + y*dstW + x] = w1 *v1.y + w2 * v2.y + w3 *v3.y + w4 * v4.y ;
dst[stride*2 + y*dstW + x] = w1 *v1.z + w2 * v2.z + w3 *v3.z + w4 * v4.z;
}
int resizeAndNorm(void * p,float *d,int w,int h,int in_w,int in_h, bool keepration ,bool keepcenter){
float scaleX = (w*1.0f / in_w);
float scaleY = (h*1.0f / in_h);
float shiftX = 0.f ,shiftY = 0.f;
if(keepration)scaleX = scaleY = scaleX > scaleY ? scaleX : scaleY;
if(keepration && keepcenter){shiftX = (in_w - w/scaleX)/2.f;shiftY = (in_h - h/scaleY)/2.f;}
const int n = in_w*in_h;
const int blockSize = 512;
const int gridSize = (n + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( resizeNormKernel), dim3(gridSize), dim3(blockSize), 0, 0, (uchar3*)(p),d,in_w,in_h,w,h,scaleX,scaleY,shiftX,shiftY);
return 0;
} | 7b3d0db99439dfc3c0be1fe148f907a7764cbc0d.cu | #include "resize.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__forceinline__ __device__ float3 get(uchar3* src, int x,int y,int w,int h){
if(x < 0 || x>=w || y<0 || y>=h) return make_float3(0.,0.,0.);
uchar3 temp = src[y*w + x];
return make_float3(float(temp.x)/255.,
float(temp.y)/255.,
float(temp.z)/255);
}
__global__ void resizeNormKernel(uchar3* src,float *dst,int dstW, int dstH,int srcW,int srcH,
float scaleX, float scaleY,float shiftX, float shiftY) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int x = idx % dstW;
const int y = idx / dstW;
if (x >= dstW || y >= dstH)
return;
float w = (x - shiftX + 0.5) * scaleX - 0.5;
float h = (y - shiftY + 0.5) * scaleY - 0.5;
int h_low = (int)h;
int w_low = (int)w;
int h_high = h_low + 1;
int w_high = w_low + 1;
float lh = h - h_low;
float lw = w - w_low;
float hh = 1 - lh, hw = 1 - lw;
float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
float3 v1 = get(src,w_low,h_low,srcW,srcH);
float3 v2 = get(src,w_high,h_low,srcW,srcH);
float3 v3 = get(src,w_low,h_high,srcW,srcH);
float3 v4 = get(src,w_high,h_high,srcW,srcH);
int stride = dstW*dstH;
dst[y*dstW + x] = w1 *v1.x + w2 * v2.x + w3 *v3.x + w4 * v4.x ;
dst[stride + y*dstW + x] = w1 *v1.y + w2 * v2.y + w3 *v3.y + w4 * v4.y ;
dst[stride*2 + y*dstW + x] = w1 *v1.z + w2 * v2.z + w3 *v3.z + w4 * v4.z;
}
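// Illustrative CPU reference, not part of the original source: the four corner
// weights used by resizeNormKernel above are the standard bilinear tensor
// products of the 1-D fractions toward the low/high neighbours. The helper name
// and out-parameter signature are assumptions; it only restates the kernel math.
inline void bilinearWeightsRef(float w, float h,
                               float &w1, float &w2, float &w3, float &w4)
{
    int w_low = (int)w;
    int h_low = (int)h;
    float lw = w - w_low, lh = h - h_low;   // fractions toward the high corner
    float hw = 1.f - lw, hh = 1.f - lh;     // fractions toward the low corner
    w1 = hh * hw;   // weight of (w_low , h_low )
    w2 = hh * lw;   // weight of (w_high, h_low )
    w3 = lh * hw;   // weight of (w_low , h_high)
    w4 = lh * lw;   // weight of (w_high, h_high)
}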
int resizeAndNorm(void * p,float *d,int w,int h,int in_w,int in_h, bool keepration ,bool keepcenter){
float scaleX = (w*1.0f / in_w);
float scaleY = (h*1.0f / in_h);
float shiftX = 0.f ,shiftY = 0.f;
if(keepration)scaleX = scaleY = scaleX > scaleY ? scaleX : scaleY;
if(keepration && keepcenter){shiftX = (in_w - w/scaleX)/2.f;shiftY = (in_h - h/scaleY)/2.f;}
const int n = in_w*in_h;
const int blockSize = 512;
const int gridSize = (n + blockSize - 1) / blockSize;
resizeNormKernel<<<gridSize, blockSize, 0>>>((uchar3*)(p),d,in_w,in_h,w,h,scaleX,scaleY,shiftX,shiftY);
return 0;
} |
f400ac566d42c9d0250d2a17981d26657bd3a989.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
/*Lx2 performs the 2-D convolution of matrices A and row vector B*/
__global__ void Lx2(const float *d_in,float *d_out,int numRows,int numCols, float *mask)
{
//Calculate the row # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
//Calculate the column # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((4<Row) && (Row<numRows-5) && (4<Col) && (Col<numCols-5)){
d_out[Col*numRows+Row]=mask[0]*d_in[Col*numRows+Row]+
mask[1]*(d_in[(Col-1)*numRows+Row]+d_in[(Col+1)*numRows+Row])+
mask[2]*(d_in[(Col-2)*numRows+Row]+d_in[(Col+2)*numRows+Row])+
mask[3]*(d_in[(Col-3)*numRows+Row]+d_in[(Col+3)*numRows+Row])+
mask[4]*(d_in[(Col-4)*numRows+Row]+d_in[(Col+4)*numRows+Row])+
mask[5]*(d_in[(Col-5)*numRows+Row]+d_in[(Col+5)*numRows+Row]);
}
}
/*Lz2 performs the 2-D convolution of matrices A and column vector B*/
__global__ void Lz2(const float *d_in,float *d_out,int numRows,int numCols, float *mask)
{
//Calculate the row # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
//Calculate the column # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((4<Row) && (Row<numRows-5) && (4<Col) && (Col<numCols-5)){
int Loc = Col*numRows+Row;
d_out[Loc]=mask[0]*d_in[Loc]+
mask[1]*(d_in[Loc-1]+d_in[Loc+1])+
mask[2]*(d_in[Loc-2]+d_in[Loc+2])+
mask[3]*(d_in[Loc-3]+d_in[Loc+3])+
mask[4]*(d_in[Loc-4]+d_in[Loc+4])+
mask[5]*(d_in[Loc-5]+d_in[Loc+5]);
}
}
/*Lz1 performs the 2-D convolution of matrices A and column vector C1*/
__global__ void Lz1(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
//Calculate the row # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
//Calculate the column # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
int Loc = Col*numRows+Row;
d_out[Loc]=
mask[0]*(d_in[Loc+1]-d_in[Loc-1])+
mask[1]*(d_in[Loc+2]-d_in[Loc-2])+
mask[2]*(d_in[Loc+3]-d_in[Loc-3])+
mask[3]*(d_in[Loc+4]-d_in[Loc-4])+
mask[4]*(d_in[Loc+5]-d_in[Loc-5])+
mask[5]*(d_in[Loc+6]-d_in[Loc-6]);
}
}
/*Lx1 performs the 2-D convolution of matrices A and row vector C1*/
__global__ void Lx1(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
//Calculate the row # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
//Calculate the column # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
d_out[Col*numRows+Row]=
mask[0]*(d_in[(Col+1)*numRows+Row]-d_in[(Col-1)*numRows+Row])+
mask[1]*(d_in[(Col+2)*numRows+Row]-d_in[(Col-2)*numRows+Row])+
mask[2]*(d_in[(Col+3)*numRows+Row]-d_in[(Col-3)*numRows+Row])+
mask[3]*(d_in[(Col+4)*numRows+Row]-d_in[(Col-4)*numRows+Row])+
mask[4]*(d_in[(Col+5)*numRows+Row]-d_in[(Col-5)*numRows+Row])+
mask[5]*(d_in[(Col+6)*numRows+Row]-d_in[(Col-6)*numRows+Row]);
}
}
/*sbLx performs the 2-D convolution of matrices A and row vector S1*/
__global__ void sbLx(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
//Calculate the row # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
//Calculate the column # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
d_out[Col*numRows+Row]=
mask[0]*(d_in[(Col+1)*numRows+Row]-d_in[(Col-0)*numRows+Row])+
mask[1]*(d_in[(Col+2)*numRows+Row]-d_in[(Col-1)*numRows+Row])+
mask[2]*(d_in[(Col+3)*numRows+Row]-d_in[(Col-2)*numRows+Row])+
mask[3]*(d_in[(Col+4)*numRows+Row]-d_in[(Col-3)*numRows+Row])+
mask[4]*(d_in[(Col+5)*numRows+Row]-d_in[(Col-4)*numRows+Row])+
mask[5]*(d_in[(Col+6)*numRows+Row]+d_in[(Col-5)*numRows+Row]);
}
}
/*sfLx performs the 2-D convolution of matrices A and row vector S1*/
__global__ void sfLx(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
//Calculate the row # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
//Calculate the column # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
d_out[Col*numRows+Row]=
mask[0]*(d_in[(Col+0)*numRows+Row]-d_in[(Col-1)*numRows+Row])+
mask[1]*(d_in[(Col+1)*numRows+Row]-d_in[(Col-2)*numRows+Row])+
mask[2]*(d_in[(Col+2)*numRows+Row]-d_in[(Col-3)*numRows+Row])+
mask[3]*(d_in[(Col+3)*numRows+Row]-d_in[(Col-4)*numRows+Row])+
mask[4]*(d_in[(Col+4)*numRows+Row]-d_in[(Col-5)*numRows+Row])+
mask[5]*(d_in[(Col+5)*numRows+Row]+d_in[(Col-6)*numRows+Row]);
}
}
/*sbLz performs the 2-D convolution of matrices A and column vector S1*/
__global__ void sbLz(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
//Calculate the row # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
//Calculate the column # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
int Loc = Col*numRows+Row;
d_out[Col*numRows+Row]=
mask[0]*(d_in[Loc+1]-d_in[Loc-0])+
mask[1]*(d_in[Loc+2]-d_in[Loc-1])+
mask[2]*(d_in[Loc+3]-d_in[Loc-2])+
mask[3]*(d_in[Loc+4]-d_in[Loc-3])+
mask[4]*(d_in[Loc+5]-d_in[Loc-4])+
mask[5]*(d_in[Loc+6]+d_in[Loc-5]);
}
}
/*sfLz performs the 2-D convolution of matrices A and column vector S1*/
__global__ void sfLz(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
//Calculate the row # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
//Calculate the column # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
int Loc = Col*numRows+Row;
d_out[Loc]=
mask[0]*(d_in[Loc+0]-d_in[Loc-1])+
mask[1]*(d_in[Loc+1]-d_in[Loc-2])+
mask[2]*(d_in[Loc+2]-d_in[Loc-3])+
mask[3]*(d_in[Loc+3]-d_in[Loc-4])+
mask[4]*(d_in[Loc+4]-d_in[Loc-5])+
mask[5]*(d_in[Loc+5]+d_in[Loc-6]);
}
}
/*rsgffd performs the 2-D forward rotated staggered-grid finite difference*/
__global__ void rsgffd(const float *d_in,float *d_outx,float *d_outz,const int numRows,int numCols,float *mask)
{
//Calculate the row # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
//Calculate the column # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
d_outx[Col*numRows+Row]=
mask[0]*(d_in[(Col+1)*numRows+Row-0]-d_in[(Col-0)*numRows+Row+1])+
mask[1]*(d_in[(Col+2)*numRows+Row-1]-d_in[(Col-1)*numRows+Row+2])+
mask[2]*(d_in[(Col+3)*numRows+Row-2]-d_in[(Col-2)*numRows+Row+3])+
mask[3]*(d_in[(Col+4)*numRows+Row-3]-d_in[(Col-3)*numRows+Row+4])+
mask[4]*(d_in[(Col+5)*numRows+Row-4]-d_in[(Col-4)*numRows+Row+5])+
mask[5]*(d_in[(Col+6)*numRows+Row-5]-d_in[(Col-5)*numRows+Row+6]);
d_outz[Col*numRows+Row]=
mask[0]*(d_in[(Col+1)*numRows+Row+1]-d_in[(Col-0)*numRows+Row-0])+
mask[1]*(d_in[(Col+2)*numRows+Row+2]-d_in[(Col-1)*numRows+Row-1])+
mask[2]*(d_in[(Col+3)*numRows+Row+3]-d_in[(Col-2)*numRows+Row-2])+
mask[3]*(d_in[(Col+4)*numRows+Row+4]-d_in[(Col-3)*numRows+Row-3])+
mask[4]*(d_in[(Col+5)*numRows+Row+5]-d_in[(Col-4)*numRows+Row-4])+
mask[5]*(d_in[(Col+6)*numRows+Row+6]-d_in[(Col-5)*numRows+Row-5]);
}
}
/*rsgbfd performs the 2-D backward rotated staggered-grid finite difference*/
__global__ void rsgbfd(const float *d_in,float *d_outx,float *d_outz,int numRows,int numCols,float *mask)
{
//Calculate the row # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
//Calculate the column # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
d_outx[Col*numRows+Row]=
mask[0]*(d_in[(Col+0)*numRows+Row-1]-d_in[(Col-1)*numRows+Row+0])+
mask[1]*(d_in[(Col+1)*numRows+Row-2]-d_in[(Col-2)*numRows+Row+1])+
mask[2]*(d_in[(Col+2)*numRows+Row-3]-d_in[(Col-3)*numRows+Row+2])+
mask[3]*(d_in[(Col+3)*numRows+Row-4]-d_in[(Col-4)*numRows+Row+3])+
mask[4]*(d_in[(Col+4)*numRows+Row-5]-d_in[(Col-5)*numRows+Row+4])+
mask[5]*(d_in[(Col+5)*numRows+Row-6]-d_in[(Col-6)*numRows+Row+5]);
d_outz[Col*numRows+Row]=
mask[0]*(d_in[(Col+0)*numRows+Row+0]-d_in[(Col-1)*numRows+Row-1])+
mask[1]*(d_in[(Col+1)*numRows+Row+1]-d_in[(Col-2)*numRows+Row-2])+
mask[2]*(d_in[(Col+2)*numRows+Row+2]-d_in[(Col-3)*numRows+Row-3])+
mask[3]*(d_in[(Col+3)*numRows+Row+3]-d_in[(Col-4)*numRows+Row-4])+
mask[4]*(d_in[(Col+4)*numRows+Row+4]-d_in[(Col-5)*numRows+Row-5])+
mask[5]*(d_in[(Col+5)*numRows+Row+5]-d_in[(Col-6)*numRows+Row-6]);
}
}
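/*Illustrative host-side launch sketch, not part of the original source: every
kernel above maps Row to the x dimension and Col to the y dimension, so a 2-D
launch has to cover numRows along x and numCols along y. All pointers are
assumed to be device allocations (numRows*numCols floats for the fields, 6
floats for the mask); the helper name and block shape are illustration-only.*/
inline void launchLz2Sketch(const float *d_in, float *d_out,
                            int numRows, int numCols, float *d_mask)
{
    dim3 threads(16, 16);
    dim3 blocks((numRows + threads.x - 1) / threads.x,
                (numCols + threads.y - 1) / threads.y);
    hipLaunchKernelGGL(Lz2, blocks, threads, 0, 0,
                       d_in, d_out, numRows, numCols, d_mask);
    hipDeviceSynchronize();
}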
| f400ac566d42c9d0250d2a17981d26657bd3a989.cu | #include <cuda.h>
/*Lx2 performs the 2-D convolution of matrices A and row vector B*/
__global__ void Lx2(const float *d_in,float *d_out,int numRows,int numCols, float *mask)
{
//Calculate the row # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
//Calculate the column # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((4<Row) && (Row<numRows-5) && (4<Col) && (Col<numCols-5)){
d_out[Col*numRows+Row]=mask[0]*d_in[Col*numRows+Row]+
mask[1]*(d_in[(Col-1)*numRows+Row]+d_in[(Col+1)*numRows+Row])+
mask[2]*(d_in[(Col-2)*numRows+Row]+d_in[(Col+2)*numRows+Row])+
mask[3]*(d_in[(Col-3)*numRows+Row]+d_in[(Col+3)*numRows+Row])+
mask[4]*(d_in[(Col-4)*numRows+Row]+d_in[(Col+4)*numRows+Row])+
mask[5]*(d_in[(Col-5)*numRows+Row]+d_in[(Col+5)*numRows+Row]);
}
}
/*Lz2 performs the 2-D convolution of matrices A and column vector B*/
__global__ void Lz2(const float *d_in,float *d_out,int numRows,int numCols, float *mask)
{
    //Calculate the column # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
    //Calculate the row # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((4<Row) && (Row<numRows-5) && (4<Col) && (Col<numCols-5)){
int Loc = Col*numRows+Row;
d_out[Loc]=mask[0]*d_in[Loc]+
mask[1]*(d_in[Loc-1]+d_in[Loc+1])+
mask[2]*(d_in[Loc-2]+d_in[Loc+2])+
mask[3]*(d_in[Loc-3]+d_in[Loc+3])+
mask[4]*(d_in[Loc-4]+d_in[Loc+4])+
mask[5]*(d_in[Loc-5]+d_in[Loc+5]);
}
}
/*Lz1 performs the 2-D convolution of matrices A and column vector C1*/
__global__ void Lz1(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
    //Calculate the column # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
    //Calculate the row # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
int Loc = Col*numRows+Row;
d_out[Loc]=
mask[0]*(d_in[Loc+1]-d_in[Loc-1])+
mask[1]*(d_in[Loc+2]-d_in[Loc-2])+
mask[2]*(d_in[Loc+3]-d_in[Loc-3])+
mask[3]*(d_in[Loc+4]-d_in[Loc-4])+
mask[4]*(d_in[Loc+5]-d_in[Loc-5])+
mask[5]*(d_in[Loc+6]-d_in[Loc-6]);
}
}
/*Lx1 performs the 2-D convolution of matrices A and row vector C1*/
__global__ void Lx1(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
    //Calculate the column # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
    //Calculate the row # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
d_out[Col*numRows+Row]=
mask[0]*(d_in[(Col+1)*numRows+Row]-d_in[(Col-1)*numRows+Row])+
mask[1]*(d_in[(Col+2)*numRows+Row]-d_in[(Col-2)*numRows+Row])+
mask[2]*(d_in[(Col+3)*numRows+Row]-d_in[(Col-3)*numRows+Row])+
mask[3]*(d_in[(Col+4)*numRows+Row]-d_in[(Col-4)*numRows+Row])+
mask[4]*(d_in[(Col+5)*numRows+Row]-d_in[(Col-5)*numRows+Row])+
mask[5]*(d_in[(Col+6)*numRows+Row]-d_in[(Col-6)*numRows+Row]);
}
}
/*sbLx performs the 2-D convolution of matrices A and row vector S1*/
__global__ void sbLx(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
    //Calculate the column # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
    //Calculate the row # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
d_out[Col*numRows+Row]=
mask[0]*(d_in[(Col+1)*numRows+Row]-d_in[(Col-0)*numRows+Row])+
mask[1]*(d_in[(Col+2)*numRows+Row]-d_in[(Col-1)*numRows+Row])+
mask[2]*(d_in[(Col+3)*numRows+Row]-d_in[(Col-2)*numRows+Row])+
mask[3]*(d_in[(Col+4)*numRows+Row]-d_in[(Col-3)*numRows+Row])+
mask[4]*(d_in[(Col+5)*numRows+Row]-d_in[(Col-4)*numRows+Row])+
mask[5]*(d_in[(Col+6)*numRows+Row]+d_in[(Col-5)*numRows+Row]);
}
}
/*sfLx performs the 2-D convolution of matrices A and row vector S1*/
__global__ void sfLx(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
    //Calculate the column # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
    //Calculate the row # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
d_out[Col*numRows+Row]=
mask[0]*(d_in[(Col+0)*numRows+Row]-d_in[(Col-1)*numRows+Row])+
mask[1]*(d_in[(Col+1)*numRows+Row]-d_in[(Col-2)*numRows+Row])+
mask[2]*(d_in[(Col+2)*numRows+Row]-d_in[(Col-3)*numRows+Row])+
mask[3]*(d_in[(Col+3)*numRows+Row]-d_in[(Col-4)*numRows+Row])+
mask[4]*(d_in[(Col+4)*numRows+Row]-d_in[(Col-5)*numRows+Row])+
mask[5]*(d_in[(Col+5)*numRows+Row]+d_in[(Col-6)*numRows+Row]);
}
}
/*sbLz performs the 2-D convolution of matrices A and column vector S1*/
__global__ void sbLz(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
    //Calculate the column # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
    //Calculate the row # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
int Loc = Col*numRows+Row;
d_out[Col*numRows+Row]=
mask[0]*(d_in[Loc+1]-d_in[Loc-0])+
mask[1]*(d_in[Loc+2]-d_in[Loc-1])+
mask[2]*(d_in[Loc+3]-d_in[Loc-2])+
mask[3]*(d_in[Loc+4]-d_in[Loc-3])+
mask[4]*(d_in[Loc+5]-d_in[Loc-4])+
mask[5]*(d_in[Loc+6]+d_in[Loc-5]);
}
}
/*sfLz performs the 2-D convolution of matrices A and column vector S1*/
__global__ void sfLz(const float *d_in,float *d_out, int numRows,int numCols, float *mask)
{
    //Calculate the column # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
    //Calculate the row # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
int Loc = Col*numRows+Row;
d_out[Loc]=
mask[0]*(d_in[Loc+0]-d_in[Loc-1])+
mask[1]*(d_in[Loc+1]-d_in[Loc-2])+
mask[2]*(d_in[Loc+2]-d_in[Loc-3])+
mask[3]*(d_in[Loc+3]-d_in[Loc-4])+
mask[4]*(d_in[Loc+4]-d_in[Loc-5])+
mask[5]*(d_in[Loc+5]+d_in[Loc-6]);
}
}
/*rsgffd performs the 2-D forward rotated staggered-grid finite difference*/
__global__ void rsgffd(const float *d_in,float *d_outx,float *d_outz,const int numRows,int numCols,float *mask)
{
    //Calculate the column # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
    //Calculate the row # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
d_outx[Col*numRows+Row]=
mask[0]*(d_in[(Col+1)*numRows+Row-0]-d_in[(Col-0)*numRows+Row+1])+
mask[1]*(d_in[(Col+2)*numRows+Row-1]-d_in[(Col-1)*numRows+Row+2])+
mask[2]*(d_in[(Col+3)*numRows+Row-2]-d_in[(Col-2)*numRows+Row+3])+
mask[3]*(d_in[(Col+4)*numRows+Row-3]-d_in[(Col-3)*numRows+Row+4])+
mask[4]*(d_in[(Col+5)*numRows+Row-4]-d_in[(Col-4)*numRows+Row+5])+
mask[5]*(d_in[(Col+6)*numRows+Row-5]-d_in[(Col-5)*numRows+Row+6]);
d_outz[Col*numRows+Row]=
mask[0]*(d_in[(Col+1)*numRows+Row+1]-d_in[(Col-0)*numRows+Row-0])+
mask[1]*(d_in[(Col+2)*numRows+Row+2]-d_in[(Col-1)*numRows+Row-1])+
mask[2]*(d_in[(Col+3)*numRows+Row+3]-d_in[(Col-2)*numRows+Row-2])+
mask[3]*(d_in[(Col+4)*numRows+Row+4]-d_in[(Col-3)*numRows+Row-3])+
mask[4]*(d_in[(Col+5)*numRows+Row+5]-d_in[(Col-4)*numRows+Row-4])+
mask[5]*(d_in[(Col+6)*numRows+Row+6]-d_in[(Col-5)*numRows+Row-5]);
}
}
/*rsgbfd performs the 2-D backward rotated staggered-grid finite difference*/
__global__ void rsgbfd(const float *d_in,float *d_outx,float *d_outz,int numRows,int numCols,float *mask)
{
    //Calculate the column # of the d_in and d_out element to process
int Col = blockIdx.y*blockDim.y + threadIdx.y;
    //Calculate the row # of the d_in and d_out element to process
int Row = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one elements
if ((5<Row) && (Row<numRows-6) && (5<Col) && (Col<numCols-6)){
d_outx[Col*numRows+Row]=
mask[0]*(d_in[(Col+0)*numRows+Row-1]-d_in[(Col-1)*numRows+Row+0])+
mask[1]*(d_in[(Col+1)*numRows+Row-2]-d_in[(Col-2)*numRows+Row+1])+
mask[2]*(d_in[(Col+2)*numRows+Row-3]-d_in[(Col-3)*numRows+Row+2])+
mask[3]*(d_in[(Col+3)*numRows+Row-4]-d_in[(Col-4)*numRows+Row+3])+
mask[4]*(d_in[(Col+4)*numRows+Row-5]-d_in[(Col-5)*numRows+Row+4])+
mask[5]*(d_in[(Col+5)*numRows+Row-6]-d_in[(Col-6)*numRows+Row+5]);
d_outz[Col*numRows+Row]=
mask[0]*(d_in[(Col+0)*numRows+Row+0]-d_in[(Col-1)*numRows+Row-1])+
mask[1]*(d_in[(Col+1)*numRows+Row+1]-d_in[(Col-2)*numRows+Row-2])+
mask[2]*(d_in[(Col+2)*numRows+Row+2]-d_in[(Col-3)*numRows+Row-3])+
mask[3]*(d_in[(Col+3)*numRows+Row+3]-d_in[(Col-4)*numRows+Row-4])+
mask[4]*(d_in[(Col+4)*numRows+Row+4]-d_in[(Col-5)*numRows+Row-5])+
mask[5]*(d_in[(Col+5)*numRows+Row+5]-d_in[(Col-6)*numRows+Row-6]);
}
}
|
08718c31263a20f0a2c584ef129d63aa094c57d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlarfgx-v2.cu normal z -> c, Tue Sep 2 12:38:16 2014
*/
#include "common_magma.h"
#include "commonblas_c.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_c
//==============================================================================
__global__
void magma_clarfgx_gpu_kernel( int n, magmaFloatComplex* dx0, magmaFloatComplex* dx,
magmaFloatComplex *dtau, float *dxnorm,
magmaFloatComplex *dA, int it)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaFloatComplex scale;
__shared__ float xnorm;
magmaFloatComplex dxi;
if ( j < n-1 )
dxi = dx[j];
if ( i == 0 ) {
xnorm = *dxnorm;
#if (defined(PRECISION_s) || defined(PRECISION_d))
float alpha = *dx0;
float alphai = MAGMA_C_ZERO;
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 1 )
#else
magmaFloatComplex alpha = *dx0;
float alphar = MAGMA_C_REAL(alpha), alphai = MAGMA_C_IMAG(alpha);
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 0 )
#endif
{
*dtau = MAGMA_C_ZERO;
*dA = *dx0;
}
else {
#if (defined(PRECISION_s) || defined(PRECISION_d))
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = (beta - alpha) / beta;
            //*dx0 = 1.; //cannot be done here because of a race condition: all thread blocks still need to read it as alpha
*dA = beta;
}
scale = 1. / (alpha - beta);
#else
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta);
            //*dx0 = MAGMA_C_MAKE( 1., 0.); //cannot be done here because of a race condition: all thread blocks still need to read it as alpha
*dA = MAGMA_C_MAKE(beta, 0.);
}
alpha = MAGMA_C_MAKE( MAGMA_C_REAL(alpha) - beta, MAGMA_C_IMAG(alpha));
scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha);
#endif
}
}
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_C_MUL(dxi, scale);
if (j<it){
*( dA-it+j) = *(dx0-it+j);
*(dx0-it+j) = MAGMA_C_MAKE(0., 0.);
}
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
    with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
    The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_clarfgx_gpu(magma_int_t n, magmaFloatComplex *dx0, magmaFloatComplex *dx,
magmaFloatComplex *dtau, float *dxnorm,
magmaFloatComplex *dA, magma_int_t it)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_clarfgx_gpu_kernel), dim3(blocks), dim3(threads), 0, magma_stream , n, dx0, dx, dtau, dxnorm, dA, it);
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
    with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
    The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_clarfgtx_gpu(magma_int_t n, magmaFloatComplex *dx0, magmaFloatComplex *dx,
magmaFloatComplex *dtau, float *dxnorm,
magmaFloatComplex *dA, magma_int_t i,
magmaFloatComplex *V, magma_int_t ldv, magmaFloatComplex *T, magma_int_t ldt,
magmaFloatComplex *work)
{
/* Generate the elementary reflector H(i) */
magma_clarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, i);
if (i==0) {
magmaFloatComplex tt = MAGMA_C_ONE;
magmablas_clacpy(MagmaUpperLower, 1, 1, dtau, 1, T+i+i*ldt, 1);
magma_csetmatrix(1,1, &tt,1, dx0,1);
}
else {
/* Compute the i-th column of T */
hipLaunchKernelGGL(( magma_cgemv_kernel3), dim3(i), dim3(BLOCK_SIZE), 0, magma_stream , n, V, ldv, dx0, work, dtau);
hipLaunchKernelGGL(( magma_ctrmv_kernel2), dim3(i), dim3(i), 0, magma_stream , T, ldt, work, T+i*ldt, dtau);
}
}
//==============================================================================
| 08718c31263a20f0a2c584ef129d63aa094c57d7.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlarfgx-v2.cu normal z -> c, Tue Sep 2 12:38:16 2014
*/
#include "common_magma.h"
#include "commonblas_c.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_c
//==============================================================================
__global__
void magma_clarfgx_gpu_kernel( int n, magmaFloatComplex* dx0, magmaFloatComplex* dx,
magmaFloatComplex *dtau, float *dxnorm,
magmaFloatComplex *dA, int it)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaFloatComplex scale;
__shared__ float xnorm;
magmaFloatComplex dxi;
if ( j < n-1 )
dxi = dx[j];
if ( i == 0 ) {
xnorm = *dxnorm;
#if (defined(PRECISION_s) || defined(PRECISION_d))
float alpha = *dx0;
float alphai = MAGMA_C_ZERO;
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 1 )
#else
magmaFloatComplex alpha = *dx0;
float alphar = MAGMA_C_REAL(alpha), alphai = MAGMA_C_IMAG(alpha);
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 0 )
#endif
{
*dtau = MAGMA_C_ZERO;
*dA = *dx0;
}
else {
#if (defined(PRECISION_s) || defined(PRECISION_d))
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = (beta - alpha) / beta;
            //*dx0 = 1.; //cannot be done here because of a race condition: all thread blocks still need to read it as alpha
*dA = beta;
}
scale = 1. / (alpha - beta);
#else
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta);
            //*dx0 = MAGMA_C_MAKE( 1., 0.); //cannot be done here because of a race condition: all thread blocks still need to read it as alpha
*dA = MAGMA_C_MAKE(beta, 0.);
}
alpha = MAGMA_C_MAKE( MAGMA_C_REAL(alpha) - beta, MAGMA_C_IMAG(alpha));
scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha);
#endif
}
}
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_C_MUL(dxi, scale);
if (j<it){
*( dA-it+j) = *(dx0-it+j);
*(dx0-it+j) = MAGMA_C_MAKE(0., 0.);
}
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
    The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_clarfgx_gpu(magma_int_t n, magmaFloatComplex *dx0, magmaFloatComplex *dx,
magmaFloatComplex *dtau, float *dxnorm,
magmaFloatComplex *dA, magma_int_t it)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
magma_clarfgx_gpu_kernel<<< blocks, threads, 0, magma_stream >>>( n, dx0, dx, dtau, dxnorm, dA, it);
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
    The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_clarfgtx_gpu(magma_int_t n, magmaFloatComplex *dx0, magmaFloatComplex *dx,
magmaFloatComplex *dtau, float *dxnorm,
magmaFloatComplex *dA, magma_int_t i,
magmaFloatComplex *V, magma_int_t ldv, magmaFloatComplex *T, magma_int_t ldt,
magmaFloatComplex *work)
{
/* Generate the elementary reflector H(i) */
magma_clarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, i);
if (i==0) {
magmaFloatComplex tt = MAGMA_C_ONE;
magmablas_clacpy(MagmaUpperLower, 1, 1, dtau, 1, T+i+i*ldt, 1);
magma_csetmatrix(1,1, &tt,1, dx0,1);
}
else {
/* Compute the i-th column of T */
magma_cgemv_kernel3<<< i, BLOCK_SIZE, 0, magma_stream >>>(n, V, ldv, dx0, work, dtau);
magma_ctrmv_kernel2<<< i, i, 0, magma_stream >>>( T, ldt, work, T+i*ldt, dtau);
}
}
//==============================================================================
|
6cde54066247ba1d3001ded08ddd988b4c8de74e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void g_getCost_3(float* cost,
float* weight,
float lambda, int wlen)
{
__shared__ float _sum[32];
_sum[threadIdx.x] = 0;
__syncthreads();
for(int i = 0; i < wlen; i += blockDim.x)
{
int id = i + threadIdx.x;
if(id < wlen)
{
_sum[threadIdx.x] += weight[id] * weight[id];
}
}
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < skip && (threadIdx.x + skip) < len)
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = skip;
}
} | 6cde54066247ba1d3001ded08ddd988b4c8de74e.cu | __global__ void g_getCost_3(float* cost,
float* weight,
float lambda, int wlen)
{
__shared__ float _sum[32];
_sum[threadIdx.x] = 0;
__syncthreads();
for(int i = 0; i < wlen; i += blockDim.x)
{
int id = i + threadIdx.x;
if(id < wlen)
{
_sum[threadIdx.x] += weight[id] * weight[id];
}
}
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < skip && (threadIdx.x + skip) < len)
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = skip;
}
} |
917f7c4b076d8c2634676b44606514417c0dbf41.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 1
#define TW 2
#define TC 16
#define C 64
#define N 32
#define H 28
#define W 28
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
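// Tiling parameters for the hand-written conv2d kernel below, as read from the code:
// each thread accumulates a TH x TW output patch for one output channel, TC input
// channels are staged per shared-memory tile, TCS/THS/TWS count the tiles along the
// C/H/W dimensions, and WPAD is the padded shared-memory row width for the 3x3 halo.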
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
exit(-1);
}
}
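// Baseline direct-convolution kernel, presumably emitted by TVM (main() below times it
// as time_tvm); the flattened indexing and unrolling are machine-generated.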
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[4];
__shared__ float pad_temp_shared[192];
__shared__ float kernel_shared[2304];
float pad_temp_shared_local[24];
float kernel_shared_local[36];
for (int ff_c_init = 0; ff_c_init < 2; ++ff_c_init) {
for (int xx_c_init = 0; xx_c_init < 2; ++xx_c_init) {
compute_local[(((ff_c_init * 2) + xx_c_init))] = 0.000000e+00f;
}
}
for (int rc_outer = 0; rc_outer < 8; ++rc_outer) {
__syncthreads();
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 3; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
pad_temp_shared[((((((int)threadIdx.z) * 12) + (((int)threadIdx.y) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= ((((int)blockIdx.y) * 4) + (((((int)threadIdx.z) * 3) + (((((int)threadIdx.y) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) >> 2)) % 6))) && (((((int)blockIdx.y) * 4) + (((((int)threadIdx.z) * 3) + (((((int)threadIdx.y) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) >> 2)) % 6)) < 29)) && (1 <= ((((int)blockIdx.x) * 2) + (((((int)threadIdx.y) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) & 3)))) && (((((int)blockIdx.x) * 2) + (((((int)threadIdx.y) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) & 3)) < 29)) ? data[((((((((rc_outer * 6272) + ((((((int)threadIdx.z) * 3) + (((((int)threadIdx.y) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) >> 2)) / 6) * 784)) + (((int)blockIdx.y) * 112)) + ((((((int)threadIdx.z) * 3) + (((((int)threadIdx.y) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) >> 2)) % 6) * 28)) + (((int)blockIdx.x) * 2)) + (((((int)threadIdx.y) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) & 3)) - 29))] : 0.000000e+00f);
}
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 36; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
kernel_shared[((((((int)threadIdx.z) * 144) + (((int)threadIdx.y) * 36)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[((((((((int)threadIdx.z) * 1152) + ((((((int)threadIdx.y) * 4) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 9)) >> 3) * 576)) + (rc_outer * 72)) + ((((((int)threadIdx.y) * 4) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 9)) & 7) * 9)) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 % 9)))];
}
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 4; ++rc_inner_outer) {
for (int ax1 = 0; ax1 < 2; ++ax1) {
for (int ax2 = 0; ax2 < 3; ++ax2) {
for (int ax3 = 0; ax3 < 4; ++ax3) {
pad_temp_shared_local[((((ax1 * 12) + (ax2 * 4)) + ax3))] = pad_temp_shared[((((((rc_inner_outer * 48) + (ax1 * 24)) + (ax2 * 4)) + (((int)threadIdx.y) * 4)) + ax3))];
}
}
}
for (int ax0 = 0; ax0 < 2; ++ax0) {
for (int ax11 = 0; ax11 < 2; ++ax11) {
for (int ax21 = 0; ax21 < 3; ++ax21) {
for (int ax31 = 0; ax31 < 3; ++ax31) {
kernel_shared_local[(((((ax0 * 18) + (ax11 * 9)) + (ax21 * 3)) + ax31))] = kernel_shared[(((((((((int)threadIdx.z) * 144) + (ax0 * 72)) + (rc_inner_outer * 18)) + (ax11 * 9)) + (ax21 * 3)) + ax31))];
}
}
}
}
for (int rc_inner_inner = 0; rc_inner_inner < 2; ++rc_inner_inner) {
for (int ry_inner_inner = 0; ry_inner_inner < 3; ++ry_inner_inner) {
for (int rx_inner_inner = 0; rx_inner_inner < 3; ++rx_inner_inner) {
for (int ff_c = 0; ff_c < 2; ++ff_c) {
for (int xx_c = 0; xx_c < 2; ++xx_c) {
compute_local[(((ff_c * 2) + xx_c))] = (compute_local[(((ff_c * 2) + xx_c))] + (pad_temp_shared_local[(((((rc_inner_inner * 12) + (ry_inner_inner * 4)) + xx_c) + rx_inner_inner))] * kernel_shared_local[(((((ff_c * 18) + (rc_inner_inner * 9)) + (ry_inner_inner * 3)) + rx_inner_inner))]));
}
}
}
}
}
}
}
for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 2; ++ff_inner_inner_inner) {
for (int xx_inner_inner_inner = 0; xx_inner_inner_inner < 2; ++xx_inner_inner_inner) {
compute[(((((((((int)threadIdx.z) * 1568) + (ff_inner_inner_inner * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + xx_inner_inner_inner))] = compute_local[(((ff_inner_inner_inner * 2) + xx_inner_inner_inner))];
}
}
}
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
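// Stages a TC x (TH+2) x WPAD input tile, including the one-element halo needed by the
// 3x3 stencil, into shared memory. h_offset distinguishes tiles at the top image border
// (tile row 0 stays zero-padded) from interior tiles (the halo row is loaded from input).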
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
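// Writes the per-thread TH x TW accumulator back to global memory. atomicAdd is needed
// because blocks handling different input-channel tiles accumulate into the same output
// elements; the switch selects how many of the TH x TW results are valid for edge tiles.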
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
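// Hand-tiled direct 3x3 convolution: each block covers one (channel-tile, row-tile) pair,
// and each thread owns one output channel (lane_id) and one TW-wide output strip (tw_id),
// keeping the staged input tile in shared memory and the per-channel 3x3 weights in
// registers (data_array).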
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(14,7,1);
dim3 block(1,4,16);
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tvm;
hipEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
hipMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
hipEventRecord(event_start);
hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/A100-layers-eval-oracle.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_tvm, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
| 917f7c4b076d8c2634676b44606514417c0dbf41.cu | #include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 1
#define TW 2
#define TC 16
#define C 64
#define N 32
#define H 28
#define W 28
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
exit(-1);
}
}
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[4];
__shared__ float pad_temp_shared[192];
__shared__ float kernel_shared[2304];
float pad_temp_shared_local[24];
float kernel_shared_local[36];
for (int ff_c_init = 0; ff_c_init < 2; ++ff_c_init) {
for (int xx_c_init = 0; xx_c_init < 2; ++xx_c_init) {
compute_local[(((ff_c_init * 2) + xx_c_init))] = 0.000000e+00f;
}
}
for (int rc_outer = 0; rc_outer < 8; ++rc_outer) {
__syncthreads();
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 3; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
pad_temp_shared[((((((int)threadIdx.z) * 12) + (((int)threadIdx.y) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= ((((int)blockIdx.y) * 4) + (((((int)threadIdx.z) * 3) + (((((int)threadIdx.y) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) >> 2)) % 6))) && (((((int)blockIdx.y) * 4) + (((((int)threadIdx.z) * 3) + (((((int)threadIdx.y) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) >> 2)) % 6)) < 29)) && (1 <= ((((int)blockIdx.x) * 2) + (((((int)threadIdx.y) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) & 3)))) && (((((int)blockIdx.x) * 2) + (((((int)threadIdx.y) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) & 3)) < 29)) ? data[((((((((rc_outer * 6272) + ((((((int)threadIdx.z) * 3) + (((((int)threadIdx.y) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) >> 2)) / 6) * 784)) + (((int)blockIdx.y) * 112)) + ((((((int)threadIdx.z) * 3) + (((((int)threadIdx.y) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) >> 2)) % 6) * 28)) + (((int)blockIdx.x) * 2)) + (((((int)threadIdx.y) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) & 3)) - 29))] : 0.000000e+00f);
}
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 36; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
kernel_shared[((((((int)threadIdx.z) * 144) + (((int)threadIdx.y) * 36)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[((((((((int)threadIdx.z) * 1152) + ((((((int)threadIdx.y) * 4) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 9)) >> 3) * 576)) + (rc_outer * 72)) + ((((((int)threadIdx.y) * 4) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 9)) & 7) * 9)) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 % 9)))];
}
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 4; ++rc_inner_outer) {
for (int ax1 = 0; ax1 < 2; ++ax1) {
for (int ax2 = 0; ax2 < 3; ++ax2) {
for (int ax3 = 0; ax3 < 4; ++ax3) {
pad_temp_shared_local[((((ax1 * 12) + (ax2 * 4)) + ax3))] = pad_temp_shared[((((((rc_inner_outer * 48) + (ax1 * 24)) + (ax2 * 4)) + (((int)threadIdx.y) * 4)) + ax3))];
}
}
}
for (int ax0 = 0; ax0 < 2; ++ax0) {
for (int ax11 = 0; ax11 < 2; ++ax11) {
for (int ax21 = 0; ax21 < 3; ++ax21) {
for (int ax31 = 0; ax31 < 3; ++ax31) {
kernel_shared_local[(((((ax0 * 18) + (ax11 * 9)) + (ax21 * 3)) + ax31))] = kernel_shared[(((((((((int)threadIdx.z) * 144) + (ax0 * 72)) + (rc_inner_outer * 18)) + (ax11 * 9)) + (ax21 * 3)) + ax31))];
}
}
}
}
for (int rc_inner_inner = 0; rc_inner_inner < 2; ++rc_inner_inner) {
for (int ry_inner_inner = 0; ry_inner_inner < 3; ++ry_inner_inner) {
for (int rx_inner_inner = 0; rx_inner_inner < 3; ++rx_inner_inner) {
for (int ff_c = 0; ff_c < 2; ++ff_c) {
for (int xx_c = 0; xx_c < 2; ++xx_c) {
compute_local[(((ff_c * 2) + xx_c))] = (compute_local[(((ff_c * 2) + xx_c))] + (pad_temp_shared_local[(((((rc_inner_inner * 12) + (ry_inner_inner * 4)) + xx_c) + rx_inner_inner))] * kernel_shared_local[(((((ff_c * 18) + (rc_inner_inner * 9)) + (ry_inner_inner * 3)) + rx_inner_inner))]));
}
}
}
}
}
}
}
for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 2; ++ff_inner_inner_inner) {
for (int xx_inner_inner_inner = 0; xx_inner_inner_inner < 2; ++xx_inner_inner_inner) {
compute[(((((((((int)threadIdx.z) * 1568) + (ff_inner_inner_inner * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + xx_inner_inner_inner))] = compute_local[(((ff_inner_inner_inner * 2) + xx_inner_inner_inner))];
}
}
}
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
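// Benchmark the TVM-generated kernel (default_function_kernel0) and then the hand-written
// conv2d kernel against the cuDNN GEMM/Winograd/FFT baselines timed above.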
dim3 grid(14,7,1);
dim3 block(1,4,16);
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tvm;
cudaEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
cudaEventRecord(event_start);
conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/A100-layers-eval-oracle.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_tvm, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
b8228fe490846d031ede40b8d282320f607549f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/AccumulateType.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/native/Pool.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHNumerics.cuh>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
__device__ inline int min(int a, int b) {
return a <= b ? a : b;
}
template <typename scalar_t>
__global__ static void max_pool3d_with_indices_single_out_frame(
scalar_t* inputData,
PackedTensorAccessor64<scalar_t, 4> output,
PackedTensorAccessor64<int64_t, 4> indices,
int itime, int iheight, int iwidth,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW,
int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature
if (oRow < output.size(2) && oColumn < output.size(3))
{
int tStart = oFrame * dT - pT;
int hStart = oRow * dH - pH;
int wStart = oColumn * dW - pW;
int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime);
int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight);
int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth);
while(tStart < 0)
tStart += dilationT;
while(hStart < 0)
hStart += dilationH;
while(wStart < 0)
wStart += dilationW;
int maxIndex = tStart * iheight * iwidth + hStart * iwidth + wStart;
inputData += slice * itime * iheight * iwidth;
scalar_t max = at::numeric_limits<scalar_t>::lower_bound(); // -Infinity
for (int t = tStart; t < tEnd; t += dilationT)
{
for (int h = hStart; h < hEnd; h += dilationH)
{
for (int w = wStart; w < wEnd; w += dilationW)
{
int index = t * iheight * iwidth + h * iwidth + w;
scalar_t val = inputData[index];
if ((max < val) || THCNumerics<scalar_t>::isnan(val))
{
max = val;
maxIndex = index;
}
}
}
}
output[slice][oFrame][oRow][oColumn] = max;
indices[slice][oFrame][oRow][oColumn] = maxIndex;
}
}
template <typename scalar_t>
void max_pool3d_with_indices_out_frame(
scalar_t* input_data,
const Tensor& output,
const Tensor& indices,
int totalZ,
int itime, int iheight, int iwidth,
int otime, int oheight, int owidth,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW)
{
int offsetZ = 0;
dim3 block(32, 8);
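// The grid's z-dimension is capped at 65535 blocks, so the (time x feature x batch)
// extent is processed in chunks of at most 65535 slices per launch.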
while (totalZ > 0) {
dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)),
cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( max_pool3d_with_indices_single_out_frame)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_data,
output.packed_accessor64<scalar_t, 4>(),
indices.packed_accessor64<int64_t, 4>(),
itime, iheight, iwidth,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
offsetZ);
AT_CUDA_CHECK(hipGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
}
#undef UPDATE_OUTPUT_KERNEL_WIDTH
template <typename scalar_t>
__global__ static void max_pool3d_with_indices_backward_single_out_frame(
scalar_t *gradInputData,
PackedTensorAccessor64<scalar_t, 4> gradOutput,
PackedTensorAccessor64<int64_t, 4> indices,
int itime, int iheight, int iwidth,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW,
int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // output slice/feature
if (oRow < gradOutput.size(2) && oColumn < gradOutput.size(3))
{
int maxIndex = indices[slice][oFrame][oRow][oColumn];
if (maxIndex != -1) {
gpuAtomicAdd(&gradInputData[slice * itime * iheight * iwidth + maxIndex],
gradOutput[slice][oFrame][oRow][oColumn]);
}
}
}
template <typename scalar_t>
void max_pool3d_with_indices_backward_out_frame(
scalar_t *gradInputData,
const Tensor& gradOutput,
const Tensor& indices,
int64_t totalZ,
int itime, int iheight, int iwidth,
int oheight, int owidth,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW)
{
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)),
cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( max_pool3d_with_indices_backward_single_out_frame)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradInputData,
gradOutput.packed_accessor64<scalar_t, 4>(),
indices.packed_accessor64<int64_t, 4>(),
itime, iheight, iwidth,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
offsetZ);
AT_CUDA_CHECK(hipGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
}
void max_pool3d_with_indices_out_cuda_template(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg output_arg{ output, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ input, "input", 3 };
checkAllSameGPU("max_pool3d_with_indices_out_cuda",
{output_arg, indices_arg, input_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3,
"max_pool3d: kernel_size must either be a single int, or a tuple of three ints")
const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3,
"max_pool3d: stride must either be omitted, a single int, or a tuple of three ints")
const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
const int dH = stride.empty() ? kH :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 3,
"max_pool3d: padding must be either be a single int, or a tuple of three ints");
const int pT = safe_downcast<int, int64_t>(padding[0]);
const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]);
const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]);
TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3,
"max_pool3d: dilation must be either a single int, or a tuple of three ints");
const int dilationT = safe_downcast<int, int64_t>(dilation[0]);
const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]);
const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]);
TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input");
const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
const int64_t nslices = input.size(-4);
const int64_t itime = input.size(-3);
const int64_t iheight = input.size(-2);
const int64_t iwidth = input.size(-1);
const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode);
const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode);
const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode);
pool3d_shape_check(
input,
nslices,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
itime, iheight, iwidth,
otime, oheight, owidth);
if (input.ndimension() == 4) {
output.resize_({ nslices, otime, oheight, owidth});
indices.resize_({nslices, otime, oheight, owidth});
}
else {
output.resize_({nbatch, nslices, otime, oheight, owidth});
indices.resize_({nbatch, nslices, otime, oheight, owidth});
}
Tensor work_input = input.contiguous();
Tensor work_output = output;
Tensor work_indices = indices;
if (input.ndimension() == 5) {
// Collapse batch and feature dimensions.
work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth});
work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth});
work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth});
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(),
"max_pool3d_with_indices_out_frame",
[&]{
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "max_pool3d_with_indices_out_frame", [&] {
scalar_t *input_data = work_input.data_ptr<scalar_t>();
int64_t totalZ = otime * nslices * nbatch;
max_pool3d_with_indices_out_frame(
input_data, work_output, work_indices,
totalZ,
itime, iheight, iwidth,
otime, oheight, owidth,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW);
});
}
);
}
void max_pool3d_with_indices_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
const Tensor& indices,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 };
TensorArg input_arg{ input, "input", 3 };
TensorArg indices_arg{ indices, "indices", 4 };
checkAllSameGPU("max_pool3d_with_indices_backward_out_cuda",
{gradInput_arg, gradOutput_arg, input_arg, indices_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3,
"max_pool3d: kernel_size must either be a single int, or a tuple of three ints")
const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3,
"max_pool3d: stride must either be omitted, a single int, or a tuple of three ints")
const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
const int dH = stride.empty() ? kH :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 3,
"max_pool3d: padding must be either be a single int, or a tuple of three ints");
const int pT = safe_downcast<int, int64_t>(padding[0]);
const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]);
const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]);
TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3,
"max_pool3d: dilation must be either a single int, or a tuple of three ints");
const int dilationT = safe_downcast<int, int64_t>(dilation[0]);
const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]);
const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]);
TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input");
TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for gradOutput");
// Resize and initialize result tensor.
gradInput.resize_as_(input);
gradInput.zero_();
const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
const int64_t nslices = input.size(-4);
const int64_t otime = gradOutput.size(-3);
const int64_t oheight = gradOutput.size(-2);
const int64_t owidth = gradOutput.size(-1);
const int64_t itime = gradInput.size(-3);
const int64_t iheight = gradInput.size(-2);
const int64_t iwidth = gradInput.size(-1);
max_pool3d_backward_shape_check(
input,
gradOutput,
indices,
nslices,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
itime, iheight, iwidth,
otime, oheight, owidth);
Tensor work_grad_input = gradInput;
Tensor work_grad_output = gradOutput.contiguous();
Tensor work_indices = indices.contiguous();
if (input.ndimension() == 5) {
// Collapse batch and feature dimensions.
work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth});
work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth});
work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth});
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"max_pool3d_with_indices_backward_out_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "max_pool3d_with_indices_backward_out_frame", [&] {
const int64_t totalZ = otime * nslices * nbatch;
scalar_t *grad_input_data = work_grad_input.data_ptr<scalar_t>();
max_pool3d_with_indices_backward_out_frame(
grad_input_data, work_grad_output, work_indices,
totalZ,
itime, iheight, iwidth,
oheight, owidth,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW);
});
}
);
}
} // namespace
std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
max_pool3d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return std::tuple<Tensor&, Tensor&>(output, indices);
}
std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda(
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
NoNamesGuard guard;
Tensor output = at::empty({0}, input.options());
Tensor indices = at::empty({0}, input.options().dtype(kLong));
max_pool3d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
guard.reset();
namedinference::propagate_names(output, input);
namedinference::propagate_names(indices, input);
return std::tuple<Tensor, Tensor>(output, indices);
}
Tensor& max_pool3d_with_indices_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices)
{
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_out_cuda");
max_pool3d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
Tensor max_pool3d_with_indices_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices)
{
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_cuda");
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
max_pool3d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
} // at::native
} // at
| b8228fe490846d031ede40b8d282320f607549f1.cu | #include <ATen/AccumulateType.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/native/Pool.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCNumerics.cuh>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
__device__ inline int min(int a, int b) {
return a <= b ? a : b;
}
template <typename scalar_t>
__global__ static void max_pool3d_with_indices_single_out_frame(
scalar_t* inputData,
PackedTensorAccessor64<scalar_t, 4> output,
PackedTensorAccessor64<int64_t, 4> indices,
int itime, int iheight, int iwidth,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW,
int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature
if (oRow < output.size(2) && oColumn < output.size(3))
{
int tStart = oFrame * dT - pT;
int hStart = oRow * dH - pH;
int wStart = oColumn * dW - pW;
int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime);
int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight);
int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth);
while(tStart < 0)
tStart += dilationT;
while(hStart < 0)
hStart += dilationH;
while(wStart < 0)
wStart += dilationW;
int maxIndex = tStart * iheight * iwidth + hStart * iwidth + wStart;
inputData += slice * itime * iheight * iwidth;
scalar_t max = at::numeric_limits<scalar_t>::lower_bound(); // -Infinity
for (int t = tStart; t < tEnd; t += dilationT)
{
for (int h = hStart; h < hEnd; h += dilationH)
{
for (int w = wStart; w < wEnd; w += dilationW)
{
int index = t * iheight * iwidth + h * iwidth + w;
scalar_t val = inputData[index];
if ((max < val) || THCNumerics<scalar_t>::isnan(val))
{
max = val;
maxIndex = index;
}
}
}
}
output[slice][oFrame][oRow][oColumn] = max;
indices[slice][oFrame][oRow][oColumn] = maxIndex;
}
}
template <typename scalar_t>
void max_pool3d_with_indices_out_frame(
scalar_t* input_data,
const Tensor& output,
const Tensor& indices,
int totalZ,
int itime, int iheight, int iwidth,
int otime, int oheight, int owidth,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW)
{
int offsetZ = 0;
dim3 block(32, 8);
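// The grid's z-dimension is capped at 65535 blocks, so the (time x feature x batch)
// extent is processed in chunks of at most 65535 slices per launch.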
while (totalZ > 0) {
dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)),
cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
max_pool3d_with_indices_single_out_frame
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
input_data,
output.packed_accessor64<scalar_t, 4>(),
indices.packed_accessor64<int64_t, 4>(),
itime, iheight, iwidth,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
offsetZ);
AT_CUDA_CHECK(cudaGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
}
#undef UPDATE_OUTPUT_KERNEL_WIDTH
template <typename scalar_t>
__global__ static void max_pool3d_with_indices_backward_single_out_frame(
scalar_t *gradInputData,
PackedTensorAccessor64<scalar_t, 4> gradOutput,
PackedTensorAccessor64<int64_t, 4> indices,
int itime, int iheight, int iwidth,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW,
int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // output slice/feature
if (oRow < gradOutput.size(2) && oColumn < gradOutput.size(3))
{
int maxIndex = indices[slice][oFrame][oRow][oColumn];
if (maxIndex != -1) {
gpuAtomicAdd(&gradInputData[slice * itime * iheight * iwidth + maxIndex],
gradOutput[slice][oFrame][oRow][oColumn]);
}
}
}
template <typename scalar_t>
void max_pool3d_with_indices_backward_out_frame(
scalar_t *gradInputData,
const Tensor& gradOutput,
const Tensor& indices,
int64_t totalZ,
int itime, int iheight, int iwidth,
int oheight, int owidth,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW)
{
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)),
cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
max_pool3d_with_indices_backward_single_out_frame
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
gradInputData,
gradOutput.packed_accessor64<scalar_t, 4>(),
indices.packed_accessor64<int64_t, 4>(),
itime, iheight, iwidth,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
offsetZ);
AT_CUDA_CHECK(cudaGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
}
void max_pool3d_with_indices_out_cuda_template(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg output_arg{ output, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ input, "input", 3 };
checkAllSameGPU("max_pool3d_with_indices_out_cuda",
{output_arg, indices_arg, input_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3,
"max_pool3d: kernel_size must either be a single int, or a tuple of three ints")
const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3,
"max_pool3d: stride must either be omitted, a single int, or a tuple of three ints")
const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
const int dH = stride.empty() ? kH :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 3,
"max_pool3d: padding must be either be a single int, or a tuple of three ints");
const int pT = safe_downcast<int, int64_t>(padding[0]);
const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]);
const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]);
TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3,
"max_pool3d: dilation must be either a single int, or a tuple of three ints");
const int dilationT = safe_downcast<int, int64_t>(dilation[0]);
const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]);
const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]);
TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input");
const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
const int64_t nslices = input.size(-4);
const int64_t itime = input.size(-3);
const int64_t iheight = input.size(-2);
const int64_t iwidth = input.size(-1);
const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode);
const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode);
const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode);
pool3d_shape_check(
input,
nslices,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
itime, iheight, iwidth,
otime, oheight, owidth);
if (input.ndimension() == 4) {
output.resize_({ nslices, otime, oheight, owidth});
indices.resize_({nslices, otime, oheight, owidth});
}
else {
output.resize_({nbatch, nslices, otime, oheight, owidth});
indices.resize_({nbatch, nslices, otime, oheight, owidth});
}
Tensor work_input = input.contiguous();
Tensor work_output = output;
Tensor work_indices = indices;
if (input.ndimension() == 5) {
// Collapse batch and feature dimensions.
work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth});
work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth});
work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth});
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(),
"max_pool3d_with_indices_out_frame",
[&]{
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "max_pool3d_with_indices_out_frame", [&] {
scalar_t *input_data = work_input.data_ptr<scalar_t>();
int64_t totalZ = otime * nslices * nbatch;
max_pool3d_with_indices_out_frame(
input_data, work_output, work_indices,
totalZ,
itime, iheight, iwidth,
otime, oheight, owidth,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW);
});
}
);
}
void max_pool3d_with_indices_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
const Tensor& indices,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 };
TensorArg input_arg{ input, "input", 3 };
TensorArg indices_arg{ indices, "indices", 4 };
checkAllSameGPU("max_pool3d_with_indices_backward_out_cuda",
{gradInput_arg, gradOutput_arg, input_arg, indices_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3,
"max_pool3d: kernel_size must either be a single int, or a tuple of three ints")
const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3,
"max_pool3d: stride must either be omitted, a single int, or a tuple of three ints")
const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
const int dH = stride.empty() ? kH :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 3,
"max_pool3d: padding must be either be a single int, or a tuple of three ints");
const int pT = safe_downcast<int, int64_t>(padding[0]);
const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]);
const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]);
TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3,
"max_pool3d: dilation must be either a single int, or a tuple of three ints");
const int dilationT = safe_downcast<int, int64_t>(dilation[0]);
const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]);
const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]);
TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input");
TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for gradOutput");
// Resize and initialize result tensor.
gradInput.resize_as_(input);
gradInput.zero_();
const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
const int64_t nslices = input.size(-4);
const int64_t otime = gradOutput.size(-3);
const int64_t oheight = gradOutput.size(-2);
const int64_t owidth = gradOutput.size(-1);
const int64_t itime = gradInput.size(-3);
const int64_t iheight = gradInput.size(-2);
const int64_t iwidth = gradInput.size(-1);
max_pool3d_backward_shape_check(
input,
gradOutput,
indices,
nslices,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
itime, iheight, iwidth,
otime, oheight, owidth);
Tensor work_grad_input = gradInput;
Tensor work_grad_output = gradOutput.contiguous();
Tensor work_indices = indices.contiguous();
if (input.ndimension() == 5) {
// Collapse batch and feature dimensions.
work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth});
work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth});
work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth});
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"max_pool3d_with_indices_backward_out_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "max_pool3d_with_indices_backward_out_frame", [&] {
const int64_t totalZ = otime * nslices * nbatch;
scalar_t *grad_input_data = work_grad_input.data_ptr<scalar_t>();
max_pool3d_with_indices_backward_out_frame(
grad_input_data, work_grad_output, work_indices,
totalZ,
itime, iheight, iwidth,
oheight, owidth,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW);
});
}
);
}
} // namespace
std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
max_pool3d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return std::tuple<Tensor&, Tensor&>(output, indices);
}
std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda(
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
NoNamesGuard guard;
Tensor output = at::empty({0}, input.options());
Tensor indices = at::empty({0}, input.options().dtype(kLong));
max_pool3d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
guard.reset();
namedinference::propagate_names(output, input);
namedinference::propagate_names(indices, input);
return std::tuple<Tensor, Tensor>(output, indices);
}
Tensor& max_pool3d_with_indices_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices)
{
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_out_cuda");
max_pool3d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
Tensor max_pool3d_with_indices_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices)
{
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_cuda");
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
max_pool3d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
} // at::native
} // at
|
a1ad08f8cc974cb204f4530deaca0a84ce746b14.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <unistd.h>
#define BLOCKSIZEX 8
#define BLOCKSIZEY 8
#define BLOCKSIZEZ 8
void checkCUDAError (const char *msg);
void dprint (float *campo, int x, int y, int Lx, int Ly, int Lz);
void mprint (float *campo, int x, int y, int Lx, int Ly, int Lz);
void midprint (float *campo, int Lx, int Ly, int Lz);
void cria_arquivo(float *campo, int x, int y,int t, int Lx, int Ly, int Lz);
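// WaveStepH: FDTD (Yee-scheme) update of the magnetic-field components from the curl of E.
// Each thread updates four consecutive cells in x (manual 4-way unroll).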
__global__ void WaveStepH (float *Hx, float *Hy, float *Hz, float *Ex, float *Ey, float *Ez, int Lx, int Ly, int Lz, float mx, float my, float *out)
{
int i = blockIdx.x * (blockDim.x) + threadIdx.x;
int j = blockIdx.y * (blockDim.y) + threadIdx.y;
int k = blockIdx.z * (blockDim.z) + threadIdx.z;
if( j< Ly && i<Lx && k<Lz)
{
Hx[4*i+j*Lx+k*Lx*Ly] = Hx[4*i+j*Lx+k*Lx*Ly] + mx * (Ey[4*i+j*Lx+(k+1)*Lx*Ly] - Ey[4*i+j*Lx+k*Lx*Ly] - Ez[4*i+(j+1)*Lx+k*Lx*Ly] + Ez[4*i+j*Lx+k*Lx*Ly] );
Hx[(4*i+1)+j*Lx+k*Lx*Ly] = Hx[(4*i+1)+j*Lx+k*Lx*Ly] + mx * (Ey[(4*i+1)+j*Lx+(k+1)*Lx*Ly] - Ey[(4*i+1)+j*Lx+k*Lx*Ly] - Ez[(4*i+1)+(j+1)*Lx+k*Lx*Ly] + Ez[(4*i+1)+j*Lx+k*Lx*Ly] );
Hx[(4*i+2)+j*Lx+k*Lx*Ly] = Hx[(4*i+2)+j*Lx+k*Lx*Ly] + mx * (Ey[(4*i+2)+j*Lx+(k+1)*Lx*Ly] - Ey[(4*i+2)+j*Lx+k*Lx*Ly] - Ez[(4*i+2)+(j+1)*Lx+k*Lx*Ly] + Ez[(4*i+2)+j*Lx+k*Lx*Ly] );
Hx[(4*i+3)+j*Lx+k*Lx*Ly] = Hx[(4*i+3)+j*Lx+k*Lx*Ly] + mx * (Ey[(4*i+3)+j*Lx+(k+1)*Lx*Ly] - Ey[(4*i+3)+j*Lx+k*Lx*Ly] - Ez[(4*i+3)+(j+1)*Lx+k*Lx*Ly] + Ez[(4*i+3)+j*Lx+k*Lx*Ly] );
Hy[4*i+j*Lx+k*Lx*Ly] = Hy[4*i+j*Lx+k*Lx*Ly] + mx * (Ez[4*i+1+j*Lx+k*Lx*Ly] - Ez[4*i+j*Lx+k*Lx*Ly] - Ex[4*i+j*Lx+(k+1)*Lx*Ly] + Ex[4*i+j*Lx+k*Lx*Ly] );
Hy[(4*i+1)+j*Lx+k*Lx*Ly] = Hy[(4*i+1)+j*Lx+k*Lx*Ly] + mx * (Ez[(4*i+2)+j*Lx+k*Lx*Ly] - Ez[(4*i+1)+j*Lx+k*Lx*Ly] - Ex[(4*i+1)+j*Lx+(k+1)*Lx*Ly] + Ex[(4*i+1)+j*Lx+k*Lx*Ly] );
Hy[(4*i+2)+j*Lx+k*Lx*Ly] = Hy[(4*i+2)+j*Lx+k*Lx*Ly] + mx * (Ez[(4*i+3)+j*Lx+k*Lx*Ly] - Ez[(4*i+2)+j*Lx+k*Lx*Ly] - Ex[(4*i+2)+j*Lx+(k+1)*Lx*Ly] + Ex[(4*i+2)+j*Lx+k*Lx*Ly] );
Hy[(4*i+3)+j*Lx+k*Lx*Ly] = Hy[(4*i+3)+j*Lx+k*Lx*Ly] + mx * (Ez[(4*i+4)+j*Lx+k*Lx*Ly] - Ez[(4*i+3)+j*Lx+k*Lx*Ly] - Ex[(4*i+3)+j*Lx+(k+1)*Lx*Ly] + Ex[(4*i+3)+j*Lx+k*Lx*Ly] );
Hz[4*i+j*Lx+k*Lx*Ly] = Hz[4*i+j*Lx+k*Lx*Ly] + mx * (Ex[4*i+(j+1)*Lx+k*Lx*Ly] - Ex[4*i+j*Lx+k*Lx*Ly] - Ey[4*i+1+j*Lx+k*Lx*Ly] + Ey[4*i+j*Lx+k*Lx*Ly] );
Hz[(4*i+1)+j*Lx+k*Lx*Ly] = Hz[(4*i+1)+j*Lx+k*Lx*Ly] + mx * (Ex[(4*i+1)+(j+1)*Lx+k*Lx*Ly] - Ex[(4*i+1)+j*Lx+k*Lx*Ly] - Ey[(4*i+2)+j*Lx+k*Lx*Ly] + Ey[(4*i+1)+j*Lx+k*Lx*Ly] );
Hz[(4*i+2)+j*Lx+k*Lx*Ly] = Hz[(4*i+2)+j*Lx+k*Lx*Ly] + mx * (Ex[(4*i+2)+(j+1)*Lx+k*Lx*Ly] - Ex[(4*i+2)+j*Lx+k*Lx*Ly] - Ey[(4*i+3)+j*Lx+k*Lx*Ly] + Ey[(4*i+2)+j*Lx+k*Lx*Ly] );
Hz[(4*i+3)+j*Lx+k*Lx*Ly] = Hz[(4*i+3)+j*Lx+k*Lx*Ly] + mx * (Ex[(4*i+3)+(j+1)*Lx+k*Lx*Ly] - Ex[(4*i+3)+j*Lx+k*Lx*Ly] - Ey[(4*i+4)+j*Lx+k*Lx*Ly] + Ey[(4*i+3)+j*Lx+k*Lx*Ly] );
}
}
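// WaveStepE: FDTD update of the electric-field components from the curl of H.
// A hard source is injected at the center cell of Ez, and Ez is copied into the
// debug output array "out".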
__global__ void WaveStepE (float *Hx, float *Hy, float *Hz, float *Ex, float *Ey, float *Ez, int Lx, int Ly,int Lz, float field, float *ez, float *out)
{
int i = blockIdx.x * (blockDim.x) + threadIdx.x;
int j = blockIdx.y * (blockDim.y) + threadIdx.y;
int k = blockIdx.z * (blockDim.z) + threadIdx.z;
// if( j>1 && i>1 && k>1 && j<Ly-1 && i<Lx-1 && k<Lz-1)
if( j>0 && i>0 && k>0 )
{
Ex[4*i+j*Lx+k*Lx*Ly] = Ex[4*i+j*Lx+k*Lx*Ly] + ez[4*i+j*Lx+k*Lx*Ly] * (Hz[4*i+j*Lx+k*Lx*Ly] - Hz[4*i+(j-1)*Lx+k*Lx*Ly] - Hy[4*i+j*Lx+k*Lx*Ly] + Hy[4*i+j*Lx+(k-1)*Lx*Ly] );
Ex[(4*i+1)+j*Lx+k*Lx*Ly] = Ex[(4*i+1)+j*Lx+k*Lx*Ly] + ez[(4*i+1)+j*Lx+k*Lx*Ly] * (Hz[(4*i+1)+j*Lx+k*Lx*Ly] - Hz[(4*i+1)+(j-1)*Lx+k*Lx*Ly] - Hy[(4*i+1)+j*Lx+k*Lx*Ly] + Hy[(4*i+1)+j*Lx+(k-1)*Lx*Ly] );
Ex[(4*i+2)+j*Lx+k*Lx*Ly] = Ex[(4*i+2)+j*Lx+k*Lx*Ly] + ez[(4*i+2)+j*Lx+k*Lx*Ly] * (Hz[(4*i+2)+j*Lx+k*Lx*Ly] - Hz[(4*i+2)+(j-1)*Lx+k*Lx*Ly] - Hy[(4*i+2)+j*Lx+k*Lx*Ly] + Hy[(4*i+2)+j*Lx+(k-1)*Lx*Ly] );
Ex[(4*i+3)+j*Lx+k*Lx*Ly] = Ex[(4*i+3)+j*Lx+k*Lx*Ly] + ez[(4*i+3)+j*Lx+k*Lx*Ly] * (Hz[(4*i+3)+j*Lx+k*Lx*Ly] - Hz[(4*i+3)+(j-1)*Lx+k*Lx*Ly] - Hy[(4*i+3)+j*Lx+k*Lx*Ly] + Hy[(4*i+3)+j*Lx+(k-1)*Lx*Ly] );
Ey[4*i+j*Lx+k*Lx*Ly] = Ey[4*i+j*Lx+k*Lx*Ly] + ez[4*i+j*Lx+k*Lx*Ly] * (Hx[4*i+j*Lx+k*Lx*Ly] - Hx[4*i+j*Lx+(k-1)*Lx*Ly] - Hz[4*i+j*Lx+k*Lx*Ly] + Hz[4*i-1+j*Lx+k*Lx*Ly] );
Ey[(4*i+1)+j*Lx+k*Lx*Ly] = Ey[(4*i+1)+j*Lx+k*Lx*Ly] + ez[(4*i+1)+j*Lx+k*Lx*Ly] * (Hx[(4*i+1)+j*Lx+k*Lx*Ly] - Hx[(4*i+1)+j*Lx+(k-1)*Lx*Ly] - Hz[(4*i+1)+j*Lx+k*Lx*Ly] + Hz[(4*i)+j*Lx+k*Lx*Ly] );
Ey[(4*i+2)+j*Lx+k*Lx*Ly] = Ey[(4*i+2)+j*Lx+k*Lx*Ly] + ez[(4*i+2)+j*Lx+k*Lx*Ly] * (Hx[(4*i+2)+j*Lx+k*Lx*Ly] - Hx[(4*i+2)+j*Lx+(k-1)*Lx*Ly] - Hz[(4*i+2)+j*Lx+k*Lx*Ly] + Hz[(4*i+1)+j*Lx+k*Lx*Ly] );
Ey[(4*i+3)+j*Lx+k*Lx*Ly] = Ey[(4*i+3)+j*Lx+k*Lx*Ly] + ez[(4*i+3)+j*Lx+k*Lx*Ly] * (Hx[(4*i+3)+j*Lx+k*Lx*Ly] - Hx[(4*i+3)+j*Lx+(k-1)*Lx*Ly] - Hz[(4*i+3)+j*Lx+k*Lx*Ly] + Hz[(4*i+2)+j*Lx+k*Lx*Ly] );
Ez[4*i+j*Lx+k*Lx*Ly] = Ez[4*i+j*Lx+k*Lx*Ly] + ez[4*i+j*Lx+k*Lx*Ly] * (Hy[4*i+j*Lx+k*Lx*Ly] - Hy[4*i-1+j*Lx+k*Lx*Ly] - Hx[4*i+j*Lx+k*Lx*Ly] + Hx[4*i+(j-1)*Lx+k*Lx*Ly] );
Ez[(4*i+1)+j*Lx+k*Lx*Ly] = Ez[(4*i+1)+j*Lx+k*Lx*Ly] + ez[(4*i+1)+j*Lx+k*Lx*Ly] * (Hy[(4*i+1)+j*Lx+k*Lx*Ly] - Hy[(4*i)+j*Lx+k*Lx*Ly] - Hx[(4*i+1)+j*Lx+k*Lx*Ly] + Hx[(4*i+1)+(j-1)*Lx+k*Lx*Ly] );
Ez[(4*i+2)+j*Lx+k*Lx*Ly] = Ez[(4*i+2)+j*Lx+k*Lx*Ly] + ez[(4*i+2)+j*Lx+k*Lx*Ly] * (Hy[(4*i+2)+j*Lx+k*Lx*Ly] - Hy[(4*i+1)+j*Lx+k*Lx*Ly] - Hx[(4*i+2)+j*Lx+k*Lx*Ly] + Hx[(4*i+2)+(j-1)*Lx+k*Lx*Ly] );
Ez[(4*i+3)+j*Lx+k*Lx*Ly] = Ez[(4*i+3)+j*Lx+k*Lx*Ly] + ez[(4*i+3)+j*Lx+k*Lx*Ly] * (Hy[(4*i+3)+j*Lx+k*Lx*Ly] - Hy[(4*i+2)+j*Lx+k*Lx*Ly] - Hx[(4*i+3)+j*Lx+k*Lx*Ly] + Hx[(4*i+3)+(j-1)*Lx+k*Lx*Ly] );
Ez[Lx*(Ly/2)+Lx/2+(Lz/2)*Lx*Ly]=field; ////// source (injected at the domain center)
out[4*i+j*Lx+k*Lx*Ly]=Ez[4*i+j*Lx+k*Lx*Ly];
out[(4*i+1)+j*Lx+k*Lx*Ly]=Ez[(4*i+1)+j*Lx+k*Lx*Ly];
out[(4*i+2)+j*Lx+k*Lx*Ly]=Ez[(4*i+2)+j*Lx+k*Lx*Ly];
out[(4*i+3)+j*Lx+k*Lx*Ly]=Ez[(4*i+3)+j*Lx+k*Lx*Ly];
}
}
int main (int argc, char **argv)
{
int i,j,k;
/////////// Set Domain Sizes ////////////
int Lx=32; //computational x size set by user
int Ly=32; //computational y size set by user
int Lz=32;
dim3 dimBlock (BLOCKSIZEX, BLOCKSIZEY,BLOCKSIZEZ); //dimensions of threads block
dim3 dimGrid (( Lx / (4*dimBlock.x) + ( Lx % (4*dimBlock.x) == 0?0:1)), ( Ly / (dimBlock.y) + ( Ly % (dimBlock.y) == 0?0:1)), ( Lz / (dimBlock.z) + ( Lz % (dimBlock.z) == 0?0:1))); //grid size that fits the user domain
Lx=4*dimBlock.x*dimGrid.x; //computational x size
Ly=dimBlock.y*dimGrid.y; //computational y size
Lz=dimBlock.z*dimGrid.z;
int D=Lx*Ly*Lz; //total number of cells in the computational domain.
int Dsize=D*sizeof(float);
//////////////////////////////////////////
// printf("%d %d\n",Lx,Ly);
///////////////////////////Physical Quantities/////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
float pi=4.0*atan(1.0);
float muo=4.0*pi*1.0e-7; // Permeability of free space
float epso=8.854e-12; // Permittivity of free space
float co=1.0/sqrt(muo*epso); // Speed of light in free space
//aimp=sqrt(muo/epso); // Wave impedance in free space
float dx=0.0001; // FDTD cell size
float dt=dx/co/sqrt(3.0); // Time step size
////////// electrical permittivity ////////
float *ez_h;
ez_h = (float *)malloc(Dsize);
float *ez;
hipMalloc ((void **) &ez, Dsize);
////////////////////////////////////////////
for(i=0;i<Lx;i++)
{
for(j=0;j<Ly;j++)
{
for(k=0;k<Lz;k++)
{
ez_h[i+Lx*j+Lx*Ly*k]=dt/(epso*dx);
}
}
}
float mx = dt/muo/dx;
float my = mx;
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////// Allocate Ex field arrays ////////
float *Ex_h;
Ex_h = (float *)malloc(Dsize);
float *Ex;
hipMalloc ((void **) &Ex, Dsize);
////////// Allocate Ey field arrays ////////
float *Ey_h;
Ey_h = (float *)malloc(Dsize);
float *Ey;
hipMalloc ((void **) &Ey, Dsize);
////////// Allocate Ez field arrays ////////
float *Ez_h;
Ez_h = (float *)malloc(Dsize);
float *Ez;
hipMalloc ((void **) &Ez, Dsize);
////////// Allocate Hx field arrays ////////
float *Hx_h;
Hx_h = (float *)malloc(Dsize);
float *Hx;
hipMalloc ((void **) &Hx, Dsize);
////////// Allocate Hy field arrays ////////
float *Hy_h;
Hy_h = (float *)malloc(Dsize);
float *Hy;
hipMalloc ((void **) &Hy, Dsize);
////////// Allocate Hz field arrays ////////
float *Hz_h;
Hz_h = (float *)malloc(Dsize);
float *Hz;
hipMalloc ((void **) &Hz, Dsize);
////////// Debug output array ////////
float *out_h;
out_h = (float *)malloc(Dsize);
float *out;
hipMalloc ((void **) &out, Dsize);
////////////////////////////////////////////
/////////// Null Field initial condition //////
for(i=0;i<Lx;i++) // initialize exactly Lx*Ly*Lz cells (the arrays hold D elements)
{
for(j=0;j<Ly;j++)
{
for(k=0;k<Lz;k++)
{
Ex_h[i+Lx*j+Lx*Ly*k]=0.f;
Ey_h[i+Lx*j+Lx*Ly*k]=0.f;
Ez_h[i+Lx*j+Lx*Ly*k]=0.f;
Hx_h[i+Lx*j+Lx*Ly*k]=0.f;
Hy_h[i+Lx*j+Lx*Ly*k]=0.f;
Hz_h[i+Lx*j+Lx*Ly*k]=0.f;
}
}
}
///////////////////////////////////////////////////////////
/////////////// Copying data to Device /////////////////////
hipMemcpy (Ex, Ex_h, Dsize, hipMemcpyHostToDevice);
hipMemcpy (Ey, Ey_h, Dsize, hipMemcpyHostToDevice);
hipMemcpy (Ez, Ez_h, Dsize, hipMemcpyHostToDevice);
hipMemcpy (Hx, Hx_h, Dsize, hipMemcpyHostToDevice);
hipMemcpy (Hy, Hy_h, Dsize, hipMemcpyHostToDevice);
hipMemcpy (Hz, Hz_h, Dsize, hipMemcpyHostToDevice);
hipMemcpy (ez, ez_h, Dsize, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////
////////////////////Time iteration ////////////////////////
int T=4000;
int b = 25.0;
float dum,voltage,field;
for (int t = 0; t < T; t = t + 1) // time-stepping loop
{
dum = (4.0/b/dt)*(t*dt-b*dt);
voltage = 2.0*dum*exp(-(pow(dum,2.f)));
// if(t<50)
// {
field = voltage/dx;
// }
// else
// {
// field=Ez_h[Lx*(Ly/2)+Lx/2+(Lz/2)*Lx*Ly];
//
// }
hipLaunchKernelGGL(( WaveStepH) , dim3(dimGrid), dim3(dimBlock) , 0, 0, Hx, Hy, Hz, Ex, Ey, Ez, Lx, Ly,Lz, mx, my,out);
hipLaunchKernelGGL(( WaveStepE) , dim3(dimGrid), dim3(dimBlock) , 0, 0, Hx, Hy, Hz, Ex, Ey, Ez, Lx, Ly,Lz, field, ez,out);
checkCUDAError ("kernel invocation");
hipMemcpy (out_h, out, Dsize, hipMemcpyDeviceToHost);
// hipMemcpy (Ez_h, Ez, Dsize, hipMemcpyDeviceToHost);
// hipMemcpy (Hy_h, Hy, Dsize, hipMemcpyDeviceToHost);
checkCUDAError ("getting data from device");
// dprint (Ez_h, Dx, Dy);
// if (t%10==0)
// {
// cria_arquivo(out_h, Lx , Ly , t, Lx , Ly , Lz );
// }
dprint ( out_h, Lx , Ly , Lx , Ly , Lz );
}
// mprint ( Ez_h, Lx , Ly , Lx , Ly , Lz );
}
///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
void checkCUDAError (const char *msg)
{
hipError_t err = hipGetLastError ();
if (hipSuccess != err)
{
fprintf (stderr, "Cuda error: %s: %s.\n", msg,
hipGetErrorString (err));
exit (EXIT_FAILURE);
}
}
///////////////////////////////////////////////////////////
void dprint (float *campo, int x, int y, int Lx, int Ly, int Lz)
{
for(int j = 0; j < y; j++)
{
for(int i = 0; i < x; i++)
{
// if (campo[i+j*Lx+k*Lx*Ly]!=0.f)
{
printf("%d %d %f\n",i,j,campo[i+j*Lx+(Lz/2)*Lx*Ly]);
// printf("i=%d j=%d campo=%f\n",i,j,campo[i+j*Lx+(Lz/2)*Lx*Ly]);
}
}
}
}
///////////////////////////////////////////////////////////////////
void mprint (float *campo, int x, int y, int Lx, int Ly, int Lz )
{
for(int j = 0;j < y; j++)
{
for(int i = 0; i < x; i++)
{
printf("%g ", campo[i+j*Lx+(Lz/2)*Lx*Ly]);
}
printf("\n");
}
printf("\n");
}
//////////////////////////////////////////////////////////////////////
void midprint (float *campo, int Lx, int Ly, int Lz)
{
printf("%f ", campo[Lx*(Ly/2)+Lx/2+(Lz/2)*Lx*Ly]);
printf("\n");
}
/////////////////////////////////////////////////////
void cria_arquivo(float *campo, int x, int y, int t, int Lx, int Ly, int Lz)
{
FILE *onda;
//remove("DATOS_ONDA");
onda = fopen ("DATOS_ONDA", "a");
fprintf(onda, "valor de t=%d ******************************************************************** \n",t);
for(int j = 0; j < y; j++)
{
for(int i = 0; i < x; i++)
{
fprintf(onda, " i=%d j=%d campo=%f \n",i,j,campo[i+j*Lx+(Lz/2)*Lx*Ly]);
}
}
fclose(onda);
}
| a1ad08f8cc974cb204f4530deaca0a84ce746b14.cu | #include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <unistd.h>
#define BLOCKSIZEX 8
#define BLOCKSIZEY 8
#define BLOCKSIZEZ 8
void checkCUDAError (const char *msg);
void dprint (float *campo, int x, int y, int Lx, int Ly, int Lz);
void mprint (float *campo, int x, int y, int Lx, int Ly, int Lz);
void midprint (float *campo, int Lx, int Ly, int Lz);
void cria_arquivo(float *campo, int x, int y,int t, int Lx, int Ly, int Lz);
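// WaveStepH: FDTD (Yee-scheme) update of the magnetic-field components from the curl of E.
// Each thread updates four consecutive cells in x (manual 4-way unroll).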
__global__ void WaveStepH (float *Hx, float *Hy, float *Hz, float *Ex, float *Ey, float *Ez, int Lx, int Ly, int Lz, float mx, float my, float *out)
{
int i = blockIdx.x * (blockDim.x) + threadIdx.x;
int j = blockIdx.y * (blockDim.y) + threadIdx.y;
int k = blockIdx.z * (blockDim.z) + threadIdx.z;
if( j< Ly && i<Lx && k<Lz)
{
Hx[4*i+j*Lx+k*Lx*Ly] = Hx[4*i+j*Lx+k*Lx*Ly] + mx * (Ey[4*i+j*Lx+(k+1)*Lx*Ly] - Ey[4*i+j*Lx+k*Lx*Ly] - Ez[4*i+(j+1)*Lx+k*Lx*Ly] + Ez[4*i+j*Lx+k*Lx*Ly] );
Hx[(4*i+1)+j*Lx+k*Lx*Ly] = Hx[(4*i+1)+j*Lx+k*Lx*Ly] + mx * (Ey[(4*i+1)+j*Lx+(k+1)*Lx*Ly] - Ey[(4*i+1)+j*Lx+k*Lx*Ly] - Ez[(4*i+1)+(j+1)*Lx+k*Lx*Ly] + Ez[(4*i+1)+j*Lx+k*Lx*Ly] );
Hx[(4*i+2)+j*Lx+k*Lx*Ly] = Hx[(4*i+2)+j*Lx+k*Lx*Ly] + mx * (Ey[(4*i+2)+j*Lx+(k+1)*Lx*Ly] - Ey[(4*i+2)+j*Lx+k*Lx*Ly] - Ez[(4*i+2)+(j+1)*Lx+k*Lx*Ly] + Ez[(4*i+2)+j*Lx+k*Lx*Ly] );
Hx[(4*i+3)+j*Lx+k*Lx*Ly] = Hx[(4*i+3)+j*Lx+k*Lx*Ly] + mx * (Ey[(4*i+3)+j*Lx+(k+1)*Lx*Ly] - Ey[(4*i+3)+j*Lx+k*Lx*Ly] - Ez[(4*i+3)+(j+1)*Lx+k*Lx*Ly] + Ez[(4*i+3)+j*Lx+k*Lx*Ly] );
Hy[4*i+j*Lx+k*Lx*Ly] = Hy[4*i+j*Lx+k*Lx*Ly] + mx * (Ez[4*i+1+j*Lx+k*Lx*Ly] - Ez[4*i+j*Lx+k*Lx*Ly] - Ex[4*i+j*Lx+(k+1)*Lx*Ly] + Ex[4*i+j*Lx+k*Lx*Ly] );
Hy[(4*i+1)+j*Lx+k*Lx*Ly] = Hy[(4*i+1)+j*Lx+k*Lx*Ly] + mx * (Ez[(4*i+2)+j*Lx+k*Lx*Ly] - Ez[(4*i+1)+j*Lx+k*Lx*Ly] - Ex[(4*i+1)+j*Lx+(k+1)*Lx*Ly] + Ex[(4*i+1)+j*Lx+k*Lx*Ly] );
Hy[(4*i+2)+j*Lx+k*Lx*Ly] = Hy[(4*i+2)+j*Lx+k*Lx*Ly] + mx * (Ez[(4*i+3)+j*Lx+k*Lx*Ly] - Ez[(4*i+2)+j*Lx+k*Lx*Ly] - Ex[(4*i+2)+j*Lx+(k+1)*Lx*Ly] + Ex[(4*i+2)+j*Lx+k*Lx*Ly] );
Hy[(4*i+3)+j*Lx+k*Lx*Ly] = Hy[(4*i+3)+j*Lx+k*Lx*Ly] + mx * (Ez[(4*i+4)+j*Lx+k*Lx*Ly] - Ez[(4*i+3)+j*Lx+k*Lx*Ly] - Ex[(4*i+3)+j*Lx+(k+1)*Lx*Ly] + Ex[(4*i+3)+j*Lx+k*Lx*Ly] );
Hz[4*i+j*Lx+k*Lx*Ly] = Hz[4*i+j*Lx+k*Lx*Ly] + mx * (Ex[4*i+(j+1)*Lx+k*Lx*Ly] - Ex[4*i+j*Lx+k*Lx*Ly] - Ey[4*i+1+j*Lx+k*Lx*Ly] + Ey[4*i+j*Lx+k*Lx*Ly] );
Hz[(4*i+1)+j*Lx+k*Lx*Ly] = Hz[(4*i+1)+j*Lx+k*Lx*Ly] + mx * (Ex[(4*i+1)+(j+1)*Lx+k*Lx*Ly] - Ex[(4*i+1)+j*Lx+k*Lx*Ly] - Ey[(4*i+2)+j*Lx+k*Lx*Ly] + Ey[(4*i+1)+j*Lx+k*Lx*Ly] );
Hz[(4*i+2)+j*Lx+k*Lx*Ly] = Hz[(4*i+2)+j*Lx+k*Lx*Ly] + mx * (Ex[(4*i+2)+(j+1)*Lx+k*Lx*Ly] - Ex[(4*i+2)+j*Lx+k*Lx*Ly] - Ey[(4*i+3)+j*Lx+k*Lx*Ly] + Ey[(4*i+2)+j*Lx+k*Lx*Ly] );
Hz[(4*i+3)+j*Lx+k*Lx*Ly] = Hz[(4*i+3)+j*Lx+k*Lx*Ly] + mx * (Ex[(4*i+3)+(j+1)*Lx+k*Lx*Ly] - Ex[(4*i+3)+j*Lx+k*Lx*Ly] - Ey[(4*i+4)+j*Lx+k*Lx*Ly] + Ey[(4*i+3)+j*Lx+k*Lx*Ly] );
}
}
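// WaveStepE: FDTD update of the electric-field components from the curl of H.
// A hard source is injected at the center cell of Ez, and Ez is copied into the
// debug output array "out".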
__global__ void WaveStepE (float *Hx, float *Hy, float *Hz, float *Ex, float *Ey, float *Ez, int Lx, int Ly,int Lz, float field, float *ez, float *out)
{
int i = blockIdx.x * (blockDim.x) + threadIdx.x;
int j = blockIdx.y * (blockDim.y) + threadIdx.y;
int k = blockIdx.z * (blockDim.z) + threadIdx.z;
// if( j>1 && i>1 && k>1 && j<Ly-1 && i<Lx-1 && k<Lz-1)
if( j>0 && i>0 && k>0 )
{
Ex[4*i+j*Lx+k*Lx*Ly] = Ex[4*i+j*Lx+k*Lx*Ly] + ez[4*i+j*Lx+k*Lx*Ly] * (Hz[4*i+j*Lx+k*Lx*Ly] - Hz[4*i+(j-1)*Lx+k*Lx*Ly] - Hy[4*i+j*Lx+k*Lx*Ly] + Hy[4*i+j*Lx+(k-1)*Lx*Ly] );
Ex[(4*i+1)+j*Lx+k*Lx*Ly] = Ex[(4*i+1)+j*Lx+k*Lx*Ly] + ez[(4*i+1)+j*Lx+k*Lx*Ly] * (Hz[(4*i+1)+j*Lx+k*Lx*Ly] - Hz[(4*i+1)+(j-1)*Lx+k*Lx*Ly] - Hy[(4*i+1)+j*Lx+k*Lx*Ly] + Hy[(4*i+1)+j*Lx+(k-1)*Lx*Ly] );
Ex[(4*i+2)+j*Lx+k*Lx*Ly] = Ex[(4*i+2)+j*Lx+k*Lx*Ly] + ez[(4*i+2)+j*Lx+k*Lx*Ly] * (Hz[(4*i+2)+j*Lx+k*Lx*Ly] - Hz[(4*i+2)+(j-1)*Lx+k*Lx*Ly] - Hy[(4*i+2)+j*Lx+k*Lx*Ly] + Hy[(4*i+2)+j*Lx+(k-1)*Lx*Ly] );
Ex[(4*i+3)+j*Lx+k*Lx*Ly] = Ex[(4*i+3)+j*Lx+k*Lx*Ly] + ez[(4*i+3)+j*Lx+k*Lx*Ly] * (Hz[(4*i+3)+j*Lx+k*Lx*Ly] - Hz[(4*i+3)+(j-1)*Lx+k*Lx*Ly] - Hy[(4*i+3)+j*Lx+k*Lx*Ly] + Hy[(4*i+3)+j*Lx+(k-1)*Lx*Ly] );
Ey[4*i+j*Lx+k*Lx*Ly] = Ey[4*i+j*Lx+k*Lx*Ly] + ez[4*i+j*Lx+k*Lx*Ly] * (Hx[4*i+j*Lx+k*Lx*Ly] - Hx[4*i+j*Lx+(k-1)*Lx*Ly] - Hz[4*i+j*Lx+k*Lx*Ly] + Hz[4*i-1+j*Lx+k*Lx*Ly] );
Ey[(4*i+1)+j*Lx+k*Lx*Ly] = Ey[(4*i+1)+j*Lx+k*Lx*Ly] + ez[(4*i+1)+j*Lx+k*Lx*Ly] * (Hx[(4*i+1)+j*Lx+k*Lx*Ly] - Hx[(4*i+1)+j*Lx+(k-1)*Lx*Ly] - Hz[(4*i+1)+j*Lx+k*Lx*Ly] + Hz[(4*i)+j*Lx+k*Lx*Ly] );
Ey[(4*i+2)+j*Lx+k*Lx*Ly] = Ey[(4*i+2)+j*Lx+k*Lx*Ly] + ez[(4*i+2)+j*Lx+k*Lx*Ly] * (Hx[(4*i+2)+j*Lx+k*Lx*Ly] - Hx[(4*i+2)+j*Lx+(k-1)*Lx*Ly] - Hz[(4*i+2)+j*Lx+k*Lx*Ly] + Hz[(4*i+1)+j*Lx+k*Lx*Ly] );
Ey[(4*i+3)+j*Lx+k*Lx*Ly] = Ey[(4*i+3)+j*Lx+k*Lx*Ly] + ez[(4*i+3)+j*Lx+k*Lx*Ly] * (Hx[(4*i+3)+j*Lx+k*Lx*Ly] - Hx[(4*i+3)+j*Lx+(k-1)*Lx*Ly] - Hz[(4*i+3)+j*Lx+k*Lx*Ly] + Hz[(4*i+2)+j*Lx+k*Lx*Ly] );
Ez[4*i+j*Lx+k*Lx*Ly] = Ez[4*i+j*Lx+k*Lx*Ly] + ez[4*i+j*Lx+k*Lx*Ly] * (Hy[4*i+j*Lx+k*Lx*Ly] - Hy[4*i-1+j*Lx+k*Lx*Ly] - Hx[4*i+j*Lx+k*Lx*Ly] + Hx[4*i+(j-1)*Lx+k*Lx*Ly] );
Ez[(4*i+1)+j*Lx+k*Lx*Ly] = Ez[(4*i+1)+j*Lx+k*Lx*Ly] + ez[(4*i+1)+j*Lx+k*Lx*Ly] * (Hy[(4*i+1)+j*Lx+k*Lx*Ly] - Hy[(4*i)+j*Lx+k*Lx*Ly] - Hx[(4*i+1)+j*Lx+k*Lx*Ly] + Hx[(4*i+1)+(j-1)*Lx+k*Lx*Ly] );
Ez[(4*i+2)+j*Lx+k*Lx*Ly] = Ez[(4*i+2)+j*Lx+k*Lx*Ly] + ez[(4*i+2)+j*Lx+k*Lx*Ly] * (Hy[(4*i+2)+j*Lx+k*Lx*Ly] - Hy[(4*i+1)+j*Lx+k*Lx*Ly] - Hx[(4*i+2)+j*Lx+k*Lx*Ly] + Hx[(4*i+2)+(j-1)*Lx+k*Lx*Ly] );
Ez[(4*i+3)+j*Lx+k*Lx*Ly] = Ez[(4*i+3)+j*Lx+k*Lx*Ly] + ez[(4*i+3)+j*Lx+k*Lx*Ly] * (Hy[(4*i+3)+j*Lx+k*Lx*Ly] - Hy[(4*i+2)+j*Lx+k*Lx*Ly] - Hx[(4*i+3)+j*Lx+k*Lx*Ly] + Hx[(4*i+3)+(j-1)*Lx+k*Lx*Ly] );
Ez[Lx*(Ly/2)+Lx/2+(Lz/2)*Lx*Ly]=field; ////// source (injected at the domain center)
out[4*i+j*Lx+k*Lx*Ly]=Ez[4*i+j*Lx+k*Lx*Ly];
out[(4*i+1)+j*Lx+k*Lx*Ly]=Ez[(4*i+1)+j*Lx+k*Lx*Ly];
out[(4*i+2)+j*Lx+k*Lx*Ly]=Ez[(4*i+2)+j*Lx+k*Lx*Ly];
out[(4*i+3)+j*Lx+k*Lx*Ly]=Ez[(4*i+3)+j*Lx+k*Lx*Ly];
}
}
int main (int argc, char **argv)
{
int i,j,k;
/////////// Set Domain Sizes ////////////
int Lx=32; //computational x size set by user
int Ly=32; //computational y size set by user
int Lz=32;
dim3 dimBlock (BLOCKSIZEX, BLOCKSIZEY,BLOCKSIZEZ); //dimensions of threads block
dim3 dimGrid (( Lx / (4*dimBlock.x) + ( Lx % (4*dimBlock.x) == 0?0:1)), ( Ly / (dimBlock.y) + ( Ly % (dimBlock.y) == 0?0:1)), ( Lz / (dimBlock.z) + ( Lz % (dimBlock.z) == 0?0:1))); //grid size that fits the user domain
Lx=4*dimBlock.x*dimGrid.x; //computational x size
Ly=dimBlock.y*dimGrid.y; //computational y size
Lz=dimBlock.z*dimGrid.z;
int D=Lx*Ly*Lz; //total computational domais.
int Dsize=D*sizeof(float);
//////////////////////////////////////////
// printf("%d %d\n",Lx,Ly);
///////////////////////////Physical Quantities/////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
float pi=4.0*atan(1.0);
float muo=4.0*pi*1.0e-7; // Permeability of free space
float epso=8.854e-12; // Permittivity of free space
float co=1.0/sqrt(muo*epso); // Speed of light in free space
//aimp=sqrt(muo/epso); // Wave impedance in free space
float dx=0.0001; // FDTD cell size
float dt=dx/co/sqrt(3.0); // Time step size
////////// electrical permittivity ////////
float *ez_h;
ez_h = (float *)malloc(Dsize);
float *ez;
cudaMalloc ((void **) &ez, Dsize);
////////////////////////////////////////////
for(i=0;i<Lx;i++)
{
for(j=0;j<Ly;j++)
{
for(k=0;k<Lz;k++)
{
ez_h[i+Lx*j+Lx*Ly*k]=dt/(epso*dx);
}
}
}
float mx = dt/muo/dx;
float my = mx;
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////// Allocate Ex field arrays ////////
float *Ex_h;
Ex_h = (float *)malloc(Dsize);
float *Ex;
cudaMalloc ((void **) &Ex, Dsize);
////////// Allocate Ey field arrays ////////
float *Ey_h;
Ey_h = (float *)malloc(Dsize);
float *Ey;
cudaMalloc ((void **) &Ey, Dsize);
////////// Allocate Ez field arrays ////////
float *Ez_h;
Ez_h = (float *)malloc(Dsize);
float *Ez;
cudaMalloc ((void **) &Ez, Dsize);
////////// Allocate Hx field arrays ////////
float *Hx_h;
Hx_h = (float *)malloc(Dsize);
float *Hx;
cudaMalloc ((void **) &Hx, Dsize);
////////// Allocate Hy field arrays ////////
float *Hy_h;
Hy_h = (float *)malloc(Dsize);
float *Hy;
cudaMalloc ((void **) &Hy, Dsize);
////////// Allocate Hz field arrays ////////
float *Hz_h;
Hz_h = (float *)malloc(Dsize);
float *Hz;
cudaMalloc ((void **) &Hz, Dsize);
////////// Debug output array ////////
float *out_h;
out_h = (float *)malloc(Dsize);
float *out;
cudaMalloc ((void **) &out, Dsize);
////////////////////////////////////////////
/////////// Null Field initial condition //////
for(i=0;i<Lx;i++) // initialize exactly Lx*Ly*Lz cells (the arrays hold D elements)
{
for(j=0;j<Ly;j++)
{
for(k=0;k<Lz;k++)
{
Ex_h[i+Lx*j+Lx*Ly*k]=0.f;
Ey_h[i+Lx*j+Lx*Ly*k]=0.f;
Ez_h[i+Lx*j+Lx*Ly*k]=0.f;
Hx_h[i+Lx*j+Lx*Ly*k]=0.f;
Hy_h[i+Lx*j+Lx*Ly*k]=0.f;
Hz_h[i+Lx*j+Lx*Ly*k]=0.f;
}
}
}
///////////////////////////////////////////////////////////
/////////////// Copying data to Device /////////////////////
cudaMemcpy (Ex, Ex_h, Dsize, cudaMemcpyHostToDevice);
cudaMemcpy (Ey, Ey_h, Dsize, cudaMemcpyHostToDevice);
cudaMemcpy (Ez, Ez_h, Dsize, cudaMemcpyHostToDevice);
cudaMemcpy (Hx, Hx_h, Dsize, cudaMemcpyHostToDevice);
cudaMemcpy (Hy, Hy_h, Dsize, cudaMemcpyHostToDevice);
cudaMemcpy (Hz, Hz_h, Dsize, cudaMemcpyHostToDevice);
cudaMemcpy (ez, ez_h, Dsize, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////
////////////////////Time iteration ////////////////////////
int T=4000;
int b = 25.0;
float dum,voltage,field;
for (int t = 0; t < T; t = t + 1) // time-stepping loop
{
dum = (4.0/b/dt)*(t*dt-b*dt);
voltage = 2.0*dum*exp(-(pow(dum,2.f)));
// if(t<50)
// {
field = voltage/dx;
// }
// else
// {
// field=Ez_h[Lx*(Ly/2)+Lx/2+(Lz/2)*Lx*Ly];
//
// }
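// One leapfrog FDTD step: WaveStepH advances the magnetic field from E, then WaveStepE advances the electric field from the updated H and also takes the source value.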
WaveStepH <<< dimGrid, dimBlock >>> (Hx, Hy, Hz, Ex, Ey, Ez, Lx, Ly,Lz, mx, my,out);
WaveStepE <<< dimGrid, dimBlock >>> (Hx, Hy, Hz, Ex, Ey, Ez, Lx, Ly,Lz, field, ez,out);
checkCUDAError ("kernel invocation");
cudaMemcpy (out_h, out, Dsize, cudaMemcpyDeviceToHost);
// cudaMemcpy (Ez_h, Ez, Dsize, cudaMemcpyDeviceToHost);
// cudaMemcpy (Hy_h, Hy, Dsize, cudaMemcpyDeviceToHost);
checkCUDAError ("getting data from device");
// dprint (Ez_h, Dx, Dy);
// if (t%10==0)
// {
// cria_arquivo(out_h, Lx , Ly , t, Lx , Ly , Lz );
// }
dprint ( out_h, Lx , Ly , Lx , Ly , Lz );
}
// mprint ( Ez_h, Lx , Ly , Lx , Ly , Lz );
}
///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
void checkCUDAError (const char *msg)
{
cudaError_t err = cudaGetLastError ();
if (cudaSuccess != err)
{
fprintf (stderr, "Cuda error: %s: %s.\n", msg,
cudaGetErrorString (err));
exit (EXIT_FAILURE);
}
}
///////////////////////////////////////////////////////////
void dprint (float *campo, int x, int y, int Lx, int Ly, int Lz)
{
for(int j = 0; j < y; j++)
{
for(int i = 0; i < x; i++)
{
// if (campo[i+j*Lx+k*Lx*Ly]!=0.f)
{
printf("%d %d %f\n",i,j,campo[i+j*Lx+(Lz/2)*Lx*Ly]);
// printf("i=%d j=%d campo=%f\n",i,j,campo[i+j*Lx+(Lz/2)*Lx*Ly]);
}
}
}
}
///////////////////////////////////////////////////////////////////
void mprint (float *campo, int x, int y, int Lx, int Ly, int Lz )
{
for(int j = 0;j < y; j++)
{
for(int i = 0; i < x; i++)
{
printf("%g ", campo[i+j*Lx+(Lz/2)*Lx*Ly]);
}
printf("\n");
}
printf("\n");
}
//////////////////////////////////////////////////////////////////////
void midprint (float *campo, int Lx, int Ly, int Lz)
{
printf("%f ", campo[Lx*(Ly/2)+Lx/2+(Lz/2)*Lx*Ly]);
printf("\n");
}
/////////////////////////////////////////////////////
void cria_arquivo(float *campo, int x, int y, int t, int Lx, int Ly, int Lz)
{
FILE *onda;
//remove("DATOS_ONDA");
onda = fopen ("DATOS_ONDA", "a");
fprintf(onda, "valor de t=%d ******************************************************************** \n",t);
for(int j = 0; j < y; j++)
{
for(int i = 0; i < x; i++)
{
fprintf(onda, " i=%d j=%d campo=%f \n",i,j,campo[i+j*Lx+(Lz/2)*Lx*Ly]);
}
}
fclose(onda);
}
|
c0f4ee8b712deb652312ec6ea350e67becaf2b1a.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright [2011] [Chris McClanahan]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
#include <hip/hip_runtime.h>
#include <npp.h>
#define CUDA(call) do { \
hipError_t _e = (call); \
if ((_e = hipGetLastError()) != hipSuccess) { \
return printf( "CUDA runtime error: %s\n", \
hipGetErrorString(_e)); \
} hipDeviceSynchronize(); \
} while (0)
#define NPP(call) do { \
NppStatus _e = (call); \
if( NPP_SUCCESS > _e) { \
return printf( "NPP runtime error: "); \
} hipDeviceSynchronize(); \
} while (0)
int main(int argc, char** args) {
// args
int P = 3;
if (argc < P - 1) { return printf("Usage: %s <numel>\n", args[0]); }
// sizes
int numel = atoi(args[P - 2]);
const int bins = 256;
float max = 255;
float min = 0;
// input data
float* h_data = (float*)malloc(numel * sizeof(float));
for (int i = 0; i < numel ; ++i) { h_data[i] = (float)(i % (int)(max + 1)); } // cast: operator% needs integral operands
// gpu mem
Npp32f* d_data = nppsMalloc_32f(numel);
Npp32s* pHist = nppsMalloc_32s(bins);
Npp32f* pLevels = nppsMalloc_32f(bins + 1);
CUDA(hipMemcpy(d_data, h_data, numel * sizeof(float), hipMemcpyHostToDevice));
// input data range
float h_minmax[] = {min, max};
float* d_minmax = nppsMalloc_32f(2);
CUDA(hipMemcpy(d_minmax, &h_minmax, 2 * sizeof(float), hipMemcpyHostToDevice));
// bin spacing
float levels[bins + 1];
float scalar = (max - min) / (float)(bins);
for (int i = 0; i < bins + 1 ; ++i) { levels[i] = (i * scalar); }
levels[bins] = FLT_MAX; // last bin is catch all
CUDA(hipMemcpy(pLevels, levels, (bins + 1) * sizeof(float), hipMemcpyHostToDevice));
// nppihist config
int nLevels = bins + 1; // nLevels boundaries define nLevels-1 = bins histogram bins
int nSrcStep = numel * sizeof(float); // bytes
// nppihist scratch buffer
NppiSize oBuffROI;
oBuffROI.width = numel;
oBuffROI.height = 1;
int buffsize;
NPP(nppiHistogramRangeGetBufferSize_32f_C1R(oBuffROI, nLevels, &buffsize));
Npp8u* pBuffer = nppsMalloc_8u(buffsize);
// nppihist config
NppiSize oSizeROI;
oSizeROI.width = numel;
oSizeROI.height = 1;
// run gpu histogram
NPP(nppiHistogramRange_32f_C1R(d_data,
nSrcStep, oSizeROI,
pHist ,
pLevels, nLevels,
pBuffer
));
// copy back
int* h_hist = (int*)malloc(bins * sizeof(int));
CUDA(hipMemcpy(h_hist, pHist, bins * sizeof(int), hipMemcpyDeviceToHost));
// cpu reference
int* h_ref = (int*)malloc(bins * sizeof(int));
for (int i = 0; i < bins ; ++i) { h_ref[i] = 0; }
for (int i = 0; i < numel ; ++i) {
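// Map each sample to its bin; an index that lands on 'bins' (values at the top edge) is clamped into the last bin, matching the FLT_MAX catch-all level used on the GPU.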
int idx = (h_data[i] - min) / ((max - min) / (float)bins);
idx -= (idx >= bins);
h_ref[idx]++;
}
// compare/print
for (int i = 0; i < bins ; ++i) {
printf("%d g %d c %d\n", i, h_hist[i], h_ref[i]);
}
// cleanup
free(h_ref);
free(h_data);
free(h_hist);
nppsFree(pBuffer);
nppsFree(pHist);
nppsFree(pLevels);
nppsFree(d_data);
nppsFree(d_minmax);
return 0;
}
| c0f4ee8b712deb652312ec6ea350e67becaf2b1a.cu | /*
Copyright [2011] [Chris McClanahan]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
#include <cuda.h>
#include <npp.h>
#define CUDA(call) do { \
cudaError_t _e = (call); \
if ((_e = cudaGetLastError()) != cudaSuccess) { \
return printf( "CUDA runtime error: %s\n", \
cudaGetErrorString(_e)); \
} cudaThreadSynchronize(); \
} while (0)
#define NPP(call) do { \
NppStatus _e = (call); \
if( NPP_SUCCESS > _e) { \
return printf( "NPP runtime error: "); \
} cudaThreadSynchronize(); \
} while (0)
int main(int argc, char** args) {
// args
int P = 3;
if (argc < P - 1) { return printf("Usage: %s <numel>\n", args[0]); }
// sizes
int numel = atoi(args[P - 2]);
const int bins = 256;
float max = 255;
float min = 0;
// input data
float* h_data = (float*)malloc(numel * sizeof(float));
for (int i = 0; i < numel ; ++i) { h_data[i] = (float)(i % (int)(max + 1)); } // cast: operator% needs integral operands
// gpu mem
Npp32f* d_data = nppsMalloc_32f(numel);
Npp32s* pHist = nppsMalloc_32s(bins);
Npp32f* pLevels = nppsMalloc_32f(bins + 1);
CUDA(cudaMemcpy(d_data, h_data, numel * sizeof(float), cudaMemcpyHostToDevice));
// input data range
float h_minmax[] = {min, max};
float* d_minmax = nppsMalloc_32f(2);
CUDA(cudaMemcpy(d_minmax, &h_minmax, 2 * sizeof(float), cudaMemcpyHostToDevice));
// bin spacing
float levels[bins + 1];
float scalar = (max - min) / (float)(bins);
for (int i = 0; i < bins + 1 ; ++i) { levels[i] = (i * scalar); }
levels[bins] = FLT_MAX; // last bin is catch all
CUDA(cudaMemcpy(pLevels, levels, (bins + 1) * sizeof(float), cudaMemcpyHostToDevice));
// nppihist config
int nLevels = bins + 1; // nLevels boundaries define nLevels-1 = bins histogram bins
int nSrcStep = numel * sizeof(float); // bytes
// nppihist scratch buffer
NppiSize oBuffROI;
oBuffROI.width = numel;
oBuffROI.height = 1;
int buffsize;
NPP(nppiHistogramRangeGetBufferSize_32f_C1R(oBuffROI, nLevels, &buffsize));
Npp8u* pBuffer = nppsMalloc_8u(buffsize);
// nppihist config
NppiSize oSizeROI;
oSizeROI.width = numel;
oSizeROI.height = 1;
// run gpu histogram
NPP(nppiHistogramRange_32f_C1R(d_data,
nSrcStep, oSizeROI,
pHist ,
pLevels, nLevels,
pBuffer
));
// copy back
int* h_hist = (int*)malloc(bins * sizeof(int));
CUDA(cudaMemcpy(h_hist, pHist, bins * sizeof(int), cudaMemcpyDeviceToHost));
// cpu reference
int* h_ref = (int*)malloc(bins * sizeof(int));
for (int i = 0; i < bins ; ++i) { h_ref[i] = 0; }
for (int i = 0; i < numel ; ++i) {
int idx = (h_data[i] - min) / ((max - min) / (float)bins);
idx -= (idx >= bins);
h_ref[idx]++;
}
// compare/print
for (int i = 0; i < bins ; ++i) {
printf("%d g %d c %d\n", i, h_hist[i], h_ref[i]);
}
// cleanup
free(h_ref);
free(h_data);
free(h_hist);
nppsFree(pBuffer);
nppsFree(pHist);
nppsFree(pLevels);
nppsFree(d_data);
nppsFree(d_minmax);
return 0;
}
|
223de47f4d3014947d75a52ceb2bbf3772a5e712.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Using different memory spaces in CUDA
#include <stdio.h>
/**********************
* using local memory *
**********************/
// a __device__ or __global__ function runs on the GPU
__global__ void use_local_memory_GPU(float in)
{
float f; // variable "f" is in local memory and private to each thread
f = in; // parameter "in" is in local memory and private to each thread
// ... real code would presumably do other stuff here ...
}
/**********************
* using global memory *
**********************/
// a __global__ function runs on the GPU & can be called from host
__global__ void use_global_memory_GPU(float *array)
{
// "array" is a pointer into global memory on the device
array[threadIdx.x] = 2.0f * (float) threadIdx.x;
}
/**********************
* using shared memory *
**********************/
// (for clarity, hardcoding 128 threads/elements and omitting out-of-bounds checks)
__global__ void use_shared_memory_GPU(float *array)
{
// local variables, private to each thread
int i, index = threadIdx.x;
float average, sum = 0.0f;
// __shared__ variables are visible to all threads in the thread block
// and have the same lifetime as the thread block
__shared__ float sh_arr[128];
// copy data from "array" in global memory to sh_arr in shared memory.
// here, each thread is responsible for copying a single element.
sh_arr[index] = array[index];
__syncthreads(); // ensure all the writes to shared memory have completed
// now, sh_arr is fully populated. Let's find the average of all previous elements
for (i=0; i<index; i++) { sum += sh_arr[i]; }
average = sum / (index + 1.0f);
// if array[index] is greater than the average of array[0..index-1], replace with average.
// since array[] is in global memory, this change will be seen by the host (and potentially
// other thread blocks, if any)
if (array[index] > average) { array[index] = average; }
// the following code has NO EFFECT: it modifies shared memory, but
// the resulting modified data is never copied back to global memory
// and vanishes when the thread block completes
sh_arr[index] = 3.14;
}
int main(int argc, char **argv)
{
/*
* First, call a kernel that shows using local memory
*/
hipLaunchKernelGGL(( use_local_memory_GPU), dim3(1), dim3(128), 0, 0, 2.0f);
/*
* Next, call a kernel that shows using global memory
*/
float h_arr[128]; // convention: h_ variables live on host
float *d_arr; // convention: d_ variables live on device (GPU global mem)
// allocate global memory on the device, place result in "d_arr"
hipMalloc((void **) &d_arr, sizeof(float) * 128);
// now copy data from host memory "h_arr" to device memory "d_arr"
hipMemcpy((void *)d_arr, (void *)h_arr, sizeof(float) * 128, hipMemcpyHostToDevice);
// launch the kernel (1 block of 128 threads)
hipLaunchKernelGGL(( use_global_memory_GPU), dim3(1), dim3(128), 0, 0, d_arr); // modifies the contents of array at d_arr
// copy the modified array back to the host, overwriting contents of h_arr
hipMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128, hipMemcpyDeviceToHost);
// ... do other stuff ...
/*
* Next, call a kernel that shows using shared memory
*/
// as before, pass in a pointer to data in global memory
hipLaunchKernelGGL(( use_shared_memory_GPU), dim3(1), dim3(128), 0, 0, d_arr);
// copy the modified array back to the host
hipMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128, hipMemcpyDeviceToHost);
// ... do other stuff ...
return 0;
}
| 223de47f4d3014947d75a52ceb2bbf3772a5e712.cu | // Using different memory spaces in CUDA
#include <stdio.h>
/**********************
* using local memory *
**********************/
// a __device__ or __global__ function runs on the GPU
__global__ void use_local_memory_GPU(float in)
{
float f; // variable "f" is in local memory and private to each thread
f = in; // parameter "in" is in local memory and private to each thread
// ... real code would presumably do other stuff here ...
}
/**********************
* using global memory *
**********************/
// a __global__ function runs on the GPU & can be called from host
__global__ void use_global_memory_GPU(float *array)
{
// "array" is a pointer into global memory on the device
array[threadIdx.x] = 2.0f * (float) threadIdx.x;
}
/**********************
* using shared memory *
**********************/
// (for clarity, hardcoding 128 threads/elements and omitting out-of-bounds checks)
__global__ void use_shared_memory_GPU(float *array)
{
// local variables, private to each thread
int i, index = threadIdx.x;
float average, sum = 0.0f;
// __shared__ variables are visible to all threads in the thread block
// and have the same lifetime as the thread block
__shared__ float sh_arr[128];
// copy data from "array" in global memory to sh_arr in shared memory.
// here, each thread is responsible for copying a single element.
sh_arr[index] = array[index];
__syncthreads(); // ensure all the writes to shared memory have completed
// now, sh_arr is fully populated. Let's find the average of all previous elements
for (i=0; i<index; i++) { sum += sh_arr[i]; }
average = sum / (index + 1.0f);
// if array[index] is greater than the average of array[0..index-1], replace with average.
// since array[] is in global memory, this change will be seen by the host (and potentially
// other thread blocks, if any)
if (array[index] > average) { array[index] = average; }
// the following code has NO EFFECT: it modifies shared memory, but
// the resulting modified data is never copied back to global memory
// and vanishes when the thread block completes
sh_arr[index] = 3.14;
}
int main(int argc, char **argv)
{
/*
* First, call a kernel that shows using local memory
*/
use_local_memory_GPU<<<1, 128>>>(2.0f);
/*
* Next, call a kernel that shows using global memory
*/
float h_arr[128]; // convention: h_ variables live on host
float *d_arr; // convention: d_ variables live on device (GPU global mem)
// allocate global memory on the device, place result in "d_arr"
cudaMalloc((void **) &d_arr, sizeof(float) * 128);
// now copy data from host memory "h_arr" to device memory "d_arr"
cudaMemcpy((void *)d_arr, (void *)h_arr, sizeof(float) * 128, cudaMemcpyHostToDevice);
// launch the kernel (1 block of 128 threads)
use_global_memory_GPU<<<1, 128>>>(d_arr); // modifies the contents of array at d_arr
// copy the modified array back to the host, overwriting contents of h_arr
cudaMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128, cudaMemcpyDeviceToHost);
// ... do other stuff ...
/*
* Next, call a kernel that shows using shared memory
*/
// as before, pass in a pointer to data in global memory
use_shared_memory_GPU<<<1, 128>>>(d_arr);
// copy the modified array back to the host
cudaMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128, cudaMemcpyDeviceToHost);
// ... do other stuff ...
return 0;
}
|
413943da5bc41bf3817d1f3c4e39526abd0b0179.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
using namespace std;
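// Reverses an integer array in place: thread idx swaps element idx with element size-1-idx,
// staging both in dynamically sized shared memory first. With the single-block launch used
// in main(), this only covers arrays no larger than 2 * blockDim.x elements.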
__global__ void reverse_kernel(int *a, int size){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size / 2){
return;
}
extern __shared__ int a_copy[];
a_copy[idx] = a[idx];
a_copy[size - 1 - idx] = a[size - 1 - idx];
__syncthreads();
int t = a_copy[idx];
a_copy[idx] = a_copy[size - 1 - idx];
a_copy[size - 1 - idx] = t;
__syncthreads();
a[idx] = a_copy[idx];
a[size - 1 - idx] = a_copy[size - 1 - idx];
}
int main(int argc, char **argv){
int n = atoi(argv[1]);
int *h_a;
size_t bytes = n * sizeof(int);
h_a = (int *) malloc(bytes);
for (int i = 0; i < n; i++){
h_a[i] = i;
}
int *d_a;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipMalloc(&d_a, bytes);
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
int blockSize = 1024;
int gridSize = 1;
hipLaunchKernelGGL(( reverse_kernel), dim3(gridSize), dim3(blockSize), n * sizeof(int), 0, d_a, n); // dynamic shared memory for a_copy[]
hipDeviceSynchronize();
hipMemcpy(h_a, d_a, bytes, hipMemcpyDeviceToHost);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cout << milliseconds << endl;
hipFree(d_a);
return 0;
}
| 413943da5bc41bf3817d1f3c4e39526abd0b0179.cu | #include <iostream>
#include <cuda_runtime.h>
using namespace std;
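// Reverses an integer array in place: thread idx swaps element idx with element size-1-idx,
// staging both in dynamically sized shared memory first. With the single-block launch used
// in main(), this only covers arrays no larger than 2 * blockDim.x elements.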
__global__ void reverse_kernel(int *a, int size){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size / 2){
return;
}
extern __shared__ int a_copy[];
a_copy[idx] = a[idx];
a_copy[size - 1 - idx] = a[size - 1 - idx];
__syncthreads();
int t = a_copy[idx];
a_copy[idx] = a_copy[size - 1 - idx];
a_copy[size - 1 - idx] = t;
__syncthreads();
a[idx] = a_copy[idx];
a[size - 1 - idx] = a_copy[size - 1 - idx];
}
int main(int argc, char **argv){
int n = atoi(argv[1]);
int *h_a;
size_t bytes = n * sizeof(int);
h_a = (int *) malloc(bytes);
for (int i = 0; i < n; i++){
h_a[i] = i;
}
int *d_a;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cudaMalloc(&d_a, bytes);
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
int blockSize = 1024;
int gridSize = 1;
reverse_kernel<<<gridSize, blockSize, n * sizeof(int)>>>(d_a, n); // dynamic shared memory for a_copy[]
cudaDeviceSynchronize();
cudaMemcpy(h_a, d_a, bytes, cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout << milliseconds << endl;
cudaFree(d_a);
return 0;
}
|
e3f5fe005d40e27993470090188241ea6b19cad2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cstdio>
#include <vector>
#ifdef __NVCC__
#include "hipcub/hipcub.hpp"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
#endif
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/top_k_function_cuda.h"
#include "paddle/fluid/operators/top_k_op.h"
#include "paddle/fluid/platform/float16.h"
// set cub base traits in order to handle float16
namespace paddle {
namespace operators {
using Tensor = phi::DenseTensor;
#define FIXED_BLOCK_DIM_BASE(dim, ...) \
case (dim): { \
constexpr auto kBlockDim = (dim); \
__VA_ARGS__; \
} break
#define FIXED_MAXLENGTH_BASE(MaxLength, ...) \
case (MaxLength): { \
constexpr auto maxLength = (MaxLength); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM(...) \
FIXED_BLOCK_DIM_BASE(1024, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(512, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__)
#define FIXED_MAXLENGTH(...) \
FIXED_MAXLENGTH_BASE(1, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(2, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(3, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(4, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(5, ##__VA_ARGS__)
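// The FIXED_BLOCK_DIM / FIXED_MAXLENGTH macros expand a runtime switch (on the launch block
// size and on getMaxLength(k)) into compile-time cases, so each case instantiates the kernel
// template with constexpr kBlockDim / maxLength values.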
template <typename DeviceContext, typename T>
class TopkOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()),
true,
platform::errors::InvalidArgument("It must use CUDAPlace."));
auto* input = ctx.Input<phi::DenseTensor>("X");
auto* output = ctx.Output<phi::DenseTensor>("Out");
auto* indices = ctx.Output<phi::DenseTensor>("Indices");
int k = static_cast<int>(ctx.Attr<int>("k"));
auto* k_t = ctx.Input<phi::DenseTensor>("K");
if (k_t) {
Tensor k_host;
framework::TensorCopySync(*k_t, platform::CPUPlace(), &k_host);
k = k_host.data<int>()[0];
framework::DDim output_dims = output->dims();
output_dims[output_dims.size() - 1] = k;
output->Resize(output_dims);
indices->Resize(output_dims);
}
const T* input_data = input->data<T>();
T* output_data = output->mutable_data<T>(ctx.GetPlace());
// FIXME(typhoonzero): data is always converted to type T?
framework::DDim inputdims = input->dims();
const int64_t input_height =
phi::product(phi::slice_ddim(inputdims, 0, inputdims.size() - 1));
const int64_t input_width = inputdims[inputdims.size() - 1];
const auto& dev_ctx = ctx.cuda_device_context();
if ((input_width <= 1024 || k >= 128 || k == input_width)) {
if (SortTopk<T>(
dev_ctx, input, input_width, input_height, k, output, indices)) {
// Succeeded, return.
return;
} else {
LOG(INFO) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace());
if (k > input_width) k = input_width;
// NOTE: pass lds and dim same to input width.
// NOTE: old matrix implementation of stride is different to eigen.
// TODO(typhoonzero): refine this kernel.
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
paddle::platform::GpuLaunchConfig config =
paddle::platform::GetGpuLaunchConfig1D(dev_ctx, input_width);
switch (config.thread_per_block.x) {
FIXED_BLOCK_DIM(switch (getMaxLength(k)) {
FIXED_MAXLENGTH(
hipLaunchKernelGGL(( KeMatrixTopK<T, maxLength, kBlockDim>)
, dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), output_data,
k,
indices_data,
input_data,
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height));
default:
PADDLE_THROW(platform::errors::Fatal(
"the input k has error when use getMaxLength function to get the "
"maxLength."));
});
default:
PADDLE_THROW(platform::errors::Unavailable(
"Calculation error occurred in TopK Operator's CUDA Kernel."));
}
}
};
template <typename DeviceContext, typename T>
class TopkOpGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(context.GetPlace()),
true,
platform::errors::InvalidArgument("It must use CUDAPlace."));
auto* x = context.Input<phi::DenseTensor>("X");
auto* out_grad =
context.Input<phi::DenseTensor>(framework::GradVarName("Out"));
auto* indices = context.Input<phi::DenseTensor>("Indices");
auto* x_grad =
context.Output<phi::DenseTensor>(framework::GradVarName("X"));
T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
const T* out_grad_data = out_grad->data<T>();
const int64_t* indices_data = indices->data<int64_t>();
size_t k = indices->dims()[indices->dims().size() - 1];
framework::DDim xdims = x->dims();
const size_t row =
phi::product(phi::slice_ddim(xdims, 0, xdims.size() - 1));
const size_t col = xdims[xdims.size() - 1];
const auto& dev_ctx = context.cuda_device_context();
const int kMaxHeight = 2048;
int gridx = row < kMaxHeight ? row : kMaxHeight;
switch (GetDesiredBlockDim(col)) {
FIXED_BLOCK_DIM(
hipLaunchKernelGGL(( AssignGrad<T, 5, kBlockDim>)
, dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(),
x_grad_data, indices_data, out_grad_data, row, col, k));
default:
PADDLE_THROW(
platform::errors::Unavailable("Error occurs when Assign Grad."));
}
}
};
#undef FIXED_BLOCK_DIM_BASE
#undef FIXED_BLOCK_DIM
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
top_k,
paddle::operators::TopkOpCUDAKernel<phi::GPUContext, float>,
paddle::operators::TopkOpCUDAKernel<phi::GPUContext, double>,
paddle::operators::TopkOpCUDAKernel<phi::GPUContext, int>,
paddle::operators::TopkOpCUDAKernel<phi::GPUContext, int64_t>,
paddle::operators::TopkOpCUDAKernel<phi::GPUContext,
paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
top_k_grad,
paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, float>,
paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, double>,
paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, int>,
paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, int64_t>,
paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext,
paddle::platform::float16>);
| e3f5fe005d40e27993470090188241ea6b19cad2.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cstdio>
#include <vector>
#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
#endif
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/top_k_function_cuda.h"
#include "paddle/fluid/operators/top_k_op.h"
#include "paddle/fluid/platform/float16.h"
// set cub base traits in order to handle float16
namespace paddle {
namespace operators {
using Tensor = phi::DenseTensor;
#define FIXED_BLOCK_DIM_BASE(dim, ...) \
case (dim): { \
constexpr auto kBlockDim = (dim); \
__VA_ARGS__; \
} break
#define FIXED_MAXLENGTH_BASE(MaxLength, ...) \
case (MaxLength): { \
constexpr auto maxLength = (MaxLength); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM(...) \
FIXED_BLOCK_DIM_BASE(1024, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(512, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__)
#define FIXED_MAXLENGTH(...) \
FIXED_MAXLENGTH_BASE(1, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(2, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(3, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(4, ##__VA_ARGS__); \
FIXED_MAXLENGTH_BASE(5, ##__VA_ARGS__)
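// The FIXED_BLOCK_DIM / FIXED_MAXLENGTH macros expand a runtime switch (on the launch block
// size and on getMaxLength(k)) into compile-time cases, so each case instantiates the kernel
// template with constexpr kBlockDim / maxLength values.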
template <typename DeviceContext, typename T>
class TopkOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()),
true,
platform::errors::InvalidArgument("It must use CUDAPlace."));
auto* input = ctx.Input<phi::DenseTensor>("X");
auto* output = ctx.Output<phi::DenseTensor>("Out");
auto* indices = ctx.Output<phi::DenseTensor>("Indices");
int k = static_cast<int>(ctx.Attr<int>("k"));
auto* k_t = ctx.Input<phi::DenseTensor>("K");
if (k_t) {
Tensor k_host;
framework::TensorCopySync(*k_t, platform::CPUPlace(), &k_host);
k = k_host.data<int>()[0];
framework::DDim output_dims = output->dims();
output_dims[output_dims.size() - 1] = k;
output->Resize(output_dims);
indices->Resize(output_dims);
}
const T* input_data = input->data<T>();
T* output_data = output->mutable_data<T>(ctx.GetPlace());
// FIXME(typhoonzero): data is always converted to type T?
framework::DDim inputdims = input->dims();
const int64_t input_height =
phi::product(phi::slice_ddim(inputdims, 0, inputdims.size() - 1));
const int64_t input_width = inputdims[inputdims.size() - 1];
const auto& dev_ctx = ctx.cuda_device_context();
if ((input_width <= 1024 || k >= 128 || k == input_width)) {
if (SortTopk<T>(
dev_ctx, input, input_width, input_height, k, output, indices)) {
// Succeeded, return.
return;
} else {
LOG(INFO) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace());
if (k > input_width) k = input_width;
// NOTE: pass lds and dim same to input width.
// NOTE: old matrix implementation of stride is different to eigen.
// TODO(typhoonzero): refine this kernel.
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
paddle::platform::GpuLaunchConfig config =
paddle::platform::GetGpuLaunchConfig1D(dev_ctx, input_width);
switch (config.thread_per_block.x) {
FIXED_BLOCK_DIM(switch (getMaxLength(k)) {
FIXED_MAXLENGTH(
KeMatrixTopK<T, maxLength, kBlockDim>
<<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(output_data,
k,
indices_data,
input_data,
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height));
default:
PADDLE_THROW(platform::errors::Fatal(
"the input k has error when use getMaxLength function to get the "
"maxLength."));
});
default:
PADDLE_THROW(platform::errors::Unavailable(
"Calculation error occurred in TopK Operator's CUDA Kernel."));
}
}
};
template <typename DeviceContext, typename T>
class TopkOpGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(context.GetPlace()),
true,
platform::errors::InvalidArgument("It must use CUDAPlace."));
auto* x = context.Input<phi::DenseTensor>("X");
auto* out_grad =
context.Input<phi::DenseTensor>(framework::GradVarName("Out"));
auto* indices = context.Input<phi::DenseTensor>("Indices");
auto* x_grad =
context.Output<phi::DenseTensor>(framework::GradVarName("X"));
T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
const T* out_grad_data = out_grad->data<T>();
const int64_t* indices_data = indices->data<int64_t>();
size_t k = indices->dims()[indices->dims().size() - 1];
framework::DDim xdims = x->dims();
const size_t row =
phi::product(phi::slice_ddim(xdims, 0, xdims.size() - 1));
const size_t col = xdims[xdims.size() - 1];
const auto& dev_ctx = context.cuda_device_context();
const int kMaxHeight = 2048;
int gridx = row < kMaxHeight ? row : kMaxHeight;
switch (GetDesiredBlockDim(col)) {
FIXED_BLOCK_DIM(
AssignGrad<T, 5, kBlockDim>
<<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(
x_grad_data, indices_data, out_grad_data, row, col, k));
default:
PADDLE_THROW(
platform::errors::Unavailable("Error occurs when Assign Grad."));
}
}
};
#undef FIXED_BLOCK_DIM_BASE
#undef FIXED_BLOCK_DIM
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
top_k,
paddle::operators::TopkOpCUDAKernel<phi::GPUContext, float>,
paddle::operators::TopkOpCUDAKernel<phi::GPUContext, double>,
paddle::operators::TopkOpCUDAKernel<phi::GPUContext, int>,
paddle::operators::TopkOpCUDAKernel<phi::GPUContext, int64_t>,
paddle::operators::TopkOpCUDAKernel<phi::GPUContext,
paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
top_k_grad,
paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, float>,
paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, double>,
paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, int>,
paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, int64_t>,
paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext,
paddle::platform::float16>);
|
c33e40586ef18abed74034432183f469fca205a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 25-Oct-2011 14:51:27
//
// user function
__device__
#include "res_calc.h"
// CUDA kernel function
__global__ void op_cuda_res_calc(
double *ind_arg0, int *ind_arg0_maps,
double *ind_arg1, int *ind_arg1_maps,
double *ind_arg2, int *ind_arg2_maps,
double *ind_arg3, int *ind_arg3_maps,
short *arg0_maps,
short *arg1_maps,
short *arg2_maps,
short *arg3_maps,
short *arg4_maps,
short *arg5_maps,
short *arg6_maps,
short *arg7_maps,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors) {
double arg6_l[4];
double arg7_l[4];
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ double *ind_arg0_s;
__shared__ double *ind_arg1_s;
__shared__ double *ind_arg2_s;
__shared__ double *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4];
ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4];
ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4];
ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2);
ind_arg1_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*4);
ind_arg2_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(double)*1);
ind_arg3_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3_s[n] = ZERO_double;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg6_l[d] = ZERO_double;
for (int d=0; d<4; d++)
arg7_l[d] = ZERO_double;
// user-supplied kernel call
res_calc( ind_arg0_s+arg0_maps[n+offset_b]*2,
ind_arg0_s+arg1_maps[n+offset_b]*2,
ind_arg1_s+arg2_maps[n+offset_b]*4,
ind_arg1_s+arg3_maps[n+offset_b]*4,
ind_arg2_s+arg4_maps[n+offset_b]*1,
ind_arg2_s+arg5_maps[n+offset_b]*1,
arg6_l,
arg7_l );
col2 = colors[n+offset_b];
}
// store local variables
int arg6_map = arg6_maps[n+offset_b];
int arg7_map = arg7_maps[n+offset_b];
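// Scatter the increments one thread colour at a time; the colouring guarantees that threads of
// the same colour never share an indirection target, so the serialised loop avoids write conflicts.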
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg3_s[d+arg6_map*4] += arg6_l[d];
for (int d=0; d<4; d++)
ind_arg3_s[d+arg7_map*4] += arg7_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7 ){
int nargs = 8;
op_arg args[8] = {arg0,arg1,arg2,arg3,arg4,arg5,arg6,arg7};
int ninds = 4;
int inds[8] = {0,0,1,1,2,2,3,3};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc \n");
}
// get plan
#ifdef OP_PART_SIZE_2
int part_size = OP_PART_SIZE_2;
#else
int part_size = OP_part_size;
#endif
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
#ifdef OP_BLOCK_SIZE_2
int nthread = OP_BLOCK_SIZE_2;
#else
int nthread = OP_block_size;
#endif
int nblocks = Plan->ncolblk[col];
int nshared = Plan->nshared;
hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread),nshared, 0,
(double *)arg0.data_d, Plan->ind_maps[0],
(double *)arg2.data_d, Plan->ind_maps[1],
(double *)arg4.data_d, Plan->ind_maps[2],
(double *)arg6.data_d, Plan->ind_maps[3],
Plan->loc_maps[0],
Plan->loc_maps[1],
Plan->loc_maps[2],
Plan->loc_maps[3],
Plan->loc_maps[4],
Plan->loc_maps[5],
Plan->loc_maps[6],
Plan->loc_maps[7],
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_res_calc execution failed\n");
block_offset += nblocks;
}
// update kernel record
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(2);
OP_kernels[2].name = name;
OP_kernels[2].count += 1;
OP_kernels[2].time += wall_t2 - wall_t1;
OP_kernels[2].transfer += Plan->transfer;
OP_kernels[2].transfer2 += Plan->transfer2;
}
| c33e40586ef18abed74034432183f469fca205a9.cu | //
// auto-generated by op2.m on 25-Oct-2011 14:51:27
//
// user function
__device__
#include "res_calc.h"
// CUDA kernel function
__global__ void op_cuda_res_calc(
double *ind_arg0, int *ind_arg0_maps,
double *ind_arg1, int *ind_arg1_maps,
double *ind_arg2, int *ind_arg2_maps,
double *ind_arg3, int *ind_arg3_maps,
short *arg0_maps,
short *arg1_maps,
short *arg2_maps,
short *arg3_maps,
short *arg4_maps,
short *arg5_maps,
short *arg6_maps,
short *arg7_maps,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors) {
double arg6_l[4];
double arg7_l[4];
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ double *ind_arg0_s;
__shared__ double *ind_arg1_s;
__shared__ double *ind_arg2_s;
__shared__ double *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4];
ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4];
ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4];
ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2);
ind_arg1_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*4);
ind_arg2_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(double)*1);
ind_arg3_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3_s[n] = ZERO_double;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg6_l[d] = ZERO_double;
for (int d=0; d<4; d++)
arg7_l[d] = ZERO_double;
// user-supplied kernel call
res_calc( ind_arg0_s+arg0_maps[n+offset_b]*2,
ind_arg0_s+arg1_maps[n+offset_b]*2,
ind_arg1_s+arg2_maps[n+offset_b]*4,
ind_arg1_s+arg3_maps[n+offset_b]*4,
ind_arg2_s+arg4_maps[n+offset_b]*1,
ind_arg2_s+arg5_maps[n+offset_b]*1,
arg6_l,
arg7_l );
col2 = colors[n+offset_b];
}
// store local variables
int arg6_map = arg6_maps[n+offset_b];
int arg7_map = arg7_maps[n+offset_b];
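// Scatter the increments one thread colour at a time; the colouring guarantees that threads of
// the same colour never share an indirection target, so the serialised loop avoids write conflicts.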
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg3_s[d+arg6_map*4] += arg6_l[d];
for (int d=0; d<4; d++)
ind_arg3_s[d+arg7_map*4] += arg7_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7 ){
int nargs = 8;
op_arg args[8] = {arg0,arg1,arg2,arg3,arg4,arg5,arg6,arg7};
int ninds = 4;
int inds[8] = {0,0,1,1,2,2,3,3};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc \n");
}
// get plan
#ifdef OP_PART_SIZE_2
int part_size = OP_PART_SIZE_2;
#else
int part_size = OP_part_size;
#endif
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
#ifdef OP_BLOCK_SIZE_2
int nthread = OP_BLOCK_SIZE_2;
#else
int nthread = OP_block_size;
#endif
int nblocks = Plan->ncolblk[col];
int nshared = Plan->nshared;
op_cuda_res_calc<<<nblocks,nthread,nshared>>>(
(double *)arg0.data_d, Plan->ind_maps[0],
(double *)arg2.data_d, Plan->ind_maps[1],
(double *)arg4.data_d, Plan->ind_maps[2],
(double *)arg6.data_d, Plan->ind_maps[3],
Plan->loc_maps[0],
Plan->loc_maps[1],
Plan->loc_maps[2],
Plan->loc_maps[3],
Plan->loc_maps[4],
Plan->loc_maps[5],
Plan->loc_maps[6],
Plan->loc_maps[7],
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("op_cuda_res_calc execution failed\n");
block_offset += nblocks;
}
// update kernel record
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(2);
OP_kernels[2].name = name;
OP_kernels[2].count += 1;
OP_kernels[2].time += wall_t2 - wall_t1;
OP_kernels[2].transfer += Plan->transfer;
OP_kernels[2].transfer2 += Plan->transfer2;
}
|
488e9e0b5b0966b2210fa582dc0ca588fdf9518f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// main.cpp
// pi_with_cuda
//
// Created by Mirco Meazzo on 21/10/2019.
// Copyright © 2019 Mirco Meazzo. All rights reserved.
//
#include <iostream>
#include <stdlib.h>
#include <math.h>
#include <typeinfo>
#define NLIM 10000000
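// compute_r: each thread walks the sample arrays with a grid-stride loop and records a 1
// for every point (rand_real[i], rand_imag[i]) that falls inside the unit circle, 0 otherwise.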
__global__ void compute_r(int *mem, double *rand_real, double *rand_imag ) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int total_blocks= gridDim.x;
int stride= blockDim.x * total_blocks;
for (int i=index; i<(int(NLIM)); i+=stride) {
if ((sqrt(rand_real[i]*rand_real[i] + rand_imag[i]*rand_imag[i])) <= 1.0f) {
mem[i] = 1;
}
else
mem[i] = 0;
}
}
__global__ void reduction(int *mem, int *res) {
// Copy from global memory to shared memory the values
__shared__ int mem_gpu[512];
int tid = threadIdx.x;
mem_gpu[tid] = mem[tid + blockDim.x*blockIdx.x];
__syncthreads();
// Wait all threads within the block
// Start memory reduction process
if (blockDim.x >= 512) {
if (tid < 256) {
mem_gpu[tid] += mem_gpu[tid + 256];
}
__syncthreads();
}
if (blockDim.x >= 256) {
if (tid < 128) {
mem_gpu[tid] += mem_gpu[tid + 128];
}
__syncthreads();
}
if (blockDim.x >= 128) {
if (tid < 64) {
mem_gpu[tid] += mem_gpu[tid + 64];
}
__syncthreads();
}
if (tid < 32) { // Instruction within warps scope
volatile int *smem_gpu = mem_gpu; // Volatile means no schedule optimization, we're freezing
// the status on these 64 threads
smem_gpu[tid] += smem_gpu[tid + 32]; // Warps are synchronized, these rows are executed
smem_gpu[tid] += smem_gpu[tid + 16]; // one by one, no need of further sync
smem_gpu[tid] += smem_gpu[tid + 8];
smem_gpu[tid] += smem_gpu[tid + 4];
smem_gpu[tid] += smem_gpu[tid + 2];
smem_gpu[tid] += smem_gpu[tid + 1];
}
if (tid == 0) {
res[blockIdx.x] = mem_gpu[tid];
}
}
int main(int argc, const char * argv[]) {
std::cout << "Refine Pi using " << NLIM << " iterations" << std::endl;
double pi;
int *gpu_inner;
double *rand_imag, *rand_real;
// gpu_inner = new int[NLIM];
// rand_real = new double[NLIM];
// rand_imag = new double[NLIM];
hipMallocManaged(&gpu_inner,int(NLIM)*sizeof(int));
hipMallocManaged(&rand_real,int(NLIM)*sizeof(double));
hipMallocManaged(&rand_imag,int(NLIM)*sizeof(double));
for (int i=0; i<int(NLIM); i++) { // initialise all NLIM samples
rand_real[i] = double(rand()) / double(RAND_MAX);
rand_imag[i] = double(rand()) / double(RAND_MAX);
}
int block_size = 128;
int n_blocks = (int(NLIM) + block_size - 1) / block_size;
int *inner;
hipMallocManaged(&inner, n_blocks*sizeof(int));
std::cout << "Executing Kernel with " << block_size << " threads on " << n_blocks << " blocks" << std::endl;
hipLaunchKernelGGL(( compute_r) , dim3(n_blocks), dim3(block_size), 0, 0, gpu_inner, rand_real, rand_imag);
hipDeviceSynchronize();
hipLaunchKernelGGL(( reduction) , dim3(n_blocks), dim3(block_size), 0, 0, gpu_inner,inner);
// compute_r (gpu_inner,rand_real,rand_imag);
hipDeviceSynchronize();
for (int i=1; i<n_blocks; i++) {
inner[0] += inner[i];
}
pi= 4.0f* (inner[0]/double(NLIM));
std::cout << "Pi is " << pi << std::endl;
hipFree(gpu_inner);
hipFree(rand_imag);
hipFree(rand_real);
return 0;
}
| 488e9e0b5b0966b2210fa582dc0ca588fdf9518f.cu | //
// main.cpp
// pi_with_cuda
//
// Created by Mirco Meazzo on 21/10/2019.
// Copyright © 2019 Mirco Meazzo. All rights reserved.
//
#include <iostream>
#include <stdlib.h>
#include <math.h>
#include <typeinfo>
#define NLIM 10000000
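// compute_r: each thread walks the sample arrays with a grid-stride loop and records a 1
// for every point (rand_real[i], rand_imag[i]) that falls inside the unit circle, 0 otherwise.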
__global__ void compute_r(int *mem, double *rand_real, double *rand_imag ) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int total_blocks= gridDim.x;
int stride= blockDim.x * total_blocks;
for (int i=index; i<(int(NLIM)); i+=stride) {
if ((sqrt(rand_real[i]*rand_real[i] + rand_imag[i]*rand_imag[i])) <= 1.0f) {
mem[i] = 1;
}
else
mem[i] = 0;
}
}
__global__ void reduction(int *mem, int *res) {
// Copy from global memory to shared memory the values
__shared__ int mem_gpu[512];
int tid = threadIdx.x;
mem_gpu[tid] = mem[tid + blockDim.x*blockIdx.x];
__syncthreads();
// Wait all threads within the block
// Start memory reduction process
if (blockDim.x >= 512) {
if (tid < 256) {
mem_gpu[tid] += mem_gpu[tid + 256];
}
__syncthreads();
}
if (blockDim.x >= 256) {
if (tid < 128) {
mem_gpu[tid] += mem_gpu[tid + 128];
}
__syncthreads();
}
if (blockDim.x >= 128) {
if (tid < 64) {
mem_gpu[tid] += mem_gpu[tid + 64];
}
__syncthreads();
}
if (tid < 32) { // Instruction within warps scope
volatile int *smem_gpu = mem_gpu; // Volatile means no schedule optimization, we're freezing
// the status on these 64 threads
smem_gpu[tid] += smem_gpu[tid + 32]; // Warps are synchronized, these rows are executed
smem_gpu[tid] += smem_gpu[tid + 16]; // one by one, no need of further sync
smem_gpu[tid] += smem_gpu[tid + 8];
smem_gpu[tid] += smem_gpu[tid + 4];
smem_gpu[tid] += smem_gpu[tid + 2];
smem_gpu[tid] += smem_gpu[tid + 1];
}
if (tid == 0) {
res[blockIdx.x] = mem_gpu[tid];
}
}
int main(int argc, const char * argv[]) {
std::cout << "Refine Pi using " << NLIM << " iterations" << std::endl;
double pi;
int *gpu_inner;
double *rand_imag, *rand_real;
// gpu_inner = new int[NLIM];
// rand_real = new double[NLIM];
// rand_imag = new double[NLIM];
cudaMallocManaged(&gpu_inner,int(NLIM)*sizeof(int));
cudaMallocManaged(&rand_real,int(NLIM)*sizeof(double));
cudaMallocManaged(&rand_imag,int(NLIM)*sizeof(double));
for (int i=0; i<int(NLIM); i++) { // initialise all NLIM samples
rand_real[i] = double(rand()) / double(RAND_MAX);
rand_imag[i] = double(rand()) / double(RAND_MAX);
}
int block_size = 128;
int n_blocks = (int(NLIM) + block_size - 1) / block_size;
int *inner;
cudaMallocManaged(&inner, n_blocks*sizeof(int));
std::cout << "Executing Kernel with " << block_size << " threads on " << n_blocks << " blocks" << std::endl;
compute_r <<<n_blocks, block_size>>> (gpu_inner, rand_real, rand_imag);
cudaDeviceSynchronize();
reduction <<<n_blocks, block_size>>> (gpu_inner,inner);
// compute_r (gpu_inner,rand_real,rand_imag);
cudaDeviceSynchronize();
for (int i=1; i<n_blocks; i++) {
inner[0] += inner[i];
}
pi= 4.0f* (inner[0]/double(NLIM));
std::cout << "Pi is " << pi << std::endl;
cudaFree(gpu_inner);
cudaFree(rand_imag);
cudaFree(rand_real);
return 0;
}
|
74aa9cf3ef9bb01b7516568f599d1ada1564d3c3.hip | // !!! This is a file automatically generated by hipify!!!
// Compile with nvcc blur-effect.cu -o blur-effect -I /usr/local/cuda/samples/common/inc -lm to include helper_cuda.h
#include "file_system.h"
#define M_PI 3.14159265358979323846
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
/**
* bsize represents the block size of the blockwise sistem
* threads represent the number of threads of the algorithm
* ksize represents the kernel size
* sigma represents the standard deviation of the kernel
*/
int bsize = 32, ksize = 15;
double threadsPerCore = 2;
double sigma = 10;
Image *img, *newImg;
double * * kernel;
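// gaussianKernel builds an (unnormalised) size x size Gaussian mask with standard deviation
// sigma; normalisation happens inside the blur kernel via the running sum of the weights that
// actually fall inside the image.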
double * * gaussianKernel(int size){
double mean = size/2;
double * * kernel = (double * *)malloc(size * sizeof(double *));
for(int i = 0; i < size; i++){
kernel[i] = (double *)malloc(size * sizeof(double));
for(int j = 0; j < size; j++){
kernel[i][j] = exp( -0.5 * (pow((i-mean)/sigma, 2.0) + pow((j-mean)/sigma,2.0)))
/ (2 * M_PI * sigma * sigma);
}
}
return kernel;
}
__global__
void blur(uint8_t * color, uint8_t * newColor, double * kernel, int ksize, int imgWidth, int imgHeight, double iterations){
int i;
// Put on shared memory the kernel
extern __shared__ double skernel[];
for (i = threadIdx.x; i < ksize*ksize; i += blockDim.x) {
skernel[i] = kernel[i];
}
__syncthreads();
int index = blockIdx.x*blockDim.x+threadIdx.x;
int x, y, k, l;
double sum, cColor;
for (i = iterations*index; i < iterations*(index + 1) && i < imgWidth*imgHeight; i++){
x = i % imgWidth;
y = i / imgWidth;
sum = cColor = 0;
// For each location in kernel
for(k = ksize/-2; k <= ksize/2; k++) for(l = ksize/-2; l <= ksize/2; l++) {
if(y + k < 0 || y + k >= imgHeight || x + l < 0 || x + l >= imgWidth) continue;
cColor += color[(y + k)*imgWidth + x + l] * skernel[(k + ksize/2)*ksize + l + ksize/2];
// For normalization
sum += skernel[(k + ksize/2)*ksize + l + ksize/2];
}
newColor[i] = cColor/sum;
}
}
int main(int argc, char **argv) {
// For time stamp
struct timeval start, stop, diff, pstart, pend;
gettimeofday(&start, NULL);
char * imgname = argv[1]; // "GrandImg.jpg";//"img2.jpg";//"icon.png";//"GrandImg.jpg";
char * newImgName = argv[2]; //"GrandImgBlur.jpg";//"img2Blur.jpg";//"iconBlur.png";//"GrandImgBlur.jpg";
ksize = atoi(argv[3]);
threadsPerCore = atof(argv[4]);
// Even kernel size
if((ksize & 1) == 0){
printf("Detectado tamao de kernel par (%d). Solo se pueden usar tamaos impares de kernel por lo que se usar %d\n", ksize, ksize + 1);
ksize++;
}
// Image Lecture
Image * img = (Image *)malloc(sizeof(Image));
if(!readImage(imgname, img)){
printf("%s: ERROR LEYENDO IMAGEN\n", imgname);
return 0;
}
printf("Imagen %s cargada\n", imgname);
// Kernel
kernel = gaussianKernel(ksize);
///////////////////////////////////////Parallel setup/////////////////////////////////////
//CARD INFO
hipSetDevice(0);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
int BLOCKS = 2 * deviceProp.multiProcessorCount, THREADS = threadsPerCore * _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
// Alloc Kernel in memory
double * kDevice;
hipMalloc(&kDevice, ksize*ksize*sizeof(double));
for(int i = 0; i < ksize; i++){
hipMemcpy(kDevice + i*ksize, kernel[i], ksize*sizeof(double), hipMemcpyHostToDevice);
}
// Memory
int arrSize = img->width*img->height*sizeof(uint8_t);
uint8_t * colorDevice, * colorNewDevice;
hipMalloc(&colorDevice, arrSize);
hipMalloc(&colorNewDevice, arrSize);
double iterations = (1.0*img->width*img->height)/(BLOCKS * THREADS);
if(BLOCKS * THREADS > img->width*img->height) iterations = 1;
int sharedMemory = ksize*ksize*sizeof(double);
printf("Running with %d threads, %d BLocks, %d ksize, %f iterations\n", THREADS, BLOCKS, ksize, iterations);
uint8_t * currentPointer;
gettimeofday(&pstart, NULL);
for (int i = 0; i < 3; i++){
printf("Running color %d\n", i);
// Alloc Image in memory;
switch(i){
case 0: currentPointer = img->red; break;
case 1: currentPointer = img->blue; break;
case 2: currentPointer = img->green; break;
}
hipMemcpy(colorDevice, currentPointer, arrSize, hipMemcpyHostToDevice);
// Execution
hipLaunchKernelGGL(( blur), dim3(BLOCKS), dim3(THREADS), sharedMemory, 0, colorDevice, colorNewDevice, kDevice, ksize, img->width, img->height, iterations);
// Copy results
hipMemcpy(currentPointer, colorNewDevice, arrSize, hipMemcpyDeviceToHost);
}
gettimeofday(&pend, NULL);
timersub(&pend, &pstart, &diff);
printf("Tiempo de procesamiento: %ld.%06ld\n", (long int) diff.tv_sec, (long int) diff.tv_usec);
// Save new image
printf("Guardando imagen\n");
writeImage(img, newImgName);
// Free
hipFree(colorDevice);
hipFree(colorNewDevice);
freeImage(img);
//Stadistics
gettimeofday(&stop, NULL);
timersub(&stop, &start, &diff);
printf("Tiempo: %ld.%06ld\n", (long int) diff.tv_sec, (long int) diff.tv_usec);
return 0;
}
| 74aa9cf3ef9bb01b7516568f599d1ada1564d3c3.cu | // Compile with nvcc blur-effect.cu -o blur-effect -I /usr/local/cuda/samples/common/inc -lm to include helper_cuda.h
#include "file_system.h"
#define M_PI 3.14159265358979323846
#include <sys/time.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <helper_cuda.h>
/**
* bsize represents the block size of the blockwise sistem
* threads represent the number of threads of the algorithm
* ksize represents the kernel size
* sigma represents the standard deviation of the kernel
*/
int bsize = 32, ksize = 15;
double threadsPerCore = 2;
double sigma = 10;
Image *img, *newImg;
double * * kernel;
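// gaussianKernel builds an (unnormalised) size x size Gaussian mask with standard deviation
// sigma; normalisation happens inside the blur kernel via the running sum of the weights that
// actually fall inside the image.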
double * * gaussianKernel(int size){
double mean = size/2;
double * * kernel = (double * *)malloc(size * sizeof(double *));
for(int i = 0; i < size; i++){
kernel[i] = (double *)malloc(size * sizeof(double));
for(int j = 0; j < size; j++){
kernel[i][j] = exp( -0.5 * (pow((i-mean)/sigma, 2.0) + pow((j-mean)/sigma,2.0)))
/ (2 * M_PI * sigma * sigma);
}
}
return kernel;
}
__global__
void blur(uint8_t * color, uint8_t * newColor, double * kernel, int ksize, int imgWidth, int imgHeight, double iterations){
int i;
// Put on shared memory the kernel
extern __shared__ double skernel[];
for (i = threadIdx.x; i < ksize*ksize; i += blockDim.x) {
skernel[i] = kernel[i];
}
__syncthreads();
int index = blockIdx.x*blockDim.x+threadIdx.x;
int x, y, k, l;
double sum, cColor;
for (i = iterations*index; i < iterations*(index + 1) && i < imgWidth*imgHeight; i++){
x = i % imgWidth;
y = i / imgWidth;
sum = cColor = 0;
// For each location in kernel
for(k = ksize/-2; k <= ksize/2; k++) for(l = ksize/-2; l <= ksize/2; l++) {
if(y + k < 0 || y + k >= imgHeight || x + l < 0 || x + l >= imgWidth) continue;
cColor += color[(y + k)*imgWidth + x + l] * skernel[(k + ksize/2)*ksize + l + ksize/2];
// For normalization
sum += skernel[(k + ksize/2)*ksize + l + ksize/2];
}
newColor[i] = cColor/sum;
}
}
int main(int argc, char **argv) {
// For time stamp
struct timeval start, stop, diff, pstart, pend;
gettimeofday(&start, NULL);
char * imgname = argv[1]; // "GrandImg.jpg";//"img2.jpg";//"icon.png";//"GrandImg.jpg";
char * newImgName = argv[2]; //"GrandImgBlur.jpg";//"img2Blur.jpg";//"iconBlur.png";//"GrandImgBlur.jpg";
ksize = atoi(argv[3]);
threadsPerCore = atof(argv[4]);
// Even kernel size
if((ksize & 1) == 0){
printf("Detectado tamaño de kernel par (%d). Solo se pueden usar tamaños impares de kernel por lo que se usará %d\n", ksize, ksize + 1);
ksize++;
}
// Image Lecture
Image * img = (Image *)malloc(sizeof(Image));
if(!readImage(imgname, img)){
printf("%s: ERROR LEYENDO IMAGEN\n", imgname);
return 0;
}
printf("Imagen %s cargada\n", imgname);
// Kernel
kernel = gaussianKernel(ksize);
///////////////////////////////////////Parallel setup/////////////////////////////////////
//CARD INFO
cudaSetDevice(0);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
int BLOCKS = 2 * deviceProp.multiProcessorCount, THREADS = threadsPerCore * _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
// Alloc Kernel in memory
double * kDevice;
cudaMalloc(&kDevice, ksize*ksize*sizeof(double));
for(int i = 0; i < ksize; i++){
cudaMemcpy(kDevice + i*ksize, kernel[i], ksize*sizeof(double), cudaMemcpyHostToDevice);
}
// Memory
int arrSize = img->width*img->height*sizeof(uint8_t);
uint8_t * colorDevice, * colorNewDevice;
cudaMalloc(&colorDevice, arrSize);
cudaMalloc(&colorNewDevice, arrSize);
double iterations = (1.0*img->width*img->height)/(BLOCKS * THREADS);
if(BLOCKS * THREADS > img->width*img->height) iterations = 1;
int sharedMemory = ksize*ksize*sizeof(double);
printf("Running with %d threads, %d BLocks, %d ksize, %f iterations\n", THREADS, BLOCKS, ksize, iterations);
uint8_t * currentPointer;
gettimeofday(&pstart, NULL);
for (int i = 0; i < 3; i++){
printf("Running color %d\n", i);
// Alloc Image in memory;
switch(i){
case 0: currentPointer = img->red; break;
case 1: currentPointer = img->blue; break;
case 2: currentPointer = img->green; break;
}
cudaMemcpy(colorDevice, currentPointer, arrSize, cudaMemcpyHostToDevice);
// Execution
blur<<<BLOCKS, THREADS, sharedMemory>>>(colorDevice, colorNewDevice, kDevice, ksize, img->width, img->height, iterations);
// Copy results
cudaMemcpy(currentPointer, colorNewDevice, arrSize, cudaMemcpyDeviceToHost);
}
gettimeofday(&pend, NULL);
timersub(&pend, &pstart, &diff);
printf("Tiempo de procesamiento: %ld.%06ld\n", (long int) diff.tv_sec, (long int) diff.tv_usec);
// Save new image
printf("Guardando imagen\n");
writeImage(img, newImgName);
// Free
cudaFree(colorDevice);
cudaFree(colorNewDevice);
freeImage(img);
//Stadistics
gettimeofday(&stop, NULL);
timersub(&stop, &start, &diff);
printf("Tiempo: %ld.%06ld\n", (long int) diff.tv_sec, (long int) diff.tv_usec);
return 0;
}
|
2758fcb060fd72f52d02d78a362f9ae87190dcdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
{% set wdesc = "weighted" if weighted else "unweighted" %}
#include "codegen/embedding_forward_template_helpers.cuh"
{% if not dense %}
constexpr int32_t kCacheLocationMissing = -1;
{% endif %}
enum {
DEVICE = 0,
MANAGED = 1,
MANAGED_CACHING = 2,
};
constexpr size_t kForwardMaxThreads = 512;
using namespace at;
using namespace fbgemm_gpu;
template <
typename emb_t,
typename cache_t,
typename index_t,
size_t kMaxVecsPerThread>
__launch_bounds__(kForwardMaxThreads)
__global__ void {{ "dense" if dense else "split" }}_embedding_codegen_forward_{{ wdesc }}_kernel(
const PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> dev_weights,
{% if not dense %}
const PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> uvm_weights,
const PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits>
lxu_cache_weights,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
weights_placements,
{% endif %}
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> indices,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> offsets,
int64_t pooling_mode,
{% if weighted %}
PackedTensorAccessor32<acc_type<cache_t, true>, 1, RestrictPtrTraits>
indice_weights,
{% endif %}
{% if not dense %}
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
lxu_cache_locations,
{% endif %}
PackedTensorAccessor32<acc_type<cache_t, true>, 2, RestrictPtrTraits>
output // [B][total_D],
) {
int32_t B = output.size(0);
int32_t T = D_offsets.size(0) - 1;
int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y;
int32_t t = b_t / B;
int32_t b = b_t % B;
if (b_t >= B * T) {
return;
}
int64_t weights_offset = weights_offsets[t];
int32_t D_start = D_offsets[t];
int32_t D_end = D_offsets[t + 1];
int32_t D = D_end - D_start;
index_t indices_start = offsets[t * B + b];
index_t indices_end = offsets[t * B + b + 1];
int32_t L = indices_end - indices_start;
const emb_t* __restrict__ weights;
{% if not dense %}
const auto placement = weights_placements[t];
if (placement == DEVICE) {
weights = &dev_weights[weights_offset];
} else {
weights = &uvm_weights[weights_offset];
}
{% else %}
weights = &dev_weights[weights_offset];
{% endif %}
Vec4T<cache_t> accumulators[kMaxVecsPerThread];
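  // Pool the rows for this (table, batch) entry kWarpSize indices at a time: each lane
  // loads one index, which is broadcast across the warp so every lane accumulates its
  // 4-element slice of the D-dimensional embedding row.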
for (int32_t l_start = 0; l_start < L; l_start += kWarpSize) {
int32_t l = l_start + threadIdx.x;
int64_t idx = l < L ? indices[indices_start + l] : 0;
{% if not dense %}
int32_t cache_idx = (placement == MANAGED_CACHING && l < L) ? lxu_cache_locations[indices_start + l] : 0;
{% endif %}
{% if weighted %}
acc_type<cache_t, true> idx_weight = l < L ? indice_weights[indices_start + l] : 0;
{% endif %}
for (auto j = 0; j < kWarpSize && l_start + j < L; ++j) {
int64_t idx_j = __shfl_sync(0xFFFFFFFF, idx, j);
{% if not dense %}
int32_t cache_idx_j = __shfl_sync(0xFFFFFFFF, cache_idx, j);
{% endif %}
{% if weighted %}
acc_type<cache_t, true> idx_weight_j = __shfl_sync(0xFFFFFFFF, idx_weight, j);
{% endif %}
{% if not dense %}
auto weight_row_cache = WeightRow<emb_t, cache_t, cache_t>(const_cast<emb_t*>(&weights[idx_j * D]), const_cast<cache_t*>(&lxu_cache_weights[cache_idx_j][0]), nullptr);
{% endif %}
auto weight_row_emb = WeightRow<emb_t, cache_t, cache_t>(const_cast<emb_t*>(&weights[idx_j * D]), nullptr, nullptr);
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
{% if not dense %}
if (placement == MANAGED_CACHING && cache_idx_j != kCacheLocationMissing) {
Vec4T<cache_t> weight = weight_row_cache.load(d);
{% if weighted %}
accumulators[i].fma_(weight, idx_weight_j);
{% else %}
accumulators[i].acc.x += weight.acc.x;
accumulators[i].acc.y += weight.acc.y;
accumulators[i].acc.z += weight.acc.z;
accumulators[i].acc.w += weight.acc.w;
{% endif %}
} else {
Vec4T<cache_t> weight = weight_row_emb.load(d);
{% if weighted %}
accumulators[i].fma_(weight, idx_weight_j);
{% else %}
accumulators[i].acc.x += weight.acc.x;
accumulators[i].acc.y += weight.acc.y;
accumulators[i].acc.z += weight.acc.z;
accumulators[i].acc.w += weight.acc.w;
{% endif %}
}
{% else %}
Vec4T<cache_t> weight = weight_row_emb.load(d);
{% if weighted %}
accumulators[i].fma_(weight, idx_weight_j);
{% else %}
accumulators[i].acc.x += weight.acc.x;
accumulators[i].acc.y += weight.acc.y;
accumulators[i].acc.z += weight.acc.z;
accumulators[i].acc.w += weight.acc.w;
{% endif %}
{% endif %}
}
}
}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
if (pooling_mode == MEAN && L != 0) {
accumulators[i].acc.x /= L;
accumulators[i].acc.y /= L;
accumulators[i].acc.z /= L;
accumulators[i].acc.w /= L;
}
accumulators[i].store(&output[b][D_start + d]);
}
}
Tensor {{ "dense" if dense else "split" }}_embedding_codegen_forward_{{ wdesc }}_cuda(
Tensor dev_weights,
{% if not dense %}
Tensor uvm_weights,
Tensor lxu_cache_weights,
Tensor weights_placements,
{% endif %}
Tensor weights_offsets,
Tensor D_offsets,
int64_t total_D,
int64_t max_D,
Tensor indices,
Tensor offsets,
int64_t pooling_mode,
{% if weighted %}
Tensor indice_weights,
{% endif %}
{% if not dense %}
Tensor lxu_cache_locations,
{% endif %}
int64_t unused
) {
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(dev_weights.get_device());
int32_t T = D_offsets.numel() - 1;
TORCH_CHECK(T > 0);
// offsets = [B x T + 1]
int32_t B = (offsets.size(0) - 1) / T;
TORCH_CHECK(B > 0);
TORCH_CHECK(total_D > 0);
TORCH_CHECK(total_D % 4 == 0);
TORCH_CHECK(max_D <= {{ max_embedding_dim }});
at::Tensor output;
if (dev_weights.type().scalarType() == at::kHalf) {
output = empty({B, total_D}, dev_weights.options().dtype(at::kFloat));
} else {
output = empty({B, total_D}, dev_weights.options());
}
{% if not dense %}
DISPATCH_EMB_CACHE_TYPES(
{% else %}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
{% endif %}
dev_weights.type(),
{% if not dense %}
lxu_cache_weights.type(),
{% endif %}
"batched_embedding_forward_kernel_2", ([&] {
{% for kMaxVecsPerThread in range(1, max_embedding_dim // 128 + 1) %}
if (max_D <= {{ 128 * kMaxVecsPerThread }}) {
{% if not dense %}
hipLaunchKernelGGL(( split_embedding_codegen_forward_{{ wdesc }}_kernel<emb_t, cache_t, int64_t, {{ kMaxVecsPerThread }}>),
{% else %}
hipLaunchKernelGGL(( dense_embedding_codegen_forward_{{ wdesc }}_kernel<scalar_t, scalar_t, int64_t, {{ kMaxVecsPerThread }}>),
{% endif %}
dim3(div_round_up((B * T), kForwardMaxThreads / kWarpSize)),
dim3(kWarpSize, kForwardMaxThreads / kWarpSize),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
dev_weights.packed_accessor64<{{ "scalar_t" if dense else "emb_t" }}, 1, RestrictPtrTraits>(),
{% if not dense %}
uvm_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(),
lxu_cache_weights.packed_accessor64<cache_t, 2, RestrictPtrTraits>(),
weights_placements.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% endif %}
weights_offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
pooling_mode,
{% if weighted %}
indice_weights.packed_accessor32<acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 1, RestrictPtrTraits>(),
{% endif %}
{% if not dense %}
lxu_cache_locations.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% endif %}
output.packed_accessor32<
acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>,
2,
RestrictPtrTraits>()
);
return;
}
{% endfor %}
}));
C10_HIP_KERNEL_LAUNCH_CHECK();
return output;
}
| 2758fcb060fd72f52d02d78a362f9ae87190dcdf.cu | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
{% set wdesc = "weighted" if weighted else "unweighted" %}
#include "codegen/embedding_forward_template_helpers.cuh"
{% if not dense %}
constexpr int32_t kCacheLocationMissing = -1;
{% endif %}
enum {
DEVICE = 0,
MANAGED = 1,
MANAGED_CACHING = 2,
};
constexpr size_t kForwardMaxThreads = 512;
using namespace at;
using namespace fbgemm_gpu;
template <
typename emb_t,
typename cache_t,
typename index_t,
size_t kMaxVecsPerThread>
__launch_bounds__(kForwardMaxThreads)
__global__ void {{ "dense" if dense else "split" }}_embedding_codegen_forward_{{ wdesc }}_kernel(
const PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> dev_weights,
{% if not dense %}
const PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> uvm_weights,
const PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits>
lxu_cache_weights,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
weights_placements,
{% endif %}
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> indices,
const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> offsets,
int64_t pooling_mode,
{% if weighted %}
PackedTensorAccessor32<acc_type<cache_t, true>, 1, RestrictPtrTraits>
indice_weights,
{% endif %}
{% if not dense %}
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
lxu_cache_locations,
{% endif %}
PackedTensorAccessor32<acc_type<cache_t, true>, 2, RestrictPtrTraits>
output // [B][total_D],
) {
int32_t B = output.size(0);
int32_t T = D_offsets.size(0) - 1;
int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y;
int32_t t = b_t / B;
int32_t b = b_t % B;
if (b_t >= B * T) {
return;
}
int64_t weights_offset = weights_offsets[t];
int32_t D_start = D_offsets[t];
int32_t D_end = D_offsets[t + 1];
int32_t D = D_end - D_start;
index_t indices_start = offsets[t * B + b];
index_t indices_end = offsets[t * B + b + 1];
int32_t L = indices_end - indices_start;
const emb_t* __restrict__ weights;
{% if not dense %}
const auto placement = weights_placements[t];
if (placement == DEVICE) {
weights = &dev_weights[weights_offset];
} else {
weights = &uvm_weights[weights_offset];
}
{% else %}
weights = &dev_weights[weights_offset];
{% endif %}
Vec4T<cache_t> accumulators[kMaxVecsPerThread];
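  // Pool the rows for this (table, batch) entry kWarpSize indices at a time: each lane
  // loads one index, which is broadcast across the warp so every lane accumulates its
  // 4-element slice of the D-dimensional embedding row.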
for (int32_t l_start = 0; l_start < L; l_start += kWarpSize) {
int32_t l = l_start + threadIdx.x;
int64_t idx = l < L ? indices[indices_start + l] : 0;
{% if not dense %}
int32_t cache_idx = (placement == MANAGED_CACHING && l < L) ? lxu_cache_locations[indices_start + l] : 0;
{% endif %}
{% if weighted %}
acc_type<cache_t, true> idx_weight = l < L ? indice_weights[indices_start + l] : 0;
{% endif %}
for (auto j = 0; j < kWarpSize && l_start + j < L; ++j) {
int64_t idx_j = __shfl_sync(0xFFFFFFFF, idx, j);
{% if not dense %}
int32_t cache_idx_j = __shfl_sync(0xFFFFFFFF, cache_idx, j);
{% endif %}
{% if weighted %}
acc_type<cache_t, true> idx_weight_j = __shfl_sync(0xFFFFFFFF, idx_weight, j);
{% endif %}
{% if not dense %}
auto weight_row_cache = WeightRow<emb_t, cache_t, cache_t>(const_cast<emb_t*>(&weights[idx_j * D]), const_cast<cache_t*>(&lxu_cache_weights[cache_idx_j][0]), nullptr);
{% endif %}
auto weight_row_emb = WeightRow<emb_t, cache_t, cache_t>(const_cast<emb_t*>(&weights[idx_j * D]), nullptr, nullptr);
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
{% if not dense %}
if (placement == MANAGED_CACHING && cache_idx_j != kCacheLocationMissing) {
Vec4T<cache_t> weight = weight_row_cache.load(d);
{% if weighted %}
accumulators[i].fma_(weight, idx_weight_j);
{% else %}
accumulators[i].acc.x += weight.acc.x;
accumulators[i].acc.y += weight.acc.y;
accumulators[i].acc.z += weight.acc.z;
accumulators[i].acc.w += weight.acc.w;
{% endif %}
} else {
Vec4T<cache_t> weight = weight_row_emb.load(d);
{% if weighted %}
accumulators[i].fma_(weight, idx_weight_j);
{% else %}
accumulators[i].acc.x += weight.acc.x;
accumulators[i].acc.y += weight.acc.y;
accumulators[i].acc.z += weight.acc.z;
accumulators[i].acc.w += weight.acc.w;
{% endif %}
}
{% else %}
Vec4T<cache_t> weight = weight_row_emb.load(d);
{% if weighted %}
accumulators[i].fma_(weight, idx_weight_j);
{% else %}
accumulators[i].acc.x += weight.acc.x;
accumulators[i].acc.y += weight.acc.y;
accumulators[i].acc.z += weight.acc.z;
accumulators[i].acc.w += weight.acc.w;
{% endif %}
{% endif %}
}
}
}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
if (pooling_mode == MEAN && L != 0) {
accumulators[i].acc.x /= L;
accumulators[i].acc.y /= L;
accumulators[i].acc.z /= L;
accumulators[i].acc.w /= L;
}
accumulators[i].store(&output[b][D_start + d]);
}
}
Tensor {{ "dense" if dense else "split" }}_embedding_codegen_forward_{{ wdesc }}_cuda(
Tensor dev_weights,
{% if not dense %}
Tensor uvm_weights,
Tensor lxu_cache_weights,
Tensor weights_placements,
{% endif %}
Tensor weights_offsets,
Tensor D_offsets,
int64_t total_D,
int64_t max_D,
Tensor indices,
Tensor offsets,
int64_t pooling_mode,
{% if weighted %}
Tensor indice_weights,
{% endif %}
{% if not dense %}
Tensor lxu_cache_locations,
{% endif %}
int64_t unused
) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(dev_weights.get_device());
int32_t T = D_offsets.numel() - 1;
TORCH_CHECK(T > 0);
// offsets = [B x T + 1]
int32_t B = (offsets.size(0) - 1) / T;
TORCH_CHECK(B > 0);
TORCH_CHECK(total_D > 0);
TORCH_CHECK(total_D % 4 == 0);
TORCH_CHECK(max_D <= {{ max_embedding_dim }});
at::Tensor output;
if (dev_weights.type().scalarType() == at::kHalf) {
output = empty({B, total_D}, dev_weights.options().dtype(at::kFloat));
} else {
output = empty({B, total_D}, dev_weights.options());
}
{% if not dense %}
DISPATCH_EMB_CACHE_TYPES(
{% else %}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
{% endif %}
dev_weights.type(),
{% if not dense %}
lxu_cache_weights.type(),
{% endif %}
"batched_embedding_forward_kernel_2", ([&] {
{% for kMaxVecsPerThread in range(1, max_embedding_dim // 128 + 1) %}
if (max_D <= {{ 128 * kMaxVecsPerThread }}) {
{% if not dense %}
split_embedding_codegen_forward_{{ wdesc }}_kernel<emb_t, cache_t, int64_t, {{ kMaxVecsPerThread }}><<<
{% else %}
dense_embedding_codegen_forward_{{ wdesc }}_kernel<scalar_t, scalar_t, int64_t, {{ kMaxVecsPerThread }}><<<
{% endif %}
div_round_up((B * T), kForwardMaxThreads / kWarpSize),
dim3(kWarpSize, kForwardMaxThreads / kWarpSize),
0,
at::cuda::getCurrentCUDAStream()>>>(
dev_weights.packed_accessor64<{{ "scalar_t" if dense else "emb_t" }}, 1, RestrictPtrTraits>(),
{% if not dense %}
uvm_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(),
lxu_cache_weights.packed_accessor64<cache_t, 2, RestrictPtrTraits>(),
weights_placements.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% endif %}
weights_offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
pooling_mode,
{% if weighted %}
indice_weights.packed_accessor32<acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 1, RestrictPtrTraits>(),
{% endif %}
{% if not dense %}
lxu_cache_locations.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% endif %}
output.packed_accessor32<
acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>,
2,
RestrictPtrTraits>()
);
return;
}
{% endfor %}
}));
C10_CUDA_KERNEL_LAUNCH_CHECK();
return output;
}
|
d8ce447d304efe084f4512c64e49423d65e58284.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define MAX_THREADS 1024
__global__ void multiplicar( float *mat1, float *mat2, float *res, int n) {
int j=0; int k=0;
int index = threadIdx.x;
float maximo = n/(float)blockDim.x;
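  // Each thread computes one full row (`index` = threadIdx.x) of res = mat1 * mat2 in
  // row-major order; only threadIdx.x is used, so rows >= blockDim.x are never written.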
for (int i = 0; i < maximo; i++)
{
if (index < n)
{
for (j = 0; j<n; j++)
{
res[index*n+j]=0;
for (k = 0; k < n; k++)
{
res[index*n+j] += (mat1[index*n + k] * mat2[k*n + j]);
}
}
}
}
}
void printM(float * data, int rows, int cols)
{
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
printf("%.0f ", data[i*cols+j]);
}
printf("\n");
}
printf("\n");
}
int main( int argc, char *argv[] ) {
if ( argc != 2 )
{
printf( "usage: %s N (N debe ser < # bloques totales grid size)", argv[0] );
}
else
{
int n = atoi(argv[1]);
float *mat1= new float[n*n], *mat2=new float[n*n], *res=new float[n*n];
float *mat_1, *mat_2, *mat_r;
float tiempo1, tiempo2;
hipEvent_t inicio1, fin1, inicio2, fin2; // para medir tiempos como con timestamp
hipEventCreate(&inicio1); // Se inicializan
hipEventCreate(&fin1);
hipEventRecord( inicio1, 0 ); // Se toma el tiempo de inicio
srand (time(NULL));
for(int i = 0; i<n*n; i++)
{
mat1[i] = rand()%991 + 10;
}
for(int i = 0; i<n*n; i++)
{
mat2[i] = rand()%991 + 10;
}
// allocate the memory on the GPU
hipMalloc( (void**)&mat_1, n * n * sizeof(float) );
hipMalloc( (void**)&mat_2, n * n * sizeof(float) );
hipMalloc( (void**)&mat_r, n * n * sizeof(float) );
// copy the arrays 'a' and 'b' to the GPU
hipMemcpy( mat_1, mat1, n * n * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( mat_2, mat2, n * n * sizeof(float), hipMemcpyHostToDevice );
hipEventCreate(&inicio2); // Se inicializan
hipEventCreate(&fin2);
hipEventRecord( inicio2, 0 ); // Se toma el tiempo de inicio
hipLaunchKernelGGL(( multiplicar), dim3(1),dim3(MAX_THREADS), 0, 0, mat_1, mat_2, mat_r, n );
// multiplicar<<<1,n>>>( mat_1, mat_2, mat_r, n );
hipEventRecord( fin2, 0); // Se toma el tiempo final.
hipEventSynchronize( fin2 ); // Se sincroniza
hipEventElapsedTime( &tiempo2, inicio2, fin2 );
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( res, mat_r, n * n * sizeof(float), hipMemcpyDeviceToHost );
// free the memory allocated on the GPU
hipFree( mat_1 );
hipFree( mat_2 );
hipFree( mat_r );
hipEventRecord( fin1, 0); // Se toma el tiempo final.
hipEventSynchronize( fin1 ); // Se sincroniza
hipEventElapsedTime( &tiempo1, inicio1, fin1 );
if (res !=0)
{
//printf("\nMatriz de Resultado\n\n");
//printM(res, n, n);
}
free(mat1);
free(mat2);
free(res);
printf("Tiempo clculo %f ms\n", tiempo2);
printf("Tiempo total %f ms\n", tiempo1);
return 0;
}
//float * res = multiplicar(mat1, mat2, n);
}//Cierre de main
| d8ce447d304efe084f4512c64e49423d65e58284.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define MAX_THREADS 1024
__global__ void multiplicar( float *mat1, float *mat2, float *res, int n) {
int j=0; int k=0;
int index = threadIdx.x;
float maximo = n/(float)blockDim.x;
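  // Each thread computes one full row (`index` = threadIdx.x) of res = mat1 * mat2 in
  // row-major order; only threadIdx.x is used, so rows >= blockDim.x are never written.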
for (int i = 0; i < maximo; i++)
{
if (index < n)
{
for (j = 0; j<n; j++)
{
res[index*n+j]=0;
for (k = 0; k < n; k++)
{
res[index*n+j] += (mat1[index*n + k] * mat2[k*n + j]);
}
}
}
}
}
void printM(float * data, int rows, int cols)
{
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
printf("%.0f ", data[i*cols+j]);
}
printf("\n");
}
printf("\n");
}
int main( int argc, char *argv[] ) {
if ( argc != 2 )
{
printf( "usage: %s N (N debe ser < # bloques totales grid size)", argv[0] );
}
else
{
int n = atoi(argv[1]);
float *mat1= new float[n*n], *mat2=new float[n*n], *res=new float[n*n];
float *mat_1, *mat_2, *mat_r;
float tiempo1, tiempo2;
cudaEvent_t inicio1, fin1, inicio2, fin2; // para medir tiempos como con timestamp
cudaEventCreate(&inicio1); // Se inicializan
cudaEventCreate(&fin1);
cudaEventRecord( inicio1, 0 ); // Se toma el tiempo de inicio
srand (time(NULL));
for(int i = 0; i<n*n; i++)
{
mat1[i] = rand()%991 + 10;
}
for(int i = 0; i<n*n; i++)
{
mat2[i] = rand()%991 + 10;
}
// allocate the memory on the GPU
cudaMalloc( (void**)&mat_1, n * n * sizeof(float) );
cudaMalloc( (void**)&mat_2, n * n * sizeof(float) );
cudaMalloc( (void**)&mat_r, n * n * sizeof(float) );
// copy the arrays 'a' and 'b' to the GPU
cudaMemcpy( mat_1, mat1, n * n * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( mat_2, mat2, n * n * sizeof(float), cudaMemcpyHostToDevice );
cudaEventCreate(&inicio2); // Se inicializan
cudaEventCreate(&fin2);
cudaEventRecord( inicio2, 0 ); // Se toma el tiempo de inicio
multiplicar<<<1,MAX_THREADS>>>( mat_1, mat_2, mat_r, n );
// multiplicar<<<1,n>>>( mat_1, mat_2, mat_r, n );
cudaEventRecord( fin2, 0); // Se toma el tiempo final.
cudaEventSynchronize( fin2 ); // Se sincroniza
cudaEventElapsedTime( &tiempo2, inicio2, fin2 );
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy( res, mat_r, n * n * sizeof(float), cudaMemcpyDeviceToHost );
// free the memory allocated on the GPU
cudaFree( mat_1 );
cudaFree( mat_2 );
cudaFree( mat_r );
cudaEventRecord( fin1, 0); // Se toma el tiempo final.
cudaEventSynchronize( fin1 ); // Se sincroniza
cudaEventElapsedTime( &tiempo1, inicio1, fin1 );
if (res !=0)
{
//printf("\nMatriz de Resultado\n\n");
//printM(res, n, n);
}
free(mat1);
free(mat2);
free(res);
printf("Tiempo cálculo %f ms\n", tiempo2);
printf("Tiempo total %f ms\n", tiempo1);
return 0;
}
//float * res = multiplicar(mat1, mat2, n);
}//Cierre de main
|
a700680d50afa7e92fde64bb41fd88e8d01e0c0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <scalar.h>
//scalar and current element
__device__ double op(double d1,double d2,double *params) {
if(d2 == d1) {
return 1;
}
return 0;
}
extern "C"
__global__ void equals_scalar_double(int n, int idx,double dx,double *dy,int incx,double *params,double *result) {
transform(n,idx,dx,dy,incx,params,result);
}
| a700680d50afa7e92fde64bb41fd88e8d01e0c0c.cu | #include <scalar.h>
//scalar and current element
__device__ double op(double d1,double d2,double *params) {
if(d2 == d1) {
return 1;
}
return 0;
}
extern "C"
__global__ void equals_scalar_double(int n, int idx,double dx,double *dy,int incx,double *params,double *result) {
transform(n,idx,dx,dy,incx,params,result);
}
|
85748f12b816c55d21f7ed8bd660a1234737840e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "centroid_restraint.hpp"
#include "energy_accumulation.hpp"
#include "gpu_utils.cuh"
#include "k_centroid_restraint.cuh"
#include "math_utils.cuh"
#include <vector>
namespace timemachine {
template <typename RealType>
CentroidRestraint<RealType>::CentroidRestraint(
const std::vector<int> &group_a_idxs, const std::vector<int> &group_b_idxs, const double kb, const double b0)
: N_A_(group_a_idxs.size()), N_B_(group_b_idxs.size()), kb_(kb), b0_(b0) {
cudaSafeMalloc(&d_group_a_idxs_, N_A_ * sizeof(*d_group_a_idxs_));
gpuErrchk(hipMemcpy(d_group_a_idxs_, &group_a_idxs[0], N_A_ * sizeof(*d_group_a_idxs_), hipMemcpyHostToDevice));
cudaSafeMalloc(&d_group_b_idxs_, N_B_ * sizeof(*d_group_b_idxs_));
gpuErrchk(hipMemcpy(d_group_b_idxs_, &group_b_idxs[0], N_B_ * sizeof(*d_group_b_idxs_), hipMemcpyHostToDevice));
cudaSafeMalloc(&d_centroid_a_, 3 * sizeof(*d_centroid_a_));
cudaSafeMalloc(&d_centroid_b_, 3 * sizeof(*d_centroid_b_));
cudaSafeMalloc(&d_u_buffer_, sizeof(*d_u_buffer_));
};
template <typename RealType> CentroidRestraint<RealType>::~CentroidRestraint() {
gpuErrchk(hipFree(d_group_a_idxs_));
gpuErrchk(hipFree(d_group_b_idxs_));
gpuErrchk(hipFree(d_centroid_a_));
gpuErrchk(hipFree(d_centroid_b_));
gpuErrchk(hipFree(d_u_buffer_));
};
template <typename RealType>
void CentroidRestraint<RealType>::execute_device(
const int N,
const int P,
const double *d_x,
const double *d_p,
const double *d_box,
unsigned long long *d_du_dx,
unsigned long long *d_du_dp,
__int128 *d_u,
hipStream_t stream) {
if (N_B_ + N_A_ > 0) {
int tpb = DEFAULT_THREADS_PER_BLOCK;
int blocks = ceil_divide(N_B_ + N_A_, tpb);
gpuErrchk(hipMemsetAsync(d_centroid_a_, 0.0, 3 * sizeof(*d_centroid_a_), stream));
gpuErrchk(hipMemsetAsync(d_centroid_b_, 0.0, 3 * sizeof(*d_centroid_b_), stream));
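        // Two passes: first accumulate the centroids of groups A and B, then evaluate the
        // restraint gradient and energy between the two centroids.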
hipLaunchKernelGGL(( k_calc_centroid<RealType>), dim3(blocks), dim3(tpb), 0, stream,
d_x, d_group_a_idxs_, d_group_b_idxs_, N_A_, N_B_, d_centroid_a_, d_centroid_b_);
gpuErrchk(hipPeekAtLastError());
hipLaunchKernelGGL(( k_centroid_restraint<RealType>), dim3(blocks), dim3(tpb), 0, stream,
d_x,
d_group_a_idxs_,
d_group_b_idxs_,
N_A_,
N_B_,
d_centroid_a_,
d_centroid_b_,
kb_,
b0_,
d_du_dx,
d_u == nullptr ? nullptr : d_u_buffer_);
gpuErrchk(hipPeekAtLastError());
if (d_u) {
accumulate_energy(1, d_u_buffer_, d_u, stream);
}
}
};
template class CentroidRestraint<double>;
template class CentroidRestraint<float>;
} // namespace timemachine
| 85748f12b816c55d21f7ed8bd660a1234737840e.cu | #include "centroid_restraint.hpp"
#include "energy_accumulation.hpp"
#include "gpu_utils.cuh"
#include "k_centroid_restraint.cuh"
#include "math_utils.cuh"
#include <vector>
namespace timemachine {
template <typename RealType>
CentroidRestraint<RealType>::CentroidRestraint(
const std::vector<int> &group_a_idxs, const std::vector<int> &group_b_idxs, const double kb, const double b0)
: N_A_(group_a_idxs.size()), N_B_(group_b_idxs.size()), kb_(kb), b0_(b0) {
cudaSafeMalloc(&d_group_a_idxs_, N_A_ * sizeof(*d_group_a_idxs_));
gpuErrchk(cudaMemcpy(d_group_a_idxs_, &group_a_idxs[0], N_A_ * sizeof(*d_group_a_idxs_), cudaMemcpyHostToDevice));
cudaSafeMalloc(&d_group_b_idxs_, N_B_ * sizeof(*d_group_b_idxs_));
gpuErrchk(cudaMemcpy(d_group_b_idxs_, &group_b_idxs[0], N_B_ * sizeof(*d_group_b_idxs_), cudaMemcpyHostToDevice));
cudaSafeMalloc(&d_centroid_a_, 3 * sizeof(*d_centroid_a_));
cudaSafeMalloc(&d_centroid_b_, 3 * sizeof(*d_centroid_b_));
cudaSafeMalloc(&d_u_buffer_, sizeof(*d_u_buffer_));
};
template <typename RealType> CentroidRestraint<RealType>::~CentroidRestraint() {
gpuErrchk(cudaFree(d_group_a_idxs_));
gpuErrchk(cudaFree(d_group_b_idxs_));
gpuErrchk(cudaFree(d_centroid_a_));
gpuErrchk(cudaFree(d_centroid_b_));
gpuErrchk(cudaFree(d_u_buffer_));
};
template <typename RealType>
void CentroidRestraint<RealType>::execute_device(
const int N,
const int P,
const double *d_x,
const double *d_p,
const double *d_box,
unsigned long long *d_du_dx,
unsigned long long *d_du_dp,
__int128 *d_u,
cudaStream_t stream) {
if (N_B_ + N_A_ > 0) {
int tpb = DEFAULT_THREADS_PER_BLOCK;
int blocks = ceil_divide(N_B_ + N_A_, tpb);
gpuErrchk(cudaMemsetAsync(d_centroid_a_, 0.0, 3 * sizeof(*d_centroid_a_), stream));
gpuErrchk(cudaMemsetAsync(d_centroid_b_, 0.0, 3 * sizeof(*d_centroid_b_), stream));
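        // Two passes: first accumulate the centroids of groups A and B, then evaluate the
        // restraint gradient and energy between the two centroids.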
k_calc_centroid<RealType><<<blocks, tpb, 0, stream>>>(
d_x, d_group_a_idxs_, d_group_b_idxs_, N_A_, N_B_, d_centroid_a_, d_centroid_b_);
gpuErrchk(cudaPeekAtLastError());
k_centroid_restraint<RealType><<<blocks, tpb, 0, stream>>>(
d_x,
d_group_a_idxs_,
d_group_b_idxs_,
N_A_,
N_B_,
d_centroid_a_,
d_centroid_b_,
kb_,
b0_,
d_du_dx,
d_u == nullptr ? nullptr : d_u_buffer_);
gpuErrchk(cudaPeekAtLastError());
if (d_u) {
accumulate_energy(1, d_u_buffer_, d_u, stream);
}
}
};
template class CentroidRestraint<double>;
template class CentroidRestraint<float>;
} // namespace timemachine
|
0962acd1e96f075b946e38411aaff31daa3c90dc.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <cuml/common/logger.hpp>
#include <cuml/genetic/common.h>
#include <cuml/genetic/genetic.h>
#include <cuml/genetic/node.h>
#include <cuml/genetic/program.h>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <test_utils.h>
#include <vector>
namespace cuml {
namespace genetic {
/**
* @brief Tests the training and inference of the symbolic regressor, classifier and transformer
* on y = 0.5X[0] + 0.4 X[1]
*
*/
class GeneticEvolutionTest : public ::testing::Test {
public:
GeneticEvolutionTest()
: d_train(0, hipStream_t(0)),
d_trainlab(0, hipStream_t(0)),
d_test(0, hipStream_t(0)),
d_testlab(0, hipStream_t(0)),
d_trainwts(0, hipStream_t(0)),
d_testwts(0, hipStream_t(0)),
stream(handle.get_stream())
{
}
protected:
void SetUp() override
{
ML::Logger::get().setLevel(CUML_LEVEL_INFO);
// Set training param vals
hyper_params.population_size = 5000;
hyper_params.num_features = n_cols;
hyper_params.random_state = 11;
hyper_params.generations = 20;
hyper_params.stopping_criteria = 0.01;
hyper_params.p_crossover = 0.7;
hyper_params.p_subtree_mutation = 0.1;
hyper_params.p_hoist_mutation = 0.05;
hyper_params.p_point_mutation = 0.1;
hyper_params.parsimony_coefficient = 0.01;
// Initialize weights
h_trainwts.resize(n_tr_rows, 1.0f);
h_testwts.resize(n_tst_rows, 1.0f);
// resize device memory
d_train.resize(n_cols * n_tr_rows, stream);
d_trainlab.resize(n_tr_rows, stream);
d_test.resize(n_cols * n_tst_rows, stream);
d_testlab.resize(n_tst_rows, stream);
d_trainwts.resize(n_tr_rows, stream);
d_testwts.resize(n_tst_rows, stream);
// Memcpy HtoD
RAFT_CUDA_TRY(hipMemcpyAsync(d_train.data(),
h_train.data(),
n_cols * n_tr_rows * sizeof(float),
hipMemcpyHostToDevice,
stream));
RAFT_CUDA_TRY(hipMemcpyAsync(d_trainlab.data(),
h_trainlab.data(),
n_tr_rows * sizeof(float),
hipMemcpyHostToDevice,
stream));
RAFT_CUDA_TRY(hipMemcpyAsync(d_test.data(),
h_test.data(),
n_cols * n_tst_rows * sizeof(float),
hipMemcpyHostToDevice,
stream));
RAFT_CUDA_TRY(hipMemcpyAsync(d_testlab.data(),
h_testlab.data(),
n_tst_rows * sizeof(float),
hipMemcpyHostToDevice,
stream));
RAFT_CUDA_TRY(hipMemcpyAsync(d_trainwts.data(),
h_trainwts.data(),
n_tr_rows * sizeof(float),
hipMemcpyHostToDevice,
stream));
RAFT_CUDA_TRY(hipMemcpyAsync(d_testwts.data(),
h_testwts.data(),
n_tst_rows * sizeof(float),
hipMemcpyHostToDevice,
stream));
}
raft::handle_t handle;
hipStream_t stream;
param hyper_params;
// Some mini-dataset constants
const int n_tr_rows = 250;
const int n_tst_rows = 50;
const int n_cols = 2;
const float tolerance = 0.025f; // assuming upto 2.5% tolerance for results(for now)
// Contains synthetic Data
// y =
std::vector<float> h_train = {
0.2119566, -0.7221057, 0.9944866, -0.6420138, 0.3243210, -0.8062112, 0.9247920, -0.8267401,
0.2330494, 0.1486086, -0.0957095, 0.1386102, 0.1674080, 0.0356288, 0.4644501, 0.3442579,
0.6560287, 0.2349779, -0.3978628, 0.1793082, -0.1155355, 0.0176618, 0.8318791, 0.7813108,
0.2736598, 0.6475824, -0.3849131, -0.4696701, -0.6907704, 0.2952283, -0.8723270, -0.3355115,
-0.0523054, -0.8182662, 0.5539537, -0.8737933, 0.5849895, -0.2579604, 0.3574578, -0.1654855,
-0.2554073, 0.3591112, 0.9403976, -0.3390219, 0.6517981, 0.6465558, 0.4370021, -0.0079799,
0.2970910, 0.2452746, -0.7523201, -0.0951637, 0.6400041, -0.5386036, 0.4352954, -0.2126355,
0.6203773, 0.7159789, -0.6823127, 0.4670905, -0.4666402, 0.0071169, 0.5038485, -0.5780727,
0.7944591, 0.6328644, 0.1813934, 0.2653100, -0.1671608, 0.8108285, 0.3609906, -0.5820257,
0.0447571, 0.7247062, 0.3546630, 0.5908147, -0.1850210, 0.8889677, 0.4725176, 0.2190818,
0.1944676, -0.1650774, 0.5239485, 0.4871244, 0.8803309, 0.3119077, -0.1502819, 0.2140640,
-0.3925484, 0.1745171, -0.0332719, 0.9880465, 0.5828160, 0.3987538, 0.4770127, -0.4151363,
-0.9899210, 0.7880531, -0.3253276, -0.4564783, -0.9825586, -0.0729553, 0.7512086, 0.3045725,
-0.5038860, -0.9412159, -0.8188231, -0.3728235, 0.2280060, -0.4212141, -0.2424457, -0.5574245,
-0.5845115, 0.7049432, -0.5244312, -0.0405502, -0.2238990, 0.6347900, 0.9998363, 0.3580613,
0.0199144, -0.1971139, 0.8036406, 0.7131155, 0.5613965, 0.3835140, 0.0717551, 0.0463067,
0.5255786, 0.0928743, 0.1386557, -0.7212757, 0.3051646, 0.2635859, -0.5229289, -0.8547997,
0.6653103, -0.1116264, 0.2930650, 0.5135837, 0.7412015, -0.3735900, -0.9826624, -0.6185324,
-0.8464018, -0.4180478, 0.7254488, -0.5188612, -0.3333993, 0.8999060, -0.6015426, -0.6545046,
0.6795465, -0.5157862, 0.4536161, -0.7564244, -0.0614987, 0.9840064, 0.3975551, 0.8684530,
0.6091788, 0.2544823, -0.9745569, -0.1815226, -0.1521985, 0.8436312, -0.9446849, -0.2546227,
0.9108996, -0.2374187, -0.8820541, -0.2937101, 0.2558129, 0.7706293, 0.1066034, -0.7223888,
-0.6807924, -0.5187497, -0.3461997, 0.3319379, -0.5073046, 0.0713026, 0.4598049, -0.9708425,
-0.2323956, 0.3963093, -0.9132538, -0.2047350, 0.1162403, -0.6301352, -0.1114944, -0.4411873,
-0.7517651, 0.9942231, 0.6387486, -0.3516690, 0.2925287, 0.8415794, -0.2203800, 0.1182607,
-0.5032156, 0.4939238, 0.9852490, -0.8617036, -0.8945347, 0.1789286, -0.1909516, 0.2587640,
-0.2992706, 0.6049703, -0.1238372, 0.8297717, -0.3196876, 0.9792059, 0.7898732, 0.8210509,
-0.5545098, -0.5691904, -0.7678227, -0.9643255, -0.1002291, -0.4273028, -0.6697328, -0.3049299,
-0.0368014, 0.4804423, -0.6646156, 0.5903011, -0.1700153, -0.6397213, 0.9845422, -0.5159376,
0.1589690, -0.3279489, -0.1498093, -0.9002322, 0.1960990, 0.3850992, 0.4812583, -0.1506606,
-0.0863564, -0.4061224, -0.3599582, -0.2919797, -0.5094189, 0.7824159, 0.3322580, -0.3275573,
-0.9909980, -0.5806390, 0.4667387, -0.3746538, -0.7436752, 0.5058509, 0.5686203, -0.8828574,
0.2331149, 0.1225447, 0.9276860, -0.2576783, -0.5962995, -0.6098081, -0.0473731, 0.6461973,
-0.8618875, 0.2869696, -0.5910612, 0.2354020, 0.7434812, 0.9635402, -0.7473646, -0.1364276,
0.4180313, 0.1777712, -0.3155821, -0.3896985, -0.5973547, 0.3018475, -0.2226010, 0.6965982,
-0.1711176, 0.4426420, 0.5972827, 0.7491136, 0.5431328, 0.1888770, -0.4517326, 0.7062291,
0.5087549, -0.3582025, -0.4492956, 0.1632529, -0.1689859, 0.9334283, -0.3891996, 0.1138209,
0.7598738, 0.0241726, -0.3133468, -0.0708007, 0.9602417, -0.7650007, -0.6497396, 0.4096349,
-0.7035034, 0.6052362, 0.5920056, -0.4065195, 0.3722862, -0.7039886, -0.2351859, 0.3143256,
-0.8650362, 0.3481469, 0.5242298, 0.2190642, 0.7090682, 0.7368234, 0.3148258, -0.8396302,
-0.8332214, 0.6766308, 0.4428585, 0.5376374, 0.1104256, -0.9560977, 0.8913012, 0.2302127,
-0.7445556, -0.8753514, -0.1434969, 0.7423451, -0.9627953, 0.7919458, -0.8590292, -0.2405730,
0.0733800, -0.1964383, 0.3429065, -0.5199867, -0.6148949, -0.4645573, -0.1036227, 0.1915514,
0.4981042, -0.3142545, -0.1360139, 0.5123143, -0.8319357, 0.2593685, -0.6637208, 0.8695423,
-0.4745009, -0.4598881, 0.2561057, 0.8682946, 0.7572707, -0.2405597, -0.6909520, -0.2329739,
-0.3544887, 0.5916605, -0.5483196, 0.3634111, 0.0485800, 0.1492287, -0.0361141, 0.6510856,
0.9754849, -0.1871928, 0.7787021, -0.6019276, 0.2416331, -0.1160285, 0.8894659, 0.9423820,
-0.7052383, -0.8790381, -0.7129928, 0.5332075, -0.5728216, -0.9184565, 0.0437820, 0.3580015,
-0.7459742, -0.6401960, -0.7465842, -0.0257084, 0.7586666, 0.3472861, 0.3226733, -0.8356623,
0.9038333, 0.9519323, 0.6794367, -0.4118270, -0.1475553, 0.1638173, 0.7039975, 0.0782125,
-0.6468386, -0.4905404, -0.0657285, -0.9094056, -0.1691999, 0.9545628, 0.5260556, 0.0704832,
0.9559255, 0.4109315, 0.0437353, 0.1975988, -0.2173066, 0.4840004, -0.9305912, 0.6281645,
-0.2873839, -0.0092089, -0.7423917, -0.5064726, 0.2959957, 0.3744118, -0.2324660, 0.6419766,
0.0482254, 0.0711853, -0.0668010, -0.6056250, -0.6424942, 0.5091138, -0.7920839, -0.3631541,
0.2925649, 0.8553973, -0.5368195, -0.8043768, 0.6299060, -0.7402435, 0.7831608, -0.4979353,
-0.7786197, 0.1855255, -0.7243119, 0.7581270, 0.7850708, -0.6414960, -0.4423507, -0.4211898,
0.8494025, 0.3603602, -0.3777632, 0.3322407, -0.0483915, -0.8515641, -0.9453503, -0.4536391,
-0.1080792, 0.5246211, 0.2128397, -0.0146389, -0.7508293, -0.0058518, 0.5420505, 0.1439000,
0.1900943, 0.0454271, 0.3117409, 0.1234926, -0.1166942, 0.2856016, 0.8390452, 0.8877837,
0.0886838, -0.7009126, -0.5130350, -0.0999212, 0.3338176, -0.3013774, 0.3526511, 0.9518843,
0.5853393, -0.1422507, -0.9768327, -0.5915277, 0.9691055, 0.4186211, 0.7512146, 0.5220292,
-0.1700221, 0.5423641, 0.5864487, -0.7437551, -0.5076052, -0.8304062, 0.4895252, 0.7349310,
0.7687441, 0.6319372, 0.7462888, 0.2358095};
std::vector<float> h_trainlab = {
-0.7061807, -0.9935827, -1.3077246, -0.3378525, -0.6495246, -2.0123182, 0.0340125, -0.2089733,
-0.8786033, -1.3019919, -1.9427123, -1.9624611, -1.0215918, -0.7701042, -2.3890236, -0.6768685,
-1.5100409, -0.7647975, -0.6509883, -0.9327181, -2.2925701, -1.1547282, -0.0646960, -0.2433849,
-1.3402845, -1.1222004, -1.8060292, -0.5686744, -0.7949885, -0.7014911, -0.4394445, -0.6407220,
-0.7567281, -0.1424980, -0.4449957, -0.0832827, -1.3135824, -0.7259869, -0.6223005, -1.4591261,
-1.5859294, -0.7344378, -0.3131946, -0.8229243, -1.1158352, -0.4810999, -0.6265636, -0.9763480,
-1.3232699, -1.0156538, -0.3958369, -2.3411706, -1.6622960, -0.4680720, -2.0089384, -0.7158608,
-0.3735971, -1.0591518, -0.3007601, -1.9814152, -1.0727452, -0.7844243, -2.3594606, -0.4388914,
-0.1194218, -0.4284076, -0.7608060, -0.7356959, -0.7563467, -1.8871661, -2.3971652, -0.4424445,
-0.7512620, -0.2262175, -0.7759824, -2.5211585, -0.8688839, -0.0325217, -2.0756457, -2.5935947,
-1.1262706, -0.7814806, -2.6152479, -0.5979422, -1.8219779, -1.2011619, -0.9094200, -1.1892029,
-0.6205842, -1.7599165, -1.9918835, -0.7041349, -0.7746859, -0.6861359, -0.5224625, -1.2406723,
-0.1745701, -0.1291239, -2.4182146, -0.5995310, -1.1388247, -0.8812391, -1.1353377, -1.5786207,
-0.5555833, 0.0002464, -0.1457169, -1.1594313, -2.1163798, -1.1098294, -1.4213709, -0.4476795,
-1.5073204, -0.2717116, -0.6787519, -0.8713962, -0.9872876, -0.3698685, 0.0235867, -1.0940261,
-0.8272783, -1.9253905, -0.1709152, -0.6209573, -0.5865176, -0.7986188, -2.1974506, -2.6496017,
-1.9451187, -0.7424771, -1.8817208, -2.2417800, -0.8650095, -0.7006861, -2.0289972, -1.3193644,
-1.8613344, -1.0139089, -0.7310213, -0.5095533, -0.2320652, -2.3944243, 0.0525441, -0.5716605,
-0.0658016, -1.4066644, -0.6430519, -0.5938018, -0.6804599, -0.1180739, -1.7033852, -1.3027941,
-0.6082652, -2.4703887, -0.9920609, -0.3844494, -0.7468968, 0.0337840, -0.7998180, -0.0037226,
-0.5870786, -0.7766853, -0.3147676, -0.7173055, -2.7734269, -0.0547125, -0.4775438, -0.9444610,
-1.4637991, -1.7066195, -0.0135983, -0.6795068, -1.2210661, -0.1762879, -0.9427360, -0.4120364,
-0.6077851, -1.7033054, -1.9354388, -0.6399003, -2.1621227, -1.4899510, -0.5816087, 0.0662278,
-1.7709871, -2.2943379, 0.0671570, -2.2462875, -0.8166682, -1.3488045, -2.3724372, -0.6542480,
-1.6837887, 0.1718501, -0.4232655, -1.9293420, -1.5524519, -0.8903348, -0.8235148, -0.7555137,
-1.2672423, -0.5341824, -0.0800176, -1.8341924, -2.0388451, -1.6274120, -1.0832978, -0.6836474,
-0.7428981, -0.6488642, -2.2992384, -0.3173651, -0.6495681, 0.0820371, -0.2221419, -0.2825119,
-0.4779604, -0.5677801, -0.5407600, 0.1339569, -0.8549058, -0.7177885, -0.4706391, -2.0992089,
-1.7748856, -0.8790807, -0.3359026, -1.0437502, -0.7428065, -0.5449560, 0.2120406, -0.8962944,
-2.9057635, -1.8338823, -0.9476171, 0.0537955, -0.7746540, -0.6021839, -0.9673201, -0.7290961,
-0.7500160, -2.1319913, -1.6356984, -2.4347284, -0.4906021, -0.1930180, -0.7118280, -0.6601136,
0.1714188, -0.4826550};
std::vector<float> h_test = {
0.6506153, -0.2861214, -0.4207479, -0.0879224, 0.6963105, 0.7591472, -0.9145728, 0.3606104,
0.5918564, -0.5548665, -0.4487113, 0.0824032, 0.4425484, -0.9139633, -0.7823172, 0.0768981,
0.0922035, -0.0138858, 0.9646097, 0.2624208, -0.7190498, -0.6117298, -0.8807327, 0.2868101,
-0.8899322, 0.9853774, -0.5898669, 0.6281458, 0.5219784, -0.5437135, -0.2806136, -0.0927834,
-0.2291698, 0.0450774, 0.4253027, 0.6545525, 0.7031374, -0.3601150, 0.0715214, -0.9844534,
-0.8571354, -0.8157709, -0.6361769, -0.5510336, 0.4286138, 0.8863587, -0.7481151, -0.6144726,
-0.7920206, -0.2917536, -0.6506116, -0.4862449, -0.0866336, -0.7439836, 0.3753550, 0.2632956,
-0.2270555, 0.1109649, -0.6320683, 0.0280535, 0.6881603, 0.8163167, 0.1781434, -0.8063828,
0.8032009, -0.6779581, -0.8654890, -0.5322430, 0.3786414, 0.0546245, -0.5542659, 0.6897840,
-0.1039676, -0.0343101, 0.4219748, -0.4535081, 0.7228620, 0.3873561, 0.1427819, -0.2881901,
0.5431166, -0.0090170, -0.8354108, -0.0099369, -0.5904349, 0.2928394, 0.3634137, -0.7485119,
-0.5442900, 0.4072478, -0.4909732, 0.0737537, -0.0973075, -0.0848911, 0.7041450, 0.3288523,
-0.5264588, -0.5135713, 0.5130192, -0.0708379};
std::vector<float> h_testlab = {
-1.6506068, -1.6408135, -0.9171102, -2.2897648, -0.2806881, -0.2297245, -0.4421663, -0.7713085,
-1.6812845, -0.6648566, -0.5840624, -0.8432659, -0.6577426, -1.6213072, -0.2299105, -2.1316719,
-2.6060586, -1.8153329, 0.1657440, -0.8794947, -1.3444440, -0.4118046, -0.3390867, -0.9532273,
0.0358915, -0.6882091, -0.4517245, -0.3681215, -0.6051433, -1.0756192, -0.6731151, -1.0004896,
-2.4808031, -1.0080036, -1.7581659, -0.3644765, -0.2742536, -2.1790992, -1.8354263, 0.2105456,
-0.9973469, -0.2662037, -0.7020552, -0.7884595, -0.6079654, 0.0063403, -1.2439414, -1.3997503,
-0.1228729, -0.9907357
};
std::vector<float> h_trainwts;
std::vector<float> h_testwts;
rmm::device_uvector<float> d_train;
rmm::device_uvector<float> d_trainlab;
rmm::device_uvector<float> d_test;
rmm::device_uvector<float> d_testlab;
rmm::device_uvector<float> d_trainwts;
rmm::device_uvector<float> d_testwts;
};
TEST_F(GeneticEvolutionTest, SymReg)
{
raft::CompareApprox<float> compApprox(tolerance);
program_t final_progs;
final_progs = (program_t)rmm::mr::get_current_device_resource()->allocate(
hyper_params.population_size * sizeof(program), stream);
std::vector<std::vector<program>> history;
history.reserve(hyper_params.generations);
hipEvent_t start, stop;
RAFT_CUDA_TRY(hipEventCreate(&start));
RAFT_CUDA_TRY(hipEventCreate(&stop));
hipEventRecord(start, stream);
symFit(handle,
d_train.data(),
d_trainlab.data(),
d_trainwts.data(),
n_tr_rows,
n_cols,
hyper_params,
final_progs,
history);
hipEventRecord(stop, stream);
hipEventSynchronize(stop);
float training_time;
hipEventElapsedTime(&training_time, start, stop);
int n_gen = history.size();
std::cout << "Finished training for " << n_gen << " generations." << std::endl;
// Find index of best program
int best_idx = 0;
float opt_fitness = history[n_gen - 1][0].raw_fitness_;
// For all 3 loss functions - min is better
for (int i = 1; i < hyper_params.population_size; ++i) {
if (history[n_gen - 1][i].raw_fitness_ < opt_fitness) {
best_idx = i;
opt_fitness = history[n_gen - 1][i].raw_fitness_;
}
}
std::string eqn = stringify(history[n_gen - 1][best_idx]);
CUML_LOG_DEBUG("Best Index = %d", best_idx);
std::cout << "Raw fitness score on train set is " << history[n_gen - 1][best_idx].raw_fitness_
<< std::endl;
std::cout << "Best AST equation is : " << eqn << std::endl;
// Predict values for test dataset
rmm::device_uvector<float> d_predlabels(n_tst_rows, stream);
hipEventRecord(start, stream);
cuml::genetic::symRegPredict(
handle, d_test.data(), n_tst_rows, final_progs + best_idx, d_predlabels.data());
std::vector<float> h_predlabels(n_tst_rows, 0.0f);
RAFT_CUDA_TRY(hipMemcpy(
h_predlabels.data(), d_predlabels.data(), n_tst_rows * sizeof(float), hipMemcpyDeviceToHost));
hipEventRecord(stop, stream);
hipEventSynchronize(stop);
float inference_time;
hipEventElapsedTime(&inference_time, start, stop);
// deallocate the nodes allocated for the last generation inside SymFit
for (auto i = 0; i < hyper_params.population_size; ++i) {
program tmp = program();
raft::copy(&tmp, final_progs + i, 1, stream);
rmm::mr::get_current_device_resource()->deallocate(tmp.nodes, tmp.len * sizeof(node), stream);
tmp.nodes = nullptr;
}
// deallocate the final programs from device memory
rmm::mr::get_current_device_resource()->deallocate(
final_progs, hyper_params.population_size * sizeof(program), stream);
ASSERT_TRUE(compApprox(history[n_gen - 1][best_idx].raw_fitness_, 0.0036f));
std::cout << "Some Predicted test values:" << std::endl;
std::copy(
h_predlabels.begin(), h_predlabels.begin() + 10, std::ostream_iterator<float>(std::cout, ";"));
std::cout << std::endl;
std::cout << "Some Actual test values:" << std::endl;
std::copy(
h_testlab.begin(), h_testlab.begin() + 10, std::ostream_iterator<float>(std::cout, ";"));
std::cout << std::endl;
std::cout << "Training time = " << training_time << " ms" << std::endl;
std::cout << "Inference time = " << inference_time << " ms" << std::endl;
}
} // namespace genetic
} // namespace cuml
| 0962acd1e96f075b946e38411aaff31daa3c90dc.cu | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <cuml/common/logger.hpp>
#include <cuml/genetic/common.h>
#include <cuml/genetic/genetic.h>
#include <cuml/genetic/node.h>
#include <cuml/genetic/program.h>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <test_utils.h>
#include <vector>
namespace cuml {
namespace genetic {
/**
* @brief Tests the training and inference of the symbolic regressor, classifier and transformer
* on y = 0.5X[0] + 0.4 X[1]
*
*/
class GeneticEvolutionTest : public ::testing::Test {
public:
GeneticEvolutionTest()
: d_train(0, cudaStream_t(0)),
d_trainlab(0, cudaStream_t(0)),
d_test(0, cudaStream_t(0)),
d_testlab(0, cudaStream_t(0)),
d_trainwts(0, cudaStream_t(0)),
d_testwts(0, cudaStream_t(0)),
stream(handle.get_stream())
{
}
protected:
void SetUp() override
{
ML::Logger::get().setLevel(CUML_LEVEL_INFO);
// Set training param vals
hyper_params.population_size = 5000;
hyper_params.num_features = n_cols;
hyper_params.random_state = 11;
hyper_params.generations = 20;
hyper_params.stopping_criteria = 0.01;
hyper_params.p_crossover = 0.7;
hyper_params.p_subtree_mutation = 0.1;
hyper_params.p_hoist_mutation = 0.05;
hyper_params.p_point_mutation = 0.1;
hyper_params.parsimony_coefficient = 0.01;
// Initialize weights
h_trainwts.resize(n_tr_rows, 1.0f);
h_testwts.resize(n_tst_rows, 1.0f);
// resize device memory
d_train.resize(n_cols * n_tr_rows, stream);
d_trainlab.resize(n_tr_rows, stream);
d_test.resize(n_cols * n_tst_rows, stream);
d_testlab.resize(n_tst_rows, stream);
d_trainwts.resize(n_tr_rows, stream);
d_testwts.resize(n_tst_rows, stream);
// Memcpy HtoD
RAFT_CUDA_TRY(cudaMemcpyAsync(d_train.data(),
h_train.data(),
n_cols * n_tr_rows * sizeof(float),
cudaMemcpyHostToDevice,
stream));
RAFT_CUDA_TRY(cudaMemcpyAsync(d_trainlab.data(),
h_trainlab.data(),
n_tr_rows * sizeof(float),
cudaMemcpyHostToDevice,
stream));
RAFT_CUDA_TRY(cudaMemcpyAsync(d_test.data(),
h_test.data(),
n_cols * n_tst_rows * sizeof(float),
cudaMemcpyHostToDevice,
stream));
RAFT_CUDA_TRY(cudaMemcpyAsync(d_testlab.data(),
h_testlab.data(),
n_tst_rows * sizeof(float),
cudaMemcpyHostToDevice,
stream));
RAFT_CUDA_TRY(cudaMemcpyAsync(d_trainwts.data(),
h_trainwts.data(),
n_tr_rows * sizeof(float),
cudaMemcpyHostToDevice,
stream));
RAFT_CUDA_TRY(cudaMemcpyAsync(d_testwts.data(),
h_testwts.data(),
n_tst_rows * sizeof(float),
cudaMemcpyHostToDevice,
stream));
}
raft::handle_t handle;
cudaStream_t stream;
param hyper_params;
// Some mini-dataset constants
const int n_tr_rows = 250;
const int n_tst_rows = 50;
const int n_cols = 2;
const float tolerance = 0.025f; // assuming upto 2.5% tolerance for results(for now)
// Contains synthetic Data
// y =
std::vector<float> h_train = {
0.2119566, -0.7221057, 0.9944866, -0.6420138, 0.3243210, -0.8062112, 0.9247920, -0.8267401,
0.2330494, 0.1486086, -0.0957095, 0.1386102, 0.1674080, 0.0356288, 0.4644501, 0.3442579,
0.6560287, 0.2349779, -0.3978628, 0.1793082, -0.1155355, 0.0176618, 0.8318791, 0.7813108,
0.2736598, 0.6475824, -0.3849131, -0.4696701, -0.6907704, 0.2952283, -0.8723270, -0.3355115,
-0.0523054, -0.8182662, 0.5539537, -0.8737933, 0.5849895, -0.2579604, 0.3574578, -0.1654855,
-0.2554073, 0.3591112, 0.9403976, -0.3390219, 0.6517981, 0.6465558, 0.4370021, -0.0079799,
0.2970910, 0.2452746, -0.7523201, -0.0951637, 0.6400041, -0.5386036, 0.4352954, -0.2126355,
0.6203773, 0.7159789, -0.6823127, 0.4670905, -0.4666402, 0.0071169, 0.5038485, -0.5780727,
0.7944591, 0.6328644, 0.1813934, 0.2653100, -0.1671608, 0.8108285, 0.3609906, -0.5820257,
0.0447571, 0.7247062, 0.3546630, 0.5908147, -0.1850210, 0.8889677, 0.4725176, 0.2190818,
0.1944676, -0.1650774, 0.5239485, 0.4871244, 0.8803309, 0.3119077, -0.1502819, 0.2140640,
-0.3925484, 0.1745171, -0.0332719, 0.9880465, 0.5828160, 0.3987538, 0.4770127, -0.4151363,
-0.9899210, 0.7880531, -0.3253276, -0.4564783, -0.9825586, -0.0729553, 0.7512086, 0.3045725,
-0.5038860, -0.9412159, -0.8188231, -0.3728235, 0.2280060, -0.4212141, -0.2424457, -0.5574245,
-0.5845115, 0.7049432, -0.5244312, -0.0405502, -0.2238990, 0.6347900, 0.9998363, 0.3580613,
0.0199144, -0.1971139, 0.8036406, 0.7131155, 0.5613965, 0.3835140, 0.0717551, 0.0463067,
0.5255786, 0.0928743, 0.1386557, -0.7212757, 0.3051646, 0.2635859, -0.5229289, -0.8547997,
0.6653103, -0.1116264, 0.2930650, 0.5135837, 0.7412015, -0.3735900, -0.9826624, -0.6185324,
-0.8464018, -0.4180478, 0.7254488, -0.5188612, -0.3333993, 0.8999060, -0.6015426, -0.6545046,
0.6795465, -0.5157862, 0.4536161, -0.7564244, -0.0614987, 0.9840064, 0.3975551, 0.8684530,
0.6091788, 0.2544823, -0.9745569, -0.1815226, -0.1521985, 0.8436312, -0.9446849, -0.2546227,
0.9108996, -0.2374187, -0.8820541, -0.2937101, 0.2558129, 0.7706293, 0.1066034, -0.7223888,
-0.6807924, -0.5187497, -0.3461997, 0.3319379, -0.5073046, 0.0713026, 0.4598049, -0.9708425,
-0.2323956, 0.3963093, -0.9132538, -0.2047350, 0.1162403, -0.6301352, -0.1114944, -0.4411873,
-0.7517651, 0.9942231, 0.6387486, -0.3516690, 0.2925287, 0.8415794, -0.2203800, 0.1182607,
-0.5032156, 0.4939238, 0.9852490, -0.8617036, -0.8945347, 0.1789286, -0.1909516, 0.2587640,
-0.2992706, 0.6049703, -0.1238372, 0.8297717, -0.3196876, 0.9792059, 0.7898732, 0.8210509,
-0.5545098, -0.5691904, -0.7678227, -0.9643255, -0.1002291, -0.4273028, -0.6697328, -0.3049299,
-0.0368014, 0.4804423, -0.6646156, 0.5903011, -0.1700153, -0.6397213, 0.9845422, -0.5159376,
0.1589690, -0.3279489, -0.1498093, -0.9002322, 0.1960990, 0.3850992, 0.4812583, -0.1506606,
-0.0863564, -0.4061224, -0.3599582, -0.2919797, -0.5094189, 0.7824159, 0.3322580, -0.3275573,
-0.9909980, -0.5806390, 0.4667387, -0.3746538, -0.7436752, 0.5058509, 0.5686203, -0.8828574,
0.2331149, 0.1225447, 0.9276860, -0.2576783, -0.5962995, -0.6098081, -0.0473731, 0.6461973,
-0.8618875, 0.2869696, -0.5910612, 0.2354020, 0.7434812, 0.9635402, -0.7473646, -0.1364276,
0.4180313, 0.1777712, -0.3155821, -0.3896985, -0.5973547, 0.3018475, -0.2226010, 0.6965982,
-0.1711176, 0.4426420, 0.5972827, 0.7491136, 0.5431328, 0.1888770, -0.4517326, 0.7062291,
0.5087549, -0.3582025, -0.4492956, 0.1632529, -0.1689859, 0.9334283, -0.3891996, 0.1138209,
0.7598738, 0.0241726, -0.3133468, -0.0708007, 0.9602417, -0.7650007, -0.6497396, 0.4096349,
-0.7035034, 0.6052362, 0.5920056, -0.4065195, 0.3722862, -0.7039886, -0.2351859, 0.3143256,
-0.8650362, 0.3481469, 0.5242298, 0.2190642, 0.7090682, 0.7368234, 0.3148258, -0.8396302,
-0.8332214, 0.6766308, 0.4428585, 0.5376374, 0.1104256, -0.9560977, 0.8913012, 0.2302127,
-0.7445556, -0.8753514, -0.1434969, 0.7423451, -0.9627953, 0.7919458, -0.8590292, -0.2405730,
0.0733800, -0.1964383, 0.3429065, -0.5199867, -0.6148949, -0.4645573, -0.1036227, 0.1915514,
0.4981042, -0.3142545, -0.1360139, 0.5123143, -0.8319357, 0.2593685, -0.6637208, 0.8695423,
-0.4745009, -0.4598881, 0.2561057, 0.8682946, 0.7572707, -0.2405597, -0.6909520, -0.2329739,
-0.3544887, 0.5916605, -0.5483196, 0.3634111, 0.0485800, 0.1492287, -0.0361141, 0.6510856,
0.9754849, -0.1871928, 0.7787021, -0.6019276, 0.2416331, -0.1160285, 0.8894659, 0.9423820,
-0.7052383, -0.8790381, -0.7129928, 0.5332075, -0.5728216, -0.9184565, 0.0437820, 0.3580015,
-0.7459742, -0.6401960, -0.7465842, -0.0257084, 0.7586666, 0.3472861, 0.3226733, -0.8356623,
0.9038333, 0.9519323, 0.6794367, -0.4118270, -0.1475553, 0.1638173, 0.7039975, 0.0782125,
-0.6468386, -0.4905404, -0.0657285, -0.9094056, -0.1691999, 0.9545628, 0.5260556, 0.0704832,
0.9559255, 0.4109315, 0.0437353, 0.1975988, -0.2173066, 0.4840004, -0.9305912, 0.6281645,
-0.2873839, -0.0092089, -0.7423917, -0.5064726, 0.2959957, 0.3744118, -0.2324660, 0.6419766,
0.0482254, 0.0711853, -0.0668010, -0.6056250, -0.6424942, 0.5091138, -0.7920839, -0.3631541,
0.2925649, 0.8553973, -0.5368195, -0.8043768, 0.6299060, -0.7402435, 0.7831608, -0.4979353,
-0.7786197, 0.1855255, -0.7243119, 0.7581270, 0.7850708, -0.6414960, -0.4423507, -0.4211898,
0.8494025, 0.3603602, -0.3777632, 0.3322407, -0.0483915, -0.8515641, -0.9453503, -0.4536391,
-0.1080792, 0.5246211, 0.2128397, -0.0146389, -0.7508293, -0.0058518, 0.5420505, 0.1439000,
0.1900943, 0.0454271, 0.3117409, 0.1234926, -0.1166942, 0.2856016, 0.8390452, 0.8877837,
0.0886838, -0.7009126, -0.5130350, -0.0999212, 0.3338176, -0.3013774, 0.3526511, 0.9518843,
0.5853393, -0.1422507, -0.9768327, -0.5915277, 0.9691055, 0.4186211, 0.7512146, 0.5220292,
-0.1700221, 0.5423641, 0.5864487, -0.7437551, -0.5076052, -0.8304062, 0.4895252, 0.7349310,
0.7687441, 0.6319372, 0.7462888, 0.2358095};
std::vector<float> h_trainlab = {
-0.7061807, -0.9935827, -1.3077246, -0.3378525, -0.6495246, -2.0123182, 0.0340125, -0.2089733,
-0.8786033, -1.3019919, -1.9427123, -1.9624611, -1.0215918, -0.7701042, -2.3890236, -0.6768685,
-1.5100409, -0.7647975, -0.6509883, -0.9327181, -2.2925701, -1.1547282, -0.0646960, -0.2433849,
-1.3402845, -1.1222004, -1.8060292, -0.5686744, -0.7949885, -0.7014911, -0.4394445, -0.6407220,
-0.7567281, -0.1424980, -0.4449957, -0.0832827, -1.3135824, -0.7259869, -0.6223005, -1.4591261,
-1.5859294, -0.7344378, -0.3131946, -0.8229243, -1.1158352, -0.4810999, -0.6265636, -0.9763480,
-1.3232699, -1.0156538, -0.3958369, -2.3411706, -1.6622960, -0.4680720, -2.0089384, -0.7158608,
-0.3735971, -1.0591518, -0.3007601, -1.9814152, -1.0727452, -0.7844243, -2.3594606, -0.4388914,
-0.1194218, -0.4284076, -0.7608060, -0.7356959, -0.7563467, -1.8871661, -2.3971652, -0.4424445,
-0.7512620, -0.2262175, -0.7759824, -2.5211585, -0.8688839, -0.0325217, -2.0756457, -2.5935947,
-1.1262706, -0.7814806, -2.6152479, -0.5979422, -1.8219779, -1.2011619, -0.9094200, -1.1892029,
-0.6205842, -1.7599165, -1.9918835, -0.7041349, -0.7746859, -0.6861359, -0.5224625, -1.2406723,
-0.1745701, -0.1291239, -2.4182146, -0.5995310, -1.1388247, -0.8812391, -1.1353377, -1.5786207,
-0.5555833, 0.0002464, -0.1457169, -1.1594313, -2.1163798, -1.1098294, -1.4213709, -0.4476795,
-1.5073204, -0.2717116, -0.6787519, -0.8713962, -0.9872876, -0.3698685, 0.0235867, -1.0940261,
-0.8272783, -1.9253905, -0.1709152, -0.6209573, -0.5865176, -0.7986188, -2.1974506, -2.6496017,
-1.9451187, -0.7424771, -1.8817208, -2.2417800, -0.8650095, -0.7006861, -2.0289972, -1.3193644,
-1.8613344, -1.0139089, -0.7310213, -0.5095533, -0.2320652, -2.3944243, 0.0525441, -0.5716605,
-0.0658016, -1.4066644, -0.6430519, -0.5938018, -0.6804599, -0.1180739, -1.7033852, -1.3027941,
-0.6082652, -2.4703887, -0.9920609, -0.3844494, -0.7468968, 0.0337840, -0.7998180, -0.0037226,
-0.5870786, -0.7766853, -0.3147676, -0.7173055, -2.7734269, -0.0547125, -0.4775438, -0.9444610,
-1.4637991, -1.7066195, -0.0135983, -0.6795068, -1.2210661, -0.1762879, -0.9427360, -0.4120364,
-0.6077851, -1.7033054, -1.9354388, -0.6399003, -2.1621227, -1.4899510, -0.5816087, 0.0662278,
-1.7709871, -2.2943379, 0.0671570, -2.2462875, -0.8166682, -1.3488045, -2.3724372, -0.6542480,
-1.6837887, 0.1718501, -0.4232655, -1.9293420, -1.5524519, -0.8903348, -0.8235148, -0.7555137,
-1.2672423, -0.5341824, -0.0800176, -1.8341924, -2.0388451, -1.6274120, -1.0832978, -0.6836474,
-0.7428981, -0.6488642, -2.2992384, -0.3173651, -0.6495681, 0.0820371, -0.2221419, -0.2825119,
-0.4779604, -0.5677801, -0.5407600, 0.1339569, -0.8549058, -0.7177885, -0.4706391, -2.0992089,
-1.7748856, -0.8790807, -0.3359026, -1.0437502, -0.7428065, -0.5449560, 0.2120406, -0.8962944,
-2.9057635, -1.8338823, -0.9476171, 0.0537955, -0.7746540, -0.6021839, -0.9673201, -0.7290961,
-0.7500160, -2.1319913, -1.6356984, -2.4347284, -0.4906021, -0.1930180, -0.7118280, -0.6601136,
0.1714188, -0.4826550};
std::vector<float> h_test = {
0.6506153, -0.2861214, -0.4207479, -0.0879224, 0.6963105, 0.7591472, -0.9145728, 0.3606104,
0.5918564, -0.5548665, -0.4487113, 0.0824032, 0.4425484, -0.9139633, -0.7823172, 0.0768981,
0.0922035, -0.0138858, 0.9646097, 0.2624208, -0.7190498, -0.6117298, -0.8807327, 0.2868101,
-0.8899322, 0.9853774, -0.5898669, 0.6281458, 0.5219784, -0.5437135, -0.2806136, -0.0927834,
-0.2291698, 0.0450774, 0.4253027, 0.6545525, 0.7031374, -0.3601150, 0.0715214, -0.9844534,
-0.8571354, -0.8157709, -0.6361769, -0.5510336, 0.4286138, 0.8863587, -0.7481151, -0.6144726,
-0.7920206, -0.2917536, -0.6506116, -0.4862449, -0.0866336, -0.7439836, 0.3753550, 0.2632956,
-0.2270555, 0.1109649, -0.6320683, 0.0280535, 0.6881603, 0.8163167, 0.1781434, -0.8063828,
0.8032009, -0.6779581, -0.8654890, -0.5322430, 0.3786414, 0.0546245, -0.5542659, 0.6897840,
-0.1039676, -0.0343101, 0.4219748, -0.4535081, 0.7228620, 0.3873561, 0.1427819, -0.2881901,
0.5431166, -0.0090170, -0.8354108, -0.0099369, -0.5904349, 0.2928394, 0.3634137, -0.7485119,
-0.5442900, 0.4072478, -0.4909732, 0.0737537, -0.0973075, -0.0848911, 0.7041450, 0.3288523,
-0.5264588, -0.5135713, 0.5130192, -0.0708379};
std::vector<float> h_testlab = {
-1.6506068, -1.6408135, -0.9171102, -2.2897648, -0.2806881, -0.2297245, -0.4421663, -0.7713085,
-1.6812845, -0.6648566, -0.5840624, -0.8432659, -0.6577426, -1.6213072, -0.2299105, -2.1316719,
-2.6060586, -1.8153329, 0.1657440, -0.8794947, -1.3444440, -0.4118046, -0.3390867, -0.9532273,
0.0358915, -0.6882091, -0.4517245, -0.3681215, -0.6051433, -1.0756192, -0.6731151, -1.0004896,
-2.4808031, -1.0080036, -1.7581659, -0.3644765, -0.2742536, -2.1790992, -1.8354263, 0.2105456,
-0.9973469, -0.2662037, -0.7020552, -0.7884595, -0.6079654, 0.0063403, -1.2439414, -1.3997503,
-0.1228729, -0.9907357
};
std::vector<float> h_trainwts;
std::vector<float> h_testwts;
rmm::device_uvector<float> d_train;
rmm::device_uvector<float> d_trainlab;
rmm::device_uvector<float> d_test;
rmm::device_uvector<float> d_testlab;
rmm::device_uvector<float> d_trainwts;
rmm::device_uvector<float> d_testwts;
};
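// End-to-end symbolic regression test: evolves a population of programs on the
// synthetic training set defined in the fixture, checks the best program's
// training fitness, and then runs inference on the held-out test set.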
TEST_F(GeneticEvolutionTest, SymReg)
{
raft::CompareApprox<float> compApprox(tolerance);
program_t final_progs;
final_progs = (program_t)rmm::mr::get_current_device_resource()->allocate(
hyper_params.population_size * sizeof(program), stream);
std::vector<std::vector<program>> history;
history.reserve(hyper_params.generations);
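// CUDA events are used to time both the training (symFit) and inference
// (symRegPredict) phases on the test stream.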
cudaEvent_t start, stop;
RAFT_CUDA_TRY(cudaEventCreate(&start));
RAFT_CUDA_TRY(cudaEventCreate(&stop));
cudaEventRecord(start, stream);
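// Evolve the population on the training data; symFit fills `final_progs` with
// the final generation and appends each generation's programs to `history`.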
symFit(handle,
d_train.data(),
d_trainlab.data(),
d_trainwts.data(),
n_tr_rows,
n_cols,
hyper_params,
final_progs,
history);
cudaEventRecord(stop, stream);
cudaEventSynchronize(stop);
float training_time;
cudaEventElapsedTime(&training_time, start, stop);
int n_gen = history.size();
std::cout << "Finished training for " << n_gen << " generations." << std::endl;
// Find index of best program
int best_idx = 0;
float opt_fitness = history[n_gen - 1][0].raw_fitness_;
// For all 3 loss functions - min is better
for (int i = 1; i < hyper_params.population_size; ++i) {
if (history[n_gen - 1][i].raw_fitness_ < opt_fitness) {
best_idx = i;
opt_fitness = history[n_gen - 1][i].raw_fitness_;
}
}
std::string eqn = stringify(history[n_gen - 1][best_idx]);
CUML_LOG_DEBUG("Best Index = %d", best_idx);
std::cout << "Raw fitness score on train set is " << history[n_gen - 1][best_idx].raw_fitness_
<< std::endl;
std::cout << "Best AST equation is : " << eqn << std::endl;
// Predict values for test dataset
rmm::device_uvector<float> d_predlabels(n_tst_rows, stream);
cudaEventRecord(start, stream);
cuml::genetic::symRegPredict(
handle, d_test.data(), n_tst_rows, final_progs + best_idx, d_predlabels.data());
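// Copy the device-side predictions back to the host for printing below.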
std::vector<float> h_predlabels(n_tst_rows, 0.0f);
RAFT_CUDA_TRY(cudaMemcpy(
h_predlabels.data(), d_predlabels.data(), n_tst_rows * sizeof(float), cudaMemcpyDeviceToHost));
cudaEventRecord(stop, stream);
cudaEventSynchronize(stop);
float inference_time;
cudaEventElapsedTime(&inference_time, start, stop);
// deallocate the nodes allocated for the last generation inside SymFit
for (auto i = 0; i < hyper_params.population_size; ++i) {
program tmp = program();
raft::copy(&tmp, final_progs + i, 1, stream);
rmm::mr::get_current_device_resource()->deallocate(tmp.nodes, tmp.len * sizeof(node), stream);
tmp.nodes = nullptr;
}
// deallocate the final programs from device memory
rmm::mr::get_current_device_resource()->deallocate(
final_progs, hyper_params.population_size * sizeof(program), stream);
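// The best program's raw fitness on the training set should match the expected
// value within the fixture tolerance.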
ASSERT_TRUE(compApprox(history[n_gen - 1][best_idx].raw_fitness_, 0.0036f));
std::cout << "Some Predicted test values:" << std::endl;
std::copy(
h_predlabels.begin(), h_predlabels.begin() + 10, std::ostream_iterator<float>(std::cout, ";"));
std::cout << std::endl;
std::cout << "Some Actual test values:" << std::endl;
std::copy(
h_testlab.begin(), h_testlab.begin() + 10, std::ostream_iterator<float>(std::cout, ";"));
std::cout << std::endl;
std::cout << "Training time = " << training_time << " ms" << std::endl;
std::cout << "Inference time = " << inference_time << " ms" << std::endl;
}
} // namespace genetic
} // namespace cuml