hip_filename (stringlengths 5–84) | hip_content (stringlengths 79–9.69M) | cuda_filename (stringlengths 4–83) | cuda_content (stringlengths 19–9.69M) |
---|---|---|---|
8384a8d2182eb2578800d5298419e3c9eb3746e3.hip | // !!! This is a file automatically generated by hipify!!!
// RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args
#include <iomanip>
#include <iostream>
#include <cstdlib>
#include <vector>
// CHECK: #include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// CHECK: #include "hipDNN.h"
#include "cudnn.h"
// CHECK: hipError_t err = (f); \
// CHECK: if (err != hipSuccess) { \
#define CUDA_CALL(f) { \
hipError_t err = (f); \
if (err != hipSuccess) { \
std::cout \
<< " Error occurred: " << err << std::endl; \
std::exit(1); \
} \
}
// CHECK: hipdnnStatus_t err = (f); \
// CHECK: if (err != HIPDNN_STATUS_SUCCESS) { \
#define CUDNN_CALL(f) { \
cudnnStatus_t err = (f); \
if (err != CUDNN_STATUS_SUCCESS) { \
std::cout \
<< " Error occurred: " << err << std::endl; \
std::exit(1); \
} \
}
__global__ void dev_const(float *px, float k) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
px[tid] = k;
}
__global__ void dev_iota(float *px) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
px[tid] = tid;
}
void print(const float *data, int n, int c, int h, int w) {
std::vector<float> buffer(1 << 20);
// CHECK: CUDA_CALL(hipMemcpy(
CUDA_CALL(hipMemcpy(
buffer.data(), data,
n * c * h * w * sizeof(float),
// CHECK: hipMemcpyDeviceToHost));
hipMemcpyDeviceToHost));
int a = 0;
for (int i = 0; i < n; ++i) {
for (int j = 0; j < c; ++j) {
std::cout << "n=" << i << ", c=" << j << ":" << std::endl;
for (int k = 0; k < h; ++k) {
for (int l = 0; l < w; ++l) {
std::cout << std::setw(4) << std::right << buffer[a];
++a;
}
std::cout << std::endl;
}
}
}
std::cout << std::endl;
}
int main() {
// CHECK: hipdnnHandle_t cudnn;
cudnnHandle_t cudnn;
// CHECK: CUDNN_CALL(hipdnnCreate(&cudnn));
CUDNN_CALL(cudnnCreate(&cudnn));
// input
const int in_n = 1;
const int in_c = 1;
const int in_h = 5;
const int in_w = 5;
std::cout << "in_n: " << in_n << std::endl;
std::cout << "in_c: " << in_c << std::endl;
std::cout << "in_h: " << in_h << std::endl;
std::cout << "in_w: " << in_w << std::endl;
std::cout << std::endl;
// CHECK: hipdnnTensorDescriptor_t in_desc;
cudnnTensorDescriptor_t in_desc;
// CHECK: CUDNN_CALL(hipdnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
// CHECK: CUDNN_CALL(hipdnnSetTensor4dDescriptor(
CUDNN_CALL(cudnnSetTensor4dDescriptor(
// CHECK: in_desc, HIPDNN_TENSOR_NCHW, HIPDNN_DATA_FLOAT,
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
in_n, in_c, in_h, in_w));
float *in_data;
// CHECK: CUDA_CALL(hipMalloc(
CUDA_CALL(hipMalloc(
&in_data, in_n * in_c * in_h * in_w * sizeof(float)));
// filter
const int filt_k = 1;
const int filt_c = 1;
const int filt_h = 2;
const int filt_w = 2;
std::cout << "filt_k: " << filt_k << std::endl;
std::cout << "filt_c: " << filt_c << std::endl;
std::cout << "filt_h: " << filt_h << std::endl;
std::cout << "filt_w: " << filt_w << std::endl;
std::cout << std::endl;
// CHECK: hipdnnFilterDescriptor_t filt_desc;
cudnnFilterDescriptor_t filt_desc;
// CHECK: CUDNN_CALL(hipdnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
// CHECK: CUDNN_CALL(hipdnnSetFilter4dDescriptor(
CUDNN_CALL(cudnnSetFilter4dDescriptor(
// CHECK: filt_desc, HIPDNN_DATA_FLOAT, HIPDNN_TENSOR_NCHW,
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
filt_k, filt_c, filt_h, filt_w));
float *filt_data;
// CUDA_CALL(hipMalloc(
CUDA_CALL(hipMalloc(
&filt_data, filt_k * filt_c * filt_h * filt_w * sizeof(float)));
// convolution
const int pad_h = 1;
const int pad_w = 1;
const int str_h = 1;
const int str_w = 1;
const int dil_h = 1;
const int dil_w = 1;
std::cout << "pad_h: " << pad_h << std::endl;
std::cout << "pad_w: " << pad_w << std::endl;
std::cout << "str_h: " << str_h << std::endl;
std::cout << "str_w: " << str_w << std::endl;
std::cout << "dil_h: " << dil_h << std::endl;
std::cout << "dil_w: " << dil_w << std::endl;
std::cout << std::endl;
// CHECK: hipdnnConvolutionDescriptor_t conv_desc;
cudnnConvolutionDescriptor_t conv_desc;
// CUDNN_CALL(hipdnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
// CHECK: CUDNN_CALL(hipdnnSetConvolution2dDescriptor(
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
pad_h, pad_w, str_h, str_w, dil_h, dil_w,
// CHECK: HIPDNN_CONVOLUTION, HIPDNN_DATA_FLOAT));
CUDNN_CONVOLUTION, CUDNN_DATA_FLOAT));
// output
int out_n;
int out_c;
int out_h;
int out_w;
// CHECK: CUDNN_CALL(hipdnnGetConvolution2dForwardOutputDim(
CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(
conv_desc, in_desc, filt_desc,
&out_n, &out_c, &out_h, &out_w));
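// For the 5x5 input, 2x2 filter, pad 1, stride 1 and dilation 1 configured above, this should
// report 1 x 1 x 6 x 6 (out = 1 + (5 + 2*1 - 2)/1 = 6).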
std::cout << "out_n: " << out_n << std::endl;
std::cout << "out_c: " << out_c << std::endl;
std::cout << "out_h: " << out_h << std::endl;
std::cout << "out_w: " << out_w << std::endl;
std::cout << std::endl;
// CHECK: hipdnnTensorDescriptor_t out_desc;
cudnnTensorDescriptor_t out_desc;
// CHECK: CUDNN_CALL(hipdnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
// CHECK: CUDNN_CALL(hipdnnSetTensor4dDescriptor(
CUDNN_CALL(cudnnSetTensor4dDescriptor(
// CHECK: out_desc, HIPDNN_TENSOR_NCHW, HIPDNN_DATA_FLOAT,
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
out_n, out_c, out_h, out_w));
float *out_data;
// CHECK: CUDA_CALL(hipMalloc(
CUDA_CALL(hipMalloc(
&out_data, out_n * out_c * out_h * out_w * sizeof(float)));
// algorithm
// CHECK: hipdnnConvolutionFwdAlgo_t algo;
cudnnConvolutionFwdAlgo_t algo;
// CHECK: CUDNN_CALL(hipdnnGetConvolutionForwardAlgorithm(
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnn,
in_desc, filt_desc, conv_desc, out_desc,
// CHECK: HIPDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
std::cout << "Convolution algorithm: " << algo << std::endl;
std::cout << std::endl;
// workspace
size_t ws_size;
// CHECK: CUDNN_CALL(hipdnnGetConvolutionForwardWorkspaceSize(
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnn, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
float *ws_data;
// CHECK: CUDA_CALL(hipMalloc(&ws_data, ws_size));
CUDA_CALL(hipMalloc(&ws_data, ws_size));
std::cout << "Workspace size: " << ws_size << std::endl;
std::cout << std::endl;
// perform
float alpha = 1.f;
float beta = 0.f;
// CHECK: hipLaunchKernelGGL(dev_iota, dim3(in_w * in_h), dim3(in_n * in_c), 0, 0, in_data);
// CHECK: hipLaunchKernelGGL(dev_const, dim3(filt_w * filt_h), dim3(filt_k * filt_c), 0, 0, filt_data, 1.f);
hipLaunchKernelGGL(( dev_iota), dim3(in_w * in_h), dim3(in_n * in_c), 0, 0, in_data);
hipLaunchKernelGGL(( dev_const), dim3(filt_w * filt_h), dim3(filt_k * filt_c), 0, 0, filt_data, 1.f);
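// dev_iota writes 0..24 into the 5x5 input (one thread per element) and dev_const fills the
// 2x2 filter with ones, so each output element printed below is just the zero-padded 2x2 window sum.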
// CHECK: CUDNN_CALL(hipdnnConvolutionForward(
CUDNN_CALL(cudnnConvolutionForward(
cudnn,
&alpha, in_desc, in_data, filt_desc, filt_data,
conv_desc, algo, ws_data, ws_size,
&beta, out_desc, out_data));
// results
std::cout << "in_data:" << std::endl;
print(in_data, in_n, in_c, in_h, in_w);
std::cout << "filt_data:" << std::endl;
print(filt_data, filt_k, filt_c, filt_h, filt_w);
std::cout << "out_data:" << std::endl;
print(out_data, out_n, out_c, out_h, out_w);
// finalizing
// CHECK: CUDA_CALL(hipFree(ws_data));
CUDA_CALL(hipFree(ws_data));
// CHECK: CUDA_CALL(hipFree(out_data));
CUDA_CALL(hipFree(out_data));
// CHECK: CUDNN_CALL(hipdnnDestroyTensorDescriptor(out_desc));
CUDNN_CALL(cudnnDestroyTensorDescriptor(out_desc));
// CHECK: CUDNN_CALL(hipdnnDestroyConvolutionDescriptor(conv_desc));
CUDNN_CALL(cudnnDestroyConvolutionDescriptor(conv_desc));
// CHECK: CUDA_CALL(hipFree(filt_data));
CUDA_CALL(hipFree(filt_data));
// CHECK: CUDNN_CALL(hipdnnDestroyFilterDescriptor(filt_desc));
CUDNN_CALL(cudnnDestroyFilterDescriptor(filt_desc));
// CHECK: CUDA_CALL(hipFree(in_data));
CUDA_CALL(hipFree(in_data));
// CHECK: CUDNN_CALL(hipdnnDestroyTensorDescriptor(in_desc));
CUDNN_CALL(cudnnDestroyTensorDescriptor(in_desc));
// CHECK: CUDNN_CALL(hipdnnDestroy(cudnn));
CUDNN_CALL(cudnnDestroy(cudnn));
return 0;
}
| 8384a8d2182eb2578800d5298419e3c9eb3746e3.cu | // RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args
#include <iomanip>
#include <iostream>
#include <cstdlib>
#include <vector>
// CHECK: #include <hip/hip_runtime.h>
#include <cuda.h>
// CHECK: #include "hipDNN.h"
#include "cudnn.h"
// CHECK: hipError_t err = (f); \
// CHECK: if (err != hipSuccess) { \
#define CUDA_CALL(f) { \
cudaError_t err = (f); \
if (err != cudaSuccess) { \
std::cout \
<< " Error occurred: " << err << std::endl; \
std::exit(1); \
} \
}
// CHECK: hipdnnStatus_t err = (f); \
// CHECK: if (err != HIPDNN_STATUS_SUCCESS) { \
#define CUDNN_CALL(f) { \
cudnnStatus_t err = (f); \
if (err != CUDNN_STATUS_SUCCESS) { \
std::cout \
<< " Error occurred: " << err << std::endl; \
std::exit(1); \
} \
}
__global__ void dev_const(float *px, float k) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
px[tid] = k;
}
__global__ void dev_iota(float *px) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
px[tid] = tid;
}
void print(const float *data, int n, int c, int h, int w) {
std::vector<float> buffer(1 << 20);
// CHECK: CUDA_CALL(hipMemcpy(
CUDA_CALL(cudaMemcpy(
buffer.data(), data,
n * c * h * w * sizeof(float),
// CHECK: hipMemcpyDeviceToHost));
cudaMemcpyDeviceToHost));
int a = 0;
for (int i = 0; i < n; ++i) {
for (int j = 0; j < c; ++j) {
std::cout << "n=" << i << ", c=" << j << ":" << std::endl;
for (int k = 0; k < h; ++k) {
for (int l = 0; l < w; ++l) {
std::cout << std::setw(4) << std::right << buffer[a];
++a;
}
std::cout << std::endl;
}
}
}
std::cout << std::endl;
}
int main() {
// CHECK: hipdnnHandle_t cudnn;
cudnnHandle_t cudnn;
// CHECK: CUDNN_CALL(hipdnnCreate(&cudnn));
CUDNN_CALL(cudnnCreate(&cudnn));
// input
const int in_n = 1;
const int in_c = 1;
const int in_h = 5;
const int in_w = 5;
std::cout << "in_n: " << in_n << std::endl;
std::cout << "in_c: " << in_c << std::endl;
std::cout << "in_h: " << in_h << std::endl;
std::cout << "in_w: " << in_w << std::endl;
std::cout << std::endl;
// CHECK: hipdnnTensorDescriptor_t in_desc;
cudnnTensorDescriptor_t in_desc;
// CHECK: CUDNN_CALL(hipdnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
// CHECK: CUDNN_CALL(hipdnnSetTensor4dDescriptor(
CUDNN_CALL(cudnnSetTensor4dDescriptor(
// CHECK: in_desc, HIPDNN_TENSOR_NCHW, HIPDNN_DATA_FLOAT,
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
in_n, in_c, in_h, in_w));
float *in_data;
// CHECK: CUDA_CALL(hipMalloc(
CUDA_CALL(cudaMalloc(
&in_data, in_n * in_c * in_h * in_w * sizeof(float)));
// filter
const int filt_k = 1;
const int filt_c = 1;
const int filt_h = 2;
const int filt_w = 2;
std::cout << "filt_k: " << filt_k << std::endl;
std::cout << "filt_c: " << filt_c << std::endl;
std::cout << "filt_h: " << filt_h << std::endl;
std::cout << "filt_w: " << filt_w << std::endl;
std::cout << std::endl;
// CHECK: hipdnnFilterDescriptor_t filt_desc;
cudnnFilterDescriptor_t filt_desc;
// CHECK: CUDNN_CALL(hipdnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
// CHECK: CUDNN_CALL(hipdnnSetFilter4dDescriptor(
CUDNN_CALL(cudnnSetFilter4dDescriptor(
// CHECK: filt_desc, HIPDNN_DATA_FLOAT, HIPDNN_TENSOR_NCHW,
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
filt_k, filt_c, filt_h, filt_w));
float *filt_data;
// CUDA_CALL(hipMalloc(
CUDA_CALL(cudaMalloc(
&filt_data, filt_k * filt_c * filt_h * filt_w * sizeof(float)));
// convolution
const int pad_h = 1;
const int pad_w = 1;
const int str_h = 1;
const int str_w = 1;
const int dil_h = 1;
const int dil_w = 1;
std::cout << "pad_h: " << pad_h << std::endl;
std::cout << "pad_w: " << pad_w << std::endl;
std::cout << "str_h: " << str_h << std::endl;
std::cout << "str_w: " << str_w << std::endl;
std::cout << "dil_h: " << dil_h << std::endl;
std::cout << "dil_w: " << dil_w << std::endl;
std::cout << std::endl;
// CHECK: hipdnnConvolutionDescriptor_t conv_desc;
cudnnConvolutionDescriptor_t conv_desc;
// CUDNN_CALL(hipdnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
// CHECK: CUDNN_CALL(hipdnnSetConvolution2dDescriptor(
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
pad_h, pad_w, str_h, str_w, dil_h, dil_w,
// CHECK: HIPDNN_CONVOLUTION, HIPDNN_DATA_FLOAT));
CUDNN_CONVOLUTION, CUDNN_DATA_FLOAT));
// output
int out_n;
int out_c;
int out_h;
int out_w;
// CHECK: CUDNN_CALL(hipdnnGetConvolution2dForwardOutputDim(
CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(
conv_desc, in_desc, filt_desc,
&out_n, &out_c, &out_h, &out_w));
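// For the 5x5 input, 2x2 filter, pad 1, stride 1 and dilation 1 configured above, this should
// report 1 x 1 x 6 x 6 (out = 1 + (5 + 2*1 - 2)/1 = 6).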
std::cout << "out_n: " << out_n << std::endl;
std::cout << "out_c: " << out_c << std::endl;
std::cout << "out_h: " << out_h << std::endl;
std::cout << "out_w: " << out_w << std::endl;
std::cout << std::endl;
// CHECK: hipdnnTensorDescriptor_t out_desc;
cudnnTensorDescriptor_t out_desc;
// CHECK: CUDNN_CALL(hipdnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
// CHECK: CUDNN_CALL(hipdnnSetTensor4dDescriptor(
CUDNN_CALL(cudnnSetTensor4dDescriptor(
// CHECK: out_desc, HIPDNN_TENSOR_NCHW, HIPDNN_DATA_FLOAT,
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
out_n, out_c, out_h, out_w));
float *out_data;
// CHECK: CUDA_CALL(hipMalloc(
CUDA_CALL(cudaMalloc(
&out_data, out_n * out_c * out_h * out_w * sizeof(float)));
// algorithm
// CHECK: hipdnnConvolutionFwdAlgo_t algo;
cudnnConvolutionFwdAlgo_t algo;
// CHECK: CUDNN_CALL(hipdnnGetConvolutionForwardAlgorithm(
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnn,
in_desc, filt_desc, conv_desc, out_desc,
// CHECK: HIPDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
std::cout << "Convolution algorithm: " << algo << std::endl;
std::cout << std::endl;
// workspace
size_t ws_size;
// CHECK: CUDNN_CALL(hipdnnGetConvolutionForwardWorkspaceSize(
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnn, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
float *ws_data;
// CHECK: CUDA_CALL(hipMalloc(&ws_data, ws_size));
CUDA_CALL(cudaMalloc(&ws_data, ws_size));
std::cout << "Workspace size: " << ws_size << std::endl;
std::cout << std::endl;
// perform
float alpha = 1.f;
float beta = 0.f;
// CHECK: hipLaunchKernelGGL(dev_iota, dim3(in_w * in_h), dim3(in_n * in_c), 0, 0, in_data);
// CHECK: hipLaunchKernelGGL(dev_const, dim3(filt_w * filt_h), dim3(filt_k * filt_c), 0, 0, filt_data, 1.f);
dev_iota<<<in_w * in_h, in_n * in_c>>>(in_data);
dev_const<<<filt_w * filt_h, filt_k * filt_c>>>(filt_data, 1.f);
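// dev_iota writes 0..24 into the 5x5 input (one thread per element) and dev_const fills the
// 2x2 filter with ones, so each output element printed below is just the zero-padded 2x2 window sum.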
// CHECK: CUDNN_CALL(hipdnnConvolutionForward(
CUDNN_CALL(cudnnConvolutionForward(
cudnn,
&alpha, in_desc, in_data, filt_desc, filt_data,
conv_desc, algo, ws_data, ws_size,
&beta, out_desc, out_data));
// results
std::cout << "in_data:" << std::endl;
print(in_data, in_n, in_c, in_h, in_w);
std::cout << "filt_data:" << std::endl;
print(filt_data, filt_k, filt_c, filt_h, filt_w);
std::cout << "out_data:" << std::endl;
print(out_data, out_n, out_c, out_h, out_w);
// finalizing
// CHECK: CUDA_CALL(hipFree(ws_data));
CUDA_CALL(cudaFree(ws_data));
// CHECK: CUDA_CALL(hipFree(out_data));
CUDA_CALL(cudaFree(out_data));
// CHECK: CUDNN_CALL(hipdnnDestroyTensorDescriptor(out_desc));
CUDNN_CALL(cudnnDestroyTensorDescriptor(out_desc));
// CHECK: CUDNN_CALL(hipdnnDestroyConvolutionDescriptor(conv_desc));
CUDNN_CALL(cudnnDestroyConvolutionDescriptor(conv_desc));
// CHECK: CUDA_CALL(hipFree(filt_data));
CUDA_CALL(cudaFree(filt_data));
// CHECK: CUDNN_CALL(hipdnnDestroyFilterDescriptor(filt_desc));
CUDNN_CALL(cudnnDestroyFilterDescriptor(filt_desc));
// CHECK: CUDA_CALL(hipFree(in_data));
CUDA_CALL(cudaFree(in_data));
// CHECK: CUDNN_CALL(hipdnnDestroyTensorDescriptor(in_desc));
CUDNN_CALL(cudnnDestroyTensorDescriptor(in_desc));
// CHECK: CUDNN_CALL(hipdnnDestroy(cudnn));
CUDNN_CALL(cudnnDestroy(cudnn));
return 0;
}
|
e2d77cf8cac035c0205a3355a143be706f48bd43.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA-01.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <windows.h>
#define N 2048
#define WS 5
#define MAX_T 1024
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
static void HandleError(hipError_t err, const char *file, int line) {
//hipError_t cudastatus = hipGetLastError();
if (err != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
getchar();
exit(EXIT_FAILURE);
}
}
void MatPrint(float *V, int n) {
for (int i = 0; i < n; i++) {
if (i % n == 0)
putchar('\n');
printf(" %2.1f", V[i]);
}
putchar('\n');
}
double getTime(LARGE_INTEGER start, LARGE_INTEGER end, LARGE_INTEGER countPerSec) {
return (double)(end.QuadPart - start.QuadPart) / countPerSec.QuadPart * 1000;
}
__global__ void FiltrMediana(float *A, float *M) {
// Boundary values
if (threadIdx.x == 0 && blockIdx.x == 0) {
for (int i = 0; i < WS / 2; i++)
M[i] = A[i];
}
if (threadIdx.x == 1 && blockIdx.x == 0) {
for (int i = N - WS / 2; i < N; i++)
M[i] = A[i];
}
// Median filter
// load the window
register float w[WS];
for (int i = 0; i < WS; i++)
{
w[i] = A[i + threadIdx.x + blockIdx.x * MAX_T];
}
// sort within the window
float tmp;
for (int j = 0; j < WS; j++)
{
for (int i = 0; i < WS-1; i++)
{
if (w[i] > w[i+1])
{
tmp = w[i];
w[i] = w[i+1];
w[i+1] = tmp;
}
}
}
// write the result (the median of the window)
M[WS / 2 + threadIdx.x + blockIdx.x * MAX_T] = w[WS / 2];
}
int _tmain(int argc, _TCHAR* argv[]) {
size_t size = N * sizeof(float);
hipError_t e;
e = hipSetDevice(0);
HANDLE_ERROR(e);
float *h_A, *h_M;
h_A = (float *)malloc(size);
h_M = (float *)malloc(size);
for (int i = 0; i < N; i++) {
h_A[i] = i % 9;
}
float *d_A, *d_M;
e = hipMalloc(&d_A, size);
HANDLE_ERROR(e);
e = hipMalloc(&d_M, size);
HANDLE_ERROR(e);
e = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
HANDLE_ERROR(e);
printf("Sygnal wejsciowy:");
dim3 block(MAX_T - WS / 2, 1, 1);
dim3 grid(2, 1, 1);
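// Launch geometry: 2 blocks of (MAX_T - WS/2) = 1022 threads, one median per thread; two threads of
// block 0 additionally copy the WS/2 unfiltered samples at each end. Note that block 1 starts writing
// at WS/2 + MAX_T = 1026, so M[1024..1025] are never produced and its last threads read slightly past
// the end of A as written.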
hipLaunchKernelGGL(( FiltrMediana), dim3(grid), dim3(block), 0, 0, d_A, d_M);
e = hipMemcpy(h_M, d_M, size, hipMemcpyDeviceToHost);
HANDLE_ERROR(e);
printf("Sygnal po filtracji medianowej:");
MatPrint(h_M, N);
hipFree(d_A);
hipFree(d_M);
free(h_A);
free(h_M);
getchar();
return 0;
}
| e2d77cf8cac035c0205a3355a143be706f48bd43.cu | // CUDA-01.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#include <cuda_runtime.h>
#include <stdlib.h>
#include <windows.h>
#define N 2048
#define WS 5
#define MAX_T 1024
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
static void HandleError(cudaError_t err, const char *file, int line) {
//cudaError_t cudastatus = cudaGetLastError();
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
getchar();
exit(EXIT_FAILURE);
}
}
void MatPrint(float *V, int n) {
for (int i = 0; i < n; i++) {
if (i % n == 0)
putchar('\n');
printf(" %2.1f", V[i]);
}
putchar('\n');
}
double getTime(LARGE_INTEGER start, LARGE_INTEGER end, LARGE_INTEGER countPerSec) {
return (double)(end.QuadPart - start.QuadPart) / countPerSec.QuadPart * 1000;
}
__global__ void FiltrMediana(float *A, float *M) {
// Boundary values
if (threadIdx.x == 0 && blockIdx.x == 0) {
for (int i = 0; i < WS / 2; i++)
M[i] = A[i];
}
if (threadIdx.x == 1 && blockIdx.x == 0) {
for (int i = N - WS / 2; i < N; i++)
M[i] = A[i];
}
// Median filter
// load the window
register float w[WS];
for (int i = 0; i < WS; i++)
{
w[i] = A[i + threadIdx.x + blockIdx.x * MAX_T];
}
// sort within the window
float tmp;
for (int j = 0; j < WS; j++)
{
for (int i = 0; i < WS-1; i++)
{
if (w[i] > w[i+1])
{
tmp = w[i];
w[i] = w[i+1];
w[i+1] = tmp;
}
}
}
// write the result (the median of the window)
M[WS / 2 + threadIdx.x + blockIdx.x * MAX_T] = w[WS / 2];
}
int _tmain(int argc, _TCHAR* argv[]) {
size_t size = N * sizeof(float);
cudaError_t e;
e = cudaSetDevice(0);
HANDLE_ERROR(e);
float *h_A, *h_M;
h_A = (float *)malloc(size);
h_M = (float *)malloc(size);
for (int i = 0; i < N; i++) {
h_A[i] = i % 9;
}
float *d_A, *d_M;
e = cudaMalloc(&d_A, size);
HANDLE_ERROR(e);
e = cudaMalloc(&d_M, size);
HANDLE_ERROR(e);
e = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
HANDLE_ERROR(e);
printf("Sygnal wejsciowy:");
dim3 block(MAX_T - WS / 2, 1, 1);
dim3 grid(2, 1, 1);
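// Launch geometry: 2 blocks of (MAX_T - WS/2) = 1022 threads, one median per thread; two threads of
// block 0 additionally copy the WS/2 unfiltered samples at each end. Note that block 1 starts writing
// at WS/2 + MAX_T = 1026, so M[1024..1025] are never produced and its last threads read slightly past
// the end of A as written.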
FiltrMediana<<<grid, block>>>(d_A, d_M);
e = cudaMemcpy(h_M, d_M, size, cudaMemcpyDeviceToHost);
HANDLE_ERROR(e);
printf("Sygnal po filtracji medianowej:");
MatPrint(h_M, N);
cudaFree(d_A);
cudaFree(d_M);
free(h_A);
free(h_M);
getchar();
return 0;
}
|
f8432cc3959a1f2ed6488379f23b262b90abdf93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/less.hpp"
#include "complex.hpp"
__device__ bool operator<(hipComplex lhs, hipComplex rhs){
return lhs.x < rhs.x || (lhs.x == rhs.x && lhs.y < rhs.y);
}
__device__ bool operator<(hipDoubleComplex lhs, hipDoubleComplex rhs){
return lhs.x < rhs.x || (lhs.x == rhs.x && lhs.y < rhs.y);
}
template <typename T>
__global__ void less_kernel(size_t n, const T* a, size_t inca, const T* b, size_t incb, bool* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = a[inca * index] < b[incb * index];
}
}
template <typename T>
void less_kernel_run(size_t n, const T* a, size_t inca, const T* b, size_t incb, bool* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, less_kernel<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
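// The occupancy API above only suggests a block size; because less_kernel uses a grid-stride loop,
// any positive grid size is still correct and this sizing merely spreads the work evenly.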
hipLaunchKernelGGL(( less_kernel<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, a, inca, b, incb, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
hipDeviceSynchronize();
#endif
}
void egblas_sless(size_t n, const float* a, size_t inca, const float* b, size_t incb, bool* y, size_t incy) {
less_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_dless(size_t n, const double* a, size_t inca, const double* b, size_t incb, bool* y, size_t incy) {
less_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_cless(size_t n, const hipComplex* a, size_t inca, const hipComplex* b, size_t incb, bool* y, size_t incy) {
less_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_zless(size_t n, const hipDoubleComplex* a, size_t inca, const hipDoubleComplex* b, size_t incb, bool* y, size_t incy) {
less_kernel_run(n, a, inca, b, incb, y, incy);
}
| f8432cc3959a1f2ed6488379f23b262b90abdf93.cu | //=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/less.hpp"
#include "complex.hpp"
__device__ bool operator<(cuComplex lhs, cuComplex rhs){
return lhs.x < rhs.x || (lhs.x == rhs.x && lhs.y < rhs.y);
}
__device__ bool operator<(cuDoubleComplex lhs, cuDoubleComplex rhs){
return lhs.x < rhs.x || (lhs.x == rhs.x && lhs.y < rhs.y);
}
template <typename T>
__global__ void less_kernel(size_t n, const T* a, size_t inca, const T* b, size_t incb, bool* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = a[inca * index] < b[incb * index];
}
}
template <typename T>
void less_kernel_run(size_t n, const T* a, size_t inca, const T* b, size_t incb, bool* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, less_kernel<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
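// The occupancy API above only suggests a block size; because less_kernel uses a grid-stride loop,
// any positive grid size is still correct and this sizing merely spreads the work evenly.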
less_kernel<T><<<gridSize, blockSize>>>(n, a, inca, b, incb, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
cudaDeviceSynchronize();
#endif
}
void egblas_sless(size_t n, const float* a, size_t inca, const float* b, size_t incb, bool* y, size_t incy) {
less_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_dless(size_t n, const double* a, size_t inca, const double* b, size_t incb, bool* y, size_t incy) {
less_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_cless(size_t n, const cuComplex* a, size_t inca, const cuComplex* b, size_t incb, bool* y, size_t incy) {
less_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_zless(size_t n, const cuDoubleComplex* a, size_t inca, const cuDoubleComplex* b, size_t incb, bool* y, size_t incy) {
less_kernel_run(n, a, inca, b, incb, y, incy);
}
|
178dfc535c8e1671dcfa238fe6f51e5cb5a57821.hip | // !!! This is a file automatically generated by hipify!!!
#include "private.h"
#ifdef __cplusplus
extern "C" {
#endif
#define CG_BEST_TOL 1e-9
#define CG_MIN_TOL 1e-1
#define CUDA_CHECK_ERR \
do { \
hipError_t err = hipGetLastError(); \
if (err != hipSuccess) { \
printf("%s:%d:%s\n ERROR_CUDA: %s\n", __FILE__, __LINE__, __func__, \
hipGetErrorString(err)); \
} \
} while (0)
#ifndef EXTRAVERBOSE
#ifndef FLOAT
#define CUBLAS(x) cublasD ## x
#define CUSPARSE(x) cusparseD ## x
#else
#define CUBLAS(x) cublasS ## x
#define CUSPARSE(x) cusparseS ## x
#endif
#else
#ifndef FLOAT
#define CUBLAS(x) CUDA_CHECK_ERR; cublasD ## x
#define CUSPARSE(x) CUDA_CHECK_ERR; cusparseD ## x
#else
#define CUBLAS(x) CUDA_CHECK_ERR; cublasS ## x
#define CUSPARSE(x) CUDA_CHECK_ERR; cusparseS ## x
#endif
#endif
static scs_int totCgIts;
static timer linsysTimer;
static scs_float totalSolveTime;
/*
CUDA matrix routines only for CSR, not CSC matrices:
CSC CSR GPU Mult
A (m x n) A' (n x m) Ag accumByATransGpu
A'(n x m) A (m x n) Agt accumByAGpu
*/
void accumByAtransGpu(const Priv * p, const scs_float *x, scs_float *y) {
/* y += A'*x
x and y MUST be on GPU already
*/
const scs_float onef = 1.0;
AMatrix * Ag = p->Ag;
CUSPARSE(csrmv)(p->cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, Ag->n, Ag->m, p->Annz, &onef, p->descr, Ag->x, Ag->p, Ag->i, x, &onef, y);
}
void accumByAGpu(const Priv * p, const scs_float *x, scs_float *y) {
/* y += A*x
x and y MUST be on GPU already
*/
const scs_float onef = 1.0;
AMatrix * Agt = p->Agt;
CUSPARSE(csrmv)(p->cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, Agt->n, Agt->m, p->Annz, &onef, p->descr, Agt->x, Agt->p, Agt->i, x, &onef, y);
}
/* do not use within pcg, reuses memory */
void accumByAtrans(const AMatrix * A, Priv * p, const scs_float *x, scs_float *y) {
scs_float * v_m = p->tmp_m;
scs_float * v_n = p->r;
hipMemcpy(v_m, x, A->m * sizeof(scs_float), hipMemcpyHostToDevice);
hipMemcpy(v_n, y, A->n * sizeof(scs_float), hipMemcpyHostToDevice);
accumByAtransGpu(p, v_m, v_n);
hipMemcpy(y, v_n, A->n * sizeof(scs_float), hipMemcpyDeviceToHost);
}
/* do not use within pcg, reuses memory */
void accumByA(const AMatrix * A, Priv * p, const scs_float *x, scs_float *y) {
scs_float * v_m = p->tmp_m;
scs_float * v_n = p->r;
hipMemcpy(v_n, x, A->n * sizeof(scs_float), hipMemcpyHostToDevice);
hipMemcpy(v_m, y, A->m * sizeof(scs_float), hipMemcpyHostToDevice);
accumByAGpu(p, v_n, v_m);
hipMemcpy(y, v_m, A->m * sizeof(scs_float), hipMemcpyDeviceToHost);
}
char * getLinSysMethod(const AMatrix * A, const Settings * s) {
char * str = (char *)scs_malloc(sizeof(char) * 128);
sprintf(str, "sparse-indirect GPU, nnz in A = %li, CG tol ~ 1/iter^(%2.2f)", (long ) A->p[A->n], s->cg_rate);
return str;
}
char * getLinSysSummary(Priv * p, const Info * info) {
char * str = (char *)scs_malloc(sizeof(char) * 128);
sprintf(str, "\tLin-sys: avg # CG iterations: %2.2f, avg solve time: %1.2es\n",
(scs_float ) totCgIts / (info->iter + 1), totalSolveTime / (info->iter + 1) / 1e3);
totCgIts = 0;
totalSolveTime = 0;
return str;
}
void cudaFreeAMatrix(AMatrix * A) {
if(A->x)
hipFree(A->x);
if(A->i)
hipFree(A->i);
if(A->p)
hipFree(A->p);
}
void freePriv(Priv * p) {
if (p) {
if (p->p)
hipFree(p->p);
if (p->r)
hipFree(p->r);
if (p->Gp)
hipFree(p->Gp);
if (p->bg)
hipFree(p->bg);
if (p->tmp_m)
hipFree(p->tmp_m);
if (p->Ag) {
cudaFreeAMatrix(p->Ag);
scs_free(p->Ag);
}
if (p->Agt) {
cudaFreeAMatrix(p->Agt);
scs_free(p->Agt);
}
hipsparseDestroy(p->cusparseHandle);
hipblasDestroy(p->cublasHandle);
hipDeviceReset();
scs_free(p);
}
}
/*y = (RHO_X * I + A'A)x */
static void matVec(const AMatrix * A, const Settings * s, Priv * p, const scs_float * x, scs_float * y) {
/* x and y MUST already be loaded to GPU */
scs_float * tmp_m = p->tmp_m; /* temp memory */
hipMemset(tmp_m, 0, A->m * sizeof(scs_float));
accumByAGpu(p, x, tmp_m);
hipMemset(y, 0, A->n * sizeof(scs_float));
accumByAtransGpu(p, tmp_m, y);
CUBLAS(axpy)(p->cublasHandle, A->n, &(s->rho_x), x, 1, y, 1);
}
Priv * initPriv(const AMatrix * A, const Settings * stgs) {
hipError_t err;
Priv * p = (Priv *)scs_calloc(1, sizeof(Priv));
p->Annz = A->p[A->n];
p->cublasHandle = 0;
p->cusparseHandle = 0;
p->descr = 0;
totalSolveTime = 0;
totCgIts = 0;
/* Get handle to the CUBLAS context */
hipblasCreate(&p->cublasHandle);
/* Get handle to the CUSPARSE context */
hipsparseCreate(&p->cusparseHandle);
/* Matrix description */
hipsparseCreateMatDescr(&p->descr);
hipsparseSetMatType(p->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(p->descr, HIPSPARSE_INDEX_BASE_ZERO);
AMatrix * Ag = (AMatrix *)scs_malloc(sizeof(AMatrix));
Ag->n = A->n;
Ag->m = A->m;
p->Ag = Ag;
AMatrix * Agt = (AMatrix *)scs_malloc(sizeof(AMatrix));
Agt->n = A->m;
Agt->m = A->n;
p->Agt = Agt;
hipMalloc((void **)&Ag->i, (A->p[A->n]) * sizeof(scs_int));
hipMalloc((void **)&Ag->p, (A->n + 1) * sizeof(scs_int));
hipMalloc((void **)&Ag->x, (A->p[A->n]) * sizeof(scs_float));
hipMalloc((void **)&p->p, A->n * sizeof(scs_float));
hipMalloc((void **)&p->r, A->n * sizeof(scs_float));
hipMalloc((void **)&p->Gp, A->n * sizeof(scs_float));
hipMalloc((void **)&p->bg, (A->n + A->m) * sizeof(scs_float));
hipMalloc((void **)&p->tmp_m, A->m * sizeof(scs_float)); /* intermediate result */
hipMemcpy(Ag->i, A->i, (A->p[A->n]) * sizeof(scs_int), hipMemcpyHostToDevice);
hipMemcpy(Ag->p, A->p, (A->n + 1) * sizeof(scs_int), hipMemcpyHostToDevice);
hipMemcpy(Ag->x, A->x, (A->p[A->n]) * sizeof(scs_float), hipMemcpyHostToDevice);
hipMalloc((void **)&Agt->i, (A->p[A->n]) * sizeof(scs_int));
hipMalloc((void **)&Agt->p, (A->m + 1) * sizeof(scs_int));
hipMalloc((void **)&Agt->x, (A->p[A->n]) * sizeof(scs_float));
/* transpose Ag into Agt for faster multiplies */
/* TODO: memory intensive, could perform transpose in CPU and copy to GPU */
CUSPARSE(csr2csc)(p->cusparseHandle, A->n, A->m, A->p[A->n], Ag->x, Ag->p, Ag->i, Agt->x, Agt->i, Agt->p, HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO);
err = hipGetLastError();
if (err != hipSuccess) {
printf("%s:%d:%s\nERROR_CUDA: %s\n", __FILE__, __LINE__, __func__, hipGetErrorString(err));
freePriv(p);
return SCS_NULL;
}
return p;
}
/* solves (I+A'A)x = b, s warm start, solution stored in bg (on GPU) */
static scs_int pcg(const AMatrix * A, const Settings * stgs, Priv * pr, const scs_float * s, scs_float * bg, scs_int max_its,
scs_float tol) {
scs_int i, n = A->n;
scs_float alpha, nrm_r, nrm_r_old, pGp, negAlpha, beta;
scs_float onef = 1.0, negOnef = -1.0;
scs_float *p = pr->p; /* cg direction */
scs_float *Gp = pr->Gp; /* updated CG direction */
scs_float *r = pr->r; /* cg residual */
hipblasHandle_t cublasHandle = pr->cublasHandle;
if (s == SCS_NULL) {
hipMemcpy(r, bg, n * sizeof(scs_float), hipMemcpyDeviceToDevice);
hipMemset(bg, 0, n * sizeof(scs_float));
} else {
/* p contains bg temporarily */
hipMemcpy(p, bg, n * sizeof(scs_float), hipMemcpyDeviceToDevice);
/* bg contains s */
hipMemcpy(bg, s, n * sizeof(scs_float), hipMemcpyHostToDevice);
matVec(A, stgs, pr, bg, r);
CUBLAS(axpy)(cublasHandle, n, &negOnef, p, 1, r, 1);
CUBLAS(scal)(cublasHandle, n, &negOnef, r, 1);
}
/* for some reason nrm2 is VERY slow */
/* CUBLAS(nrm2)(cublasHandle, n, r, 1, &nrm_r); */
CUBLAS(dot)(cublasHandle, n, r, 1, r, 1, &nrm_r);
nrm_r = SQRTF(nrm_r);
/* check to see if we need to run CG at all */
if (nrm_r < MIN(tol, 1e-18)) {
return 0;
}
/* put p in r, replacing temp mem */
hipMemcpy(p, r, n * sizeof(scs_float), hipMemcpyDeviceToDevice);
for (i = 0; i < max_its; ++i) {
matVec(A, stgs, pr, p, Gp);
CUBLAS(dot)(cublasHandle, n, p, 1, Gp, 1, &pGp);
alpha = (nrm_r * nrm_r) / pGp;
negAlpha = -alpha;
CUBLAS(axpy)(cublasHandle, n, &alpha, p, 1, bg, 1);
CUBLAS(axpy)(cublasHandle, n, &negAlpha, Gp, 1, r, 1);
nrm_r_old = nrm_r;
/* for some reason nrm2 is VERY slow */
/* CUBLAS(nrm2)(cublasHandle, n, r, 1, &nrm_r); */
CUBLAS(dot)(cublasHandle, n, r, 1, r, 1, &nrm_r);
nrm_r = SQRTF(nrm_r);
if (nrm_r < tol) {
i++;
break;
}
beta = (nrm_r * nrm_r) / (nrm_r_old * nrm_r_old);
CUBLAS(scal)(cublasHandle, n, &beta, p, 1);
CUBLAS(axpy)(cublasHandle, n, &onef, r, 1, p, 1);
}
#if EXTRAVERBOSE > 0
scs_printf("tol: %.4e, resid: %.4e, iters: %li\n", tol, nrm_r, (long) i+1);
#endif
return i;
}
#ifdef TEST_GPU_MAT_MUL
void accumByAtransHost(const AMatrix * A, Priv * p, const scs_float *x, scs_float *y) {
_accumByAtrans(A->n, A->x, A->i, A->p, x, y);
}
void accumByAHost(const AMatrix * A, Priv * p, const scs_float *x, scs_float *y) {
_accumByA(A->n, A->x, A->i, A->p, x, y);
}
void testGpuMatMul(const AMatrix * A, Priv * p, scs_float * b) {
/* test to see if matrix multiplication codes agree */
scs_float t[A->n + A->m], u[A->n + A->m], *bg;
hipMalloc((void **)&bg, (A->n + A->m) * sizeof(scs_float));
hipMemcpy(bg, b, (A->n + A->m) * sizeof(scs_float), hipMemcpyHostToDevice);
memcpy(t, b, (A->n + A->m) * sizeof(scs_float));
accumByAtransGpu(p, &(bg[A->n]), bg);
accumByAtransHost(A, p, &(t[A->n]), t);
hipMemcpy(u, bg, (A->n + A->m) * sizeof(scs_float), hipMemcpyDeviceToHost);
printf("A trans multiplication err %2.e\n", calcNormDiff(u, t, A->n));
accumByAGpu(p, bg, &(bg[A->n]));
accumByAHost(A, p, t, &(t[A->n]));
hipMemcpy(u, bg, (A->n + A->m) * sizeof(scs_float), hipMemcpyDeviceToHost);
printf("A multiplcation err %2.e\n", calcNormDiff(&(u[A->n]), &(t[A->n]), A->m));
hipFree(bg);
}
#endif
scs_int solveLinSys(const AMatrix * A, const Settings * stgs, Priv * p, scs_float * b, const scs_float * s, scs_int iter) {
scs_int cgIts;
scs_float * bg = p->bg;
scs_float negOnef = -1.0;
scs_float cgTol = calcNorm(b, A->n) * (iter < 0 ? CG_BEST_TOL : CG_MIN_TOL / POWF((scs_float) iter + 1, stgs->cg_rate));
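/* cgTol is relative to ||b|| and tightens like 1/(iter+1)^cg_rate as the outer iterations
progress; a negative iter requests the tightest tolerance (CG_BEST_TOL) */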
tic(&linsysTimer);
/* solves Mx = b, for x but stores result in b */
/* s contains warm-start (if available) */
#ifdef TEST_GPU_MAT_MUL
testGpuMatMul(A, p, b);
#endif
/* all on GPU */
hipMemcpy(bg, b, (A->n + A->m) * sizeof(scs_float), hipMemcpyHostToDevice);
accumByAtransGpu(p, &(bg[A->n]), bg);
/* solves (I+A'A)x = b, s warm start, solution stored in b */
cgIts = pcg(A, stgs, p, s, bg, A->n, MAX(cgTol, CG_BEST_TOL));
CUBLAS(scal)(p->cublasHandle, A->m, &negOnef, &(bg[A->n]), 1);
accumByAGpu(p, bg, &(bg[A->n]));
hipMemcpy(b, bg, (A->n + A->m) * sizeof(scs_float), hipMemcpyDeviceToHost);
if (iter >= 0) {
totCgIts += cgIts;
}
totalSolveTime += tocq(&linsysTimer);
#if EXTRAVERBOSE > 0
scs_printf("linsys solve time: %1.2es\n", tocq(&linsysTimer) / 1e3);
#endif
return 0;
}
#ifdef __cplusplus
}
#endif
| 178dfc535c8e1671dcfa238fe6f51e5cb5a57821.cu | #include "private.h"
#ifdef __cplusplus
extern "C" {
#endif
#define CG_BEST_TOL 1e-9
#define CG_MIN_TOL 1e-1
#define CUDA_CHECK_ERR \
do { \
cudaError_t err = cudaGetLastError(); \
if (err != cudaSuccess) { \
printf("%s:%d:%s\n ERROR_CUDA: %s\n", __FILE__, __LINE__, __func__, \
cudaGetErrorString(err)); \
} \
} while (0)
#ifndef EXTRAVERBOSE
#ifndef FLOAT
#define CUBLAS(x) cublasD ## x
#define CUSPARSE(x) cusparseD ## x
#else
#define CUBLAS(x) cublasS ## x
#define CUSPARSE(x) cusparseS ## x
#endif
#else
#ifndef FLOAT
#define CUBLAS(x) CUDA_CHECK_ERR; cublasD ## x
#define CUSPARSE(x) CUDA_CHECK_ERR; cusparseD ## x
#else
#define CUBLAS(x) CUDA_CHECK_ERR; cublasS ## x
#define CUSPARSE(x) CUDA_CHECK_ERR; cusparseS ## x
#endif
#endif
static scs_int totCgIts;
static timer linsysTimer;
static scs_float totalSolveTime;
/*
CUDA matrix routines only for CSR, not CSC matrices:
CSC CSR GPU Mult
A (m x n) A' (n x m) Ag accumByATransGpu
A'(n x m) A (m x n) Agt accumByAGpu
*/
void accumByAtransGpu(const Priv * p, const scs_float *x, scs_float *y) {
/* y += A'*x
x and y MUST be on GPU already
*/
const scs_float onef = 1.0;
AMatrix * Ag = p->Ag;
CUSPARSE(csrmv)(p->cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, Ag->n, Ag->m, p->Annz, &onef, p->descr, Ag->x, Ag->p, Ag->i, x, &onef, y);
}
void accumByAGpu(const Priv * p, const scs_float *x, scs_float *y) {
/* y += A*x
x and y MUST be on GPU already
*/
const scs_float onef = 1.0;
AMatrix * Agt = p->Agt;
CUSPARSE(csrmv)(p->cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, Agt->n, Agt->m, p->Annz, &onef, p->descr, Agt->x, Agt->p, Agt->i, x, &onef, y);
}
/* do not use within pcg, reuses memory */
void accumByAtrans(const AMatrix * A, Priv * p, const scs_float *x, scs_float *y) {
scs_float * v_m = p->tmp_m;
scs_float * v_n = p->r;
cudaMemcpy(v_m, x, A->m * sizeof(scs_float), cudaMemcpyHostToDevice);
cudaMemcpy(v_n, y, A->n * sizeof(scs_float), cudaMemcpyHostToDevice);
accumByAtransGpu(p, v_m, v_n);
cudaMemcpy(y, v_n, A->n * sizeof(scs_float), cudaMemcpyDeviceToHost);
}
/* do not use within pcg, reuses memory */
void accumByA(const AMatrix * A, Priv * p, const scs_float *x, scs_float *y) {
scs_float * v_m = p->tmp_m;
scs_float * v_n = p->r;
cudaMemcpy(v_n, x, A->n * sizeof(scs_float), cudaMemcpyHostToDevice);
cudaMemcpy(v_m, y, A->m * sizeof(scs_float), cudaMemcpyHostToDevice);
accumByAGpu(p, v_n, v_m);
cudaMemcpy(y, v_m, A->m * sizeof(scs_float), cudaMemcpyDeviceToHost);
}
char * getLinSysMethod(const AMatrix * A, const Settings * s) {
char * str = (char *)scs_malloc(sizeof(char) * 128);
sprintf(str, "sparse-indirect GPU, nnz in A = %li, CG tol ~ 1/iter^(%2.2f)", (long ) A->p[A->n], s->cg_rate);
return str;
}
char * getLinSysSummary(Priv * p, const Info * info) {
char * str = (char *)scs_malloc(sizeof(char) * 128);
sprintf(str, "\tLin-sys: avg # CG iterations: %2.2f, avg solve time: %1.2es\n",
(scs_float ) totCgIts / (info->iter + 1), totalSolveTime / (info->iter + 1) / 1e3);
totCgIts = 0;
totalSolveTime = 0;
return str;
}
void cudaFreeAMatrix(AMatrix * A) {
if(A->x)
cudaFree(A->x);
if(A->i)
cudaFree(A->i);
if(A->p)
cudaFree(A->p);
}
void freePriv(Priv * p) {
if (p) {
if (p->p)
cudaFree(p->p);
if (p->r)
cudaFree(p->r);
if (p->Gp)
cudaFree(p->Gp);
if (p->bg)
cudaFree(p->bg);
if (p->tmp_m)
cudaFree(p->tmp_m);
if (p->Ag) {
cudaFreeAMatrix(p->Ag);
scs_free(p->Ag);
}
if (p->Agt) {
cudaFreeAMatrix(p->Agt);
scs_free(p->Agt);
}
cusparseDestroy(p->cusparseHandle);
cublasDestroy(p->cublasHandle);
cudaDeviceReset();
scs_free(p);
}
}
/*y = (RHO_X * I + A'A)x */
static void matVec(const AMatrix * A, const Settings * s, Priv * p, const scs_float * x, scs_float * y) {
/* x and y MUST already be loaded to GPU */
scs_float * tmp_m = p->tmp_m; /* temp memory */
cudaMemset(tmp_m, 0, A->m * sizeof(scs_float));
accumByAGpu(p, x, tmp_m);
cudaMemset(y, 0, A->n * sizeof(scs_float));
accumByAtransGpu(p, tmp_m, y);
CUBLAS(axpy)(p->cublasHandle, A->n, &(s->rho_x), x, 1, y, 1);
}
Priv * initPriv(const AMatrix * A, const Settings * stgs) {
cudaError_t err;
Priv * p = (Priv *)scs_calloc(1, sizeof(Priv));
p->Annz = A->p[A->n];
p->cublasHandle = 0;
p->cusparseHandle = 0;
p->descr = 0;
totalSolveTime = 0;
totCgIts = 0;
/* Get handle to the CUBLAS context */
cublasCreate(&p->cublasHandle);
/* Get handle to the CUSPARSE context */
cusparseCreate(&p->cusparseHandle);
/* Matrix description */
cusparseCreateMatDescr(&p->descr);
cusparseSetMatType(p->descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(p->descr, CUSPARSE_INDEX_BASE_ZERO);
AMatrix * Ag = (AMatrix *)scs_malloc(sizeof(AMatrix));
Ag->n = A->n;
Ag->m = A->m;
p->Ag = Ag;
AMatrix * Agt = (AMatrix *)scs_malloc(sizeof(AMatrix));
Agt->n = A->m;
Agt->m = A->n;
p->Agt = Agt;
cudaMalloc((void **)&Ag->i, (A->p[A->n]) * sizeof(scs_int));
cudaMalloc((void **)&Ag->p, (A->n + 1) * sizeof(scs_int));
cudaMalloc((void **)&Ag->x, (A->p[A->n]) * sizeof(scs_float));
cudaMalloc((void **)&p->p, A->n * sizeof(scs_float));
cudaMalloc((void **)&p->r, A->n * sizeof(scs_float));
cudaMalloc((void **)&p->Gp, A->n * sizeof(scs_float));
cudaMalloc((void **)&p->bg, (A->n + A->m) * sizeof(scs_float));
cudaMalloc((void **)&p->tmp_m, A->m * sizeof(scs_float)); /* intermediate result */
cudaMemcpy(Ag->i, A->i, (A->p[A->n]) * sizeof(scs_int), cudaMemcpyHostToDevice);
cudaMemcpy(Ag->p, A->p, (A->n + 1) * sizeof(scs_int), cudaMemcpyHostToDevice);
cudaMemcpy(Ag->x, A->x, (A->p[A->n]) * sizeof(scs_float), cudaMemcpyHostToDevice);
cudaMalloc((void **)&Agt->i, (A->p[A->n]) * sizeof(scs_int));
cudaMalloc((void **)&Agt->p, (A->m + 1) * sizeof(scs_int));
cudaMalloc((void **)&Agt->x, (A->p[A->n]) * sizeof(scs_float));
/* transpose Ag into Agt for faster multiplies */
/* TODO: memory intensive, could perform transpose in CPU and copy to GPU */
CUSPARSE(csr2csc)(p->cusparseHandle, A->n, A->m, A->p[A->n], Ag->x, Ag->p, Ag->i, Agt->x, Agt->i, Agt->p, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO);
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("%s:%d:%s\nERROR_CUDA: %s\n", __FILE__, __LINE__, __func__, cudaGetErrorString(err));
freePriv(p);
return SCS_NULL;
}
return p;
}
/* solves (I+A'A)x = b, s warm start, solution stored in bg (on GPU) */
static scs_int pcg(const AMatrix * A, const Settings * stgs, Priv * pr, const scs_float * s, scs_float * bg, scs_int max_its,
scs_float tol) {
scs_int i, n = A->n;
scs_float alpha, nrm_r, nrm_r_old, pGp, negAlpha, beta;
scs_float onef = 1.0, negOnef = -1.0;
scs_float *p = pr->p; /* cg direction */
scs_float *Gp = pr->Gp; /* updated CG direction */
scs_float *r = pr->r; /* cg residual */
cublasHandle_t cublasHandle = pr->cublasHandle;
if (s == SCS_NULL) {
cudaMemcpy(r, bg, n * sizeof(scs_float), cudaMemcpyDeviceToDevice);
cudaMemset(bg, 0, n * sizeof(scs_float));
} else {
/* p contains bg temporarily */
cudaMemcpy(p, bg, n * sizeof(scs_float), cudaMemcpyDeviceToDevice);
/* bg contains s */
cudaMemcpy(bg, s, n * sizeof(scs_float), cudaMemcpyHostToDevice);
matVec(A, stgs, pr, bg, r);
CUBLAS(axpy)(cublasHandle, n, &negOnef, p, 1, r, 1);
CUBLAS(scal)(cublasHandle, n, &negOnef, r, 1);
}
/* for some reason nrm2 is VERY slow */
/* CUBLAS(nrm2)(cublasHandle, n, r, 1, &nrm_r); */
CUBLAS(dot)(cublasHandle, n, r, 1, r, 1, &nrm_r);
nrm_r = SQRTF(nrm_r);
/* check to see if we need to run CG at all */
if (nrm_r < MIN(tol, 1e-18)) {
return 0;
}
/* put p in r, replacing temp mem */
cudaMemcpy(p, r, n * sizeof(scs_float), cudaMemcpyDeviceToDevice);
for (i = 0; i < max_its; ++i) {
matVec(A, stgs, pr, p, Gp);
CUBLAS(dot)(cublasHandle, n, p, 1, Gp, 1, &pGp);
alpha = (nrm_r * nrm_r) / pGp;
negAlpha = -alpha;
CUBLAS(axpy)(cublasHandle, n, &alpha, p, 1, bg, 1);
CUBLAS(axpy)(cublasHandle, n, &negAlpha, Gp, 1, r, 1);
nrm_r_old = nrm_r;
/* for some reason nrm2 is VERY slow */
/* CUBLAS(nrm2)(cublasHandle, n, r, 1, &nrm_r); */
CUBLAS(dot)(cublasHandle, n, r, 1, r, 1, &nrm_r);
nrm_r = SQRTF(nrm_r);
if (nrm_r < tol) {
i++;
break;
}
beta = (nrm_r * nrm_r) / (nrm_r_old * nrm_r_old);
CUBLAS(scal)(cublasHandle, n, &beta, p, 1);
CUBLAS(axpy)(cublasHandle, n, &onef, r, 1, p, 1);
}
#if EXTRAVERBOSE > 0
scs_printf("tol: %.4e, resid: %.4e, iters: %li\n", tol, nrm_r, (long) i+1);
#endif
return i;
}
#ifdef TEST_GPU_MAT_MUL
void accumByAtransHost(const AMatrix * A, Priv * p, const scs_float *x, scs_float *y) {
_accumByAtrans(A->n, A->x, A->i, A->p, x, y);
}
void accumByAHost(const AMatrix * A, Priv * p, const scs_float *x, scs_float *y) {
_accumByA(A->n, A->x, A->i, A->p, x, y);
}
void testGpuMatMul(const AMatrix * A, Priv * p, scs_float * b) {
/* test to see if matrix multiplication codes agree */
scs_float t[A->n + A->m], u[A->n + A->m], *bg;
cudaMalloc((void **)&bg, (A->n + A->m) * sizeof(scs_float));
cudaMemcpy(bg, b, (A->n + A->m) * sizeof(scs_float), cudaMemcpyHostToDevice);
memcpy(t, b, (A->n + A->m) * sizeof(scs_float));
accumByAtransGpu(p, &(bg[A->n]), bg);
accumByAtransHost(A, p, &(t[A->n]), t);
cudaMemcpy(u, bg, (A->n + A->m) * sizeof(scs_float), cudaMemcpyDeviceToHost);
printf("A trans multiplication err %2.e\n", calcNormDiff(u, t, A->n));
accumByAGpu(p, bg, &(bg[A->n]));
accumByAHost(A, p, t, &(t[A->n]));
cudaMemcpy(u, bg, (A->n + A->m) * sizeof(scs_float), cudaMemcpyDeviceToHost);
printf("A multiplcation err %2.e\n", calcNormDiff(&(u[A->n]), &(t[A->n]), A->m));
cudaFree(bg);
}
#endif
scs_int solveLinSys(const AMatrix * A, const Settings * stgs, Priv * p, scs_float * b, const scs_float * s, scs_int iter) {
scs_int cgIts;
scs_float * bg = p->bg;
scs_float negOnef = -1.0;
scs_float cgTol = calcNorm(b, A->n) * (iter < 0 ? CG_BEST_TOL : CG_MIN_TOL / POWF((scs_float) iter + 1, stgs->cg_rate));
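/* cgTol is relative to ||b|| and tightens like 1/(iter+1)^cg_rate as the outer iterations
progress; a negative iter requests the tightest tolerance (CG_BEST_TOL) */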
tic(&linsysTimer);
/* solves Mx = b, for x but stores result in b */
/* s contains warm-start (if available) */
#ifdef TEST_GPU_MAT_MUL
testGpuMatMul(A, p, b);
#endif
/* all on GPU */
cudaMemcpy(bg, b, (A->n + A->m) * sizeof(scs_float), cudaMemcpyHostToDevice);
accumByAtransGpu(p, &(bg[A->n]), bg);
/* solves (I+A'A)x = b, s warm start, solution stored in b */
cgIts = pcg(A, stgs, p, s, bg, A->n, MAX(cgTol, CG_BEST_TOL));
CUBLAS(scal)(p->cublasHandle, A->m, &negOnef, &(bg[A->n]), 1);
accumByAGpu(p, bg, &(bg[A->n]));
cudaMemcpy(b, bg, (A->n + A->m) * sizeof(scs_float), cudaMemcpyDeviceToHost);
if (iter >= 0) {
totCgIts += cgIts;
}
totalSolveTime += tocq(&linsysTimer);
#if EXTRAVERBOSE > 0
scs_printf("linsys solve time: %1.2es\n", tocq(&linsysTimer) / 1e3);
#endif
return 0;
}
#ifdef __cplusplus
}
#endif
|
c81e2d227332e19ab558eeaa449ae27a7c43f0d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void spmvJDSKernel(float *out, int *matColStart, int *matCols,
int *matRowPerm, int *matRows,
float *matData, float *vec, int dim) {
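// JDS layout: rows are sorted by nonzero count, matColStart[i] is the offset of the i-th jagged
// diagonal, so matData[matColStart[i] + row] is the i-th nonzero of sorted row `row` and consecutive
// threads read consecutive addresses (coalesced); matRowPerm scatters the result back to the
// original row order.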
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < dim) {
float dot = 0;
for(unsigned int i=0; i< matRows[row];++i){
dot += matData[matColStart[i]+row] * vec[matCols[matColStart[i]+row]];
}
out[matRowPerm[row]] = dot;
}
}
static void spmvJDS(float *out, int *matColStart, int *matCols,
int *matRowPerm, int *matRows, float *matData,
float *vec, int dim) {
int blocksize=16;
dim3 grid_dim((dim+blocksize-1)/blocksize,1,1);
dim3 block_dim(blocksize,1,1);
hipLaunchKernelGGL(( spmvJDSKernel), dim3(grid_dim),dim3(block_dim), 0, 0, out, matColStart, matCols,
matRowPerm, matRows,
matData, vec, dim);
}
int main(int argc, char **argv) {
wbArg_t args;
int *hostCSRCols;
int *hostCSRRows;
float *hostCSRData;
int *hostJDSColStart;
int *hostJDSCols;
int *hostJDSRowPerm;
int *hostJDSRows;
float *hostJDSData;
float *hostVector;
float *hostOutput;
int *deviceJDSColStart;
int *deviceJDSCols;
int *deviceJDSRowPerm;
int *deviceJDSRows;
float *deviceJDSData;
float *deviceVector;
float *deviceOutput;
int dim, ncols, nrows, ndata;
int maxRowNNZ;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostCSRCols = (int *)wbImport(wbArg_getInputFile(args, 0), &ncols, "Integer");
hostCSRRows = (int *)wbImport(wbArg_getInputFile(args, 1), &nrows, "Integer");
hostCSRData = (float *)wbImport(wbArg_getInputFile(args, 2), &ndata, "Real");
hostVector = (float *)wbImport(wbArg_getInputFile(args, 3), &dim, "Real");
hostOutput = (float *)malloc(sizeof(float) * dim);
wbTime_stop(Generic, "Importing data and creating memory on host");
CSRToJDS(dim, hostCSRRows, hostCSRCols, hostCSRData, &hostJDSRowPerm, &hostJDSRows,
&hostJDSColStart, &hostJDSCols, &hostJDSData);
maxRowNNZ = hostJDSRows[0];
wbTime_start(GPU, "Allocating GPU memory.");
hipMalloc((void **)&deviceJDSColStart, sizeof(int) * maxRowNNZ);
hipMalloc((void **)&deviceJDSCols, sizeof(int) * ndata);
hipMalloc((void **)&deviceJDSRowPerm, sizeof(int) * dim);
hipMalloc((void **)&deviceJDSRows, sizeof(int) * dim);
hipMalloc((void **)&deviceJDSData, sizeof(float) * ndata);
hipMalloc((void **)&deviceVector, sizeof(float) * dim);
hipMalloc((void **)&deviceOutput, sizeof(float) * dim);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
hipMemcpy(deviceJDSColStart, hostJDSColStart, sizeof(int) * maxRowNNZ,
hipMemcpyHostToDevice);
hipMemcpy(deviceJDSCols, hostJDSCols, sizeof(int) * ndata, hipMemcpyHostToDevice);
hipMemcpy(deviceJDSRowPerm, hostJDSRowPerm, sizeof(int) * dim, hipMemcpyHostToDevice);
hipMemcpy(deviceJDSRows, hostJDSRows, sizeof(int) * dim, hipMemcpyHostToDevice);
hipMemcpy(deviceJDSData, hostJDSData, sizeof(float) * ndata, hipMemcpyHostToDevice);
hipMemcpy(deviceVector, hostVector, sizeof(float) * dim, hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
wbTime_start(Compute, "Performing CUDA computation");
spmvJDS(deviceOutput, deviceJDSColStart, deviceJDSCols, deviceJDSRowPerm, deviceJDSRows,
deviceJDSData, deviceVector, dim);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
hipMemcpy(hostOutput, deviceOutput, sizeof(float) * dim, hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
hipFree(deviceVector);
hipFree(deviceOutput);
hipFree(deviceJDSColStart);
hipFree(deviceJDSCols);
hipFree(deviceJDSRowPerm);
hipFree(deviceJDSRows);
hipFree(deviceJDSData);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, dim);
free(hostCSRCols);
free(hostCSRRows);
free(hostCSRData);
free(hostVector);
free(hostOutput);
free(hostJDSColStart);
free(hostJDSCols);
free(hostJDSRowPerm);
free(hostJDSRows);
free(hostJDSData);
return 0;
}
| c81e2d227332e19ab558eeaa449ae27a7c43f0d4.cu | #include <wb.h>
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void spmvJDSKernel(float *out, int *matColStart, int *matCols,
int *matRowPerm, int *matRows,
float *matData, float *vec, int dim) {
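// JDS layout: rows are sorted by nonzero count, matColStart[i] is the offset of the i-th jagged
// diagonal, so matData[matColStart[i] + row] is the i-th nonzero of sorted row `row` and consecutive
// threads read consecutive addresses (coalesced); matRowPerm scatters the result back to the
// original row order.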
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < dim) {
float dot = 0;
for(unsigned int i=0; i< matRows[row];++i){
dot += matData[matColStart[i]+row] * vec[matCols[matColStart[i]+row]];
}
out[matRowPerm[row]] = dot;
}
}
static void spmvJDS(float *out, int *matColStart, int *matCols,
int *matRowPerm, int *matRows, float *matData,
float *vec, int dim) {
int blocksize=16;
dim3 grid_dim((dim+blocksize-1)/blocksize,1,1);
dim3 block_dim(blocksize,1,1);
spmvJDSKernel<<<grid_dim,block_dim>>>(out, matColStart, matCols,
matRowPerm, matRows,
matData, vec, dim);
}
int main(int argc, char **argv) {
wbArg_t args;
int *hostCSRCols;
int *hostCSRRows;
float *hostCSRData;
int *hostJDSColStart;
int *hostJDSCols;
int *hostJDSRowPerm;
int *hostJDSRows;
float *hostJDSData;
float *hostVector;
float *hostOutput;
int *deviceJDSColStart;
int *deviceJDSCols;
int *deviceJDSRowPerm;
int *deviceJDSRows;
float *deviceJDSData;
float *deviceVector;
float *deviceOutput;
int dim, ncols, nrows, ndata;
int maxRowNNZ;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostCSRCols = (int *)wbImport(wbArg_getInputFile(args, 0), &ncols, "Integer");
hostCSRRows = (int *)wbImport(wbArg_getInputFile(args, 1), &nrows, "Integer");
hostCSRData = (float *)wbImport(wbArg_getInputFile(args, 2), &ndata, "Real");
hostVector = (float *)wbImport(wbArg_getInputFile(args, 3), &dim, "Real");
hostOutput = (float *)malloc(sizeof(float) * dim);
wbTime_stop(Generic, "Importing data and creating memory on host");
CSRToJDS(dim, hostCSRRows, hostCSRCols, hostCSRData, &hostJDSRowPerm, &hostJDSRows,
&hostJDSColStart, &hostJDSCols, &hostJDSData);
maxRowNNZ = hostJDSRows[0];
wbTime_start(GPU, "Allocating GPU memory.");
cudaMalloc((void **)&deviceJDSColStart, sizeof(int) * maxRowNNZ);
cudaMalloc((void **)&deviceJDSCols, sizeof(int) * ndata);
cudaMalloc((void **)&deviceJDSRowPerm, sizeof(int) * dim);
cudaMalloc((void **)&deviceJDSRows, sizeof(int) * dim);
cudaMalloc((void **)&deviceJDSData, sizeof(float) * ndata);
cudaMalloc((void **)&deviceVector, sizeof(float) * dim);
cudaMalloc((void **)&deviceOutput, sizeof(float) * dim);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
cudaMemcpy(deviceJDSColStart, hostJDSColStart, sizeof(int) * maxRowNNZ,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSCols, hostJDSCols, sizeof(int) * ndata, cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSRowPerm, hostJDSRowPerm, sizeof(int) * dim, cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSRows, hostJDSRows, sizeof(int) * dim, cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSData, hostJDSData, sizeof(float) * ndata, cudaMemcpyHostToDevice);
cudaMemcpy(deviceVector, hostVector, sizeof(float) * dim, cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
wbTime_start(Compute, "Performing CUDA computation");
spmvJDS(deviceOutput, deviceJDSColStart, deviceJDSCols, deviceJDSRowPerm, deviceJDSRows,
deviceJDSData, deviceVector, dim);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
cudaMemcpy(hostOutput, deviceOutput, sizeof(float) * dim, cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceVector);
cudaFree(deviceOutput);
cudaFree(deviceJDSColStart);
cudaFree(deviceJDSCols);
cudaFree(deviceJDSRowPerm);
cudaFree(deviceJDSRows);
cudaFree(deviceJDSData);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, dim);
free(hostCSRCols);
free(hostCSRRows);
free(hostCSRData);
free(hostVector);
free(hostOutput);
free(hostJDSColStart);
free(hostJDSCols);
free(hostJDSRowPerm);
free(hostJDSRows);
free(hostJDSData);
return 0;
}
|
c9fb7ea593c2324779dac10fa2d5e4fcdd439a2c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Clarity is Copyright 2008 Center for Integrated Systems for Microscopy,
* Copyright 2008 University of North Carolina at Chapel Hill.
*
* Clarity is free software; you can redistribute it and/or modify it under
* the terms of the GNU Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. You can also find
* the GPL on the GNU web site (http://www.gnu.org/copyleft/gpl.html).
*
* File name: ComputePrimitivesGPU.cu
* Author: Cory Quammen <[email protected]>
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "ComputePrimitivesGPU.h"
#define DEFAULT_BLOCKS 64
#define DEFAULT_THREADS_PER_BLOCK 128
#define CLARITY_REDUCE_BLOCKS_ENV "CLARITY_REDUCE_BLOCKS"
#define CLARITY_REDUCE_THREADS_PER_BLOCK_ENV "CLARITY_REDUCE_THREADS_PER_BLOCK"
int getReduceBlocks() {
int numBlocks = DEFAULT_BLOCKS;
char *blocksString = getenv(CLARITY_REDUCE_BLOCKS_ENV);
if (blocksString) {
numBlocks = atoi(blocksString);
}
return numBlocks;
}
int getReduceThreadsPerBlock() {
int numThreadsPerBlock = DEFAULT_THREADS_PER_BLOCK;
char *threadsPerBlockString = getenv(CLARITY_REDUCE_THREADS_PER_BLOCK_ENV);
if (threadsPerBlockString) {
numThreadsPerBlock = atoi(threadsPerBlockString);
}
return numThreadsPerBlock;
}
#define CLARITY_MAP_BLOCKS_ENV "CLARITY_MAP_BLOCKS"
#define CLARITY_MAP_THREADS_PER_BLOCK_ENV "CLARITY_MAP_THREADS_PER_BLOCK"
int getMapBlocks() {
int numBlocks = DEFAULT_BLOCKS;
char *blockString = getenv(CLARITY_MAP_BLOCKS_ENV);
if (blockString) {
numBlocks = atoi(blockString);
}
return numBlocks;
}
int getMapThreadsPerBlock() {
int numThreadsPerBlock = DEFAULT_THREADS_PER_BLOCK;
char *threadsPerBlockString = getenv(CLARITY_MAP_THREADS_PER_BLOCK_ENV);
if (threadsPerBlockString) {
numThreadsPerBlock = atoi(threadsPerBlockString);
}
return numThreadsPerBlock;
}
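// The CLARITY_REDUCE_BLOCKS, CLARITY_REDUCE_THREADS_PER_BLOCK, CLARITY_MAP_BLOCKS and
// CLARITY_MAP_THREADS_PER_BLOCK environment variables override the 64-block / 128-thread defaults
// at run time, e.g. launching with CLARITY_REDUCE_THREADS_PER_BLOCK=256 set in the environment.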
__global__
void
ReduceSumKernelGPU(float* blockResults, float* data, int n) {
extern __shared__ float accumulator[];
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int incr = gridDim.x*blockDim.x;
accumulator[threadIdx.x] = 0.0f;
for (int i = tid; i < n; i += incr) {
// All reads should be coalesced with this pattern.
accumulator[threadIdx.x] += data[i];
}
// Reduce the values in shared memory.
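  // Note: this halving scheme assumes blockDim.x is a power of two; otherwise some entries would be skipped.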
for (int d = blockDim.x >> 1; d > 0; d >>= 1) {
__syncthreads(); // Make sure all data is read before moving on.
// No bank conflicts in shared memory here.
if (threadIdx.x < d)
accumulator[threadIdx.x] += accumulator[threadIdx.x+d];
}
__syncthreads();
// Only thread 0 writes the sum to memory.
if (threadIdx.x == 0)
blockResults[blockIdx.x] = accumulator[0];
}
extern "C"
void
Clarity_ReduceSumGPU(float* result, float* buffer, int n) {
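  // Two-stage sum: each block reduces a strided slice of the input to one partial
  // sum on the device; the host adds the per-block partials below.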
// Set up device call configuration.
dim3 gridSize(getReduceBlocks());
dim3 blockSize(getReduceThreadsPerBlock());
size_t sharedSize = sizeof(float)*blockSize.x;
// Allocate memory on the device for block-wise partial
// reductions computed by the kernel.
float *blockResultsDev = NULL;
hipMalloc((void**)&blockResultsDev, sizeof(float)*gridSize.x);
hipLaunchKernelGGL(( ReduceSumKernelGPU), dim3(gridSize), dim3(blockSize), sharedSize, 0,
blockResultsDev, buffer, n);
// Read the partial sums from the blocks back to the host.
float* blockResultsHost = (float*) malloc(sizeof(float)*gridSize.x);
hipMemcpy(blockResultsHost, blockResultsDev,
sizeof(float)*gridSize.x, hipMemcpyDeviceToHost);
// Add up the results
*result = 0.0f;
for (int i = 0; i < gridSize.x; i++) {
*result += blockResultsHost[i];
}
free(blockResultsHost);
hipFree(blockResultsDev);
}
__global__
void
MultiplyArraysComponentWiseKernelGPU(float* result, float* a, float* b, int n) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int incr = gridDim.x*blockDim.x;
for (int i = tid; i < n; i += incr) {
result[i] = a[i] * b[i];
}
}
void
Clarity_MultiplyArraysComponentWiseGPU(float* result, float* a, float* b, int n) {
// Set up device call configuration.
dim3 gridSize(getMapBlocks());
dim3 blockSize(getMapThreadsPerBlock());
hipLaunchKernelGGL(( MultiplyArraysComponentWiseKernelGPU), dim3(gridSize), dim3(blockSize), 0, 0,
result, a, b, n);
}
__global__
void
DivideArraysComponentWiseKernelGPU(float* result, float* a, float* b, float value, int n) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int incr = gridDim.x*blockDim.x;
for (int i = tid; i < n; i += incr) {
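    // Substitute the caller-supplied value when the divisor is within 1e-5 of zero.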
if (fabs(b[i]) < 1e-5) {
result[i] = value;
} else {
result[i] = a[i] / b[i];
}
}
}
void
Clarity_DivideArraysComponentWiseGPU(float* result, float* a, float* b, float value, int n) {
// Set up device call configuration.
dim3 gridSize(getMapBlocks());
dim3 blockSize(getMapThreadsPerBlock());
hipLaunchKernelGGL(( DivideArraysComponentWiseKernelGPU), dim3(gridSize), dim3(blockSize), 0, 0,
result, a, b, value, n);
hipError_t error = hipDeviceSynchronize();
if (error != hipSuccess) {
fprintf(stderr, "CUDA error: %s in file '%s' in line %i : %s.\n",
"Clarity_DivideArraysComponentWiseGPU failed", __FILE__, __LINE__,
hipGetErrorString(error));
}
}
__global__
void
ScaleArrayKernelGPU(float* result, float* a, int n, float scale) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int incr = gridDim.x*blockDim.x;
for (int i = tid; i < n; i += incr) {
result[i] = a[i] * scale;
}
}
extern "C"
void
Clarity_ScaleArrayGPU(float* result, float* a, int n, float scale) {
// Set up device call configuration.
dim3 gridSize(getMapBlocks());
dim3 blockSize(getMapThreadsPerBlock());
hipLaunchKernelGGL(( ScaleArrayKernelGPU), dim3(gridSize), dim3(blockSize), 0, 0,
result, a, n, scale);
hipError_t error = hipDeviceSynchronize();
if (error != hipSuccess) {
fprintf(stderr, "CUDA error: %s in file '%s' in line %i : %s.\n",
"Clarity_ScaleArrayGPU failed", __FILE__, __LINE__,
hipGetErrorString(error));
}
}
| c9fb7ea593c2324779dac10fa2d5e4fcdd439a2c.cu | /*
* Clarity is Copyright 2008 Center for Integrated Systems for Microscopy,
* Copyright 2008 University of North Carolina at Chapel Hill.
*
* Clarity is free software; you can redistribute it and/or modify it under
* the terms of the GNU Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. You can also find
* the GPL on the GNU web site (http://www.gnu.org/copyleft/gpl.html).
*
* File name: ComputePrimitivesGPU.cu
* Author: Cory Quammen <[email protected]>
*/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "ComputePrimitivesGPU.h"
#define DEFAULT_BLOCKS 64
#define DEFAULT_THREADS_PER_BLOCK 128
#define CLARITY_REDUCE_BLOCKS_ENV "CLARITY_REDUCE_BLOCKS"
#define CLARITY_REDUCE_THREADS_PER_BLOCK_ENV "CLARITY_REDUCE_THREADS_PER_BLOCK"
int getReduceBlocks() {
int numBlocks = DEFAULT_BLOCKS;
char *blocksString = getenv(CLARITY_REDUCE_BLOCKS_ENV);
if (blocksString) {
numBlocks = atoi(blocksString);
}
return numBlocks;
}
int getReduceThreadsPerBlock() {
int numThreadsPerBlock = DEFAULT_THREADS_PER_BLOCK;
char *threadsPerBlockString = getenv(CLARITY_REDUCE_THREADS_PER_BLOCK_ENV);
if (threadsPerBlockString) {
numThreadsPerBlock = atoi(threadsPerBlockString);
}
return numThreadsPerBlock;
}
#define CLARITY_MAP_BLOCKS_ENV "CLARITY_MAP_BLOCKS"
#define CLARITY_MAP_THREADS_PER_BLOCK_ENV "CLARITY_MAP_THREADS_PER_BLOCK"
int getMapBlocks() {
int numBlocks = DEFAULT_BLOCKS;
char *blockString = getenv(CLARITY_MAP_BLOCKS_ENV);
if (blockString) {
numBlocks = atoi(blockString);
}
return numBlocks;
}
int getMapThreadsPerBlock() {
int numThreadsPerBlock = DEFAULT_THREADS_PER_BLOCK;
char *threadsPerBlockString = getenv(CLARITY_MAP_THREADS_PER_BLOCK_ENV);
if (threadsPerBlockString) {
numThreadsPerBlock = atoi(threadsPerBlockString);
}
return numThreadsPerBlock;
}
__global__
void
ReduceSumKernelGPU(float* blockResults, float* data, int n) {
extern __shared__ float accumulator[];
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int incr = gridDim.x*blockDim.x;
accumulator[threadIdx.x] = 0.0f;
for (int i = tid; i < n; i += incr) {
// All reads should be coalesced with this pattern.
accumulator[threadIdx.x] += data[i];
}
// Reduce the values in shared memory.
for (int d = blockDim.x >> 1; d > 0; d >>= 1) {
__syncthreads(); // Make sure all data is read before moving on.
// No bank conflicts in shared memory here.
if (threadIdx.x < d)
accumulator[threadIdx.x] += accumulator[threadIdx.x+d];
}
__syncthreads();
// Only thread 0 writes the sum to memory.
if (threadIdx.x == 0)
blockResults[blockIdx.x] = accumulator[0];
}
extern "C"
void
Clarity_ReduceSumGPU(float* result, float* buffer, int n) {
// Set up device call configuration.
dim3 gridSize(getReduceBlocks());
dim3 blockSize(getReduceThreadsPerBlock());
size_t sharedSize = sizeof(float)*blockSize.x;
// Allocate memory on the device for block-wise partial
// reductions computed by the kernel.
float *blockResultsDev = NULL;
cudaMalloc((void**)&blockResultsDev, sizeof(float)*gridSize.x);
ReduceSumKernelGPU<<<gridSize, blockSize, sharedSize>>>
(blockResultsDev, buffer, n);
// Read the partial sums from the blocks back to the host.
float* blockResultsHost = (float*) malloc(sizeof(float)*gridSize.x);
cudaMemcpy(blockResultsHost, blockResultsDev,
sizeof(float)*gridSize.x, cudaMemcpyDeviceToHost);
// Add up the results
*result = 0.0f;
for (int i = 0; i < gridSize.x; i++) {
*result += blockResultsHost[i];
}
free(blockResultsHost);
cudaFree(blockResultsDev);
}
__global__
void
MultiplyArraysComponentWiseKernelGPU(float* result, float* a, float* b, int n) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int incr = gridDim.x*blockDim.x;
for (int i = tid; i < n; i += incr) {
result[i] = a[i] * b[i];
}
}
void
Clarity_MultiplyArraysComponentWiseGPU(float* result, float* a, float* b, int n) {
// Set up device call configuration.
dim3 gridSize(getMapBlocks());
dim3 blockSize(getMapThreadsPerBlock());
MultiplyArraysComponentWiseKernelGPU<<<gridSize, blockSize>>>
(result, a, b, n);
}
__global__
void
DivideArraysComponentWiseKernelGPU(float* result, float* a, float* b, float value, int n) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int incr = gridDim.x*blockDim.x;
for (int i = tid; i < n; i += incr) {
if (fabs(b[i]) < 1e-5) {
result[i] = value;
} else {
result[i] = a[i] / b[i];
}
}
}
void
Clarity_DivideArraysComponentWiseGPU(float* result, float* a, float* b, float value, int n) {
// Set up device call configuration.
dim3 gridSize(getMapBlocks());
dim3 blockSize(getMapThreadsPerBlock());
DivideArraysComponentWiseKernelGPU<<<gridSize, blockSize>>>
(result, a, b, value, n);
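  // Note: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the modern
  // equivalent (the HIP port above maps it to hipDeviceSynchronize()).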
cudaError error = cudaThreadSynchronize();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA error: %s in file '%s' in line %i : %s.\n",
"Clarity_DivideArraysComponentWiseGPU failed", __FILE__, __LINE__,
cudaGetErrorString(error));
}
}
__global__
void
ScaleArrayKernelGPU(float* result, float* a, int n, float scale) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int incr = gridDim.x*blockDim.x;
for (int i = tid; i < n; i += incr) {
result[i] = a[i] * scale;
}
}
extern "C"
void
Clarity_ScaleArrayGPU(float* result, float* a, int n, float scale) {
// Set up device call configuration.
dim3 gridSize(getMapBlocks());
dim3 blockSize(getMapThreadsPerBlock());
ScaleArrayKernelGPU<<<gridSize, blockSize>>>
(result, a, n, scale);
cudaError error = cudaThreadSynchronize();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA error: %s in file '%s' in line %i : %s.\n",
"Clarity_ScaleArrayGPU failed", __FILE__, __LINE__,
cudaGetErrorString(error));
}
}
|
6dba8d16ac864ee7d9966695915324b9dc0625d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helpers.h"
#include "FixImproperHarmonic.h"
#include "FixHelpers.h"
#include "cutils_func.h"
#define SMALL 0.001f
#include "ImproperEvaluate.h"
namespace py = boost::python;
const std::string improperHarmonicType = "ImproperHarmonic";
FixImproperHarmonic::FixImproperHarmonic(SHARED(State) state_, std::string handle)
: FixPotentialMultiAtom (state_, handle, improperHarmonicType, true) {
readFromRestart();
}
void FixImproperHarmonic::compute(int virialMode) {
int nAtoms = state->atoms.size();
GPUData &gpd = state->gpd;
int activeIdx = gpd.activeIdx();
//printf("HELLO\n");
if (forcersGPU.size()) {
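        // Both branches launch the same harmonic improper kernel; the boolean template
        // argument mirrors virialMode (presumably so the virial accumulation can be
        // compiled in or out).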
if (virialMode) {
hipLaunchKernelGGL(( compute_force_improper<ImproperHarmonicType, ImproperEvaluatorHarmonic, true>) , dim3(NBLOCK(forcersGPU.size())), dim3(PERBLOCK), sharedMemSizeForParams, 0, forcersGPU.size(), gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.idToIdxs.d_data.data(), forcersGPU.data(), state->boundsGPU, parameters.data(), parameters.size(), gpd.virials.d_data.data(), usingSharedMemForParams, evaluator);
} else {
hipLaunchKernelGGL(( compute_force_improper<ImproperHarmonicType, ImproperEvaluatorHarmonic, false>) , dim3(NBLOCK(forcersGPU.size())), dim3(PERBLOCK), sharedMemSizeForParams, 0, forcersGPU.size(), gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.idToIdxs.d_data.data(), forcersGPU.data(), state->boundsGPU, parameters.data(), parameters.size(), gpd.virials.d_data.data(), usingSharedMemForParams, evaluator);
}
}
}
void FixImproperHarmonic::singlePointEng(real *perParticleEng) {
int nAtoms = state->atoms.size();
int activeIdx = state->gpd.activeIdx();
if (forcersGPU.size()) {
hipLaunchKernelGGL(( compute_energy_improper), dim3(NBLOCK(forcersGPU.size())), dim3(PERBLOCK), sharedMemSizeForParams, 0, forcersGPU.size(), state->gpd.xs(activeIdx), perParticleEng, state->gpd.idToIdxs.d_data.data(), forcersGPU.data(), state->boundsGPU, parameters.data(), parameters.size(), usingSharedMemForParams, evaluator);
}
}
void FixImproperHarmonic::createImproper(Atom *a, Atom *b, Atom *c, Atom *d, double k, double thetaEq, int type) {
std::vector<Atom *> atoms = {a, b, c, d};
validAtoms(atoms);
if (type == -1) {
assert(k!=COEF_DEFAULT and thetaEq!=COEF_DEFAULT);
}
forcers.push_back(ImproperHarmonic(a, b, c, d, k, thetaEq, type));
pyListInterface.updateAppendedMember();
}
void FixImproperHarmonic::setImproperTypeCoefs(int type, double k, double thetaEq) {
assert(thetaEq>=0);
ImproperHarmonic dummy(k, thetaEq, type);
setForcerType(type, dummy);
}
bool FixImproperHarmonic::readFromRestart() {
auto restData = getRestartNode();
if (restData) {
auto curr_node = restData.first_child();
while (curr_node) {
std::string tag = curr_node.name();
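            // The restart data carries two child sections: "types" (per-type k/thetaEq
            // coefficients) and "members" (four atom IDs plus parameters for each improper).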
if (tag == "types") {
for (auto type_node = curr_node.first_child(); type_node; type_node = type_node.next_sibling()) {
int type;
double k;
double thetaEq;
std::string type_ = type_node.attribute("id").value();
type = std::atoi(type_.c_str());
std::string k_ = type_node.attribute("k").value();
std::string thetaEq_ = type_node.attribute("thetaEq").value();
k = std::atof(k_.c_str());
thetaEq = std::atof(thetaEq_.c_str());
setImproperTypeCoefs(type, k, thetaEq);
}
} else if (tag == "members") {
for (auto member_node = curr_node.first_child(); member_node; member_node = member_node.next_sibling()) {
int type;
double k;
double thetaEq;
int ids[4];
std::string type_ = member_node.attribute("type").value();
std::string atom_a = member_node.attribute("atomID_a").value();
std::string atom_b = member_node.attribute("atomID_b").value();
std::string atom_c = member_node.attribute("atomID_c").value();
std::string atom_d = member_node.attribute("atomID_d").value();
std::string k_ = member_node.attribute("k").value();
std::string thetaEq_ = member_node.attribute("thetaEq").value();
type = std::atoi(type_.c_str());
ids[0] = std::atoi(atom_a.c_str());
ids[1] = std::atoi(atom_b.c_str());
ids[2] = std::atoi(atom_c.c_str());
ids[3] = std::atoi(atom_d.c_str());
Atom * a = &state->idToAtom(ids[0]);
Atom * b = &state->idToAtom(ids[1]);
Atom * c = &state->idToAtom(ids[2]);
Atom * d = &state->idToAtom(ids[3]);
k = std::atof(k_.c_str());
thetaEq = std::atof(thetaEq_.c_str());
createImproper(a, b, c, d, k, thetaEq, type);
}
}
curr_node = curr_node.next_sibling();
}
}
return true;
}
void export_FixImproperHarmonic() {
boost::python::class_<FixImproperHarmonic,
SHARED(FixImproperHarmonic),
boost::python::bases<Fix, TypedItemHolder> > (
"FixImproperHarmonic",
boost::python::init<SHARED(State), std::string> (
boost::python::args("state", "handle"))
)
.def("createImproper", &FixImproperHarmonic::createImproper,
(boost::python::arg("k")=COEF_DEFAULT,
boost::python::arg("thetaEq")=COEF_DEFAULT,
boost::python::arg("type")=-1)
)
.def("setImproperTypeCoefs", &FixImproperHarmonic::setImproperTypeCoefs,
(boost::python::arg("type")=COEF_DEFAULT,
boost::python::arg("k")=COEF_DEFAULT,
boost::python::arg("thetaEq")=COEF_DEFAULT
)
)
.def_readonly("impropers", &FixImproperHarmonic::pyForcers)
;
}
| 6dba8d16ac864ee7d9966695915324b9dc0625d2.cu | #include "helpers.h"
#include "FixImproperHarmonic.h"
#include "FixHelpers.h"
#include "cutils_func.h"
#define SMALL 0.001f
#include "ImproperEvaluate.h"
namespace py = boost::python;
const std::string improperHarmonicType = "ImproperHarmonic";
FixImproperHarmonic::FixImproperHarmonic(SHARED(State) state_, std::string handle)
: FixPotentialMultiAtom (state_, handle, improperHarmonicType, true) {
readFromRestart();
}
void FixImproperHarmonic::compute(int virialMode) {
int nAtoms = state->atoms.size();
GPUData &gpd = state->gpd;
int activeIdx = gpd.activeIdx();
//printf("HELLO\n");
if (forcersGPU.size()) {
if (virialMode) {
compute_force_improper<ImproperHarmonicType, ImproperEvaluatorHarmonic, true> <<<NBLOCK(forcersGPU.size()), PERBLOCK, sharedMemSizeForParams>>>(forcersGPU.size(), gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.idToIdxs.d_data.data(), forcersGPU.data(), state->boundsGPU, parameters.data(), parameters.size(), gpd.virials.d_data.data(), usingSharedMemForParams, evaluator);
} else {
compute_force_improper<ImproperHarmonicType, ImproperEvaluatorHarmonic, false> <<<NBLOCK(forcersGPU.size()), PERBLOCK, sharedMemSizeForParams>>>(forcersGPU.size(), gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.idToIdxs.d_data.data(), forcersGPU.data(), state->boundsGPU, parameters.data(), parameters.size(), gpd.virials.d_data.data(), usingSharedMemForParams, evaluator);
}
}
}
void FixImproperHarmonic::singlePointEng(real *perParticleEng) {
int nAtoms = state->atoms.size();
int activeIdx = state->gpd.activeIdx();
if (forcersGPU.size()) {
compute_energy_improper<<<NBLOCK(forcersGPU.size()), PERBLOCK, sharedMemSizeForParams>>>(forcersGPU.size(), state->gpd.xs(activeIdx), perParticleEng, state->gpd.idToIdxs.d_data.data(), forcersGPU.data(), state->boundsGPU, parameters.data(), parameters.size(), usingSharedMemForParams, evaluator);
}
}
void FixImproperHarmonic::createImproper(Atom *a, Atom *b, Atom *c, Atom *d, double k, double thetaEq, int type) {
std::vector<Atom *> atoms = {a, b, c, d};
validAtoms(atoms);
if (type == -1) {
assert(k!=COEF_DEFAULT and thetaEq!=COEF_DEFAULT);
}
forcers.push_back(ImproperHarmonic(a, b, c, d, k, thetaEq, type));
pyListInterface.updateAppendedMember();
}
void FixImproperHarmonic::setImproperTypeCoefs(int type, double k, double thetaEq) {
assert(thetaEq>=0);
ImproperHarmonic dummy(k, thetaEq, type);
setForcerType(type, dummy);
}
bool FixImproperHarmonic::readFromRestart() {
auto restData = getRestartNode();
if (restData) {
auto curr_node = restData.first_child();
while (curr_node) {
std::string tag = curr_node.name();
if (tag == "types") {
for (auto type_node = curr_node.first_child(); type_node; type_node = type_node.next_sibling()) {
int type;
double k;
double thetaEq;
std::string type_ = type_node.attribute("id").value();
type = std::atoi(type_.c_str());
std::string k_ = type_node.attribute("k").value();
std::string thetaEq_ = type_node.attribute("thetaEq").value();
k = std::atof(k_.c_str());
thetaEq = std::atof(thetaEq_.c_str());
setImproperTypeCoefs(type, k, thetaEq);
}
} else if (tag == "members") {
for (auto member_node = curr_node.first_child(); member_node; member_node = member_node.next_sibling()) {
int type;
double k;
double thetaEq;
int ids[4];
std::string type_ = member_node.attribute("type").value();
std::string atom_a = member_node.attribute("atomID_a").value();
std::string atom_b = member_node.attribute("atomID_b").value();
std::string atom_c = member_node.attribute("atomID_c").value();
std::string atom_d = member_node.attribute("atomID_d").value();
std::string k_ = member_node.attribute("k").value();
std::string thetaEq_ = member_node.attribute("thetaEq").value();
type = std::atoi(type_.c_str());
ids[0] = std::atoi(atom_a.c_str());
ids[1] = std::atoi(atom_b.c_str());
ids[2] = std::atoi(atom_c.c_str());
ids[3] = std::atoi(atom_d.c_str());
Atom * a = &state->idToAtom(ids[0]);
Atom * b = &state->idToAtom(ids[1]);
Atom * c = &state->idToAtom(ids[2]);
Atom * d = &state->idToAtom(ids[3]);
k = std::atof(k_.c_str());
thetaEq = std::atof(thetaEq_.c_str());
createImproper(a, b, c, d, k, thetaEq, type);
}
}
curr_node = curr_node.next_sibling();
}
}
return true;
}
void export_FixImproperHarmonic() {
boost::python::class_<FixImproperHarmonic,
SHARED(FixImproperHarmonic),
boost::python::bases<Fix, TypedItemHolder> > (
"FixImproperHarmonic",
boost::python::init<SHARED(State), std::string> (
boost::python::args("state", "handle"))
)
.def("createImproper", &FixImproperHarmonic::createImproper,
(boost::python::arg("k")=COEF_DEFAULT,
boost::python::arg("thetaEq")=COEF_DEFAULT,
boost::python::arg("type")=-1)
)
.def("setImproperTypeCoefs", &FixImproperHarmonic::setImproperTypeCoefs,
(boost::python::arg("type")=COEF_DEFAULT,
boost::python::arg("k")=COEF_DEFAULT,
boost::python::arg("thetaEq")=COEF_DEFAULT
)
)
.def_readonly("impropers", &FixImproperHarmonic::pyForcers)
;
}
|
365dd9843cdec709fbbe356846177eddfc46e0d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_r1;
int xdim0_update_halo_kernel1_r1_h = -1;
__constant__ int ydim0_update_halo_kernel1_r1;
int ydim0_update_halo_kernel1_r1_h = -1;
__constant__ int xdim1_update_halo_kernel1_r1;
int xdim1_update_halo_kernel1_r1_h = -1;
__constant__ int ydim1_update_halo_kernel1_r1;
int ydim1_update_halo_kernel1_r1_h = -1;
__constant__ int xdim2_update_halo_kernel1_r1;
int xdim2_update_halo_kernel1_r1_h = -1;
__constant__ int ydim2_update_halo_kernel1_r1;
int ydim2_update_halo_kernel1_r1_h = -1;
__constant__ int xdim3_update_halo_kernel1_r1;
int xdim3_update_halo_kernel1_r1_h = -1;
__constant__ int ydim3_update_halo_kernel1_r1;
int ydim3_update_halo_kernel1_r1_h = -1;
__constant__ int xdim4_update_halo_kernel1_r1;
int xdim4_update_halo_kernel1_r1_h = -1;
__constant__ int ydim4_update_halo_kernel1_r1;
int ydim4_update_halo_kernel1_r1_h = -1;
__constant__ int xdim5_update_halo_kernel1_r1;
int xdim5_update_halo_kernel1_r1_h = -1;
__constant__ int ydim5_update_halo_kernel1_r1;
int ydim5_update_halo_kernel1_r1_h = -1;
__constant__ int xdim6_update_halo_kernel1_r1;
int xdim6_update_halo_kernel1_r1_h = -1;
__constant__ int ydim6_update_halo_kernel1_r1;
int ydim6_update_halo_kernel1_r1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel1_r1 * (y) + \
xdim0_update_halo_kernel1_r1 * ydim0_update_halo_kernel1_r1 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel1_r1 * (y) + \
xdim1_update_halo_kernel1_r1 * ydim1_update_halo_kernel1_r1 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_update_halo_kernel1_r1 * (y) + \
xdim2_update_halo_kernel1_r1 * ydim2_update_halo_kernel1_r1 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_update_halo_kernel1_r1 * (y) + \
xdim3_update_halo_kernel1_r1 * ydim3_update_halo_kernel1_r1 * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_update_halo_kernel1_r1 * (y) + \
xdim4_update_halo_kernel1_r1 * ydim4_update_halo_kernel1_r1 * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_update_halo_kernel1_r1 * (y) + \
xdim5_update_halo_kernel1_r1 * ydim5_update_halo_kernel1_r1 * (z))
#define OPS_ACC6(x, y, z) \
(x + xdim6_update_halo_kernel1_r1 * (y) + \
xdim6_update_halo_kernel1_r1 * ydim6_update_halo_kernel1_r1 * (z))
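// The OPS_ACCn(x, y, z) macros flatten a relative (x, y, z) stencil offset into a 1-D
// index using the per-dat x and y extents held in the __constant__ variables above.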
// user function
__device__
inline void
update_halo_kernel1_r1_gpu(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed, const int *fields) {
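  // For each field flagged in `fields`, copy the value from the neighbouring cell
  // at offset (-1, 0, 0) into the current halo cell.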
if (fields[FIELD_DENSITY0] == 1)
density0[OPS_ACC0(0, 0, 0)] = density0[OPS_ACC0(-1, 0, 0)];
if (fields[FIELD_DENSITY1] == 1)
density1[OPS_ACC1(0, 0, 0)] = density1[OPS_ACC1(-1, 0, 0)];
if (fields[FIELD_ENERGY0] == 1)
energy0[OPS_ACC2(0, 0, 0)] = energy0[OPS_ACC2(-1, 0, 0)];
if (fields[FIELD_ENERGY1] == 1)
energy1[OPS_ACC3(0, 0, 0)] = energy1[OPS_ACC3(-1, 0, 0)];
if (fields[FIELD_PRESSURE] == 1)
pressure[OPS_ACC4(0, 0, 0)] = pressure[OPS_ACC4(-1, 0, 0)];
if (fields[FIELD_VISCOSITY] == 1)
viscosity[OPS_ACC5(0, 0, 0)] = viscosity[OPS_ACC5(-1, 0, 0)];
if (fields[FIELD_SOUNDSPEED] == 1)
soundspeed[OPS_ACC6(0, 0, 0)] = soundspeed[OPS_ACC6(-1, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void
ops_update_halo_kernel1_r1(double *__restrict arg0, double *__restrict arg1,
double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, double *__restrict arg5,
double *__restrict arg6, const int *__restrict arg7,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_r1 +
idx_z * 1 * 1 * xdim0_update_halo_kernel1_r1 *
ydim0_update_halo_kernel1_r1;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_r1 +
idx_z * 1 * 1 * xdim1_update_halo_kernel1_r1 *
ydim1_update_halo_kernel1_r1;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_r1 +
idx_z * 1 * 1 * xdim2_update_halo_kernel1_r1 *
ydim2_update_halo_kernel1_r1;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_r1 +
idx_z * 1 * 1 * xdim3_update_halo_kernel1_r1 *
ydim3_update_halo_kernel1_r1;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_r1 +
idx_z * 1 * 1 * xdim4_update_halo_kernel1_r1 *
ydim4_update_halo_kernel1_r1;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_r1 +
idx_z * 1 * 1 * xdim5_update_halo_kernel1_r1 *
ydim5_update_halo_kernel1_r1;
arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_update_halo_kernel1_r1 +
idx_z * 1 * 1 * xdim6_update_halo_kernel1_r1 *
ydim6_update_halo_kernel1_r1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_r1_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_r1(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_r1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 8, range, 18))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(18, "update_halo_kernel1_r1");
OPS_kernels[18].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
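  // Refresh the __constant__ dimensions only when they differ from the cached
  // host-side copies, avoiding a hipMemcpyToSymbol on every launch.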
if (xdim0 != xdim0_update_halo_kernel1_r1_h ||
ydim0 != ydim0_update_halo_kernel1_r1_h ||
xdim1 != xdim1_update_halo_kernel1_r1_h ||
ydim1 != ydim1_update_halo_kernel1_r1_h ||
xdim2 != xdim2_update_halo_kernel1_r1_h ||
ydim2 != ydim2_update_halo_kernel1_r1_h ||
xdim3 != xdim3_update_halo_kernel1_r1_h ||
ydim3 != ydim3_update_halo_kernel1_r1_h ||
xdim4 != xdim4_update_halo_kernel1_r1_h ||
ydim4 != ydim4_update_halo_kernel1_r1_h ||
xdim5 != xdim5_update_halo_kernel1_r1_h ||
ydim5 != ydim5_update_halo_kernel1_r1_h ||
xdim6 != xdim6_update_halo_kernel1_r1_h ||
ydim6 != ydim6_update_halo_kernel1_r1_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel1_r1, &xdim0, sizeof(int));
xdim0_update_halo_kernel1_r1_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel1_r1, &ydim0, sizeof(int));
ydim0_update_halo_kernel1_r1_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel1_r1, &xdim1, sizeof(int));
xdim1_update_halo_kernel1_r1_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel1_r1, &ydim1, sizeof(int));
ydim1_update_halo_kernel1_r1_h = ydim1;
hipMemcpyToSymbol(xdim2_update_halo_kernel1_r1, &xdim2, sizeof(int));
xdim2_update_halo_kernel1_r1_h = xdim2;
hipMemcpyToSymbol(ydim2_update_halo_kernel1_r1, &ydim2, sizeof(int));
ydim2_update_halo_kernel1_r1_h = ydim2;
hipMemcpyToSymbol(xdim3_update_halo_kernel1_r1, &xdim3, sizeof(int));
xdim3_update_halo_kernel1_r1_h = xdim3;
hipMemcpyToSymbol(ydim3_update_halo_kernel1_r1, &ydim3, sizeof(int));
ydim3_update_halo_kernel1_r1_h = ydim3;
hipMemcpyToSymbol(xdim4_update_halo_kernel1_r1, &xdim4, sizeof(int));
xdim4_update_halo_kernel1_r1_h = xdim4;
hipMemcpyToSymbol(ydim4_update_halo_kernel1_r1, &ydim4, sizeof(int));
ydim4_update_halo_kernel1_r1_h = ydim4;
hipMemcpyToSymbol(xdim5_update_halo_kernel1_r1, &xdim5, sizeof(int));
xdim5_update_halo_kernel1_r1_h = xdim5;
hipMemcpyToSymbol(ydim5_update_halo_kernel1_r1, &ydim5, sizeof(int));
ydim5_update_halo_kernel1_r1_h = ydim5;
hipMemcpyToSymbol(xdim6_update_halo_kernel1_r1, &xdim6, sizeof(int));
xdim6_update_halo_kernel1_r1_h = xdim6;
hipMemcpyToSymbol(ydim6_update_halo_kernel1_r1, &ydim6, sizeof(int));
ydim6_update_halo_kernel1_r1_h = ydim6;
}
int *arg7h = (int *)arg7.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
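  // x and y are tiled by OPS_block_size_x/y; each z plane maps to its own grid layer
  // (block z-dimension is 1).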
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6 +
dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]);
base6 = base6 +
dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args, 8, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[18].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel1_r1), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,
x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[18].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
ops_set_halo_dirtybit3(&args[6], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[18].mpi_time += t2 - t1;
OPS_kernels[18].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[18].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[18].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[18].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[18].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[18].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[18].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_r1(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 18;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 18;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg *)malloc(8 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg7.data, NUM_FIELDS * sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_r1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(18, "update_halo_kernel1_r1");
}
ops_enqueue_kernel(desc);
}
#endif
| 365dd9843cdec709fbbe356846177eddfc46e0d2.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_r1;
int xdim0_update_halo_kernel1_r1_h = -1;
__constant__ int ydim0_update_halo_kernel1_r1;
int ydim0_update_halo_kernel1_r1_h = -1;
__constant__ int xdim1_update_halo_kernel1_r1;
int xdim1_update_halo_kernel1_r1_h = -1;
__constant__ int ydim1_update_halo_kernel1_r1;
int ydim1_update_halo_kernel1_r1_h = -1;
__constant__ int xdim2_update_halo_kernel1_r1;
int xdim2_update_halo_kernel1_r1_h = -1;
__constant__ int ydim2_update_halo_kernel1_r1;
int ydim2_update_halo_kernel1_r1_h = -1;
__constant__ int xdim3_update_halo_kernel1_r1;
int xdim3_update_halo_kernel1_r1_h = -1;
__constant__ int ydim3_update_halo_kernel1_r1;
int ydim3_update_halo_kernel1_r1_h = -1;
__constant__ int xdim4_update_halo_kernel1_r1;
int xdim4_update_halo_kernel1_r1_h = -1;
__constant__ int ydim4_update_halo_kernel1_r1;
int ydim4_update_halo_kernel1_r1_h = -1;
__constant__ int xdim5_update_halo_kernel1_r1;
int xdim5_update_halo_kernel1_r1_h = -1;
__constant__ int ydim5_update_halo_kernel1_r1;
int ydim5_update_halo_kernel1_r1_h = -1;
__constant__ int xdim6_update_halo_kernel1_r1;
int xdim6_update_halo_kernel1_r1_h = -1;
__constant__ int ydim6_update_halo_kernel1_r1;
int ydim6_update_halo_kernel1_r1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel1_r1 * (y) + \
xdim0_update_halo_kernel1_r1 * ydim0_update_halo_kernel1_r1 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel1_r1 * (y) + \
xdim1_update_halo_kernel1_r1 * ydim1_update_halo_kernel1_r1 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_update_halo_kernel1_r1 * (y) + \
xdim2_update_halo_kernel1_r1 * ydim2_update_halo_kernel1_r1 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_update_halo_kernel1_r1 * (y) + \
xdim3_update_halo_kernel1_r1 * ydim3_update_halo_kernel1_r1 * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_update_halo_kernel1_r1 * (y) + \
xdim4_update_halo_kernel1_r1 * ydim4_update_halo_kernel1_r1 * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_update_halo_kernel1_r1 * (y) + \
xdim5_update_halo_kernel1_r1 * ydim5_update_halo_kernel1_r1 * (z))
#define OPS_ACC6(x, y, z) \
(x + xdim6_update_halo_kernel1_r1 * (y) + \
xdim6_update_halo_kernel1_r1 * ydim6_update_halo_kernel1_r1 * (z))
// user function
__device__
inline void
update_halo_kernel1_r1_gpu(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed, const int *fields) {
if (fields[FIELD_DENSITY0] == 1)
density0[OPS_ACC0(0, 0, 0)] = density0[OPS_ACC0(-1, 0, 0)];
if (fields[FIELD_DENSITY1] == 1)
density1[OPS_ACC1(0, 0, 0)] = density1[OPS_ACC1(-1, 0, 0)];
if (fields[FIELD_ENERGY0] == 1)
energy0[OPS_ACC2(0, 0, 0)] = energy0[OPS_ACC2(-1, 0, 0)];
if (fields[FIELD_ENERGY1] == 1)
energy1[OPS_ACC3(0, 0, 0)] = energy1[OPS_ACC3(-1, 0, 0)];
if (fields[FIELD_PRESSURE] == 1)
pressure[OPS_ACC4(0, 0, 0)] = pressure[OPS_ACC4(-1, 0, 0)];
if (fields[FIELD_VISCOSITY] == 1)
viscosity[OPS_ACC5(0, 0, 0)] = viscosity[OPS_ACC5(-1, 0, 0)];
if (fields[FIELD_SOUNDSPEED] == 1)
soundspeed[OPS_ACC6(0, 0, 0)] = soundspeed[OPS_ACC6(-1, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void
ops_update_halo_kernel1_r1(double *__restrict arg0, double *__restrict arg1,
double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, double *__restrict arg5,
double *__restrict arg6, const int *__restrict arg7,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_r1 +
idx_z * 1 * 1 * xdim0_update_halo_kernel1_r1 *
ydim0_update_halo_kernel1_r1;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_r1 +
idx_z * 1 * 1 * xdim1_update_halo_kernel1_r1 *
ydim1_update_halo_kernel1_r1;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_r1 +
idx_z * 1 * 1 * xdim2_update_halo_kernel1_r1 *
ydim2_update_halo_kernel1_r1;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_r1 +
idx_z * 1 * 1 * xdim3_update_halo_kernel1_r1 *
ydim3_update_halo_kernel1_r1;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_r1 +
idx_z * 1 * 1 * xdim4_update_halo_kernel1_r1 *
ydim4_update_halo_kernel1_r1;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_r1 +
idx_z * 1 * 1 * xdim5_update_halo_kernel1_r1 *
ydim5_update_halo_kernel1_r1;
arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_update_halo_kernel1_r1 +
idx_z * 1 * 1 * xdim6_update_halo_kernel1_r1 *
ydim6_update_halo_kernel1_r1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_r1_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_r1(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_r1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 8, range, 18))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(18, "update_halo_kernel1_r1");
OPS_kernels[18].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel1_r1_h ||
ydim0 != ydim0_update_halo_kernel1_r1_h ||
xdim1 != xdim1_update_halo_kernel1_r1_h ||
ydim1 != ydim1_update_halo_kernel1_r1_h ||
xdim2 != xdim2_update_halo_kernel1_r1_h ||
ydim2 != ydim2_update_halo_kernel1_r1_h ||
xdim3 != xdim3_update_halo_kernel1_r1_h ||
ydim3 != ydim3_update_halo_kernel1_r1_h ||
xdim4 != xdim4_update_halo_kernel1_r1_h ||
ydim4 != ydim4_update_halo_kernel1_r1_h ||
xdim5 != xdim5_update_halo_kernel1_r1_h ||
ydim5 != ydim5_update_halo_kernel1_r1_h ||
xdim6 != xdim6_update_halo_kernel1_r1_h ||
ydim6 != ydim6_update_halo_kernel1_r1_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel1_r1, &xdim0, sizeof(int));
xdim0_update_halo_kernel1_r1_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel1_r1, &ydim0, sizeof(int));
ydim0_update_halo_kernel1_r1_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel1_r1, &xdim1, sizeof(int));
xdim1_update_halo_kernel1_r1_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel1_r1, &ydim1, sizeof(int));
ydim1_update_halo_kernel1_r1_h = ydim1;
cudaMemcpyToSymbol(xdim2_update_halo_kernel1_r1, &xdim2, sizeof(int));
xdim2_update_halo_kernel1_r1_h = xdim2;
cudaMemcpyToSymbol(ydim2_update_halo_kernel1_r1, &ydim2, sizeof(int));
ydim2_update_halo_kernel1_r1_h = ydim2;
cudaMemcpyToSymbol(xdim3_update_halo_kernel1_r1, &xdim3, sizeof(int));
xdim3_update_halo_kernel1_r1_h = xdim3;
cudaMemcpyToSymbol(ydim3_update_halo_kernel1_r1, &ydim3, sizeof(int));
ydim3_update_halo_kernel1_r1_h = ydim3;
cudaMemcpyToSymbol(xdim4_update_halo_kernel1_r1, &xdim4, sizeof(int));
xdim4_update_halo_kernel1_r1_h = xdim4;
cudaMemcpyToSymbol(ydim4_update_halo_kernel1_r1, &ydim4, sizeof(int));
ydim4_update_halo_kernel1_r1_h = ydim4;
cudaMemcpyToSymbol(xdim5_update_halo_kernel1_r1, &xdim5, sizeof(int));
xdim5_update_halo_kernel1_r1_h = xdim5;
cudaMemcpyToSymbol(ydim5_update_halo_kernel1_r1, &ydim5, sizeof(int));
ydim5_update_halo_kernel1_r1_h = ydim5;
cudaMemcpyToSymbol(xdim6_update_halo_kernel1_r1, &xdim6, sizeof(int));
xdim6_update_halo_kernel1_r1_h = xdim6;
cudaMemcpyToSymbol(ydim6_update_halo_kernel1_r1, &ydim6, sizeof(int));
ydim6_update_halo_kernel1_r1_h = ydim6;
}
int *arg7h = (int *)arg7.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6 +
dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]);
base6 = base6 +
dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args, 8, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[18].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel1_r1<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,
x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[18].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
ops_set_halo_dirtybit3(&args[6], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[18].mpi_time += t2 - t1;
OPS_kernels[18].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[18].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[18].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[18].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[18].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[18].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[18].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_r1(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 18;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 18;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg *)malloc(8 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg7.data, NUM_FIELDS * sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_r1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(18, "update_halo_kernel1_r1");
}
ops_enqueue_kernel(desc);
}
#endif
|
|
d390a087fe481f8a9737e117568b27736390592a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/strings_column_factories.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/split/split.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <strings/split/split_utils.cuh>
#include <thrust/binary_search.h> // upper_bound()
#include <thrust/copy.h> // copy_if()
#include <thrust/count.h> // count_if()
#include <thrust/extrema.h> // max()
#include <thrust/transform.h> // transform()
namespace cudf {
namespace strings {
namespace detail {
using string_index_pair = thrust::pair<const char*, size_type>;
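// A token is a (pointer, byte-length) pair referencing the original chars buffer;
// {nullptr, 0} marks a null output entry.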
namespace {
/**
* @brief Base class for delimiter-based tokenizers.
*
* These are common methods used by both split and rsplit tokenizer functors.
*/
struct base_split_tokenizer {
__device__ const char* get_base_ptr() const
{
return d_strings.child(strings_column_view::chars_column_index).data<char>();
}
__device__ string_view const get_string(size_type idx) const
{
return d_strings.element<string_view>(idx);
}
__device__ bool is_valid(size_type idx) const { return d_strings.is_valid(idx); }
/**
* @brief Initialize token elements for all strings.
*
   * process_tokens() only creates tokens for strings that contain
   * delimiters. This function initializes the output tokens for all
   * strings: null entries are assigned for null and empty strings, and the
   * string itself is used for strings with no delimiters.
*
* The tokens are placed in output order so that all tokens for each output
* column are stored consecutively in `d_all_tokens`.
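   * For example (illustrative): with 3 strings and 2 output columns the
   * layout is [s0.col0, s1.col0, s2.col0, s0.col1, s1.col1, s2.col1],
   * i.e. index = col * strings_count + string_index.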
*
* @param idx Index of string in column
* @param column_count Number of columns in output
* @param d_all_tokens Tokens vector for all strings
*/
__device__ void init_tokens(size_type idx,
size_type column_count,
string_index_pair* d_all_tokens) const
{
auto d_tokens = d_all_tokens + idx;
if (is_valid(idx)) {
auto d_str = get_string(idx);
*d_tokens = string_index_pair{d_str.data(), d_str.size_bytes()};
--column_count;
d_tokens += d_strings.size();
}
// this is like fill() but output needs to be strided
for (size_type col = 0; col < column_count; ++col)
d_tokens[d_strings.size() * col] = string_index_pair{nullptr, 0};
}
base_split_tokenizer(column_device_view const& d_strings,
string_view const& d_delimiter,
size_type max_tokens)
: d_strings(d_strings), d_delimiter(d_delimiter), max_tokens(max_tokens)
{
}
protected:
column_device_view const d_strings; // strings to split
string_view const d_delimiter; // delimiter for split
size_type max_tokens;
};
/**
* @brief The tokenizer functions for split().
*
* The methods here count delimiters, tokens, and output token elements
* for each string in a strings column.
*/
struct split_tokenizer_fn : base_split_tokenizer {
/**
* @brief This will create tokens around each delimiter honoring the string boundaries
* in which the delimiter resides.
*
* Each token is placed in `d_all_tokens` so they align consecutively
* with other tokens for the same output column.
* That is, `d_tokens[col * strings_count + string_index]` is the token at column `col`
* for string at `string_index`.
*
* @param idx Index of the delimiter in the chars column
* @param column_count Number of output columns
* @param d_token_counts Token counts for each string
* @param d_positions The beginning byte position of each delimiter
* @param positions_count Number of delimiters
* @param d_indexes Indices of the strings for each delimiter
* @param d_all_tokens All output tokens for the strings column
*/
__device__ void process_tokens(size_type idx,
size_type column_count,
size_type const* d_token_counts,
size_type const* d_positions,
size_type positions_count,
size_type const* d_indexes,
string_index_pair* d_all_tokens) const
{
size_type str_idx = d_indexes[idx];
if ((idx > 0) && d_indexes[idx - 1] == str_idx)
return; // the first delimiter for the string rules them all
--str_idx; // all of these are off by 1 from the upper_bound call
size_type token_count = d_token_counts[str_idx]; // max_tokens already included
const char* const base_ptr = get_base_ptr(); // d_positions values are based on this ptr
// this string's tokens output
auto d_tokens = d_all_tokens + str_idx;
// this string
const string_view d_str = get_string(str_idx);
const char* str_ptr = d_str.data(); // beginning of the string
const char* const str_end_ptr = str_ptr + d_str.size_bytes(); // end of the string
// build the index-pair of each token for this string
for (size_type col = 0; col < token_count; ++col) {
auto next_delim = ((idx + col) < positions_count) // boundary check for delims in last string
? (base_ptr + d_positions[idx + col]) // start of next delimiter
: str_end_ptr; // or end of this string
auto eptr = (next_delim < str_end_ptr) // make sure delimiter is inside this string
&& (col + 1 < token_count) // and this is not the last token
? next_delim
: str_end_ptr;
// store the token into the output vector
d_tokens[col * d_strings.size()] =
string_index_pair{str_ptr, static_cast<size_type>(eptr - str_ptr)};
// point past this delimiter
str_ptr = eptr + d_delimiter.size_bytes();
}
}
/**
* @brief Returns `true` if the byte at `idx` is the start of the delimiter.
*
* @param idx Index of a byte in the chars column.
* @param d_offsets Offsets values to locate the chars ranges.
* @param chars_bytes Total number of bytes to process.
* @return true if delimiter is found starting at position `idx`
*/
__device__ bool is_delimiter(size_type idx, // chars index
int32_t const* d_offsets,
size_type chars_bytes) const
{
auto d_chars = get_base_ptr() + d_offsets[0];
if (idx + d_delimiter.size_bytes() > chars_bytes) return false;
return d_delimiter.compare(d_chars + idx, d_delimiter.size_bytes()) == 0;
}
/**
* @brief This counts the tokens for strings that contain delimiters.
*
* @param idx Index of a delimiter
* @param d_positions Start positions of all the delimiters
* @param positions_count The number of delimiters
* @param d_indexes Indices of the strings for each delimiter
* @param d_counts The token counts for all the strings
*/
__device__ void count_tokens(size_type idx, // delimiter index
size_type const* d_positions,
size_type positions_count,
size_type const* d_indexes,
size_type* d_counts) const
{
size_type str_idx = d_indexes[idx];
if ((idx > 0) && d_indexes[idx - 1] == str_idx)
return; // first delimiter found handles all of them for this string
auto const delim_length = d_delimiter.size_bytes();
string_view const d_str = get_string(str_idx - 1);
const char* const base_ptr = get_base_ptr();
size_type delim_count = 0; // re-count delimiters to compute the token-count
size_type last_pos = d_positions[idx] - delim_length;
while ((idx < positions_count) && (d_indexes[idx] == str_idx)) {
// make sure the whole delimiter is inside the string before counting it
auto d_pos = d_positions[idx];
if (((base_ptr + d_pos + delim_length - 1) < (d_str.data() + d_str.size_bytes())) &&
((d_pos - last_pos) >= delim_length)) {
++delim_count; // only count if the delimiter fits
last_pos = d_pos; // overlapping delimiters are ignored too
}
++idx;
}
// the number of tokens is delim_count+1 but capped to max_tokens
d_counts[str_idx - 1] =
((max_tokens > 0) && (delim_count + 1 > max_tokens)) ? max_tokens : delim_count + 1;
}
split_tokenizer_fn(column_device_view const& d_strings,
string_view const& d_delimiter,
size_type max_tokens)
: base_split_tokenizer(d_strings, d_delimiter, max_tokens)
{
}
};
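// Illustrative behavior of the tokenizer above (hypothetical input): splitting
// "aaa" on the delimiter "aa" counts only one delimiter, because count_tokens()
// skips matches that overlap a previously counted one, so the resulting tokens
// are {"", "a"} -- the same answer as Python's "aaa".split("aa").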
/**
* @brief The tokenizer functions for rsplit().
*
* The methods here count delimiters, tokens, and output token elements
* for each string in a strings column.
*
* Same as split_tokenizer_fn except tokens are counted from the end of each string.
*/
struct rsplit_tokenizer_fn : base_split_tokenizer {
/**
* @brief This will create tokens around each delimiter honoring the string boundaries
* in which the delimiter resides.
*
* The tokens are processed from the end of each string so the `max_tokens`
* is honored correctly.
*
* Each token is placed in `d_all_tokens` so they align consecutively
* with other tokens for the same output column.
* That is, `d_tokens[col * strings_count + string_index]` is the token at column `col`
* for string at `string_index`.
*
* @param idx Index of the delimiter in the chars column
* @param column_count Number of output columns
* @param d_token_counts Token counts for each string
* @param d_positions The ending byte position of each delimiter
* @param positions_count Number of delimiters
* @param d_indexes Indices of the strings for each delimiter
* @param d_all_tokens All output tokens for the strings column
*/
__device__ void process_tokens(size_type idx, // delimiter position index
size_type column_count, // number of output columns
size_type const* d_token_counts, // token counts for each string
size_type const* d_positions, // end of each delimiter
size_type positions_count, // total number of delimiters
size_type const* d_indexes, // string indices for each delimiter
string_index_pair* d_all_tokens) const
{
size_type str_idx = d_indexes[idx];
if ((idx + 1 < positions_count) && d_indexes[idx + 1] == str_idx)
return; // the last delimiter for the string rules them all
--str_idx; // all of these are off by 1 from the upper_bound call
size_type token_count = d_token_counts[str_idx]; // max_tokens already included
const char* const base_ptr = get_base_ptr(); // d_positions values are based on this ptr
// this string's tokens output
auto d_tokens = d_all_tokens + str_idx;
// this string
const string_view d_str = get_string(str_idx);
const char* const str_begin_ptr = d_str.data(); // beginning of the string
const char* str_ptr = str_begin_ptr + d_str.size_bytes(); // end of the string
// build the index-pair of each token for this string
for (size_type col = 0; col < token_count; ++col) {
auto prev_delim = (idx >= col) // boundary check for delims in first string
? (base_ptr + d_positions[idx - col] + 1) // end of prev delimiter
: str_begin_ptr; // or the start of this string
auto sptr = (prev_delim > str_begin_ptr) // make sure delimiter is inside the string
&& (col + 1 < token_count) // and this is not the last token
? prev_delim
: str_begin_ptr;
// store the token into the output -- building the array backwards
d_tokens[d_strings.size() * (token_count - 1 - col)] =
string_index_pair{sptr, static_cast<size_type>(str_ptr - sptr)};
str_ptr = sptr - d_delimiter.size_bytes(); // get ready for the next prev token
}
}
/**
* @brief Returns `true` if the byte at `idx` is the end of the delimiter.
*
* @param idx Index of a byte in the chars column.
* @param d_offsets Offsets values to locate the chars ranges.
* @param chars_bytes Total number of bytes to process.
* @return true if delimiter is found ending at position `idx`
*/
__device__ bool is_delimiter(size_type idx, int32_t const* d_offsets, size_type chars_bytes) const
{
auto delim_length = d_delimiter.size_bytes();
if (idx < delim_length - 1) return false;
auto d_chars = get_base_ptr() + d_offsets[0];
return d_delimiter.compare(d_chars + idx - (delim_length - 1), delim_length) == 0;
}
/**
* @brief This counts the tokens for strings that contain delimiters.
*
* Token counting starts at the end of the string to honor the `max_tokens`
* appropriately.
*
* @param idx Index of a delimiter
* @param d_positions End positions of all the delimiters
* @param positions_count The number of delimiters
* @param d_indexes Indices of the strings for each delimiter
* @param d_counts The token counts for all the strings
*/
__device__ void count_tokens(size_type idx,
size_type const* d_positions,
size_type positions_count,
size_type const* d_indexes,
size_type* d_counts) const
{
size_type str_idx = d_indexes[idx]; // 1-based string index created by upper_bound()
if ((idx > 0) && d_indexes[idx - 1] == str_idx)
return; // first delimiter found handles all of them for this string
auto const delim_length = d_delimiter.size_bytes();
const string_view d_str = get_string(str_idx - 1); // -1 for 0-based index
const char* const base_ptr = get_base_ptr();
size_type delim_count = 0;
size_type last_pos = d_positions[idx] - delim_length;
while ((idx < positions_count) && (d_indexes[idx] == str_idx)) {
// make sure the whole delimiter is inside the string before counting it
auto d_pos = d_positions[idx];
if (((base_ptr + d_pos + 1 - delim_length) >= d_str.data()) &&
((d_pos - last_pos) >= delim_length)) {
++delim_count; // only count if the delimiter fits
last_pos = d_pos; // overlapping delimiters are also ignored
}
++idx;
}
// the number of tokens is delim_count+1 but capped to max_tokens
d_counts[str_idx - 1] =
((max_tokens > 0) && (delim_count + 1 > max_tokens)) ? max_tokens : delim_count + 1;
}
rsplit_tokenizer_fn(column_device_view const& d_strings,
string_view const& d_delimiter,
size_type max_tokens)
: base_split_tokenizer(d_strings, d_delimiter, max_tokens)
{
}
};
/**
* @brief Generic split function called by split() and rsplit().
*
* This function will first count the number of delimiters in the entire strings
* column. Next it records the position of all the delimiters. These positions
* are used for the remainder of the code to build string_index_pair elements
* for each output column.
*
* The number of tokens for each string is computed by analyzing the delimiter
* position values and mapping them to each string.
* The number of output columns is determined by the string with the most tokens.
* Next the `string_index_pairs` for the entire column are created using the
* delimiter positions and their string indices vector.
*
* Finally, each column is built by creating a vector of tokens (`string_index_pairs`)
* according to their position in each string. The first token from each string goes
* into the first output column, the 2nd token from each string goes into the 2nd
* output column, etc.
*
* Output should be comparable to Pandas `split()` with `expand=True` but the
* rows/columns are transposed.
*
* ```
* import pandas as pd
* pd_series = pd.Series(['', None, 'a_b', '_a_b_', '__aa__bb__', '_a__bbb___c', '_aa_b__ccc__'])
* print(pd_series.str.split(pat='_', expand=True))
* 0 1 2 3 4 5 6
* 0 '' None None None None None None
* 1 None None None None None None None
* 2 a b None None None None None
* 3 '' a b '' None None None
* 4 '' '' aa '' bb '' ''
* 5 '' a '' bbb '' '' c
* 6 '' aa b '' ccc '' ''
*
* print(pd_series.str.split(pat='_', n=1, expand=True))
* 0 1
* 0 '' None
* 1 None None
* 2 a b
* 3 '' a_b_
* 4 '' _aa__bb__
* 5 '' a__bbb___c
* 6 '' aa_b__ccc__
*
* print(pd_series.str.split(pat='_', n=2, expand=True))
* 0 1 2
* 0 '' None None
* 1 None None None
* 2 a b None
* 3 '' a b_
* 4 '' aa__bb__
* 5 '' a _bbb___c
* 6 '' aa b__ccc__
* ```
*
* @tparam Tokenizer provides unique functions for split/rsplit.
* @param strings_column The strings to split
* @param tokenizer Tokenizer for counting and producing tokens
* @return table of columns for the output of the split
*/
template <typename Tokenizer>
std::unique_ptr<table> split_fn(strings_column_view const& strings_column,
Tokenizer tokenizer,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
std::vector<std::unique_ptr<column>> results;
auto strings_count = strings_column.size();
if (strings_count == 0) {
results.push_back(make_empty_strings_column(mr, stream));
return std::make_unique<table>(std::move(results));
}
auto execpol = rmm::exec_policy(stream);
auto d_offsets = strings_column.offsets().data<int32_t>();
d_offsets += strings_column.offset(); // nvbug-2808421 : do not combine with the previous line
auto chars_bytes = thrust::device_pointer_cast(d_offsets)[strings_count] -
thrust::device_pointer_cast(d_offsets)[0];
// count the number of delimiters in the entire column
size_type delimiter_count =
thrust::count_if(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(chars_bytes),
[tokenizer, d_offsets, chars_bytes] __device__(size_type idx) {
return tokenizer.is_delimiter(idx, d_offsets, chars_bytes);
});
// create vector of every delimiter position in the chars column
rmm::device_vector<size_type> delimiter_positions(delimiter_count);
auto d_positions = delimiter_positions.data().get();
auto copy_end = thrust::copy_if(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(chars_bytes),
delimiter_positions.begin(),
[tokenizer, d_offsets, chars_bytes] __device__(size_type idx) {
return tokenizer.is_delimiter(idx, d_offsets, chars_bytes);
});
// create vector of string indices for each delimiter
rmm::device_vector<size_type> string_indices(delimiter_count); // these will be strings that
auto d_string_indices = string_indices.data().get(); // only contain delimiters
thrust::upper_bound(execpol->on(stream),
d_offsets,
d_offsets + strings_count,
delimiter_positions.begin(),
copy_end,
string_indices.begin());
// compute the number of tokens per string
rmm::device_vector<size_type> token_counts(strings_count);
auto d_token_counts = token_counts.data().get();
// first, initialize token counts for strings without delimiters in them
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_token_counts,
[tokenizer] __device__(size_type idx) {
// nulls are 0, all others 1
return static_cast<size_type>(tokenizer.is_valid(idx));
});
// now compute the number of tokens in each string
thrust::for_each_n(
execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
delimiter_count,
[tokenizer, d_positions, delimiter_count, d_string_indices, d_token_counts] __device__(
size_type idx) {
tokenizer.count_tokens(idx, d_positions, delimiter_count, d_string_indices, d_token_counts);
});
// the columns_count is the maximum number of tokens for any string
size_type columns_count =
*thrust::max_element(execpol->on(stream), token_counts.begin(), token_counts.end());
// boundary case: if no columns, return one null column (custrings issue #119)
if (columns_count == 0) {
results.push_back(
std::make_unique<column>(data_type{type_id::STRING},
strings_count,
rmm::device_buffer{0, stream, mr}, // no data
create_null_mask(strings_count, mask_state::ALL_NULL, stream, mr),
strings_count));
}
// create working area to hold all token positions
rmm::device_vector<string_index_pair> tokens(columns_count * strings_count);
string_index_pair* d_tokens = tokens.data().get();
// initialize the token positions
// -- accounts for nulls, empty, and strings with no delimiter in them
thrust::for_each_n(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[tokenizer, columns_count, d_tokens] __device__(size_type idx) {
tokenizer.init_tokens(idx, columns_count, d_tokens);
});
// get the positions for every token using the delimiter positions
thrust::for_each_n(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
delimiter_count,
[tokenizer,
columns_count,
d_token_counts,
d_positions,
delimiter_count,
d_string_indices,
d_tokens] __device__(size_type idx) {
tokenizer.process_tokens(idx,
columns_count,
d_token_counts,
d_positions,
delimiter_count,
d_string_indices,
d_tokens);
});
// Create each column.
// - Each pair points to the strings for that column for each row.
// - Create the strings column from the vector using the strings factory.
for (size_type col = 0; col < columns_count; ++col) {
auto column_tokens = d_tokens + (col * strings_count);
results.emplace_back(
make_strings_column(column_tokens, column_tokens + strings_count, mr, stream));
}
return std::make_unique<table>(std::move(results));
}
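// For illustration (hypothetical input, not taken from a test): splitting the
// column ["a_b", "c_d_e"] on "_" yields columns_count == 3 and a d_tokens buffer
// laid out column-major as {"a","c", "b","d", null,"e"}, i.e. token `col` of row
// `row` lives at d_tokens[col * strings_count + row], so the loop above slices
// one contiguous strings_count-sized span per output column.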
/**
* @brief Base class for whitespace tokenizers.
*
* These are common methods used by both split and rsplit tokenizer functors.
*/
struct base_whitespace_split_tokenizer {
// count the tokens only between non-whitespace characters
__device__ size_type count_tokens(size_type idx) const
{
if (d_strings.is_null(idx)) return 0;
const string_view d_str = d_strings.element<string_view>(idx);
size_type token_count = 0;
// run of whitespace is considered a single delimiter
bool spaces = true;
auto itr = d_str.begin();
while (itr != d_str.end()) {
char_utf8 ch = *itr;
if (spaces == (ch <= ' '))
itr++;
else {
token_count += static_cast<size_type>(spaces);
spaces = !spaces;
}
}
if (max_tokens && (token_count > max_tokens)) token_count = max_tokens;
if (token_count == 0) token_count = 1; // always at least 1 token
return token_count;
}
base_whitespace_split_tokenizer(column_device_view const& d_strings, size_type max_tokens)
: d_strings(d_strings), max_tokens(max_tokens)
{
}
protected:
column_device_view const d_strings;
size_type max_tokens; // maximum number of tokens
};
/**
* @brief The tokenizer functions for split() with whitespace.
*
* The whitespace tokenizer has no delimiter and handles one or more
* consecutive whitespace characters as a single delimiter.
*/
struct whitespace_split_tokenizer_fn : base_whitespace_split_tokenizer {
/**
* @brief This will create tokens around each run of whitespace characters.
*
* Each token is placed in `d_all_tokens` so they align consecutively
* with other tokens for the same output column.
* That is, `d_tokens[col * strings_count + string_index]` is the token at column `col`
* for string at `string_index`.
*
* @param idx Index of the string to process
* @param column_count Number of output columns
* @param d_token_counts Token counts for each string
* @param d_all_tokens All output tokens for the strings column
*/
__device__ void process_tokens(size_type idx,
size_type column_count,
size_type const* d_token_counts,
string_index_pair* d_all_tokens) const
{
string_index_pair* d_tokens = d_all_tokens + idx;
if (d_strings.is_null(idx)) return;
string_view const d_str = d_strings.element<cudf::string_view>(idx);
if (d_str.empty()) return;
whitespace_string_tokenizer tokenizer(d_str);
size_type token_count = d_token_counts[idx];
size_type token_idx = 0;
position_pair token{0, 0};
while (tokenizer.next_token() && (token_idx < token_count)) {
token = tokenizer.get_token();
d_tokens[d_strings.size() * (token_idx++)] =
string_index_pair{d_str.data() + token.first, (token.second - token.first)};
}
if (token_count == max_tokens)
d_tokens[d_strings.size() * (token_idx - 1)] =
string_index_pair{d_str.data() + token.first, (d_str.size_bytes() - token.first)};
}
whitespace_split_tokenizer_fn(column_device_view const& d_strings, size_type max_tokens)
: base_whitespace_split_tokenizer(d_strings, max_tokens)
{
}
};
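// Illustration (hypothetical row): for " a b c " with max_tokens == 2 the loop
// above emits "a" and "b", and the final assignment then stretches the last
// token to the end of the string, producing {"a", "b c "}; the remainder keeps
// its internal and trailing whitespace, matching Pandas split(n=1, expand=True).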
/**
* @brief The tokenizer functions for rsplit() with whitespace.
*
* The whitespace tokenizer has no delimiter and handles one or more
* consecutive whitespace characters as a single delimiter.
*
* This one processes tokens from the end of each string.
*/
struct whitespace_rsplit_tokenizer_fn : base_whitespace_split_tokenizer {
/**
* @brief This will create tokens around each run of whitespace characters.
*
* Each token is placed in `d_all_tokens` so they align consecutively
* with other tokens for the same output column.
* That is, `d_tokens[col * strings_count + string_index]` is the token at column `col`
* for string at `string_index`.
*
* @param idx Index of the string to process
* @param column_count Number of output columns
* @param d_token_counts Token counts for each string
* @param d_all_tokens All output tokens for the strings column
*/
__device__ void process_tokens(size_type idx, // string position index
size_type column_count,
size_type const* d_token_counts,
string_index_pair* d_all_tokens) const
{
string_index_pair* d_tokens = d_all_tokens + idx;
if (d_strings.is_null(idx)) return;
string_view const d_str = d_strings.element<cudf::string_view>(idx);
if (d_str.empty()) return;
whitespace_string_tokenizer tokenizer(d_str, true);
size_type token_count = d_token_counts[idx];
size_type token_idx = 0;
position_pair token{0, 0};
while (tokenizer.prev_token() && (token_idx < token_count)) {
token = tokenizer.get_token();
d_tokens[d_strings.size() * (token_count - 1 - token_idx)] =
string_index_pair{d_str.data() + token.first, (token.second - token.first)};
++token_idx;
}
if (token_count == max_tokens)
d_tokens[d_strings.size() * (token_count - token_idx)] =
string_index_pair{d_str.data(), token.second};
}
whitespace_rsplit_tokenizer_fn(column_device_view const& d_strings, size_type max_tokens)
: base_whitespace_split_tokenizer(d_strings, max_tokens)
{
}
};
/**
* @brief Generic split function called by split() and rsplit() using whitespace as a delimiter.
*
* The number of tokens for each string is computed by counting consecutive characters
* between runs of whitespace in each string. The number of output columns is determined
* by the string with the most tokens. Next the string_index_pairs for the entire column
* is created.
*
* Finally, each column is built by creating a vector of tokens (string_index_pairs)
* according to their position in each string. The first token from each string goes
* into the first output column, the 2nd token from each string goes into the 2nd
* output column, etc.
*
* This can be compared to Pandas `split()` with no delimiter and with `expand=True` but
* with the rows/columns transposed.
*
* import pandas as pd
* pd_series = pd.Series(['', None, 'a b', ' a b ', ' aa bb ', ' a bbb c', ' aa b ccc '])
* print(pd_series.str.split(pat=None, expand=True))
* 0 1 2
* 0 None None None
* 1 None None None
* 2 a b None
* 3 a b None
* 4 aa bb None
* 5 a bbb c
* 6 aa b ccc
*
* print(pd_series.str.split(pat=None, n=1, expand=True))
* 0 1
* 0 None None
* 1 None None
* 2 a b
* 3 a b
* 4 aa bb
* 5 a bbb c
* 6 aa b ccc
*
* print(pd_series.str.split(pat=None, n=2, expand=True))
* 0 1 2
* 0 None None None
* 1 None None None
* 2 a b None
* 3 a b None
* 4 aa bb None
* 5 a bbb c
* 6 aa b ccc
*
* @tparam Tokenizer provides unique functions for split/rsplit.
* @param strings_count The number of strings in the column
* @param tokenizer Tokenizer for counting and producing tokens
* @return table of columns for the output of the split
*/
template <typename Tokenizer>
std::unique_ptr<table> whitespace_split_fn(size_type strings_count,
Tokenizer tokenizer,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto execpol = rmm::exec_policy(stream);
// compute the number of tokens per string
size_type columns_count = 0;
rmm::device_vector<size_type> token_counts(strings_count);
auto d_token_counts = token_counts.data().get();
if (strings_count > 0) {
thrust::transform(
execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_token_counts,
[tokenizer] __device__(size_type idx) { return tokenizer.count_tokens(idx); });
// column count is the maximum number of tokens for any string
columns_count =
*thrust::max_element(execpol->on(stream), token_counts.begin(), token_counts.end());
}
std::vector<std::unique_ptr<column>> results;
// boundary case: if no columns, return one null column (issue #119)
if (columns_count == 0) {
results.push_back(
std::make_unique<column>(data_type{type_id::STRING},
strings_count,
rmm::device_buffer{0, stream, mr}, // no data
create_null_mask(strings_count, mask_state::ALL_NULL, stream, mr),
strings_count));
}
// get the positions for every token
rmm::device_vector<string_index_pair> tokens(columns_count * strings_count);
string_index_pair* d_tokens = tokens.data().get();
thrust::fill(execpol->on(stream),
d_tokens,
d_tokens + (columns_count * strings_count),
string_index_pair{nullptr, 0});
thrust::for_each_n(
execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[tokenizer, columns_count, d_token_counts, d_tokens] __device__(size_type idx) {
tokenizer.process_tokens(idx, columns_count, d_token_counts, d_tokens);
});
// Create each column.
// - Each pair points to a string for that column for each row.
// - Create the strings column from the vector using the strings factory.
for (size_type col = 0; col < columns_count; ++col) {
auto column_tokens = d_tokens + (col * strings_count);
results.emplace_back(
make_strings_column(column_tokens, column_tokens + strings_count, mr, stream));
}
return std::make_unique<table>(std::move(results));
}
} // namespace
std::unique_ptr<table> split(
strings_column_view const& strings_column,
string_scalar const& delimiter = string_scalar(""),
size_type maxsplit = -1,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(),
hipStream_t stream = 0)
{
CUDF_EXPECTS(delimiter.is_valid(), "Parameter delimiter must be valid");
size_type max_tokens = 0;
if (maxsplit > 0) max_tokens = maxsplit + 1; // makes consistent with Pandas
auto strings_device_view = column_device_view::create(strings_column.parent(), stream);
if (delimiter.size() == 0) {
return whitespace_split_fn(strings_column.size(),
whitespace_split_tokenizer_fn{*strings_device_view, max_tokens},
mr,
stream);
}
string_view d_delimiter(delimiter.data(), delimiter.size());
return split_fn(
strings_column, split_tokenizer_fn{*strings_device_view, d_delimiter, max_tokens}, mr, stream);
}
std::unique_ptr<table> rsplit(
strings_column_view const& strings_column,
string_scalar const& delimiter = string_scalar(""),
size_type maxsplit = -1,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(),
hipStream_t stream = 0)
{
CUDF_EXPECTS(delimiter.is_valid(), "Parameter delimiter must be valid");
size_type max_tokens = 0;
if (maxsplit > 0) max_tokens = maxsplit + 1; // makes consistent with Pandas
auto strings_device_view = column_device_view::create(strings_column.parent(), stream);
if (delimiter.size() == 0) {
return whitespace_split_fn(strings_column.size(),
whitespace_rsplit_tokenizer_fn{*strings_device_view, max_tokens},
mr,
stream);
}
string_view d_delimiter(delimiter.data(), delimiter.size());
return split_fn(
strings_column, rsplit_tokenizer_fn{*strings_device_view, d_delimiter, max_tokens}, mr, stream);
}
} // namespace detail
// external APIs
std::unique_ptr<table> split(strings_column_view const& strings_column,
string_scalar const& delimiter,
size_type maxsplit,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::split(strings_column, delimiter, maxsplit, mr);
}
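// Usage sketch (names are illustrative, not from a test): given a
// strings_column_view `scv` over ["a_b", "c_d_e"], split(scv, string_scalar("_"))
// returns a table of three strings columns, while split(scv, string_scalar("_"), 1)
// returns two columns ({"a","c"} and {"b","d_e"}), mirroring Pandas
// split(expand=True) with the rows/columns transposed.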
std::unique_ptr<table> rsplit(strings_column_view const& strings_column,
string_scalar const& delimiter,
size_type maxsplit,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::rsplit(strings_column, delimiter, maxsplit, mr);
}
} // namespace strings
} // namespace cudf
| d390a087fe481f8a9737e117568b27736390592a.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/strings_column_factories.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/split/split.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <strings/split/split_utils.cuh>
#include <thrust/binary_search.h> // upper_bound()
#include <thrust/copy.h> // copy_if()
#include <thrust/count.h> // count_if()
#include <thrust/extrema.h> // max()
#include <thrust/transform.h> // transform()
namespace cudf {
namespace strings {
namespace detail {
using string_index_pair = thrust::pair<const char*, size_type>;
namespace {
/**
* @brief Base class for delimiter-based tokenizers.
*
* These are common methods used by both split and rsplit tokenizer functors.
*/
struct base_split_tokenizer {
__device__ const char* get_base_ptr() const
{
return d_strings.child(strings_column_view::chars_column_index).data<char>();
}
__device__ string_view const get_string(size_type idx) const
{
return d_strings.element<string_view>(idx);
}
__device__ bool is_valid(size_type idx) const { return d_strings.is_valid(idx); }
/**
* @brief Initialize token elements for all strings.
*
* The process_tokens() function only handles creating tokens for strings that contain
* delimiters. This function will initialize the output tokens for all
* strings by assigning null entries for null and empty strings and the
* string itself for strings with no delimiters.
*
* The tokens are placed in output order so that all tokens for each output
* column are stored consecutively in `d_all_tokens`.
*
* @param idx Index of string in column
* @param column_count Number of columns in output
* @param d_all_tokens Tokens vector for all strings
*/
__device__ void init_tokens(size_type idx,
size_type column_count,
string_index_pair* d_all_tokens) const
{
auto d_tokens = d_all_tokens + idx;
if (is_valid(idx)) {
auto d_str = get_string(idx);
*d_tokens = string_index_pair{d_str.data(), d_str.size_bytes()};
--column_count;
d_tokens += d_strings.size();
}
// this is like fill() but output needs to be strided
for (size_type col = 0; col < column_count; ++col)
d_tokens[d_strings.size() * col] = string_index_pair{nullptr, 0};
}
base_split_tokenizer(column_device_view const& d_strings,
string_view const& d_delimiter,
size_type max_tokens)
: d_strings(d_strings), d_delimiter(d_delimiter), max_tokens(max_tokens)
{
}
protected:
column_device_view const d_strings; // strings to split
string_view const d_delimiter; // delimiter for split
size_type max_tokens;
};
/**
* @brief The tokenizer functions for split().
*
* The methods here count delimiters, tokens, and output token elements
* for each string in a strings column.
*/
struct split_tokenizer_fn : base_split_tokenizer {
/**
* @brief This will create tokens around each delimiter honoring the string boundaries
* in which the delimiter resides.
*
* Each token is placed in `d_all_tokens` so they align consecutively
* with other tokens for the same output column.
* That is, `d_tokens[col * strings_count + string_index]` is the token at column `col`
* for string at `string_index`.
*
* @param idx Index of the delimiter in the chars column
* @param column_count Number of output columns
* @param d_token_counts Token counts for each string
* @param d_positions The beginning byte position of each delimiter
* @param positions_count Number of delimiters
* @param d_indexes Indices of the strings for each delimiter
* @param d_all_tokens All output tokens for the strings column
*/
__device__ void process_tokens(size_type idx,
size_type column_count,
size_type const* d_token_counts,
size_type const* d_positions,
size_type positions_count,
size_type const* d_indexes,
string_index_pair* d_all_tokens) const
{
size_type str_idx = d_indexes[idx];
if ((idx > 0) && d_indexes[idx - 1] == str_idx)
return; // the first delimiter for the string rules them all
--str_idx; // all of these are off by 1 from the upper_bound call
size_type token_count = d_token_counts[str_idx]; // max_tokens already included
const char* const base_ptr = get_base_ptr(); // d_positions values are based on this ptr
// this string's tokens output
auto d_tokens = d_all_tokens + str_idx;
// this string
const string_view d_str = get_string(str_idx);
const char* str_ptr = d_str.data(); // beginning of the string
const char* const str_end_ptr = str_ptr + d_str.size_bytes(); // end of the string
// build the index-pair of each token for this string
for (size_type col = 0; col < token_count; ++col) {
auto next_delim = ((idx + col) < positions_count) // boundary check for delims in last string
? (base_ptr + d_positions[idx + col]) // start of next delimiter
: str_end_ptr; // or end of this string
auto eptr = (next_delim < str_end_ptr) // make sure delimiter is inside this string
&& (col + 1 < token_count) // and this is not the last token
? next_delim
: str_end_ptr;
// store the token into the output vector
d_tokens[col * d_strings.size()] =
string_index_pair{str_ptr, static_cast<size_type>(eptr - str_ptr)};
// point past this delimiter
str_ptr = eptr + d_delimiter.size_bytes();
}
}
/**
* @brief Returns `true` if the byte at `idx` is the start of the delimiter.
*
* @param idx Index of a byte in the chars column.
* @param d_offsets Offsets values to locate the chars ranges.
* @param chars_bytes Total number of bytes to process.
* @return true if delimiter is found starting at position `idx`
*/
__device__ bool is_delimiter(size_type idx, // chars index
int32_t const* d_offsets,
size_type chars_bytes) const
{
auto d_chars = get_base_ptr() + d_offsets[0];
if (idx + d_delimiter.size_bytes() > chars_bytes) return false;
return d_delimiter.compare(d_chars + idx, d_delimiter.size_bytes()) == 0;
}
/**
* @brief This counts the tokens for strings that contain delimiters.
*
* @param idx Index of a delimiter
* @param d_positions Start positions of all the delimiters
* @param positions_count The number of delimiters
* @param d_indexes Indices of the strings for each delimiter
* @param d_counts The token counts for all the strings
*/
__device__ void count_tokens(size_type idx, // delimiter index
size_type const* d_positions,
size_type positions_count,
size_type const* d_indexes,
size_type* d_counts) const
{
size_type str_idx = d_indexes[idx];
if ((idx > 0) && d_indexes[idx - 1] == str_idx)
return; // first delimiter found handles all of them for this string
auto const delim_length = d_delimiter.size_bytes();
string_view const d_str = get_string(str_idx - 1);
const char* const base_ptr = get_base_ptr();
size_type delim_count = 0; // re-count delimiters to compute the token-count
size_type last_pos = d_positions[idx] - delim_length;
while ((idx < positions_count) && (d_indexes[idx] == str_idx)) {
// make sure the whole delimiter is inside the string before counting it
auto d_pos = d_positions[idx];
if (((base_ptr + d_pos + delim_length - 1) < (d_str.data() + d_str.size_bytes())) &&
((d_pos - last_pos) >= delim_length)) {
++delim_count; // only count if the delimiter fits
last_pos = d_pos; // overlapping delimiters are ignored too
}
++idx;
}
// the number of tokens is delim_count+1 but capped to max_tokens
d_counts[str_idx - 1] =
((max_tokens > 0) && (delim_count + 1 > max_tokens)) ? max_tokens : delim_count + 1;
}
split_tokenizer_fn(column_device_view const& d_strings,
string_view const& d_delimiter,
size_type max_tokens)
: base_split_tokenizer(d_strings, d_delimiter, max_tokens)
{
}
};
/**
* @brief The tokenizer functions for rsplit().
*
* The methods here count delimiters, tokens, and output token elements
* for each string in a strings column.
*
* Same as split_tokenizer_fn except tokens are counted from the end of each string.
*/
struct rsplit_tokenizer_fn : base_split_tokenizer {
/**
* @brief This will create tokens around each delimiter honoring the string boundaries
* in which the delimiter resides.
*
* The tokens are processed from the end of each string so the `max_tokens`
* is honored correctly.
*
* Each token is placed in `d_all_tokens` so they align consecutively
* with other tokens for the same output column.
* That is, `d_tokens[col * strings_count + string_index]` is the token at column `col`
* for string at `string_index`.
*
* @param idx Index of the delimiter in the chars column
* @param column_count Number of output columns
* @param d_token_counts Token counts for each string
* @param d_positions The ending byte position of each delimiter
* @param positions_count Number of delimiters
* @param d_indexes Indices of the strings for each delimiter
* @param d_all_tokens All output tokens for the strings column
*/
__device__ void process_tokens(size_type idx, // delimiter position index
size_type column_count, // number of output columns
size_type const* d_token_counts, // token counts for each string
size_type const* d_positions, // end of each delimiter
size_type positions_count, // total number of delimiters
size_type const* d_indexes, // string indices for each delimiter
string_index_pair* d_all_tokens) const
{
size_type str_idx = d_indexes[idx];
if ((idx + 1 < positions_count) && d_indexes[idx + 1] == str_idx)
return; // the last delimiter for the string rules them all
--str_idx; // all of these are off by 1 from the upper_bound call
size_type token_count = d_token_counts[str_idx]; // max_tokens already included
const char* const base_ptr = get_base_ptr(); // d_positions values are based on this ptr
// this string's tokens output
auto d_tokens = d_all_tokens + str_idx;
// this string
const string_view d_str = get_string(str_idx);
const char* const str_begin_ptr = d_str.data(); // beginning of the string
const char* str_ptr = str_begin_ptr + d_str.size_bytes(); // end of the string
// build the index-pair of each token for this string
for (size_type col = 0; col < token_count; ++col) {
auto prev_delim = (idx >= col) // boundary check for delims in first string
? (base_ptr + d_positions[idx - col] + 1) // end of prev delimiter
: str_begin_ptr; // or the start of this string
auto sptr = (prev_delim > str_begin_ptr) // make sure delimiter is inside the string
&& (col + 1 < token_count) // and this is not the last token
? prev_delim
: str_begin_ptr;
// store the token into the output -- building the array backwards
d_tokens[d_strings.size() * (token_count - 1 - col)] =
string_index_pair{sptr, static_cast<size_type>(str_ptr - sptr)};
str_ptr = sptr - d_delimiter.size_bytes(); // get ready for the next prev token
}
}
/**
* @brief Returns `true` if the byte at `idx` is the end of the delimiter.
*
* @param idx Index of a byte in the chars column.
* @param d_offsets Offsets values to locate the chars ranges.
* @param chars_bytes Total number of bytes to process.
* @return true if delimiter is found ending at position `idx`
*/
__device__ bool is_delimiter(size_type idx, int32_t const* d_offsets, size_type chars_bytes) const
{
auto delim_length = d_delimiter.size_bytes();
if (idx < delim_length - 1) return false;
auto d_chars = get_base_ptr() + d_offsets[0];
return d_delimiter.compare(d_chars + idx - (delim_length - 1), delim_length) == 0;
}
/**
* @brief This counts the tokens for strings that contain delimiters.
*
* Token counting starts at the end of the string to honor the `max_tokens`
* appropriately.
*
* @param idx Index of a delimiter
* @param d_positions End positions of all the delimiters
* @param positions_count The number of delimiters
* @param d_indexes Indices of the strings for each delimiter
* @param d_counts The token counts for all the strings
*/
__device__ void count_tokens(size_type idx,
size_type const* d_positions,
size_type positions_count,
size_type const* d_indexes,
size_type* d_counts) const
{
size_type str_idx = d_indexes[idx]; // 1-based string index created by upper_bound()
if ((idx > 0) && d_indexes[idx - 1] == str_idx)
return; // first delimiter found handles all of them for this string
auto const delim_length = d_delimiter.size_bytes();
const string_view d_str = get_string(str_idx - 1); // -1 for 0-based index
const char* const base_ptr = get_base_ptr();
size_type delim_count = 0;
size_type last_pos = d_positions[idx] - delim_length;
while ((idx < positions_count) && (d_indexes[idx] == str_idx)) {
// make sure the whole delimiter is inside the string before counting it
auto d_pos = d_positions[idx];
if (((base_ptr + d_pos + 1 - delim_length) >= d_str.data()) &&
((d_pos - last_pos) >= delim_length)) {
++delim_count; // only count if the delimiter fits
last_pos = d_pos; // overlapping delimiters are also ignored
}
++idx;
}
// the number of tokens is delim_count+1 but capped to max_tokens
d_counts[str_idx - 1] =
((max_tokens > 0) && (delim_count + 1 > max_tokens)) ? max_tokens : delim_count + 1;
}
rsplit_tokenizer_fn(column_device_view const& d_strings,
string_view const& d_delimiter,
size_type max_tokens)
: base_split_tokenizer(d_strings, d_delimiter, max_tokens)
{
}
};
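// Illustration (hypothetical input): with maxsplit == 1 (max_tokens == 2) the
// string "a_b_c" becomes {"a", "b_c"} under split_tokenizer_fn but {"a_b", "c"}
// here, since tokens are counted and emitted from the end of the string,
// matching Pandas rsplit(n=1).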
/**
* @brief Generic split function called by split() and rsplit().
*
* This function will first count the number of delimiters in the entire strings
* column. Next it records the position of all the delimiters. These positions
* are used for the remainder of the code to build string_index_pair elements
* for each output column.
*
* The number of tokens for each string is computed by analyzing the delimiter
* position values and mapping them to each string.
* The number of output columns is determined by the string with the most tokens.
* Next the `string_index_pairs` for the entire column are created using the
* delimiter positions and their string indices vector.
*
* Finally, each column is built by creating a vector of tokens (`string_index_pairs`)
* according to their position in each string. The first token from each string goes
* into the first output column, the 2nd token from each string goes into the 2nd
* output column, etc.
*
* Output should be comparable to Pandas `split()` with `expand=True` but the
* rows/columns are transposed.
*
* ```
* import pandas as pd
* pd_series = pd.Series(['', None, 'a_b', '_a_b_', '__aa__bb__', '_a__bbb___c', '_aa_b__ccc__'])
* print(pd_series.str.split(pat='_', expand=True))
* 0 1 2 3 4 5 6
* 0 '' None None None None None None
* 1 None None None None None None None
* 2 a b None None None None None
* 3 '' a b '' None None None
* 4 '' '' aa '' bb '' ''
* 5 '' a '' bbb '' '' c
* 6 '' aa b '' ccc '' ''
*
* print(pd_series.str.split(pat='_', n=1, expand=True))
* 0 1
* 0 '' None
* 1 None None
* 2 a b
* 3 '' a_b_
* 4 '' _aa__bb__
* 5 '' a__bbb___c
* 6 '' aa_b__ccc__
*
* print(pd_series.str.split(pat='_', n=2, expand=True))
* 0 1 2
* 0 '' None None
* 1 None None None
* 2 a b None
* 3 '' a b_
* 4 '' aa__bb__
* 5 '' a _bbb___c
* 6 '' aa b__ccc__
* ```
*
* @tparam Tokenizer provides unique functions for split/rsplit.
* @param strings_column The strings to split
* @param tokenizer Tokenizer for counting and producing tokens
* @return table of columns for the output of the split
*/
template <typename Tokenizer>
std::unique_ptr<table> split_fn(strings_column_view const& strings_column,
Tokenizer tokenizer,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
std::vector<std::unique_ptr<column>> results;
auto strings_count = strings_column.size();
if (strings_count == 0) {
results.push_back(make_empty_strings_column(mr, stream));
return std::make_unique<table>(std::move(results));
}
auto execpol = rmm::exec_policy(stream);
auto d_offsets = strings_column.offsets().data<int32_t>();
d_offsets += strings_column.offset(); // nvbug-2808421 : do not combine with the previous line
auto chars_bytes = thrust::device_pointer_cast(d_offsets)[strings_count] -
thrust::device_pointer_cast(d_offsets)[0];
// count the number of delimiters in the entire column
size_type delimiter_count =
thrust::count_if(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(chars_bytes),
[tokenizer, d_offsets, chars_bytes] __device__(size_type idx) {
return tokenizer.is_delimiter(idx, d_offsets, chars_bytes);
});
// create vector of every delimiter position in the chars column
rmm::device_vector<size_type> delimiter_positions(delimiter_count);
auto d_positions = delimiter_positions.data().get();
auto copy_end = thrust::copy_if(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(chars_bytes),
delimiter_positions.begin(),
[tokenizer, d_offsets, chars_bytes] __device__(size_type idx) {
return tokenizer.is_delimiter(idx, d_offsets, chars_bytes);
});
// create vector of string indices for each delimiter
rmm::device_vector<size_type> string_indices(delimiter_count); // these will be strings that
auto d_string_indices = string_indices.data().get(); // only contain delimiters
thrust::upper_bound(execpol->on(stream),
d_offsets,
d_offsets + strings_count,
delimiter_positions.begin(),
copy_end,
string_indices.begin());
// compute the number of tokens per string
rmm::device_vector<size_type> token_counts(strings_count);
auto d_token_counts = token_counts.data().get();
// first, initialize token counts for strings without delimiters in them
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_token_counts,
[tokenizer] __device__(size_type idx) {
// nulls are 0, all others 1
return static_cast<size_type>(tokenizer.is_valid(idx));
});
// now compute the number of tokens in each string
thrust::for_each_n(
execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
delimiter_count,
[tokenizer, d_positions, delimiter_count, d_string_indices, d_token_counts] __device__(
size_type idx) {
tokenizer.count_tokens(idx, d_positions, delimiter_count, d_string_indices, d_token_counts);
});
// the columns_count is the maximum number of tokens for any string
size_type columns_count =
*thrust::max_element(execpol->on(stream), token_counts.begin(), token_counts.end());
// boundary case: if no columns, return one null column (custrings issue #119)
if (columns_count == 0) {
results.push_back(
std::make_unique<column>(data_type{type_id::STRING},
strings_count,
rmm::device_buffer{0, stream, mr}, // no data
create_null_mask(strings_count, mask_state::ALL_NULL, stream, mr),
strings_count));
}
// create working area to hold all token positions
rmm::device_vector<string_index_pair> tokens(columns_count * strings_count);
string_index_pair* d_tokens = tokens.data().get();
// initialize the token positions
// -- accounts for nulls, empty, and strings with no delimiter in them
thrust::for_each_n(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[tokenizer, columns_count, d_tokens] __device__(size_type idx) {
tokenizer.init_tokens(idx, columns_count, d_tokens);
});
// get the positions for every token using the delimiter positions
thrust::for_each_n(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
delimiter_count,
[tokenizer,
columns_count,
d_token_counts,
d_positions,
delimiter_count,
d_string_indices,
d_tokens] __device__(size_type idx) {
tokenizer.process_tokens(idx,
columns_count,
d_token_counts,
d_positions,
delimiter_count,
d_string_indices,
d_tokens);
});
// Create each column.
// - Each pair points to the strings for that column for each row.
// - Create the strings column from the vector using the strings factory.
for (size_type col = 0; col < columns_count; ++col) {
auto column_tokens = d_tokens + (col * strings_count);
results.emplace_back(
make_strings_column(column_tokens, column_tokens + strings_count, mr, stream));
}
return std::make_unique<table>(std::move(results));
}
/**
* @brief Base class for whitespace tokenizers.
*
* These are common methods used by both split and rsplit tokenizer functors.
*/
struct base_whitespace_split_tokenizer {
// count the tokens only between non-whitespace characters
__device__ size_type count_tokens(size_type idx) const
{
if (d_strings.is_null(idx)) return 0;
const string_view d_str = d_strings.element<string_view>(idx);
size_type token_count = 0;
// run of whitespace is considered a single delimiter
bool spaces = true;
auto itr = d_str.begin();
while (itr != d_str.end()) {
char_utf8 ch = *itr;
if (spaces == (ch <= ' '))
itr++;
else {
token_count += static_cast<size_type>(spaces);
spaces = !spaces;
}
}
if (max_tokens && (token_count > max_tokens)) token_count = max_tokens;
if (token_count == 0) token_count = 1; // always at least 1 token
return token_count;
}
base_whitespace_split_tokenizer(column_device_view const& d_strings, size_type max_tokens)
: d_strings(d_strings), max_tokens(max_tokens)
{
}
protected:
column_device_view const d_strings;
size_type max_tokens; // maximum number of tokens
};
/**
* @brief The tokenizer functions for split() with whitespace.
*
* The whitespace tokenizer has no delimiter and handles one or more
* consecutive whitespace characters as a single delimiter.
*/
struct whitespace_split_tokenizer_fn : base_whitespace_split_tokenizer {
/**
* @brief This will create tokens around each run of whitespace characters.
*
* Each token is placed in `d_all_tokens` so they align consecutively
* with other tokens for the same output column.
* That is, `d_tokens[col * strings_count + string_index]` is the token at column `col`
* for string at `string_index`.
*
* @param idx Index of the string to process
* @param column_count Number of output columns
* @param d_token_counts Token counts for each string
* @param d_all_tokens All output tokens for the strings column
*/
__device__ void process_tokens(size_type idx,
size_type column_count,
size_type const* d_token_counts,
string_index_pair* d_all_tokens) const
{
string_index_pair* d_tokens = d_all_tokens + idx;
if (d_strings.is_null(idx)) return;
string_view const d_str = d_strings.element<cudf::string_view>(idx);
if (d_str.empty()) return;
whitespace_string_tokenizer tokenizer(d_str);
size_type token_count = d_token_counts[idx];
size_type token_idx = 0;
position_pair token{0, 0};
while (tokenizer.next_token() && (token_idx < token_count)) {
token = tokenizer.get_token();
d_tokens[d_strings.size() * (token_idx++)] =
string_index_pair{d_str.data() + token.first, (token.second - token.first)};
}
if (token_count == max_tokens)
d_tokens[d_strings.size() * (token_idx - 1)] =
string_index_pair{d_str.data() + token.first, (d_str.size_bytes() - token.first)};
}
whitespace_split_tokenizer_fn(column_device_view const& d_strings, size_type max_tokens)
: base_whitespace_split_tokenizer(d_strings, max_tokens)
{
}
};
/**
* @brief The tokenizer functions for rsplit() with whitespace.
*
* The whitespace tokenizer has no delimiter and handles one or more
* consecutive whitespace characters as a single delimiter.
*
* This one processes tokens from the end of each string.
*/
struct whitespace_rsplit_tokenizer_fn : base_whitespace_split_tokenizer {
/**
* @brief This will create tokens around each run of whitespace characters.
*
* Each token is placed in `d_all_tokens` so they align consecutively
* with other tokens for the same output column.
* That is, `d_tokens[col * strings_count + string_index]` is the token at column `col`
* for string at `string_index`.
*
* @param idx Index of the string to process
* @param column_count Number of output columns
* @param d_token_counts Token counts for each string
* @param d_all_tokens All output tokens for the strings column
*/
__device__ void process_tokens(size_type idx, // string position index
size_type column_count,
size_type const* d_token_counts,
string_index_pair* d_all_tokens) const
{
string_index_pair* d_tokens = d_all_tokens + idx;
if (d_strings.is_null(idx)) return;
string_view const d_str = d_strings.element<cudf::string_view>(idx);
if (d_str.empty()) return;
whitespace_string_tokenizer tokenizer(d_str, true);
size_type token_count = d_token_counts[idx];
size_type token_idx = 0;
position_pair token{0, 0};
while (tokenizer.prev_token() && (token_idx < token_count)) {
token = tokenizer.get_token();
d_tokens[d_strings.size() * (token_count - 1 - token_idx)] =
string_index_pair{d_str.data() + token.first, (token.second - token.first)};
++token_idx;
}
if (token_count == max_tokens)
d_tokens[d_strings.size() * (token_count - token_idx)] =
string_index_pair{d_str.data(), token.second};
}
whitespace_rsplit_tokenizer_fn(column_device_view const& d_strings, size_type max_tokens)
: base_whitespace_split_tokenizer(d_strings, max_tokens)
{
}
};
/**
* @brief Generic split function called by split() and rsplit() using whitespace as a delimiter.
*
* The number of tokens for each string is computed by counting consecutive characters
* between runs of whitespace in each string. The number of output columns is determined
* by the string with the most tokens. Next the string_index_pairs for the entire column
* is created.
*
* Finally, each column is built by creating a vector of tokens (string_index_pairs)
* according to their position in each string. The first token from each string goes
* into the first output column, the 2nd token from each string goes into the 2nd
* output column, etc.
*
* This can be compared to Pandas `split()` with no delimiter and with `expand=True` but
* with the rows/columns transposed.
*
* import pandas as pd
* pd_series = pd.Series(['', None, 'a b', ' a b ', ' aa bb ', ' a bbb c', ' aa b ccc '])
* print(pd_series.str.split(pat=None, expand=True))
* 0 1 2
* 0 None None None
* 1 None None None
* 2 a b None
* 3 a b None
* 4 aa bb None
* 5 a bbb c
* 6 aa b ccc
*
* print(pd_series.str.split(pat=None, n=1, expand=True))
* 0 1
* 0 None None
* 1 None None
* 2 a b
* 3 a b
* 4 aa bb
* 5 a bbb c
* 6 aa b ccc
*
* print(pd_series.str.split(pat=None, n=2, expand=True))
* 0 1 2
* 0 None None None
* 1 None None None
* 2 a b None
* 3 a b None
* 4 aa bb None
* 5 a bbb c
* 6 aa b ccc
*
* @tparam Tokenizer provides unique functions for split/rsplit.
* @param strings_count The number of strings in the column
* @param tokenizer Tokenizer for counting and producing tokens
* @return table of columns for the output of the split
*/
template <typename Tokenizer>
std::unique_ptr<table> whitespace_split_fn(size_type strings_count,
Tokenizer tokenizer,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto execpol = rmm::exec_policy(stream);
// compute the number of tokens per string
size_type columns_count = 0;
rmm::device_vector<size_type> token_counts(strings_count);
auto d_token_counts = token_counts.data().get();
if (strings_count > 0) {
thrust::transform(
execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_token_counts,
[tokenizer] __device__(size_type idx) { return tokenizer.count_tokens(idx); });
// column count is the maximum number of tokens for any string
columns_count =
*thrust::max_element(execpol->on(stream), token_counts.begin(), token_counts.end());
}
std::vector<std::unique_ptr<column>> results;
// boundary case: if no columns, return one null column (issue #119)
if (columns_count == 0) {
results.push_back(
std::make_unique<column>(data_type{type_id::STRING},
strings_count,
rmm::device_buffer{0, stream, mr}, // no data
create_null_mask(strings_count, mask_state::ALL_NULL, stream, mr),
strings_count));
}
// get the positions for every token
rmm::device_vector<string_index_pair> tokens(columns_count * strings_count);
string_index_pair* d_tokens = tokens.data().get();
thrust::fill(execpol->on(stream),
d_tokens,
d_tokens + (columns_count * strings_count),
string_index_pair{nullptr, 0});
thrust::for_each_n(
execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[tokenizer, columns_count, d_token_counts, d_tokens] __device__(size_type idx) {
tokenizer.process_tokens(idx, columns_count, d_token_counts, d_tokens);
});
// Create each column.
// - Each pair points to a string for that column for each row.
// - Create the strings column from the vector using the strings factory.
for (size_type col = 0; col < columns_count; ++col) {
auto column_tokens = d_tokens + (col * strings_count);
results.emplace_back(
make_strings_column(column_tokens, column_tokens + strings_count, mr, stream));
}
return std::make_unique<table>(std::move(results));
}
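// Illustration (hypothetical rows, in the spirit of the Pandas sample above):
// for the column ["a b", " aa b ccc "] the token counts are 2 and 3, so the
// output table has three columns: {"a","aa"}, {"b","b"}, {null,"ccc"}; runs of
// whitespace never produce empty tokens, and empty or all-whitespace rows come
// out as all-null.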
} // namespace
std::unique_ptr<table> split(
strings_column_view const& strings_column,
string_scalar const& delimiter = string_scalar(""),
size_type maxsplit = -1,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(),
cudaStream_t stream = 0)
{
CUDF_EXPECTS(delimiter.is_valid(), "Parameter delimiter must be valid");
size_type max_tokens = 0;
if (maxsplit > 0) max_tokens = maxsplit + 1; // makes consistent with Pandas
auto strings_device_view = column_device_view::create(strings_column.parent(), stream);
if (delimiter.size() == 0) {
return whitespace_split_fn(strings_column.size(),
whitespace_split_tokenizer_fn{*strings_device_view, max_tokens},
mr,
stream);
}
string_view d_delimiter(delimiter.data(), delimiter.size());
return split_fn(
strings_column, split_tokenizer_fn{*strings_device_view, d_delimiter, max_tokens}, mr, stream);
}
std::unique_ptr<table> rsplit(
strings_column_view const& strings_column,
string_scalar const& delimiter = string_scalar(""),
size_type maxsplit = -1,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(),
cudaStream_t stream = 0)
{
CUDF_EXPECTS(delimiter.is_valid(), "Parameter delimiter must be valid");
size_type max_tokens = 0;
  if (maxsplit > 0) max_tokens = maxsplit + 1;  // limit to maxsplit+1 tokens, consistent with Pandas
auto strings_device_view = column_device_view::create(strings_column.parent(), stream);
if (delimiter.size() == 0) {
return whitespace_split_fn(strings_column.size(),
whitespace_rsplit_tokenizer_fn{*strings_device_view, max_tokens},
mr,
stream);
}
string_view d_delimiter(delimiter.data(), delimiter.size());
return split_fn(
strings_column, rsplit_tokenizer_fn{*strings_device_view, d_delimiter, max_tokens}, mr, stream);
}
} // namespace detail
// external APIs
std::unique_ptr<table> split(strings_column_view const& strings_column,
string_scalar const& delimiter,
size_type maxsplit,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::split(strings_column, delimiter, maxsplit, mr);
}
std::unique_ptr<table> rsplit(strings_column_view const& strings_column,
string_scalar const& delimiter,
size_type maxsplit,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::rsplit(strings_column, delimiter, maxsplit, mr);
}
} // namespace strings
} // namespace cudf
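// Editorial usage sketch (not part of the original file); assumes `sv` is a
// cudf::strings_column_view wrapping an existing strings column:
//
//   std::unique_ptr<cudf::table> by_ws      = cudf::strings::split(sv, cudf::string_scalar(""), -1);
//   std::unique_ptr<cudf::table> by_comma   = cudf::strings::split(sv, cudf::string_scalar(","), 1);
//   std::unique_ptr<cudf::table> by_comma_r = cudf::strings::rsplit(sv, cudf::string_scalar(","), 1);
//
// An empty delimiter selects the whitespace tokenizers above, maxsplit = 1 caps the output
// at two columns, and rsplit applies the same cap counting from the right.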
|
a72e3643574cfe2d49fa128c864f36c4f7d0797f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Vinh Le
CSCI 440 - Parallel Computing
Homework 4 - CPU GPU SCAN
Colorado School of Mines 2018
*/
#include <iostream>
#include <ctime>
#include <stdlib.h>
#include <math.h>
#include <cstdio>
using namespace std;
//Set tolerance for the check
#define TOLERANCE 0.001
#define BLOCK_SIZE 1024
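// Editorial summary of the kernel below: each block copies the input shifted right by one
// element into shared memory (the first thread seeds a zero), runs a Hillis-Steele style
// scan over that shifted copy with stride doubling and __syncthreads() between phases, and,
// when an auxiliary array is supplied, writes the last entry of the shared buffer
// (temp[1023], which hard-codes BLOCK_SIZE == 1024) to aux[blockIdx.x] as that block's
// offset for the second-level scan performed in main().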
__global__ void scan (int * arr, int * arr_gpu, int * aux, int n){
__shared__ float temp[BLOCK_SIZE];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int bid = blockIdx.x;
int tid = threadIdx.x;
if (i < n && i > 0) {
temp[tid] = arr[i-1];
}else{
temp[0]= 0;
}
int tempint;
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
__syncthreads();
if(tid>=stride){
tempint = temp[tid - stride];
}
__syncthreads();
if(tid>=stride){
temp[tid] += tempint;
}
}
__syncthreads();
if(i < n) {
arr_gpu[i] = temp[tid];
}
if(tid == 0 && aux != NULL){
aux[bid]=temp[1023];
}
}
<<<<<<< HEAD
//initialize and allocate memory for device same set as host
int * arr_d, * arr_gpu_d;
hipMalloc((void**) & arr_d, n*sizeof(int));
hipMalloc((void**) & arr_gpu_d, n*sizeof(int));
//copy data from host to device
hipMemcpy(arr_d, arr, n*sizeof(int), hipMemcpyHostToDevice);
//GPU SCAN
int NUM_BLOCK = ceil(n/BLOCK_SIZE);
printf("%d\n %d\n",n, BLOCK_SIZE );
printf("%d\n", NUM_BLOCK );
hipLaunchKernelGGL(( scan), dim3(NUM_BLOCK), dim3(BLOCK_SIZE), 0, 0, arr_d, arr_gpu_d, n);
//copy data from device to host
hipMemcpy(arr_gpu, arr_gpu_d, n*sizeof(float), hipMemcpyDeviceToHost);
//Compares arr_cpu with arr_gpu to determine accuracy
int tfail = 0;
for (int i = 0; i < n; i++) {
if (abs(arr_gpu[i] - arr_cpu[i]) > TOLERANCE) {//take abs value and compare with tolerance
tfail += 1;//if difference exceeds tolerance
}
=======
__global__ void finish (int * arr,int *aux, int NUM_BLOCK){
int bid = blockIdx.x;
int tid = threadIdx.x;
if (bid>=1){
arr[bid*BLOCK_SIZE+tid] += aux[bid];
}
__syncthreads();
>>>>>>> 389eb4edb380319ddf1c6433c3fd73fc06fb3d7e
}
/*
__global__ void finish (int * arr, int NUM_BLOCK){
int tid = threadIdx.x;
for(int j = 1; j<NUM_BLOCK;j++){
arr[j*BLOCK_SIZE+tid] += arr[j*BLOCK_SIZE-1];
__syncthreads();
}
}
*/
int main(int argc, char *argv[]){
srand(time(NULL));
int n = atoi(argv[1]);
//Generate array
cout<<"Generating "<<n<< " random numbers"<<endl;
int *arr, * arr_cpu, * arr_gpu;
arr = (int *) malloc(n*sizeof(int));
arr_cpu = (int *) malloc(n*sizeof(int));
arr_gpu = (int *) malloc(n*sizeof(int));
//fill arr with rnd nums between 1-1000
for (int i = 0; i<n; i++){
arr[i]= rand()%1000 + 1;
//arr[i]=1;//for debug
}
cout<<"CPU SCAN"<<endl;
//set 0th element
arr_cpu[0]=0;
// CPU SCAN
for (int i=1; i<n; i++) {
arr_cpu[i]= arr_cpu[i-1]+arr[i-1];
}
cout<<"GPU SCAN"<<endl;
//initialize and allocate memory for device same set as host
int * arr_d, * arr_gpu_d;
hipMalloc((void**) & arr_d, n*sizeof(int));
hipMalloc((void**) & arr_gpu_d, n*sizeof(int));
int NUM_BLOCK = ceil((float)n/BLOCK_SIZE);
int * aux_d;
hipMalloc((void**) & aux_d, NUM_BLOCK*sizeof(int));
//copy data from host to device
hipMemcpy(arr_d, arr, n*sizeof(int), hipMemcpyHostToDevice);
//GPU SCAN
hipLaunchKernelGGL(( scan), dim3(NUM_BLOCK), dim3(BLOCK_SIZE), 0, 0, arr_d, arr_gpu_d, aux_d, n);//Scan main array
hipLaunchKernelGGL(( scan), dim3(1), dim3(BLOCK_SIZE), 0, 0, aux_d, aux_d, NULL, n);//scan aux array
hipLaunchKernelGGL(( finish), dim3(NUM_BLOCK), dim3(BLOCK_SIZE), 0, 0, arr_gpu_d, aux_d, NUM_BLOCK);//add aux array to main array
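// Editorial note: the second-level scan above runs in a single block, so this scheme
// covers at most BLOCK_SIZE block totals, i.e. roughly n <= BLOCK_SIZE * BLOCK_SIZE
// (about one million) elements with the hard-coded BLOCK_SIZE of 1024.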
//copy data from device to host
hipMemcpy(arr_gpu, arr_gpu_d, n*sizeof(int), hipMemcpyDeviceToHost);
//Compares arr_cpu with arr_gpu to determine accuracy
int tfail = 0;
for (int i = 0; i < n; i++) {
if (abs(arr_gpu[i] - arr_cpu[i]) > TOLERANCE) {//take abs value and compare with tolerance
tfail += 1;//if difference exceeds tolerance
}
}
//print the number of failures
cout << "Number of Failures: " << tfail <<"\n";
return 0;
}
| a72e3643574cfe2d49fa128c864f36c4f7d0797f.cu | /*
Vinh Le
CSCI 440 - Parallel Computing
Homework 4 - CPU GPU SCAN
Colorado School of Mines 2018
*/
#include <iostream>
#include <ctime>
#include <stdlib.h>
#include <math.h>
#include <cstdio>
using namespace std;
//Set tolerance for the check
#define TOLERANCE 0.001
#define BLOCK_SIZE 1024
__global__ void scan (int * arr, int * arr_gpu, int * aux, int n){
__shared__ float temp[BLOCK_SIZE];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int bid = blockIdx.x;
int tid = threadIdx.x;
if (i < n && i > 0) {
temp[tid] = arr[i-1];
}else{
temp[0]= 0;
}
int tempint;
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
__syncthreads();
if(tid>=stride){
tempint = temp[tid - stride];
}
__syncthreads();
if(tid>=stride){
temp[tid] += tempint;
}
}
__syncthreads();
if(i < n) {
arr_gpu[i] = temp[tid];
}
if(tid == 0 && aux != NULL){
aux[bid]=temp[1023];
}
}
<<<<<<< HEAD
//initialize and allocate memory for device same set as host
int * arr_d, * arr_gpu_d;
cudaMalloc((void**) & arr_d, n*sizeof(int));
cudaMalloc((void**) & arr_gpu_d, n*sizeof(int));
//copy data from host to device
cudaMemcpy(arr_d, arr, n*sizeof(int), cudaMemcpyHostToDevice);
//GPU SCAN
int NUM_BLOCK = ceil(n/BLOCK_SIZE);
printf("%d\n %d\n",n, BLOCK_SIZE );
printf("%d\n", NUM_BLOCK );
scan<<<NUM_BLOCK, BLOCK_SIZE>>>(arr_d, arr_gpu_d, n);
//copy data from device to host
cudaMemcpy(arr_gpu, arr_gpu_d, n*sizeof(float), cudaMemcpyDeviceToHost);
//Compares arr_cpu with arr_gpu to determine accuracy
int tfail = 0;
for (int i = 0; i < n; i++) {
if (abs(arr_gpu[i] - arr_cpu[i]) > TOLERANCE) {//take abs value and compare with tolerance
tfail += 1;//if difference exceeds tolerance
}
=======
__global__ void finish (int * arr,int *aux, int NUM_BLOCK){
int bid = blockIdx.x;
int tid = threadIdx.x;
if (bid>=1){
arr[bid*BLOCK_SIZE+tid] += aux[bid];
}
__syncthreads();
>>>>>>> 389eb4edb380319ddf1c6433c3fd73fc06fb3d7e
}
/*
__global__ void finish (int * arr, int NUM_BLOCK){
int tid = threadIdx.x;
for(int j = 1; j<NUM_BLOCK;j++){
arr[j*BLOCK_SIZE+tid] += arr[j*BLOCK_SIZE-1];
__syncthreads();
}
}
*/
int main(int argc, char *argv[]){
srand(time(NULL));
int n = atoi(argv[1]);
//Generate array
cout<<"Generating "<<n<< " random numbers"<<endl;
int *arr, * arr_cpu, * arr_gpu;
arr = (int *) malloc(n*sizeof(int));
arr_cpu = (int *) malloc(n*sizeof(int));
arr_gpu = (int *) malloc(n*sizeof(int));
//fill arr with rnd nums between 1-1000
for (int i = 0; i<n; i++){
arr[i]= rand()%1000 + 1;
//arr[i]=1;//for debug
}
cout<<"CPU SCAN"<<endl;
//set 0th element
arr_cpu[0]=0;
// CPU SCAN
for (int i=1; i<n; i++) {
arr_cpu[i]= arr_cpu[i-1]+arr[i-1];
}
cout<<"GPU SCAN"<<endl;
//initialize and allocate memory for device same set as host
int * arr_d, * arr_gpu_d;
cudaMalloc((void**) & arr_d, n*sizeof(int));
cudaMalloc((void**) & arr_gpu_d, n*sizeof(int));
int NUM_BLOCK = ceil((float)n/BLOCK_SIZE);
int * aux_d;
cudaMalloc((void**) & aux_d, NUM_BLOCK*sizeof(int));
//copy data from host to device
cudaMemcpy(arr_d, arr, n*sizeof(int), cudaMemcpyHostToDevice);
//GPU SCAN
scan<<<NUM_BLOCK, BLOCK_SIZE>>>(arr_d, arr_gpu_d, aux_d, n);//Scan main array
scan<<<1, BLOCK_SIZE>>>(aux_d, aux_d, NULL, n);//scan aux array
finish<<<NUM_BLOCK, BLOCK_SIZE>>>(arr_gpu_d, aux_d, NUM_BLOCK);//add aux array to main array
//copy data from device to host
cudaMemcpy(arr_gpu, arr_gpu_d, n*sizeof(int), cudaMemcpyDeviceToHost);
//Compares arr_cpu with arr_gpu to determine accuracy
int tfail = 0;
for (int i = 0; i < n; i++) {
if (abs(arr_gpu[i] - arr_cpu[i]) > TOLERANCE) {//take abs value and compare with tolerance
tfail += 1;//if difference exceeds tolerance
}
}
//print the number of failures
cout << "Number of Failures: " << tfail <<"\n";
return 0;
}
|
5d78fd3a35acec7608109299f96c8feeeedb7cb2.hip | // !!! This is a file automatically generated by hipify!!!
#include "mat_functions.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
GLOBAL void ker_log_sum( real *t, real *target, int N, real *out)
{
//extern __shared__ real sdata[];
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
real x = 0;
if (idx < N) {
x = t[ idx ];
if (x <= 0)
out[ idx ] = log( 1. + exp(x) ) - ((target[idx] - 1.) * t[ idx ]);
else
out[ idx ] = ( x + log( exp(-x) + 1.) ) - ((target[idx] - 1.) * t[ idx] );
}
}
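// Editorial note on ker_sigmoid below: it computes the logistic function 1 / (1 + exp(-x)),
// switching to the algebraically equivalent exp(x) / (1 + exp(x)) for negative inputs so
// that exp() cannot overflow.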
GLOBAL void ker_sigmoid( real *s, int N, real *out)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
real x = 0;
real alpha = 0;
if (idx < N) {
x = s[ idx ];
if ( x < 0 )
out[ idx ] = exp( x ) / (1. + exp(x) );
else
out[ idx ] = 1. / (1. + exp(-x) );
}
}
GLOBAL void ker_sigmoid_classify( real *s, int N )
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
if (s[ idx ] <= 0 ){
if (exp(s[idx])/ ( (1. + exp(s[idx]) )) > 0.5)
s[idx] = 1.;
else
s[idx] = 0.;
} else {
if (1. / (1. + exp(-s[idx]) ) > 0.5)
s[idx] = 1.;
else
s[idx] = 0.;
}
}
}
GLOBAL void ker_sigmoid_target( real *t, real *target, int N, real *out)
{
real x = 0;
real alpha = 0;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
x = t[ idx ];
if (x < 0 )
out[idx] = ( exp(x)/ ( 1. + exp(x) )) - (target[ idx ] - 1.);
else
out[idx] = ( 1./ ( 1. + exp(-x) )) - (target[ idx ] - 1.);
}
}
GLOBAL void ker_ele_vec_product( real *t1, real *t2, int N, real *out)
{
//extern __shared__ real sdata[];
//real x = 0;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) out[ idx ] = t1[ idx ] * t2[ idx ];
//sdata[ threadIdx.x ] = x;
//if (idx < N) out[idx] = sdata[threadIdx.x] ;
}
GLOBAL void ker_mat_identity( real *matrix, real gamma, int M)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < M)
matrix[ idx * M + idx ] += gamma;
}
GLOBAL void ker_hx_matvec_reg ( real *hx, real gamma, real *vec, int c)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < c) {
hx[ idx ]+= gamma * vec[ idx ];
}
}
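// Editorial note on ker_reduction below: a standard shared-memory tree reduction that
// leaves one partial sum per block in per_block_results; the halving loop assumes
// blockDim.x is a power of two, and a second pass (or a host-side sum) over
// per_block_results is still needed to obtain the final total.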
GLOBAL void ker_reduction(const real *input, real *per_block_results, int n)
{
extern __shared__ real sdata[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
real x = 0;
if(i < n)
{
x = input[i];
}
sdata[threadIdx.x] = x;
__syncthreads();
for(int offset = blockDim.x / 2; offset > 0; offset >>= 1)
{
if(threadIdx.x < offset)
{
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
__syncthreads();
}
if(threadIdx.x == 0)
{
per_block_results[blockIdx.x] = sdata[0];
}
}
| 5d78fd3a35acec7608109299f96c8feeeedb7cb2.cu | #include "mat_functions.h"
#include "cuda.h"
#include "cuda_runtime.h"
GLOBAL void ker_log_sum( real *t, real *target, int N, real *out)
{
//extern __shared__ real sdata[];
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
real x = 0;
if (idx < N) {
x = t[ idx ];
if (x <= 0)
out[ idx ] = log( 1. + exp(x) ) - ((target[idx] - 1.) * t[ idx ]);
else
out[ idx ] = ( x + log( exp(-x) + 1.) ) - ((target[idx] - 1.) * t[ idx] );
}
}
GLOBAL void ker_sigmoid( real *s, int N, real *out)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
real x = 0;
real alpha = 0;
if (idx < N) {
x = s[ idx ];
if ( x < 0 )
out[ idx ] = exp( x ) / (1. + exp(x) );
else
out[ idx ] = 1. / (1. + exp(-x) );
}
}
GLOBAL void ker_sigmoid_classify( real *s, int N )
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
if (s[ idx ] <= 0 ){
if (exp(s[idx])/ ( (1. + exp(s[idx]) )) > 0.5)
s[idx] = 1.;
else
s[idx] = 0.;
} else {
if (1. / (1. + exp(-s[idx]) ) > 0.5)
s[idx] = 1.;
else
s[idx] = 0.;
}
}
}
GLOBAL void ker_sigmoid_target( real *t, real *target, int N, real *out)
{
real x = 0;
real alpha = 0;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
x = t[ idx ];
if (x < 0 )
out[idx] = ( exp(x)/ ( 1. + exp(x) )) - (target[ idx ] - 1.);
else
out[idx] = ( 1./ ( 1. + exp(-x) )) - (target[ idx ] - 1.);
}
}
GLOBAL void ker_ele_vec_product( real *t1, real *t2, int N, real *out)
{
//extern __shared__ real sdata[];
//real x = 0;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) out[ idx ] = t1[ idx ] * t2[ idx ];
//sdata[ threadIdx.x ] = x;
//if (idx < N) out[idx] = sdata[threadIdx.x] ;
}
GLOBAL void ker_mat_identity( real *matrix, real gamma, int M)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < M)
matrix[ idx * M + idx ] += gamma;
}
GLOBAL void ker_hx_matvec_reg ( real *hx, real gamma, real *vec, int c)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < c) {
hx[ idx ]+= gamma * vec[ idx ];
}
}
GLOBAL void ker_reduction(const real *input, real *per_block_results, int n)
{
extern __shared__ real sdata[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
real x = 0;
if(i < n)
{
x = input[i];
}
sdata[threadIdx.x] = x;
__syncthreads();
for(int offset = blockDim.x / 2; offset > 0; offset >>= 1)
{
if(threadIdx.x < offset)
{
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
__syncthreads();
}
if(threadIdx.x == 0)
{
per_block_results[blockIdx.x] = sdata[0];
}
}
|
5ff418b5f82955f0fca6112c76fc7f1e1720a477.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
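////////////////////////////////////////////////////////////////////////////////
// Editorial note (not generator output): kernels are tiered by CUASR_BENCH_LEVEL.
// A block guarded by ">= 2" only compiles when the larger sweep is requested,
// while ">= 1" blocks (such as the 64x64x8_32x16x1 kernel above) and ">= 0"
// blocks form the smaller default sets. Each registration sweeps N over 256,
// 512, 1024, 2048 and 4096 via RangeMultiplier(2)->Range(256, 4096). The
// "Flop/s" counter uses flops_per_itr = 2*N^3, i.e. one semiring addition
// (minimum) and one semiring multiplication (plus) per term of the N^3 inner
// products; kIsIterationInvariantRate tells Google Benchmark this work happens
// once per iteration and should be reported as a per-second rate.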
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
| 5ff418b5f82955f0fca6112c76fc7f1e1720a477.cu | /***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
|
9a2d20883240a508c1ad907d2861d1b3a0f50637.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file spatial_transformer.cu
* \brief
* \author Wei Wu
*/
#include "./spatial_transformer-inl.h"
#include <algorithm>
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
#include "./cudnn_spatial_transformer-inl.h"
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
namespace mshadow {
template<typename DType>
__global__ void BilinearSamplingForwardKernel(const int i_c, const int i_h,
const int i_w, const DType* data,
const DType* grid, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* out) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_c * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in out
int w = index % o_w;
int h = (index / o_w) % o_h;
int c = (index / o_w / o_h) % o_c;
int n = index / o_w / o_h / o_c;
index_t out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
index_t grid_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2;
index_t top_left_y = min(i_h, max(0, static_cast<int>(floor(y_real))));
index_t top_left_x = min(i_w, max(0, static_cast<int>(floor(x_real))));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
index_t data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
DType top_left_v = *(data + data_index);
DType top_right_v = *(data + data_index + 1);
DType bottom_left_v = *(data + data_index + i_w);
DType bottom_right_v = *(data + data_index + i_w + 1);
*(out+out_index) = top_left_v * top_left_y_w * top_left_x_w +
top_right_v * top_left_y_w * (1.0 - top_left_x_w) +
bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w +
bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w);
}
}
template<typename DType>
__global__ void BilinearSamplingBackwardKernel(const int i_c, const int i_h,
const int i_w, const DType* grad,
const DType* data, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* g_input,
DType* grid_src) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in grad
int w = index % o_w;
int h = (index / o_w) % o_h;
int n = index / o_w / o_h;
DType top_left_y_gw = 0.0;
DType top_left_x_gw = 0.0;
index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2;
index_t top_left_y = min(i_h, max(0, static_cast<int>(floor(y_real))));
index_t top_left_x = min(i_w, max(0, static_cast<int>(floor(x_real))));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
for (index_t c = 0; c < o_c; ++c) {
index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
index_t data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
// calc 4 vertex value in input data
DType top_left_v = *(data + data_index);
DType top_right_v = *(data + data_index + 1);
DType bottom_left_v = *(data + data_index + i_w);
DType bottom_right_v = *(data + data_index + i_w + 1);
// calc input grad
*(g_input + data_index) += *(grad + grad_index) * top_left_y_w * top_left_x_w;
*(g_input + data_index + 1) += *(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w);
*(g_input + data_index+ i_w) += *(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w;
*(g_input + data_index+ i_w + 1) += *(grad + grad_index) * (1.0 - top_left_y_w) *
(1.0 - top_left_x_w);
      // calc weight grad of top_left_w; multiplying it by -1 gives the grad of grid_src
top_left_y_gw -= *(grad + grad_index) * (top_right_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_x_w);
top_left_x_gw -= *(grad + grad_index) * (bottom_left_v - bottom_right_v + (top_left_v -
top_right_v - bottom_left_v + bottom_right_v) * top_left_y_w);
}
// calc grid_src grad
*(grid_src + grid_src_index + o_h * o_w) = top_left_y_gw * (i_h - 1) / 2;
*(grid_src + grid_src_index) = top_left_x_gw * (i_w - 1) / 2;
}
}
template<typename DType>
inline void BilinearSamplingForward(const Tensor<gpu, 4, DType> &output,
const Tensor<gpu, 4, DType> &input,
const Tensor<gpu, 3, DType> grid_src) {
DType *out = output.dptr_;
const DType *data = input.dptr_;
const DType *grid = grid_src.dptr_;
int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3);
int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3);
using namespace cuda;
const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer forward");
hipStream_t stream = Stream<gpu>::GetStream(output.stream_);
BilinearSamplingForwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >(
i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out);
}
template<typename DType>
inline void BilinearSamplingBackward(const Tensor<gpu, 4, DType> &input_grad,
const Tensor<gpu, 3, DType> &grid_src_data,
const Tensor<gpu, 4, DType> &output_grad,
const Tensor<gpu, 4, DType> &input_data) {
DType *g_input = input_grad.dptr_;
DType *grid_src = grid_src_data.dptr_;
const DType *grad = output_grad.dptr_;
const DType *data = input_data.dptr_;
int o_n = output_grad.size(0), o_c = output_grad.size(1),
o_h = output_grad.size(2), o_w = output_grad.size(3);
int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3);
using namespace cuda;
const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1)
/ kMaxThreadsPerBlock;
dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer backward");
hipStream_t stream = Stream<gpu>::GetStream(input_grad.stream_);
BilinearSamplingBackwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >(
i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(SpatialTransformerParam param, int dtype) {
Operator *op = NULL;
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new CuDNNSpatialTransformerOp<DType>(param);
})
#else
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new SpatialTransformerOp<gpu, DType>(param);
})
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
return op;
}
} // namespace op
} // namespace mxnet
| 9a2d20883240a508c1ad907d2861d1b3a0f50637.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file spatial_transformer.cu
* \brief
* \author Wei Wu
*/
#include "./spatial_transformer-inl.h"
#include <algorithm>
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
#include "./cudnn_spatial_transformer-inl.h"
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
namespace mshadow {
template<typename DType>
__global__ void BilinearSamplingForwardKernel(const int i_c, const int i_h,
const int i_w, const DType* data,
const DType* grid, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* out) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_c * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in out
int w = index % o_w;
int h = (index / o_w) % o_h;
int c = (index / o_w / o_h) % o_c;
int n = index / o_w / o_h / o_c;
index_t out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
index_t grid_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2;
index_t top_left_y = min(i_h, max(0, static_cast<int>(floor(y_real))));
index_t top_left_x = min(i_w, max(0, static_cast<int>(floor(x_real))));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
index_t data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
DType top_left_v = *(data + data_index);
DType top_right_v = *(data + data_index + 1);
DType bottom_left_v = *(data + data_index + i_w);
DType bottom_right_v = *(data + data_index + i_w + 1);
*(out+out_index) = top_left_v * top_left_y_w * top_left_x_w +
top_right_v * top_left_y_w * (1.0 - top_left_x_w) +
bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w +
bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w);
}
}
template<typename DType>
__global__ void BilinearSamplingBackwardKernel(const int i_c, const int i_h,
const int i_w, const DType* grad,
const DType* data, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* g_input,
DType* grid_src) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in grad
int w = index % o_w;
int h = (index / o_w) % o_h;
int n = index / o_w / o_h;
DType top_left_y_gw = 0.0;
DType top_left_x_gw = 0.0;
index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2;
index_t top_left_y = min(i_h, max(0, static_cast<int>(floor(y_real))));
index_t top_left_x = min(i_w, max(0, static_cast<int>(floor(x_real))));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
for (index_t c = 0; c < o_c; ++c) {
index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
index_t data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
// calc 4 vertex value in input data
DType top_left_v = *(data + data_index);
DType top_right_v = *(data + data_index + 1);
DType bottom_left_v = *(data + data_index + i_w);
DType bottom_right_v = *(data + data_index + i_w + 1);
// calc input grad
*(g_input + data_index) += *(grad + grad_index) * top_left_y_w * top_left_x_w;
*(g_input + data_index + 1) += *(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w);
*(g_input + data_index+ i_w) += *(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w;
*(g_input + data_index+ i_w + 1) += *(grad + grad_index) * (1.0 - top_left_y_w) *
(1.0 - top_left_x_w);
      // calc weight grad of top_left_w; multiplying it by -1 gives the grad of grid_src
top_left_y_gw -= *(grad + grad_index) * (top_right_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_x_w);
top_left_x_gw -= *(grad + grad_index) * (bottom_left_v - bottom_right_v + (top_left_v -
top_right_v - bottom_left_v + bottom_right_v) * top_left_y_w);
}
// calc grid_src grad
*(grid_src + grid_src_index + o_h * o_w) = top_left_y_gw * (i_h - 1) / 2;
*(grid_src + grid_src_index) = top_left_x_gw * (i_w - 1) / 2;
}
}
template<typename DType>
inline void BilinearSamplingForward(const Tensor<gpu, 4, DType> &output,
const Tensor<gpu, 4, DType> &input,
const Tensor<gpu, 3, DType> grid_src) {
DType *out = output.dptr_;
const DType *data = input.dptr_;
const DType *grid = grid_src.dptr_;
int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3);
int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3);
using namespace cuda;
const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer forward");
cudaStream_t stream = Stream<gpu>::GetStream(output.stream_);
BilinearSamplingForwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >(
i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out);
}
template<typename DType>
inline void BilinearSamplingBackward(const Tensor<gpu, 4, DType> &input_grad,
const Tensor<gpu, 3, DType> &grid_src_data,
const Tensor<gpu, 4, DType> &output_grad,
const Tensor<gpu, 4, DType> &input_data) {
DType *g_input = input_grad.dptr_;
DType *grid_src = grid_src_data.dptr_;
const DType *grad = output_grad.dptr_;
const DType *data = input_data.dptr_;
int o_n = output_grad.size(0), o_c = output_grad.size(1),
o_h = output_grad.size(2), o_w = output_grad.size(3);
int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3);
using namespace cuda;
const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1)
/ kMaxThreadsPerBlock;
dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer backward");
cudaStream_t stream = Stream<gpu>::GetStream(input_grad.stream_);
BilinearSamplingBackwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >(
i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(SpatialTransformerParam param, int dtype) {
Operator *op = NULL;
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new CuDNNSpatialTransformerOp<DType>(param);
})
#else
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new SpatialTransformerOp<gpu, DType>(param);
})
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
return op;
}
} // namespace op
} // namespace mxnet
|
2630a67d04339d4f4326d8372b1f599b1d1b7a2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
swsharp - CUDA parallelized Smith Waterman with applying Hirschberg's and
Ukkonen's algorithm and dynamic cell pruning.
Copyright (C) 2013 Matija Korpar, contributor Mile Šikić
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact the author by [email protected].
*/
#ifdef __HIPCC__
#include <stdio.h>
#include <stdlib.h>
#include "chain.h"
#include "constants.h"
#include "cuda_utils.h"
#include "error.h"
#include "scorer.h"
#include "thread.h"
#include "utils.h"
#include "gpu_module.h"
#define MAX_THREADS MAX(THREADS_SM1, THREADS_SM2)
#define THREADS_SM1 64
#define BLOCKS_SM1 240
#define THREADS_SM2 128
#define BLOCKS_SM2 240
#define SCORE4_MIN make_int4(SCORE_MIN, SCORE_MIN, SCORE_MIN, SCORE_MIN)
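// Per-thread register state for one 4-row band of the DP matrix: the up-left
// (diagonal) score, the cell above read from the horizontal bus, and the
// left/right 4-wide score and affine-gap columns.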
typedef struct Atom {
int mch;
int2 up;
int4 lScr;
int4 lAff;
int4 rScr;
int4 rAff;
} Atom;
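// Vertical bus: one slot per 4-row band, carrying the diagonal score and the
// 4-wide score/affine columns across block boundaries between diagonal sweeps.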
typedef struct VBus {
int* mch;
int4* scr;
int4* aff;
} VBus;
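// Arguments for the kernel() entry point, bundled so the work can also be
// dispatched asynchronously via threadCreate().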
typedef struct Context {
int* queryEnd;
int* targetEnd;
int* outScore;
Chain* query;
Chain* target;
Scorer* scorer;
int score;
int card;
} Context;
static __constant__ int gapOpen_;
static __constant__ int gapExtend_;
static __constant__ int dRow_;
static __constant__ int rows_;
static __constant__ int cols_;
static __constant__ int cellWidth_;
static __constant__ int scorerLen_;
static __constant__ int subLen_;
static __constant__ int match_;
static __constant__ int mismatch_;
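// Read-only texture views: the sequence laid along the rows (four codes per
// fetch), the sequence laid along the columns, the horizontal score/affine
// bus, and the scorer substitution table.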
texture<char4> rowTexture;
texture<char> colTexture;
texture<int2> hBusTexture;
texture<int> subTexture;
//******************************************************************************
// PUBLIC
extern void hwEndDataGpu(int* queryEnd, int* targetEnd, int* outScore,
Chain* query, Chain* target, Scorer* scorer, int score, int card,
Thread* thread);
//******************************************************************************
//******************************************************************************
// PRIVATE
// With the Visual C++ compiler, CUDA global memory variables do not work when
// their prototypes are declared. No questions asked.
#ifndef _WIN32
__device__ static int gap(int idx);
__device__ static int aff(int idx);
template<class Sub>
__device__ static void solveShortDelegated(int d, VBus vBus, int2* hBus, Sub sub);
template<class Sub>
__device__ static void solveShortNormal(int d, VBus vBus, int2* hBus, Sub sub);
template<class Sub>
__global__ static void solveShort(int d, VBus vBus, int2* hBus, Sub sub);
template<class Sub>
__global__ static void solveLong(int d, VBus vBus, int2* hBus, Sub sub);
#endif
static void* kernel(void* params);
//******************************************************************************
//******************************************************************************
// PUBLIC
extern void hwEndDataGpu(int* queryEnd, int* targetEnd, int* outScore,
Chain* query, Chain* target, Scorer* scorer, int score, int card,
Thread* thread) {
Context* param = (Context*) malloc(sizeof(Context));
param->queryEnd = queryEnd;
param->targetEnd = targetEnd;
param->outScore = outScore;
param->query = query;
param->target = target;
param->scorer = scorer;
param->score = score;
param->card = card;
if (thread == NULL) {
kernel(param);
} else {
threadCreate(thread, kernel, (void*) param);
}
}
//******************************************************************************
//******************************************************************************
// PRIVATE
//------------------------------------------------------------------------------
// FUNCTORS
class SubScalarRev {
public:
__device__ int operator () (char a, char b) {
return (a == b ? match_ : mismatch_) * (a < scorerLen_ && b < scorerLen_);
}
};
class SubVector {
public:
__device__ int operator () (char a, char b) {
return tex1Dfetch(subTexture, (a * subLen_) + b);
}
};
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// GPU KERNELS
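// First-column boundary values: rows before dRow_ start at zero, later rows
// pay an affine gap penalty; aff() disallows an affine carry-in on the border.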
__device__ static int gap(int idx) {
if (idx == dRow_ - 1) return 0;
if (idx < dRow_ - 1) return 0;
return -gapOpen_ - gapExtend_ * (idx - dRow_);
}
__device__ static int aff(int idx) {
return SCORE_MIN;
}
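// Handles the edge block of a diagonal whose band runs off the matrix:
// columns wrap around the right edge back to column 0, rows are re-based
// accordingly, and the first-column boundary values are re-seeded mid-sweep.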
template<class Sub>
__device__ static void solveShortDelegated(int d, VBus vBus, int2* hBus, Sub sub) {
__shared__ int hBusScrShr[MAX_THREADS];
__shared__ int hBusAffShr[MAX_THREADS];
int row = (d + blockIdx.x - gridDim.x + 1) * (blockDim.x * 4) + threadIdx.x * 4;
int col = cellWidth_ * (gridDim.x - blockIdx.x - 1) - threadIdx.x;
if (row < 0) return;
row -= (col < 0) * (gridDim.x * blockDim.x * 4);
col += (col < 0) * cols_;
Atom atom;
if (0 <= row && row < rows_ && col > 0) {
atom.mch = vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)];
VEC4_ASSIGN(atom.lScr, vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)]);
VEC4_ASSIGN(atom.lAff, vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)]);
} else {
atom.mch = gap(row - 1);
atom.lScr = make_int4(gap(row), gap(row + 1), gap(row + 2), gap(row + 3));
atom.lAff = make_int4(aff(row), aff(row + 1), aff(row + 2), aff(row + 3));
}
hBusScrShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).x;
hBusAffShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).y;
char4 rowCodes = tex1Dfetch(rowTexture, row >> 2);
int del;
for (int i = 0; i < blockDim.x; ++i) {
if (0 <= row && row < rows_) {
char columnCode = tex1Dfetch(colTexture, col);
if (threadIdx.x == 0) {
atom.up = tex1Dfetch(hBusTexture, col);
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + sub(columnCode, rowCodes.x);
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + sub(columnCode, rowCodes.y);
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + sub(columnCode, rowCodes.z);
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + sub(columnCode, rowCodes.w);
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
}
__syncthreads();
if (0 <= row && row < rows_) {
if (threadIdx.x == blockDim.x - 1 || i == blockDim.x - 1 || row == rows_ - 4) {
VEC2_ASSIGN(hBus[col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
}
++col;
if (col == cols_) {
col = 0;
row = row + gridDim.x * blockDim.x * 4;
atom.mch = gap(row - 1);
atom.lScr = make_int4(gap(row), gap(row + 1), gap(row + 2), gap(row + 3));
atom.lAff = make_int4(aff(row), aff(row + 1), aff(row + 2), aff(row + 3));
rowCodes = tex1Dfetch(rowTexture, row >> 2);
}
__syncthreads();
}
if (row < 0 || row >= rows_) return;
vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)] = atom.up.x;
VEC4_ASSIGN(vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)], atom.lScr);
VEC4_ASSIGN(vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)], atom.lAff);
}
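// Interior blocks of a diagonal: each thread sweeps its 4-row band across
// blockDim.x columns, handing the band's bottom row down via the horizontal bus.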
template<class Sub>
__device__ static void solveShortNormal(int d, VBus vBus, int2* hBus, Sub sub) {
__shared__ int hBusScrShr[MAX_THREADS];
__shared__ int hBusAffShr[MAX_THREADS];
int row = (d + blockIdx.x - gridDim.x + 1) * (blockDim.x * 4) + threadIdx.x * 4;
int col = cellWidth_ * (gridDim.x - blockIdx.x - 1) - threadIdx.x;
if (row < 0 || row >= rows_) return;
Atom atom;
atom.mch = vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)];
VEC4_ASSIGN(atom.lScr, vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)]);
VEC4_ASSIGN(atom.lAff, vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)]);
hBusScrShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).x;
hBusAffShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).y;
const char4 rowCodes = tex1Dfetch(rowTexture, row >> 2);
int del;
for (int i = 0; i < blockDim.x; ++i, ++col) {
char columnCode = tex1Dfetch(colTexture, col);
if (threadIdx.x == 0) {
atom.up = tex1Dfetch(hBusTexture, col);
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + sub(columnCode, rowCodes.x);
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + sub(columnCode, rowCodes.y);
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + sub(columnCode, rowCodes.z);
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + sub(columnCode, rowCodes.w);
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
__syncthreads();
if (threadIdx.x == blockDim.x - 1 || row == rows_ - 4) {
VEC2_ASSIGN(hBus[col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
__syncthreads();
}
VEC2_ASSIGN(hBus[col - 1], make_int2(atom.rScr.w, del));
vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)] = atom.up.x;
VEC4_ASSIGN(vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)], atom.lScr);
VEC4_ASSIGN(vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)], atom.lAff);
}
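// The last block wraps around the matrix edge and therefore runs the delegated
// variant; every other block runs the regular one.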
template<class Sub>
__global__ static void solveShort(int d, VBus vBus, int2* hBus, Sub sub) {
if (blockIdx.x == (gridDim.x - 1)) {
solveShortDelegated(d, vBus, hBus, sub);
} else {
solveShortNormal(d, vBus, hBus, sub);
}
}
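// Long phase: continues each block's strip where solveShort stopped, covering the
// remaining cellWidth_ - blockDim.x columns with the same inner update as the
// short phase.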
template<class Sub>
__global__ static void solveLong(int d, VBus vBus, int2* hBus, Sub sub) {
__shared__ int hBusScrShr[MAX_THREADS];
__shared__ int hBusAffShr[MAX_THREADS];
int row = (d + blockIdx.x - gridDim.x + 1) * (blockDim.x * 4) + threadIdx.x * 4;
int col = cellWidth_ * (gridDim.x - blockIdx.x - 1) - threadIdx.x + blockDim.x;
if (row < 0 || row >= rows_) return;
Atom atom;
atom.mch = vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)];
VEC4_ASSIGN(atom.lScr, vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)]);
VEC4_ASSIGN(atom.lAff, vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)]);
hBusScrShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).x;
hBusAffShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).y;
const char4 rowCodes = tex1Dfetch(rowTexture, row >> 2);
int del;
for (int i = 0; i < cellWidth_ - blockDim.x; ++i, ++col) {
char columnCode = tex1Dfetch(colTexture, col);
if (threadIdx.x == 0) {
atom.up = tex1Dfetch(hBusTexture, col);
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + sub(columnCode, rowCodes.x);
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + sub(columnCode, rowCodes.y);
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + sub(columnCode, rowCodes.z);
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + sub(columnCode, rowCodes.w);
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
__syncthreads();
if (threadIdx.x == blockDim.x - 1 || row == rows_ - 4) {
VEC2_ASSIGN(hBus[col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
__syncthreads();
}
VEC2_ASSIGN(hBus[col - 1], make_int2(atom.rScr.w, del));
vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)] = atom.up.x;
VEC4_ASSIGN(vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)], atom.lScr);
VEC4_ASSIGN(vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)], atom.lAff);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// CPU KERNELS
static void* kernel(void* params) {
Context* context = (Context*) params;
int* queryEnd = context->queryEnd;
int* targetEnd = context->targetEnd;
int* outScore = context->outScore;
Chain* query = context->query;
Chain* target = context->target;
Scorer* scorer = context->scorer;
// int score = context->score;
int card = context->card;
int currentCard;
CUDA_SAFE_CALL(hipGetDevice(¤tCard));
if (currentCard != card) {
// CUDA_SAFE_CALL(hipDeviceReset());
CUDA_SAFE_CALL(hipSetDevice(card));
}
TIMER_START("Hw align %d %d", chainGetLength(query), chainGetLength(target));
int rows = chainGetLength(query);
int cols = chainGetLength(target);
int gapOpen = scorerGetGapOpen(scorer);
int gapExtend = scorerGetGapExtend(scorer);
int scorerLen = scorerGetMaxCode(scorer);
int subLen = scorerLen + 1;
int scalar = scorerIsScalar(scorer);
hipDeviceProp_t properties;
CUDA_SAFE_CALL(hipGetDeviceProperties(&properties, card));
int threads;
int blocks;
if (properties.major < 2) {
threads = THREADS_SM1;
blocks = BLOCKS_SM1;
} else {
threads = THREADS_SM2;
blocks = BLOCKS_SM2;
}
ASSERT(threads * 2 <= cols, "too short gpu target chain");
if (threads * blocks * 2 > cols) {
blocks = (int) (cols / (threads * 2.));
blocks = blocks <= 30 ? blocks : blocks - (blocks % 30);
// LOG("Blocks trimmed to: %d", blocks);
}
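// Geometry of the sweep: every block advances a band of cellHeight = 4 * threads
// rows, rowsGpu is padded to a multiple of 4, colsGpu is padded so that it splits
// into `blocks` strips of cellWidth columns, and `diagonals` is the number of
// kernel sweeps the wavefront needs to cover the whole padded matrix.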
int cellHeight = 4 * threads;
int rowsGpu = rows + (4 - rows % 4) % 4;
int dRow = rowsGpu - rows;
int colsGpu = (cols + 4) + (blocks - (cols + 4) % blocks) % blocks;
int cellWidth = colsGpu / blocks;
int diagonals = blocks + (int) ceil((float) rowsGpu / cellHeight);
int memoryUsedGpu = 0;
int memoryUsedCpu = 0;
/*
LOG("Rows cpu: %d, gpu: %d", rows, rowsGpu);
LOG("Columns cpu: %d, gpu: %d", cols, colsGpu);
LOG("Cell h: %d, w: %d", cellHeight, cellWidth);
LOG("Diagonals: %d", diagonals);
*/
//**************************************************************************
// PAD CHAINS
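// The query codes are shifted right by dRow and the gap filled with scorerLen (a
// code outside the alphabet, so it never scores); the target codes are padded at
// the end up to colsGpu for the same reason.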
char* rowCpu = (char*) malloc(rowsGpu * sizeof(char));
memset(rowCpu, scorerLen, dRow * sizeof(char));
chainCopyCodes(query, rowCpu + dRow);
memoryUsedCpu += rowsGpu * sizeof(char);
char* colCpu = (char*) malloc(colsGpu * sizeof(char));
memset(colCpu + cols, scorerLen + scalar, (colsGpu - cols) * sizeof(char));
chainCopyCodes(target, colCpu);
memoryUsedCpu += colsGpu * sizeof(char);
//**************************************************************************
//**************************************************************************
// INIT GPU
int rowSize = rowsGpu * sizeof(char);
char* rowGpu;
CUDA_SAFE_CALL(hipMalloc(&rowGpu, rowSize));
CUDA_SAFE_CALL(hipMemcpy(rowGpu, rowCpu, rowSize, TO_GPU));
CUDA_SAFE_CALL(hipBindTexture(NULL, rowTexture, rowGpu, rowSize));
memoryUsedGpu += rowSize;
int colSize = colsGpu * sizeof(char);
char* colGpu;
CUDA_SAFE_CALL(hipMalloc(&colGpu, colSize));
CUDA_SAFE_CALL(hipMemcpy(colGpu, colCpu, colSize, TO_GPU));
CUDA_SAFE_CALL(hipBindTexture(NULL, colTexture, colGpu, colSize));
memoryUsedGpu += colSize;
int hBusSize = colsGpu * sizeof(int2);
int2* hBusCpu = (int2*) malloc(hBusSize);
int2* hBusGpu;
for (int i = 0; i < colsGpu; ++i) {
hBusCpu[i] = make_int2(0, SCORE_MIN);
}
CUDA_SAFE_CALL(hipMalloc(&hBusGpu, hBusSize));
CUDA_SAFE_CALL(hipMemcpy(hBusGpu, hBusCpu, hBusSize, TO_GPU));
CUDA_SAFE_CALL(hipBindTexture(NULL, hBusTexture, hBusGpu, hBusSize));
memoryUsedCpu += hBusSize;
memoryUsedGpu += hBusSize;
VBus vBusGpu;
CUDA_SAFE_CALL(hipMalloc(&vBusGpu.mch, blocks * threads * sizeof(int)));
CUDA_SAFE_CALL(hipMalloc(&vBusGpu.scr, blocks * threads * sizeof(int4)));
CUDA_SAFE_CALL(hipMalloc(&vBusGpu.aff, blocks * threads * sizeof(int4)));
memoryUsedGpu += blocks * threads * sizeof(int);
memoryUsedGpu += blocks * threads * sizeof(int4);
memoryUsedGpu += blocks * threads * sizeof(int4);
size_t subSize = subLen * subLen * sizeof(int);
int* subCpu = (int*) malloc(subSize);
int* subGpu;
for (int i = 0; i < subLen; ++i) {
for (int j = 0; j < subLen; ++j) {
if (i < scorerLen && j < scorerLen) {
subCpu[i * subLen + j] = scorerScore(scorer, i, j);
} else {
subCpu[i * subLen + j] = 0;
}
}
}
CUDA_SAFE_CALL(hipMalloc(&subGpu, subSize));
CUDA_SAFE_CALL(hipMemcpy(subGpu, subCpu, subSize, TO_GPU));
CUDA_SAFE_CALL(hipBindTexture(NULL, subTexture, subGpu, subSize));
memoryUsedCpu += subSize;
memoryUsedGpu += subSize;
CUDA_SAFE_CALL(hipMemcpyToSymbol(match_, &(subCpu[0]), sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(mismatch_, &(subCpu[1]), sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(gapOpen_, &gapOpen, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(gapExtend_, &gapExtend, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(scorerLen_, &scorerLen, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(subLen_, &subLen, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(rows_, &rowsGpu, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(cols_, &colsGpu, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(cellWidth_, &cellWidth, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(dRow_, &dRow, sizeof(int)));
/*
LOG("Memory used CPU: %fMB", memoryUsedCpu / 1024. / 1024.);
LOG("Memory used GPU: %fMB", memoryUsedGpu / 1024. / 1024.);
*/
//**************************************************************************
//**************************************************************************
// KERNEL RUN
//TIMER_START("Kernel");
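// One launch pair per anti-diagonal of the block grid: solveShort fills the
// leading blockDim.x columns of every block's strip, solveLong the rest. hBusGpu
// hands each column's bottom-row value to the band below, vBusGpu hands each
// stripe's boundary column to the next strip on the following sweep, and the
// substitution functor is picked once per scorer type.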
for (int diagonal = 0; diagonal < diagonals; ++diagonal) {
if (scalar) {
hipLaunchKernelGGL(( solveShort), dim3(blocks), dim3(threads) , 0, 0, diagonal, vBusGpu, hBusGpu, SubScalarRev());
hipLaunchKernelGGL(( solveLong), dim3(blocks), dim3(threads) , 0, 0, diagonal, vBusGpu, hBusGpu, SubScalarRev());
} else {
hipLaunchKernelGGL(( solveShort), dim3(blocks), dim3(threads) , 0, 0, diagonal, vBusGpu, hBusGpu, SubVector());
hipLaunchKernelGGL(( solveLong), dim3(blocks), dim3(threads) , 0, 0, diagonal, vBusGpu, hBusGpu, SubVector());
}
}
//TIMER_STOP;
//**************************************************************************
//**************************************************************************
// SAVE RESULTS
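// The HW alignment ends at the last query row by construction; the target end is
// wherever that last row, left in hBus by the kernels, reaches its maximum.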
CUDA_SAFE_CALL(hipMemcpy(hBusCpu, hBusGpu, hBusSize, FROM_GPU));
*queryEnd = rows - 1;
*outScore = hBusCpu[0].x;
*targetEnd = 0;
for (int i = 1; i < cols; ++i) {
if (hBusCpu[i].x > *outScore) {
*outScore = hBusCpu[i].x;
*targetEnd = i;
}
}
//**************************************************************************
//**************************************************************************
// CLEAN MEMORY
free(subCpu);
free(rowCpu);
free(colCpu);
free(hBusCpu);
CUDA_SAFE_CALL(hipFree(subGpu));
CUDA_SAFE_CALL(hipFree(rowGpu));
CUDA_SAFE_CALL(hipFree(colGpu));
CUDA_SAFE_CALL(hipFree(vBusGpu.mch));
CUDA_SAFE_CALL(hipFree(vBusGpu.scr));
CUDA_SAFE_CALL(hipFree(vBusGpu.aff));
CUDA_SAFE_CALL(hipFree(hBusGpu));
CUDA_SAFE_CALL(hipUnbindTexture(rowTexture));
CUDA_SAFE_CALL(hipUnbindTexture(colTexture));
CUDA_SAFE_CALL(hipUnbindTexture(hBusTexture));
CUDA_SAFE_CALL(hipUnbindTexture(subTexture));
free(params);
//**************************************************************************
TIMER_STOP;
return NULL;
}
//------------------------------------------------------------------------------
//******************************************************************************
#endif // __HIPCC__
| 2630a67d04339d4f4326d8372b1f599b1d1b7a2d.cu | /*
swsharp - CUDA parallelized Smith Waterman with applying Hirschberg's and
Ukkonen's algorithm and dynamic cell pruning.
Copyright (C) 2013 Matija Korpar, contributor Mile Šikić
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact the author by [email protected].
*/
#ifdef __CUDACC__
#include <stdio.h>
#include <stdlib.h>
#include "chain.h"
#include "constants.h"
#include "cuda_utils.h"
#include "error.h"
#include "scorer.h"
#include "thread.h"
#include "utils.h"
#include "gpu_module.h"
#define MAX_THREADS MAX(THREADS_SM1, THREADS_SM2)
#define THREADS_SM1 64
#define BLOCKS_SM1 240
#define THREADS_SM2 128
#define BLOCKS_SM2 240
#define SCORE4_MIN make_int4(SCORE_MIN, SCORE_MIN, SCORE_MIN, SCORE_MIN)
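// Atom is one thread's working set: the cell above (up), the diagonal match value
// and the left/right score and affine-gap columns of its 4-row stripe. VBus holds,
// per row stripe, the values at the strip boundary so the next diagonal sweep can
// continue to the right; Context just packs the arguments for the worker thread.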
typedef struct Atom {
int mch;
int2 up;
int4 lScr;
int4 lAff;
int4 rScr;
int4 rAff;
} Atom;
typedef struct VBus {
int* mch;
int4* scr;
int4* aff;
} VBus;
typedef struct Context {
int* queryEnd;
int* targetEnd;
int* outScore;
Chain* query;
Chain* target;
Scorer* scorer;
int score;
int card;
} Context;
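// Problem parameters are kept in constant memory and set once per call with
// cudaMemcpyToSymbol; the row/column codes, the horizontal bus and the
// substitution matrix are read through 1-D textures to get cached loads.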
static __constant__ int gapOpen_;
static __constant__ int gapExtend_;
static __constant__ int dRow_;
static __constant__ int rows_;
static __constant__ int cols_;
static __constant__ int cellWidth_;
static __constant__ int scorerLen_;
static __constant__ int subLen_;
static __constant__ int match_;
static __constant__ int mismatch_;
texture<char4> rowTexture;
texture<char> colTexture;
texture<int2> hBusTexture;
texture<int> subTexture;
//******************************************************************************
// PUBLIC
extern void hwEndDataGpu(int* queryEnd, int* targetEnd, int* outScore,
Chain* query, Chain* target, Scorer* scorer, int score, int card,
Thread* thread);
//******************************************************************************
//******************************************************************************
// PRIVATE
// With the Visual C++ compiler, the CUDA global memory variables do not work when
// the device function prototypes below are declared. No questions asked.
#ifndef _WIN32
__device__ static int gap(int idx);
__device__ static int aff(int idx);
template<class Sub>
__device__ static void solveShortDelegated(int d, VBus vBus, int2* hBus, Sub sub);
template<class Sub>
__device__ static void solveShortNormal(int d, VBus vBus, int2* hBus, Sub sub);
template<class Sub>
__global__ static void solveShort(int d, VBus vBus, int2* hBus, Sub sub);
template<class Sub>
__global__ static void solveLong(int d, VBus vBus, int2* hBus, Sub sub);
#endif
static void* kernel(void* params);
//******************************************************************************
//******************************************************************************
// PUBLIC
extern void hwEndDataGpu(int* queryEnd, int* targetEnd, int* outScore,
Chain* query, Chain* target, Scorer* scorer, int score, int card,
Thread* thread) {
Context* param = (Context*) malloc(sizeof(Context));
param->queryEnd = queryEnd;
param->targetEnd = targetEnd;
param->outScore = outScore;
param->query = query;
param->target = target;
param->scorer = scorer;
param->score = score;
param->card = card;
if (thread == NULL) {
kernel(param);
} else {
threadCreate(thread, kernel, (void*) param);
}
}
//******************************************************************************
//******************************************************************************
// PRIVATE
//------------------------------------------------------------------------------
// FUNCTORS
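// Substitution-score functors chosen at compile time: SubScalarRev reproduces a
// plain match/mismatch scorer (padding codes >= scorerLen_ score 0), SubVector
// looks the pair up in the substitution-matrix texture.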
class SubScalarRev {
public:
__device__ int operator () (char a, char b) {
return (a == b ? match_ : mismatch_) * (a < scorerLen_ && b < scorerLen_);
}
};
class SubVector {
public:
__device__ int operator () (char a, char b) {
return tex1Dfetch(subTexture, (a * subLen_) + b);
}
};
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// GPU KERNELS
__device__ static int gap(int idx) {
if (idx == dRow_ - 1) return 0;
if (idx < dRow_ - 1) return 0;
return -gapOpen_ - gapExtend_ * (idx - dRow_);
}
__device__ static int aff(int idx) {
return SCORE_MIN;
}
template<class Sub>
__device__ static void solveShortDelegated(int d, VBus vBus, int2* hBus, Sub sub) {
__shared__ int hBusScrShr[MAX_THREADS];
__shared__ int hBusAffShr[MAX_THREADS];
int row = (d + blockIdx.x - gridDim.x + 1) * (blockDim.x * 4) + threadIdx.x * 4;
int col = cellWidth_ * (gridDim.x - blockIdx.x - 1) - threadIdx.x;
if (row < 0) return;
row -= (col < 0) * (gridDim.x * blockDim.x * 4);
col += (col < 0) * cols_;
Atom atom;
if (0 <= row && row < rows_ && col > 0) {
atom.mch = vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)];
VEC4_ASSIGN(atom.lScr, vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)]);
VEC4_ASSIGN(atom.lAff, vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)]);
} else {
atom.mch = gap(row - 1);
atom.lScr = make_int4(gap(row), gap(row + 1), gap(row + 2), gap(row + 3));
atom.lAff = make_int4(aff(row), aff(row + 1), aff(row + 2), aff(row + 3));
}
hBusScrShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).x;
hBusAffShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).y;
char4 rowCodes = tex1Dfetch(rowTexture, row >> 2);
int del;
for (int i = 0; i < blockDim.x; ++i) {
if (0 <= row && row < rows_) {
char columnCode = tex1Dfetch(colTexture, col);
if (threadIdx.x == 0) {
atom.up = tex1Dfetch(hBusTexture, col);
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + sub(columnCode, rowCodes.x);
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + sub(columnCode, rowCodes.y);
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + sub(columnCode, rowCodes.z);
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + sub(columnCode, rowCodes.w);
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
}
__syncthreads();
if (0 <= row && row < rows_) {
if (threadIdx.x == blockDim.x - 1 || i == blockDim.x - 1 || row == rows_ - 4) {
VEC2_ASSIGN(hBus[col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
}
++col;
if (col == cols_) {
col = 0;
row = row + gridDim.x * blockDim.x * 4;
atom.mch = gap(row - 1);
atom.lScr = make_int4(gap(row), gap(row + 1), gap(row + 2), gap(row + 3));
atom.lAff = make_int4(aff(row), aff(row + 1), aff(row + 2), aff(row + 3));
rowCodes = tex1Dfetch(rowTexture, row >> 2);
}
__syncthreads();
}
if (row < 0 || row >= rows_) return;
vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)] = atom.up.x;
VEC4_ASSIGN(vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)], atom.lScr);
VEC4_ASSIGN(vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)], atom.lAff);
}
template<class Sub>
__device__ static void solveShortNormal(int d, VBus vBus, int2* hBus, Sub sub) {
__shared__ int hBusScrShr[MAX_THREADS];
__shared__ int hBusAffShr[MAX_THREADS];
int row = (d + blockIdx.x - gridDim.x + 1) * (blockDim.x * 4) + threadIdx.x * 4;
int col = cellWidth_ * (gridDim.x - blockIdx.x - 1) - threadIdx.x;
if (row < 0 || row >= rows_) return;
Atom atom;
atom.mch = vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)];
VEC4_ASSIGN(atom.lScr, vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)]);
VEC4_ASSIGN(atom.lAff, vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)]);
hBusScrShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).x;
hBusAffShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).y;
const char4 rowCodes = tex1Dfetch(rowTexture, row >> 2);
int del;
for (int i = 0; i < blockDim.x; ++i, ++col) {
char columnCode = tex1Dfetch(colTexture, col);
if (threadIdx.x == 0) {
atom.up = tex1Dfetch(hBusTexture, col);
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + sub(columnCode, rowCodes.x);
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + sub(columnCode, rowCodes.y);
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + sub(columnCode, rowCodes.z);
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + sub(columnCode, rowCodes.w);
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
__syncthreads();
if (threadIdx.x == blockDim.x - 1 || row == rows_ - 4) {
VEC2_ASSIGN(hBus[col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
__syncthreads();
}
VEC2_ASSIGN(hBus[col - 1], make_int2(atom.rScr.w, del));
vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)] = atom.up.x;
VEC4_ASSIGN(vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)], atom.lScr);
VEC4_ASSIGN(vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)], atom.lAff);
}
template<class Sub>
__global__ static void solveShort(int d, VBus vBus, int2* hBus, Sub sub) {
if (blockIdx.x == (gridDim.x - 1)) {
solveShortDelegated(d, vBus, hBus, sub);
} else {
solveShortNormal(d, vBus, hBus, sub);
}
}
template<class Sub>
__global__ static void solveLong(int d, VBus vBus, int2* hBus, Sub sub) {
__shared__ int hBusScrShr[MAX_THREADS];
__shared__ int hBusAffShr[MAX_THREADS];
int row = (d + blockIdx.x - gridDim.x + 1) * (blockDim.x * 4) + threadIdx.x * 4;
int col = cellWidth_ * (gridDim.x - blockIdx.x - 1) - threadIdx.x + blockDim.x;
if (row < 0 || row >= rows_) return;
Atom atom;
atom.mch = vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)];
VEC4_ASSIGN(atom.lScr, vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)]);
VEC4_ASSIGN(atom.lAff, vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)]);
hBusScrShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).x;
hBusAffShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).y;
const char4 rowCodes = tex1Dfetch(rowTexture, row >> 2);
int del;
for (int i = 0; i < cellWidth_ - blockDim.x; ++i, ++col) {
char columnCode = tex1Dfetch(colTexture, col);
if (threadIdx.x == 0) {
atom.up = tex1Dfetch(hBusTexture, col);
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + sub(columnCode, rowCodes.x);
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + sub(columnCode, rowCodes.y);
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + sub(columnCode, rowCodes.z);
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + sub(columnCode, rowCodes.w);
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
__syncthreads();
if (threadIdx.x == blockDim.x - 1 || row == rows_ - 4) {
VEC2_ASSIGN(hBus[col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
__syncthreads();
}
VEC2_ASSIGN(hBus[col - 1], make_int2(atom.rScr.w, del));
vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)] = atom.up.x;
VEC4_ASSIGN(vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)], atom.lScr);
VEC4_ASSIGN(vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)], atom.lAff);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// CPU KERNELS
static void* kernel(void* params) {
Context* context = (Context*) params;
int* queryEnd = context->queryEnd;
int* targetEnd = context->targetEnd;
int* outScore = context->outScore;
Chain* query = context->query;
Chain* target = context->target;
Scorer* scorer = context->scorer;
// int score = context->score;
int card = context->card;
int currentCard;
CUDA_SAFE_CALL(cudaGetDevice(¤tCard));
if (currentCard != card) {
// CUDA_SAFE_CALL(cudaThreadExit());
CUDA_SAFE_CALL(cudaSetDevice(card));
}
TIMER_START("Hw align %d %d", chainGetLength(query), chainGetLength(target));
int rows = chainGetLength(query);
int cols = chainGetLength(target);
int gapOpen = scorerGetGapOpen(scorer);
int gapExtend = scorerGetGapExtend(scorer);
int scorerLen = scorerGetMaxCode(scorer);
int subLen = scorerLen + 1;
int scalar = scorerIsScalar(scorer);
cudaDeviceProp properties;
CUDA_SAFE_CALL(cudaGetDeviceProperties(&properties, card));
int threads;
int blocks;
if (properties.major < 2) {
threads = THREADS_SM1;
blocks = BLOCKS_SM1;
} else {
threads = THREADS_SM2;
blocks = BLOCKS_SM2;
}
ASSERT(threads * 2 <= cols, "too short gpu target chain");
if (threads * blocks * 2 > cols) {
blocks = (int) (cols / (threads * 2.));
blocks = blocks <= 30 ? blocks : blocks - (blocks % 30);
// LOG("Blocks trimmed to: %d", blocks);
}
int cellHeight = 4 * threads;
int rowsGpu = rows + (4 - rows % 4) % 4;
int dRow = rowsGpu - rows;
int colsGpu = (cols + 4) + (blocks - (cols + 4) % blocks) % blocks;
int cellWidth = colsGpu / blocks;
int diagonals = blocks + (int) ceil((float) rowsGpu / cellHeight);
int memoryUsedGpu = 0;
int memoryUsedCpu = 0;
/*
LOG("Rows cpu: %d, gpu: %d", rows, rowsGpu);
LOG("Columns cpu: %d, gpu: %d", cols, colsGpu);
LOG("Cell h: %d, w: %d", cellHeight, cellWidth);
LOG("Diagonals: %d", diagonals);
*/
//**************************************************************************
// PAD CHAINS
char* rowCpu = (char*) malloc(rowsGpu * sizeof(char));
memset(rowCpu, scorerLen, dRow * sizeof(char));
chainCopyCodes(query, rowCpu + dRow);
memoryUsedCpu += rowsGpu * sizeof(char);
char* colCpu = (char*) malloc(colsGpu * sizeof(char));
memset(colCpu + cols, scorerLen + scalar, (colsGpu - cols) * sizeof(char));
chainCopyCodes(target, colCpu);
memoryUsedCpu += colsGpu * sizeof(char);
//**************************************************************************
//**************************************************************************
// INIT GPU
int rowSize = rowsGpu * sizeof(char);
char* rowGpu;
CUDA_SAFE_CALL(cudaMalloc(&rowGpu, rowSize));
CUDA_SAFE_CALL(cudaMemcpy(rowGpu, rowCpu, rowSize, TO_GPU));
CUDA_SAFE_CALL(cudaBindTexture(NULL, rowTexture, rowGpu, rowSize));
memoryUsedGpu += rowSize;
int colSize = colsGpu * sizeof(char);
char* colGpu;
CUDA_SAFE_CALL(cudaMalloc(&colGpu, colSize));
CUDA_SAFE_CALL(cudaMemcpy(colGpu, colCpu, colSize, TO_GPU));
CUDA_SAFE_CALL(cudaBindTexture(NULL, colTexture, colGpu, colSize));
memoryUsedGpu += colSize;
int hBusSize = colsGpu * sizeof(int2);
int2* hBusCpu = (int2*) malloc(hBusSize);
int2* hBusGpu;
for (int i = 0; i < colsGpu; ++i) {
hBusCpu[i] = make_int2(0, SCORE_MIN);
}
CUDA_SAFE_CALL(cudaMalloc(&hBusGpu, hBusSize));
CUDA_SAFE_CALL(cudaMemcpy(hBusGpu, hBusCpu, hBusSize, TO_GPU));
CUDA_SAFE_CALL(cudaBindTexture(NULL, hBusTexture, hBusGpu, hBusSize));
memoryUsedCpu += hBusSize;
memoryUsedGpu += hBusSize;
VBus vBusGpu;
CUDA_SAFE_CALL(cudaMalloc(&vBusGpu.mch, blocks * threads * sizeof(int)));
CUDA_SAFE_CALL(cudaMalloc(&vBusGpu.scr, blocks * threads * sizeof(int4)));
CUDA_SAFE_CALL(cudaMalloc(&vBusGpu.aff, blocks * threads * sizeof(int4)));
memoryUsedGpu += blocks * threads * sizeof(int);
memoryUsedGpu += blocks * threads * sizeof(int4);
memoryUsedGpu += blocks * threads * sizeof(int4);
size_t subSize = subLen * subLen * sizeof(int);
int* subCpu = (int*) malloc(subSize);
int* subGpu;
for (int i = 0; i < subLen; ++i) {
for (int j = 0; j < subLen; ++j) {
if (i < scorerLen && j < scorerLen) {
subCpu[i * subLen + j] = scorerScore(scorer, i, j);
} else {
subCpu[i * subLen + j] = 0;
}
}
}
CUDA_SAFE_CALL(cudaMalloc(&subGpu, subSize));
CUDA_SAFE_CALL(cudaMemcpy(subGpu, subCpu, subSize, TO_GPU));
CUDA_SAFE_CALL(cudaBindTexture(NULL, subTexture, subGpu, subSize));
memoryUsedCpu += subSize;
memoryUsedGpu += subSize;
CUDA_SAFE_CALL(cudaMemcpyToSymbol(match_, &(subCpu[0]), sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(mismatch_, &(subCpu[1]), sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(gapOpen_, &gapOpen, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(gapExtend_, &gapExtend, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(scorerLen_, &scorerLen, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(subLen_, &subLen, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(rows_, &rowsGpu, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(cols_, &colsGpu, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(cellWidth_, &cellWidth, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(dRow_, &dRow, sizeof(int)));
/*
LOG("Memory used CPU: %fMB", memoryUsedCpu / 1024. / 1024.);
LOG("Memory used GPU: %fMB", memoryUsedGpu / 1024. / 1024.);
*/
//**************************************************************************
//**************************************************************************
// KERNEL RUN
//TIMER_START("Kernel");
for (int diagonal = 0; diagonal < diagonals; ++diagonal) {
if (scalar) {
solveShort<<< blocks, threads >>>(diagonal, vBusGpu, hBusGpu, SubScalarRev());
solveLong<<< blocks, threads >>>(diagonal, vBusGpu, hBusGpu, SubScalarRev());
} else {
solveShort<<< blocks, threads >>>(diagonal, vBusGpu, hBusGpu, SubVector());
solveLong<<< blocks, threads >>>(diagonal, vBusGpu, hBusGpu, SubVector());
}
}
//TIMER_STOP;
//**************************************************************************
//**************************************************************************
// SAVE RESULTS
CUDA_SAFE_CALL(cudaMemcpy(hBusCpu, hBusGpu, hBusSize, FROM_GPU));
*queryEnd = rows - 1;
*outScore = hBusCpu[0].x;
*targetEnd = 0;
for (int i = 1; i < cols; ++i) {
if (hBusCpu[i].x > *outScore) {
*outScore = hBusCpu[i].x;
*targetEnd = i;
}
}
//**************************************************************************
//**************************************************************************
// CLEAN MEMORY
free(subCpu);
free(rowCpu);
free(colCpu);
free(hBusCpu);
CUDA_SAFE_CALL(cudaFree(subGpu));
CUDA_SAFE_CALL(cudaFree(rowGpu));
CUDA_SAFE_CALL(cudaFree(colGpu));
CUDA_SAFE_CALL(cudaFree(vBusGpu.mch));
CUDA_SAFE_CALL(cudaFree(vBusGpu.scr));
CUDA_SAFE_CALL(cudaFree(vBusGpu.aff));
CUDA_SAFE_CALL(cudaFree(hBusGpu));
CUDA_SAFE_CALL(cudaUnbindTexture(rowTexture));
CUDA_SAFE_CALL(cudaUnbindTexture(colTexture));
CUDA_SAFE_CALL(cudaUnbindTexture(hBusTexture));
CUDA_SAFE_CALL(cudaUnbindTexture(subTexture));
free(params);
//**************************************************************************
TIMER_STOP;
return NULL;
}
//------------------------------------------------------------------------------
//******************************************************************************
#endif // __CUDACC__
|
f4696d397672408e700376d230ef9de50d4fc688.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include <assert.h>
#include "blas.h"
#include "hip/hip_runtime.h"
#include "utils.h"
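// GPU implementations of the element-wise and reduction helpers declared in
// blas.h. Each *_gpu wrapper derives its launch configuration from
// cuda_gridsize()/BLOCK and checks the launch with check_error().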
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter];
}
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
hipLaunchKernelGGL(( scale_bias_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, output, biases, n, size);
check_error(hipPeekAtLastError());
}
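// backward_scale_kernel (and backward_bias_kernel below) use one block per
// filter: each thread accumulates a strided partial sum over all batch items and
// spatial positions into shared memory, and thread 0 folds the BLOCK partials
// into the per-filter update.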
__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
}
}
void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
hipLaunchKernelGGL(( backward_scale_kernel), dim3(n), dim3(BLOCK), 0, 0, x_norm, delta, batch, n, size, scale_updates);
check_error(hipPeekAtLastError());
}
__global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n*size*batch) return;
int i = index % size;
index /= size;
int j = index % n;
index /= n;
int k = index;
output[(k*n+j)*size + i] += biases[j];
}
void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
int num = n*size*batch;
hipLaunchKernelGGL(( add_bias_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, output, biases, batch, n, size);
check_error(hipPeekAtLastError());
}
__global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n) return;
int b;
float sum = 0;
for(b = 0; b < batch; ++b){
int i = b*n + index;
sum += delta[i];
}
bias_updates[index] += sum;
}
__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
}
}
void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
{
if(size == 1){
hipLaunchKernelGGL(( backward_bias_conn_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n);
}else{
hipLaunchKernelGGL(( backward_bias_kernel), dim3(n), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n, size);
}
check_error(hipPeekAtLastError());
}
/*
__global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int f1 = index / n;
int f2 = index % n;
if (f2 <= f1) return;
float sum = 0;
float norm1 = 0;
float norm2 = 0;
int b, i;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
sum += output[i1] * output[i2];
norm1 += output[i1] * output[i1];
norm2 += output[i2] * output[i2];
}
}
norm1 = sqrt(norm1);
norm2 = sqrt(norm2);
float norm = norm1 * norm2;
sum = sum / norm;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
delta[i1] += - scale * sum * output[i2] / norm;
delta[i2] += - scale * sum * output[i1] / norm;
}
}
}
void dot_error_gpu(layer l)
{
dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu);
check_error(hipPeekAtLastError());
}
*/
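// Bias-corrected Adam step as implemented below:
// mhat = m / (1 - B1^t), vhat = v / (1 - B2^t), x += rate * mhat / (sqrt(vhat) + eps).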
__global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
float mhat = m[index] / (1.f - powf(B1, t));
float vhat = v[index] / (1.f - powf(B2, t));
x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps);
}
void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
hipLaunchKernelGGL(( adam_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, x, m, v, B1, B2, rate, eps, t);
check_error(hipPeekAtLastError());
}
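// Full Adam update for one parameter tensor: decay both moment buffers, apply
// weight decay to the accumulated gradient d, fold d into the first moment and
// d*d into the second, take the Adam step, then clear d for the next batch.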
void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t)
{
scal_gpu(n, B1, m, 1);
scal_gpu(n, B2, v, 1);
axpy_gpu(n, -decay*batch, w, 1, d, 1);
axpy_gpu(n, (1-B1), d, 1, m, 1);
mul_gpu(n, d, 1, d, 1);
axpy_gpu(n, (1-B2), d, 1, v, 1);
adam_gpu(n, w, m, v, B1, B2, rate, eps, t);
fill_gpu(n, 0, d, 1);
}
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f));
}
__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
}
void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
size_t N = batch*filters*spatial;
hipLaunchKernelGGL(( normalize_delta_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta);
check_error(hipPeekAtLastError());
}
__global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
variance_delta[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance_delta[i] += delta[index]*(x[index] - mean[i]);
}
}
variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f));
}
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
{
int k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= groups) return;
sum[i] = 0;
for(k = 0; k < n; ++k){
sum[i] += x[k*groups + i];
}
}
__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index] : 0;
}
}
__syncthreads();
if(id == 0){
mean_delta[filter] = 0;
for(i = 0; i < threads; ++i){
mean_delta[filter] += local[i];
}
mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f));
}
}
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
}
}
__syncthreads();
if(id == 0){
variance_delta[filter] = 0;
for(i = 0; i < threads; ++i){
variance_delta[filter] += local[i];
}
variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f));
}
}
__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean_delta[i] = 0;
for (j = 0; j < batch; ++j) {
for (k = 0; k < spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
mean_delta[i] += delta[index];
}
}
mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f));
}
void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
hipLaunchKernelGGL(( mean_delta_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta);
check_error(hipPeekAtLastError());
}
void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
hipLaunchKernelGGL(( fast_mean_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta);
check_error(hipPeekAtLastError());
}
void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
hipLaunchKernelGGL(( fast_variance_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, delta, mean, variance, batch, filters, spatial, variance_delta);
check_error(hipPeekAtLastError());
}
__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
float scale = 1.f/(batch * spatial);
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
mean[i] += x[index];
}
}
mean[i] *= scale;
}
__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
float scale = 1.f/(batch * spatial - 1);
int j,k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
variance[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance[i] += powf((x[index] - mean[i]), 2);
}
}
variance[i] *= scale;
}
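// reorg layer remapping: the flat index is decoded into (b, c, h, w) and mapped to
// the corresponding strided location in the (c/(stride*stride), h*stride, w*stride)
// layout; `forward` selects the copy direction.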
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_index = i;
int in_w = i%w;
i = i/w;
int in_h = i%h;
i = i/h;
int in_c = i%c;
i = i/c;
int b = i%batch;
int out_c = c/(stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
//printf("%d\n", offset);
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
// printf("%d %d %d\n", w2, h2, c2);
//printf("%d %d\n", in_index, out_index);
//if(out_index >= N || out_index < 0) printf("bad bad bad \n");
if(forward) out[out_index] = x[in_index];
else out[in_index] = x[out_index];
//if(forward) out[1] = x[1];
//else out[0] = x[0];
}
__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
}
__global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}
__global__ void const_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX]));
}
__global__ void supp_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) {
if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0;
}
}
__global__ void add_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] += ALPHA;
}
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] *= ALPHA;
}
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
__global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] *= X[i*INCX];
}
void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
size_t N = batch*filters*spatial;
hipLaunchKernelGGL(( normalize_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, batch, filters, spatial);
check_error(hipPeekAtLastError());
}
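// One thread per (batch, spatial) position: accumulate the sum of squares across
// filters, divide each channel value by the L2 norm, and stash
// (1 - normalized value) / norm in dx for the backward pass.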
__global__ void l2norm_kernel(int N, float *x, float *dx, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int b = index / spatial;
int i = index % spatial;
int f;
float sum = 0;
for(f = 0; f < filters; ++f){
int index = b*filters*spatial + f*spatial + i;
sum += powf(x[index], 2);
}
sum = sqrtf(sum);
if(sum == 0) sum = 1;
//printf("%f\n", sum);
for(f = 0; f < filters; ++f){
int index = b*filters*spatial + f*spatial + i;
x[index] /= sum;
dx[index] = (1 - x[index]) / sum;
}
}
void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial)
{
size_t N = batch*spatial;
hipLaunchKernelGGL(( l2norm_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, dx, batch, filters, spatial);
check_error(hipPeekAtLastError());
}
__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? x[index] : 0;
}
}
__syncthreads();
if(id == 0){
mean[filter] = 0;
for(i = 0; i < threads; ++i){
mean[filter] += local[i];
}
mean[filter] /= spatial * batch;
}
}
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
}
}
__syncthreads();
if(id == 0){
variance[filter] = 0;
for(i = 0; i < threads; ++i){
variance[filter] += local[i];
}
variance[filter] /= (spatial * batch - 1);
}
}
void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
hipLaunchKernelGGL(( fast_mean_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean);
check_error(hipPeekAtLastError());
}
void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
hipLaunchKernelGGL(( fast_variance_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance);
check_error(hipPeekAtLastError());
}
void mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
hipLaunchKernelGGL(( mean_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean);
check_error(hipPeekAtLastError());
}
void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
hipLaunchKernelGGL(( variance_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance);
check_error(hipPeekAtLastError());
}
void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY);
}
void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
hipLaunchKernelGGL(( pow_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX, Y, INCY);
check_error(hipPeekAtLastError());
}
void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
hipLaunchKernelGGL(( axpy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
check_error(hipPeekAtLastError());
}
void copy_gpu(int N, float * X, int INCX, float * Y, int INCY)
{
copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
void mul_gpu(int N, float * X, int INCX, float * Y, int INCY)
{
hipLaunchKernelGGL(( mul_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, INCX, Y, INCY);
check_error(hipPeekAtLastError());
}
void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
hipLaunchKernelGGL(( copy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, OFFX, INCX, Y, OFFY, INCY);
check_error(hipPeekAtLastError());
}
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_s = i%spatial;
i = i/spatial;
int in_c = i%layers;
i = i/layers;
int b = i;
int i1 = b*layers*spatial + in_c*spatial + in_s;
int i2 = b*layers*spatial + in_s*layers + in_c;
if (forward) out[i2] = x[i1];
else out[i1] = x[i2];
}
void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
int size = spatial*batch*layers;
hipLaunchKernelGGL(( flatten_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, spatial, layers, batch, forward, out);
check_error(hipPeekAtLastError());
}
void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int size = w*h*c*batch;
hipLaunchKernelGGL(( reorg_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, w, h, c, batch, stride, forward, out);
check_error(hipPeekAtLastError());
}
__global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] = val;
}
void mask_gpu(int N, float * X, float mask_num, float * mask, float val)
{
hipLaunchKernelGGL(( mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, val);
check_error(hipPeekAtLastError());
}
__global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] *= scale;
}
void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale)
{
hipLaunchKernelGGL(( scale_mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, scale);
check_error(hipPeekAtLastError());
}
void const_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( const_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
void constrain_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( constrain_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
void add_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( add_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
void scal_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( scal_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
void supp_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( supp_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
void fill_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( fill_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
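// Shortcut (residual) addition between layers whose spatial sizes differ by an
// integer factor: `stride` subsamples the add input, `sample` spaces the writes in
// the output, and the result is the weighted sum s1*out + s2*add.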
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int i = id % minw;
id /= minw;
int j = id % minh;
id /= minh;
int k = id % minc;
id /= minc;
int b = id % batch;
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] = s1*out[out_index] + s2*add[add_index];
//out[out_index] += add[add_index];
}
void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
{
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int stride = w1/w2;
int sample = w2/w1;
assert(stride == h1/h2);
assert(sample == h2/h1);
if(stride < 1) stride = 1;
if(sample < 1) sample = 1;
int size = batch * minw * minh * minc;
hipLaunchKernelGGL(( shortcut_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out);
check_error(hipPeekAtLastError());
}
__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
float abs_val = fabsf(diff);
if(abs_val < 1) {
error[i] = diff * diff;
delta[i] = diff;
}
else {
error[i] = 2*abs_val - 1;
delta[i] = (diff > 0) ? 1 : -1;
}
}
}
void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
hipLaunchKernelGGL(( smooth_l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float t = truth[i];
float p = pred[i];
error[i] = (t) ? -log(p) : 0;
delta[i] = t-p;
}
}
void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
hipLaunchKernelGGL(( softmax_x_ent_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float t = truth[i];
float p = pred[i];
error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001);
delta[i] = t-p;
}
}
void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
hipLaunchKernelGGL(( logistic_x_ent_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
error[i] = diff * diff; //I know this is technically wrong, deal with it.
delta[i] = diff;
}
}
void l2_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
hipLaunchKernelGGL(( l2_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
error[i] = abs(diff);
delta[i] = (diff > 0) ? 1 : -1;
}
}
void l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
hipLaunchKernelGGL(( l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
error[i] = truth[i] ? -pred[i] : pred[i];
delta[i] = (truth[i] > 0) ? 1 : -1;
}
}
void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
hipLaunchKernelGGL(( wgan_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
}
}
__global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < (NX+NY)*B){
int b = i / (NX+NY);
int j = i % (NX+NY);
if (j < NX){
if(X) X[b*NX + j] += OUT[i];
} else {
if(Y) Y[b*NY + j - NX] += OUT[i];
}
}
}
void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
hipLaunchKernelGGL(( deinter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT);
check_error(hipPeekAtLastError());
}
__global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < (NX+NY)*B){
int b = i / (NX+NY);
int j = i % (NX+NY);
if (j < NX){
OUT[i] = X[b*NX + j];
} else {
OUT[i] = Y[b*NY + j - NX];
}
}
}
void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
hipLaunchKernelGGL(( inter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT);
check_error(hipPeekAtLastError());
}
void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c)
{
hipLaunchKernelGGL(( weighted_sum_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, c);
check_error(hipPeekAtLastError());
}
__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
if(da) da[i] += dc[i] * s[i];
if(db) db[i] += dc[i] * (1-s[i]);
ds[i] += dc[i] * (a[i] - b[i]);
}
}
void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc)
{
hipLaunchKernelGGL(( weighted_delta_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, da, db, ds, dc);
check_error(hipPeekAtLastError());
}
__global__ void mult_add_into_kernel(int n, float *a, float *b, float *c)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
c[i] += a[i]*b[i];
}
}
void mult_add_into_gpu(int num, float *a, float *b, float *c)
{
hipLaunchKernelGGL(( mult_add_into_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, c);
check_error(hipPeekAtLastError());
}
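// softmax_device computes a numerically stable softmax over n elements spaced
// `stride` apart: the maximum is subtracted inside the exponent
// (expf(x/temp - largest/temp)) so the exponentials cannot overflow, and
// `temp` is the usual softmax temperature.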
__device__ void softmax_device(float *input, int n, float temp, int stride, float *output)
{
int i;
float sum = 0;
float largest = -INFINITY;
for(i = 0; i < n; ++i){
        float val = input[i*stride]; // float, so the running max is not truncated
largest = (val>largest) ? val : largest;
}
for(i = 0; i < n; ++i){
float e = expf(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i = 0; i < n; ++i){
output[i*stride] /= sum;
}
}
__global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= spatial*batch*groups) return;
int s = id % spatial;
id = id / spatial;
int g = id % groups;
int b = id / groups;
int goff = group_offset[g]*spatial;
int boff = b*stride;
softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s);
}
void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier)
{
int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
/*
static int *tree_groups_size = 0;
static int *tree_groups_offset = 0;
if(!tree_groups_size){
tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
}
*/
int num = spatial*batch*hier.groups;
hipLaunchKernelGGL(( softmax_tree_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset);
check_error(hipPeekAtLastError());
cuda_free((float *)tree_groups_size);
cuda_free((float *)tree_groups_offset);
}
__global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= batch*groups) return;
int b = id / groups;
int g = id % groups;
softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
}
void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
hipLaunchKernelGGL(( softmax_kernel), dim3(cuda_gridsize(batch*groups)), dim3(BLOCK), 0, 0, input, n, batch, batch_offset, groups, group_offset, stride, temp, output);
check_error(hipPeekAtLastError());
}
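// upsample_kernel indexes the enlarged (w*stride x h*stride) output grid.
// Forward: each output element accumulates scale * its nearest-neighbor input
// element. Backward: the roles flip and the gradient is pushed back into x
// with atomicAdd, because stride*stride output positions map onto the same
// input element.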
__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int out_index = i;
int out_w = i%(w*stride);
i = i/(w*stride);
int out_h = i%(h*stride);
i = i/(h*stride);
int out_c = i%c;
i = i/c;
int b = i%batch;
int in_w = out_w / stride;
int in_h = out_h / stride;
int in_c = out_c;
int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;
if(forward) out[out_index] += scale * x[in_index];
else atomicAdd(x+in_index, scale * out[out_index]);
}
void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
size_t size = w*h*c*batch*stride*stride;
hipLaunchKernelGGL(( upsample_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, in, w, h, c, batch, stride, forward, scale, out);
check_error(hipPeekAtLastError());
}
| f4696d397672408e700376d230ef9de50d4fc688.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include <assert.h>
#include "blas.h"
#include "cuda.h"
#include "utils.h"
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter];
}
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
check_error(cudaPeekAtLastError());
}
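// backward_scale_kernel runs one block per filter: every thread strides over
// that filter's spatial positions across the whole batch, accumulates
// delta*x_norm into its slot of the shared `part` array, and thread 0 adds the
// BLOCK partial sums into scale_updates[filter].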
__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
}
}
void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates);
check_error(cudaPeekAtLastError());
}
__global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n*size*batch) return;
int i = index % size;
index /= size;
int j = index % n;
index /= n;
int k = index;
output[(k*n+j)*size + i] += biases[j];
}
void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
int num = n*size*batch;
add_bias_kernel<<<cuda_gridsize(num), BLOCK>>>(output, biases, batch, n, size);
check_error(cudaPeekAtLastError());
}
__global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n) return;
int b;
float sum = 0;
for(b = 0; b < batch; ++b){
int i = b*n + index;
sum += delta[i];
}
bias_updates[index] += sum;
}
__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
}
}
void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
{
if(size == 1){
backward_bias_conn_kernel<<<cuda_gridsize(n), BLOCK>>>(bias_updates, delta, batch, n);
}else{
backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size);
}
check_error(cudaPeekAtLastError());
}
/*
__global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int f1 = index / n;
int f2 = index % n;
if (f2 <= f1) return;
float sum = 0;
float norm1 = 0;
float norm2 = 0;
int b, i;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
sum += output[i1] * output[i2];
norm1 += output[i1] * output[i1];
norm2 += output[i2] * output[i2];
}
}
norm1 = sqrt(norm1);
norm2 = sqrt(norm2);
float norm = norm1 * norm2;
sum = sum / norm;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
delta[i1] += - scale * sum * output[i2] / norm;
delta[i2] += - scale * sum * output[i1] / norm;
}
}
}
void dot_error_gpu(layer l)
{
dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu);
check_error(cudaPeekAtLastError());
}
*/
__global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
float mhat = m[index] / (1.f - powf(B1, t));
float vhat = v[index] / (1.f - powf(B2, t));
x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps);
}
void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
adam_kernel<<<cuda_gridsize(n), BLOCK>>>(n, x, m, v, B1, B2, rate, eps, t);
check_error(cudaPeekAtLastError());
}
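// adam_update_gpu expresses one Adam step with the BLAS-style helpers above:
// weight decay is folded into the gradient (d <- d - decay*batch*w), the
// moments become m <- B1*m + (1-B1)*d and v <- B2*v + (1-B2)*d^2, adam_gpu
// then applies the bias-corrected update to w, and the gradient buffer d is
// cleared for the next batch.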
void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t)
{
scal_gpu(n, B1, m, 1);
scal_gpu(n, B2, v, 1);
axpy_gpu(n, -decay*batch, w, 1, d, 1);
axpy_gpu(n, (1-B1), d, 1, m, 1);
mul_gpu(n, d, 1, d, 1);
axpy_gpu(n, (1-B2), d, 1, v, 1);
adam_gpu(n, w, m, v, B1, B2, rate, eps, t);
fill_gpu(n, 0, d, 1);
}
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f));
}
__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
}
void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
size_t N = batch*filters*spatial;
normalize_delta_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta);
check_error(cudaPeekAtLastError());
}
__global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
variance_delta[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance_delta[i] += delta[index]*(x[index] - mean[i]);
}
}
variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f));
}
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
{
int k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= groups) return;
sum[i] = 0;
for(k = 0; k < n; ++k){
sum[i] += x[k*groups + i];
}
}
__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index] : 0;
}
}
__syncthreads();
if(id == 0){
mean_delta[filter] = 0;
for(i = 0; i < threads; ++i){
mean_delta[filter] += local[i];
}
mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f));
}
}
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
}
}
__syncthreads();
if(id == 0){
variance_delta[filter] = 0;
for(i = 0; i < threads; ++i){
variance_delta[filter] += local[i];
}
variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f));
}
}
__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean_delta[i] = 0;
for (j = 0; j < batch; ++j) {
for (k = 0; k < spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
mean_delta[i] += delta[index];
}
}
mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f));
}
void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
mean_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
check_error(cudaPeekAtLastError());
}
void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
check_error(cudaPeekAtLastError());
}
void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta);
check_error(cudaPeekAtLastError());
}
__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
float scale = 1.f/(batch * spatial);
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
mean[i] += x[index];
}
}
mean[i] *= scale;
}
__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
float scale = 1.f/(batch * spatial - 1);
int j,k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
variance[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance[i] += powf((x[index] - mean[i]), 2);
}
}
variance[i] *= scale;
}
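// reorg_kernel is the depth<->space shuffle used by the reorg layer: the flat
// index is unpacked into (b, in_c, in_h, in_w), the channel dimension is split
// into out_c = c/(stride*stride) groups, and the remaining channel factor
// selects an offset inside a stride x stride block of the enlarged
// (w*stride x h*stride) spatial grid. `forward` selects the copy direction.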
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_index = i;
int in_w = i%w;
i = i/w;
int in_h = i%h;
i = i/h;
int in_c = i%c;
i = i/c;
int b = i%batch;
int out_c = c/(stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
//printf("%d\n", offset);
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
// printf("%d %d %d\n", w2, h2, c2);
//printf("%d %d\n", in_index, out_index);
//if(out_index >= N || out_index < 0) printf("bad bad bad \n");
if(forward) out[out_index] = x[in_index];
else out[in_index] = x[out_index];
//if(forward) out[1] = x[1];
//else out[0] = x[0];
}
__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
}
__global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}
__global__ void const_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX]));
}
__global__ void supp_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) {
if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0;
}
}
__global__ void add_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] += ALPHA;
}
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] *= ALPHA;
}
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
__global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] *= X[i*INCX];
}
void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
size_t N = batch*filters*spatial;
normalize_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, batch, filters, spatial);
check_error(cudaPeekAtLastError());
}
__global__ void l2norm_kernel(int N, float *x, float *dx, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int b = index / spatial;
int i = index % spatial;
int f;
float sum = 0;
for(f = 0; f < filters; ++f){
int index = b*filters*spatial + f*spatial + i;
sum += powf(x[index], 2);
}
sum = sqrtf(sum);
if(sum == 0) sum = 1;
//printf("%f\n", sum);
for(f = 0; f < filters; ++f){
int index = b*filters*spatial + f*spatial + i;
x[index] /= sum;
dx[index] = (1 - x[index]) / sum;
}
}
void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial)
{
size_t N = batch*spatial;
l2norm_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, dx, batch, filters, spatial);
check_error(cudaPeekAtLastError());
}
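// fast_mean_kernel and fast_variance_kernel use one block per filter: the
// BLOCK threads stride across the spatial extent of every batch item, keep
// partial sums in the shared `local` array, and thread 0 finishes the
// reduction, dividing by spatial*batch for the mean and by spatial*batch - 1
// for the (unbiased) variance.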
__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? x[index] : 0;
}
}
__syncthreads();
if(id == 0){
mean[filter] = 0;
for(i = 0; i < threads; ++i){
mean[filter] += local[i];
}
mean[filter] /= spatial * batch;
}
}
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
}
}
__syncthreads();
if(id == 0){
variance[filter] = 0;
for(i = 0; i < threads; ++i){
variance[filter] += local[i];
}
variance[filter] /= (spatial * batch - 1);
}
}
void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean);
check_error(cudaPeekAtLastError());
}
void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance);
check_error(cudaPeekAtLastError());
}
void mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean);
check_error(cudaPeekAtLastError());
}
void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance);
check_error(cudaPeekAtLastError());
}
void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY);
}
void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
pow_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX, Y, INCY);
check_error(cudaPeekAtLastError());
}
void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
axpy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
check_error(cudaPeekAtLastError());
}
void copy_gpu(int N, float * X, int INCX, float * Y, int INCY)
{
copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
void mul_gpu(int N, float * X, int INCX, float * Y, int INCY)
{
mul_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, INCX, Y, INCY);
check_error(cudaPeekAtLastError());
}
void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
copy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, OFFX, INCX, Y, OFFY, INCY);
check_error(cudaPeekAtLastError());
}
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_s = i%spatial;
i = i/spatial;
int in_c = i%layers;
i = i/layers;
int b = i;
int i1 = b*layers*spatial + in_c*spatial + in_s;
int i2 = b*layers*spatial + in_s*layers + in_c;
if (forward) out[i2] = x[i1];
else out[i1] = x[i2];
}
void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
int size = spatial*batch*layers;
flatten_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, spatial, layers, batch, forward, out);
check_error(cudaPeekAtLastError());
}
void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int size = w*h*c*batch;
reorg_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, stride, forward, out);
check_error(cudaPeekAtLastError());
}
__global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] = val;
}
void mask_gpu(int N, float * X, float mask_num, float * mask, float val)
{
mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, val);
check_error(cudaPeekAtLastError());
}
__global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] *= scale;
}
void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale)
{
scale_mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, scale);
check_error(cudaPeekAtLastError());
}
void const_gpu(int N, float ALPHA, float * X, int INCX)
{
const_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
void constrain_gpu(int N, float ALPHA, float * X, int INCX)
{
constrain_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
void add_gpu(int N, float ALPHA, float * X, int INCX)
{
add_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
void scal_gpu(int N, float ALPHA, float * X, int INCX)
{
scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
void supp_gpu(int N, float ALPHA, float * X, int INCX)
{
supp_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
void fill_gpu(int N, float ALPHA, float * X, int INCX)
{
fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int i = id % minw;
id /= minw;
int j = id % minh;
id /= minh;
int k = id % minc;
id /= minc;
int b = id % batch;
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] = s1*out[out_index] + s2*add[add_index];
//out[out_index] += add[add_index];
}
void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
{
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int stride = w1/w2;
int sample = w2/w1;
assert(stride == h1/h2);
assert(sample == h2/h1);
if(stride < 1) stride = 1;
if(sample < 1) sample = 1;
int size = batch * minw * minh * minc;
shortcut_kernel<<<cuda_gridsize(size), BLOCK>>>(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out);
check_error(cudaPeekAtLastError());
}
__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
float abs_val = fabsf(diff);
if(abs_val < 1) {
error[i] = diff * diff;
delta[i] = diff;
}
else {
error[i] = 2*abs_val - 1;
delta[i] = (diff > 0) ? 1 : -1;
}
}
}
void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float t = truth[i];
float p = pred[i];
error[i] = (t) ? -log(p) : 0;
delta[i] = t-p;
}
}
void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
softmax_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float t = truth[i];
float p = pred[i];
error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001);
delta[i] = t-p;
}
}
void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
logistic_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
error[i] = diff * diff; //I know this is technically wrong, deal with it.
delta[i] = diff;
}
}
void l2_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
error[i] = abs(diff);
delta[i] = (diff > 0) ? 1 : -1;
}
}
void l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
error[i] = truth[i] ? -pred[i] : pred[i];
delta[i] = (truth[i] > 0) ? 1 : -1;
}
}
void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
wgan_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
}
}
__global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < (NX+NY)*B){
int b = i / (NX+NY);
int j = i % (NX+NY);
if (j < NX){
if(X) X[b*NX + j] += OUT[i];
} else {
if(Y) Y[b*NY + j - NX] += OUT[i];
}
}
}
void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
deinter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT);
check_error(cudaPeekAtLastError());
}
__global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < (NX+NY)*B){
int b = i / (NX+NY);
int j = i % (NX+NY);
if (j < NX){
OUT[i] = X[b*NX + j];
} else {
OUT[i] = Y[b*NY + j - NX];
}
}
}
void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
inter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT);
check_error(cudaPeekAtLastError());
}
void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c)
{
weighted_sum_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, c);
check_error(cudaPeekAtLastError());
}
__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
if(da) da[i] += dc[i] * s[i];
if(db) db[i] += dc[i] * (1-s[i]);
ds[i] += dc[i] * (a[i] - b[i]);
}
}
void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc)
{
weighted_delta_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, da, db, ds, dc);
check_error(cudaPeekAtLastError());
}
__global__ void mult_add_into_kernel(int n, float *a, float *b, float *c)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
c[i] += a[i]*b[i];
}
}
void mult_add_into_gpu(int num, float *a, float *b, float *c)
{
mult_add_into_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, c);
check_error(cudaPeekAtLastError());
}
__device__ void softmax_device(float *input, int n, float temp, int stride, float *output)
{
int i;
float sum = 0;
float largest = -INFINITY;
for(i = 0; i < n; ++i){
        float val = input[i*stride]; // float, so the running max is not truncated
largest = (val>largest) ? val : largest;
}
for(i = 0; i < n; ++i){
float e = expf(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i = 0; i < n; ++i){
output[i*stride] /= sum;
}
}
__global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= spatial*batch*groups) return;
int s = id % spatial;
id = id / spatial;
int g = id % groups;
int b = id / groups;
int goff = group_offset[g]*spatial;
int boff = b*stride;
softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s);
}
void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier)
{
int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
/*
static int *tree_groups_size = 0;
static int *tree_groups_offset = 0;
if(!tree_groups_size){
tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
}
*/
int num = spatial*batch*hier.groups;
softmax_tree_kernel<<<cuda_gridsize(num), BLOCK>>>(input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset);
check_error(cudaPeekAtLastError());
cuda_free((float *)tree_groups_size);
cuda_free((float *)tree_groups_offset);
}
__global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= batch*groups) return;
int b = id / groups;
int g = id % groups;
softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
}
void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
softmax_kernel<<<cuda_gridsize(batch*groups), BLOCK>>>(input, n, batch, batch_offset, groups, group_offset, stride, temp, output);
check_error(cudaPeekAtLastError());
}
__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int out_index = i;
int out_w = i%(w*stride);
i = i/(w*stride);
int out_h = i%(h*stride);
i = i/(h*stride);
int out_c = i%c;
i = i/c;
int b = i%batch;
int in_w = out_w / stride;
int in_h = out_h / stride;
int in_c = out_c;
int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;
if(forward) out[out_index] += scale * x[in_index];
else atomicAdd(x+in_index, scale * out[out_index]);
}
void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
size_t size = w*h*c*batch*stride*stride;
upsample_kernel<<<cuda_gridsize(size), BLOCK>>>(size, in, w, h, c, batch, stride, forward, scale, out);
check_error(cudaPeekAtLastError());
}
|
013be8594157106b3387b8d428ba566b8b3a6440.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
* Currently, `initializeElementsTo`, if executed in a thread whose
 * `i` is calculated to be greater than or equal to `N`, will try to access
 * a value outside the range of `a`.
*
 * Refactor the kernel definition to prevent out-of-range accesses.
*/
__global__ void initializeElementsTo(int initialValue, int *a, int N)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < N) {
a[i] = initialValue;
}
}
int main()
{
/*
* Do not modify `N`.
*/
int N = 1000;
int *a;
size_t size = N * sizeof(int);
hipMallocManaged(&a, size);
/*
* Assume we have reason to want the number of threads
* fixed at `256`: do not modify `threads_per_block`.
*/
size_t threads_per_block = 256;
/*
* Assign a value to `number_of_blocks` that will
* allow for a working execution configuration given
* the fixed values for `N` and `threads_per_block`.
*/
size_t number_of_blocks = (N + threads_per_block - 1) / threads_per_block;
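  // With N = 1000 and 256 threads per block this rounds up to
  // (1000 + 255) / 256 = 4 blocks (1024 threads); the i < N guard in the
  // kernel keeps the 24 surplus threads from writing past the end of a.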
int initialValue = 6;
hipLaunchKernelGGL(( initializeElementsTo), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, initialValue, a, N);
hipDeviceSynchronize();
/*
* Check to make sure all values in `a`, were initialized.
*/
for (int i = 0; i < N; ++i)
{
if(a[i] != initialValue)
{
printf("FAILURE: target value: %d\t a[%d]: %d\n", initialValue, i, a[i]);
exit(1);
}
}
printf("SUCCESS!\n");
hipFree(a);
} | 013be8594157106b3387b8d428ba566b8b3a6440.cu | #include <stdio.h>
/*
* Currently, `initializeElementsTo`, if executed in a thread whose
 * `i` is calculated to be greater than or equal to `N`, will try to access
 * a value outside the range of `a`.
*
 * Refactor the kernel definition to prevent out-of-range accesses.
*/
__global__ void initializeElementsTo(int initialValue, int *a, int N)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < N) {
a[i] = initialValue;
}
}
int main()
{
/*
* Do not modify `N`.
*/
int N = 1000;
int *a;
size_t size = N * sizeof(int);
cudaMallocManaged(&a, size);
/*
* Assume we have reason to want the number of threads
* fixed at `256`: do not modify `threads_per_block`.
*/
size_t threads_per_block = 256;
/*
* Assign a value to `number_of_blocks` that will
* allow for a working execution configuration given
* the fixed values for `N` and `threads_per_block`.
*/
size_t number_of_blocks = (N + threads_per_block - 1) / threads_per_block;
int initialValue = 6;
initializeElementsTo<<<number_of_blocks, threads_per_block>>>(initialValue, a, N);
cudaDeviceSynchronize();
/*
* Check to make sure all values in `a`, were initialized.
*/
for (int i = 0; i < N; ++i)
{
if(a[i] != initialValue)
{
printf("FAILURE: target value: %d\t a[%d]: %d\n", initialValue, i, a[i]);
exit(1);
}
}
printf("SUCCESS!\n");
cudaFree(a);
} |
6ad631dd17b09535dfbd0e9d28168a18e5d5d236.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/spectral/matrix_wrappers.hpp>
#include "spmv_1D.cuh"
namespace cugraph {
namespace mg {
template <typename vertex_t, typename edge_t, typename weight_t>
MGcsrmv<vertex_t, edge_t, weight_t>::MGcsrmv(raft::handle_t const &handle,
vertex_t *local_vertices,
vertex_t *part_off,
edge_t *off,
vertex_t *ind,
weight_t *val,
weight_t *x)
: handle_(handle),
local_vertices_(local_vertices),
part_off_(part_off),
off_(off),
ind_(ind),
val_(val)
{
i_ = handle_.get_comms().get_rank();
p_ = handle_.get_comms().get_size();
v_glob_ = part_off_[p_ - 1] + local_vertices_[p_ - 1];
v_loc_ = local_vertices_[i_];
vertex_t tmp;
CUDA_TRY(hipMemcpy(&tmp, &off_[v_loc_], sizeof(vertex_t), hipMemcpyDeviceToHost));
e_loc_ = tmp;
y_loc_.resize(v_loc_);
}
template <typename vertex_t, typename edge_t, typename weight_t>
MGcsrmv<vertex_t, edge_t, weight_t>::~MGcsrmv()
{
}
template <typename vertex_t, typename edge_t, typename weight_t>
void MGcsrmv<vertex_t, edge_t, weight_t>::run(weight_t *x)
{
using namespace raft::matrix;
weight_t h_one = 1.0;
weight_t h_zero = 0.0;
sparse_matrix_t<vertex_t, weight_t> mat{handle_, // raft handle
off_, // CSR row_offsets
ind_, // CSR col_indices
val_, // CSR values
static_cast<vertex_t>(v_loc_), // n_rows
static_cast<vertex_t>(v_glob_), // n_cols
static_cast<vertex_t>(e_loc_)}; // nnz
mat.mv(h_one, // alpha
x, // x
h_zero, // beta
y_loc_.data().get(), // y
sparse_mv_alg_t::SPARSE_MV_ALG2); // SpMV algorithm
auto stream = handle_.get_stream();
auto const &comm{handle_.get_comms()}; // local
std::vector<size_t> recvbuf(comm.get_size());
std::vector<size_t> displs(comm.get_size());
std::copy(local_vertices_, local_vertices_ + comm.get_size(), recvbuf.begin());
std::copy(part_off_, part_off_ + comm.get_size(), displs.begin());
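  // Reassemble the full result on every rank: y_loc_ holds this rank's slice,
  // recvbuf the per-rank slice sizes and displs their global offsets, so the
  // allgatherv below writes each slice into its place in x.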
comm.allgatherv(y_loc_.data().get(), x, recvbuf.data(), displs.data(), stream);
}
template class MGcsrmv<int32_t, int32_t, double>;
template class MGcsrmv<int32_t, int32_t, float>;
} // namespace mg
} // namespace cugraph
| 6ad631dd17b09535dfbd0e9d28168a18e5d5d236.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/spectral/matrix_wrappers.hpp>
#include "spmv_1D.cuh"
namespace cugraph {
namespace mg {
template <typename vertex_t, typename edge_t, typename weight_t>
MGcsrmv<vertex_t, edge_t, weight_t>::MGcsrmv(raft::handle_t const &handle,
vertex_t *local_vertices,
vertex_t *part_off,
edge_t *off,
vertex_t *ind,
weight_t *val,
weight_t *x)
: handle_(handle),
local_vertices_(local_vertices),
part_off_(part_off),
off_(off),
ind_(ind),
val_(val)
{
i_ = handle_.get_comms().get_rank();
p_ = handle_.get_comms().get_size();
v_glob_ = part_off_[p_ - 1] + local_vertices_[p_ - 1];
v_loc_ = local_vertices_[i_];
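  // off_[v_loc_] is the row offset one past the last local row of the CSR
  // partition, i.e. the number of nonzeros (edges) owned by this rank.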
vertex_t tmp;
CUDA_TRY(cudaMemcpy(&tmp, &off_[v_loc_], sizeof(vertex_t), cudaMemcpyDeviceToHost));
e_loc_ = tmp;
y_loc_.resize(v_loc_);
}
template <typename vertex_t, typename edge_t, typename weight_t>
MGcsrmv<vertex_t, edge_t, weight_t>::~MGcsrmv()
{
}
template <typename vertex_t, typename edge_t, typename weight_t>
void MGcsrmv<vertex_t, edge_t, weight_t>::run(weight_t *x)
{
using namespace raft::matrix;
weight_t h_one = 1.0;
weight_t h_zero = 0.0;
sparse_matrix_t<vertex_t, weight_t> mat{handle_, // raft handle
off_, // CSR row_offsets
ind_, // CSR col_indices
val_, // CSR values
static_cast<vertex_t>(v_loc_), // n_rows
static_cast<vertex_t>(v_glob_), // n_cols
static_cast<vertex_t>(e_loc_)}; // nnz
mat.mv(h_one, // alpha
x, // x
h_zero, // beta
y_loc_.data().get(), // y
sparse_mv_alg_t::SPARSE_MV_ALG2); // SpMV algorithm
auto stream = handle_.get_stream();
auto const &comm{handle_.get_comms()}; // local
std::vector<size_t> recvbuf(comm.get_size());
std::vector<size_t> displs(comm.get_size());
std::copy(local_vertices_, local_vertices_ + comm.get_size(), recvbuf.begin());
std::copy(part_off_, part_off_ + comm.get_size(), displs.begin());
comm.allgatherv(y_loc_.data().get(), x, recvbuf.data(), displs.data(), stream);
}
template class MGcsrmv<int32_t, int32_t, double>;
template class MGcsrmv<int32_t, int32_t, float>;
} // namespace mg
} // namespace cugraph
|
6a1741324967feeb7c0991f7ae5dbc82557540f6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// CUDA sample demonstrating a GEMM computation using the Warp Matrix Multiply
// and Accumulate API introduced in CUDA 9.
// In this program, the compute_gemm kernel computes the result of a matrix
// multiplication and addition: D = alpha * A * B + beta * C. The dimensions of
// both C and D matrices are M_GLOBAL x N_GLOBAL. The A matrix is M_GLOBAL x
// K_GLOBAL (row-major), the B matrix is K_GLOBAL x N_GLOBAL (column-major). In
// that kernel, each CTA computes one 128 x 128 tile of the resulting matrix per
// iteration. When the tile is computed, the CTA stores it to the global memory
// and begins a new iteration, selecting a new 128 x 128 tile to compute.
// Each CTA consists of eight warps. For the 128 x 128 tile, each warp computes
// eight 16 x 16 subtiles, organized in a 2 x 4 two-dimensional array. Warps
// compute the 16 x 16 subtiles using nvcuda::wmma::mma_sync operations by
// moving through the K_GLOBAL dimension of the A and B matrices and
// accumulating the intermediate result in the local thread state.
// There are a number of simple optimizations used in the algorithm:
// - The CTA copies the 128 x 128 tile of the C matrix from the global memory to
// shared memory. After that is done, each warp loads the C matrix fragments
// from shared memory, thus avoiding a random global memory access.
// - On each internal iteration, the CTA copies a portion of the A and B
// matrices from
// global memory to shared memory. After that, all warps in the CTA reuse the
// A and B data from shared memory, thus reducing the number of data copies
// from global memory.
// - The portions of the A and B matrices are stored in shared memory with an
// additional
// padding (skew) to reduce the number of shared memory access bank conflicts.
// (See a detailed explanation near the SKEW_HALF macro definition.)
// - When the CTA finishes computing the tiles of the resulting matrix, each
// warp stores
// its subtiles to shared memory. The CTA then copies the shared memory
// contents to global memory, again avoiding redundant random global memory
// accesses.
// - Note that the CTA tile size is chosen to maximize the GPU register
// utilization,
// but carefully enough to avoid local memory use.
#include <assert.h>
#include <hip/hip_runtime.h>
#include <mma.h>
#include <stdio.h>
// helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>
// Externally configurable parameters.
#ifndef CPU_DEBUG
// Set this to 1 to verify the correctness of the GPU-computed matrix.
#define CPU_DEBUG 0
#endif
#ifndef SHARED_MEMORY_LIMIT_64K
// Set this to 0 to use more than 64 Kb of shared memory to cache data, to
// improve the performance of the computations on GPU.
// Note that you need a GPU that can have more than 64 Kb of shared memory
// per multiprocessor.
#define SHARED_MEMORY_LIMIT_64K 1
#endif
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 16
#define N 16
#define K 16
#define WMMA_M 16
#define WMMA_N 16
#define WMMA_K 16
// GEMM configuration.
#define M_TILES 256
#define N_TILES 256
#define K_TILES 256
#define M_GLOBAL (M * M_TILES)
#define N_GLOBAL (N * N_TILES)
#define K_GLOBAL (K * K_TILES)
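// With the tile counts above the GEMM runs on 4096 x 4096 matrices
// (256 tiles of 16 elements in each dimension).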
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#if SHARED_MEMORY_LIMIT_64K
// With only 64 Kb shared memory available, we can fit two 8-tile chunks of
// the A and B matrix data, that are 16 * 16 * 8 * 8 * 2 = 32 Kb each
// (i.e. two 8x8 arrays of tiles of 16x16 half-typed elements per CTA).
// But we cannot account the 8 Kb total skew overhead, without which the
// performance would be severely impacted. So we choose to reduce the chunk size
// in half, i.e. the amount of A and B matrix data we cache in shared memory.
// Accordingly, this doubles the number of outer iterations across the global K
// dimension, which only slightly impacts the performance.
#define CHUNK_K 4
#else
#define CHUNK_K 8
#endif
#define CHUNK_LINE_BYTES (CHUNK_K * K * sizeof(half))
#define WARP_COPY_BYTES (WARP_SIZE * sizeof(int4))
#define CHUNK_COPY_LINES_PER_WARP (WARP_COPY_BYTES / CHUNK_LINE_BYTES)
#define CHUNK_COPY_LINE_LANES (WARP_SIZE / CHUNK_COPY_LINES_PER_WARP)
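// With the default 64 Kb path (CHUNK_K = 4): a shared-memory line is
// 4 * 16 * sizeof(half) = 128 bytes and a full warp of int4 loads moves
// 32 * 16 = 512 bytes, so each warp copies 4 lines at a time with 8 lanes per
// line.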
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
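// BLOCK_ROW_TILES = BLOCK_COL_TILES = 8, so each CTA produces a 128 x 128 tile
// of D; SHMEM_STRIDE (128 floats) is that tile's row pitch in shared memory
// and SHMEM_OFFSET (64) is the column offset used by the odd-numbered warps.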
// The macro below is used to shift rows of the A matrix and columns of the B
// matrix in shared memory to minimize possible bank conflicts. Before
// performing the nvcuda::wmma::mma_sync operation, the warp must load the
// matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the
// memory access pattern is not specified for that function, each lane in the
// warp can read one or multiple matrix elements from different matrix rows or
// columns. For shared memory, such access can result in bank conflicts if
// different rows / columns of the matrix map to the same bank. By shifting each
// row and column by a few bytes, we make sure that they map to different banks,
// thus reducing the number of possible bank conflicts. The number of 16
// two-byte "half" elements is chosen as the minimum possible shift because we
// must keep each row and column 256-bit aligned, as required by
// nvcuda::wmma::load_matrix_sync.
#define SKEW_HALF 16
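// With CHUNK_K = 4 the padded line is (4*16 + 16) halves = 160 bytes instead
// of 128, so consecutive rows start 8 banks apart instead of all starting on
// the same shared-memory bank.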
#define checkKernelErrors(expr) \
do { \
expr; \
\
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
hipGetErrorString(__err)); \
abort(); \
} \
} while (0)
using namespace nvcuda;
__host__ void init_host_matrices(half *a, half *b, float *c) {
for (int i = 0; i < M_GLOBAL; i++) {
for (int j = 0; j < K_GLOBAL; j++) {
a[i * K_GLOBAL + j] = (half)(rand() % 3);
}
}
for (int i = 0; i < N_GLOBAL; i++) {
for (int j = 0; j < K_GLOBAL; j++) {
b[i * K_GLOBAL + j] = (half)(rand() % 3);
}
}
for (int t = 0; t < M_GLOBAL * N_GLOBAL; t++) {
c[t] = (float)(rand() % 3);
}
}
__global__ void compute_gemm(const half *A, const half *B, const float *C,
float *D, float alpha, float beta) {
extern __shared__ half shmem[][CHUNK_K * K + SKEW_HALF];
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = BLOCK_COL_TILES * M;
// This pointer is used to access the C and D matrix tiles this warp computes.
float *shmem_warp_tile_ptr = (float *)&shmem[0][0] +
(warpId / 2) * SHMEM_STRIDE * K * 2 +
(warpId % 2) * SHMEM_OFFSET;
// This pointer is used to stream the C and D matrices block-wide tile to and
// from shared memory.
float *shmem_warp_stream_ptr =
(float *)&shmem[0][0] + warpId * SHMEM_STRIDE * K;
// Adjust the beta scaler, as it'll be multiplied by alpha at the end of
// each tile computation. Technically this is not generally correct (may
// result in a loss of precision). Zero still needs to be specially handled
// though.
beta /= alpha;
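  // Loading C scaled by beta/alpha and multiplying the finished accumulators
  // by alpha at the very end yields alpha*A*B + beta*C with one final scale.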
// Each CTA slides along the 128 x 128 tiles from the top left corner of the
// matrix to the right and down, and selects the next tile to compute. Once
// there's no such tile, all warps in this CTA exit.
for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i =
((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
// This warp's pointer to the C matrix data to copy memory from to shared
// memory.
const size_t gmem_idx =
(block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
const float *src_gmem_warp_stream_ptr = &C[gmem_idx];
// Stream multiple C tiles to shared memory.
#pragma unroll
for (int i = 0; i < K; i++) {
typedef int4 copy_t;
*((copy_t *)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId) =
*((copy_t *)(src_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) +
laneId);
}
__syncthreads();
// These fragments will accumulate the result of A and B matrix fragment
// multiplications along the K_GLOBAL dimension.
wmma::fragment<wmma::accumulator, M, N, K, float> c[WARP_COL_TILES]
[WARP_ROW_TILES];
// Load the C matrix tiles into fragments from shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
const float *tile_ptr =
shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N;
wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Scale the C matrix.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++) {
c[i][j].x[t] *= beta;
}
}
}
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const half *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * K_GLOBAL] +
M * K_GLOBAL * (warpId % 4) * 2)
: (&B[block_tile_j * N * K_GLOBAL] +
N * K_GLOBAL * (warpId % 4) * 2);
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy
// the B matrix.
size_t shmem_idx =
warpId < (WARPS_PER_BLOCK / 2)
? (M * (warpId % (WARPS_PER_BLOCK / 2)) * 2)
: (N * (warpId % (WARPS_PER_BLOCK / 2)) * 2 + shmem_idx_b_off);
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * K +
(laneId / CHUNK_COPY_LINE_LANES) * K_GLOBAL) +
(laneId % CHUNK_COPY_LINE_LANES);
// Shift the second half of the warp to the next row / column in the
// shared memory.
shmem_idx += laneId / CHUNK_COPY_LINE_LANES;
#pragma unroll
for (int i = 0; i < ((WARP_SIZE / 2) / CHUNK_COPY_LINES_PER_WARP) * 2;
i++) {
// Copy 16 bytes at once in each lane.
*((int4 *)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)) =
*lane_ptr;
// Advance the global memory pointer and the shared memory index.
lane_ptr =
(int4 *)((half *)lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP);
shmem_idx += CHUNK_COPY_LINES_PER_WARP;
}
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, half, wmma::row_major>
a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, half, wmma::col_major>
b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const half *tile_ptr = &shmem[shmem_idx_a][k_step * K];
wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_HALF);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const half *tile_ptr = &shmem[shmem_idx_b][k_step * K];
wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_HALF);
}
wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
}
}
}
__syncthreads();
}
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
// Uniform, point-wise transformations of ALL fragment elements by ALL
// threads in the warp are well-defined even though element indices
// within fragment storage are not defined.
for (int t = 0; t < c[i][j].num_elements; t++) c[i][j].x[t] *= alpha;
float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Now that shared memory contains all the D tiles, stream them to global
// memory.
float *dst_gmem_warp_stream_ptr = &D[gmem_idx];
#pragma unroll
for (int i = 0; i < K; i++) {
*((int4 *)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
*((int4 *)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
}
__syncthreads();
}
}
// Performs an MxNxK GEMM (C=alpha*A*B + beta*C) assuming:
// 1) Matrices are packed in memory.
// 2) M, N and K are multiples of 16.
// 3) Neither A nor B are transposed.
// Note: This is a less performant version of the compute_gemm kernel. It is
// designed for demonstration purposes only, to show CUDA WMMA API usage
// without relying on the availability of shared memory.
__global__ void simple_wmma_gemm(half *a, half *b, float *c, float *d, int m_ld,
int n_ld, int k_ld, float alpha, float beta) {
// Leading dimensions. Packed with no transpositions.
int lda = k_ld;
int ldb = k_ld;
int ldc = n_ld;
// Tile using a 2D grid
int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
int warpN = (blockIdx.y * blockDim.y + threadIdx.y);
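  // Each warp computes one WMMA_M x WMMA_N (16 x 16) tile of the output;
  // (warpM, warpN) are that tile's coordinates.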
// Declare the fragments
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::row_major>
a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major>
b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> acc_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> c_frag;
wmma::fill_fragment(acc_frag, 0.0f);
// Loop over k
for (int i = 0; i < k_ld; i += WMMA_K) {
int aCol = i;
int aRow = warpM * WMMA_M;
int bCol = warpN * N;
int bRow = i;
// Bounds checking
if (aRow < m_ld && aCol < k_ld && bRow < k_ld && bCol < n_ld) {
// Load the inputs
wmma::load_matrix_sync(a_frag, a + aCol + aRow * lda, lda);
wmma::load_matrix_sync(b_frag, b + bRow + bCol * ldb, ldb);
// Perform the matrix multiplication
wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
}
}
  // Load in the current value of c, scale it by beta, and add our result
  // scaled by alpha
int cCol = warpN * WMMA_N;
int cRow = warpM * WMMA_M;
if (cRow < m_ld && cCol < n_ld) {
wmma::load_matrix_sync(c_frag, c + cCol + cRow * ldc, ldc,
wmma::mem_row_major);
for (int i = 0; i < c_frag.num_elements; i++) {
c_frag.x[i] = alpha * acc_frag.x[i] + beta * c_frag.x[i];
}
// Store the output
wmma::store_matrix_sync(d + cCol + cRow * ldc, c_frag, ldc,
wmma::mem_row_major);
}
}
__host__ void matMultiplyOnHost(half *A, half *B, float *C, float alpha,
float beta, int numARows, int numAColumns,
int numBRows, int numBColumns, int numCRows,
int numCColumns) {
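  // B is indexed as B[j * numBRows + k]: each output column j reads a
  // contiguous length-numBRows run of B, matching the col_major B layout
  // used by the device kernels.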
for (int i = 0; i < numCRows; i++) {
for (int j = 0; j < numCColumns; j++) {
float temp = 0.0;
for (int k = 0; k < numAColumns; k++) {
temp += (float)A[i * numAColumns + k] * (float)B[j * numBRows + k];
}
C[i * numCColumns + j] = temp * alpha + beta * C[i * numCColumns + j];
}
}
}
int main(int argc, char **argv) {
printf("Initializing...\n");
int dev = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
// Tensor cores require a GPU of Volta (SM7X) architecture or higher.
if (deviceProp.major < 7) {
printf(
"cudaTensorCoreGemm requires SM 7.0 or higher to use Tensor "
"Cores. Exiting...\n");
exit(EXIT_WAIVED);
}
printf("M: %d (%d x %d)\n", M_GLOBAL, M, M_TILES);
printf("N: %d (%d x %d)\n", N_GLOBAL, N, N_TILES);
printf("K: %d (%d x %d)\n", K_GLOBAL, K, K_TILES);
half *A_h = NULL;
half *B_h = NULL;
float *C_h = NULL;
#if CPU_DEBUG
float *result_hD = NULL;
float *result_host = NULL;
#endif
A_h = (half *)malloc(sizeof(half) * M_GLOBAL * K_GLOBAL);
B_h = (half *)malloc(sizeof(half) * K_GLOBAL * N_GLOBAL);
C_h = (float *)malloc(sizeof(float) * M_GLOBAL * N_GLOBAL);
#if CPU_DEBUG
result_hD = (float *)malloc(sizeof(float) * M_GLOBAL * N_GLOBAL);
result_host = (float *)malloc(sizeof(float) * M_GLOBAL * N_GLOBAL);
#endif
half *A = NULL;
half *B = NULL;
float *C = NULL;
float *D = NULL;
checkCudaErrors(hipMalloc((void **)&A, sizeof(half) * M_GLOBAL * K_GLOBAL));
checkCudaErrors(hipMalloc((void **)&B, sizeof(half) * N_GLOBAL * K_GLOBAL));
checkCudaErrors(hipMalloc((void **)&C, sizeof(float) * M_GLOBAL * N_GLOBAL));
checkCudaErrors(hipMalloc((void **)&D, sizeof(float) * M_GLOBAL * N_GLOBAL));
assert(((unsigned long long)A) % 128 == 0);
assert(((unsigned long long)B) % 128 == 0);
assert(((unsigned long long)C) % 128 == 0);
assert(((unsigned long long)D) % 128 == 0);
init_host_matrices(A_h, B_h, C_h);
printf("Preparing data for GPU...\n");
checkCudaErrors(hipMemcpy(A, A_h, sizeof(half) * M_GLOBAL * K_GLOBAL,
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(B, B_h, sizeof(half) * N_GLOBAL * K_GLOBAL,
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(C, C_h, sizeof(float) * M_GLOBAL * N_GLOBAL,
hipMemcpyHostToDevice));
checkCudaErrors(hipMemset(D, 0, sizeof(float) * M_GLOBAL * N_GLOBAL));
enum {
// Compute the right amount of shared memory to request.
// We need shared memory to hold per-CTA C and D matrix tiles, and to cache
// per-CTA chunks
// of the A and B matrices. Therefore, the right amount to request is the
// maximum of those
// two numbers.
SHMEM_SZ = MAX(
sizeof(half) * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_HALF) * 2,
M * (BLOCK_ROW_WARPS * WARP_ROW_TILES) * N *
(BLOCK_COL_WARPS * WARP_COL_TILES) * sizeof(float))
};
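  // With the default SHARED_MEMORY_LIMIT_64K configuration (CHUNK_K = 4) this
  // evaluates to MAX(40960, 65536) = 65536 bytes: the 128 x 128 float C/D
  // tile dominates the cached A/B chunks.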
printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL);
const float alpha = 1.1f;
const float beta = 1.2f;
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start));
  // If enough shared memory is available on the GPU, use the high-performance kernel
if (deviceProp.sharedMemPerMultiprocessor >= SHMEM_SZ) {
printf("Computing... using high performance kernel compute_gemm \n");
checkCudaErrors(hipFuncSetAttribute(
compute_gemm, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
checkKernelErrors(
hipLaunchKernelGGL(( (compute_gemm), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK),
SHMEM_SZ, 0, A, B, C, D, alpha, beta)));
#if CPU_DEBUG
checkCudaErrors(hipMemcpy(result_hD, D,
sizeof(float) * M_GLOBAL * N_GLOBAL,
hipMemcpyDeviceToHost));
#endif
} else {
dim3 gridDim;
dim3 blockDim;
    // blockDim.x must be a multiple of warpSize
// 128x4 means we have 16 warps and a block computes a 64x64 output tile
blockDim.x = 128;
blockDim.y = 4;
gridDim.x = (M_GLOBAL + (WMMA_M * blockDim.x / 32 - 1)) /
(WMMA_M * blockDim.x / 32);
gridDim.y = (N_GLOBAL + WMMA_N * blockDim.y - 1) / (WMMA_N * blockDim.y);
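    // Each block covers WMMA_M * (blockDim.x / 32) = 64 rows and
    // WMMA_N * blockDim.y = 64 columns of the output; the divisions above
    // round up so the whole matrix is covered.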
printf("Computing... using simple_wmma_gemm kernel\n");
hipLaunchKernelGGL(( simple_wmma_gemm), dim3(gridDim), dim3(blockDim), 0, 0, A, B, C, D, M_GLOBAL, N_GLOBAL,
K_GLOBAL, alpha, beta);
#if CPU_DEBUG
checkCudaErrors(hipMemcpy(result_hD, D,
sizeof(float) * M_GLOBAL * N_GLOBAL,
hipMemcpyDeviceToHost));
#endif
}
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
#if CPU_DEBUG
printf("Verifying correctness of the computations...\n");
memcpy(result_host, C_h, sizeof(float) * M_GLOBAL * N_GLOBAL);
matMultiplyOnHost(A_h, B_h, result_host, alpha, beta, M_GLOBAL, K_GLOBAL,
K_GLOBAL, N_GLOBAL, M_GLOBAL, N_GLOBAL);
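  // The absolute tolerance (0.1) allows for rounding differences between the
  // host float loop and the GPU's accumulation order.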
for (int i = 0; i < N_GLOBAL * M_GLOBAL; i++) {
if (fabs(result_hD[i] - result_host[i]) > 0.1f)
printf("mismatch i=%d result_hD=%f result_host=%f\n", i, result_hD[i],
result_host[i]);
}
free(result_hD);
free(result_host);
#endif
float milliseconds = 0;
checkCudaErrors(hipEventElapsedTime(&milliseconds, start, stop));
printf("Time: %f ms\n", milliseconds);
printf("TFLOPS: %.2f\n", (((double)M_GLOBAL * N_GLOBAL * K_GLOBAL * 2) /
(milliseconds / 1000.)) /
1e12);
free(A_h);
free(B_h);
free(C_h);
checkCudaErrors(hipFree((void *)A));
checkCudaErrors(hipFree((void *)B));
checkCudaErrors(hipFree((void *)C));
checkCudaErrors(hipFree((void *)D));
return 0;
}
| 6a1741324967feeb7c0991f7ae5dbc82557540f6.cu | /*
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// CUDA sample demonstrating a GEMM computation using the Warp Matrix Multiply
// and Accumulate API introduced in CUDA 9.
// In this program, the compute_gemm kernel computes the result of a matrix
// multiplication and addition: D = alpha * A * B + beta * C. The dimensions of
// both C and D matrices are M_GLOBAL x N_GLOBAL. The A matrix is M_GLOBAL x
// K_GLOBAL (row-major), the B matrix is K_GLOBAL x N_GLOBAL (column-major). In
// that kernel, each CTA computes one 128 x 128 tile of the resulting matrix per
// iteration. When the tile is computed, the CTA stores it to the global memory
// and begins a new iteration, selecting a new 128 x 128 tile to compute.
// Each CTA consists of eight warps. For the 128 x 128 tile, each warp computes
// eight 16 x 16 subtiles, organized in a 2 x 4 two-dimensional array. Warps
// compute the 16 x 16 subtiles using nvcuda::wmma::mma_sync operations by
// moving through the K_GLOBAL dimension of the A and B matrices and
// accumulating the intermediate result in the local thread state.
// There are a number of simple optimizations used in the algorithm:
// - The CTA copies the 128 x 128 tile of the C matrix from the global memory to
// shared memory. After that is done, each warp loads the C matrix fragments
// from shared memory, thus avoiding a random global memory access.
// - On each internal iteration, the CTA copies a portion of the A and B
// matrices from
// global memory to shared memory. After that, all warps in the CTA reuse the
// A and B data from shared memory, thus reducing the number of data copies
// from global memory.
// - The portions of the A and B matrices are stored in shared memory with an
// additional
// padding (skew) to reduce the number of shared memory access bank conflicts.
// (See a detailed explanation near the SKEW_HALF macro definition.)
// - When the CTA finishes computing the tiles of the resulting matrix, each
// warp stores
// its subtiles to shared memory. The CTA then copies the shared memory
// contents to global memory, again avoiding redundant random global memory
// accesses.
// - Note that the CTA tile size is chosen to maximize the GPU register
// utilization,
// but carefully enough to avoid local memory use.
#include <assert.h>
#include <cuda.h>
#include <mma.h>
#include <stdio.h>
// helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>
// Externally configurable parameters.
#ifndef CPU_DEBUG
// Set this to 1 to verify the correctness of the GPU-computed matrix.
#define CPU_DEBUG 0
#endif
#ifndef SHARED_MEMORY_LIMIT_64K
// Set this to 0 to use more than 64 Kb of shared memory to cache data, to
// improve the performance of the computations on GPU.
// Note that you need a GPU that can have more than 64 Kb of shared memory
// per multiprocessor.
#define SHARED_MEMORY_LIMIT_64K 1
#endif
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 16
#define N 16
#define K 16
#define WMMA_M 16
#define WMMA_N 16
#define WMMA_K 16
// GEMM configuration.
#define M_TILES 256
#define N_TILES 256
#define K_TILES 256
#define M_GLOBAL (M * M_TILES)
#define N_GLOBAL (N * N_TILES)
#define K_GLOBAL (K * K_TILES)
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#if SHARED_MEMORY_LIMIT_64K
// With only 64 Kb shared memory available, we can fit two 8-tile chunks of
// the A and B matrix data, that are 16 * 16 * 8 * 8 * 2 = 32 Kb each
// (i.e. two 8x8 arrays of tiles of 16x16 half-typed elements per CTA).
// But we cannot account the 8 Kb total skew overhead, without which the
// performance would be severely impacted. So we choose to reduce the chunk size
// in half, i.e. the amount of A and B matrix data we cache in shared memory.
// Accordingly, this doubles the number of outer iterations across the global K
// dimension, which only slightly impacts the performance.
#define CHUNK_K 4
#else
#define CHUNK_K 8
#endif
#define CHUNK_LINE_BYTES (CHUNK_K * K * sizeof(half))
#define WARP_COPY_BYTES (WARP_SIZE * sizeof(int4))
#define CHUNK_COPY_LINES_PER_WARP (WARP_COPY_BYTES / CHUNK_LINE_BYTES)
#define CHUNK_COPY_LINE_LANES (WARP_SIZE / CHUNK_COPY_LINES_PER_WARP)
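// With the default SHARED_MEMORY_LIMIT_64K setting (CHUNK_K = 4):
// CHUNK_LINE_BYTES = 128 and WARP_COPY_BYTES = 512, so each warp copies
// 4 lines per step and 8 lanes cooperate on each 128-byte line.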
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
// The macro below is used to shift rows of the A matrix and columns of the B
// matrix in shared memory to minimize possible bank conflicts. Before
// performing the nvcuda::wmma::mma_sync operation, the warp must load the
// matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the
// memory access pattern is not specified for that function, each lane in the
// warp can read one or multiple matrix elements from different matrix rows or
// columns. For shared memory, such access can result in bank conflicts if
// different rows / columns of the matrix map to the same bank. By shifting each
// row and column by a few bytes, we make sure that they map to different banks,
// thus reducing the number of possible bank conflicts. The number of 16
// two-byte "half" elements is chosen as the minimum possible shift because we
// must keep each row and column 256-bit aligned, as required by
// nvcuda::wmma::load_matrix_sync.
#define SKEW_HALF 16
#define checkKernelErrors(expr) \
do { \
expr; \
\
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
cudaGetErrorString(__err)); \
abort(); \
} \
} while (0)
using namespace nvcuda;
__host__ void init_host_matrices(half *a, half *b, float *c) {
for (int i = 0; i < M_GLOBAL; i++) {
for (int j = 0; j < K_GLOBAL; j++) {
a[i * K_GLOBAL + j] = (half)(rand() % 3);
}
}
for (int i = 0; i < N_GLOBAL; i++) {
for (int j = 0; j < K_GLOBAL; j++) {
b[i * K_GLOBAL + j] = (half)(rand() % 3);
}
}
for (int t = 0; t < M_GLOBAL * N_GLOBAL; t++) {
c[t] = (float)(rand() % 3);
}
}
__global__ void compute_gemm(const half *A, const half *B, const float *C,
float *D, float alpha, float beta) {
extern __shared__ half shmem[][CHUNK_K * K + SKEW_HALF];
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = BLOCK_COL_TILES * M;
// This pointer is used to access the C and D matrix tiles this warp computes.
float *shmem_warp_tile_ptr = (float *)&shmem[0][0] +
(warpId / 2) * SHMEM_STRIDE * K * 2 +
(warpId % 2) * SHMEM_OFFSET;
// This pointer is used to stream the C and D matrices block-wide tile to and
// from shared memory.
float *shmem_warp_stream_ptr =
(float *)&shmem[0][0] + warpId * SHMEM_STRIDE * K;
// Adjust the beta scaler, as it'll be multiplied by alpha at the end of
// each tile computation. Technically this is not generally correct (may
// result in a loss of precision). Zero still needs to be specially handled
// though.
beta /= alpha;
// Each CTA slides along the 128 x 128 tiles from the top left corner of the
// matrix to the right and down, and selects the next tile to compute. Once
// there's no such tile, all warps in this CTA exit.
for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i =
((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;
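    // block_pos selects one 128 x 128 output tile (an 8 x 8 grid of 16 x 16
    // WMMA tiles); block_tile_i / block_tile_j are the row / column of its
    // top-left 16 x 16 tile within the M_TILES x N_TILES tile grid.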
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
// This warp's pointer to the C matrix data to copy memory from to shared
// memory.
const size_t gmem_idx =
(block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
const float *src_gmem_warp_stream_ptr = &C[gmem_idx];
// Stream multiple C tiles to shared memory.
#pragma unroll
for (int i = 0; i < K; i++) {
typedef int4 copy_t;
*((copy_t *)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId) =
*((copy_t *)(src_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) +
laneId);
}
__syncthreads();
// These fragments will accumulate the result of A and B matrix fragment
// multiplications along the K_GLOBAL dimension.
wmma::fragment<wmma::accumulator, M, N, K, float> c[WARP_COL_TILES]
[WARP_ROW_TILES];
// Load the C matrix tiles into fragments from shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
const float *tile_ptr =
shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N;
wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Scale the C matrix.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++) {
c[i][j].x[t] *= beta;
}
}
}
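    // c now holds (beta / alpha) * C, because beta was pre-divided by alpha;
    // the single multiply by alpha before the final store restores
    // alpha * A * B + beta * C.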
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const half *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * K_GLOBAL] +
M * K_GLOBAL * (warpId % 4) * 2)
: (&B[block_tile_j * N * K_GLOBAL] +
N * K_GLOBAL * (warpId % 4) * 2);
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy
// the B matrix.
size_t shmem_idx =
warpId < (WARPS_PER_BLOCK / 2)
? (M * (warpId % (WARPS_PER_BLOCK / 2)) * 2)
: (N * (warpId % (WARPS_PER_BLOCK / 2)) * 2 + shmem_idx_b_off);
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * K +
(laneId / CHUNK_COPY_LINE_LANES) * K_GLOBAL) +
(laneId % CHUNK_COPY_LINE_LANES);
// Shift the second half of the warp to the next row / column in the
// shared memory.
shmem_idx += laneId / CHUNK_COPY_LINE_LANES;
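      // With the default CHUNK_K = 4, one copy line is CHUNK_K * K = 64 halfs
      // (128 bytes): each lane moves one int4 (16 bytes), 8 lanes cover a
      // line, and a warp moves CHUNK_COPY_LINES_PER_WARP = 4 lines per
      // iteration of the loop below.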
#pragma unroll
for (int i = 0; i < ((WARP_SIZE / 2) / CHUNK_COPY_LINES_PER_WARP) * 2;
i++) {
// Copy 16 bytes at once in each lane.
*((int4 *)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)) =
*lane_ptr;
// Advance the global memory pointer and the shared memory index.
lane_ptr =
(int4 *)((half *)lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP);
shmem_idx += CHUNK_COPY_LINES_PER_WARP;
}
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, half, wmma::row_major>
a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, half, wmma::col_major>
b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const half *tile_ptr = &shmem[shmem_idx_a][k_step * K];
wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_HALF);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const half *tile_ptr = &shmem[shmem_idx_b][k_step * K];
wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_HALF);
}
wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
}
}
}
__syncthreads();
}
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
// Uniform, point-wise transformations of ALL fragment elements by ALL
// threads in the warp are well-defined even though element indices
// within fragment storage are not defined.
for (int t = 0; t < c[i][j].num_elements; t++) c[i][j].x[t] *= alpha;
float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Now that shared memory contains all the D tiles, stream them to global
// memory.
float *dst_gmem_warp_stream_ptr = &D[gmem_idx];
#pragma unroll
for (int i = 0; i < K; i++) {
*((int4 *)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
*((int4 *)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
}
__syncthreads();
}
}
// Performs an MxNxK GEMM (C=alpha*A*B + beta*C) assuming:
// 1) Matrices are packed in memory.
// 2) M, N and K are multiples of 16.
// 3) Neither A nor B are transposed.
// Note: This is a less performant version of the compute_gemm kernel. It is
// designed for demonstration purposes only, to show CUDA WMMA API usage
// without relying on the availability of shared memory.
__global__ void simple_wmma_gemm(half *a, half *b, float *c, float *d, int m_ld,
int n_ld, int k_ld, float alpha, float beta) {
// Leading dimensions. Packed with no transpositions.
int lda = k_ld;
int ldb = k_ld;
int ldc = n_ld;
// Tile using a 2D grid
int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
int warpN = (blockIdx.y * blockDim.y + threadIdx.y);
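  // Each warp computes one WMMA_M x WMMA_N (16 x 16) tile of the output;
  // (warpM, warpN) are that tile's coordinates.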
// Declare the fragments
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::row_major>
a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major>
b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> acc_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> c_frag;
wmma::fill_fragment(acc_frag, 0.0f);
// Loop over k
for (int i = 0; i < k_ld; i += WMMA_K) {
int aCol = i;
int aRow = warpM * WMMA_M;
int bCol = warpN * N;
int bRow = i;
// Bounds checking
if (aRow < m_ld && aCol < k_ld && bRow < k_ld && bCol < n_ld) {
// Load the inputs
wmma::load_matrix_sync(a_frag, a + aCol + aRow * lda, lda);
wmma::load_matrix_sync(b_frag, b + bRow + bCol * ldb, ldb);
// Perform the matrix multiplication
wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
}
}
  // Load in the current value of c, scale it by beta, and add our result
  // scaled by alpha
int cCol = warpN * WMMA_N;
int cRow = warpM * WMMA_M;
if (cRow < m_ld && cCol < n_ld) {
wmma::load_matrix_sync(c_frag, c + cCol + cRow * ldc, ldc,
wmma::mem_row_major);
for (int i = 0; i < c_frag.num_elements; i++) {
c_frag.x[i] = alpha * acc_frag.x[i] + beta * c_frag.x[i];
}
// Store the output
wmma::store_matrix_sync(d + cCol + cRow * ldc, c_frag, ldc,
wmma::mem_row_major);
}
}
__host__ void matMultiplyOnHost(half *A, half *B, float *C, float alpha,
float beta, int numARows, int numAColumns,
int numBRows, int numBColumns, int numCRows,
int numCColumns) {
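  // B is indexed as B[j * numBRows + k]: each output column j reads a
  // contiguous length-numBRows run of B, matching the col_major B layout
  // used by the device kernels.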
for (int i = 0; i < numCRows; i++) {
for (int j = 0; j < numCColumns; j++) {
float temp = 0.0;
for (int k = 0; k < numAColumns; k++) {
temp += (float)A[i * numAColumns + k] * (float)B[j * numBRows + k];
}
C[i * numCColumns + j] = temp * alpha + beta * C[i * numCColumns + j];
}
}
}
int main(int argc, char **argv) {
printf("Initializing...\n");
int dev = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
// Tensor cores require a GPU of Volta (SM7X) architecture or higher.
if (deviceProp.major < 7) {
printf(
"cudaTensorCoreGemm requires SM 7.0 or higher to use Tensor "
"Cores. Exiting...\n");
exit(EXIT_WAIVED);
}
printf("M: %d (%d x %d)\n", M_GLOBAL, M, M_TILES);
printf("N: %d (%d x %d)\n", N_GLOBAL, N, N_TILES);
printf("K: %d (%d x %d)\n", K_GLOBAL, K, K_TILES);
half *A_h = NULL;
half *B_h = NULL;
float *C_h = NULL;
#if CPU_DEBUG
float *result_hD = NULL;
float *result_host = NULL;
#endif
A_h = (half *)malloc(sizeof(half) * M_GLOBAL * K_GLOBAL);
B_h = (half *)malloc(sizeof(half) * K_GLOBAL * N_GLOBAL);
C_h = (float *)malloc(sizeof(float) * M_GLOBAL * N_GLOBAL);
#if CPU_DEBUG
result_hD = (float *)malloc(sizeof(float) * M_GLOBAL * N_GLOBAL);
result_host = (float *)malloc(sizeof(float) * M_GLOBAL * N_GLOBAL);
#endif
half *A = NULL;
half *B = NULL;
float *C = NULL;
float *D = NULL;
checkCudaErrors(cudaMalloc((void **)&A, sizeof(half) * M_GLOBAL * K_GLOBAL));
checkCudaErrors(cudaMalloc((void **)&B, sizeof(half) * N_GLOBAL * K_GLOBAL));
checkCudaErrors(cudaMalloc((void **)&C, sizeof(float) * M_GLOBAL * N_GLOBAL));
checkCudaErrors(cudaMalloc((void **)&D, sizeof(float) * M_GLOBAL * N_GLOBAL));
assert(((unsigned long long)A) % 128 == 0);
assert(((unsigned long long)B) % 128 == 0);
assert(((unsigned long long)C) % 128 == 0);
assert(((unsigned long long)D) % 128 == 0);
init_host_matrices(A_h, B_h, C_h);
printf("Preparing data for GPU...\n");
checkCudaErrors(cudaMemcpy(A, A_h, sizeof(half) * M_GLOBAL * K_GLOBAL,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(B, B_h, sizeof(half) * N_GLOBAL * K_GLOBAL,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(C, C_h, sizeof(float) * M_GLOBAL * N_GLOBAL,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemset(D, 0, sizeof(float) * M_GLOBAL * N_GLOBAL));
enum {
// Compute the right amount of shared memory to request.
// We need shared memory to hold per-CTA C and D matrix tiles, and to cache
// per-CTA chunks
// of the A and B matrices. Therefore, the right amount to request is the
// maximum of those
// two numbers.
SHMEM_SZ = MAX(
sizeof(half) * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_HALF) * 2,
M * (BLOCK_ROW_WARPS * WARP_ROW_TILES) * N *
(BLOCK_COL_WARPS * WARP_COL_TILES) * sizeof(float))
};
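  // With the default SHARED_MEMORY_LIMIT_64K configuration (CHUNK_K = 4) this
  // evaluates to MAX(40960, 65536) = 65536 bytes: the 128 x 128 float C/D
  // tile dominates the cached A/B chunks.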
printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL);
const float alpha = 1.1f;
const float beta = 1.2f;
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start));
  // If enough shared memory is available on the GPU, use the high-performance kernel
if (deviceProp.sharedMemPerMultiprocessor >= SHMEM_SZ) {
printf("Computing... using high performance kernel compute_gemm \n");
checkCudaErrors(cudaFuncSetAttribute(
compute_gemm, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
checkKernelErrors(
(compute_gemm<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK,
SHMEM_SZ>>>(A, B, C, D, alpha, beta)));
#if CPU_DEBUG
checkCudaErrors(cudaMemcpy(result_hD, D,
sizeof(float) * M_GLOBAL * N_GLOBAL,
cudaMemcpyDeviceToHost));
#endif
} else {
dim3 gridDim;
dim3 blockDim;
    // blockDim.x must be a multiple of warpSize
// 128x4 means we have 16 warps and a block computes a 64x64 output tile
blockDim.x = 128;
blockDim.y = 4;
gridDim.x = (M_GLOBAL + (WMMA_M * blockDim.x / 32 - 1)) /
(WMMA_M * blockDim.x / 32);
gridDim.y = (N_GLOBAL + WMMA_N * blockDim.y - 1) / (WMMA_N * blockDim.y);
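    // Each block covers WMMA_M * (blockDim.x / 32) = 64 rows and
    // WMMA_N * blockDim.y = 64 columns of the output; the divisions above
    // round up so the whole matrix is covered.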
printf("Computing... using simple_wmma_gemm kernel\n");
simple_wmma_gemm<<<gridDim, blockDim>>>(A, B, C, D, M_GLOBAL, N_GLOBAL,
K_GLOBAL, alpha, beta);
#if CPU_DEBUG
checkCudaErrors(cudaMemcpy(result_hD, D,
sizeof(float) * M_GLOBAL * N_GLOBAL,
cudaMemcpyDeviceToHost));
#endif
}
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
#if CPU_DEBUG
printf("Verifying correctness of the computations...\n");
memcpy(result_host, C_h, sizeof(float) * M_GLOBAL * N_GLOBAL);
matMultiplyOnHost(A_h, B_h, result_host, alpha, beta, M_GLOBAL, K_GLOBAL,
K_GLOBAL, N_GLOBAL, M_GLOBAL, N_GLOBAL);
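  // The absolute tolerance (0.1) allows for rounding differences between the
  // host float loop and the GPU's accumulation order.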
for (int i = 0; i < N_GLOBAL * M_GLOBAL; i++) {
if (fabs(result_hD[i] - result_host[i]) > 0.1f)
printf("mismatch i=%d result_hD=%f result_host=%f\n", i, result_hD[i],
result_host[i]);
}
free(result_hD);
free(result_host);
#endif
float milliseconds = 0;
checkCudaErrors(cudaEventElapsedTime(&milliseconds, start, stop));
printf("Time: %f ms\n", milliseconds);
printf("TFLOPS: %.2f\n", (((double)M_GLOBAL * N_GLOBAL * K_GLOBAL * 2) /
(milliseconds / 1000.)) /
1e12);
free(A_h);
free(B_h);
free(C_h);
checkCudaErrors(cudaFree((void *)A));
checkCudaErrors(cudaFree((void *)B));
checkCudaErrors(cudaFree((void *)C));
checkCudaErrors(cudaFree((void *)D));
return 0;
}
|
4fa55602746280d01a984b3c34f86f430711aa96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11) {
for (int i=0; i < var_1; ++i) {
comp += var_2 / -1.2786E36f * floorf((var_3 - +1.2688E-37f));
comp = (var_4 / +1.1791E-12f + var_5);
float tmp_1 = -1.5259E-37f / (var_6 - (-1.3554E-43f + (var_7 * var_8 + var_9)));
comp += tmp_1 / (+1.2461E-8f / (+1.7727E-35f + atanf(var_10 + var_11)));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12);
hipDeviceSynchronize();
return 0;
}
| 4fa55602746280d01a984b3c34f86f430711aa96.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11) {
for (int i=0; i < var_1; ++i) {
comp += var_2 / -1.2786E36f * floorf((var_3 - +1.2688E-37f));
comp = (var_4 / +1.1791E-12f + var_5);
float tmp_1 = -1.5259E-37f / (var_6 - (-1.3554E-43f + (var_7 * var_8 + var_9)));
comp += tmp_1 / (+1.2461E-8f / (+1.7727E-35f + atanf(var_10 + var_11)));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12);
cudaDeviceSynchronize();
return 0;
}
|
519ad1eb8e9d2b1c4a8fde628581455ded42d899.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void vector_add(float *c, const float *a, const float *b, const int n) {
int i = threadIdx.x; /* <--- Oops! something is not right here! */
if (i<n) {
c[i] = a[i] + b[i];
}
} | 519ad1eb8e9d2b1c4a8fde628581455ded42d899.cu | #include "includes.h"
__global__ void vector_add(float *c, const float *a, const float *b, const int n) {
int i = threadIdx.x; /* <--- Oops! something is not right here! */
if (i<n) {
c[i] = a[i] + b[i];
}
} |
6584d3cd694b7ca4edfe9367de7472bc2d6715af.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include "ColorHelpers.cu"
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __HIPCC__
#define __HIPCC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
extern "C"
{
__constant__ int D_COUNT;
__constant__ int D_X_SIZE;
__constant__ unsigned int D_BACKGROUND_COLOR_1;
__constant__ unsigned int D_BACKGROUND_COLOR_2;
__constant__ unsigned int D_MARKER_COLOR;
__constant__ int D_GRID_STEP;
__constant__ unsigned int D_GRID_COLOR;
//kernel code
__global__ void SpikeRasterObserverKernel(
unsigned int* pixels,
float *spikeValues,
int offset,
int ringArrayStart,
int gridStepCounter,
int renderingMethod,
float minValue,
float maxValue)
{
int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x;
if(globalThreadId < D_COUNT)
{
int newest = ringArrayStart;
unsigned int colorToWrite;
if(spikeValues[globalThreadId + offset] == 0)
{
colorToWrite = (globalThreadId % 2 == 1) * D_BACKGROUND_COLOR_1 + (globalThreadId % 2 == 0) * D_BACKGROUND_COLOR_2;
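            // Branchless select: exactly one of the two (globalThreadId % 2)
            // predicates is 1, so odd rows use D_BACKGROUND_COLOR_1 and even
            // rows use D_BACKGROUND_COLOR_2.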
if(gridStepCounter == 0)
{
colorToWrite = D_GRID_COLOR;
}
}
else
{
colorToWrite = float_to_uint_rgba(spikeValues[globalThreadId + offset],
renderingMethod, 0 /*scale=Linear*/, minValue, maxValue);
}
pixels[globalThreadId * D_X_SIZE + newest] = colorToWrite;
pixels[globalThreadId * D_X_SIZE + (newest + 1) % D_X_SIZE] = D_MARKER_COLOR;
}
}
} | 6584d3cd694b7ca4edfe9367de7472bc2d6715af.cu | #include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include "ColorHelpers.cu"
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
extern "C"
{
__constant__ int D_COUNT;
__constant__ int D_X_SIZE;
__constant__ unsigned int D_BACKGROUND_COLOR_1;
__constant__ unsigned int D_BACKGROUND_COLOR_2;
__constant__ unsigned int D_MARKER_COLOR;
__constant__ int D_GRID_STEP;
__constant__ unsigned int D_GRID_COLOR;
//kernel code
__global__ void SpikeRasterObserverKernel(
unsigned int* pixels,
float *spikeValues,
int offset,
int ringArrayStart,
int gridStepCounter,
int renderingMethod,
float minValue,
float maxValue)
{
int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x;
if(globalThreadId < D_COUNT)
{
int newest = ringArrayStart;
unsigned int colorToWrite;
if(spikeValues[globalThreadId + offset] == 0)
{
colorToWrite = (globalThreadId % 2 == 1) * D_BACKGROUND_COLOR_1 + (globalThreadId % 2 == 0) * D_BACKGROUND_COLOR_2;
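            // Branchless select: exactly one of the two (globalThreadId % 2)
            // predicates is 1, so odd rows use D_BACKGROUND_COLOR_1 and even
            // rows use D_BACKGROUND_COLOR_2.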
if(gridStepCounter == 0)
{
colorToWrite = D_GRID_COLOR;
}
}
else
{
colorToWrite = float_to_uint_rgba(spikeValues[globalThreadId + offset],
renderingMethod, 0 /*scale=Linear*/, minValue, maxValue);
}
pixels[globalThreadId * D_X_SIZE + newest] = colorToWrite;
pixels[globalThreadId * D_X_SIZE + (newest + 1) % D_X_SIZE] = D_MARKER_COLOR;
}
}
} |
b6ebfddd64ca0cdfd0ac180b7d24cdb0fb4e2305.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
// size_t=unsigned int;
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
//int idx = x + y * numCols; // Important! 0.028352 ms
int idx = y * numRows + x;
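  // Note: y * numRows + x matches the usual row-major offset y * numCols + x
  // only for square images (numRows == numCols).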
uchar4 rgba = rgbaImage[idx];
greyImage[idx] = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(16, 16, 1); //TODO
const dim3 gridSize( (numCols+15)/16, (numRows+15)/16, 1); //0.028024 ms.
//or use ceil
//const dim3 blockSize(32, 32, 1); //TODO
//const dim3 gridSize( (numCols+31)/32, (numRows+31)/32, 1); //0.037536 ms.
//const dim3 blockSize(16, 16, 1); //TODO
//const dim3 gridSize( (numRows+15)/16, (numCols+15)/16, 1); //0.049
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| b6ebfddd64ca0cdfd0ac180b7d24cdb0fb4e2305.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
// size_t=unsigned int;
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
//int idx = x + y * numCols; // Important! 0.028352 ms
int idx = y * numRows + x;
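  // Note: y * numRows + x matches the usual row-major offset y * numCols + x
  // only for square images (numRows == numCols).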
uchar4 rgba = rgbaImage[idx];
greyImage[idx] = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(16, 16, 1); //TODO
const dim3 gridSize( (numCols+15)/16, (numRows+15)/16, 1); //0.028024 ms.
//or use ceil
//const dim3 blockSize(32, 32, 1); //TODO
//const dim3 gridSize( (numCols+31)/32, (numRows+31)/32, 1); //0.037536 ms.
//const dim3 blockSize(16, 16, 1); //TODO
//const dim3 gridSize( (numRows+15)/16, (numCols+15)/16, 1); //0.049
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
9ad842d9608b09c63faf435ef229abf77a8e19f6.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <cmath>
#include <float.h>
#include <string>
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "utility.cuh"
#include "parameterData.h"
#include "liggghtsData.h"
#include "compartment.cuh"
using namespace std;
#define TWOWAYCOUPLING false
// MACROS
// Calling macros for error check and dump data to files to VaribleName.txt
#define DUMP(varName) dumpData(varName, #varName)
#define DUMP2D(varName) dump2DData(varName, #varName)
#define DUMP3D(varName) dump3DData(varName, #varName)
#define DUMPCSV(varName) dumpCSV(varName, #varName)
#define DUMP2DCSV(varName) dump2DCSV(varName, #varName)
#define DUMP3DCSV(varName) dump3DCSV(varName, #varName)
#define DUMPDIACSV(time, dia) dumpDiaCSV(time, dia, #dia)
#define DUMP2DCSV4MATLAB(varName) dump2DCSV4Matlab(varName, #varName)
// extern __shared__ double *d_sMeshXY, *d_ssMeshXY;
// ==================================== INITIALIZATION KERNEL ===================================================
__global__ void initialization_kernel(double *d_vs, double *d_vss, size_t size2D, double fsVolCoeff, double ssVolCoeff, double fsVolBase, double ssVolBase, double *d_sAgg,
double *d_ssAgg, int *d_sAggregationCheck, int *d_ssAggregationCheck, double *d_sLow, double *d_ssLow, double *d_sHigh, double *d_ssHigh,
double *d_sMeshXY, double *d_ssMeshXY, int *d_sLoc, int *d_ssLoc, int *d_sInd, int *d_ssInd, double *d_sBreak, double *d_ssBreak,
int *d_sLocBreak, int *d_ssLocBreak, int *d_sCheckB, int*d_ssCheckB, int *d_sIndB, int *d_ssIndB)
{
int idx = threadIdx.x;
int bix = blockIdx.x;
int bdx = blockDim.x;
// __shared__ double d_sMeshXY[256], d_ssMeshXY[256];
d_sMeshXY[bdx * bix + idx] = d_vs[bix];
d_ssMeshXY[bdx * bix + idx] = d_vss[idx];
d_sAgg[bdx * bix + idx] = d_vs[idx] + d_vs[bix];
d_ssAgg[bdx * bix + idx] = d_vss[idx] + d_vss[bix];
d_sAggregationCheck[bdx * bix + idx] = d_sAgg[bdx * bix + idx] <= d_vs[bdx - 1] ? 1 : 0;
d_ssAggregationCheck[bdx * bix + idx] = d_ssAgg[bdx * bix + idx] <= d_vss[bdx - 1] ? 1 : 0;
d_sLow [bdx * bix + idx] = d_sMeshXY[bdx * bix + idx];
d_ssLow[bdx * bix + idx] = d_ssMeshXY[bdx * bix + idx];
__syncthreads();
    if (bix < bdx -1)
    {
        d_sHigh[bdx * bix + idx] = d_sMeshXY[bdx * (bix + 1) + idx];
    }
    d_ssHigh[bdx * bix + idx] = d_ssMeshXY[bdx * (bix) + idx +1];
    d_sHigh[bdx * (bdx -1) + idx] = 0.0;
    d_ssHigh[bdx * bix + bdx - 1] = 0.0;
d_sLoc[bdx * bix + idx] = floor(log(d_sAgg[bdx * bix + idx] / fsVolCoeff) / log(fsVolBase) + 1);
d_ssLoc[bdx * bix + idx] = floor(log(d_ssAgg[bdx * bix + idx] / ssVolCoeff) / log(ssVolBase) + 1);
d_sInd[bdx * bix + idx] = (idx <= bix) ? (bix + 1) : (idx + 1);
d_ssInd[bdx * bix + idx] = (idx <= bix) ? (bix + 1) : (idx + 1);
__syncthreads();
double value = d_vs[idx] - d_vs[bix];
double value1 = d_vss[idx] - d_vss[bix];
d_sBreak[bdx * bix + idx] = value < 0.0 ? 0.0 : value;
d_ssBreak[bdx * bix + idx] = value1 < 0.0 ? 0.0 : value1;
d_sLocBreak[bdx * bix + idx] = (d_sBreak[bdx * idx + bix] == 0) ? 0 : (floor(log(d_sBreak[bdx * idx + bix] / fsVolCoeff) / log(fsVolBase) + 1));
d_ssLocBreak[bdx * bix + idx] = (d_ssBreak[bdx * idx + bix] == 0) ? 0 : (floor(log(d_ssBreak[bdx * idx + bix] / ssVolCoeff) / log(ssVolBase) + 1));
__syncthreads();
d_sCheckB[bdx * bix + idx] = d_sLocBreak[bdx * bix + idx] >= 1 ? 1 : 0;
d_ssCheckB[bdx * bix + idx] = d_ssLocBreak[bdx * bix + idx] >= 1 ? 1 : 0;
d_sIndB[bdx * bix + idx] = d_sLocBreak[bdx * bix + idx];
d_ssIndB[bdx * bix + idx] = d_ssLocBreak[bdx * bix + idx];
if (d_sIndB[bdx * bix + idx] < 1)
d_sIndB[bdx * bix + idx] = bdx + 1;
if (d_ssIndB[bdx * bix + idx] < 1)
d_ssIndB[bdx * bix + idx] = bdx + 1;
}
// ================================= COMPARTMENT LAUNCH KERNEL ============================================================
__global__ void launchCompartment(CompartmentIn *d_compartmentIn, PreviousCompartmentIn *d_prevCompInData, CompartmentOut *d_compartmentOut, CompartmentDEMIn *d_compartmentDEMIn,
CompartmentVar *d_compVar, AggregationCompVar *d_aggCompVar, BreakageCompVar *d_brCompVar, double time, double timeStep, double initialTime,
double *d_formationThroughAggregation, double *d_depletionThroughAggregation, double *d_formationThroughBreakage, double *d_depletionThroughBreakage,
double *d_fAllCompartments, double *d_flAllCompartments, double *d_fgAllCompartments, double *d_liquidAdditionRateAllCompartments,
unsigned int size2D, unsigned int size3D, unsigned int size4D, double *d_fIn, double initPorosity, double demTimeStep, int nFirstSolidBins, int nSecondSolidBins,
double granulatorLength, double partticleResTime, double premixTime, double liqAddTime, double consConst, double minPorosity, int nCompartments,
double granSatFactor, double aggKernelConst, double brkKernelConst)
{
int bix = blockIdx.x;
int bdx = blockDim.x;
int tix = threadIdx.x;
int idx3 = bix * bdx + tix;
int s1 = (int) floorf(tix / nFirstSolidBins);
int ss1 = tix % nSecondSolidBins;
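    // bix indexes the compartment and tix the flattened (s1, ss1) solid-bin
    // pair within it; idx3 is the corresponding index into the size3D arrays.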
if (tix ==0)
{
d_compartmentOut->formationThroughAggregation[bix] = 0.0;
d_compartmentOut->depletionThroughAggregation[bix] = 0.0;
d_compartmentOut->formationThroughBreakage[bix] = 0.0;
d_compartmentOut->depletionThroughBreakage[bix] = 0.0;
}
// int tiy = threadIdx.y;
// int idx = bix * bdx * bdy + tiy * bdx + tix;
__syncthreads();
//if (tiy == 0)
d_compartmentIn->fAll[idx3] = d_fAllCompartments[idx3];
d_compartmentIn->fLiquid[idx3] = d_flAllCompartments[idx3];
d_compartmentIn->fGas[idx3] = d_fgAllCompartments[idx3];
d_compartmentIn->liquidAdditionRate[bix] = d_liquidAdditionRateAllCompartments[bix];
if (bix == 0)
{
d_prevCompInData->fAllPreviousCompartment[idx3] = 0.0;
d_prevCompInData->flPreviousCompartment[idx3] = 0.0;
d_prevCompInData->fgPreviousCompartment[idx3] = 0.0;
d_prevCompInData->fAllComingIn[idx3] = d_fIn[tix];
d_prevCompInData->fgComingIn[idx3] = 0.0;
double value = initPorosity * timeStep;
d_prevCompInData->fgComingIn[idx3] = d_fIn[tix] * (d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]) * value;
}
else
{
d_prevCompInData->fAllPreviousCompartment[idx3] = d_fAllCompartments[(bix - 1) * bdx + tix];
d_prevCompInData->flPreviousCompartment[idx3] = d_flAllCompartments[(bix - 1) * bdx + tix];
d_prevCompInData->fgPreviousCompartment[idx3] = d_fgAllCompartments[(bix - 1) * bdx + tix];
d_prevCompInData->fAllComingIn[idx3] = 0.0;
d_prevCompInData->fgComingIn[idx3] = 0.0;
}
if (fabs(d_compartmentIn->fAll[idx3]) > 1e-16)
{
d_compartmentOut->liquidBins[idx3] = d_compartmentIn->fLiquid[idx3] / d_compartmentIn->fAll[idx3];
d_compartmentOut->gasBins[idx3] = d_compartmentIn->fGas[idx3] / d_compartmentIn->fAll[idx3];
}
else
{
d_compartmentOut->liquidBins[idx3] = 0.0;
d_compartmentOut->gasBins[idx3] = 0.0;
}
d_aggCompVar->depletionThroughAggregation[idx3] = 0.0;
d_aggCompVar->depletionOfGasThroughAggregation[idx3] = 0.0;
d_aggCompVar->depletionOfLiquidThroughAggregation[idx3] = 0.0;
d_aggCompVar->birthThroughAggregation[idx3] = 0.0;
d_aggCompVar->firstSolidBirthThroughAggregation[idx3] = 0.0;
d_aggCompVar->secondSolidBirthThroughAggregation[idx3] = 0.0;
d_aggCompVar->liquidBirthThroughAggregation[idx3] = 0.0;
d_aggCompVar->gasBirthThroughAggregation[idx3] = 0.0;
d_aggCompVar->firstSolidVolumeThroughAggregation[idx3] = 0.0;
d_aggCompVar->secondSolidVolumeThroughAggregation[idx3] = 0.0;
d_aggCompVar->birthAggLowHigh[idx3] = 0.0;
d_aggCompVar->birthAggLowHighLiq[idx3] = 0.0;
d_aggCompVar->birthAggLowHighGas[idx3] = 0.0;
d_aggCompVar->birthAggHighLow[idx3] = 0.0;
d_aggCompVar->birthAggHighLowLiq[idx3] = 0.0;
d_aggCompVar->birthAggHighLowGas[idx3] = 0.0;
d_aggCompVar->birthAggLowLow[idx3] = 0.0;
d_aggCompVar->birthAggLowLowLiq[idx3] = 0.0;
d_aggCompVar->birthAggLowLowGas[idx3] = 0.0;
d_aggCompVar->birthAggHighHigh[idx3] = 0.0;
d_aggCompVar->birthAggHighHighLiq[idx3] = 0.0;
d_aggCompVar->birthAggHighHighGas[idx3] = 0.0;
d_aggCompVar->formationThroughAggregationCA[idx3] = 0.0;
d_aggCompVar->formationOfLiquidThroughAggregationCA[idx3] = 0.0;
d_aggCompVar->formationOfGasThroughAggregationCA[idx3] = 0.0;
d_brCompVar->depletionThroughBreakage[idx3] = 0.0;
d_brCompVar->depletionOfLiquidthroughBreakage[idx3] = 0.0;
d_brCompVar->depletionOfGasThroughBreakage[idx3] = 0.0;
d_brCompVar->birthThroughBreakage1[idx3] = 0.0;
d_brCompVar->birthThroughBreakage2[idx3] = 0.0;
d_brCompVar->firstSolidBirthThroughBreakage[idx3] = 0.0;
d_brCompVar->secondSolidBirthThroughBreakage[idx3] = 0.0;
d_brCompVar->liquidBirthThroughBreakage2[idx3] = 0.0;
d_brCompVar->gasBirthThroughBreakage2[idx3] = 0.0;
d_brCompVar->firstSolidVolumeThroughBreakage[idx3] = 0.0;
d_brCompVar->secondSolidVolumeThroughBreakage[idx3] = 0.0;
d_brCompVar->liquidBirthThroughBreakage1[idx3] = 0.0;
d_brCompVar->gasBirthThroughBreakage1[idx3] = 0.0;
d_brCompVar->formationThroughBreakageCA[idx3] = 0.0;
d_brCompVar->formationOfLiquidThroughBreakageCA[idx3] = 0.0;
d_brCompVar->formationOfGasThroughBreakageCA[idx3] = 0.0;
__syncthreads();
d_compVar->internalLiquid[idx3] = min((granSatFactor * d_compartmentOut->gasBins[idx3]), d_compartmentOut->liquidBins[idx3]);
d_compartmentOut->internalVolumeBins[idx3] = d_compartmentIn->sMeshXY[tix] + d_compartmentIn->ssMeshXY[tix] + d_compVar->internalLiquid[idx3] + d_compartmentOut->gasBins[idx3];
d_compVar->externalLiquid[idx3] = max(0.0, (d_compartmentOut->liquidBins[idx3] - d_compVar->internalLiquid[idx3]));
// printf("d_compartmentOut->liquidBins = %f \n", d_compartmentOut->liquidBins[tix]);
dim3 compKernel_nblocks, compKernel_nthreads;
hipStream_t stream1, stream2;
hipError_t result1, result2, err;
result1 = hipStreamCreateWithFlags(&stream1, hipStreamNonBlocking);
result2 = hipStreamCreateWithFlags(&stream2, hipStreamNonBlocking);
hipDeviceSynchronize();
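    // Device-side (dynamic parallelism) launches: the aggregation and breakage
    // rate kernels run on two non-blocking streams so they can overlap.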
hipLaunchKernelGGL(( performAggCalculations), dim3(1),dim3(size2D), 0, stream1, d_prevCompInData, d_compartmentIn, d_compartmentDEMIn, d_compartmentOut, d_compVar, d_aggCompVar, time, timeStep, initialTime, demTimeStep, bix, tix, bdx, nFirstSolidBins, nSecondSolidBins, nCompartments, aggKernelConst);
hipLaunchKernelGGL(( performBreakageCalculations), dim3(1),dim3(size2D),0,stream2, d_prevCompInData, d_compartmentIn, d_compartmentDEMIn, d_compartmentOut, d_compVar, d_brCompVar, time, timeStep, initialTime, demTimeStep, bix, tix, bdx, nFirstSolidBins, nSecondSolidBins, brkKernelConst);
err = hipGetLastError();
if (err != hipSuccess)
{
printf("Failed to launch breakage kernel (error code %s)!\n", hipGetErrorString(err));
}
hipDeviceSynchronize();
result1 = hipStreamDestroy(stream1);
result2 = hipStreamDestroy(stream2);
if (result1 != hipSuccess || result2 != hipSuccess)
{
printf("Failed to launch streams1 kernel (error code %s)!\n", hipGetErrorString(result1));
printf("Failed to launch streams2 kernel (error code %s)!\n", hipGetErrorString(result2));
}
d_compVar->meshXYSum[idx3] = d_compartmentIn->sMeshXY[tix] + d_compartmentIn->ssMeshXY[tix];
double maxValue = -DBL_MAX;
for (size_t d1 = bix * bdx; d1 < (bix+1) * bdx; d1++)
{
maxValue = max(maxValue, d_compVar->meshXYSum[d1]);
}
__syncthreads();
double valueMeshXY = 1 - (d_compartmentIn->sMeshXY[tix] + d_compartmentIn->ssMeshXY[tix]) / maxValue;
double distanceBetweenCompartments = granulatorLength / nCompartments;
double particleAverageVelocity = granulatorLength / partticleResTime;
double distanceMoved = particleAverageVelocity * timeStep / distanceBetweenCompartments;// value renamed as distanceMoved
d_compVar->particleMovement[idx3] = 0.0;
d_compVar->liquidMovement[idx3] = 0.0;
d_compVar->gasMovement[idx3] = 0.0;
d_compartmentOut->dfAlldt[idx3] = 0.0;
d_compartmentOut->dfLiquiddt[idx3] = 0.0;
d_compartmentOut->dfGasdt[idx3] = 0.0;
d_compVar->particleMovement[idx3] = d_prevCompInData->fAllComingIn[idx3];
d_compVar->particleMovement[idx3] += d_prevCompInData->fAllPreviousCompartment[idx3] * distanceMoved * valueMeshXY;
d_compVar->particleMovement[idx3] -= d_compartmentIn->fAll[idx3] * distanceMoved;
d_compVar->liquidMovement[idx3] = d_prevCompInData->flPreviousCompartment[idx3] * distanceMoved * valueMeshXY;
d_compVar->liquidMovement[idx3] -= d_compartmentIn->fLiquid[idx3] * distanceMoved;
d_compVar->gasMovement[idx3] = d_prevCompInData->fgComingIn[idx3];
d_compVar->gasMovement[idx3] += d_prevCompInData->fgPreviousCompartment[idx3] * distanceMoved * valueMeshXY;
d_compVar->gasMovement[idx3] -= d_compartmentIn->fGas[idx3] * distanceMoved;
double finalTime = premixTime + liqAddTime + initialTime;
if (tix == 0)
{
if (time >= premixTime && time <= finalTime)
d_compartmentIn->liquidAdditionRate[bix] *= timeStep;
else
d_compartmentIn->liquidAdditionRate[bix] = 0.0;
}
double totalSolidvolume = 0.0;
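    // Total solid volume in this compartment: sum of fAll * (vs + vss) over
    // all (s, ss) bins; used below to apportion the liquid addition rate.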
for (int i = bix * bdx; i < (bix+1) * bdx; i++)
totalSolidvolume += d_compartmentIn->fAll[i] * (d_compartmentIn->vs[(int) floorf((i - bix * bdx) / nFirstSolidBins)] + d_compartmentIn->vss[(i - bix * bdx) % nSecondSolidBins]);
__syncthreads();
d_compartmentOut->dfAlldt[idx3] = d_compVar->particleMovement[idx3];
d_compartmentOut->dfAlldt[idx3] += d_aggCompVar->formationThroughAggregationCA[idx3] - d_aggCompVar->depletionThroughAggregation[idx3];
d_compartmentOut->dfAlldt[idx3] += d_brCompVar->birthThroughBreakage1[idx3] + d_brCompVar->formationThroughBreakageCA[idx3] - d_brCompVar->depletionThroughBreakage[idx3];
if (totalSolidvolume > 1.0e-16)
d_brCompVar->transferThroughLiquidAddition[idx3] = d_compartmentIn->liquidAdditionRate[bix] * ((d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]) / totalSolidvolume);
d_compartmentOut->dfLiquiddt[idx3] = d_compVar->liquidMovement[idx3];
d_compartmentOut->dfLiquiddt[idx3] += d_compartmentIn->fAll[idx3] * d_brCompVar->transferThroughLiquidAddition[idx3];
d_compartmentOut->dfLiquiddt[idx3] += d_aggCompVar->formationOfLiquidThroughAggregationCA[idx3] - d_aggCompVar->depletionOfLiquidThroughAggregation[idx3];
d_compartmentOut->dfLiquiddt[idx3] += d_brCompVar->liquidBirthThroughBreakage1[idx3] + d_brCompVar->formationOfLiquidThroughBreakageCA[idx3];
d_compartmentOut->dfLiquiddt[idx3] -= d_brCompVar->depletionOfLiquidthroughBreakage[idx3];
if(d_compartmentIn->fGas[idx3] > 1.0e-16)
{
d_brCompVar->transferThroughConsolidation[idx3] = consConst * d_compartmentOut->internalVolumeBins[idx3] * ((1 - minPorosity) / (d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]));
d_brCompVar->transferThroughConsolidation[idx3] *= (d_compartmentOut->gasBins[idx3] - (minPorosity / (1-minPorosity)) * (d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]) + d_compVar->internalLiquid[idx3]);
}
d_compartmentOut->dfGasdt[idx3] = d_compVar->gasMovement[idx3];
d_compartmentOut->dfGasdt[idx3] += d_compartmentIn->fAll[idx3] * d_brCompVar->transferThroughConsolidation[idx3];
d_compartmentOut->dfGasdt[idx3] += d_aggCompVar->formationOfGasThroughAggregationCA[idx3] - d_aggCompVar->depletionOfGasThroughAggregation[idx3];
d_compartmentOut->dfGasdt[idx3] += d_brCompVar->gasBirthThroughBreakage1[idx3] + d_brCompVar->formationOfGasThroughBreakageCA[idx3];
d_compartmentOut->dfGasdt[idx3] -= d_brCompVar->depletionOfGasThroughBreakage[idx3];
__syncthreads();
if (tix == 0)
{
for (int i = bix * bdx; i < ((bix +1) * bdx); i++)
{
d_compartmentOut->formationThroughAggregation[bix] += d_aggCompVar->formationThroughAggregationCA[i];
d_compartmentOut->depletionThroughAggregation[bix] += d_aggCompVar->depletionThroughAggregation[i];
d_compartmentOut->formationThroughBreakage[bix] += d_brCompVar->formationThroughBreakageCA[i] + d_brCompVar->gasBirthThroughBreakage1[i];
d_compartmentOut->depletionThroughBreakage[bix] += d_brCompVar->depletionThroughBreakage[i];
}
}
}
// ===================================== MAIN FUNCTION ======================================================
int main(int argc, char *argv[])
{
cout << "Code begins..." << endl;
// Read passed arguments
string startTimeStr;
double startTime = static_cast<double>(clock()) / static_cast<double>(CLOCKS_PER_SEC); // CPU-time reference for the elapsed-time report at the end
liggghtsData *lData = nullptr;
parameterData *pData = nullptr;
string coreVal;
string diaVal;
string pbmInFilePath;
string timeVal;
if (argc <5)
{
cout << "All values are not available as imput parameters " << endl;
return 1;
}
pbmInFilePath = string(argv[1]);
coreVal = string(argv[2]);
diaVal = string(argv[3]);
timeVal = string(argv[4]);
pData = parameterData::getInstance();
pData->readPBMInputFile(pbmInFilePath);
int nCompartments = pData->nCompartments;
unsigned int nFirstSolidBins = pData->nFirstSolidBins;
unsigned int nSecondSolidBins = pData->nSecondSolidBins;
size_t size1D = nFirstSolidBins;
size_t size2D = size1D * nSecondSolidBins;
size_t size3D = size2D * nCompartments;
size_t size4D = size2D * size2D;
size_t size5D = size4D * nCompartments;
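// size2D: bins per compartment, size3D: bins over all compartments, size4D: bin-bin interaction pairs, size5D: interaction pairs over all compartments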
CompartmentIn compartmentIn(size2D, size5D, 0), x_compartmentIn(size2D, size5D, 1), *d_compartmentIn;
PreviousCompartmentIn prevCompInData(size2D, size5D, 0), x_prevCompInData(size2D, size5D, 1), *d_prevCompInData;
CompartmentOut compartmentOut(size2D, size5D, 0), x_compartmentOut(size2D, size5D, 1), *d_compartmentOut;
CompartmentDEMIn compartmentDEMIn(size2D, size5D, 0), x_compartmentDEMIn(size2D, size5D, 1), *d_compartmentDEMIn;
vector<double> h_vs(size1D, 0.0);
vector<double> h_vss(size1D, 0.0);
// Bin Initiation
double fsVolCoeff = pData->fsVolCoeff;
double fsVolBase = pData->fsVolBase;
for (size_t i = 0; i < nFirstSolidBins; i++)
h_vs[i] = fsVolCoeff * pow(fsVolBase, i); // m^3
double ssVolCoeff = pData->ssVolCoeff;
double ssVolBase = pData->ssVolBase;
for (size_t i = 0; i < nSecondSolidBins; i++)
h_vss[i] = ssVolCoeff * pow(ssVolBase, i); // m^3
arrayOfDouble2D diameter1 = getArrayOfDouble2D(nFirstSolidBins, nSecondSolidBins);
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
diameter1[s][ss] = cbrt((6/M_PI) * (h_vs[s] + h_vss[ss]));
vector<double> diameter = linearize2DVector(diameter1);
vector<double> particleIn;
particleIn.push_back(726657587.0);
particleIn.push_back(286654401.0);
particleIn.push_back(118218011.0);
particleIn.push_back(50319795.0);
particleIn.push_back(20954036.0);
particleIn.push_back(7345998.0);
particleIn.push_back(1500147.0);
particleIn.push_back(76518.0);
particleIn.push_back(149.0);
vector<double> h_fIn(size2D, 0.0);
for (size_t i = 0; i < particleIn.size(); i++)
h_fIn[i * size1D + i] = particleIn[i];
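// Inlet feed: measured particle counts are placed on the diagonal of the 2-D (first-solid, second-solid) bin grid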
// allocation of memory for the matrices that will be copied onto the device from the host
double *d_vs = device_alloc_double_vector(size1D);
double *d_vss = device_alloc_double_vector(size1D);
double *d_sMeshXY = device_alloc_double_vector(size2D);
double *d_ssMeshXY = device_alloc_double_vector(size2D);
double *d_sAgg = device_alloc_double_vector(size2D);
double *d_ssAgg = device_alloc_double_vector(size2D);
int *d_sAggregationCheck = device_alloc_integer_vector(size2D);
int *d_ssAggregationCheck = device_alloc_integer_vector(size2D);
double *d_sLow = device_alloc_double_vector(size2D);
double *d_ssLow = device_alloc_double_vector(size2D);
double *d_sHigh = device_alloc_double_vector(size2D);
double *d_ssHigh = device_alloc_double_vector(size2D);
int *d_sLoc = device_alloc_integer_vector(size2D);
int *d_ssLoc = device_alloc_integer_vector(size2D);
int *d_sInd = device_alloc_integer_vector(size2D);
int *d_ssInd = device_alloc_integer_vector(size2D);
double *d_sBreak = device_alloc_double_vector(size2D);
double *d_ssBreak = device_alloc_double_vector(size2D);
int *d_sLocBreak = device_alloc_integer_vector(size2D);
int *d_ssLocBreak = device_alloc_integer_vector(size2D);
int *d_sCheckB = device_alloc_integer_vector(size2D);
int *d_ssCheckB = device_alloc_integer_vector(size2D);
int *d_sIndB = device_alloc_integer_vector(size2D);
int *d_ssIndB = device_alloc_integer_vector(size2D);
// defining vectors for data required for compartment calculations
vector<double> h_sMeshXY(size2D, 0.0);
vector<double> h_ssMeshXY(size2D, 0.0);
vector<int> h_sAggregationCheck(size2D, 0);
vector<int> h_ssAggregationCheck(size2D, 0);
vector<double> h_sLow(size2D, 0.0);
vector<double> h_ssLow(size2D, 0.0);
vector<double> h_sHigh(size2D, 0.0);
vector<double> h_ssHigh(size2D, 0.0);
vector<int> h_sInd(size2D, 0);
vector<int> h_ssInd(size2D, 0);
vector<int> h_sCheckB(size2D, 0);
vector<int> h_ssCheckB(size2D, 0);
vector<int> h_sIndB(size2D, 0.0);
vector<int> h_ssIndB(size2D, 0.0);
vector<int> h_sLocBreak(size2D, 0.0);
vector<int> h_ssLocBreak(size2D, 0.0);
vector<double> h_sBreak(size2D, 0.0);
vector<double> h_ssBreak(size2D, 0.0);
copy_double_vector_fromHtoD(d_vs, h_vs.data(), size1D);
copy_double_vector_fromHtoD(d_vss, h_vss.data(), size1D);
int nBlocks = nFirstSolidBins;
int nThreads = nSecondSolidBins;
hipLaunchKernelGGL(( initialization_kernel), dim3(nBlocks),dim3(nThreads), 0, 0, d_vs, d_vss, size2D, fsVolCoeff, ssVolCoeff, fsVolBase, ssVolBase, d_sAgg,d_ssAgg, d_sAggregationCheck, d_ssAggregationCheck,
d_sLow, d_ssLow, d_sHigh, d_ssHigh, d_sMeshXY, d_ssMeshXY, d_sLoc, d_ssLoc, d_sInd, d_ssInd, d_sBreak, d_ssBreak, d_sLocBreak, d_ssLocBreak,
d_sCheckB, d_ssCheckB, d_sIndB, d_ssIndB);
hipError_t err = hipSuccess;
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch initialization kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
cout << "Initialization complete" << endl;
// copy back data required for the compartment calculations
copy_double_vector_fromDtoH(h_sMeshXY.data(), d_sMeshXY, size2D);
copy_double_vector_fromDtoH(h_ssMeshXY.data(), d_ssMeshXY, size2D);
copy_integer_vector_fromDtoH(h_sAggregationCheck.data(), d_sAggregationCheck, size2D);
copy_integer_vector_fromDtoH(h_ssAggregationCheck.data(), d_ssAggregationCheck, size2D);
copy_double_vector_fromDtoH(h_sLow.data(), d_sLow, size2D);
copy_double_vector_fromDtoH(h_ssLow.data(), d_ssLow, size2D);
copy_double_vector_fromDtoH(h_sHigh.data(), d_sHigh, size2D);
copy_double_vector_fromDtoH(h_ssHigh.data(), d_ssHigh, size2D);
copy_integer_vector_fromDtoH(h_sInd.data(), d_sInd, size2D);
copy_integer_vector_fromDtoH(h_ssInd.data(), d_ssInd, size2D);
copy_integer_vector_fromDtoH(h_sCheckB.data(), d_sCheckB, size2D);
copy_integer_vector_fromDtoH(h_ssCheckB.data(), d_ssCheckB, size2D);
copy_integer_vector_fromDtoH(h_sIndB.data(), d_sIndB, size2D);
copy_integer_vector_fromDtoH(h_ssIndB.data(), d_ssIndB, size2D);
copy_integer_vector_fromDtoH(h_sLocBreak.data(), d_sLocBreak, size2D);
copy_integer_vector_fromDtoH(h_ssLocBreak.data(), d_ssLocBreak, size2D);
copy_double_vector_fromDtoH(h_sBreak.data(), d_sBreak, size2D);
copy_double_vector_fromDtoH(h_ssBreak.data(), d_ssBreak, size2D);
hipDeviceSynchronize();
DUMP(h_sMeshXY);
DUMP(h_ssMeshXY);
DUMP(h_sAggregationCheck);
DUMP(h_ssAggregationCheck);
DUMP(h_sLow);
DUMP(h_ssLow);
DUMP(h_sHigh);
DUMP(h_ssHigh);
DUMP(h_sInd);
DUMP(h_ssInd);
DUMP(h_sCheckB);
DUMP(h_ssCheckB);
DUMP(h_sIndB);
DUMP(h_ssIndB);
DUMP(h_sLocBreak);
DUMP(h_ssLocBreak);
DUMP(h_sBreak);
DUMP(h_ssBreak);
vector<double> h_fAllCompartments(size3D, 0.0);
vector<double> h_flAllCompartments(size3D, 0.0);
vector<double> h_fgAllCompartments(size3D, 0.0);
vector<double> h_dfdtAllCompartments(size3D, 0.0);
vector<double> h_dfldtAllCompartments(size3D, 0.0);
vector<double> h_dfgdtAllCompartments(size3D, 0.0);
vector<double> h_externalVolumeBinsAllCompartments(size3D, 0.0);
vector<double> h_internalVolumeBinsAllCompartments(size3D, 0.0);
vector<double> h_liquidBinsAllCompartments(size3D, 0.0);
vector<double> h_gasBinsAllCompartments(size3D, 0.0);
vector<double> h_totalVolumeBinsAllCompartments(size3D, 0.0);
vector<double> h_internalLiquidAllCompartments(size3D, 0.0);
vector<double> h_externalLiquidAllCompartments(size3D, 0.0);
vector<double> h_internalVolumeBins(size2D, 0.0);
vector<double> h_externalVolumeBins(size2D, 0.0);
lData = liggghtsData::getInstance();
lData->readLiggghtsDataFiles(coreVal, diaVal);
vector<double> DEMDiameter = lData->getDEMParticleDiameters();
if ((DEMDiameter).size() == 0)
{
cout << "Diameter data is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
vector<double> DEMImpactData = lData->getFinalDEMImpactData();
if ((DEMImpactData).size() == 0)
{
cout << "Impact data is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
arrayOfDouble2D DEMCollisionData = lData->getFinalDEMCollisionData();
if (DEMCollisionData.size() == 0)
{
cout << "Collision data is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
vector<double> velocity = lData->getFinalDEMImpactVelocity();
if (velocity.size() == 0)
{
cout << "Velocity is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
vector<double> colVelocity = lData->getFinalDEMCollisionVelocity();
if (colVelocity.size() == 0)
{
cout << "Velocity is missing in LIGGGHTS collision output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
// moved velocity based probability calculation to the model from kernel.cpp to reduce computation
double demTimeStep = pData->demTimeStep;
copy_double_vector_fromHtoD(x_compartmentDEMIn.velocityCol, colVelocity.data(), size1D);
double inverseDiameterSum = 0.0;
double inverseMassSum = 0.0;
int sized = DEMDiameter.size();
double solDensity = pData->solDensity;
for (int i = 0; i < sized; i++)
{
inverseDiameterSum += (1 / DEMDiameter[i]);
inverseMassSum += (1 / ((4.0 / 3.0) * M_PI * pow((DEMDiameter[i] / 2), 3) * solDensity)); // 4.0/3.0 keeps the sphere-volume factor in floating point
}
double coefOfRest = pData->coefOfRest;
double liqThick = pData->liqThick;
double surfAsp = pData->surfAsp;
double bindVisc = pData->bindVisc;
double sumVelo = 0.0;
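// Critical aggregation velocity from harmonic-mean DEM particle diameter and mass, binder viscosity, liquid-layer thickness, surface asperity and coefficient of restitution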
double harmonic_diameter = sized / inverseDiameterSum;
double harmonic_mass = sized / inverseMassSum;
double uCritical = (10 + (1 / coefOfRest)) * log((liqThick / surfAsp)) * (3 * M_PI * pow(harmonic_diameter, 2) * bindVisc) / (8 * harmonic_mass);
// x_compartmentDEMIn.uCriticalCol[0] = uCritical;
copy_double_vector_fromHtoD(x_compartmentDEMIn.uCriticalCol, &uCritical, 1);
// cout << "Critical velocity for agg is " << uCritical << endl;
int veloSize = colVelocity.size();
for (int i = 0; i < veloSize; i++)
sumVelo += colVelocity[i];
unsigned int nDEMBins = pData->nDEMBins;
double averageVelocity = sumVelo / nDEMBins;
double stdDevVelocity = 0.0;
double varianceVelocity = 0.0;
for (int i = 0; i < veloSize; ++i)
varianceVelocity += pow((colVelocity[i] - averageVelocity), 2) / nDEMBins;
stdDevVelocity = sqrt(varianceVelocity);
//double intVelocity = 0.0;
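// Weight each collision velocity with a log-normal-style probability built from the mean and variance computed above; copied to the device as colProbability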
vector<double> colProbablityOfVelocity(veloSize, 0.0);
for (int i = 0; i < veloSize; i++)
{
colProbablityOfVelocity[i] = (1 / (colVelocity[i] * sqrt(2 * M_PI) * stdDevVelocity)) * exp(-((log(colVelocity[i]) - averageVelocity) / (2 * pow(varianceVelocity, 2))));
// cout << "Probability at " << velocity[i] << "is " << colProbablityOfVelocity[i] << endl;
}
copy_double_vector_fromHtoD(x_compartmentDEMIn.colProbability, colProbablityOfVelocity.data(), size1D);
// vector<double> impactFrequency = DEMImpactData;
// for (int s = 0; s < nFirstSolidBins; s++)
// for (int ss = 0; ss < nSecondSolidBins; ss++)
// for (int i = 0; i < nDEMBins; i++)
// {
// if (fAll[n2] > 0.0)
// impactFrequency[i] = (DEMImpactData[i] * timeStep) / demTimeStep;
// }
double critStDefNum = pData->critStDefNum;
double initPorosity = pData->initPorosity;
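// Critical breakage velocity (Ubreak) from the critical Stokes deformation number, solid density, initial porosity, binder viscosity and the smallest DEM diameter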
double Ubreak = (2 * critStDefNum / solDensity) * (9 / 8.0) * (pow((1 - initPorosity), 2) / pow(initPorosity, 2)) * (9 / 16.0) * (bindVisc / DEMDiameter[0]);
// x_compartmentDEMIn.ubreak[0] = Ubreak;
copy_double_vector_fromHtoD(x_compartmentDEMIn.ubreak, &Ubreak, 1);
int size1 = velocity.size();
double sum = 0.0;
for (int i = 0; i < size1; i++)
sum += velocity[i];
double averageVelocityBr = sum / nDEMBins;
double stdDevVelocityBr = 0.0;
double varianceVelocityBr = 0.0;
for (int i = 0; i < size1; ++i)
{
varianceVelocityBr += pow((velocity[i] - averageVelocityBr), 2) / nDEMBins;
}
stdDevVelocityBr = sqrt(varianceVelocityBr);
//double intVelocity = 0.0;
// cout << "Std Dev. of Velocity = " << stdDevVelocity << endl;
vector<double> breakageProbablityOfVelocity(size1, 0.0);
for (int i = 0; i < size1; i++)
{
if (velocity[i] != 0)
{
breakageProbablityOfVelocity[i] = (1 / (velocity[i] * sqrt(2 * M_PI) * stdDevVelocityBr)) * exp(-((log(velocity[i]) - averageVelocityBr) / (2 * pow(varianceVelocityBr, 2))));
}
}
copy_double_vector_fromHtoD(x_compartmentDEMIn.brProbability, breakageProbablityOfVelocity.data(), size1D);
DUMP2D(DEMCollisionData);
DUMP(DEMDiameter);
DUMP(DEMImpactData);
DUMP(velocity);
//Initialize DEM data for compartment
copy_double_vector_fromHtoD(x_compartmentDEMIn.DEMDiameter, DEMDiameter.data(), size1D);
copy_double_vector_fromHtoD(x_compartmentDEMIn.DEMCollisionData, linearize2DVector(DEMCollisionData).data(), size2D);
copy_double_vector_fromHtoD(x_compartmentDEMIn.DEMImpactData, DEMImpactData.data(), size1D);
vector<double> liquidAdditionRateAllCompartments(nCompartments, 0.0);
double liqSolidRatio = pData->liqSolidRatio;
double throughput = pData->throughput;
double liqDensity = pData->liqDensity;
double liquidAddRate = (liqSolidRatio * throughput) / (liqDensity * 3600);
liquidAdditionRateAllCompartments[0] = liquidAddRate;
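// Liquid addition rate from the liquid-to-solid ratio, solids throughput and binder density; all liquid enters through the first compartment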
arrayOfDouble2D h_fAllCompartmentsOverTime;
arrayOfDouble2D h_externalVolumeBinsAllCompartmentsOverTime;
arrayOfDouble2D h_internalVolumeBinsAllCompartmentsOverTime;
arrayOfDouble2D h_liquidBinsAllCompartmentsOverTime;
arrayOfDouble2D h_gasBinsAllCompartmentsOverTime;
double granulatorLength = pData->granulatorLength;
double partticleResTime = pData->partticleResTime;
double particleAveVelo = granulatorLength / partticleResTime;
vector<double> particleAverageVelocity(nCompartments, particleAveVelo);
//Initialize input data for compartment
copy_double_vector_fromHtoD(x_compartmentIn.vs, h_vs.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.vss, h_vss.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.diameter, diameter.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.sMeshXY, h_sMeshXY.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.ssMeshXY, h_ssMeshXY.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sAggregationCheck, h_sAggregationCheck.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssAggregationCheck, h_ssAggregationCheck.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.sLow, h_sLow.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.sHigh, h_sHigh.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.ssLow, h_ssLow.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.ssHigh, h_ssHigh.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sInd, h_sInd.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssInd, h_ssInd.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sCheckB, h_sCheckB.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssCheckB, h_ssCheckB.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sIndB, h_sIndB.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssIndB, h_ssIndB.data(), size2D);
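// Sieve grid (micrometres) used to bin granule diameters for the d10/d50/d90 calculation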
vector<int> sieveGrid;
sieveGrid.push_back(38);
sieveGrid.push_back(63);
sieveGrid.push_back(90);
sieveGrid.push_back(125);
sieveGrid.push_back(250);
sieveGrid.push_back(355);
sieveGrid.push_back(500);
sieveGrid.push_back(710);
sieveGrid.push_back(850);
sieveGrid.push_back(1000);
sieveGrid.push_back(1400);
sieveGrid.push_back(2000);
sieveGrid.push_back(2380);
sieveGrid.push_back(4000);
size_t nSieveGrid = sieveGrid.size();
vector<double> d10OverTime(size2D, 0.0);
vector<double> d50OverTime(size2D, 0.0);
vector<double> d90OverTime(size2D, 0.0);
double time = stod(timeVal); // initial time to start PBM
double timeStep = 0.5; //1.0e-1;
vector<double> Time;
double lastTime = time;
int timeIdxCount = 0;
int lastTimeIdxCount = 0;
double premixTime = pData->premixTime;
double liqAddTime = pData->liqAddTime;
double postMixTime = pData->postMixTime;
double finalTime = premixTime + liqAddTime + postMixTime + stod(timeVal);
vector<double *> formationThroughAggregationOverTime;
vector<double *> depletionThroughAggregationOverTime;
vector<double *> formationThroughBreakageOverTime;
vector<double *> depletionThroughBreakageOverTime;
cout << "time" << endl;
// defining compartment variable pointers
CompartmentVar compVar(size3D, size5D, 0), d_compVarCpy(size3D, size5D, 1), *d_compVar;
AggregationCompVar aggCompVar(size3D, size5D, 0), x_aggCompVar(size3D, size5D, 1), *d_aggCompVar;
BreakageCompVar brCompVar(size3D, size5D, 0), x_brCompVar(size3D, size5D, 1), *d_brCompVar;
// allocating memory for structures used for compartment calculations
err = hipMalloc(&d_compartmentIn, sizeof(CompartmentIn));
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMalloc : CompartmentIn (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc(&d_prevCompInData, sizeof(PreviousCompartmentIn));
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMalloc : prevCompInData (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc(&d_compartmentDEMIn, sizeof(CompartmentDEMIn));
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMalloc : compartmentDEMIn (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void**) &d_compVar, sizeof(CompartmentVar));
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMalloc : CompartmentVar (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc(&d_aggCompVar, sizeof(AggregationCompVar));
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMalloc : AggregationCompVar (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc(&d_brCompVar, sizeof(BreakageCompVar));
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMalloc : BreakageCompVar (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc(&d_compartmentOut, sizeof(CompartmentOut));
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMalloc : d_compartmentOut (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// copying data to the allocated GPU memory
hipMemcpy(d_compartmentIn, &x_compartmentIn, sizeof(CompartmentIn), hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : CompartmentIn (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMemcpy(d_prevCompInData, &x_prevCompInData, sizeof(PreviousCompartmentIn), hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : PreviousCompartmentIn (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMemcpy(d_compartmentDEMIn, &x_compartmentDEMIn, sizeof(CompartmentDEMIn), hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : CompartmentDEMIn (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMemcpy(d_compVar, &d_compVarCpy, sizeof(CompartmentVar), hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : CompartmentVar (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
double aggKernelConst = pData->aggKernelConst;
// x_aggCompVar.aggKernelConst[0] = aggKernelConst;
copy_double_vector_fromHtoD(x_aggCompVar.aggKernelConst, &aggKernelConst, 1);
double brkKernelConst = pData->brkKernelConst;
// x_brCompVar.brkKernelConst[0] = brkKernelConst;
copy_double_vector_fromHtoD(x_brCompVar.brkKernelConst, &brkKernelConst, 1);
hipMemcpy(d_aggCompVar, &x_aggCompVar, sizeof(AggregationCompVar), hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : AggregationCompVar (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMemcpy(d_brCompVar, &x_brCompVar, sizeof(BreakageCompVar), hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : BreakageCompVar (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMemcpy(d_compartmentOut, &x_compartmentOut, sizeof(CompartmentOut), hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : compartmentOut (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// vector<double> h_formationThroughAggregation(nCompartments, 0.0);
// vector<double> h_depletionThroughAggregation(nCompartments, 0.0);
// vector<double> h_formationThroughBreakage(nCompartments, 0.0);
// vector<double> h_depletionThroughBreakage(nCompartments, 0.0);
double *d_formationThroughAggregation = device_alloc_double_vector(nCompartments);
double *d_depletionThroughAggregation = device_alloc_double_vector(nCompartments);
double *d_formationThroughBreakage = device_alloc_double_vector(nCompartments);
double *d_depletionThroughBreakage = device_alloc_double_vector(nCompartments);
double *d_fAllCompartments = device_alloc_double_vector(size3D);
double *d_flAllCompartments = device_alloc_double_vector(size3D);
double *d_fgAllCompartments = device_alloc_double_vector(size3D);
double *d_liquidAdditionRateAllCompartments = device_alloc_double_vector(nCompartments);
double *d_fIn = device_alloc_double_vector(size2D);
copy_double_vector_fromHtoD(d_liquidAdditionRateAllCompartments, liquidAdditionRateAllCompartments.data(), nCompartments);
copy_double_vector_fromHtoD(d_fIn, h_fIn.data(), size2D);
// dim3 compKernel_nblocks, compKernel_nthreads;
// compKernel_nblocks = dim3(nCompartments,1,1);
// compKernel_nthreads = dim3(size2D, size2D,1);
int compKernel_nblocks = 16;
int compKernel_nthreads = size2D * size2D;
// double granulatorLength = pData->granulatorLength;
// double partticleResTime = pData->partticleResTime;
// double premixTime = pData->premixTime;
// double liqAddTime = pData->liqAddTime;
double consConst = pData->consConst;
double minPorosity = pData->minPorosity;
double granSatFactor = pData->granSatFactor;
int threads = size2D;
hipDeviceSynchronize();
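// Time-marching loop: each pass launches one block per compartment, copies the rate terms back to the host, adapts the time step and integrates fAll, fLiquid and fGas with an explicit Euler step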
while (time <= finalTime)
{
CompartmentOut h_results(size2D, size5D, 1);
copy_double_vector_fromHtoD(d_fAllCompartments, h_fAllCompartments.data(), size3D);
copy_double_vector_fromHtoD(d_flAllCompartments, h_flAllCompartments.data(), size3D);
copy_double_vector_fromHtoD(d_fgAllCompartments, h_fgAllCompartments.data(), size3D);
hipDeviceSetLimit(hipLimitDevRuntimePendingLaunchCount, 0);
hipLaunchKernelGGL(( launchCompartment), dim3(nCompartments),dim3(threads), 0, 0, d_compartmentIn, d_prevCompInData, d_compartmentOut, d_compartmentDEMIn, d_compVar, d_aggCompVar, d_brCompVar,
time, timeStep, stod(timeVal), d_formationThroughAggregation, d_depletionThroughAggregation,d_formationThroughBreakage,
d_depletionThroughBreakage, d_fAllCompartments, d_flAllCompartments, d_fgAllCompartments,
d_liquidAdditionRateAllCompartments, size2D, size3D, size4D, d_fIn, initPorosity, demTimeStep, nFirstSolidBins, nSecondSolidBins,
granulatorLength, partticleResTime, premixTime, liqAddTime, consConst, minPorosity, nCompartments, granSatFactor, aggKernelConst, brkKernelConst);
// hipDeviceSynchronize();
err = hipSuccess; // check kernel launch
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch launchCompartment kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
cout << "Compartment ended " << endl;
// Copying data structures required for calculation
err = hipMemcpy(&h_results, d_compartmentOut, sizeof(CompartmentOut), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : CompartmentOut D to Hmake (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// copy necessary variables back to the CPU
copy_double_vector_fromDtoH(compartmentOut.dfAlldt, h_results.dfAlldt, size3D);
copy_double_vector_fromDtoH(compartmentOut.dfLiquiddt, h_results.dfLiquiddt, size3D);
copy_double_vector_fromDtoH(compartmentOut.dfGasdt, h_results.dfGasdt, size3D);
copy_double_vector_fromDtoH(compartmentOut.liquidBins, h_results.liquidBins, size3D);
copy_double_vector_fromDtoH(compartmentOut.gasBins, h_results.gasBins, size3D);
copy_double_vector_fromDtoH(compartmentOut.formationThroughAggregation, h_results.formationThroughAggregation, size1D);
copy_double_vector_fromDtoH(compartmentOut.depletionThroughAggregation, h_results.depletionThroughAggregation, size1D);
copy_double_vector_fromDtoH(compartmentOut.formationThroughBreakage, h_results.formationThroughBreakage, size1D);
copy_double_vector_fromDtoH(compartmentOut.depletionThroughBreakage, h_results.depletionThroughBreakage, size1D);
// copy_double_vector_fromDtoH(h_fAllCompartments.data(), d_fAllCompartments, size3D);
// copy_double_vector_fromDtoH(h_flAllCompartments.data(), d_flAllCompartments, size3D);
// copy_double_vector_fromDtoH(h_fgAllCompartments.data(), d_fgAllCompartments, size3D);
formationThroughAggregationOverTime.push_back(compartmentOut.formationThroughAggregation);
depletionThroughAggregationOverTime.push_back(compartmentOut.depletionThroughAggregation);
formationThroughBreakageOverTime.push_back(compartmentOut.formationThroughBreakage);
depletionThroughBreakageOverTime.push_back(compartmentOut.depletionThroughBreakage);
for (int w = 0; w < nCompartments; w++)
{
cout << "compartmentOut.formationThroughAggregation = " << compartmentOut.formationThroughAggregation[w] << endl;
cout << "compartmentOut.depletionThroughAggregation = " << compartmentOut.depletionThroughAggregation[w] << endl;
cout << "compartmentOut.formationThroughBreakage = " << compartmentOut.formationThroughBreakage[w] << endl;
cout << "compartmentOut.depletionThroughBreakage = " << compartmentOut.depletionThroughBreakage[w] << endl;
}
double maxofthree = -DBL_MAX;
double maxAll = -DBL_MAX;
double maxLiquid = -DBL_MAX;
double maxGas = -DBL_MAX;
for (size_t i = 0; i < size3D; i++)
{
// cout << "compartmentOut.dfAlldt[" << i << "] is " << compartmentOut.dfAlldt[i] << endl;
if (fabs(h_fAllCompartments[i]) > 1.0e-16)
maxAll = max(maxAll, -compartmentOut.dfAlldt[i] / h_fAllCompartments[i]);
if (fabs(h_flAllCompartments[i]) > 1.0e-16)
maxLiquid = max(maxLiquid, -compartmentOut.dfLiquiddt[i] / h_flAllCompartments[i]);
if (fabs(h_fgAllCompartments[i]) > 1.0e-16)
maxGas = max(maxGas, -compartmentOut.dfGasdt[i] / h_fgAllCompartments[i]);
maxofthree = max(maxofthree, max(maxAll, max(maxLiquid, maxGas)));
}
cout << "maxAll = " << maxAll << endl;
cout << "maxLiquid = " << maxLiquid << endl;
cout << "maxGas = " << maxGas << endl;
cout << "maxofthree = " << maxofthree << endl;
while (maxofthree < 0.1 / timeStep && timeStep < 0.25)
timeStep *= 2.0;
while (maxofthree > 0.1 / timeStep && timeStep > 5.0e-5)
timeStep /= 2.0;
int nanCount = 0;
double minfAll = -DBL_MAX;
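// Explicit Euler update of the number, liquid and gas distributions; liquid and gas are clipped at zero and NaNs in fAll are counted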
for (size_t i = 0; i < size3D; i++)
{
double value = 0.0;
h_fAllCompartments[i] += compartmentOut.dfAlldt[i] * timeStep;
// cout << " h_fAllCompartments[" << i <<"] is " << h_fAllCompartments[i] << endl;
if (std::isnan(h_fAllCompartments[i]))
nanCount++;
value = h_flAllCompartments[i] + compartmentOut.dfLiquiddt[i] * timeStep;
h_flAllCompartments[i] = value > 0.0 ? value : 0.0;
value = h_fgAllCompartments[i] + compartmentOut.dfGasdt[i] * timeStep;
h_fgAllCompartments[i] = value > 0.0 ? value : 0.0;
}
if (nanCount)
{
cout << endl << "*****fAllCompartments has " << nanCount << "nan values******" << endl << endl;
DUMPCSV(h_fAllCompartments);
exit(EXIT_FAILURE);
}
int countnegfAll = 0;
minfAll = getMinimumOfArray(h_fAllCompartments);
for (size_t i = 0; i < size3D; i++)
countnegfAll += (h_fAllCompartments[i] < -1.0e-16) ? 1 : 0;
if (minfAll < -1.0e-16 && countnegfAll > 0.1 * nCompartments * nFirstSolidBins * nSecondSolidBins)
{
//int mpi_err = 0;
cout << endl;
//DUMP3DCSV(dfdtAllCompartments);
//DUMP3DCSV(fAllCompartments);
//cout << "My process id = " << mpi_id << endl;
cout << "minfAll" << minfAll << endl;
cout << "******fAllCompartments has negative values********" << endl;
cout << "Number of negative values = " << countnegfAll << endl;
DUMPCSV(h_fAllCompartments);
cout << "Aborting..." << endl;
return 1;
exit(EXIT_FAILURE);
}
// BIN recalculation
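// Per-bin liquid and gas contents and internal/external granule volumes are rebuilt from the updated distributions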
for (int c = 0; c < nCompartments; c++)
{
vector<double> liquidBins(size2D, 0.0);
vector<double> gasBins(size2D, 0.0);
vector<double> internalLiquid(size2D, 0.0);
vector<double> externalLiquid(size2D, 0.0);
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
{
int m = c * nFirstSolidBins * nSecondSolidBins + s * nSecondSolidBins + ss;
int n2 = s * nSecondSolidBins + ss;
if (fabs(h_fAllCompartments[m]) > 1.0e-16)
{
liquidBins[n2] = h_flAllCompartments[m] / h_fAllCompartments[m];
gasBins[n2] = h_fgAllCompartments[m] / h_fAllCompartments[m];
}
internalLiquid[n2] = min(granSatFactor * gasBins[n2], liquidBins[n2]);
externalLiquid[n2] = max(0.0, liquidBins[n2] - internalLiquid[n2]);
double value = compartmentIn.sMeshXY[n2] + compartmentIn.ssMeshXY[n2] + gasBins[n2];
h_internalVolumeBins[n2] = value + internalLiquid[n2];
h_externalVolumeBins[n2] = value + liquidBins[n2];
h_liquidBinsAllCompartments[m] = liquidBins[n2];
h_gasBinsAllCompartments[m] = gasBins[n2];
h_externalVolumeBinsAllCompartments[m] = h_externalVolumeBins[n2];
h_internalVolumeBinsAllCompartments[m] = h_internalVolumeBins[n2];
}
}
vector<double> d10OverCompartment(nCompartments, 0.0);
vector<double> d50OverCompartment(nCompartments, 0.0);
vector<double> d90OverCompartment(nCompartments, 0.0);
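// d10/d50/d90 per compartment: build a volume-weighted cumulative size distribution over the sieve grid and interpolate the 10/50/90 % passing diameters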
for (int c = 0; c < nCompartments; c++)
{
arrayOfDouble2D diameter = getArrayOfDouble2D(nFirstSolidBins, nSecondSolidBins);
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
{
int m = c * nFirstSolidBins * nSecondSolidBins + s * nSecondSolidBins + ss;
diameter[s][ss] = cbrt((6 / M_PI) * h_externalVolumeBinsAllCompartments[m]) * 1.0e6;
}
vector<double> totalVolumeGrid(nSieveGrid, 0.0);
for (size_t d = 0; d < nSieveGrid - 1; d++)
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
{
int m = c * nFirstSolidBins * nSecondSolidBins + s * nSecondSolidBins + ss;
if (diameter[s][ss] < sieveGrid[d + 1] && diameter[s][ss] >= sieveGrid[d])
totalVolumeGrid[d] += h_fAllCompartments[m] * h_externalVolumeBinsAllCompartments[m];
}
double sum = 0.0;
for (size_t d = 0; d < nSieveGrid; d++)
sum += totalVolumeGrid[d];
vector<double> volumeDistribution(nSieveGrid, 0.0);
for (size_t d = 0; d < nSieveGrid; d++)
volumeDistribution[d] = totalVolumeGrid[d] / sum;
vector<double> cumulativeVolumeDistribution(nSieveGrid, 0.0);
sum = 0.0;
for (size_t d = 0; d < nSieveGrid; d++)
{
sum += volumeDistribution[d];
cumulativeVolumeDistribution[d] = sum;
}
double d10 = 0.1 * (sieveGrid[1] / cumulativeVolumeDistribution[0]);
double d50 = 0.5 * (sieveGrid[1] / cumulativeVolumeDistribution[0]);
double d90 = 0.9 * (sieveGrid[1] / cumulativeVolumeDistribution[0]);
for (size_t d = 1; d < nSieveGrid; d++)
{
double value1 = (sieveGrid[d] - sieveGrid[d - 1]) / (cumulativeVolumeDistribution[d] - cumulativeVolumeDistribution[d - 1]);
double value2 = sieveGrid[d - 1];
if (cumulativeVolumeDistribution[d - 1] < 0.5 && cumulativeVolumeDistribution[d] >= 0.5)
{
double value = 0.5 - cumulativeVolumeDistribution[d - 1];
d50 = value * value1 + value2;
}
if (cumulativeVolumeDistribution[d - 1] < 0.1 && cumulativeVolumeDistribution[d] >= 0.1)
{
double value = 0.1 - cumulativeVolumeDistribution[d - 1];
d10 = value * value1 + value2;
}
if (cumulativeVolumeDistribution[d - 1] < 0.9 && cumulativeVolumeDistribution[d] >= 0.9)
{
double value = 0.9 - cumulativeVolumeDistribution[d - 1];
d90 = value * value1 + value2;
}
}
d10OverCompartment[c] = d10;
d50OverCompartment[c] = d50;
d90OverCompartment[c] = d90;
}
Time.push_back(time);
//SAVING OVER TIME
//cout << endl << "************Saving over time" << endl << endl;
h_fAllCompartmentsOverTime.push_back(h_fAllCompartments);
h_externalVolumeBinsAllCompartmentsOverTime.push_back(h_externalVolumeBinsAllCompartments);
h_internalVolumeBinsAllCompartmentsOverTime.push_back(h_internalVolumeBinsAllCompartments);
h_liquidBinsAllCompartmentsOverTime.push_back(h_liquidBinsAllCompartments);
h_gasBinsAllCompartmentsOverTime.push_back(h_gasBinsAllCompartments);
cout << "time = " << time << endl;
cout << "timeStep = " << timeStep << endl;
cout << endl;
timeIdxCount++;
time += timeStep;
}
size_t nTimeSteps = Time.size();
cout << endl
<< "nTimeSteps = " << nTimeSteps << endl
<< endl;
//dump values for ratio plots
dumpDiaCSVpointer(Time, formationThroughAggregationOverTime, Time.size() * nCompartments, string("FormationThroughAggregation"));
dumpDiaCSVpointer(Time, depletionThroughAggregationOverTime, Time.size() * nCompartments, string("DepletionThroughAggregation"));
dumpDiaCSVpointer(Time, formationThroughBreakageOverTime, Time.size() * nCompartments, string("FormationThroughBreakage"));
dumpDiaCSVpointer(Time, depletionThroughBreakageOverTime, Time.size() * nCompartments, string("DepletionThroughBreakage"));
double endTime = static_cast<double>(clock()) / static_cast<double>(CLOCKS_PER_SEC);
cout << "That took " << endTime - startTime << " seconds" << endl;
cout << "Code End" << endl;
return 0;
// vector<double> h(size4D, 0.0);
// for (int i = 0; i < size5D; i++)
// {
// cout << "At i = " << i << " kernel = " << compartmentOut.aggregationKernel[i] << endl;
// }
// hipFree(d_vs);
// hipFree(d_vss);
// hipFree(d_sMeshXY);
// hipFree(d_ssMeshXY);
// hipFree(d_compartmentIn);
} | 9ad842d9608b09c63faf435ef229abf77a8e19f6.cu | #include <vector>
#include <cmath>
#include <float.h>
#include <string>
#include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>
#include "utility.cuh"
#include "parameterData.h"
#include "liggghtsData.h"
#include "compartment.cuh"
using namespace std;
#define TWOWAYCOUPLING false
// MACROS
// Calling macros for error check and dump data to files to VariableName.txt
#define DUMP(varName) dumpData(varName, #varName)
#define DUMP2D(varName) dump2DData(varName, #varName)
#define DUMP3D(varName) dump3DData(varName, #varName)
#define DUMPCSV(varName) dumpCSV(varName, #varName)
#define DUMP2DCSV(varName) dump2DCSV(varName, #varName)
#define DUMP3DCSV(varName) dump3DCSV(varName, #varName)
#define DUMPDIACSV(time, dia) dumpDiaCSV(time, dia, #dia)
#define DUMP2DCSV4MATLAB(varName) dump2DCSV4Matlab(varName, #varName)
// extern __shared__ double *d_sMeshXY, *d_ssMeshXY;
// ==================================== INITIALIZATION KERNEL ===================================================
__global__ void initialization_kernel(double *d_vs, double *d_vss, size_t size2D, double fsVolCoeff, double ssVolCoeff, double fsVolBase, double ssVolBase, double *d_sAgg,
double *d_ssAgg, int *d_sAggregationCheck, int *d_ssAggregationCheck, double *d_sLow, double *d_ssLow, double *d_sHigh, double *d_ssHigh,
double *d_sMeshXY, double *d_ssMeshXY, int *d_sLoc, int *d_ssLoc, int *d_sInd, int *d_ssInd, double *d_sBreak, double *d_ssBreak,
int *d_sLocBreak, int *d_ssLocBreak, int *d_sCheckB, int*d_ssCheckB, int *d_sIndB, int *d_ssIndB)
{
int idx = threadIdx.x;
int bix = blockIdx.x;
int bdx = blockDim.x;
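// One block per first-solid bin (bix), one thread per second-solid bin (idx); each (bix, idx) pair is a cell of the 2-D solid-volume mesh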
// __shared__ double d_sMeshXY[256], d_ssMeshXY[256];
d_sMeshXY[bdx * bix + idx] = d_vs[bix];
d_ssMeshXY[bdx * bix + idx] = d_vss[idx];
d_sAgg[bdx * bix + idx] = d_vs[idx] + d_vs[bix];
d_ssAgg[bdx * bix + idx] = d_vss[idx] + d_vss[bix];
d_sAggregationCheck[bdx * bix + idx] = d_sAgg[bdx * bix + idx] <= d_vs[bdx - 1] ? 1 : 0;
d_ssAggregationCheck[bdx * bix + idx] = d_ssAgg[bdx * bix + idx] <= d_vss[bdx - 1] ? 1 : 0;
d_sLow [bdx * bix + idx] = d_sMeshXY[bdx * bix + idx];
d_ssLow[bdx * bix + idx] = d_ssMeshXY[bdx * bix + idx];
__syncthreads();
if (bix < bdx -1)
d_sHigh[bdx * bix + idx] = d_sMeshXY[bdx * (bix + 1) + idx];
if (idx < bdx - 1)
d_ssHigh[bdx * bix + idx] = d_ssMeshXY[bdx * bix + idx + 1];
d_sHigh[bdx * (bdx -1) + idx] = 0.0;
d_ssHigh[bdx * bix + bdx - 1] = 0.0;
d_sLoc[bdx * bix + idx] = floor(log(d_sAgg[bdx * bix + idx] / fsVolCoeff) / log(fsVolBase) + 1);
d_ssLoc[bdx * bix + idx] = floor(log(d_ssAgg[bdx * bix + idx] / ssVolCoeff) / log(ssVolBase) + 1);
d_sInd[bdx * bix + idx] = (idx <= bix) ? (bix + 1) : (idx + 1);
d_ssInd[bdx * bix + idx] = (idx <= bix) ? (bix + 1) : (idx + 1);
__syncthreads();
double value = d_vs[idx] - d_vs[bix];
double value1 = d_vss[idx] - d_vss[bix];
d_sBreak[bdx * bix + idx] = value < 0.0 ? 0.0 : value;
d_ssBreak[bdx * bix + idx] = value1 < 0.0 ? 0.0 : value1;
d_sLocBreak[bdx * bix + idx] = (d_sBreak[bdx * idx + bix] == 0) ? 0 : (floor(log(d_sBreak[bdx * idx + bix] / fsVolCoeff) / log(fsVolBase) + 1));
d_ssLocBreak[bdx * bix + idx] = (d_ssBreak[bdx * idx + bix] == 0) ? 0 : (floor(log(d_ssBreak[bdx * idx + bix] / ssVolCoeff) / log(ssVolBase) + 1));
__syncthreads();
d_sCheckB[bdx * bix + idx] = d_sLocBreak[bdx * bix + idx] >= 1 ? 1 : 0;
d_ssCheckB[bdx * bix + idx] = d_ssLocBreak[bdx * bix + idx] >= 1 ? 1 : 0;
d_sIndB[bdx * bix + idx] = d_sLocBreak[bdx * bix + idx];
d_ssIndB[bdx * bix + idx] = d_ssLocBreak[bdx * bix + idx];
if (d_sIndB[bdx * bix + idx] < 1)
d_sIndB[bdx * bix + idx] = bdx + 1;
if (d_ssIndB[bdx * bix + idx] < 1)
d_ssIndB[bdx * bix + idx] = bdx + 1;
}
// ================================= COMPARTMENT LAUNCH KERNEL ============================================================
__global__ void launchCompartment(CompartmentIn *d_compartmentIn, PreviousCompartmentIn *d_prevCompInData, CompartmentOut *d_compartmentOut, CompartmentDEMIn *d_compartmentDEMIn,
CompartmentVar *d_compVar, AggregationCompVar *d_aggCompVar, BreakageCompVar *d_brCompVar, double time, double timeStep, double initialTime,
double *d_formationThroughAggregation, double *d_depletionThroughAggregation, double *d_formationThroughBreakage, double *d_depletionThroughBreakage,
double *d_fAllCompartments, double *d_flAllCompartments, double *d_fgAllCompartments, double *d_liquidAdditionRateAllCompartments,
unsigned int size2D, unsigned int size3D, unsigned int size4D, double *d_fIn, double initPorosity, double demTimeStep, int nFirstSolidBins, int nSecondSolidBins,
double granulatorLength, double partticleResTime, double premixTime, double liqAddTime, double consConst, double minPorosity, int nCompartments,
double granSatFactor, double aggKernelConst, double brkKernelConst)
{
int bix = blockIdx.x;
int bdx = blockDim.x;
int tix = threadIdx.x;
int idx3 = bix * bdx + tix;
int s1 = (int) floorf(tix / nFirstSolidBins);
int ss1 = tix % nSecondSolidBins;
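// idx3 flattens (compartment, first-solid bin, second-solid bin); s1 and ss1 recover the two solid-bin indices from the thread index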
if (tix ==0)
{
d_compartmentOut->formationThroughAggregation[bix] = 0.0;
d_compartmentOut->depletionThroughAggregation[bix] = 0.0;
d_compartmentOut->formationThroughBreakage[bix] = 0.0;
d_compartmentOut->depletionThroughBreakage[bix] = 0.0;
}
// int tiy = threadIdx.y;
// int idx = bix * bdx * bdy + tiy * bdx + tix;
__syncthreads();
//if (tiy == 0)
d_compartmentIn->fAll[idx3] = d_fAllCompartments[idx3];
d_compartmentIn->fLiquid[idx3] = d_flAllCompartments[idx3];
d_compartmentIn->fGas[idx3] = d_fgAllCompartments[idx3];
d_compartmentIn->liquidAdditionRate[bix] = d_liquidAdditionRateAllCompartments[bix];
if (bix == 0)
{
d_prevCompInData->fAllPreviousCompartment[idx3] = 0.0;
d_prevCompInData->flPreviousCompartment[idx3] = 0.0;
d_prevCompInData->fgPreviousCompartment[idx3] = 0.0;
d_prevCompInData->fAllComingIn[idx3] = d_fIn[tix];
d_prevCompInData->fgComingIn[idx3] = 0.0;
double value = initPorosity * timeStep;
d_prevCompInData->fgComingIn[idx3] = d_fIn[tix] * (d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]) * value;
}
else
{
d_prevCompInData->fAllPreviousCompartment[idx3] = d_fAllCompartments[(bix - 1) * bdx + tix];
d_prevCompInData->flPreviousCompartment[idx3] = d_flAllCompartments[(bix - 1) * bdx + tix];
d_prevCompInData->fgPreviousCompartment[idx3] = d_fgAllCompartments[(bix - 1) * bdx + tix];
d_prevCompInData->fAllComingIn[idx3] = 0.0;
d_prevCompInData->fgComingIn[idx3] = 0.0;
}
if (fabs(d_compartmentIn->fAll[idx3]) > 1e-16)
{
d_compartmentOut->liquidBins[idx3] = d_compartmentIn->fLiquid[idx3] / d_compartmentIn->fAll[idx3];
d_compartmentOut->gasBins[idx3] = d_compartmentIn->fGas[idx3] / d_compartmentIn->fAll[idx3];
}
else
{
d_compartmentOut->liquidBins[idx3] = 0.0;
d_compartmentOut->gasBins[idx3] = 0.0;
}
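// Zero the per-bin aggregation and breakage work arrays for this time step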
d_aggCompVar->depletionThroughAggregation[idx3] = 0.0;
d_aggCompVar->depletionOfGasThroughAggregation[idx3] = 0.0;
d_aggCompVar->depletionOfLiquidThroughAggregation[idx3] = 0.0;
d_aggCompVar->birthThroughAggregation[idx3] = 0.0;
d_aggCompVar->firstSolidBirthThroughAggregation[idx3] = 0.0;
d_aggCompVar->secondSolidBirthThroughAggregation[idx3] = 0.0;
d_aggCompVar->liquidBirthThroughAggregation[idx3] = 0.0;
d_aggCompVar->gasBirthThroughAggregation[idx3] = 0.0;
d_aggCompVar->firstSolidVolumeThroughAggregation[idx3] = 0.0;
d_aggCompVar->secondSolidVolumeThroughAggregation[idx3] = 0.0;
d_aggCompVar->birthAggLowHigh[idx3] = 0.0;
d_aggCompVar->birthAggLowHighLiq[idx3] = 0.0;
d_aggCompVar->birthAggLowHighGas[idx3] = 0.0;
d_aggCompVar->birthAggHighLow[idx3] = 0.0;
d_aggCompVar->birthAggHighLowLiq[idx3] = 0.0;
d_aggCompVar->birthAggHighLowGas[idx3] = 0.0;
d_aggCompVar->birthAggLowLow[idx3] = 0.0;
d_aggCompVar->birthAggLowLowLiq[idx3] = 0.0;
d_aggCompVar->birthAggLowLowGas[idx3] = 0.0;
d_aggCompVar->birthAggHighHigh[idx3] = 0.0;
d_aggCompVar->birthAggHighHighLiq[idx3] = 0.0;
d_aggCompVar->birthAggHighHighGas[idx3] = 0.0;
d_aggCompVar->formationThroughAggregationCA[idx3] = 0.0;
d_aggCompVar->formationOfLiquidThroughAggregationCA[idx3] = 0.0;
d_aggCompVar->formationOfGasThroughAggregationCA[idx3] = 0.0;
d_brCompVar->depletionThroughBreakage[idx3] = 0.0;
d_brCompVar->depletionOfLiquidthroughBreakage[idx3] = 0.0;
d_brCompVar->depletionOfGasThroughBreakage[idx3] = 0.0;
d_brCompVar->birthThroughBreakage1[idx3] = 0.0;
d_brCompVar->birthThroughBreakage2[idx3] = 0.0;
d_brCompVar->firstSolidBirthThroughBreakage[idx3] = 0.0;
d_brCompVar->secondSolidBirthThroughBreakage[idx3] = 0.0;
d_brCompVar->liquidBirthThroughBreakage2[idx3] = 0.0;
d_brCompVar->gasBirthThroughBreakage2[idx3] = 0.0;
d_brCompVar->firstSolidVolumeThroughBreakage[idx3] = 0.0;
d_brCompVar->secondSolidVolumeThroughBreakage[idx3] = 0.0;
d_brCompVar->liquidBirthThroughBreakage1[idx3] = 0.0;
d_brCompVar->gasBirthThroughBreakage1[idx3] = 0.0;
d_brCompVar->formationThroughBreakageCA[idx3] = 0.0;
d_brCompVar->formationOfLiquidThroughBreakageCA[idx3] = 0.0;
d_brCompVar->formationOfGasThroughBreakageCA[idx3] = 0.0;
__syncthreads();
d_compVar->internalLiquid[idx3] = min((granSatFactor * d_compartmentOut->gasBins[idx3]), d_compartmentOut->liquidBins[idx3]);
d_compartmentOut->internalVolumeBins[idx3] = d_compartmentIn->sMeshXY[tix] + d_compartmentIn->ssMeshXY[tix] + d_compVar->internalLiquid[idx3] + d_compartmentOut->gasBins[idx3];
d_compVar->externalLiquid[idx3] = max(0.0, (d_compartmentOut->liquidBins[idx3] - d_compVar->internalLiquid[idx3]));
// printf("d_compartmentOut->liquidBins = %f \n", d_compartmentOut->liquidBins[tix]);
dim3 compKernel_nblocks, compKernel_nthreads;
cudaStream_t stream1, stream2;
cudaError_t result1, result2, err;
result1 = cudaStreamCreateWithFlags(&stream1, cudaStreamNonBlocking);
result2 = cudaStreamCreateWithFlags(&stream2, cudaStreamNonBlocking);
cudaDeviceSynchronize();
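// Aggregation and breakage rate kernels are launched from the device (dynamic parallelism) on separate non-blocking streams so they can overlap for this compartment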
performAggCalculations<<<1,size2D, 0, stream1>>>(d_prevCompInData, d_compartmentIn, d_compartmentDEMIn, d_compartmentOut, d_compVar, d_aggCompVar, time, timeStep, initialTime, demTimeStep, bix, tix, bdx, nFirstSolidBins, nSecondSolidBins, nCompartments, aggKernelConst);
performBreakageCalculations<<<1,size2D,0,stream2>>>(d_prevCompInData, d_compartmentIn, d_compartmentDEMIn, d_compartmentOut, d_compVar, d_brCompVar, time, timeStep, initialTime, demTimeStep, bix, tix, bdx, nFirstSolidBins, nSecondSolidBins, brkKernelConst);
err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("Failed to launch breakage kernel (error code %s)!\n", cudaGetErrorString(err));
}
cudaDeviceSynchronize();
result1 = cudaStreamDestroy(stream1);
result2 = cudaStreamDestroy(stream2);
if (result1 != cudaSuccess || result2 != cudaSuccess)
{
printf("Failed to launch streams1 kernel (error code %s)!\n", cudaGetErrorString(result1));
printf("Failed to launch streams2 kernel (error code %s)!\n", cudaGetErrorString(result2));
}
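// Axial transport: distanceMoved is the fraction of a compartment length travelled per time step; valueMeshXY down-weights inflow for the largest solid-volume bins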
d_compVar->meshXYSum[idx3] = d_compartmentIn->sMeshXY[tix] + d_compartmentIn->ssMeshXY[tix];
double maxValue = -DBL_MAX;
for (size_t d1 = bix * bdx; d1 < (bix+1) * bdx; d1++)
{
maxValue = max(maxValue, d_compVar->meshXYSum[d1]);
}
__syncthreads();
double valueMeshXY = 1 - (d_compartmentIn->sMeshXY[tix] + d_compartmentIn->ssMeshXY[tix]) / maxValue;
double distanceBetweenCompartments = granulatorLength / nCompartments;
double particleAverageVelocity = granulatorLength / partticleResTime;
double distanceMoved = particleAverageVelocity * timeStep / distanceBetweenCompartments;// value renamed as distanceMoved
d_compVar->particleMovement[idx3] = 0.0;
d_compVar->liquidMovement[idx3] = 0.0;
d_compVar->gasMovement[idx3] = 0.0;
d_compartmentOut->dfAlldt[idx3] = 0.0;
d_compartmentOut->dfLiquiddt[idx3] = 0.0;
d_compartmentOut->dfGasdt[idx3] = 0.0;
d_compVar->particleMovement[idx3] = d_prevCompInData->fAllComingIn[idx3];
d_compVar->particleMovement[idx3] += d_prevCompInData->fAllPreviousCompartment[idx3] * distanceMoved * valueMeshXY;
d_compVar->particleMovement[idx3] -= d_compartmentIn->fAll[idx3] * distanceMoved;
d_compVar->liquidMovement[idx3] = d_prevCompInData->flPreviousCompartment[idx3] * distanceMoved * valueMeshXY;
d_compVar->liquidMovement[idx3] -= d_compartmentIn->fLiquid[idx3] * distanceMoved;
d_compVar->gasMovement[idx3] = d_prevCompInData->fgComingIn[idx3];
d_compVar->gasMovement[idx3] += d_prevCompInData->fgPreviousCompartment[idx3] * distanceMoved * valueMeshXY;
d_compVar->gasMovement[idx3] -= d_compartmentIn->fGas[idx3] * distanceMoved;
double finalTime = premixTime + liqAddTime + initialTime;
if (tix == 0)
{
if (time >= premixTime && time <= finalTime)
d_compartmentIn->liquidAdditionRate[bix] *= timeStep;
else
d_compartmentIn->liquidAdditionRate[bix] = 0.0;
}
double totalSolidvolume = 0.0;
for (int i = bix * bdx; i < (bix+1) * bdx; i++)
totalSolidvolume += d_compartmentIn->fAll[i] * (d_compartmentIn->vs[(int) floorf((i - bix * bdx) / nFirstSolidBins)] + d_compartmentIn->vss[(i - bix * bdx) % nSecondSolidBins]);
__syncthreads();
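// Net rates: dfAlldt, dfLiquiddt and dfGasdt combine axial movement, aggregation and breakage formation/depletion, liquid addition and gas consolidation terms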
d_compartmentOut->dfAlldt[idx3] = d_compVar->particleMovement[idx3];
d_compartmentOut->dfAlldt[idx3] += d_aggCompVar->formationThroughAggregationCA[idx3] - d_aggCompVar->depletionThroughAggregation[idx3];
d_compartmentOut->dfAlldt[idx3] += d_brCompVar->birthThroughBreakage1[idx3] + d_brCompVar->formationThroughBreakageCA[idx3] - d_brCompVar->depletionThroughBreakage[idx3];
if (totalSolidvolume > 1.0e-16)
d_brCompVar->transferThroughLiquidAddition[idx3] = d_compartmentIn->liquidAdditionRate[bix] * ((d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]) / totalSolidvolume);
d_compartmentOut->dfLiquiddt[idx3] = d_compVar->liquidMovement[idx3];
d_compartmentOut->dfLiquiddt[idx3] += d_compartmentIn->fAll[idx3] * d_brCompVar->transferThroughLiquidAddition[idx3];
d_compartmentOut->dfLiquiddt[idx3] += d_aggCompVar->formationOfLiquidThroughAggregationCA[idx3] - d_aggCompVar->depletionOfLiquidThroughAggregation[idx3];
d_compartmentOut->dfLiquiddt[idx3] += d_brCompVar->liquidBirthThroughBreakage1[idx3] + d_brCompVar->formationOfLiquidThroughBreakageCA[idx3];
d_compartmentOut->dfLiquiddt[idx3] -= d_brCompVar->depletionOfLiquidthroughBreakage[idx3];
if(d_compartmentIn->fGas[idx3] > 1.0e-16)
{
d_brCompVar->transferThroughConsolidation[idx3] = consConst * d_compartmentOut->internalVolumeBins[idx3] * ((1 - minPorosity) / (d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]));
d_brCompVar->transferThroughConsolidation[idx3] *= (d_compartmentOut->gasBins[idx3] - (minPorosity / (1-minPorosity)) * (d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]) + d_compVar->internalLiquid[idx3]);
}
d_compartmentOut->dfGasdt[idx3] = d_compVar->gasMovement[idx3];
d_compartmentOut->dfGasdt[idx3] += d_compartmentIn->fAll[idx3] * d_brCompVar->transferThroughConsolidation[idx3];
d_compartmentOut->dfGasdt[idx3] += d_aggCompVar->formationOfGasThroughAggregationCA[idx3] - d_aggCompVar->depletionOfGasThroughAggregation[idx3];
d_compartmentOut->dfGasdt[idx3] += d_brCompVar->gasBirthThroughBreakage1[idx3] + d_brCompVar->formationOfGasThroughBreakageCA[idx3];
d_compartmentOut->dfGasdt[idx3] -= d_brCompVar->depletionOfGasThroughBreakage[idx3];
__syncthreads();
if (tix == 0)
{
for (int i = bix * bdx; i < ((bix +1) * bdx); i++)
{
d_compartmentOut->formationThroughAggregation[bix] += d_aggCompVar->formationThroughAggregationCA[i];
d_compartmentOut->depletionThroughAggregation[bix] += d_aggCompVar->depletionThroughAggregation[i];
d_compartmentOut->formationThroughBreakage[bix] += d_brCompVar->formationThroughBreakageCA[i] + d_brCompVar->gasBirthThroughBreakage1[i];
d_compartmentOut->depletionThroughBreakage[bix] += d_brCompVar->depletionThroughBreakage[i];
}
}
}
// ===================================== MAIN FUNCTION ======================================================
int main(int argc, char *argv[])
{
cout << "Code begins..." << endl;
// Read passed arguments
string startTimeStr;
double startTime = static_cast<double>(clock()) / static_cast<double>(CLOCKS_PER_SEC); // CPU-time reference for the elapsed-time report at the end
liggghtsData *lData = nullptr;
parameterData *pData = nullptr;
string coreVal;
string diaVal;
string pbmInFilePath;
string timeVal;
if (argc <5)
{
cout << "All values are not available as imput parameters " << endl;
return 1;
}
pbmInFilePath = string(argv[1]);
coreVal = string(argv[2]);
diaVal = string(argv[3]);
timeVal = string(argv[4]);
pData = parameterData::getInstance();
pData->readPBMInputFile(pbmInFilePath);
int nCompartments = pData->nCompartments;
unsigned int nFirstSolidBins = pData->nFirstSolidBins;
unsigned int nSecondSolidBins = pData->nSecondSolidBins;
size_t size1D = nFirstSolidBins;
size_t size2D = size1D * nSecondSolidBins;
size_t size3D = size2D * nCompartments;
size_t size4D = size2D * size2D;
size_t size5D = size4D * nCompartments;
CompartmentIn compartmentIn(size2D, size5D, 0), x_compartmentIn(size2D, size5D, 1), *d_compartmentIn;
PreviousCompartmentIn prevCompInData(size2D, size5D, 0), x_prevCompInData(size2D, size5D, 1), *d_prevCompInData;
CompartmentOut compartmentOut(size2D, size5D, 0), x_compartmentOut(size2D, size5D, 1), *d_compartmentOut;
CompartmentDEMIn compartmentDEMIn(size2D, size5D, 0), x_compartmentDEMIn(size2D, size5D, 1), *d_compartmentDEMIn;
vector<double> h_vs(size1D, 0.0);
vector<double> h_vss(size1D, 0.0);
// Bin Initiation
double fsVolCoeff = pData->fsVolCoeff;
double fsVolBase = pData->fsVolBase;
for (size_t i = 0; i < nFirstSolidBins; i++)
h_vs[i] = fsVolCoeff * pow(fsVolBase, i); // m^3
double ssVolCoeff = pData->ssVolCoeff;
double ssVolBase = pData->ssVolBase;
for (size_t i = 0; i < nSecondSolidBins; i++)
h_vss[i] = ssVolCoeff * pow(ssVolBase, i); // m^3
arrayOfDouble2D diameter1 = getArrayOfDouble2D(nFirstSolidBins, nSecondSolidBins);
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
diameter1[s][ss] = cbrt((6/M_PI) * (h_vs[s] + h_vss[ss]));
vector<double> diameter = linearize2DVector(diameter1);
vector<double> particleIn;
particleIn.push_back(726657587.0);
particleIn.push_back(286654401.0);
particleIn.push_back(118218011.0);
particleIn.push_back(50319795.0);
particleIn.push_back(20954036.0);
particleIn.push_back(7345998.0);
particleIn.push_back(1500147.0);
particleIn.push_back(76518.0);
particleIn.push_back(149.0);
vector<double> h_fIn(size2D, 0.0);
for (size_t i = 0; i < particleIn.size(); i++)
h_fIn[i * size1D + i] = particleIn[i];
// allocation of memory for the matrices that will be copied onto the device from the host
double *d_vs = device_alloc_double_vector(size1D);
double *d_vss = device_alloc_double_vector(size1D);
double *d_sMeshXY = device_alloc_double_vector(size2D);
double *d_ssMeshXY = device_alloc_double_vector(size2D);
double *d_sAgg = device_alloc_double_vector(size2D);
double *d_ssAgg = device_alloc_double_vector(size2D);
int *d_sAggregationCheck = device_alloc_integer_vector(size2D);
int *d_ssAggregationCheck = device_alloc_integer_vector(size2D);
double *d_sLow = device_alloc_double_vector(size2D);
double *d_ssLow = device_alloc_double_vector(size2D);
double *d_sHigh = device_alloc_double_vector(size2D);
double *d_ssHigh = device_alloc_double_vector(size2D);
int *d_sLoc = device_alloc_integer_vector(size2D);
int *d_ssLoc = device_alloc_integer_vector(size2D);
int *d_sInd = device_alloc_integer_vector(size2D);
int *d_ssInd = device_alloc_integer_vector(size2D);
double *d_sBreak = device_alloc_double_vector(size2D);
double *d_ssBreak = device_alloc_double_vector(size2D);
int *d_sLocBreak = device_alloc_integer_vector(size2D);
int *d_ssLocBreak = device_alloc_integer_vector(size2D);
int *d_sCheckB = device_alloc_integer_vector(size2D);
int *d_ssCheckB = device_alloc_integer_vector(size2D);
int *d_sIndB = device_alloc_integer_vector(size2D);
int *d_ssIndB = device_alloc_integer_vector(size2D);
// defining vectors for data required for compartment calculations
vector<double> h_sMeshXY(size2D, 0.0);
vector<double> h_ssMeshXY(size2D, 0.0);
vector<int> h_sAggregationCheck(size2D, 0);
vector<int> h_ssAggregationCheck(size2D, 0);
vector<double> h_sLow(size2D, 0.0);
vector<double> h_ssLow(size2D, 0.0);
vector<double> h_sHigh(size2D, 0.0);
vector<double> h_ssHigh(size2D, 0.0);
vector<int> h_sInd(size2D, 0);
vector<int> h_ssInd(size2D, 0);
vector<int> h_sCheckB(size2D, 0);
vector<int> h_ssCheckB(size2D, 0);
vector<int> h_sIndB(size2D, 0.0);
vector<int> h_ssIndB(size2D, 0.0);
vector<int> h_sLocBreak(size2D, 0.0);
vector<int> h_ssLocBreak(size2D, 0.0);
vector<double> h_sBreak(size2D, 0.0);
vector<double> h_ssBreak(size2D, 0.0);
copy_double_vector_fromHtoD(d_vs, h_vs.data(), size1D);
copy_double_vector_fromHtoD(d_vss, h_vss.data(), size1D);
int nBlocks = nFirstSolidBins;
int nThreads = nSecondSolidBins;
initialization_kernel<<<nBlocks,nThreads>>>(d_vs, d_vss, size2D, fsVolCoeff, ssVolCoeff, fsVolBase, ssVolBase, d_sAgg,d_ssAgg, d_sAggregationCheck, d_ssAggregationCheck,
d_sLow, d_ssLow, d_sHigh, d_ssHigh, d_sMeshXY, d_ssMeshXY, d_sLoc, d_ssLoc, d_sInd, d_ssInd, d_sBreak, d_ssBreak, d_sLocBreak, d_ssLocBreak,
d_sCheckB, d_ssCheckB, d_sIndB, d_ssIndB);
cudaError_t err = cudaSuccess;
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch initialization kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cout << "Initialization complete" << endl;
// copy back data required for the compartment calculations
copy_double_vector_fromDtoH(h_sMeshXY.data(), d_sMeshXY, size2D);
copy_double_vector_fromDtoH(h_ssMeshXY.data(), d_ssMeshXY, size2D);
copy_integer_vector_fromDtoH(h_sAggregationCheck.data(), d_sAggregationCheck, size2D);
copy_integer_vector_fromDtoH(h_ssAggregationCheck.data(), d_ssAggregationCheck, size2D);
copy_double_vector_fromDtoH(h_sLow.data(), d_sLow, size2D);
copy_double_vector_fromDtoH(h_ssLow.data(), d_ssLow, size2D);
copy_double_vector_fromDtoH(h_sHigh.data(), d_sHigh, size2D);
copy_double_vector_fromDtoH(h_ssHigh.data(), d_ssHigh, size2D);
copy_integer_vector_fromDtoH(h_sInd.data(), d_sInd, size2D);
copy_integer_vector_fromDtoH(h_ssInd.data(), d_ssInd, size2D);
copy_integer_vector_fromDtoH(h_sCheckB.data(), d_sCheckB, size2D);
copy_integer_vector_fromDtoH(h_ssCheckB.data(), d_ssCheckB, size2D);
copy_integer_vector_fromDtoH(h_sIndB.data(), d_sIndB, size2D);
copy_integer_vector_fromDtoH(h_ssIndB.data(), d_ssIndB, size2D);
copy_integer_vector_fromDtoH(h_sLocBreak.data(), d_sLocBreak, size2D);
copy_integer_vector_fromDtoH(h_ssLocBreak.data(), d_ssLocBreak, size2D);
copy_double_vector_fromDtoH(h_sBreak.data(), d_sBreak, size2D);
copy_double_vector_fromDtoH(h_ssBreak.data(), d_ssBreak, size2D);
cudaDeviceSynchronize();
DUMP(h_sMeshXY);
DUMP(h_ssMeshXY);
DUMP(h_sAggregationCheck);
DUMP(h_ssAggregationCheck);
DUMP(h_sLow);
DUMP(h_ssLow);
DUMP(h_sHigh);
DUMP(h_ssHigh);
DUMP(h_sInd);
DUMP(h_ssInd);
DUMP(h_sCheckB);
DUMP(h_ssCheckB);
DUMP(h_sIndB);
DUMP(h_ssIndB);
DUMP(h_sLocBreak);
DUMP(h_ssLocBreak);
DUMP(h_sBreak);
DUMP(h_ssBreak);
vector<double> h_fAllCompartments(size3D, 0.0);
vector<double> h_flAllCompartments(size3D, 0.0);
vector<double> h_fgAllCompartments(size3D, 0.0);
vector<double> h_dfdtAllCompartments(size3D, 0.0);
vector<double> h_dfldtAllCompartments(size3D, 0.0);
vector<double> h_dfgdtAllCompartments(size3D, 0.0);
vector<double> h_externalVolumeBinsAllCompartments(size3D, 0.0);
vector<double> h_internalVolumeBinsAllCompartments(size3D, 0.0);
vector<double> h_liquidBinsAllCompartments(size3D, 0.0);
vector<double> h_gasBinsAllCompartments(size3D, 0.0);
vector<double> h_totalVolumeBinsAllCompartments(size3D, 0.0);
vector<double> h_internalLiquidAllCompartments(size3D, 0.0);
vector<double> h_externalLiquidAllCompartments(size3D, 0.0);
vector<double> h_internalVolumeBins(size2D, 0.0);
vector<double> h_externalVolumeBins(size2D, 0.0);
lData = liggghtsData::getInstance();
lData->readLiggghtsDataFiles(coreVal, diaVal);
vector<double> DEMDiameter = lData->getDEMParticleDiameters();
if ((DEMDiameter).size() == 0)
{
cout << "Diameter data is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
vector<double> DEMImpactData = lData->getFinalDEMImpactData();
if ((DEMImpactData).size() == 0)
{
cout << "Impact data is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
arrayOfDouble2D DEMCollisionData = lData->getFinalDEMCollisionData();
if (DEMCollisionData.size() == 0)
{
cout << "Collision data is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
vector<double> velocity = lData->getFinalDEMImpactVelocity();
if (velocity.size() == 0)
{
cout << "Velocity is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
vector<double> colVelocity = lData->getFinalDEMCollisionVelocity();
if (colVelocity.size() == 0)
{
cout << "Velocity is missing in LIGGGHTS collision output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
// moved velocity based probability calculation to the model from kernel.cpp to reduce computation
double demTimeStep = pData->demTimeStep;
copy_double_vector_fromHtoD(x_compartmentDEMIn.velocityCol, colVelocity.data(), size1D);
double inverseDiameterSum = 0.0;
double inverseMassSum = 0.0;
int sized = DEMDiameter.size();
double solDensity = pData->solDensity;
for (int i = 0; i < sized; i++)
{
inverseDiameterSum += (1 / DEMDiameter[i]);
inverseMassSum += (1 / ((4 / 3) * M_PI * pow((DEMDiameter[i] / 2), 3) * solDensity));
}
double coefOfRest = pData->coefOfRest;
double liqThick = pData->liqThick;
double surfAsp = pData->surfAsp;
double bindVisc = pData->bindVisc;
double sumVelo = 0.0;
double harmonic_diameter = sized / inverseDiameterSum;
double harmonic_mass = sized / inverseMassSum;
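    // Note: harmonic_diameter and harmonic_mass are harmonic means, N / sum(1/x_i). For example, two
    // particle diameters of 1.0 mm and 2.0 mm give 2 / (1/1.0 + 1/2.0) = 1.33 mm, so smaller particles
    // are weighted more strongly than in an arithmetic mean.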
double uCritical = (10 + (1 / coefOfRest)) * log((liqThick / surfAsp)) * (3 * M_PI * pow(harmonic_diameter, 2) * bindVisc) / (8 * harmonic_mass);
// x_compartmentDEMIn.uCriticalCol[0] = uCritical;
copy_double_vector_fromHtoD(x_compartmentDEMIn.uCriticalCol, &uCritical, 1);
// cout << "Critical velocity for agg is " << uCritical << endl;
int veloSize = colVelocity.size();
for (int i = 0; i < veloSize; i++)
sumVelo += colVelocity[i];
unsigned int nDEMBins = pData->nDEMBins;
double averageVelocity = sumVelo / nDEMBins;
double stdDevVelocity = 0.0;
double varianceVelocity = 0.0;
for (int i = 0; i < veloSize; ++i)
varianceVelocity += pow((colVelocity[i] - averageVelocity), 2) / nDEMBins;
stdDevVelocity = sqrt(varianceVelocity);
//double intVelocity = 0.0;
vector<double> colProbablityOfVelocity(veloSize, 0.0);
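    // Note (for reference): the textbook log-normal PDF is
    //   f(v) = 1 / (v * sigma * sqrt(2*pi)) * exp(-(ln(v) - mu)^2 / (2 * sigma^2)),
    // where mu and sigma are the mean and standard deviation of ln(v); the loop below evaluates the
    // probability in terms of averageVelocity and varianceVelocity of the raw collision velocities.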
for (int i = 0; i < veloSize; i++)
{
colProbablityOfVelocity[i] = (1 / (colVelocity[i] * sqrt(2 * M_PI) * stdDevVelocity)) * exp(-((log(colVelocity[i]) - averageVelocity) / (2 * pow(varianceVelocity, 2))));
// cout << "Probability at " << velocity[i] << "is " << colProbablityOfVelocity[i] << endl;
}
copy_double_vector_fromHtoD(x_compartmentDEMIn.colProbability, colProbablityOfVelocity.data(), size1D);
// vector<double> impactFrequency = DEMImpactData;
// for (int s = 0; s < nFirstSolidBins; s++)
// for (int ss = 0; ss < nSecondSolidBins; ss++)
// for (int i = 0; i < nDEMBins; i++)
// {
// if (fAll[n2] > 0.0)
// impactFrequency[i] = (DEMImpactData[i] * timeStep) / demTimeStep;
// }
double critStDefNum = pData->critStDefNum;
double initPorosity = pData->initPorosity;
double Ubreak = (2 * critStDefNum / solDensity) * (9 / 8.0) * (pow((1 - initPorosity), 2) / pow(initPorosity, 2)) * (9 / 16.0) * (bindVisc / DEMDiameter[0]);
// x_compartmentDEMIn.ubreak[0] = Ubreak;
copy_double_vector_fromHtoD(x_compartmentDEMIn.ubreak, &Ubreak, 1);
int size1 = velocity.size();
double sum = 0.0;
for (int i = 0; i < size1; i++)
sum += velocity[i];
double averageVelocityBr = sum / nDEMBins;
double stdDevVelocityBr = 0.0;
double varianceVelocityBr = 0.0;
for (int i = 0; i < size1; ++i)
{
varianceVelocityBr += pow((velocity[i] - averageVelocityBr), 2) / nDEMBins;
}
stdDevVelocityBr = sqrt(varianceVelocityBr);
//double intVelocity = 0.0;
// cout << "Std Dev. of Velocity = " << stdDevVelocity << endl;
vector<double> breakageProbablityOfVelocity(size1, 0.0);
for (int i = 0; i < size1; i++)
{
if (velocity[i] != 0)
{
breakageProbablityOfVelocity[i] = (1 / (velocity[i] * sqrt(2 * M_PI) * stdDevVelocityBr)) * exp(-((log(velocity[i]) - averageVelocityBr) / (2 * pow(varianceVelocityBr, 2))));
}
}
copy_double_vector_fromHtoD(x_compartmentDEMIn.brProbability, breakageProbablityOfVelocity.data(), size1D);
DUMP2D(DEMCollisionData);
DUMP(DEMDiameter);
DUMP(DEMImpactData);
DUMP(velocity);
//Initialize DEM data for compartment
copy_double_vector_fromHtoD(x_compartmentDEMIn.DEMDiameter, DEMDiameter.data(), size1D);
copy_double_vector_fromHtoD(x_compartmentDEMIn.DEMCollisionData, linearize2DVector(DEMCollisionData).data(), size2D);
copy_double_vector_fromHtoD(x_compartmentDEMIn.DEMImpactData, DEMImpactData.data(), size1D);
vector<double> liquidAdditionRateAllCompartments(nCompartments, 0.0);
double liqSolidRatio = pData->liqSolidRatio;
double throughput = pData->throughput;
double liqDensity = pData->liqDensity;
double liquidAddRate = (liqSolidRatio * throughput) / (liqDensity * 3600);
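    // Note: assuming throughput is a mass flow in kg/h, liqSolidRatio is a mass ratio and liqDensity is
    // in kg/m^3, dividing by (liqDensity * 3600) converts the liquid mass flow to a volumetric rate in
    // m^3/s; the whole rate is assigned to the first compartment only.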
liquidAdditionRateAllCompartments[0] = liquidAddRate;
arrayOfDouble2D h_fAllCompartmentsOverTime;
arrayOfDouble2D h_externalVolumeBinsAllCompartmentsOverTime;
arrayOfDouble2D h_internalVolumeBinsAllCompartmentsOverTime;
arrayOfDouble2D h_liquidBinsAllCompartmentsOverTime;
arrayOfDouble2D h_gasBinsAllCompartmentsOverTime;
double granulatorLength = pData->granulatorLength;
double partticleResTime = pData->partticleResTime;
double particleAveVelo = granulatorLength / partticleResTime;
vector<double> particleAverageVelocity(nCompartments, particleAveVelo);
//Initialize input data for compartment
copy_double_vector_fromHtoD(x_compartmentIn.vs, h_vs.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.vss, h_vss.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.diameter, diameter.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.sMeshXY, h_sMeshXY.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.ssMeshXY, h_ssMeshXY.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sAggregationCheck, h_sAggregationCheck.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssAggregationCheck, h_ssAggregationCheck.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.sLow, h_sLow.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.sHigh, h_sHigh.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.ssLow, h_ssLow.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.ssHigh, h_ssHigh.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sInd, h_sInd.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssInd, h_ssInd.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sCheckB, h_sCheckB.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssCheckB, h_ssCheckB.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sIndB, h_sIndB.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssIndB, h_ssIndB.data(), size2D);
vector<int> sieveGrid;
sieveGrid.push_back(38);
sieveGrid.push_back(63);
sieveGrid.push_back(90);
sieveGrid.push_back(125);
sieveGrid.push_back(250);
sieveGrid.push_back(355);
sieveGrid.push_back(500);
sieveGrid.push_back(710);
sieveGrid.push_back(850);
sieveGrid.push_back(1000);
sieveGrid.push_back(1400);
sieveGrid.push_back(2000);
sieveGrid.push_back(2380);
sieveGrid.push_back(4000);
size_t nSieveGrid = sieveGrid.size();
vector<double> d10OverTime(size2D, 0.0);
vector<double> d50OverTime(size2D, 0.0);
vector<double> d90OverTime(size2D, 0.0);
double time = stod(timeVal); // initial time to start PBM
double timeStep = 0.5; //1.0e-1;
vector<double> Time;
double lastTime = time;
int timeIdxCount = 0;
int lastTimeIdxCount = 0;
double premixTime = pData->premixTime;
double liqAddTime = pData->liqAddTime;
double postMixTime = pData->postMixTime;
double finalTime = premixTime + liqAddTime + postMixTime + stod(timeVal);
vector<double *> formationThroughAggregationOverTime;
vector<double *> depletionThroughAggregationOverTime;
vector<double *> formationThroughBreakageOverTime;
vector<double *> depletionThroughBreakageOverTime;
cout << "time" << endl;
    // defining compartment variable pointers
CompartmentVar compVar(size3D, size5D, 0), d_compVarCpy(size3D, size5D, 1), *d_compVar;
AggregationCompVar aggCompVar(size3D, size5D, 0), x_aggCompVar(size3D, size5D, 1), *d_aggCompVar;
BreakageCompVar brCompVar(size3D, size5D, 0), x_brCompVar(size3D, size5D, 1), *d_brCompVar;
// allocating memory for structures used for compartment calculations
err = cudaMalloc(&d_compartmentIn, sizeof(CompartmentIn));
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMalloc : CompartmentIn (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc(&d_prevCompInData, sizeof(PreviousCompartmentIn));
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMalloc : prevCompInData (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc(&d_compartmentDEMIn, sizeof(CompartmentDEMIn));
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMalloc : compartmentDEMIn (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void**) &d_compVar, sizeof(CompartmentVar));
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMalloc : CompartmentVar (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc(&d_aggCompVar, sizeof(AggregationCompVar));
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMalloc : AggregationCompVar (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc(&d_brCompVar, sizeof(BreakageCompVar));
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMalloc : BreakageCompVar (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc(&d_compartmentOut, sizeof(CompartmentOut));
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMalloc : d_compartmentOut (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
    // copying data to the allocated GPU
cudaMemcpy(d_compartmentIn, &x_compartmentIn, sizeof(CompartmentIn), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : CompartmentIn (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMemcpy(d_prevCompInData, &x_prevCompInData, sizeof(PreviousCompartmentIn), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : PreviousCompartmentIn (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMemcpy(d_compartmentDEMIn, &x_compartmentDEMIn, sizeof(CompartmentDEMIn), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : CompartmentDEMIn (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMemcpy(d_compVar, &d_compVarCpy, sizeof(CompartmentVar), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : CompartmentVar (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
double aggKernelConst = pData->aggKernelConst;
// x_aggCompVar.aggKernelConst[0] = aggKernelConst;
copy_double_vector_fromHtoD(x_aggCompVar.aggKernelConst, &aggKernelConst, 1);
double brkKernelConst = pData->brkKernelConst;
// x_brCompVar.brkKernelConst[0] = brkKernelConst;
copy_double_vector_fromHtoD(x_brCompVar.brkKernelConst, &brkKernelConst, 1);
cudaMemcpy(d_aggCompVar, &x_aggCompVar, sizeof(AggregationCompVar), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : AggregationCompVar (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMemcpy(d_brCompVar, &x_brCompVar, sizeof(BreakageCompVar), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : BreakageCompVar (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMemcpy(d_compartmentOut, &x_compartmentOut, sizeof(CompartmentOut), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : compartmentOut (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// vector<double> h_formationThroughAggregation(nCompartments, 0.0);
// vector<double> h_depletionThroughAggregation(nCompartments, 0.0);
// vector<double> h_formationThroughBreakage(nCompartments, 0.0);
// vector<double> h_depletionThroughBreakage(nCompartments, 0.0);
double *d_formationThroughAggregation = device_alloc_double_vector(nCompartments);
double *d_depletionThroughAggregation = device_alloc_double_vector(nCompartments);
double *d_formationThroughBreakage = device_alloc_double_vector(nCompartments);
double *d_depletionThroughBreakage = device_alloc_double_vector(nCompartments);
double *d_fAllCompartments = device_alloc_double_vector(size3D);
double *d_flAllCompartments = device_alloc_double_vector(size3D);
double *d_fgAllCompartments = device_alloc_double_vector(size3D);
double *d_liquidAdditionRateAllCompartments = device_alloc_double_vector(nCompartments);
double *d_fIn = device_alloc_double_vector(size2D);
copy_double_vector_fromHtoD(d_liquidAdditionRateAllCompartments, liquidAdditionRateAllCompartments.data(), nCompartments);
copy_double_vector_fromHtoD(d_fIn, h_fIn.data(), size2D);
// dim3 compKernel_nblocks, compKernel_nthreads;
// compKernel_nblocks = dim3(nCompartments,1,1);
// compKernel_nthreads = dim3(size2D, size2D,1);
int compKernel_nblocks = 16;
int compKernel_nthreads = size2D * size2D;
// double granulatorLength = pData->granulatorLength;
// double partticleResTime = pData->partticleResTime;
// double premixTime = pData->premixTime;
// double liqAddTime = pData->liqAddTime;
double consConst = pData->consConst;
double minPorosity = pData->minPorosity;
double granSatFactor = pData->granSatFactor;
int threads = size2D;
cudaDeviceSynchronize();
while (time <= finalTime)
{
CompartmentOut h_results(size2D, size5D, 1);
copy_double_vector_fromHtoD(d_fAllCompartments, h_fAllCompartments.data(), size3D);
copy_double_vector_fromHtoD(d_flAllCompartments, h_flAllCompartments.data(), size3D);
copy_double_vector_fromHtoD(d_fgAllCompartments, h_fgAllCompartments.data(), size3D);
cudaDeviceSetLimit(cudaLimitDevRuntimePendingLaunchCount, 0);
launchCompartment<<<nCompartments,threads>>>(d_compartmentIn, d_prevCompInData, d_compartmentOut, d_compartmentDEMIn, d_compVar, d_aggCompVar, d_brCompVar,
time, timeStep, stod(timeVal), d_formationThroughAggregation, d_depletionThroughAggregation,d_formationThroughBreakage,
d_depletionThroughBreakage, d_fAllCompartments, d_flAllCompartments, d_fgAllCompartments,
d_liquidAdditionRateAllCompartments, size2D, size3D, size4D, d_fIn, initPorosity, demTimeStep, nFirstSolidBins, nSecondSolidBins,
granulatorLength, partticleResTime, premixTime, liqAddTime, consConst, minPorosity, nCompartments, granSatFactor, aggKernelConst, brkKernelConst);
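        // Note: the launch above uses one block per compartment (nCompartments blocks) with
        // threads = size2D threads per block, i.e. one thread per (s, ss) bin inside each compartment.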
// cudaDeviceSynchronize();
        err = cudaSuccess; // check kernel launch
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch launchCompartment kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cout << "Compartment ended " << endl;
        // Copying data structures required for calculation
err = cudaMemcpy(&h_results, d_compartmentOut, sizeof(CompartmentOut), cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : CompartmentOut D to Hmake (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// copy necessary variables back to the CPU
copy_double_vector_fromDtoH(compartmentOut.dfAlldt, h_results.dfAlldt, size3D);
copy_double_vector_fromDtoH(compartmentOut.dfLiquiddt, h_results.dfLiquiddt, size3D);
copy_double_vector_fromDtoH(compartmentOut.dfGasdt, h_results.dfGasdt, size3D);
copy_double_vector_fromDtoH(compartmentOut.liquidBins, h_results.liquidBins, size3D);
copy_double_vector_fromDtoH(compartmentOut.gasBins, h_results.gasBins, size3D);
copy_double_vector_fromDtoH(compartmentOut.formationThroughAggregation, h_results.formationThroughAggregation, size1D);
copy_double_vector_fromDtoH(compartmentOut.depletionThroughAggregation, h_results.depletionThroughAggregation, size1D);
copy_double_vector_fromDtoH(compartmentOut.formationThroughBreakage, h_results.formationThroughBreakage, size1D);
copy_double_vector_fromDtoH(compartmentOut.depletionThroughBreakage, h_results.depletionThroughBreakage, size1D);
// copy_double_vector_fromDtoH(h_fAllCompartments.data(), d_fAllCompartments, size3D);
// copy_double_vector_fromDtoH(h_flAllCompartments.data(), d_flAllCompartments, size3D);
// copy_double_vector_fromDtoH(h_fgAllCompartments.data(), d_fgAllCompartments, size3D);
formationThroughAggregationOverTime.push_back(compartmentOut.formationThroughAggregation);
depletionThroughAggregationOverTime.push_back(compartmentOut.depletionThroughAggregation);
formationThroughBreakageOverTime.push_back(compartmentOut.formationThroughBreakage);
depletionThroughBreakageOverTime.push_back(compartmentOut.depletionThroughBreakage);
for (int w = 0; w < nCompartments; w++)
{
cout << "compartmentOut.formationThroughAggregation = " << compartmentOut.formationThroughAggregation[w] << endl;
cout << "compartmentOut.depletionThroughAggregation = " << compartmentOut.depletionThroughAggregation[w] << endl;
cout << "compartmentOut.formationThroughBreakage = " << compartmentOut.formationThroughBreakage[w] << endl;
cout << "compartmentOut.depletionThroughBreakage = " << compartmentOut.depletionThroughBreakage[w] << endl;
}
double maxofthree = -DBL_MAX;
double maxAll = -DBL_MAX;
double maxLiquid = -DBL_MAX;
double maxGas = -DBL_MAX;
for (size_t i = 0; i < size3D; i++)
{
// cout << "compartmentOut.dfAlldt[" << i << "] is " << compartmentOut.dfAlldt[i] << endl;
if (fabs(h_fAllCompartments[i]) > 1.0e-16)
maxAll = max(maxAll, -compartmentOut.dfAlldt[i] / h_fAllCompartments[i]);
if (fabs(h_flAllCompartments[i]) > 1.0e-16)
maxLiquid = max(maxLiquid, -compartmentOut.dfLiquiddt[i] / h_flAllCompartments[i]);
if (fabs(h_fgAllCompartments[i]) > 1.0e-16)
maxGas = max(maxGas, -compartmentOut.dfGasdt[i] / h_fgAllCompartments[i]);
maxofthree = max(maxofthree, max(maxAll, max(maxLiquid, maxGas)));
}
cout << "maxAll = " << maxAll << endl;
cout << "maxLiquid = " << maxLiquid << endl;
cout << "maxGas = " << maxGas << endl;
cout << "maxofthree = " << maxofthree << endl;
while (maxofthree < 0.1 / timeStep && timeStep < 0.25)
timeStep *= 2.0;
while (maxofthree > 0.1 / timeStep && timeStep > 5.0e-5)
timeStep /= 2.0;
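        // Note: the two loops above adapt the step so that roughly timeStep <= 0.1 / maxofthree, bounded
        // to [5.0e-5, 0.25]. Worked example: with maxofthree = 10 and timeStep = 0.5, the halving loop
        // gives 0.5 -> 0.25 -> ... -> 0.0078125, the first value for which maxofthree > 0.1 / timeStep fails.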
int nanCount = 0;
double minfAll = -DBL_MAX;
for (size_t i = 0; i < size3D; i++)
{
double value = 0.0;
h_fAllCompartments[i] += compartmentOut.dfAlldt[i] * timeStep;
// cout << " h_fAllCompartments[" << i <<"] is " << h_fAllCompartments[i] << endl;
if (std::isnan(h_fAllCompartments[i]))
nanCount++;
value = h_flAllCompartments[i] + compartmentOut.dfLiquiddt[i] * timeStep;
h_flAllCompartments[i] = value > 0.0 ? value : 0.0;
value = h_fgAllCompartments[i] + compartmentOut.dfGasdt[i] * timeStep;
h_fgAllCompartments[i] = value > 0.0 ? value : 0.0;
}
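        // Note: the loop above is an explicit (forward) Euler update, f_new = f + (df/dt) * timeStep, with
        // the liquid and gas bins clamped to zero from below after each step.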
if (nanCount)
{
cout << endl << "*****fAllCompartments has " << nanCount << "nan values******" << endl << endl;
DUMPCSV(h_fAllCompartments);
exit(EXIT_FAILURE);
}
        int countnegfAll = 0;
        for (size_t i = 0; i < size3D; i++) // count bins with significantly negative number density for the check below
            countnegfAll += (h_fAllCompartments[i] < -1.0e-16) ? 1 : 0;
        minfAll = getMinimumOfArray(h_fAllCompartments);
if (minfAll < -1.0e-16 && countnegfAll > 0.1 * nCompartments * nFirstSolidBins * nSecondSolidBins)
{
//int mpi_err = 0;
cout << endl;
//DUMP3DCSV(dfdtAllCompartments);
//DUMP3DCSV(fAllCompartments);
//cout << "My process id = " << mpi_id << endl;
cout << "minfAll" << minfAll << endl;
cout << "******fAllCompartments has negative values********" << endl;
cout << "Number of negative values = " << countnegfAll << endl;
DUMPCSV(h_fAllCompartments);
cout << "Aborting..." << endl;
return 1;
exit(EXIT_FAILURE);
}
// BIN recalculation
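        // Note: the loop below converts the extensive per-compartment quantities back into per-particle bin
        // values: liquidBins = fl / f and gasBins = fg / f wherever the number density f is non-negligible,
        // and the internal/external liquid and volume bins are rebuilt from the solid mesh volumes.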
for (int c = 0; c < nCompartments; c++)
{
vector<double> liquidBins(size2D, 0.0);
vector<double> gasBins(size2D, 0.0);
vector<double> internalLiquid(size2D, 0.0);
vector<double> externalLiquid(size2D, 0.0);
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
{
int m = c * nFirstSolidBins * nSecondSolidBins + s * nSecondSolidBins + ss;
int n2 = s * nSecondSolidBins + ss;
if (fabs(h_fAllCompartments[m]) > 1.0e-16)
{
liquidBins[n2] = h_flAllCompartments[m] / h_fAllCompartments[m];
gasBins[n2] = h_fgAllCompartments[m] / h_fAllCompartments[m];
}
internalLiquid[n2] = min(granSatFactor * gasBins[n2], liquidBins[n2]);
externalLiquid[n2] = max(0.0, liquidBins[n2] - internalLiquid[n2]);
double value = compartmentIn.sMeshXY[n2] + compartmentIn.ssMeshXY[n2] + gasBins[n2];
h_internalVolumeBins[n2] = value + internalLiquid[n2];
h_externalVolumeBins[n2] = value + liquidBins[n2];
h_liquidBinsAllCompartments[m] = liquidBins[n2];
h_gasBinsAllCompartments[m] = gasBins[n2];
h_externalVolumeBinsAllCompartments[m] = h_externalVolumeBins[n2];
h_internalVolumeBinsAllCompartments[m] = h_internalVolumeBins[n2];
}
}
vector<double> d10OverCompartment(nCompartments, 0.0);
vector<double> d50OverCompartment(nCompartments, 0.0);
vector<double> d90OverCompartment(nCompartments, 0.0);
for (int c = 0; c < nCompartments; c++)
{
arrayOfDouble2D diameter = getArrayOfDouble2D(nFirstSolidBins, nSecondSolidBins);
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
{
                    int m = c * nFirstSolidBins * nSecondSolidBins + s * nSecondSolidBins + ss;
diameter[s][ss] = cbrt((6 / M_PI) * h_externalVolumeBinsAllCompartments[m]) * 1.0e6;
}
vector<double> totalVolumeGrid(nSieveGrid, 0.0);
for (size_t d = 0; d < nSieveGrid - 1; d++)
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
{
                    int m = c * nFirstSolidBins * nSecondSolidBins + s * nSecondSolidBins + ss;
if (diameter[s][ss] < sieveGrid[d + 1] && diameter[s][ss] >= sieveGrid[d])
totalVolumeGrid[d] += h_fAllCompartments[m] * h_externalVolumeBinsAllCompartments[m];
}
double sum = 0.0;
for (size_t d = 0; d < nSieveGrid; d++)
sum += totalVolumeGrid[d];
vector<double> volumeDistribution(nSieveGrid, 0.0);
for (size_t d = 0; d < nSieveGrid; d++)
volumeDistribution[d] = totalVolumeGrid[d] / sum;
vector<double> cumulativeVolumeDistribution(nSieveGrid, 0.0);
sum = 0.0;
for (size_t d = 0; d < nSieveGrid; d++)
{
sum += volumeDistribution[d];
cumulativeVolumeDistribution[d] = sum;
}
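            // Note: d10/d50/d90 below are found by linear interpolation on the cumulative volume
            // distribution (CVD): when CVD crosses a target level x between sieves d-1 and d,
            //   d_x = sieveGrid[d-1] + (x - CVD[d-1]) * (sieveGrid[d] - sieveGrid[d-1]) / (CVD[d] - CVD[d-1]).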
double d10 = 0.1 * (sieveGrid[1] / cumulativeVolumeDistribution[0]);
double d50 = 0.5 * (sieveGrid[1] / cumulativeVolumeDistribution[0]);
double d90 = 0.9 * (sieveGrid[1] / cumulativeVolumeDistribution[0]);
for (size_t d = 1; d < nSieveGrid; d++)
{
double value1 = (sieveGrid[d] - sieveGrid[d - 1]) / (cumulativeVolumeDistribution[d] - cumulativeVolumeDistribution[d - 1]);
double value2 = sieveGrid[d - 1];
if (cumulativeVolumeDistribution[d - 1] < 0.5 && cumulativeVolumeDistribution[d] >= 0.5)
{
double value = 0.5 - cumulativeVolumeDistribution[d - 1];
d50 = value * value1 + value2;
}
if (cumulativeVolumeDistribution[d - 1] < 0.1 && cumulativeVolumeDistribution[d] >= 0.1)
{
double value = 0.1 - cumulativeVolumeDistribution[d - 1];
d10 = value * value1 + value2;
}
                if (cumulativeVolumeDistribution[d - 1] < 0.9 && cumulativeVolumeDistribution[d] >= 0.9)
{
double value = 0.9 - cumulativeVolumeDistribution[d - 1];
d90 = value * value1 + value2;
}
}
d10OverCompartment[c] = d10;
d50OverCompartment[c] = d50;
            d90OverCompartment[c] = d90;
}
Time.push_back(time);
//SAVING OVER TIME
//cout << endl << "************Saving over time" << endl << endl;
h_fAllCompartmentsOverTime.push_back(h_fAllCompartments);
h_externalVolumeBinsAllCompartmentsOverTime.push_back(h_externalVolumeBinsAllCompartments);
h_internalVolumeBinsAllCompartmentsOverTime.push_back(h_internalVolumeBinsAllCompartments);
h_liquidBinsAllCompartmentsOverTime.push_back(h_liquidBinsAllCompartments);
h_gasBinsAllCompartmentsOverTime.push_back(h_gasBinsAllCompartments);
cout << "time = " << time << endl;
cout << "timeStep = " << timeStep << endl;
cout << endl;
timeIdxCount++;
time += timeStep + 5;
}
size_t nTimeSteps = Time.size();
cout << endl
<< "nTimeSteps = " << nTimeSteps << endl
<< endl;
//dump values for ratio plots
dumpDiaCSVpointer(Time, formationThroughAggregationOverTime, Time.size() * nCompartments, string("FormationThroughAggregation"));
dumpDiaCSVpointer(Time, depletionThroughAggregationOverTime, Time.size() * nCompartments, string("DepletionThroughAggregation"));
dumpDiaCSVpointer(Time, formationThroughBreakageOverTime, Time.size() * nCompartments, string("FormationThroughBreakage"));
dumpDiaCSVpointer(Time, depletionThroughBreakageOverTime, Time.size() * nCompartments, string("DepletionThroughBreakage"));
double endTime = static_cast<double>(clock()) / static_cast<double>(CLOCKS_PER_SEC);
cout << "That took " << endTime - startTime << " seconds" << endl;
cout << "Code End" << endl;
return 0;
// vector<double> h(size4D, 0.0);
// for (int i = 0; i < size5D; i++)
// {
// cout << "At i = " << i << " kernel = " << compartmentOut.aggregationKernel[i] << endl;
// }
// cudaFree(d_vs);
// cudaFree(d_vss);
// cudaFree(d_sMeshXY);
// cudaFree(d_ssMeshXY);
// cudaFree(d_compartmentIn);
} |
cc5af5a4c0f26e5b2648a0d0721a97308f126864.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/native/hip/Normalization.cuh>
namespace at { namespace native {
std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(Tensor& output, Tensor& save_mean, Tensor& save_invstd, const Tensor& self, const Tensor& weight, const Tensor& bias,
const Tensor& running_mean, const Tensor& running_var, bool train, double momentum, double epsilon) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_cuda", [&] {
auto mean_st = running_mean.dtype();
auto var_st = running_var.dtype();
TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types");
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float) {
batch_norm_cuda_template<at::Half, float, int32_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon);
} else {
batch_norm_cuda_template<scalar_t, scalar_t, int32_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon);
}
} else {
if (is_half_float) {
batch_norm_cuda_template<at::Half, float, int64_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon);
} else {
batch_norm_cuda_template<scalar_t, scalar_t, int64_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon);
}
}
});
return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const Tensor& weight, const Tensor& bias,
const Tensor& running_mean, const Tensor& running_var, bool train, double momentum, double epsilon) {
auto output = at::empty_like(self, at::MemoryFormat::Contiguous);
int64_t n_input = self.size(1);
auto input_options = self.options();
// Accumulate in higher precision if input is half
if (self.scalar_type() == at::ScalarType::Half) {
input_options = input_options.dtype(ScalarType::Float);
}
Tensor save_mean, save_invstd;
if (train) {
save_mean = at::empty({n_input}, input_options);
save_invstd = at::empty({n_input}, input_options);
} else {
save_mean = at::empty({0}, input_options);
save_invstd = at::empty({0}, input_options);
}
batch_norm_cuda_out(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon);
return std::make_tuple(output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& self, const Tensor& weight, const Tensor& running_mean, const Tensor& running_var,
const Tensor& save_mean, const Tensor& save_invstd, bool train, double epsilon, std::array<bool,3> grad_input_mask) {
return AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_backward_cuda", [&] {
auto mean_st = running_mean.dtype();
auto var_st = running_var.dtype();
TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types");
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float) {
return batch_norm_backward_cuda_template<at::Half, float, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask);
} else {
return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask);
}
} else {
if (is_half_float) {
return batch_norm_backward_cuda_template<at::Half, float, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask);
} else {
return batch_norm_backward_cuda_template<scalar_t, scalar_t, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask);
}
}
});
}
std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) {
return AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_stats_cuda", [&] {
if (cuda::detail::canUse32BitIndexMath(self)) {
return batch_norm_stats_cuda_template<scalar_t, int32_t>(self, epsilon);
} else {
return batch_norm_stats_cuda_template<scalar_t, int64_t>(self, epsilon);
}
});
}
Tensor batch_norm_elemt_cuda(const Tensor& self, const Tensor& weight, const Tensor& bias,
const Tensor& mean, const Tensor& invstd, double epsilon) {
auto output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
batch_norm_elemt_cuda_out(output, self, weight, bias, mean, invstd, epsilon);
return output;
}
Tensor& batch_norm_elemt_cuda_out(Tensor& output, const Tensor& self, const Tensor& weight, const Tensor& bias,
const Tensor& mean, const Tensor& invstd, double epsilon) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_elemt", [&] {
auto mean_st = mean.dtype();
auto invstd_st = invstd.dtype();
TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types");
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float) {
batch_norm_elemt_cuda_template<at::Half, float, int32_t>(output, self, weight, bias, mean, invstd, epsilon);
} else {
batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>(output, self, weight, bias, mean, invstd, epsilon);
}
} else {
if (is_half_float) {
batch_norm_elemt_cuda_template<at::Half, float, int64_t>(output, self, weight, bias, mean, invstd, epsilon);
} else {
batch_norm_elemt_cuda_template<scalar_t, scalar_t, int64_t>(output, self, weight, bias, mean, invstd, epsilon);
}
}
});
return output;
}
// accepting input(self) here to determine template data types, since running_mean/running_var are optional
std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const Tensor& running_mean,
const Tensor& running_var, double momentum, double epsilon, int64_t count) {
std::vector<int64_t> counts(mean.size(0), count);
return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts);
}
std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const Tensor& running_mean,
const Tensor& running_var, double momentum, double epsilon, IntArrayRef counts) {
Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU));
counts_ = counts_.to(self.device()).to(running_mean.dtype());
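  // Note: at::from_blob wraps the caller-owned counts buffer without copying; the subsequent
  // .to(self.device()).to(running_mean.dtype()) is what materializes a device-side copy in the dtype
  // of the running statistics.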
return AT_DISPATCH_FLOATING_TYPES_AND_HALF(running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (cuda::detail::canUse32BitIndexMath(self)) {
return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts_);
} else {
return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts_);
}
});
}
std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd,
const Tensor& weight, bool input_g, bool weight_g, bool bias_g) {
return AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_backward_reduce", [&] {
auto mean_st = mean.dtype();
auto invstd_st = invstd.dtype();
TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types");
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float) {
return batch_norm_backward_reduce_cuda_template<at::Half, float, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g);
} else {
return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
} else {
if (is_half_float) {
return batch_norm_backward_reduce_cuda_template<at::Half, float, int64_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g);
} else {
return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
}
});
}
Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd,
const Tensor& weight, const Tensor& mean_dy, const Tensor& mean_dy_xmu) {
return AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_backward_elemt", [&] {
auto mean_st = mean.dtype();
auto invstd_st = invstd.dtype();
TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types");
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float) {
return batch_norm_backward_elemt_cuda_template<at::Half, float, int32_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu);
}
} else {
if (is_half_float) {
return batch_norm_backward_elemt_cuda_template<at::Half, float, int64_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu);
}
}
});
}
std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda(
const Tensor& self, const Tensor& running_mean, const Tensor& running_var, double momentum) {
return AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_backward", [&] {
auto mean_st = running_mean.dtype();
auto var_st = running_var.dtype();
TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types");
// <sigh> Some workloads depend on passing in half input and float stats, which is
// usually handled by cuDNN. However, the JIT sometimes replaces cuDNN calls with this
// one so it needs to support the same case, or people start to complain.
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float) {
return batch_norm_update_stats_cuda_template<at::Half, float, int32_t>(self, running_mean, running_var, momentum);
} else {
return batch_norm_update_stats_cuda_template<scalar_t, scalar_t, int32_t>(self, running_mean, running_var, momentum);
}
} else {
if (is_half_float) {
return batch_norm_update_stats_cuda_template<at::Half, float, int64_t>(self, running_mean, running_var, momentum);
} else {
return batch_norm_update_stats_cuda_template<scalar_t, scalar_t, int64_t>(self, running_mean, running_var, momentum);
}
}
});
}
} } // namespace at::native
| cc5af5a4c0f26e5b2648a0d0721a97308f126864.cu | #include <ATen/native/cuda/Normalization.cuh>
namespace at { namespace native {
std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(Tensor& output, Tensor& save_mean, Tensor& save_invstd, const Tensor& self, const Tensor& weight, const Tensor& bias,
const Tensor& running_mean, const Tensor& running_var, bool train, double momentum, double epsilon) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_cuda", [&] {
auto mean_st = running_mean.dtype();
auto var_st = running_var.dtype();
TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types");
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float) {
batch_norm_cuda_template<at::Half, float, int32_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon);
} else {
batch_norm_cuda_template<scalar_t, scalar_t, int32_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon);
}
} else {
if (is_half_float) {
batch_norm_cuda_template<at::Half, float, int64_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon);
} else {
batch_norm_cuda_template<scalar_t, scalar_t, int64_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon);
}
}
});
return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const Tensor& weight, const Tensor& bias,
const Tensor& running_mean, const Tensor& running_var, bool train, double momentum, double epsilon) {
auto output = at::empty_like(self, at::MemoryFormat::Contiguous);
int64_t n_input = self.size(1);
auto input_options = self.options();
// Accumulate in higher precision if input is half
if (self.scalar_type() == at::ScalarType::Half) {
input_options = input_options.dtype(ScalarType::Float);
}
Tensor save_mean, save_invstd;
if (train) {
save_mean = at::empty({n_input}, input_options);
save_invstd = at::empty({n_input}, input_options);
} else {
save_mean = at::empty({0}, input_options);
save_invstd = at::empty({0}, input_options);
}
batch_norm_cuda_out(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon);
return std::make_tuple(output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& self, const Tensor& weight, const Tensor& running_mean, const Tensor& running_var,
const Tensor& save_mean, const Tensor& save_invstd, bool train, double epsilon, std::array<bool,3> grad_input_mask) {
return AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_backward_cuda", [&] {
auto mean_st = running_mean.dtype();
auto var_st = running_var.dtype();
TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types");
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float) {
return batch_norm_backward_cuda_template<at::Half, float, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask);
} else {
return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask);
}
} else {
if (is_half_float) {
return batch_norm_backward_cuda_template<at::Half, float, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask);
} else {
return batch_norm_backward_cuda_template<scalar_t, scalar_t, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask);
}
}
});
}
std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) {
return AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_stats_cuda", [&] {
if (cuda::detail::canUse32BitIndexMath(self)) {
return batch_norm_stats_cuda_template<scalar_t, int32_t>(self, epsilon);
} else {
return batch_norm_stats_cuda_template<scalar_t, int64_t>(self, epsilon);
}
});
}
Tensor batch_norm_elemt_cuda(const Tensor& self, const Tensor& weight, const Tensor& bias,
const Tensor& mean, const Tensor& invstd, double epsilon) {
auto output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
batch_norm_elemt_cuda_out(output, self, weight, bias, mean, invstd, epsilon);
return output;
}
Tensor& batch_norm_elemt_cuda_out(Tensor& output, const Tensor& self, const Tensor& weight, const Tensor& bias,
const Tensor& mean, const Tensor& invstd, double epsilon) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_elemt", [&] {
auto mean_st = mean.dtype();
auto invstd_st = invstd.dtype();
TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types");
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float) {
batch_norm_elemt_cuda_template<at::Half, float, int32_t>(output, self, weight, bias, mean, invstd, epsilon);
} else {
batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>(output, self, weight, bias, mean, invstd, epsilon);
}
} else {
if (is_half_float) {
batch_norm_elemt_cuda_template<at::Half, float, int64_t>(output, self, weight, bias, mean, invstd, epsilon);
} else {
batch_norm_elemt_cuda_template<scalar_t, scalar_t, int64_t>(output, self, weight, bias, mean, invstd, epsilon);
}
}
});
return output;
}
// accepting input(self) here to determine template data types, since running_mean/running_var are optional
std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const Tensor& running_mean,
const Tensor& running_var, double momentum, double epsilon, int64_t count) {
std::vector<int64_t> counts(mean.size(0), count);
return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts);
}
std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const Tensor& running_mean,
const Tensor& running_var, double momentum, double epsilon, IntArrayRef counts) {
Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU));
counts_ = counts_.to(self.device()).to(running_mean.dtype());
return AT_DISPATCH_FLOATING_TYPES_AND_HALF(running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (cuda::detail::canUse32BitIndexMath(self)) {
return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts_);
} else {
return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts_);
}
});
}
std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd,
const Tensor& weight, bool input_g, bool weight_g, bool bias_g) {
return AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_backward_reduce", [&] {
auto mean_st = mean.dtype();
auto invstd_st = invstd.dtype();
TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types");
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float) {
return batch_norm_backward_reduce_cuda_template<at::Half, float, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g);
} else {
return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
} else {
if (is_half_float) {
return batch_norm_backward_reduce_cuda_template<at::Half, float, int64_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g);
} else {
return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
}
});
}
Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd,
const Tensor& weight, const Tensor& mean_dy, const Tensor& mean_dy_xmu) {
return AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_backward_elemt", [&] {
auto mean_st = mean.dtype();
auto invstd_st = invstd.dtype();
TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types");
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float) {
return batch_norm_backward_elemt_cuda_template<at::Half, float, int32_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu);
}
} else {
if (is_half_float) {
return batch_norm_backward_elemt_cuda_template<at::Half, float, int64_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu);
}
}
});
}
std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda(
const Tensor& self, const Tensor& running_mean, const Tensor& running_var, double momentum) {
return AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_backward", [&] {
auto mean_st = running_mean.dtype();
auto var_st = running_var.dtype();
TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types");
// <sigh> Some workloads depend on passing in half input and float stats, which is
// usually handled by cuDNN. However, the JIT sometimes replaces cuDNN calls with this
// one so it needs to support the same case, or people start to complain.
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float) {
return batch_norm_update_stats_cuda_template<at::Half, float, int32_t>(self, running_mean, running_var, momentum);
} else {
return batch_norm_update_stats_cuda_template<scalar_t, scalar_t, int32_t>(self, running_mean, running_var, momentum);
}
} else {
if (is_half_float) {
return batch_norm_update_stats_cuda_template<at::Half, float, int64_t>(self, running_mean, running_var, momentum);
} else {
return batch_norm_update_stats_cuda_template<scalar_t, scalar_t, int64_t>(self, running_mean, running_var, momentum);
}
}
});
}
} } // namespace at::native
|
c0590aee8b836018fd16a1e934f87bfa3dc0018e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// upwind_derivative will calculate gradient of a field living on the
// 2d surface embedded in the 3d space
// due to the embedding nature, we may use geometrical information in the
// level set to avoid interpolation through discontinuities
// the gradient will be the gradient selected in a normal flow from the boundary
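// Note: each directional derivative below appears to use a 7-point stencil (p1..p7) plus the crossing
// distances (xpr/xpl, ypf/ypb, zpu/zpd) and boundary values (cpr/cpl, cpf/cpb, cpu/cpd) so that the
// one-sided differences never reach across the embedded interface; the actual stencil logic lives in
// weno_derivative_boundary (shared_utilities), which is not shown here.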
#include "shared_utilities.cuh"
#include "shared_utilities.cup"
__global__
void upwind_derivative(double * fx, double * fy, double * fz, double const * lsf, double const * vx, double const * vy, double const * vz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, double const * cpr, double const * cpl, double const * cpf, double const * cpb, double const * cpu, double const * cpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
double p1,p2,p3,p4,p5,p6,p7;
double r1,r2,r3,l1,l2,l3;
double v_fore, v_back;
p4 = lsf[ind];
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
p1 = lsf[left3];
p2 = lsf[left2];
p3 = lsf[left1];
p5 = lsf[rght1];
p6 = lsf[rght2];
p7 = lsf[rght3];
r1 = xpr[ind];
r2 = xpr[rght1];
r3 = xpr[rght2];
l1 = xpl[ind];
l2 = xpl[left1];
l3 = xpl[left2];
v_fore = cpr[ind];
v_back = cpl[ind];
double xR, xL;
weno_derivative_boundary(xR,xL,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dx,v_fore,v_back);
fx[ind] = (vx[ind]>0) ? xL : ( (vx[ind]<0) ? xR : (xL+xR)/2.0 );
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
p1 = lsf[back3];
p2 = lsf[back2];
p3 = lsf[back1];
p5 = lsf[frnt1];
p6 = lsf[frnt2];
p7 = lsf[frnt3];
r1 = ypf[ind];
r2 = ypf[frnt1];
r3 = ypf[frnt2];
l1 = ypb[ind];
l2 = ypb[back1];
l3 = ypb[back2];
v_fore = cpf[ind];
v_back = cpb[ind];
double yF, yB;
weno_derivative_boundary(yF,yB,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dy,v_fore,v_back);
	fy[ind] = (vy[ind]>0) ? yB : ( (vy[ind]<0) ? yF : (yB+yF)/2.0 );
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
p1 = lsf[down3];
p2 = lsf[down2];
p3 = lsf[down1];
p5 = lsf[upup1];
p6 = lsf[upup2];
p7 = lsf[upup3];
r1 = zpu[ind];
r2 = zpu[upup1];
r3 = zpu[upup2];
l1 = zpd[ind];
l2 = zpd[down1];
l3 = zpd[down2];
v_fore = cpu[ind];
v_back = cpd[ind];
double zU, zD;
weno_derivative_boundary(zU,zD,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dz,v_fore,v_back);
fz[ind] = (vz[ind]>0) ? zD : ( (vz[ind]<0) ? zU : (zD+zU)/2.0 );
}
| c0590aee8b836018fd16a1e934f87bfa3dc0018e.cu | // uwpind_derivative will calculate gradient of a field living on the
// 2d surface embedded in the 3d space
// due to the embedding nature, we may use geometrical information in the
// level set to avoid interpolation through discontinuities
// the gradient will be the gradient selected in a normal flow from the boundary
#include "shared_utilities.cuh"
#include "shared_utilities.cup"
__global__
void upwind_derivative(double * fx, double * fy, double * fz, double const * lsf, double const * vx, double const * vy, double const * vz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, double const * cpr, double const * cpl, double const * cpf, double const * cpb, double const * cpu, double const * cpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
double p1,p2,p3,p4,p5,p6,p7;
double r1,r2,r3,l1,l2,l3;
double v_fore, v_back;
p4 = lsf[ind];
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
p1 = lsf[left3];
p2 = lsf[left2];
p3 = lsf[left1];
p5 = lsf[rght1];
p6 = lsf[rght2];
p7 = lsf[rght3];
r1 = xpr[ind];
r2 = xpr[rght1];
r3 = xpr[rght2];
l1 = xpl[ind];
l2 = xpl[left1];
l3 = xpl[left2];
v_fore = cpr[ind];
v_back = cpl[ind];
double xR, xL;
weno_derivative_boundary(xR,xL,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dx,v_fore,v_back);
fx[ind] = (vx[ind]>0) ? xL : ( (vx[ind]<0) ? xR : (xL+xR)/2.0 );
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
p1 = lsf[back3];
p2 = lsf[back2];
p3 = lsf[back1];
p5 = lsf[frnt1];
p6 = lsf[frnt2];
p7 = lsf[frnt3];
r1 = ypf[ind];
r2 = ypf[frnt1];
r3 = ypf[frnt2];
l1 = ypb[ind];
l2 = ypb[back1];
l3 = ypb[back2];
v_fore = cpf[ind];
v_back = cpb[ind];
double yF, yB;
weno_derivative_boundary(yF,yB,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dy,v_fore,v_back);
	fy[ind] = (vy[ind]>0) ? yB : ( (vy[ind]<0) ? yF : (yB+yF)/2.0 );
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
p1 = lsf[down3];
p2 = lsf[down2];
p3 = lsf[down1];
p5 = lsf[upup1];
p6 = lsf[upup2];
p7 = lsf[upup3];
r1 = zpu[ind];
r2 = zpu[upup1];
r3 = zpu[upup2];
l1 = zpd[ind];
l2 = zpd[down1];
l3 = zpd[down2];
v_fore = cpu[ind];
v_back = cpd[ind];
double zU, zD;
weno_derivative_boundary(zU,zD,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dz,v_fore,v_back);
fz[ind] = (vz[ind]>0) ? zD : ( (vz[ind]<0) ? zU : (zD+zU)/2.0 );
}
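// Illustrative sketch, not part of the original kernel: the selection rule used above for
// fx, fy and fz can be written as one small device helper. The name upwind_select is
// hypothetical and introduced here only to make the rule explicit: take the one-sided
// derivative from the side the flow comes from, and fall back to the centered average
// when the advecting velocity component vanishes.
__device__ inline double upwind_select(double v, double d_plus, double d_minus)
{
	return (v > 0) ? d_minus : ( (v < 0) ? d_plus : (d_minus + d_plus) / 2.0 );
}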
|
91b961de18ca84c2b9e1a77de3abc4e28ab4ad53.hip | // !!! This is a file automatically generated by hipify!!!
#include "kernels.h"
#include <hip/hip_runtime_api.h>
#include "hip/hip_runtime.h"
#include "hipfft.h"
#include "stdio.h"
//This include is completely unnecessary and can be omitted - only used to prevent Intellisense from thinking CUDA variables are undefined
#include <device_launch_parameters.h>
__global__ void propagator(int N, int M, double z, double dx, double n, double lambda, hipfftComplex* Hq){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
float FX, FY, res;
float pre = (float)(n/lambda);
float calc = (float)(1/dx);
int newIndex;
int count = N*M;
for (int i = index; i < count; i += stride)
{
newIndex = (i + count/2-1) % (count);
FX = ((float)(1+(i/M)) * calc/(float)(N)) - calc/2.0f;
FY = ((float)(1+(i%M)) * calc/(float)(M)) - calc/2.0f;
res = 2 * (float)(M_PI*z*pre) * sqrtf(1 - SQUARE(FX/pre) - SQUARE(FY/pre));
if (sqrtf(SQUARE(FX)+SQUARE(FY)) < pre){
Hq[(newIndex % M) > M/2-1 ? newIndex-M/2 : newIndex+M/2] = make_cuFloatComplex(cosf(res),sinf(res));
}else{
Hq[(newIndex % M) > M/2-1 ? newIndex-M/2 : newIndex+M/2] = make_cuFloatComplex(0,0);
}
}
}
__global__ void multiply(int count, hipfftComplex* in, hipfftComplex* out){
hipfftComplex temp;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
temp = make_cuFloatComplex(out[i].x/(float)(count), out[i].y/(float)(count));
out[i] = cuCmulf(in[i], temp);
}
}
__global__ void multiplyf(int count, double* in1, double* in2, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = in1[i]*in2[i];
}
}
__global__ void multiplyfc(int count, double* in, hipfftDoubleComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = make_cuDoubleComplex(out[i].x*in[i],out[i].y*in[i]);
}
}
__global__ void absolute(int count, hipfftDoubleComplex* in, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = cuCabs(in[i]);
}
}
__global__ void real(int count, hipfftDoubleComplex* in, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = in[i].x;
}
}
__global__ void imag(int count, hipfftDoubleComplex* in, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = in[i].y;
}
}
__global__ void angle(int count, hipfftDoubleComplex* in, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = atan2(in[i].y,in[i].x);
}
}
__global__ void modelFunc(int count, double rOffset, double iOffset, hipfftDoubleComplex* in, hipfftDoubleComplex* model, double* Imodel){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
model[i] = cuCadd(make_cuDoubleComplex(rOffset, iOffset),in[i]);
Imodel[i] = SQUARE(cuCabs(model[i]));
}
}
__global__ void conjugate(int count, hipfftComplex *in, hipfftComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = cuConjf(in[i]);
}
}
__global__ void simpleDivision(double* num, double* div, double* res){
if(div[0] == 0.0f)
div[0] = div[0] + 0.00001f;
res[0] = num[0] / div[0];
}
__global__ void linear(int count, double* coef, double* constant, double* in, double* out, bool sign){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
if(sign)
out[i] = fma(coef[0], in[i], constant[i]);
else
out[i] = fma(coef[0], in[i], -constant[i]);
}
}
__global__ void square(int count, double* in, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = SQUARE(in[i]);
}
}
__global__ void simpleSum(double* in1, double* in2, double* out){
out[0] = in1[0] + in2[0];
}
__global__ void add(int count, hipfftDoubleComplex* in1, hipfftDoubleComplex* in2, hipfftDoubleComplex* out, bool sign){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
if (sign)
out[i] = cuCadd(in1[i], in2[i]);
else
out[i] = cuCsub(in1[i], in2[i]);
}
}
__global__ void strictBounds(int count, hipfftDoubleComplex* arr, double r_min, double r_max, double i_min, double i_max){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
arr[i].x = fmax(fmin(r_max, arr[i].x), r_min);
arr[i].y = fmax(fmin(i_max, arr[i].y), i_min);
}
}
__global__ void positivityBounds(int count, hipfftDoubleComplex* arr){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
if(arr[i].x > 0)
arr[i].x = 0;
}
}
__global__ void strictBoundsf(int count, double* in, double min, double max){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
in[i] = fmax(fmin(max, in[i]), min);
}
}
__global__ void softBounds(int count, hipfftDoubleComplex* arr, double mu, double t){
double tmp = mu*t;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
double real = fabs(arr[i].x) - tmp;
double imag = fabs(arr[i].y) - tmp;
if(real < 0)
real = 0;
if(imag < 0)
imag = 0;
arr[i] = make_cuDoubleComplex(copysign(real, arr[i].x), copysign(imag, arr[i].y));
}
}
// Most naive implementation of Gaussian blurring - only effective for very small kernel sizes
// A future implementation could use shared memory for higher effective bandwidth
__global__ void rowConvolution(int N, int M, double diameter, double* kernel, double* image, double* output, bool horizontal){
int offset;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int gridSize = blockDim.x * gridDim.x;
int count = N*M;
for(int i = index; i < count; i+=gridSize){
output[i] = 0;
for(int j = 0; j < diameter; j++){
offset = j - diameter/2;
if(horizontal){
if((i%N)+offset >= 0 && (i%N)+offset < N){
output[i] += kernel[j]*image[i+offset];
}
else
output[i] += kernel[j];
} else {
if((i/M)+offset >= 0 && (i/M)+offset < M){
output[i] += kernel[j]*image[i+offset*M];
}
else
output[i] += kernel[j];
}
}
}
}
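// Hedged usage sketch, added for illustration only: the naive separable blur above is meant
// to be applied as a horizontal pass followed by a vertical pass. h_gaussianBlurSketch and
// the scratch buffer d_tmp are hypothetical names that do not exist elsewhere in this file;
// N_BLOCKS and N_THREADS are the launch constants already used by the reduction wrappers below.
static inline void h_gaussianBlurSketch(int N, int M, double diameter, double* d_kernel, double* d_image, double* d_tmp, double* d_out){
	hipLaunchKernelGGL(( rowConvolution), dim3(N_BLOCKS), dim3(N_THREADS), 0, 0, N, M, diameter, d_kernel, d_image, d_tmp, true); // blur along rows
	hipLaunchKernelGGL(( rowConvolution), dim3(N_BLOCKS), dim3(N_THREADS), 0, 0, N, M, diameter, d_kernel, d_tmp, d_out, false); // blur along columns
}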
__global__ void coffset(int count, hipfftDoubleComplex* off, hipfftDoubleComplex* in, hipfftDoubleComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = cuCadd(off[0], in[i]);
}
}
__global__ void offset(int count, double roff, double ioff, hipfftDoubleComplex* in, hipfftDoubleComplex* out){
hipfftDoubleComplex temp = make_cuDoubleComplex(roff, ioff);
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = cuCadd(temp, in[i]);
}
}
__global__ void offsetf(int count, double roff, double* in, double* out, bool sign){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
if(sign)
out[i] = roff + in[i];
else
out[i] = roff - in[i];
}
}
__global__ void extend(int count, int multiple, hipfftDoubleComplex* in, hipfftDoubleComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
for(int e = 0; e < multiple; e++){
out[i + e*count] = in[i];
}
}
}
//
// CONVERSION KERNELS
//
__global__ void F2C(int count, double* in, hipfftDoubleComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = make_cuDoubleComplex(in[i], 0);
}
}
__global__ void D2u8(int count, double* in, uint8_t* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = (uint8_t)(in[i]*255.0f);
}
}
__global__ void C2Z(int count, hipfftComplex* in, hipfftDoubleComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = make_cuDoubleComplex((double)in[i].x, (double)in[i].y);
}
}
__global__ void Z2C(int count, hipfftDoubleComplex* in, hipfftComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = make_cuFloatComplex((float)in[i].x, (float)in[i].y);
}
}
//
// KERNELS FOR DIVISION BY ARRAYS
//
__global__ void contractf(int count, double constant, double* in, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = in[i] / (constant + 0.00001f);
}
}
__global__ void contractf_p(int count, double *constant, double* in, double* out){
if(constant[0] < 0.00001f){
constant[0] += 0.00001f;
}
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = in[i] / constant[0];
}
}
//
// KERNELS FOR SCALING ARRAYS BY CONSTANTS
//
__global__ void scalef(int count, double constant, double* in, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = constant*in[i];
}
}
__global__ void scale(int count, hipfftDoubleComplex constant, hipfftDoubleComplex* in, hipfftDoubleComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = cuCmul(constant,in[i]);
}
}
__global__ void scale_p(int count, hipfftDoubleComplex* constant, hipfftDoubleComplex* in, hipfftDoubleComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = cuCmul(constant[0],in[i]);
}
}
//
// PARALLEL REDUCTION KERNELS
//
//Fast parallel sum
/*
* The following function of sum is taken from the publicly accessible NVidia
* webinar found at https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
*/
__global__ void sum(int count, double* in, double* result){
extern __shared__ double sharedIn[];
int thIdx = threadIdx.x;
int index = blockIdx.x*blockDim.x + thIdx;
int stride = blockDim.x*gridDim.x;
sharedIn[thIdx] = 0;
for(unsigned int i = index; i < count; i+=stride){
sharedIn[thIdx] += in[i];
}
__syncthreads();
for(unsigned int i = blockDim.x/2 ; i>0 ; i>>=1){
if(thIdx < i){
sharedIn[thIdx] += sharedIn[thIdx+i];
}
__syncthreads();
}
if(thIdx == 0) result[blockIdx.x] = sharedIn[0];
}
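// Note on the reduction above (and its variants below): the shared-memory tree phase halves
// blockDim.x every step, so it assumes the block size is a power of two; the preceding
// grid-stride loop folds an arbitrary count into blockDim.x partial values first, so only
// the block size (not count) carries that requirement.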
__global__ void sumOfProducts(int count, double* in1, double* in2, double* result){
extern __shared__ double sharedIn[];
int thIdx = threadIdx.x;
int index = blockIdx.x*blockDim.x + thIdx;
int stride = blockDim.x*gridDim.x;
sharedIn[thIdx] = 0;
for(unsigned int i = index; i < count; i+=stride){
sharedIn[thIdx] += in1[i]*in2[i];
}
__syncthreads();
for(unsigned int i = blockDim.x/2 ; i>0 ; i>>=1){
if(thIdx < i){
sharedIn[thIdx] += sharedIn[thIdx+i];
}
__syncthreads();
}
if(thIdx == 0) result[blockIdx.x] = sharedIn[0];
}
__global__ void maximum(int count, double* in, double* result){
extern __shared__ double sharedIn[];
int thIdx = threadIdx.x;
int index = blockIdx.x*blockDim.x + thIdx;
int stride = blockDim.x*gridDim.x;
sharedIn[thIdx] = in[index];
for(int i = index+stride; i < count; i += stride){
sharedIn[thIdx] = fmax(sharedIn[thIdx], in[i]);
}
__syncthreads();
for(unsigned int i = blockDim.x/2 ; i>0 ; i>>=1){
if(thIdx < i){
sharedIn[thIdx] = fmax(sharedIn[thIdx], sharedIn[thIdx+i]);
}
__syncthreads();
}
if (thIdx == 0) result[blockIdx.x] = sharedIn[thIdx];
}
__global__ void minimum(int count, double* in, double* result){
extern __shared__ double sharedIn[];
int thIdx = threadIdx.x;
int index = blockIdx.x*blockDim.x + thIdx;
int stride = blockDim.x*gridDim.x;
sharedIn[thIdx] = in[index];
for(int i = index+stride; i < count; i += stride){
sharedIn[thIdx] = fmin(sharedIn[thIdx], in[i]);
}
__syncthreads();
for(unsigned int i = blockDim.x/2 ; i>0 ; i>>=1){
if(thIdx < i){
sharedIn[thIdx] = fmin(sharedIn[thIdx], sharedIn[thIdx+i]);
}
__syncthreads();
}
if (thIdx == 0) result[blockIdx.x] = sharedIn[thIdx];
}
//
// WRAPPERS FOR PARALLEL REDUCTION KERNELS
//
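// Each wrapper runs the device reduction in two passes: the first launch writes one partial
// result per block into d_result (N_BLOCKS values), and the second launch reduces those
// partials with a single block of N_BLOCKS threads. This assumes N_BLOCKS is itself a legal,
// power-of-two block size (at most 1024 threads on current devices).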
void h_maximum(int count, double* d_in, double* d_result){
hipLaunchKernelGGL(( maximum), dim3(N_BLOCKS),dim3(N_THREADS),N_THREADS*sizeof(double), 0, count, d_in, d_result);
hipLaunchKernelGGL(( maximum), dim3(1), dim3(N_BLOCKS), N_BLOCKS*sizeof(double), 0, N_BLOCKS, d_result, d_result);
}
void h_minimum(int count, double* d_in, double* d_result){
hipLaunchKernelGGL(( minimum), dim3(N_BLOCKS),dim3(N_THREADS),N_THREADS*sizeof(double), 0, count, d_in, d_result);
hipLaunchKernelGGL(( minimum), dim3(1), dim3(N_BLOCKS), N_BLOCKS*sizeof(double), 0, N_BLOCKS, d_result, d_result);
}
void h_sum(int count, double* d_in, double* d_result){
hipLaunchKernelGGL(( sum), dim3(N_BLOCKS),dim3(N_THREADS),N_THREADS*sizeof(double), 0, count, d_in, d_result);
hipLaunchKernelGGL(( sum), dim3(1), dim3(N_BLOCKS), N_BLOCKS*sizeof(double), 0, N_BLOCKS, d_result, d_result);
}
void h_sumOfProducts(int count, double* d_in1, double* d_in2, double* d_result){
hipLaunchKernelGGL(( sumOfProducts), dim3(N_BLOCKS),dim3(N_THREADS),N_THREADS*sizeof(double), 0, count, d_in1, d_in2, d_result);
hipLaunchKernelGGL(( sum), dim3(1), dim3(N_BLOCKS), N_BLOCKS*sizeof(double), 0, N_BLOCKS, d_result, d_result);
}
void h_average(int count, double* d_in, double* d_result){
hipLaunchKernelGGL(( sum), dim3(N_BLOCKS),dim3(N_THREADS),N_THREADS*sizeof(double), 0, count, d_in, d_result);
hipLaunchKernelGGL(( sum), dim3(1), dim3(N_BLOCKS), N_BLOCKS*sizeof(double), 0, N_BLOCKS, d_result, d_result);
hipLaunchKernelGGL(( contractf), dim3(1),dim3(1), 0, 0, 1, (double)count, d_result, d_result);
} | 91b961de18ca84c2b9e1a77de3abc4e28ab4ad53.cu | #include "kernels.h"
#include <cuda_runtime_api.h>
#include "cuda.h"
#include "cufft.h"
#include "stdio.h"
//This include is completely unnecessary and can be omitted - only used to prevent Intellisense from thinking CUDA variables are undefined
#include <device_launch_parameters.h>
__global__ void propagator(int N, int M, double z, double dx, double n, double lambda, cufftComplex* Hq){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
float FX, FY, res;
float pre = (float)(n/lambda);
float calc = (float)(1/dx);
int newIndex;
int count = N*M;
for (int i = index; i < count; i += stride)
{
newIndex = (i + count/2-1) % (count);
FX = ((float)(1+(i/M)) * calc/(float)(N)) - calc/2.0f;
FY = ((float)(1+(i%M)) * calc/(float)(M)) - calc/2.0f;
res = 2 * (float)(M_PI*z*pre) * sqrtf(1 - SQUARE(FX/pre) - SQUARE(FY/pre));
if (sqrtf(SQUARE(FX)+SQUARE(FY)) < pre){
Hq[(newIndex % M) > M/2-1 ? newIndex-M/2 : newIndex+M/2] = make_cuFloatComplex(cosf(res),sinf(res));
}else{
Hq[(newIndex % M) > M/2-1 ? newIndex-M/2 : newIndex+M/2] = make_cuFloatComplex(0,0);
}
}
}
__global__ void multiply(int count, cufftComplex* in, cufftComplex* out){
cufftComplex temp;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
temp = make_cuFloatComplex(out[i].x/(float)(count), out[i].y/(float)(count));
out[i] = cuCmulf(in[i], temp);
}
}
__global__ void multiplyf(int count, double* in1, double* in2, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = in1[i]*in2[i];
}
}
__global__ void multiplyfc(int count, double* in, cufftDoubleComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = make_cuDoubleComplex(out[i].x*in[i],out[i].y*in[i]);
}
}
__global__ void absolute(int count, cufftDoubleComplex* in, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = cuCabs(in[i]);
}
}
__global__ void real(int count, cufftDoubleComplex* in, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = in[i].x;
}
}
__global__ void imag(int count, cufftDoubleComplex* in, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = in[i].y;
}
}
__global__ void angle(int count, cufftDoubleComplex* in, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = atan2(in[i].y,in[i].x);
}
}
__global__ void modelFunc(int count, double rOffset, double iOffset, cufftDoubleComplex* in, cufftDoubleComplex* model, double* Imodel){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
model[i] = cuCadd(make_cuDoubleComplex(rOffset, iOffset),in[i]);
Imodel[i] = SQUARE(cuCabs(model[i]));
}
}
__global__ void conjugate(int count, cufftComplex *in, cufftComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = cuConjf(in[i]);
}
}
__global__ void simpleDivision(double* num, double* div, double* res){
if(div[0] == 0.0f)
div[0] = div[0] + 0.00001f;
res[0] = num[0] / div[0];
}
__global__ void linear(int count, double* coef, double* constant, double* in, double* out, bool sign){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
if(sign)
out[i] = fma(coef[0], in[i], constant[i]);
else
out[i] = fma(coef[0], in[i], -constant[i]);
}
}
__global__ void square(int count, double* in, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = SQUARE(in[i]);
}
}
__global__ void simpleSum(double* in1, double* in2, double* out){
out[0] = in1[0] + in2[0];
}
__global__ void add(int count, cufftDoubleComplex* in1, cufftDoubleComplex* in2, cufftDoubleComplex* out, bool sign){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
if (sign)
out[i] = cuCadd(in1[i], in2[i]);
else
out[i] = cuCsub(in1[i], in2[i]);
}
}
__global__ void strictBounds(int count, cufftDoubleComplex* arr, double r_min, double r_max, double i_min, double i_max){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
arr[i].x = fmax(fmin(r_max, arr[i].x), r_min);
arr[i].y = fmax(fmin(i_max, arr[i].y), i_min);
}
}
__global__ void positivityBounds(int count, cufftDoubleComplex* arr){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
if(arr[i].x > 0)
arr[i].x = 0;
}
}
__global__ void strictBoundsf(int count, double* in, double min, double max){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
in[i] = fmax(fmin(max, in[i]), min);
}
}
__global__ void softBounds(int count, cufftDoubleComplex* arr, double mu, double t){
double tmp = mu*t;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
double real = fabs(arr[i].x) - tmp;
double imag = fabs(arr[i].y) - tmp;
if(real < 0)
real = 0;
if(imag < 0)
imag = 0;
arr[i] = make_cuDoubleComplex(copysign(real, arr[i].x), copysign(imag, arr[i].y));
}
}
// Most naive implementation of Gaussian blurring - only effective for very small kernel sizes
// A future implementation could use shared memory for higher effective bandwidth
__global__ void rowConvolution(int N, int M, double diameter, double* kernel, double* image, double* output, bool horizontal){
int offset;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int gridSize = blockDim.x * gridDim.x;
int count = N*M;
for(int i = index; i < count; i+=gridSize){
output[i] = 0;
for(int j = 0; j < diameter; j++){
offset = j - diameter/2;
if(horizontal){
if((i%N)+offset >= 0 && (i%N)+offset < N){
output[i] += kernel[j]*image[i+offset];
}
else
output[i] += kernel[j];
} else {
if((i/M)+offset >= 0 && (i/M)+offset < M){
output[i] += kernel[j]*image[i+offset*M];
}
else
output[i] += kernel[j];
}
}
}
}
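// Hedged usage sketch, added for illustration only: the naive separable blur above is meant
// to be applied as a horizontal pass followed by a vertical pass. h_gaussianBlurSketch and
// the scratch buffer d_tmp are hypothetical names that do not exist elsewhere in this file;
// N_BLOCKS and N_THREADS are the launch constants already used by the reduction wrappers below.
static inline void h_gaussianBlurSketch(int N, int M, double diameter, double* d_kernel, double* d_image, double* d_tmp, double* d_out){
	rowConvolution<<<N_BLOCKS, N_THREADS>>>(N, M, diameter, d_kernel, d_image, d_tmp, true); // blur along rows
	rowConvolution<<<N_BLOCKS, N_THREADS>>>(N, M, diameter, d_kernel, d_tmp, d_out, false); // blur along columns
}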
__global__ void coffset(int count, cufftDoubleComplex* off, cufftDoubleComplex* in, cufftDoubleComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = cuCadd(off[0], in[i]);
}
}
__global__ void offset(int count, double roff, double ioff, cufftDoubleComplex* in, cufftDoubleComplex* out){
cufftDoubleComplex temp = make_cuDoubleComplex(roff, ioff);
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = cuCadd(temp, in[i]);
}
}
__global__ void offsetf(int count, double roff, double* in, double* out, bool sign){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
if(sign)
out[i] = roff + in[i];
else
out[i] = roff - in[i];
}
}
__global__ void extend(int count, int multiple, cufftDoubleComplex* in, cufftDoubleComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
for(int e = 0; e < multiple; e++){
out[i + e*count] = in[i];
}
}
}
//
// CONVERSION KERNELS
//
__global__ void F2C(int count, double* in, cufftDoubleComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = make_cuDoubleComplex(in[i], 0);
}
}
__global__ void D2u8(int count, double* in, uint8_t* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = (uint8_t)(in[i]*255.0f);
}
}
__global__ void C2Z(int count, cufftComplex* in, cufftDoubleComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = make_cuDoubleComplex((double)in[i].x, (double)in[i].y);
}
}
__global__ void Z2C(int count, cufftDoubleComplex* in, cufftComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = make_cuFloatComplex((float)in[i].x, (float)in[i].y);
}
}
//
// KERNELS FOR DIVISION BY ARRAYS
//
__global__ void contractf(int count, double constant, double* in, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = in[i] / (constant + 0.00001f);
}
}
__global__ void contractf_p(int count, double *constant, double* in, double* out){
if(constant[0] < 0.00001f){
constant[0] += 0.00001f;
}
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = in[i] / constant[0];
}
}
//
// KERNELS FOR SCALING ARRAYS BY CONSTANTS
//
__global__ void scalef(int count, double constant, double* in, double* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = constant*in[i];
}
}
__global__ void scale(int count, cufftDoubleComplex constant, cufftDoubleComplex* in, cufftDoubleComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = cuCmul(constant,in[i]);
}
}
__global__ void scale_p(int count, cufftDoubleComplex* constant, cufftDoubleComplex* in, cufftDoubleComplex* out){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < count; i += stride){
out[i] = cuCmul(constant[0],in[i]);
}
}
//
// PARALLEL REDUCTION KERNELS
//
//Fast parallel sum
/*
* The following function of sum is taken from the publicly accessible NVidia
* webinar found at https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
*/
__global__ void sum(int count, double* in, double* result){
extern __shared__ double sharedIn[];
int thIdx = threadIdx.x;
int index = blockIdx.x*blockDim.x + thIdx;
int stride = blockDim.x*gridDim.x;
sharedIn[thIdx] = 0;
for(unsigned int i = index; i < count; i+=stride){
sharedIn[thIdx] += in[i];
}
__syncthreads();
for(unsigned int i = blockDim.x/2 ; i>0 ; i>>=1){
if(thIdx < i){
sharedIn[thIdx] += sharedIn[thIdx+i];
}
__syncthreads();
}
if(thIdx == 0) result[blockIdx.x] = sharedIn[0];
}
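// Note on the reduction above (and its variants below): the shared-memory tree phase halves
// blockDim.x every step, so it assumes the block size is a power of two; the preceding
// grid-stride loop folds an arbitrary count into blockDim.x partial values first, so only
// the block size (not count) carries that requirement.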
__global__ void sumOfProducts(int count, double* in1, double* in2, double* result){
extern __shared__ double sharedIn[];
int thIdx = threadIdx.x;
int index = blockIdx.x*blockDim.x + thIdx;
int stride = blockDim.x*gridDim.x;
sharedIn[thIdx] = 0;
for(unsigned int i = index; i < count; i+=stride){
sharedIn[thIdx] += in1[i]*in2[i];
}
__syncthreads();
for(unsigned int i = blockDim.x/2 ; i>0 ; i>>=1){
if(thIdx < i){
sharedIn[thIdx] += sharedIn[thIdx+i];
}
__syncthreads();
}
if(thIdx == 0) result[blockIdx.x] = sharedIn[0];
}
__global__ void maximum(int count, double* in, double* result){
extern __shared__ double sharedIn[];
int thIdx = threadIdx.x;
int index = blockIdx.x*blockDim.x + thIdx;
int stride = blockDim.x*gridDim.x;
sharedIn[thIdx] = in[index];
for(int i = index+stride; i < count; i += stride){
sharedIn[thIdx] = fmax(sharedIn[thIdx], in[i]);
}
__syncthreads();
for(unsigned int i = blockDim.x/2 ; i>0 ; i>>=1){
if(thIdx < i){
sharedIn[thIdx] = fmax(sharedIn[thIdx], sharedIn[thIdx+i]);
}
__syncthreads();
}
if (thIdx == 0) result[blockIdx.x] = sharedIn[thIdx];
}
__global__ void minimum(int count, double* in, double* result){
extern __shared__ double sharedIn[];
int thIdx = threadIdx.x;
int index = blockIdx.x*blockDim.x + thIdx;
int stride = blockDim.x*gridDim.x;
sharedIn[thIdx] = in[index];
for(int i = index+stride; i < count; i += stride){
sharedIn[thIdx] = fmin(sharedIn[thIdx], in[i]);
}
__syncthreads();
for(unsigned int i = blockDim.x/2 ; i>0 ; i>>=1){
if(thIdx < i){
sharedIn[thIdx] = fmin(sharedIn[thIdx], sharedIn[thIdx+i]);
}
__syncthreads();
}
if (thIdx == 0) result[blockIdx.x] = sharedIn[thIdx];
}
//
// WRAPPERS FOR PARALLEL REDUCTION KERNELS
//
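// Each wrapper runs the device reduction in two passes: the first launch writes one partial
// result per block into d_result (N_BLOCKS values), and the second launch reduces those
// partials with a single block of N_BLOCKS threads. This assumes N_BLOCKS is itself a legal,
// power-of-two block size (at most 1024 threads on current devices).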
void h_maximum(int count, double* d_in, double* d_result){
maximum<<<N_BLOCKS,N_THREADS,N_THREADS*sizeof(double)>>>(count, d_in, d_result);
maximum<<<1, N_BLOCKS, N_BLOCKS*sizeof(double)>>>(N_BLOCKS, d_result, d_result);
}
void h_minimum(int count, double* d_in, double* d_result){
minimum<<<N_BLOCKS,N_THREADS,N_THREADS*sizeof(double)>>>(count, d_in, d_result);
minimum<<<1, N_BLOCKS, N_BLOCKS*sizeof(double)>>>(N_BLOCKS, d_result, d_result);
}
void h_sum(int count, double* d_in, double* d_result){
sum<<<N_BLOCKS,N_THREADS,N_THREADS*sizeof(double)>>>(count, d_in, d_result);
sum<<<1, N_BLOCKS, N_BLOCKS*sizeof(double)>>>(N_BLOCKS, d_result, d_result);
}
void h_sumOfProducts(int count, double* d_in1, double* d_in2, double* d_result){
sumOfProducts<<<N_BLOCKS,N_THREADS,N_THREADS*sizeof(double)>>>(count, d_in1, d_in2, d_result);
sum<<<1, N_BLOCKS, N_BLOCKS*sizeof(double)>>>(N_BLOCKS, d_result, d_result);
}
void h_average(int count, double* d_in, double* d_result){
sum<<<N_BLOCKS,N_THREADS,N_THREADS*sizeof(double)>>>(count, d_in, d_result);
sum<<<1, N_BLOCKS, N_BLOCKS*sizeof(double)>>>(N_BLOCKS, d_result, d_result);
contractf<<<1,1>>>(1, (double)count, d_result, d_result);
} |
335fa1b1e6cccf945665bd7046ce579ec6ccea42.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
int main(int argc, char **argv){
hipDeviceProp_t dP;
float min_cc = 3.5;
int rc = hipGetDeviceProperties(&dP, 0);
if(rc != hipSuccess) {
hipError_t error = hipGetLastError();
printf("CUDA error: %s", hipGetErrorString(error));
return rc; /* Failure */
}
if((dP.major+(dP.minor/10.0)) < min_cc) {
printf("Minimum CUDA Compute Capability of %2.1f required: %d.%d found\n", min_cc, dP.major, dP.minor);
return 1; /* Failure */
} else {
printf("%d%d", dP.major, dP.minor);
return 0; /* Success */
}
}
| 335fa1b1e6cccf945665bd7046ce579ec6ccea42.cu | #include <stdio.h>
int main(int argc, char **argv){
cudaDeviceProp dP;
float min_cc = 3.5;
int rc = cudaGetDeviceProperties(&dP, 0);
if(rc != cudaSuccess) {
cudaError_t error = cudaGetLastError();
printf("CUDA error: %s", cudaGetErrorString(error));
return rc; /* Failure */
}
if((dP.major+(dP.minor/10.0)) < min_cc) {
printf("Minimum CUDA Compute Capability of %2.1f required: %d.%d found\n", min_cc, dP.major, dP.minor);
return 1; /* Failure */
} else {
printf("%d%d", dP.major, dP.minor);
return 0; /* Success */
}
}
|
4c49546c41f4be2d572e7142233cd6677ced1f9e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ScalePlugin.hpp"
namespace MNN {
template <typename T>
__global__ void SCALE(const int n, const int channels, const int dim, const T* in, T* out,
const float* scaleData, const float* biasData);
template <>
__global__ void SCALE<float>(const int n, const int channels, const int dim, const float* in, float* out,
const float* scaleData, const float* biasData) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels;
out[index] = in[index] * scaleData[c] + biasData[c];
}
}
template <>
__global__ void SCALE<__half>(const int n, const int channels, const int dim, const __half* in, __half* out,
const float* scaleData, const float* biasData) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels;
out[index] = in[index] * __float2half(scaleData[c]) + __float2half(biasData[c]);
}
}
hipError_t ScalePlugin::ScaleExecute(nvinfer1::DataType dataType, const int count, const int channels, const int dim, const float* bottom_data,
float* top_data, const float* scale, const float* bias, hipStream_t stream) {
if (dataType == nvinfer1::DataType::kFLOAT){
hipLaunchKernelGGL(( SCALE<float>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, channels, dim, bottom_data, top_data,
scale, bias);
}else{
hipLaunchKernelGGL(( SCALE<__half>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, channels, dim, (const __half*)bottom_data, (__half*)top_data,
scale, bias);
}
return hipPeekAtLastError();
}
}; // namespace MNN | 4c49546c41f4be2d572e7142233cd6677ced1f9e.cu | #include "ScalePlugin.hpp"
namespace MNN {
template <typename T>
__global__ void SCALE(const int n, const int channels, const int dim, const T* in, T* out,
const float* scaleData, const float* biasData);
template <>
__global__ void SCALE<float>(const int n, const int channels, const int dim, const float* in, float* out,
const float* scaleData, const float* biasData) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels;
out[index] = in[index] * scaleData[c] + biasData[c];
}
}
template <>
__global__ void SCALE<__half>(const int n, const int channels, const int dim, const __half* in, __half* out,
const float* scaleData, const float* biasData) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels;
out[index] = in[index] * __float2half(scaleData[c]) + __float2half(biasData[c]);
}
}
cudaError_t ScalePlugin::ScaleExecute(nvinfer1::DataType dataType, const int count, const int channels, const int dim, const float* bottom_data,
float* top_data, const float* scale, const float* bias, cudaStream_t stream) {
if (dataType == nvinfer1::DataType::kFLOAT){
SCALE<float><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, channels, dim, bottom_data, top_data,
scale, bias);
}else{
SCALE<__half><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, channels, dim, (const __half*)bottom_data, (__half*)top_data,
scale, bias);
}
return cudaPeekAtLastError();
}
}; // namespace MNN |
444f67dbfa19a907c7f7d49027c0064d49eb0b46.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int warp_thread_id = threadIdx.x % 32;
__shared__ unsigned long long sdata[SHARED_MEM_ELEMENTS];
__shared__ void **tmp_ptr;
__shared__ void *arr[SHARED_MEM_ELEMENTS];
if (threadIdx.x == 0) {
for (i=0; i < SHARED_MEM_ELEMENTS; i++) {
arr[i] = (void *)&sdata[i];
}
for (i=0; i < (SHARED_MEM_ELEMENTS - 1); i++) {
sdata[i] = (unsigned long long)arr[i+1];
}
sdata[SHARED_MEM_ELEMENTS - 1] = (unsigned long long) arr[0];
}
__syncthreads();
tmp_ptr = (void **)(&(arr[(threadIdx.x + stride)%SHARED_MEM_ELEMENTS]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
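/* the two loops below build a pointer-chasing chain on the host: element i of h_a stores the
   address of element (i + 1 + stride) mod N, so dereferencing entries one after another walks
   the array with the requested stride */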
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
// init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
// hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/shd_mem/fadd_shd_80_20_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
| 444f67dbfa19a907c7f7d49027c0064d49eb0b46.cu | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int warp_thread_id = threadIdx.x % 32;
__shared__ unsigned long long sdata[SHARED_MEM_ELEMENTS];
__shared__ void **tmp_ptr;
__shared__ void *arr[SHARED_MEM_ELEMENTS];
if (threadIdx.x == 0) {
for (i=0; i < SHARED_MEM_ELEMENTS; i++) {
arr[i] = (void *)&sdata[i];
}
for (i=0; i < (SHARED_MEM_ELEMENTS - 1); i++) {
sdata[i] = (unsigned long long)arr[i+1];
}
sdata[SHARED_MEM_ELEMENTS - 1] = (unsigned long long) arr[0];
}
__syncthreads();
tmp_ptr = (void **)(&(arr[(threadIdx.x + stride)%SHARED_MEM_ELEMENTS]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
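// each h_a[i] now holds the address of h_a[(i + 1 + stride) % N], so the kernel walks a pointer-chasing chain through the array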
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
// init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
// cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
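// time the kernel with CUDA events while the external power_monitor process (launched below) samples GPU power in the background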
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/shd_mem/fadd_shd_80_20_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|
97aa8e60dfe30de3a28a373aa13de09915abbb69.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pool.cuh"
#include "cutens.h"
void cumaxpool(cuftens *src, cuftens *dst,
int W, int H, int Sx, int Sy)
{
int D=src->D, L=src->L;
int Ms=src->M, Ns=src->N;
int Md=dst->M, Nd=dst->N;
cuASSERT(L == dst->L && D == dst->D, "err: cupool shape\n");
int TS = 16;
dim3 grid(CEIL(L, TS), CEIL(Nd, W), CEIL(Md, H));
dim3 block(TS, W, H);
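// one kernel launch per depth slice, with src/dst offset by MNL elements per slice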
for (int w = 0; w < D; w++) {
float *s = src->data + w * src->MNL;
float *d = dst->data + w * dst->MNL;
hipLaunchKernelGGL(( ker_maxpool) , dim3(grid), dim3(block), 0, 0,
s, d, Ms, Ns, Md, Nd, L, W, H, Sx, Sy);
}
}
| 97aa8e60dfe30de3a28a373aa13de09915abbb69.cu | #include "pool.cuh"
#include "cutens.h"
void cumaxpool(cuftens *src, cuftens *dst,
int W, int H, int Sx, int Sy)
{
int D=src->D, L=src->L;
int Ms=src->M, Ns=src->N;
int Md=dst->M, Nd=dst->N;
cuASSERT(L == dst->L && D == dst->D, "err: cupool shape\n");
int TS = 16;
dim3 grid(CEIL(L, TS), CEIL(Nd, W), CEIL(Md, H));
dim3 block(TS, W, H);
for (int w = 0; w < D; w++) {
float *s = src->data + w * src->MNL;
float *d = dst->data + w * dst->MNL;
ker_maxpool <<<grid, block>>>
(s, d, Ms, Ns, Md, Nd, L, W, H, Sx, Sy);
}
}
|
fe656a7d2c05e5cd37bbaf986747c0884cace82d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void sw4 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
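// note: the flat pointers are reinterpreted with fixed 304x304 inner dimensions, so the caller's N is assumed to fit that padded layout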
double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
double r1, r2, r3;
if (i>=2 & j>=2 & k>=2 & i<=N-3 & j<=N-3 & k<=N-3) {
double muz4;
double muz1;
double muy4;
double muy1;
double mux4;
double mux1;
double muz3;
double muz2;
double muy3;
double muy2;
double mux3;
double mux2;
double _t_5_;
double _t_7_;
double _t_3_;
double _t_9_;
double _t_11_;
double _t_10_;
double _t_12_;
double _t_13_;
double _t_14_;
double _t_0_;
double _t_16_;
double _t_15_;
double _t_17_;
double _t_18_;
double _t_19_;
double _t_4_;
double _t_2_;
double _t_6_;
double _t_1_;
double _t_8_;
double r1;
double _t_30_;
double _t_31_;
double _t_33_;
double _t_29_;
double _t_27_;
double _t_26_;
double _t_28_;
double _t_32_;
double _t_34_;
double _t_20_;
double _t_22_;
double _t_21_;
double _t_23_;
double _t_24_;
double _t_25_;
double _t_36_;
double _t_35_;
double _t_52_;
double _t_37_;
double _t_54_;
double _t_38_;
double _t_39_;
double r2;
double _t_56_;
double _t_58_;
double _t_42_;
double _t_41_;
double _t_43_;
double _t_44_;
double _t_45_;
double _t_40_;
double _t_47_;
double _t_46_;
double _t_48_;
double _t_49_;
double _t_50_;
double _t_53_;
double _t_51_;
double _t_55_;
double _t_57_;
double _t_59_;
double r3;
double _t_102_;
double _t_100_;
double _t_76_;
double _t_74_;
double _t_63_;
double _t_61_;
double _t_89_;
double _t_87_;
double _t_103_;
double _t_77_;
double _t_85_;
double _t_111_;
double _t_104_;
double _t_80_;
double _t_112_;
double _t_83_;
double _t_101_;
double _t_78_;
double _t_106_;
double _t_86_;
double _t_109_;
double _t_75_;
double _t_107_;
double _t_81_;
double _t_84_;
double _t_105_;
double _t_110_;
double _t_108_;
double _t_60_;
double _t_79_;
double _t_82_;
double _t_64_;
double _t_90_;
double _t_72_;
double _t_98_;
double _t_65_;
double _t_93_;
double _t_73_;
double _t_96_;
double _t_62_;
double _t_91_;
double _t_67_;
double _t_99_;
double _t_88_;
double _t_70_;
double _t_68_;
double _t_94_;
double _t_92_;
double _t_71_;
double _t_97_;
double _t_95_;
double _t_66_;
double _t_69_;
double _t_129_;
double _t_127_;
double _t_155_;
double _t_153_;
double _t_116_;
double _t_114_;
double _t_142_;
double _t_140_;
double _t_117_;
double _t_143_;
double _t_125_;
double _t_151_;
double _t_118_;
double _t_146_;
double _t_126_;
double _t_149_;
double _t_115_;
double _t_144_;
double _t_120_;
double _t_152_;
double _t_141_;
double _t_123_;
double _t_121_;
double _t_147_;
double _t_145_;
double _t_124_;
double _t_150_;
double _t_148_;
double _t_113_;
double _t_119_;
double _t_122_;
double _t_130_;
double _t_156_;
double _t_138_;
double _t_164_;
double _t_131_;
double _t_159_;
double _t_139_;
double _t_128_;
double _t_162_;
double _t_157_;
double _t_133_;
double _t_165_;
double _t_136_;
double _t_154_;
double _t_134_;
double _t_160_;
double _t_137_;
double _t_158_;
double _t_132_;
double _t_163_;
double _t_135_;
double _t_161_;
double _t_169_;
double _t_182_;
double _t_167_;
double _t_180_;
double _t_195_;
double _t_208_;
double _t_193_;
double _t_206_;
double _t_170_;
double _t_183_;
double _t_178_;
double _t_191_;
double _t_171_;
double _t_186_;
double _t_179_;
double _t_168_;
double _t_189_;
double _t_184_;
double _t_173_;
double _t_192_;
double _t_176_;
double _t_181_;
double _t_174_;
double _t_187_;
double _t_177_;
double _t_185_;
double _t_172_;
double _t_190_;
double _t_175_;
double _t_166_;
double _t_188_;
double _t_196_;
double _t_209_;
double _t_204_;
double _t_217_;
double _t_197_;
double _t_212_;
double _t_205_;
double _t_194_;
double _t_215_;
double _t_210_;
double _t_199_;
double _t_218_;
double _t_207_;
double _t_202_;
double _t_200_;
double _t_213_;
double _t_211_;
double _t_203_;
double _t_198_;
double _t_216_;
double _t_201_;
double _t_214_;
double uacc_0kc0jc0ic0;
double uacc_1kc0jc0ic0;
double uacc_2kc0jc0ic0;
muz4 = -3.0 / 4.0 * mu[k+2][j][i] * strz[k+2];
muz4 += mu[k+1][j][i] * strz[k+1];
muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
muz1 = -3.0 / 4.0 * mu[k][j][i] * strz[k];
muz1 += mu[k-1][j][i] * strz[k-1];
muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2];
muy4 = -3.0 / 4.0 * mu[k][j][i] * stry[j];
muy4 += mu[k][j+1][i] * stry[j+1];
muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2];
muy1 = -3.0 / 4.0 * mu[k][j][i] * stry[j];
muy1 += mu[k][j-1][i] * stry[j-1];
muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2];
mux4 = -3.0 / 4.0 * mu[k][j][i] * strx[i];
mux4 += mu[k][j][i+1] * strx[i+1];
mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2];
mux1 = -3.0 / 4.0 * mu[k][j][i] * strx[i];
mux1 += mu[k][j][i-1] * strx[i-1];
mux1 -= 3.0 / 4.0 * mu[k][j][i-2] * strx[i-2];
muz3 = mu[k-1][j][i] * strz[k-1];
muz3 += mu[k+2][j][i] * strz[k+2];
muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
muz3 += 3.0 * mu[k][j][i] * strz[k];
muz2 = mu[k-2][j][i] * strz[k-2];
muz2 += mu[k+1][j][i] * strz[k+1];
muz2 += 3.0 * mu[k][j][i] * strz[k];
muz2 += 3.0 * mu[k-1][j][i] * strz[k-1];
muy3 = mu[k][j-1][i] * stry[j-1];
muy3 += mu[k][j+2][i] * stry[j+2];
muy3 += 3.0 * mu[k][j+1][i] * stry[j+1];
muy3 += 3.0 * mu[k][j][i] * stry[j];
muy2 = mu[k][j-2][i] * stry[j-2];
muy2 += mu[k][j+1][i] * stry[j+1];
muy2 += 3.0 * mu[k][j][i] * stry[j];
muy2 += 3.0 * mu[k][j-1][i] * stry[j-1];
mux3 = mu[k][j][i-1] * strx[i-1];
mux3 += mu[k][j][i+2] * strx[i+2];
mux3 += 3.0 * mu[k][j][i+1] * strx[i+1];
mux3 += 3.0 * mu[k][j][i] * strx[i];
mux2 = mu[k][j][i-2] * strx[i-2];
mux2 += mu[k][j][i+1] * strx[i+1];
mux2 += 3.0 * mu[k][j][i] * strx[i];
mux2 += 3.0 * mu[k][j][i-1] * strx[i-1];
_t_5_ = u_0[k][j][i-1];
_t_5_ -= u_0[k][j][i];
_t_7_ = -u_0[k][j][i];
_t_7_ += u_0[k][j][i+1];
_t_3_ = -u_0[k][j][i];
_t_3_ += u_0[k][j][i-2];
_t_9_ = -u_0[k][j][i];
_t_9_ += u_0[k][j][i+2];
_t_11_ = -u_0[k][j][i];
_t_11_ += u_0[k][j-2][i];
_t_10_ = muy1 * _t_11_;
_t_12_ = -u_0[k][j][i];
_t_12_ += u_0[k][j-1][i];
_t_10_ += muy2 * _t_12_;
_t_13_ = -u_0[k][j][i];
_t_13_ += u_0[k][j+1][i];
_t_10_ += muy3 * _t_13_;
_t_14_ = -u_0[k][j][i];
_t_14_ += u_0[k][j+2][i];
_t_10_ += muy4 * _t_14_;
_t_0_ = stry[j] * _t_10_;
_t_16_ = -u_0[k][j][i];
_t_16_ += u_0[k-2][j][i];
_t_15_ = muz1 * _t_16_;
_t_17_ = -u_0[k][j][i];
_t_17_ += u_0[k-1][j][i];
_t_15_ += muz2 * _t_17_;
_t_18_ = -u_0[k][j][i];
_t_19_ = -u_0[k][j][i];
_t_18_ += u_0[k+1][j][i];
_t_15_ += muz3 * _t_18_;
_t_19_ += u_0[k+2][j][i];
_t_15_ += muz4 * _t_19_;
_t_0_ += strz[k] * _t_15_;
_t_4_ = 2.0 * mux2;
_t_2_ = 2.0 * mux1;
_t_2_ -= 3.0 / 4.0 * la[k][j][i-2] * strx[i-2];
_t_4_ += la[k][j][i-2] * strx[i-2];
_t_2_ += la[k][j][i-1] * strx[i-1];
_t_4_ += 3.0 * la[k][j][i-1] * strx[i-1];
_t_6_ = la[k][j][i-1] * strx[i-1];
_t_6_ += 2.0 * mux3;
_t_2_ -= 3.0 / 4.0 * la[k][j][i] * strx[i];
_t_4_ += 3.0 * la[k][j][i] * strx[i];
_t_6_ += 3.0 * la[k][j][i] * strx[i];
_t_1_ = _t_2_ * _t_3_;
_t_8_ = -3.0 / 4.0 * la[k][j][i] * strx[i];
_t_8_ += 2.0 * mux4;
_t_4_ += la[k][j][i+1] * strx[i+1];
_t_1_ += _t_4_ * _t_5_;
_t_6_ += 3.0 * la[k][j][i+1] * strx[i+1];
_t_8_ += la[k][j][i+1] * strx[i+1];
_t_6_ += la[k][j][i+2] * strx[i+2];
_t_1_ += _t_6_ * _t_7_;
_t_8_ -= 3.0 / 4.0 * la[k][j][i+2] * strx[i+2];
_t_1_ += _t_8_ * _t_9_;
_t_0_ += strx[i] * _t_1_;
r1 = 1.0 / 6.0 * _t_0_;
_t_30_ = u_1[k][j-1][i];
_t_31_ = 3.0 * la[k][j][i] * stry[j];
_t_31_ += 2.0 * muy3;
_t_33_ = -3.0 / 4.0 * la[k][j][i] * stry[j];
_t_33_ += 2.0 * muy4;
_t_31_ += la[k][j+2][i] * stry[j+2];
_t_33_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2];
_t_29_ = 3.0 * la[k][j][i] * stry[j];
_t_29_ += 2.0 * muy2;
_t_29_ += la[k][j+1][i] * stry[j+1];
_t_31_ += 3.0 * la[k][j+1][i] * stry[j+1];
_t_33_ += la[k][j+1][i] * stry[j+1];
_t_27_ = -3.0 / 4.0 * la[k][j][i] * stry[j];
_t_27_ += 2.0 * muy1;
_t_27_ += la[k][j-1][i] * stry[j-1];
_t_29_ += 3.0 * la[k][j-1][i] * stry[j-1];
_t_31_ += la[k][j-1][i] * stry[j-1];
_t_27_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2];
_t_29_ += la[k][j-2][i] * stry[j-2];
_t_30_ -= u_1[k][j][i];
_t_26_ = _t_29_ * _t_30_;
_t_28_ = -u_1[k][j][i];
_t_28_ += u_1[k][j-2][i];
_t_26_ += _t_27_ * _t_28_;
_t_32_ = -u_1[k][j][i];
_t_32_ += u_1[k][j+1][i];
_t_26_ += _t_31_ * _t_32_;
_t_34_ = -u_1[k][j][i];
_t_34_ += u_1[k][j+2][i];
_t_26_ += _t_33_ * _t_34_;
_t_20_ = stry[j] * _t_26_;
_t_22_ = -u_1[k][j][i];
_t_22_ += u_1[k][j][i-2];
_t_21_ = mux1 * _t_22_;
_t_23_ = -u_1[k][j][i];
_t_23_ += u_1[k][j][i-1];
_t_21_ += mux2 * _t_23_;
_t_24_ = -u_1[k][j][i];
_t_24_ += u_1[k][j][i+1];
_t_21_ += mux3 * _t_24_;
_t_25_ = -u_1[k][j][i];
_t_25_ += u_1[k][j][i+2];
_t_21_ += mux4 * _t_25_;
_t_20_ += strx[i] * _t_21_;
_t_36_ = -u_1[k][j][i];
_t_36_ += u_1[k-2][j][i];
_t_35_ = muz1 * _t_36_;
_t_52_ = -3.0 / 4.0 * la[k][j][i] * strz[k];
_t_52_ += 2.0 * muz1;
_t_37_ = -u_1[k][j][i];
_t_37_ += u_1[k-1][j][i];
_t_35_ += muz2 * _t_37_;
_t_54_ = 3.0 * la[k][j][i] * strz[k];
_t_54_ += 2.0 * muz2;
_t_52_ -= 3.0 / 4.0 * la[k-2][j][i] * strz[k-2];
_t_54_ += la[k-2][j][i] * strz[k-2];
_t_38_ = -u_1[k][j][i];
_t_39_ = -u_1[k][j][i];
_t_38_ += u_1[k+1][j][i];
_t_35_ += muz3 * _t_38_;
_t_39_ += u_1[k+2][j][i];
_t_35_ += muz4 * _t_39_;
_t_20_ += strz[k] * _t_35_;
r2 = 1.0 / 6.0 * _t_20_;
_t_56_ = 3.0 * la[k][j][i] * strz[k];
_t_56_ += 2.0 * muz3;
_t_58_ = -3.0 / 4.0 * la[k][j][i] * strz[k];
_t_58_ += 2.0 * muz4;
_t_52_ += la[k-1][j][i] * strz[k-1];
_t_54_ += 3.0 * la[k-1][j][i] * strz[k-1];
_t_56_ += la[k-1][j][i] * strz[k-1];
_t_54_ += la[k+1][j][i] * strz[k+1];
_t_56_ += 3.0 * la[k+1][j][i] * strz[k+1];
_t_58_ += la[k+1][j][i] * strz[k+1];
_t_56_ += la[k+2][j][i] * strz[k+2];
_t_58_ -= 3.0 / 4.0 * la[k+2][j][i] * strz[k+2];
_t_42_ = u_2[k][j][i-2];
_t_42_ -= u_2[k][j][i];
_t_41_ = mux1 * _t_42_;
_t_43_ = -u_2[k][j][i];
_t_43_ += u_2[k][j][i-1];
_t_41_ += mux2 * _t_43_;
_t_44_ = -u_2[k][j][i];
_t_44_ += u_2[k][j][i+1];
_t_41_ += mux3 * _t_44_;
_t_45_ = -u_2[k][j][i];
_t_45_ += u_2[k][j][i+2];
_t_41_ += mux4 * _t_45_;
_t_40_ = strx[i] * _t_41_;
_t_47_ = -u_2[k][j][i];
_t_47_ += u_2[k][j-2][i];
_t_46_ = muy1 * _t_47_;
_t_48_ = -u_2[k][j][i];
_t_48_ += u_2[k][j-1][i];
_t_46_ += muy2 * _t_48_;
_t_49_ = -u_2[k][j][i];
_t_49_ += u_2[k][j+1][i];
_t_46_ += muy3 * _t_49_;
_t_50_ = -u_2[k][j][i];
_t_50_ += u_2[k][j+2][i];
_t_46_ += muy4 * _t_50_;
_t_40_ += stry[j] * _t_46_;
_t_53_ = -u_2[k][j][i];
_t_53_ += u_2[k-2][j][i];
_t_51_ = _t_52_ * _t_53_;
_t_55_ = -u_2[k][j][i];
_t_55_ += u_2[k-1][j][i];
_t_51_ += _t_54_ * _t_55_;
_t_57_ = -u_2[k][j][i];
_t_59_ = -u_2[k][j][i];
_t_57_ += u_2[k+1][j][i];
_t_51_ += _t_56_ * _t_57_;
_t_59_ += u_2[k+2][j][i];
_t_51_ += _t_58_ * _t_59_;
_t_40_ += strz[k] * _t_51_;
r3 = 1.0 / 6.0 * _t_40_;
_t_102_ = stry[j] * strz[k];
_t_100_ = _t_102_ * 1.0 / 144.0;
_t_76_ = stry[j] * strz[k];
_t_74_ = _t_76_ * 1.0 / 144.0;
_t_63_ = strx[i] * strz[k];
_t_61_ = _t_63_ * 1.0 / 144.0;
_t_89_ = strx[i] * strz[k];
_t_87_ = _t_89_ * 1.0 / 144.0;
_t_103_ = u_1[k-2][j-2][i];
_t_77_ = u_1[k-2][j-2][i];
_t_103_ -= u_1[k-2][j+2][i];
_t_85_ = u_1[k-2][j+2][i];
_t_77_ -= u_1[k+2][j-2][i];
_t_111_ = u_1[k+2][j-2][i];
_t_85_ -= u_1[k+2][j+2][i];
_t_111_ -= u_1[k+2][j+2][i];
_t_104_ = -u_1[k-2][j-1][i];
_t_80_ = u_1[k-2][j-1][i];
_t_80_ -= u_1[k+2][j-1][i];
_t_112_ = -u_1[k+2][j-1][i];
_t_104_ += u_1[k-2][j+1][i];
_t_103_ += 8.0 * _t_104_;
_t_83_ = u_1[k-2][j+1][i];
_t_83_ -= u_1[k+2][j+1][i];
_t_112_ += u_1[k+2][j+1][i];
_t_111_ += 8.0 * _t_112_;
_t_101_ = la[k-2][j][i] * _t_103_;
_t_101_ -= la[k+2][j][i] * _t_111_;
_t_78_ = -u_1[k-1][j-2][i];
_t_106_ = u_1[k-1][j-2][i];
_t_106_ -= u_1[k-1][j+2][i];
_t_86_ = -u_1[k-1][j+2][i];
_t_78_ += u_1[k+1][j-2][i];
_t_77_ += 8.0 * _t_78_;
_t_109_ = u_1[k+1][j-2][i];
_t_86_ += u_1[k+1][j+2][i];
_t_85_ += 8.0 * _t_86_;
_t_109_ -= u_1[k+1][j+2][i];
_t_75_ = mu[k][j-2][i] * _t_77_;
_t_75_ -= mu[k][j+2][i] * _t_85_;
_t_107_ = -u_1[k-1][j-1][i];
_t_81_ = -u_1[k-1][j-1][i];
_t_107_ += u_1[k-1][j+1][i];
_t_106_ += 8.0 * _t_107_;
_t_84_ = -u_1[k-1][j+1][i];
_t_105_ = la[k-1][j][i] * _t_106_;
_t_101_ -= 8.0 * _t_105_;
_t_81_ += u_1[k+1][j-1][i];
_t_80_ += 8.0 * _t_81_;
_t_110_ = -u_1[k+1][j-1][i];
_t_84_ += u_1[k+1][j+1][i];
_t_83_ += 8.0 * _t_84_;
_t_110_ += u_1[k+1][j+1][i];
_t_109_ += 8.0 * _t_110_;
_t_108_ = la[k+1][j][i] * _t_109_;
_t_101_ += 8.0 * _t_108_;
_t_60_ = _t_100_ * _t_101_;
_t_79_ = mu[k][j-1][i] * _t_80_;
_t_75_ -= 8.0 * _t_79_;
_t_82_ = mu[k][j+1][i] * _t_83_;
_t_75_ += 8.0 * _t_82_;
_t_60_ += _t_74_ * _t_75_;
_t_64_ = u_0[k-2][j][i-2];
_t_90_ = u_0[k-2][j][i-2];
_t_90_ -= u_0[k-2][j][i+2];
_t_72_ = u_0[k-2][j][i+2];
_t_64_ -= u_0[k+2][j][i-2];
_t_98_ = u_0[k+2][j][i-2];
_t_72_ -= u_0[k+2][j][i+2];
_t_98_ -= u_0[k+2][j][i+2];
_t_65_ = -u_0[k-1][j][i-2];
_t_93_ = u_0[k-1][j][i-2];
_t_93_ -= u_0[k-1][j][i+2];
_t_73_ = -u_0[k-1][j][i+2];
_t_65_ += u_0[k+1][j][i-2];
_t_64_ += 8.0 * _t_65_;
_t_96_ = u_0[k+1][j][i-2];
_t_73_ += u_0[k+1][j][i+2];
_t_72_ += 8.0 * _t_73_;
_t_96_ -= u_0[k+1][j][i+2];
_t_62_ = mu[k][j][i-2] * _t_64_;
_t_62_ -= mu[k][j][i+2] * _t_72_;
_t_91_ = -u_0[k-2][j][i-1];
_t_67_ = u_0[k-2][j][i-1];
_t_67_ -= u_0[k+2][j][i-1];
_t_99_ = -u_0[k+2][j][i-1];
_t_91_ += u_0[k-2][j][i+1];
_t_90_ += 8.0 * _t_91_;
_t_88_ = la[k-2][j][i] * _t_90_;
_t_70_ = u_0[k-2][j][i+1];
_t_70_ -= u_0[k+2][j][i+1];
_t_99_ += u_0[k+2][j][i+1];
_t_98_ += 8.0 * _t_99_;
_t_88_ -= la[k+2][j][i] * _t_98_;
_t_68_ = -u_0[k-1][j][i-1];
_t_94_ = -u_0[k-1][j][i-1];
_t_94_ += u_0[k-1][j][i+1];
_t_93_ += 8.0 * _t_94_;
_t_92_ = la[k-1][j][i] * _t_93_;
_t_88_ -= 8.0 * _t_92_;
_t_71_ = -u_0[k-1][j][i+1];
_t_68_ += u_0[k+1][j][i-1];
_t_67_ += 8.0 * _t_68_;
_t_97_ = -u_0[k+1][j][i-1];
_t_71_ += u_0[k+1][j][i+1];
_t_70_ += 8.0 * _t_71_;
_t_97_ += u_0[k+1][j][i+1];
_t_96_ += 8.0 * _t_97_;
_t_95_ = la[k+1][j][i] * _t_96_;
_t_88_ += 8.0 * _t_95_;
_t_60_ += _t_87_ * _t_88_;
_t_66_ = mu[k][j][i-1] * _t_67_;
_t_62_ -= 8.0 * _t_66_;
_t_69_ = mu[k][j][i+1] * _t_70_;
_t_62_ += 8.0 * _t_69_;
_t_60_ += _t_61_ * _t_62_;
r3 += _t_60_;
_t_129_ = strx[i] * strz[k];
_t_127_ = _t_129_ * 1.0 / 144.0;
_t_155_ = strx[i] * strz[k];
_t_153_ = _t_155_ * 1.0 / 144.0;
_t_116_ = strx[i] * stry[j];
_t_114_ = _t_116_ * 1.0 / 144.0;
_t_142_ = strx[i] * stry[j];
_t_140_ = _t_142_ * 1.0 / 144.0;
_t_117_ = u_1[k][j-2][i-2];
_t_143_ = u_1[k][j-2][i-2];
_t_143_ -= u_1[k][j-2][i+2];
_t_125_ = u_1[k][j-2][i+2];
_t_117_ -= u_1[k][j+2][i-2];
_t_151_ = u_1[k][j+2][i-2];
_t_125_ -= u_1[k][j+2][i+2];
_t_151_ -= u_1[k][j+2][i+2];
_t_118_ = -u_1[k][j-1][i-2];
_t_146_ = u_1[k][j-1][i-2];
_t_146_ -= u_1[k][j-1][i+2];
_t_126_ = -u_1[k][j-1][i+2];
_t_118_ += u_1[k][j+1][i-2];
_t_117_ += 8.0 * _t_118_;
_t_149_ = u_1[k][j+1][i-2];
_t_126_ += u_1[k][j+1][i+2];
_t_125_ += 8.0 * _t_126_;
_t_149_ -= u_1[k][j+1][i+2];
_t_115_ = la[k][j][i-2] * _t_117_;
_t_115_ -= la[k][j][i+2] * _t_125_;
_t_144_ = -u_1[k][j-2][i-1];
_t_120_ = u_1[k][j-2][i-1];
_t_120_ -= u_1[k][j+2][i-1];
_t_152_ = -u_1[k][j+2][i-1];
_t_144_ += u_1[k][j-2][i+1];
_t_143_ += 8.0 * _t_144_;
_t_141_ = mu[k][j-2][i] * _t_143_;
_t_123_ = u_1[k][j-2][i+1];
_t_123_ -= u_1[k][j+2][i+1];
_t_152_ += u_1[k][j+2][i+1];
_t_151_ += 8.0 * _t_152_;
_t_141_ -= mu[k][j+2][i] * _t_151_;
_t_121_ = -u_1[k][j-1][i-1];
_t_147_ = -u_1[k][j-1][i-1];
_t_147_ += u_1[k][j-1][i+1];
_t_146_ += 8.0 * _t_147_;
_t_145_ = mu[k][j-1][i] * _t_146_;
_t_141_ -= 8.0 * _t_145_;
_t_124_ = -u_1[k][j-1][i+1];
_t_121_ += u_1[k][j+1][i-1];
_t_120_ += 8.0 * _t_121_;
_t_150_ = -u_1[k][j+1][i-1];
_t_124_ += u_1[k][j+1][i+1];
_t_123_ += 8.0 * _t_124_;
_t_150_ += u_1[k][j+1][i+1];
_t_149_ += 8.0 * _t_150_;
_t_148_ = mu[k][j+1][i] * _t_149_;
_t_141_ += 8.0 * _t_148_;
_t_113_ = _t_140_ * _t_141_;
_t_119_ = la[k][j][i-1] * _t_120_;
_t_115_ -= 8.0 * _t_119_;
_t_122_ = la[k][j][i+1] * _t_123_;
_t_115_ += 8.0 * _t_122_;
_t_113_ += _t_114_ * _t_115_;
_t_130_ = u_2[k-2][j][i-2];
_t_156_ = u_2[k-2][j][i-2];
_t_156_ -= u_2[k-2][j][i+2];
_t_138_ = u_2[k-2][j][i+2];
_t_130_ -= u_2[k+2][j][i-2];
_t_164_ = u_2[k+2][j][i-2];
_t_138_ -= u_2[k+2][j][i+2];
_t_164_ -= u_2[k+2][j][i+2];
_t_131_ = -u_2[k-1][j][i-2];
_t_159_ = u_2[k-1][j][i-2];
_t_159_ -= u_2[k-1][j][i+2];
_t_139_ = -u_2[k-1][j][i+2];
_t_131_ += u_2[k+1][j][i-2];
_t_130_ += 8.0 * _t_131_;
_t_128_ = la[k][j][i-2] * _t_130_;
_t_162_ = u_2[k+1][j][i-2];
_t_139_ += u_2[k+1][j][i+2];
_t_138_ += 8.0 * _t_139_;
_t_128_ -= la[k][j][i+2] * _t_138_;
_t_162_ -= u_2[k+1][j][i+2];
_t_157_ = -u_2[k-2][j][i-1];
_t_133_ = u_2[k-2][j][i-1];
_t_133_ -= u_2[k+2][j][i-1];
_t_165_ = -u_2[k+2][j][i-1];
_t_157_ += u_2[k-2][j][i+1];
_t_156_ += 8.0 * _t_157_;
_t_136_ = u_2[k-2][j][i+1];
_t_136_ -= u_2[k+2][j][i+1];
_t_165_ += u_2[k+2][j][i+1];
_t_164_ += 8.0 * _t_165_;
_t_154_ = mu[k-2][j][i] * _t_156_;
_t_154_ -= mu[k+2][j][i] * _t_164_;
_t_134_ = -u_2[k-1][j][i-1];
_t_160_ = -u_2[k-1][j][i-1];
_t_160_ += u_2[k-1][j][i+1];
_t_159_ += 8.0 * _t_160_;
_t_137_ = -u_2[k-1][j][i+1];
_t_158_ = mu[k-1][j][i] * _t_159_;
_t_154_ -= 8.0 * _t_158_;
_t_134_ += u_2[k+1][j][i-1];
_t_133_ += 8.0 * _t_134_;
_t_132_ = la[k][j][i-1] * _t_133_;
_t_128_ -= 8.0 * _t_132_;
_t_163_ = -u_2[k+1][j][i-1];
_t_137_ += u_2[k+1][j][i+1];
_t_136_ += 8.0 * _t_137_;
_t_163_ += u_2[k+1][j][i+1];
_t_162_ += 8.0 * _t_163_;
_t_135_ = la[k][j][i+1] * _t_136_;
_t_128_ += 8.0 * _t_135_;
_t_113_ += _t_127_ * _t_128_;
_t_161_ = mu[k+1][j][i] * _t_162_;
_t_154_ += 8.0 * _t_161_;
_t_113_ += _t_153_ * _t_154_;
r1 += _t_113_;
_t_169_ = strx[i] * stry[j];
_t_182_ = strx[i] * stry[j];
_t_167_ = _t_169_ * 1.0 / 144.0;
_t_180_ = _t_182_ * 1.0 / 144.0;
_t_195_ = stry[j] * strz[k];
_t_208_ = stry[j] * strz[k];
_t_193_ = _t_195_ * 1.0 / 144.0;
_t_206_ = _t_208_ * 1.0 / 144.0;
_t_170_ = u_0[k][j-2][i-2];
_t_183_ = u_0[k][j-2][i-2];
_t_183_ -= u_0[k][j-2][i+2];
_t_178_ = u_0[k][j-2][i+2];
_t_170_ -= u_0[k][j+2][i-2];
_t_191_ = u_0[k][j+2][i-2];
_t_178_ -= u_0[k][j+2][i+2];
_t_191_ -= u_0[k][j+2][i+2];
_t_171_ = -u_0[k][j-1][i-2];
_t_186_ = u_0[k][j-1][i-2];
_t_186_ -= u_0[k][j-1][i+2];
_t_179_ = -u_0[k][j-1][i+2];
_t_171_ += u_0[k][j+1][i-2];
_t_170_ += 8.0 * _t_171_;
_t_168_ = mu[k][j][i-2] * _t_170_;
_t_189_ = u_0[k][j+1][i-2];
_t_179_ += u_0[k][j+1][i+2];
_t_178_ += 8.0 * _t_179_;
_t_168_ -= mu[k][j][i+2] * _t_178_;
_t_189_ -= u_0[k][j+1][i+2];
_t_184_ = -u_0[k][j-2][i-1];
_t_173_ = u_0[k][j-2][i-1];
_t_173_ -= u_0[k][j+2][i-1];
_t_192_ = -u_0[k][j+2][i-1];
_t_184_ += u_0[k][j-2][i+1];
_t_183_ += 8.0 * _t_184_;
_t_176_ = u_0[k][j-2][i+1];
_t_176_ -= u_0[k][j+2][i+1];
_t_192_ += u_0[k][j+2][i+1];
_t_191_ += 8.0 * _t_192_;
_t_181_ = la[k][j-2][i] * _t_183_;
_t_181_ -= la[k][j+2][i] * _t_191_;
_t_174_ = -u_0[k][j-1][i-1];
_t_187_ = -u_0[k][j-1][i-1];
_t_187_ += u_0[k][j-1][i+1];
_t_186_ += 8.0 * _t_187_;
_t_177_ = -u_0[k][j-1][i+1];
_t_185_ = la[k][j-1][i] * _t_186_;
_t_181_ -= 8.0 * _t_185_;
_t_174_ += u_0[k][j+1][i-1];
_t_173_ += 8.0 * _t_174_;
_t_172_ = mu[k][j][i-1] * _t_173_;
_t_168_ -= 8.0 * _t_172_;
_t_190_ = -u_0[k][j+1][i-1];
_t_177_ += u_0[k][j+1][i+1];
_t_176_ += 8.0 * _t_177_;
_t_190_ += u_0[k][j+1][i+1];
_t_189_ += 8.0 * _t_190_;
_t_175_ = mu[k][j][i+1] * _t_176_;
_t_168_ += 8.0 * _t_175_;
_t_166_ = _t_167_ * _t_168_;
_t_188_ = la[k][j+1][i] * _t_189_;
_t_181_ += 8.0 * _t_188_;
_t_166_ += _t_180_ * _t_181_;
_t_196_ = u_2[k-2][j-2][i];
_t_209_ = u_2[k-2][j-2][i];
_t_209_ -= u_2[k-2][j+2][i];
_t_204_ = u_2[k-2][j+2][i];
_t_196_ -= u_2[k+2][j-2][i];
_t_217_ = u_2[k+2][j-2][i];
_t_204_ -= u_2[k+2][j+2][i];
_t_217_ -= u_2[k+2][j+2][i];
_t_197_ = -u_2[k-1][j-2][i];
_t_212_ = u_2[k-1][j-2][i];
_t_212_ -= u_2[k-1][j+2][i];
_t_205_ = -u_2[k-1][j+2][i];
_t_197_ += u_2[k+1][j-2][i];
_t_196_ += 8.0 * _t_197_;
_t_194_ = la[k][j-2][i] * _t_196_;
_t_215_ = u_2[k+1][j-2][i];
_t_205_ += u_2[k+1][j+2][i];
_t_204_ += 8.0 * _t_205_;
_t_194_ -= la[k][j+2][i] * _t_204_;
_t_215_ -= u_2[k+1][j+2][i];
_t_210_ = -u_2[k-2][j-1][i];
_t_199_ = u_2[k-2][j-1][i];
_t_199_ -= u_2[k+2][j-1][i];
_t_218_ = -u_2[k+2][j-1][i];
_t_210_ += u_2[k-2][j+1][i];
_t_209_ += 8.0 * _t_210_;
_t_207_ = mu[k-2][j][i] * _t_209_;
_t_202_ = u_2[k-2][j+1][i];
_t_202_ -= u_2[k+2][j+1][i];
_t_218_ += u_2[k+2][j+1][i];
_t_217_ += 8.0 * _t_218_;
_t_207_ -= mu[k+2][j][i] * _t_217_;
_t_200_ = -u_2[k-1][j-1][i];
_t_213_ = -u_2[k-1][j-1][i];
_t_213_ += u_2[k-1][j+1][i];
_t_212_ += 8.0 * _t_213_;
_t_211_ = mu[k-1][j][i] * _t_212_;
_t_207_ -= 8.0 * _t_211_;
_t_203_ = -u_2[k-1][j+1][i];
_t_200_ += u_2[k+1][j-1][i];
_t_199_ += 8.0 * _t_200_;
_t_198_ = la[k][j-1][i] * _t_199_;
_t_194_ -= 8.0 * _t_198_;
_t_216_ = -u_2[k+1][j-1][i];
_t_203_ += u_2[k+1][j+1][i];
_t_202_ += 8.0 * _t_203_;
_t_216_ += u_2[k+1][j+1][i];
_t_215_ += 8.0 * _t_216_;
_t_201_ = la[k][j+1][i] * _t_202_;
_t_194_ += 8.0 * _t_201_;
_t_166_ += _t_193_ * _t_194_;
_t_214_ = mu[k+1][j][i] * _t_215_;
_t_207_ += 8.0 * _t_214_;
_t_166_ += _t_206_ * _t_207_;
r2 += _t_166_;
uacc_0kc0jc0ic0 = a1 * uacc_0[k][j][i];
uacc_0kc0jc0ic0 += cof * r1;
uacc_0[k][j][i] = uacc_0kc0jc0ic0;
uacc_1kc0jc0ic0 = a1 * uacc_1[k][j][i];
uacc_1kc0jc0ic0 += cof * r2;
uacc_1[k][j][i] = uacc_1kc0jc0ic0;
uacc_2kc0jc0ic0 = a1 * uacc_2[k][j][i];
uacc_2kc0jc0ic0 += cof * r3;
uacc_2[k][j][i] = uacc_2kc0jc0ic0;
}
}
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
double *uacc_0;
hipMalloc (&uacc_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_0\n");
hipMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *uacc_1;
hipMalloc (&uacc_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_1\n");
hipMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *uacc_2;
hipMalloc (&uacc_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_2\n");
hipMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u_0;
hipMalloc (&u_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_0\n");
hipMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u_1;
hipMalloc (&u_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_1\n");
hipMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u_2;
hipMalloc (&u_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_2\n");
hipMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *mu;
hipMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
hipMemcpy (mu, h_mu, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *la;
hipMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
hipMemcpy (la, h_la, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *strx;
hipMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
hipMemcpy (strx, h_strx, sizeof(double)*N, hipMemcpyHostToDevice);
double *stry;
hipMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
hipMemcpy (stry, h_stry, sizeof(double)*N, hipMemcpyHostToDevice);
double *strz;
hipMalloc (&strz, sizeof(double)*N);
check_error ("Failed to allocate device memory for strz\n");
hipMemcpy (strz, h_strz, sizeof(double)*N, hipMemcpyHostToDevice);
dim3 blockconfig (16, 2, 2);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), ceil(N, blockconfig.z));
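// one thread per grid point; ceil() rounds the launch grid up and the kernel's i/j/k bounds check guards the overhang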
hipLaunchKernelGGL(( sw4) , dim3(gridconfig), dim3(blockconfig), 0, 0, uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
hipMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
hipMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
hipMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
hipFree (uacc_0);
hipFree (uacc_1);
hipFree (uacc_2);
hipFree (u_0);
hipFree (u_1);
hipFree (u_2);
hipFree (mu);
hipFree (la);
hipFree (strx);
hipFree (stry);
hipFree (strz);
}
| fe656a7d2c05e5cd37bbaf986747c0884cace82d.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void sw4 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
double r1, r2, r3;
if (i>=2 & j>=2 & k>=2 & i<=N-3 & j<=N-3 & k<=N-3) {
double muz4;
double muz1;
double muy4;
double muy1;
double mux4;
double mux1;
double muz3;
double muz2;
double muy3;
double muy2;
double mux3;
double mux2;
double _t_5_;
double _t_7_;
double _t_3_;
double _t_9_;
double _t_11_;
double _t_10_;
double _t_12_;
double _t_13_;
double _t_14_;
double _t_0_;
double _t_16_;
double _t_15_;
double _t_17_;
double _t_18_;
double _t_19_;
double _t_4_;
double _t_2_;
double _t_6_;
double _t_1_;
double _t_8_;
double r1;
double _t_30_;
double _t_31_;
double _t_33_;
double _t_29_;
double _t_27_;
double _t_26_;
double _t_28_;
double _t_32_;
double _t_34_;
double _t_20_;
double _t_22_;
double _t_21_;
double _t_23_;
double _t_24_;
double _t_25_;
double _t_36_;
double _t_35_;
double _t_52_;
double _t_37_;
double _t_54_;
double _t_38_;
double _t_39_;
double r2;
double _t_56_;
double _t_58_;
double _t_42_;
double _t_41_;
double _t_43_;
double _t_44_;
double _t_45_;
double _t_40_;
double _t_47_;
double _t_46_;
double _t_48_;
double _t_49_;
double _t_50_;
double _t_53_;
double _t_51_;
double _t_55_;
double _t_57_;
double _t_59_;
double r3;
double _t_102_;
double _t_100_;
double _t_76_;
double _t_74_;
double _t_63_;
double _t_61_;
double _t_89_;
double _t_87_;
double _t_103_;
double _t_77_;
double _t_85_;
double _t_111_;
double _t_104_;
double _t_80_;
double _t_112_;
double _t_83_;
double _t_101_;
double _t_78_;
double _t_106_;
double _t_86_;
double _t_109_;
double _t_75_;
double _t_107_;
double _t_81_;
double _t_84_;
double _t_105_;
double _t_110_;
double _t_108_;
double _t_60_;
double _t_79_;
double _t_82_;
double _t_64_;
double _t_90_;
double _t_72_;
double _t_98_;
double _t_65_;
double _t_93_;
double _t_73_;
double _t_96_;
double _t_62_;
double _t_91_;
double _t_67_;
double _t_99_;
double _t_88_;
double _t_70_;
double _t_68_;
double _t_94_;
double _t_92_;
double _t_71_;
double _t_97_;
double _t_95_;
double _t_66_;
double _t_69_;
double _t_129_;
double _t_127_;
double _t_155_;
double _t_153_;
double _t_116_;
double _t_114_;
double _t_142_;
double _t_140_;
double _t_117_;
double _t_143_;
double _t_125_;
double _t_151_;
double _t_118_;
double _t_146_;
double _t_126_;
double _t_149_;
double _t_115_;
double _t_144_;
double _t_120_;
double _t_152_;
double _t_141_;
double _t_123_;
double _t_121_;
double _t_147_;
double _t_145_;
double _t_124_;
double _t_150_;
double _t_148_;
double _t_113_;
double _t_119_;
double _t_122_;
double _t_130_;
double _t_156_;
double _t_138_;
double _t_164_;
double _t_131_;
double _t_159_;
double _t_139_;
double _t_128_;
double _t_162_;
double _t_157_;
double _t_133_;
double _t_165_;
double _t_136_;
double _t_154_;
double _t_134_;
double _t_160_;
double _t_137_;
double _t_158_;
double _t_132_;
double _t_163_;
double _t_135_;
double _t_161_;
double _t_169_;
double _t_182_;
double _t_167_;
double _t_180_;
double _t_195_;
double _t_208_;
double _t_193_;
double _t_206_;
double _t_170_;
double _t_183_;
double _t_178_;
double _t_191_;
double _t_171_;
double _t_186_;
double _t_179_;
double _t_168_;
double _t_189_;
double _t_184_;
double _t_173_;
double _t_192_;
double _t_176_;
double _t_181_;
double _t_174_;
double _t_187_;
double _t_177_;
double _t_185_;
double _t_172_;
double _t_190_;
double _t_175_;
double _t_166_;
double _t_188_;
double _t_196_;
double _t_209_;
double _t_204_;
double _t_217_;
double _t_197_;
double _t_212_;
double _t_205_;
double _t_194_;
double _t_215_;
double _t_210_;
double _t_199_;
double _t_218_;
double _t_207_;
double _t_202_;
double _t_200_;
double _t_213_;
double _t_211_;
double _t_203_;
double _t_198_;
double _t_216_;
double _t_201_;
double _t_214_;
double uacc_0kc0jc0ic0;
double uacc_1kc0jc0ic0;
double uacc_2kc0jc0ic0;
muz4 = -3.0 / 4.0 * mu[k+2][j][i] * strz[k+2];
muz4 += mu[k+1][j][i] * strz[k+1];
muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
muz1 = -3.0 / 4.0 * mu[k][j][i] * strz[k];
muz1 += mu[k-1][j][i] * strz[k-1];
muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2];
muy4 = -3.0 / 4.0 * mu[k][j][i] * stry[j];
muy4 += mu[k][j+1][i] * stry[j+1];
muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2];
muy1 = -3.0 / 4.0 * mu[k][j][i] * stry[j];
muy1 += mu[k][j-1][i] * stry[j-1];
muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2];
mux4 = -3.0 / 4.0 * mu[k][j][i] * strx[i];
mux4 += mu[k][j][i+1] * strx[i+1];
mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2];
mux1 = -3.0 / 4.0 * mu[k][j][i] * strx[i];
mux1 += mu[k][j][i-1] * strx[i-1];
mux1 -= 3.0 / 4.0 * mu[k][j][i-2] * strx[i-2];
muz3 = mu[k-1][j][i] * strz[k-1];
muz3 += mu[k+2][j][i] * strz[k+2];
muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
muz3 += 3.0 * mu[k][j][i] * strz[k];
muz2 = mu[k-2][j][i] * strz[k-2];
muz2 += mu[k+1][j][i] * strz[k+1];
muz2 += 3.0 * mu[k][j][i] * strz[k];
muz2 += 3.0 * mu[k-1][j][i] * strz[k-1];
muy3 = mu[k][j-1][i] * stry[j-1];
muy3 += mu[k][j+2][i] * stry[j+2];
muy3 += 3.0 * mu[k][j+1][i] * stry[j+1];
muy3 += 3.0 * mu[k][j][i] * stry[j];
muy2 = mu[k][j-2][i] * stry[j-2];
muy2 += mu[k][j+1][i] * stry[j+1];
muy2 += 3.0 * mu[k][j][i] * stry[j];
muy2 += 3.0 * mu[k][j-1][i] * stry[j-1];
mux3 = mu[k][j][i-1] * strx[i-1];
mux3 += mu[k][j][i+2] * strx[i+2];
mux3 += 3.0 * mu[k][j][i+1] * strx[i+1];
mux3 += 3.0 * mu[k][j][i] * strx[i];
mux2 = mu[k][j][i-2] * strx[i-2];
mux2 += mu[k][j][i+1] * strx[i+1];
mux2 += 3.0 * mu[k][j][i] * strx[i];
mux2 += 3.0 * mu[k][j][i-1] * strx[i-1];
_t_5_ = u_0[k][j][i-1];
_t_5_ -= u_0[k][j][i];
_t_7_ = -u_0[k][j][i];
_t_7_ += u_0[k][j][i+1];
_t_3_ = -u_0[k][j][i];
_t_3_ += u_0[k][j][i-2];
_t_9_ = -u_0[k][j][i];
_t_9_ += u_0[k][j][i+2];
_t_11_ = -u_0[k][j][i];
_t_11_ += u_0[k][j-2][i];
_t_10_ = muy1 * _t_11_;
_t_12_ = -u_0[k][j][i];
_t_12_ += u_0[k][j-1][i];
_t_10_ += muy2 * _t_12_;
_t_13_ = -u_0[k][j][i];
_t_13_ += u_0[k][j+1][i];
_t_10_ += muy3 * _t_13_;
_t_14_ = -u_0[k][j][i];
_t_14_ += u_0[k][j+2][i];
_t_10_ += muy4 * _t_14_;
_t_0_ = stry[j] * _t_10_;
_t_16_ = -u_0[k][j][i];
_t_16_ += u_0[k-2][j][i];
_t_15_ = muz1 * _t_16_;
_t_17_ = -u_0[k][j][i];
_t_17_ += u_0[k-1][j][i];
_t_15_ += muz2 * _t_17_;
_t_18_ = -u_0[k][j][i];
_t_19_ = -u_0[k][j][i];
_t_18_ += u_0[k+1][j][i];
_t_15_ += muz3 * _t_18_;
_t_19_ += u_0[k+2][j][i];
_t_15_ += muz4 * _t_19_;
_t_0_ += strz[k] * _t_15_;
_t_4_ = 2.0 * mux2;
_t_2_ = 2.0 * mux1;
_t_2_ -= 3.0 / 4.0 * la[k][j][i-2] * strx[i-2];
_t_4_ += la[k][j][i-2] * strx[i-2];
_t_2_ += la[k][j][i-1] * strx[i-1];
_t_4_ += 3.0 * la[k][j][i-1] * strx[i-1];
_t_6_ = la[k][j][i-1] * strx[i-1];
_t_6_ += 2.0 * mux3;
_t_2_ -= 3.0 / 4.0 * la[k][j][i] * strx[i];
_t_4_ += 3.0 * la[k][j][i] * strx[i];
_t_6_ += 3.0 * la[k][j][i] * strx[i];
_t_1_ = _t_2_ * _t_3_;
_t_8_ = -3.0 / 4.0 * la[k][j][i] * strx[i];
_t_8_ += 2.0 * mux4;
_t_4_ += la[k][j][i+1] * strx[i+1];
_t_1_ += _t_4_ * _t_5_;
_t_6_ += 3.0 * la[k][j][i+1] * strx[i+1];
_t_8_ += la[k][j][i+1] * strx[i+1];
_t_6_ += la[k][j][i+2] * strx[i+2];
_t_1_ += _t_6_ * _t_7_;
_t_8_ -= 3.0 / 4.0 * la[k][j][i+2] * strx[i+2];
_t_1_ += _t_8_ * _t_9_;
_t_0_ += strx[i] * _t_1_;
r1 = 1.0 / 6.0 * _t_0_;
_t_30_ = u_1[k][j-1][i];
_t_31_ = 3.0 * la[k][j][i] * stry[j];
_t_31_ += 2.0 * muy3;
_t_33_ = -3.0 / 4.0 * la[k][j][i] * stry[j];
_t_33_ += 2.0 * muy4;
_t_31_ += la[k][j+2][i] * stry[j+2];
_t_33_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2];
_t_29_ = 3.0 * la[k][j][i] * stry[j];
_t_29_ += 2.0 * muy2;
_t_29_ += la[k][j+1][i] * stry[j+1];
_t_31_ += 3.0 * la[k][j+1][i] * stry[j+1];
_t_33_ += la[k][j+1][i] * stry[j+1];
_t_27_ = -3.0 / 4.0 * la[k][j][i] * stry[j];
_t_27_ += 2.0 * muy1;
_t_27_ += la[k][j-1][i] * stry[j-1];
_t_29_ += 3.0 * la[k][j-1][i] * stry[j-1];
_t_31_ += la[k][j-1][i] * stry[j-1];
_t_27_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2];
_t_29_ += la[k][j-2][i] * stry[j-2];
_t_30_ -= u_1[k][j][i];
_t_26_ = _t_29_ * _t_30_;
_t_28_ = -u_1[k][j][i];
_t_28_ += u_1[k][j-2][i];
_t_26_ += _t_27_ * _t_28_;
_t_32_ = -u_1[k][j][i];
_t_32_ += u_1[k][j+1][i];
_t_26_ += _t_31_ * _t_32_;
_t_34_ = -u_1[k][j][i];
_t_34_ += u_1[k][j+2][i];
_t_26_ += _t_33_ * _t_34_;
_t_20_ = stry[j] * _t_26_;
_t_22_ = -u_1[k][j][i];
_t_22_ += u_1[k][j][i-2];
_t_21_ = mux1 * _t_22_;
_t_23_ = -u_1[k][j][i];
_t_23_ += u_1[k][j][i-1];
_t_21_ += mux2 * _t_23_;
_t_24_ = -u_1[k][j][i];
_t_24_ += u_1[k][j][i+1];
_t_21_ += mux3 * _t_24_;
_t_25_ = -u_1[k][j][i];
_t_25_ += u_1[k][j][i+2];
_t_21_ += mux4 * _t_25_;
_t_20_ += strx[i] * _t_21_;
_t_36_ = -u_1[k][j][i];
_t_36_ += u_1[k-2][j][i];
_t_35_ = muz1 * _t_36_;
_t_52_ = -3.0 / 4.0 * la[k][j][i] * strz[k];
_t_52_ += 2.0 * muz1;
_t_37_ = -u_1[k][j][i];
_t_37_ += u_1[k-1][j][i];
_t_35_ += muz2 * _t_37_;
_t_54_ = 3.0 * la[k][j][i] * strz[k];
_t_54_ += 2.0 * muz2;
_t_52_ -= 3.0 / 4.0 * la[k-2][j][i] * strz[k-2];
_t_54_ += la[k-2][j][i] * strz[k-2];
_t_38_ = -u_1[k][j][i];
_t_39_ = -u_1[k][j][i];
_t_38_ += u_1[k+1][j][i];
_t_35_ += muz3 * _t_38_;
_t_39_ += u_1[k+2][j][i];
_t_35_ += muz4 * _t_39_;
_t_20_ += strz[k] * _t_35_;
r2 = 1.0 / 6.0 * _t_20_;
_t_56_ = 3.0 * la[k][j][i] * strz[k];
_t_56_ += 2.0 * muz3;
_t_58_ = -3.0 / 4.0 * la[k][j][i] * strz[k];
_t_58_ += 2.0 * muz4;
_t_52_ += la[k-1][j][i] * strz[k-1];
_t_54_ += 3.0 * la[k-1][j][i] * strz[k-1];
_t_56_ += la[k-1][j][i] * strz[k-1];
_t_54_ += la[k+1][j][i] * strz[k+1];
_t_56_ += 3.0 * la[k+1][j][i] * strz[k+1];
_t_58_ += la[k+1][j][i] * strz[k+1];
_t_56_ += la[k+2][j][i] * strz[k+2];
_t_58_ -= 3.0 / 4.0 * la[k+2][j][i] * strz[k+2];
_t_42_ = u_2[k][j][i-2];
_t_42_ -= u_2[k][j][i];
_t_41_ = mux1 * _t_42_;
_t_43_ = -u_2[k][j][i];
_t_43_ += u_2[k][j][i-1];
_t_41_ += mux2 * _t_43_;
_t_44_ = -u_2[k][j][i];
_t_44_ += u_2[k][j][i+1];
_t_41_ += mux3 * _t_44_;
_t_45_ = -u_2[k][j][i];
_t_45_ += u_2[k][j][i+2];
_t_41_ += mux4 * _t_45_;
_t_40_ = strx[i] * _t_41_;
_t_47_ = -u_2[k][j][i];
_t_47_ += u_2[k][j-2][i];
_t_46_ = muy1 * _t_47_;
_t_48_ = -u_2[k][j][i];
_t_48_ += u_2[k][j-1][i];
_t_46_ += muy2 * _t_48_;
_t_49_ = -u_2[k][j][i];
_t_49_ += u_2[k][j+1][i];
_t_46_ += muy3 * _t_49_;
_t_50_ = -u_2[k][j][i];
_t_50_ += u_2[k][j+2][i];
_t_46_ += muy4 * _t_50_;
_t_40_ += stry[j] * _t_46_;
_t_53_ = -u_2[k][j][i];
_t_53_ += u_2[k-2][j][i];
_t_51_ = _t_52_ * _t_53_;
_t_55_ = -u_2[k][j][i];
_t_55_ += u_2[k-1][j][i];
_t_51_ += _t_54_ * _t_55_;
_t_57_ = -u_2[k][j][i];
_t_59_ = -u_2[k][j][i];
_t_57_ += u_2[k+1][j][i];
_t_51_ += _t_56_ * _t_57_;
_t_59_ += u_2[k+2][j][i];
_t_51_ += _t_58_ * _t_59_;
_t_40_ += strz[k] * _t_51_;
r3 = 1.0 / 6.0 * _t_40_;
_t_102_ = stry[j] * strz[k];
_t_100_ = _t_102_ * 1.0 / 144.0;
_t_76_ = stry[j] * strz[k];
_t_74_ = _t_76_ * 1.0 / 144.0;
_t_63_ = strx[i] * strz[k];
_t_61_ = _t_63_ * 1.0 / 144.0;
_t_89_ = strx[i] * strz[k];
_t_87_ = _t_89_ * 1.0 / 144.0;
_t_103_ = u_1[k-2][j-2][i];
_t_77_ = u_1[k-2][j-2][i];
_t_103_ -= u_1[k-2][j+2][i];
_t_85_ = u_1[k-2][j+2][i];
_t_77_ -= u_1[k+2][j-2][i];
_t_111_ = u_1[k+2][j-2][i];
_t_85_ -= u_1[k+2][j+2][i];
_t_111_ -= u_1[k+2][j+2][i];
_t_104_ = -u_1[k-2][j-1][i];
_t_80_ = u_1[k-2][j-1][i];
_t_80_ -= u_1[k+2][j-1][i];
_t_112_ = -u_1[k+2][j-1][i];
_t_104_ += u_1[k-2][j+1][i];
_t_103_ += 8.0 * _t_104_;
_t_83_ = u_1[k-2][j+1][i];
_t_83_ -= u_1[k+2][j+1][i];
_t_112_ += u_1[k+2][j+1][i];
_t_111_ += 8.0 * _t_112_;
_t_101_ = la[k-2][j][i] * _t_103_;
_t_101_ -= la[k+2][j][i] * _t_111_;
_t_78_ = -u_1[k-1][j-2][i];
_t_106_ = u_1[k-1][j-2][i];
_t_106_ -= u_1[k-1][j+2][i];
_t_86_ = -u_1[k-1][j+2][i];
_t_78_ += u_1[k+1][j-2][i];
_t_77_ += 8.0 * _t_78_;
_t_109_ = u_1[k+1][j-2][i];
_t_86_ += u_1[k+1][j+2][i];
_t_85_ += 8.0 * _t_86_;
_t_109_ -= u_1[k+1][j+2][i];
_t_75_ = mu[k][j-2][i] * _t_77_;
_t_75_ -= mu[k][j+2][i] * _t_85_;
_t_107_ = -u_1[k-1][j-1][i];
_t_81_ = -u_1[k-1][j-1][i];
_t_107_ += u_1[k-1][j+1][i];
_t_106_ += 8.0 * _t_107_;
_t_84_ = -u_1[k-1][j+1][i];
_t_105_ = la[k-1][j][i] * _t_106_;
_t_101_ -= 8.0 * _t_105_;
_t_81_ += u_1[k+1][j-1][i];
_t_80_ += 8.0 * _t_81_;
_t_110_ = -u_1[k+1][j-1][i];
_t_84_ += u_1[k+1][j+1][i];
_t_83_ += 8.0 * _t_84_;
_t_110_ += u_1[k+1][j+1][i];
_t_109_ += 8.0 * _t_110_;
_t_108_ = la[k+1][j][i] * _t_109_;
_t_101_ += 8.0 * _t_108_;
_t_60_ = _t_100_ * _t_101_;
_t_79_ = mu[k][j-1][i] * _t_80_;
_t_75_ -= 8.0 * _t_79_;
_t_82_ = mu[k][j+1][i] * _t_83_;
_t_75_ += 8.0 * _t_82_;
_t_60_ += _t_74_ * _t_75_;
_t_64_ = u_0[k-2][j][i-2];
_t_90_ = u_0[k-2][j][i-2];
_t_90_ -= u_0[k-2][j][i+2];
_t_72_ = u_0[k-2][j][i+2];
_t_64_ -= u_0[k+2][j][i-2];
_t_98_ = u_0[k+2][j][i-2];
_t_72_ -= u_0[k+2][j][i+2];
_t_98_ -= u_0[k+2][j][i+2];
_t_65_ = -u_0[k-1][j][i-2];
_t_93_ = u_0[k-1][j][i-2];
_t_93_ -= u_0[k-1][j][i+2];
_t_73_ = -u_0[k-1][j][i+2];
_t_65_ += u_0[k+1][j][i-2];
_t_64_ += 8.0 * _t_65_;
_t_96_ = u_0[k+1][j][i-2];
_t_73_ += u_0[k+1][j][i+2];
_t_72_ += 8.0 * _t_73_;
_t_96_ -= u_0[k+1][j][i+2];
_t_62_ = mu[k][j][i-2] * _t_64_;
_t_62_ -= mu[k][j][i+2] * _t_72_;
_t_91_ = -u_0[k-2][j][i-1];
_t_67_ = u_0[k-2][j][i-1];
_t_67_ -= u_0[k+2][j][i-1];
_t_99_ = -u_0[k+2][j][i-1];
_t_91_ += u_0[k-2][j][i+1];
_t_90_ += 8.0 * _t_91_;
_t_88_ = la[k-2][j][i] * _t_90_;
_t_70_ = u_0[k-2][j][i+1];
_t_70_ -= u_0[k+2][j][i+1];
_t_99_ += u_0[k+2][j][i+1];
_t_98_ += 8.0 * _t_99_;
_t_88_ -= la[k+2][j][i] * _t_98_;
_t_68_ = -u_0[k-1][j][i-1];
_t_94_ = -u_0[k-1][j][i-1];
_t_94_ += u_0[k-1][j][i+1];
_t_93_ += 8.0 * _t_94_;
_t_92_ = la[k-1][j][i] * _t_93_;
_t_88_ -= 8.0 * _t_92_;
_t_71_ = -u_0[k-1][j][i+1];
_t_68_ += u_0[k+1][j][i-1];
_t_67_ += 8.0 * _t_68_;
_t_97_ = -u_0[k+1][j][i-1];
_t_71_ += u_0[k+1][j][i+1];
_t_70_ += 8.0 * _t_71_;
_t_97_ += u_0[k+1][j][i+1];
_t_96_ += 8.0 * _t_97_;
_t_95_ = la[k+1][j][i] * _t_96_;
_t_88_ += 8.0 * _t_95_;
_t_60_ += _t_87_ * _t_88_;
_t_66_ = mu[k][j][i-1] * _t_67_;
_t_62_ -= 8.0 * _t_66_;
_t_69_ = mu[k][j][i+1] * _t_70_;
_t_62_ += 8.0 * _t_69_;
_t_60_ += _t_61_ * _t_62_;
r3 += _t_60_;
_t_129_ = strx[i] * strz[k];
_t_127_ = _t_129_ * 1.0 / 144.0;
_t_155_ = strx[i] * strz[k];
_t_153_ = _t_155_ * 1.0 / 144.0;
_t_116_ = strx[i] * stry[j];
_t_114_ = _t_116_ * 1.0 / 144.0;
_t_142_ = strx[i] * stry[j];
_t_140_ = _t_142_ * 1.0 / 144.0;
_t_117_ = u_1[k][j-2][i-2];
_t_143_ = u_1[k][j-2][i-2];
_t_143_ -= u_1[k][j-2][i+2];
_t_125_ = u_1[k][j-2][i+2];
_t_117_ -= u_1[k][j+2][i-2];
_t_151_ = u_1[k][j+2][i-2];
_t_125_ -= u_1[k][j+2][i+2];
_t_151_ -= u_1[k][j+2][i+2];
_t_118_ = -u_1[k][j-1][i-2];
_t_146_ = u_1[k][j-1][i-2];
_t_146_ -= u_1[k][j-1][i+2];
_t_126_ = -u_1[k][j-1][i+2];
_t_118_ += u_1[k][j+1][i-2];
_t_117_ += 8.0 * _t_118_;
_t_149_ = u_1[k][j+1][i-2];
_t_126_ += u_1[k][j+1][i+2];
_t_125_ += 8.0 * _t_126_;
_t_149_ -= u_1[k][j+1][i+2];
_t_115_ = la[k][j][i-2] * _t_117_;
_t_115_ -= la[k][j][i+2] * _t_125_;
_t_144_ = -u_1[k][j-2][i-1];
_t_120_ = u_1[k][j-2][i-1];
_t_120_ -= u_1[k][j+2][i-1];
_t_152_ = -u_1[k][j+2][i-1];
_t_144_ += u_1[k][j-2][i+1];
_t_143_ += 8.0 * _t_144_;
_t_141_ = mu[k][j-2][i] * _t_143_;
_t_123_ = u_1[k][j-2][i+1];
_t_123_ -= u_1[k][j+2][i+1];
_t_152_ += u_1[k][j+2][i+1];
_t_151_ += 8.0 * _t_152_;
_t_141_ -= mu[k][j+2][i] * _t_151_;
_t_121_ = -u_1[k][j-1][i-1];
_t_147_ = -u_1[k][j-1][i-1];
_t_147_ += u_1[k][j-1][i+1];
_t_146_ += 8.0 * _t_147_;
_t_145_ = mu[k][j-1][i] * _t_146_;
_t_141_ -= 8.0 * _t_145_;
_t_124_ = -u_1[k][j-1][i+1];
_t_121_ += u_1[k][j+1][i-1];
_t_120_ += 8.0 * _t_121_;
_t_150_ = -u_1[k][j+1][i-1];
_t_124_ += u_1[k][j+1][i+1];
_t_123_ += 8.0 * _t_124_;
_t_150_ += u_1[k][j+1][i+1];
_t_149_ += 8.0 * _t_150_;
_t_148_ = mu[k][j+1][i] * _t_149_;
_t_141_ += 8.0 * _t_148_;
_t_113_ = _t_140_ * _t_141_;
_t_119_ = la[k][j][i-1] * _t_120_;
_t_115_ -= 8.0 * _t_119_;
_t_122_ = la[k][j][i+1] * _t_123_;
_t_115_ += 8.0 * _t_122_;
_t_113_ += _t_114_ * _t_115_;
_t_130_ = u_2[k-2][j][i-2];
_t_156_ = u_2[k-2][j][i-2];
_t_156_ -= u_2[k-2][j][i+2];
_t_138_ = u_2[k-2][j][i+2];
_t_130_ -= u_2[k+2][j][i-2];
_t_164_ = u_2[k+2][j][i-2];
_t_138_ -= u_2[k+2][j][i+2];
_t_164_ -= u_2[k+2][j][i+2];
_t_131_ = -u_2[k-1][j][i-2];
_t_159_ = u_2[k-1][j][i-2];
_t_159_ -= u_2[k-1][j][i+2];
_t_139_ = -u_2[k-1][j][i+2];
_t_131_ += u_2[k+1][j][i-2];
_t_130_ += 8.0 * _t_131_;
_t_128_ = la[k][j][i-2] * _t_130_;
_t_162_ = u_2[k+1][j][i-2];
_t_139_ += u_2[k+1][j][i+2];
_t_138_ += 8.0 * _t_139_;
_t_128_ -= la[k][j][i+2] * _t_138_;
_t_162_ -= u_2[k+1][j][i+2];
_t_157_ = -u_2[k-2][j][i-1];
_t_133_ = u_2[k-2][j][i-1];
_t_133_ -= u_2[k+2][j][i-1];
_t_165_ = -u_2[k+2][j][i-1];
_t_157_ += u_2[k-2][j][i+1];
_t_156_ += 8.0 * _t_157_;
_t_136_ = u_2[k-2][j][i+1];
_t_136_ -= u_2[k+2][j][i+1];
_t_165_ += u_2[k+2][j][i+1];
_t_164_ += 8.0 * _t_165_;
_t_154_ = mu[k-2][j][i] * _t_156_;
_t_154_ -= mu[k+2][j][i] * _t_164_;
_t_134_ = -u_2[k-1][j][i-1];
_t_160_ = -u_2[k-1][j][i-1];
_t_160_ += u_2[k-1][j][i+1];
_t_159_ += 8.0 * _t_160_;
_t_137_ = -u_2[k-1][j][i+1];
_t_158_ = mu[k-1][j][i] * _t_159_;
_t_154_ -= 8.0 * _t_158_;
_t_134_ += u_2[k+1][j][i-1];
_t_133_ += 8.0 * _t_134_;
_t_132_ = la[k][j][i-1] * _t_133_;
_t_128_ -= 8.0 * _t_132_;
_t_163_ = -u_2[k+1][j][i-1];
_t_137_ += u_2[k+1][j][i+1];
_t_136_ += 8.0 * _t_137_;
_t_163_ += u_2[k+1][j][i+1];
_t_162_ += 8.0 * _t_163_;
_t_135_ = la[k][j][i+1] * _t_136_;
_t_128_ += 8.0 * _t_135_;
_t_113_ += _t_127_ * _t_128_;
_t_161_ = mu[k+1][j][i] * _t_162_;
_t_154_ += 8.0 * _t_161_;
_t_113_ += _t_153_ * _t_154_;
r1 += _t_113_;
_t_169_ = strx[i] * stry[j];
_t_182_ = strx[i] * stry[j];
_t_167_ = _t_169_ * 1.0 / 144.0;
_t_180_ = _t_182_ * 1.0 / 144.0;
_t_195_ = stry[j] * strz[k];
_t_208_ = stry[j] * strz[k];
_t_193_ = _t_195_ * 1.0 / 144.0;
_t_206_ = _t_208_ * 1.0 / 144.0;
_t_170_ = u_0[k][j-2][i-2];
_t_183_ = u_0[k][j-2][i-2];
_t_183_ -= u_0[k][j-2][i+2];
_t_178_ = u_0[k][j-2][i+2];
_t_170_ -= u_0[k][j+2][i-2];
_t_191_ = u_0[k][j+2][i-2];
_t_178_ -= u_0[k][j+2][i+2];
_t_191_ -= u_0[k][j+2][i+2];
_t_171_ = -u_0[k][j-1][i-2];
_t_186_ = u_0[k][j-1][i-2];
_t_186_ -= u_0[k][j-1][i+2];
_t_179_ = -u_0[k][j-1][i+2];
_t_171_ += u_0[k][j+1][i-2];
_t_170_ += 8.0 * _t_171_;
_t_168_ = mu[k][j][i-2] * _t_170_;
_t_189_ = u_0[k][j+1][i-2];
_t_179_ += u_0[k][j+1][i+2];
_t_178_ += 8.0 * _t_179_;
_t_168_ -= mu[k][j][i+2] * _t_178_;
_t_189_ -= u_0[k][j+1][i+2];
_t_184_ = -u_0[k][j-2][i-1];
_t_173_ = u_0[k][j-2][i-1];
_t_173_ -= u_0[k][j+2][i-1];
_t_192_ = -u_0[k][j+2][i-1];
_t_184_ += u_0[k][j-2][i+1];
_t_183_ += 8.0 * _t_184_;
_t_176_ = u_0[k][j-2][i+1];
_t_176_ -= u_0[k][j+2][i+1];
_t_192_ += u_0[k][j+2][i+1];
_t_191_ += 8.0 * _t_192_;
_t_181_ = la[k][j-2][i] * _t_183_;
_t_181_ -= la[k][j+2][i] * _t_191_;
_t_174_ = -u_0[k][j-1][i-1];
_t_187_ = -u_0[k][j-1][i-1];
_t_187_ += u_0[k][j-1][i+1];
_t_186_ += 8.0 * _t_187_;
_t_177_ = -u_0[k][j-1][i+1];
_t_185_ = la[k][j-1][i] * _t_186_;
_t_181_ -= 8.0 * _t_185_;
_t_174_ += u_0[k][j+1][i-1];
_t_173_ += 8.0 * _t_174_;
_t_172_ = mu[k][j][i-1] * _t_173_;
_t_168_ -= 8.0 * _t_172_;
_t_190_ = -u_0[k][j+1][i-1];
_t_177_ += u_0[k][j+1][i+1];
_t_176_ += 8.0 * _t_177_;
_t_190_ += u_0[k][j+1][i+1];
_t_189_ += 8.0 * _t_190_;
_t_175_ = mu[k][j][i+1] * _t_176_;
_t_168_ += 8.0 * _t_175_;
_t_166_ = _t_167_ * _t_168_;
_t_188_ = la[k][j+1][i] * _t_189_;
_t_181_ += 8.0 * _t_188_;
_t_166_ += _t_180_ * _t_181_;
_t_196_ = u_2[k-2][j-2][i];
_t_209_ = u_2[k-2][j-2][i];
_t_209_ -= u_2[k-2][j+2][i];
_t_204_ = u_2[k-2][j+2][i];
_t_196_ -= u_2[k+2][j-2][i];
_t_217_ = u_2[k+2][j-2][i];
_t_204_ -= u_2[k+2][j+2][i];
_t_217_ -= u_2[k+2][j+2][i];
_t_197_ = -u_2[k-1][j-2][i];
_t_212_ = u_2[k-1][j-2][i];
_t_212_ -= u_2[k-1][j+2][i];
_t_205_ = -u_2[k-1][j+2][i];
_t_197_ += u_2[k+1][j-2][i];
_t_196_ += 8.0 * _t_197_;
_t_194_ = la[k][j-2][i] * _t_196_;
_t_215_ = u_2[k+1][j-2][i];
_t_205_ += u_2[k+1][j+2][i];
_t_204_ += 8.0 * _t_205_;
_t_194_ -= la[k][j+2][i] * _t_204_;
_t_215_ -= u_2[k+1][j+2][i];
_t_210_ = -u_2[k-2][j-1][i];
_t_199_ = u_2[k-2][j-1][i];
_t_199_ -= u_2[k+2][j-1][i];
_t_218_ = -u_2[k+2][j-1][i];
_t_210_ += u_2[k-2][j+1][i];
_t_209_ += 8.0 * _t_210_;
_t_207_ = mu[k-2][j][i] * _t_209_;
_t_202_ = u_2[k-2][j+1][i];
_t_202_ -= u_2[k+2][j+1][i];
_t_218_ += u_2[k+2][j+1][i];
_t_217_ += 8.0 * _t_218_;
_t_207_ -= mu[k+2][j][i] * _t_217_;
_t_200_ = -u_2[k-1][j-1][i];
_t_213_ = -u_2[k-1][j-1][i];
_t_213_ += u_2[k-1][j+1][i];
_t_212_ += 8.0 * _t_213_;
_t_211_ = mu[k-1][j][i] * _t_212_;
_t_207_ -= 8.0 * _t_211_;
_t_203_ = -u_2[k-1][j+1][i];
_t_200_ += u_2[k+1][j-1][i];
_t_199_ += 8.0 * _t_200_;
_t_198_ = la[k][j-1][i] * _t_199_;
_t_194_ -= 8.0 * _t_198_;
_t_216_ = -u_2[k+1][j-1][i];
_t_203_ += u_2[k+1][j+1][i];
_t_202_ += 8.0 * _t_203_;
_t_216_ += u_2[k+1][j+1][i];
_t_215_ += 8.0 * _t_216_;
_t_201_ = la[k][j+1][i] * _t_202_;
_t_194_ += 8.0 * _t_201_;
_t_166_ += _t_193_ * _t_194_;
_t_214_ = mu[k+1][j][i] * _t_215_;
_t_207_ += 8.0 * _t_214_;
_t_166_ += _t_206_ * _t_207_;
r2 += _t_166_;
uacc_0kc0jc0ic0 = a1 * uacc_0[k][j][i];
uacc_0kc0jc0ic0 += cof * r1;
uacc_0[k][j][i] = uacc_0kc0jc0ic0;
uacc_1kc0jc0ic0 = a1 * uacc_1[k][j][i];
uacc_1kc0jc0ic0 += cof * r2;
uacc_1[k][j][i] = uacc_1kc0jc0ic0;
uacc_2kc0jc0ic0 = a1 * uacc_2[k][j][i];
uacc_2kc0jc0ic0 += cof * r3;
uacc_2[k][j][i] = uacc_2kc0jc0ic0;
}
}
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
double *uacc_0;
cudaMalloc (&uacc_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_0\n");
cudaMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *uacc_1;
cudaMalloc (&uacc_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_1\n");
cudaMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *uacc_2;
cudaMalloc (&uacc_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_2\n");
cudaMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_0;
cudaMalloc (&u_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_0\n");
cudaMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_1;
cudaMalloc (&u_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_1\n");
cudaMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_2;
cudaMalloc (&u_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_2\n");
cudaMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *mu;
cudaMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *la;
cudaMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *strx;
cudaMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
double *stry;
cudaMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
double *strz;
cudaMalloc (&strz, sizeof(double)*N);
check_error ("Failed to allocate device memory for strz\n");
cudaMemcpy (strz, h_strz, sizeof(double)*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 2, 2);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), ceil(N, blockconfig.z));
sw4 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
cudaMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaFree (uacc_0);
cudaFree (uacc_1);
cudaFree (uacc_2);
cudaFree (u_0);
cudaFree (u_1);
cudaFree (u_2);
cudaFree (mu);
cudaFree (la);
cudaFree (strx);
cudaFree (stry);
cudaFree (strz);
}
|
0d09013ff017ab78c71ace14f14c4a615720aebc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zlacpy_conj.cu normal z -> s, Tue Feb 9 16:05:28 2016
*/
#include "magma_internal.h"
#define BLOCK_SIZE 64
// copy & conjugate a single vector of length n.
// TODO: this was modeled on the old sswap routine. Update to new slacpy code for 2D matrix?
__global__ void slacpy_conj_kernel(
int n,
float *A1, int lda1,
float *A2, int lda2 )
{
int x = threadIdx.x + blockDim.x*blockIdx.x;
int offset1 = x*lda1;
int offset2 = x*lda2;
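    // thread x copies and conjugates element x of the strided source/destination
    // vectors (the leading dimensions lda1/lda2 act as the element stride)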
if ( x < n )
{
A2[offset2] = MAGMA_S_CONJ( A1[offset1] );
}
}
extern "C" void
magmablas_slacpy_conj_q(
magma_int_t n,
magmaFloat_ptr dA1, magma_int_t lda1,
magmaFloat_ptr dA2, magma_int_t lda2,
magma_queue_t queue )
{
dim3 threads( BLOCK_SIZE );
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
hipLaunchKernelGGL(( slacpy_conj_kernel), dim3(blocks), dim3(threads), 0, queue->cuda_stream() , n, dA1, lda1, dA2, lda2 );
}
| 0d09013ff017ab78c71ace14f14c4a615720aebc.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zlacpy_conj.cu normal z -> s, Tue Feb 9 16:05:28 2016
*/
#include "magma_internal.h"
#define BLOCK_SIZE 64
// copy & conjugate a single vector of length n.
// TODO: this was modeled on the old sswap routine. Update to new slacpy code for 2D matrix?
__global__ void slacpy_conj_kernel(
int n,
float *A1, int lda1,
float *A2, int lda2 )
{
int x = threadIdx.x + blockDim.x*blockIdx.x;
int offset1 = x*lda1;
int offset2 = x*lda2;
if ( x < n )
{
A2[offset2] = MAGMA_S_CONJ( A1[offset1] );
}
}
extern "C" void
magmablas_slacpy_conj_q(
magma_int_t n,
magmaFloat_ptr dA1, magma_int_t lda1,
magmaFloat_ptr dA2, magma_int_t lda2,
magma_queue_t queue )
{
dim3 threads( BLOCK_SIZE );
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
slacpy_conj_kernel<<< blocks, threads, 0, queue->cuda_stream() >>>( n, dA1, lda1, dA2, lda2 );
}
|
f28195f501acc9324005e529f2bf923ed5390081.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <malloc.h>
#include <omp.h>
#include <iostream>
#include <cmath>
#include <vector>
#include <numeric> // std::iota
#include <algorithm> // std::sort
using namespace std;
#define BLOCK_SIZE 8
#define TOLERANCE 0.001
#define JACOBI_UPDATE_TOLERANCE 0.001
double *S; //Symmetric matrix (input)
double *e; //eigenvalues
double *E; //eigenvectors
int *ind;
bool *changed;
int state;
int N;
void print_vectors(double* vec,int size){
for (int i = 0; i < size; ++i){
std::cout<<vec[i]<<" ";
}
std::cout<<"\n";
}
void pprint_matrix(double* Mat,int M,int N){
for (int i = 0; i < M; ++i){
for (int j = 0; j < N; ++j){
std::cout<<Mat[N*i+j]<<" ";
}
std::cout<<"\n";
}
std::cout<<"\n";
}
double* mat_mul(double* A, int Am, int An,
double* B, int Bm, int Bn){
double *C;
C = (double*)malloc(__SIZEOF_DOUBLE__*Am*Bn);
for (int i=0; i<Am; i++){
for (int j=0; j<Bn; j++){
C[i*Bn + j] = 0;
for (int k=0; k<An; k++){
C[i*Bn+j] += A[i*An+k] * B[k*Bn+j];
}
}
}
return C;
}
double* mat_transpose(double* A, int Am, int An) {
double *B;
B = (double*)malloc(__SIZEOF_DOUBLE__*An*Am);
for (int i=0; i<Am; i++){
for (int j=0; j<An; j++){
B[ j*Am + i] = A[ i*An + j];
}
}
return B;
}
// __global__
// void transpose_kernel(double *A, double *B,int Am,int An);
// __global__
// void transpose_kernel(double *A, double *B,int Am,int An){
// int x = blockIdx.x * blockDim.x + threadIdx.x;
// int y = blockIdx.y * blockDim.y + threadIdx.y;
// __syncthreads();
// if (x >= Am || y >= An)
// return;
// B[x*Am + y] = A[y*An +x];
// }
double* cuda_transpose(double* A,int Am,int An){
double* B;
B = mat_transpose(A,Am,An);
return(B);
// double *dA,*dB;
// double *B;
// B = (double*)malloc(sizeof(double)*Am*An);
// hipMalloc(&dA,sizeof(double)*Am*An);
// hipMalloc(&dB,sizeof(double)*An*Am);
// hipMemcpy(A,dA,sizeof(double)*Am*An,hipMemcpyHostToDevice);
// dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// dim3 dimGrid((Am + dimBlock.x - 1) / dimBlock.x,(An + dimBlock.y - 1) / dimBlock.y);
// transpose_kernel<<<dimGrid,dimBlock>>>(dA,dB,Am,An);
// hipDeviceSynchronize();
// hipMemcpy(dB,B,sizeof(double)*An*Am,hipMemcpyDeviceToHost);
// hipFree(dA);
// hipFree(dB);
// return(B);
}
// __global__
// void MatMulKernel(double* A, double* B, double* C,int Am,int An,int Bm,int Bn);
// __global__
// void MatMulKernel(double* A, double* B, double* C,int Am,int An,int Bm,int Bn) {
// double Cvalue = 0;
// int row = blockIdx.y * blockDim.y + threadIdx.y;
// int col = blockIdx.x * blockDim.x + threadIdx.x;
// __syncthreads();
// if(row > Am || col > Bn) return;
// for (int e = 0; e < An; ++e)
// Cvalue += A[row * An + e] *B[e * Bn + col];
// C[row * Bn + col] = Cvalue;
// }
double* cuda_matmul(double* A,double* B,int Am,int An,int Bm,int Bn){
double* C;
C = mat_mul(A,Am,An,B,Bm,Bn);
return(C);
// double *dA,*dB,*dC;
// double* C = (double*)malloc(sizeof(double)*Am*Bn);
// hipMalloc(&dA,sizeof(double)*Am*An);
// hipMalloc(&dB,sizeof(double)*Bm*Bn);
// hipMalloc(&dC,sizeof(double)*Am*Bn);
// hipMemcpy(A,dA,sizeof(double)*Am*An,hipMemcpyHostToDevice);
// hipMemcpy(B,dB,sizeof(double)*Bm*Bn,hipMemcpyHostToDevice);
// dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// dim3 dimGrid((Bn + dimBlock.x - 1) / dimBlock.x,(Am + dimBlock.y - 1) / dimBlock.y);
// MatMulKernel<<<dimGrid,dimBlock>>>(dA,dB,dC,Am,An,Bm,Bn);
// hipDeviceSynchronize();
// hipMemcpy(dC,C,sizeof(double)*Am*Bn,hipMemcpyDeviceToHost);
// hipFree(dA);
// hipFree(dB);
// hipFree(dC);
// return(C);
}
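// Jacobi pivot search: index of the largest off-diagonal entry in row k (columns k+1..N-1) of S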
int maxind(int k) {
int m = k+1;
for (int i = k+2; i < N; i++){
if (fabs(S[k*N+i]) > fabs(S[k*N+m])){
m = i;
}
}
return m;
}
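// shift the eigenvalue estimate e[k] by t and update the changed[]/state bookkeeping
// that serves as the convergence test of the Jacobi iteration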
void update(int k, double t) {
double ek_prev = e[k];
e[k] = ek_prev + t;
if (e[k] < 0) e[k] = 0;
if (changed[k] && fabs(ek_prev - e[k]) < JACOBI_UPDATE_TOLERANCE) {
changed[k] = false;
state = state - 1;
}
else if ((! changed[k]) && fabs(ek_prev - e[k]) > JACOBI_UPDATE_TOLERANCE) {
changed[k] = true;
state = state + 1;
}
}
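// apply the 2x2 rotation [c -s; s c] to one pair of entries, either of the working
// matrix S (eigenvectors == false) or of the eigenvector matrix E (eigenvectors == true)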
void rotate(int k, int l, int i, int j, double c, double s,bool eigenvectors){
double mat1_00 , mat1_01;
double mat1_10 , mat1_11;
mat1_00 = c; mat1_01 = -s;
mat1_10 = s; mat1_11 = c;
double mat2_00 , mat2_10;
if(eigenvectors){
mat2_00 = E[i*N + k];
mat2_10 = E[i*N + l];
}
else{
mat2_00 = S[k*N + l];
mat2_10 = S[i*N + j];
}
double mat3_00;
double mat3_10;
mat3_00 = mat1_00*mat2_00 + mat1_01*mat2_10;
mat3_10 = mat1_10*mat2_00 + mat1_11*mat2_10;
if (eigenvectors){
E[i*N + k] = mat3_00;
E[i*N + l] = mat3_10;
}
else{
S[k*N + l] = mat3_00;
S[i*N + j] = mat3_10;
}
}
void print_matrix(double* A, int Am, int An) {
cout << "[";
for (int i=0; i<Am; i++){
if (i>0)
cout<<" ";
cout<<"[";
for (int j=0; j<An-1; j++){
cout << A[i*An+j] << ", ";
}
if (i < Am-1)
cout << A[i*An+An-1] << "]" << endl;
}
cout << A[(Am-1)*An+An-1] << "]]" << endl;
}
// void print_vector(double* A, int An) {
// cout << "[";
// for(int i=0; i<An-1; i++)
// cout << A[i] << ",";
// cout << A[An-1] << "]" << endl;
// }
void init_jacobi() {
E = (double*)malloc(sizeof(double)*N*N);
for (int i=0; i<N; i++){
for (int j=0; j<N; j++){
E[i*N+j] = 0;
}
E[i*N+i] = 1;
}
state = N;
e = (double*)malloc(sizeof(double)*N);
ind = (int*)malloc(__SIZEOF_INT__*N);
changed = (bool*)malloc(sizeof(bool)*N);
for (int k=0; k<N; k++){
ind[k] = maxind(k);
e[k] = S[k*N+k];
changed[k] = true;
}
}
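// classical Jacobi eigenvalue iteration: repeatedly annihilate the largest off-diagonal
// element of the symmetric matrix S; eigenvalues accumulate in e, eigenvectors in the columns of E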
void Jacobi(double *input_matrix, int n,double **eigenvalues, double **eigenvectors) {
N = n;
S = input_matrix;
init_jacobi();
while(state != 0){
int m = 0;
for (int k=1; k<N-1; k++){
if (fabs(S[k*n+ind[k]]) > fabs(S[m*n+ind[m]])){
m = k;
}
}
int k = m;
int l = ind[m];
double p = S[k*n+l];
double y = (e[l] - e[k]) / 2.0;
double d = fabs(y) + sqrt(p*p + y*y);
double r = sqrt(p*p + d*d);
double c = d / r;
double s = p / r;
double t = (p*p) / d;
if (y < 0.0) { s = -s; t = -t; }
S[k*n+l] = 0.0;
update(k, -t);
update(l, t);
for (int i=0; i<k; i++) { rotate(i, k, i, l, c, s, false); }
for (int i=k+1; i<l; i++){ rotate(k, i, i, l, c, s, false); }
for (int i=l+1; i<N; i++) { rotate(k, i, l, i, c, s, false); }
for (int i=0; i<N; i++){
rotate(k, l, i, i, c, s, true);
}
ind[k] = maxind(k);
ind[l] = maxind(l);
}
*eigenvalues = e;
*eigenvectors = E;
}
///////////////////////////////////////
// Descending Order
vector<size_t> sort_indices(double* v,int size) {
// initialize original index locations
vector<size_t> idx(size);
iota(idx.begin(), idx.end(), 0);
// sort indexes based on comparing values in v
sort(idx.begin(), idx.end(),
[v](size_t i1, size_t i2) {return v[i1] > v[i2];});
return idx;
}
double norm_row_vec(double* v,int n ){
double norm=0;
for(int i=0;i<n;i++){
norm+=v[i]*v[i];
}
norm=sqrt(norm);
return(norm);
}
double inner_product_rv(double* v1,double* v2,int n){
double inp=0;
for(int i=0;i< n;i++){
inp+=v1[i]*v2[i];
}
return(inp);
}
// void QRfactorisations(double* A,double* Q,double* R,int n){
// // Assume R is initialised to n X n zero matrix
// // A:dim N X N
// // Assume vectors are represented in col major form in Q,V
// // we will do calculations in row major
// double* V_T=(double*)(malloc(sizeof(double)*n*n));
// V_T = mat_transpose(A,n,n);
// // print_matrix(V_T,n,n);
// double* Q_T=(double*)(malloc(sizeof(double)*n*n));
// for (int i=0 ; i<n ; i++){
// R[n*i+i]=norm_row_vec(V_T+n*i,n);//R[i][i]
// for(int j=0;j<n;j++){
// Q_T[n*i+j] = V_T[n*i+j]/R[n*i+i];//R[i][i]
// }
// // #pragma omp parallel for
// for (int j=i+1;j<n;j++){
// R[n*i+j] = inner_product_rv(Q_T+n*i,V_T+n*j,n);
// for(int k=0;k<n;k++){
// V_T[n*j+k] = V_T[n*j+k] -R[n*i+j]*Q_T[n*i+k];
// }
// }
// }
// Q = mat_transpose(Q_T,n,n);
// // std::cout<<"R\n";
// // print_matrix(R,n,n);
// }
// void check_result(double *D,double* U,double* SIGMA,double* V_T,int M,int N){
// double* sig_m=(double*)(malloc(sizeof(double)*N*M));
// for (int i = 0; i < N; ++i) {
// for (int j = 0; j < M; ++j){
// sig_m[M*i+j]=0;
// }
// }
// for (int i = 0; i < N; ++i){
// sig_m[M*i+i]=SIGMA[i];
// }
// double* temp;//=(double*)(malloc(sizeof(double)*N*M));
// temp =cuda_matmul(U,sig_m,N,N,N,M);
// double* temp2;//=(double*)(malloc(sizeof(double)*N*M));
// temp2 =cuda_matmul(temp,V_T,N,M,M,M);
// std::cout<<"res \n";
// print_matrix(temp2,N,M);
// double* D_T;//=(double*)malloc(sizeof(double)*M*N);
// D_T = transpose(D,M,N);
// std::cout<<"D_T\n";
// print_matrix(D_T,N,M);
// }
// /*
// *****************************************************
// TODO -- You must implement this function
// *****************************************************
// */
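// SVD via the eigen-decomposition of A = D^T * D: the singular values are the square roots of
// the descending-sorted eigenvalues, U stores the matching eigenvectors row-wise (U_T),
// and V_T is reconstructed as Sigma^{-1} * U^T * D^T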
void SVD(int M, int N, double* D, double** U, double** SIGMA,int SIGMAm,int SIGMAn,double** V_T){
// double* V_T_p=*V_T;
double* D_T;// =(double*)(malloc(sizeof(double)*N*M));
D_T = cuda_transpose(D,M,N);
double* A;//=(double*)(malloc(sizeof(double)*N*N));
A = cuda_matmul(D_T,D,N,M,M,N);
double *eigenvalues,*eigenvectors;
Jacobi(A,N,(double**)&eigenvalues,(double**)&eigenvectors);
for (int i = 0; i < N; ++i){
(*SIGMA)[i] = eigenvalues[i];
}
double* sig_temp=(double*)(malloc(sizeof(double)*N));
int idx=0;
std::vector<size_t> sort_idx = sort_indices((*SIGMA),N);
for(auto i: sort_idx){
sig_temp[idx]=(*SIGMA)[i];
idx+=1;
}
for (int i = 0; i < N; ++i){
(*SIGMA)[i]=sqrt(sig_temp[i]);
}
double* E_T;//=(double*)(malloc(sizeof(double)*N*N));
double* U_T=(double*)(malloc(sizeof(double)*N*N));
// pprint_matrix(*SIGMA,1,N);
E_T = cuda_transpose(eigenvectors,N,N);
idx=0;
for (auto i :sort_idx){
for (int j = 0; j < N; ++j){
(U_T)[N*idx+j]=E_T[N*i+j];
}
idx+=1;
}
*U = U_T;
double* sig_inv_ut=(double*)(malloc(sizeof(double)*M*N));//Sigma_inv.U_T
for (int i = 0; i < N; ++i){
for (int j = 0; j < N; ++j){
sig_inv_ut[N*i+j]=U_T[N*i+j]/(*SIGMA)[i];
}
}
for (int i = N; i < M; ++i){
for (int j = 0; j < N; ++j){
sig_inv_ut[N*i+j]=0;
}
}
(*V_T) = cuda_matmul(sig_inv_ut,D_T,M,N,N,M);
}
// /*
// *****************************************************
// TODO -- You must implement this function
// *****************************************************
// */
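// PCA: choose the smallest K whose cumulative squared singular values reach the requested
// retention percentage, then project D onto the first K columns of U to obtain D_HAT (M x K)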
void PCA(int retention, int M, int N, double* D, double* U, double* SIGMA, double** D_HAT, int *K){
double* total =(double*)malloc(sizeof(double)*N);
total[0]=SIGMA[0]*SIGMA[0];
for (int i = 1; i < N; ++i){
total[i]=SIGMA[i]*SIGMA[i]+total[i-1];
}
*K=0;
for (int i = 0; i <N; ++i){
*K=1+*K;
if ((total[i]/total[N-1])*100>= retention){
break;
}
}
// *D_HAT=(double*)malloc(sizeof(double)*M*(*K));
double* U_temp=(double*)malloc(sizeof(double)*N*(*K));
// #pragma omp parallel for collapse(2)
for (int i = 0; i < N; ++i){
for (int j = 0; j < (*K); ++j){
U_temp[(*K)*i+j]=U[N*i+j];
}
}
(*D_HAT) = cuda_matmul(D,U_temp,M,N,N,*K);
// std::cout<<"U\n";
// print_matrix(U,N,N);
// std::cout<<"D_HAT\n";
// print_matrix((*D_HAT),M,*K);
}
// /*
// *****************************************************
// TODO -- You must implement this function
// *****************************************************
// */
void SVD_and_PCA (int M,
int N,
double* D,
double** U,
double** SIGMA,
double** V_T,
int* SIGMAm,
int* SIGMAn,
double** D_HAT,
int *K,
int retention) {
// write your code here
*SIGMA = (double*)malloc(sizeof(double)*N);
SVD(M, N, D,U, SIGMA,*SIGMAm,*SIGMAn,V_T);
PCA(retention, M, N, D, *U, *SIGMA, D_HAT,K);
}
| f28195f501acc9324005e529f2bf923ed5390081.cu | #include <malloc.h>
#include <omp.h>
#include <iostream>
#include <cmath>
#include <vector>
#include <numeric> // std::iota
#include <algorithm> // std::sort
using namespace std;
#define BLOCK_SIZE 8
#define TOLERANCE 0.001
#define JACOBI_UPDATE_TOLERANCE 0.001
double *S; //Symmetric matrix (input)
double *e; //eigenvalues
double *E; //eigenvectors
int *ind;
bool *changed;
int state;
int N;
void print_vectors(double* vec,int size){
for (int i = 0; i < size; ++i){
std::cout<<vec[i]<<" ";
}
std::cout<<"\n";
}
void pprint_matrix(double* Mat,int M,int N){
for (int i = 0; i < M; ++i){
for (int j = 0; j < N; ++j){
std::cout<<Mat[N*i+j]<<" ";
}
std::cout<<"\n";
}
std::cout<<"\n";
}
double* mat_mul(double* A, int Am, int An,
double* B, int Bm, int Bn){
double *C;
C = (double*)malloc(__SIZEOF_DOUBLE__*Am*Bn);
for (int i=0; i<Am; i++){
for (int j=0; j<Bn; j++){
C[i*Bn + j] = 0;
for (int k=0; k<An; k++){
C[i*Bn+j] += A[i*An+k] * B[k*Bn+j];
}
}
}
return C;
}
double* mat_transpose(double* A, int Am, int An) {
double *B;
B = (double*)malloc(__SIZEOF_DOUBLE__*An*Am);
for (int i=0; i<Am; i++){
for (int j=0; j<An; j++){
B[ j*Am + i] = A[ i*An + j];
}
}
return B;
}
// __global__
// void transpose_kernel(double *A, double *B,int Am,int An);
// __global__
// void transpose_kernel(double *A, double *B,int Am,int An){
// int x = blockIdx.x * blockDim.x + threadIdx.x;
// int y = blockIdx.y * blockDim.y + threadIdx.y;
// __syncthreads();
// if (x >= Am || y >= An)
// return;
// B[x*Am + y] = A[y*An +x];
// }
double* cuda_transpose(double* A,int Am,int An){
double* B;
B = mat_transpose(A,Am,An);
return(B);
// double *dA,*dB;
// double *B;
// B = (double*)malloc(sizeof(double)*Am*An);
// cudaMalloc(&dA,sizeof(double)*Am*An);
// cudaMalloc(&dB,sizeof(double)*An*Am);
// cudaMemcpy(A,dA,sizeof(double)*Am*An,cudaMemcpyHostToDevice);
// dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// dim3 dimGrid((Am + dimBlock.x - 1) / dimBlock.x,(An + dimBlock.y - 1) / dimBlock.y);
// transpose_kernel<<<dimGrid,dimBlock>>>(dA,dB,Am,An);
// cudaThreadSynchronize();
// cudaMemcpy(dB,B,sizeof(double)*An*Am,cudaMemcpyDeviceToHost);
// cudaFree(dA);
// cudaFree(dB);
// return(B);
}
// __global__
// void MatMulKernel(double* A, double* B, double* C,int Am,int An,int Bm,int Bn);
// __global__
// void MatMulKernel(double* A, double* B, double* C,int Am,int An,int Bm,int Bn) {
// double Cvalue = 0;
// int row = blockIdx.y * blockDim.y + threadIdx.y;
// int col = blockIdx.x * blockDim.x + threadIdx.x;
// __syncthreads();
// if(row > Am || col > Bn) return;
// for (int e = 0; e < An; ++e)
// Cvalue += A[row * An + e] *B[e * Bn + col];
// C[row * Bn + col] = Cvalue;
// }
double* cuda_matmul(double* A,double* B,int Am,int An,int Bm,int Bn){
double* C;
C = mat_mul(A,Am,An,B,Bm,Bn);
return(C);
// double *dA,*dB,*dC;
// double* C = (double*)malloc(sizeof(double)*Am*Bn);
// cudaMalloc(&dA,sizeof(double)*Am*An);
// cudaMalloc(&dB,sizeof(double)*Bm*Bn);
// cudaMalloc(&dC,sizeof(double)*Am*Bn);
// cudaMemcpy(A,dA,sizeof(double)*Am*An,cudaMemcpyHostToDevice);
// cudaMemcpy(B,dB,sizeof(double)*Bm*Bn,cudaMemcpyHostToDevice);
// dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// dim3 dimGrid((Bn + dimBlock.x - 1) / dimBlock.x,(Am + dimBlock.y - 1) / dimBlock.y);
// MatMulKernel<<<dimGrid,dimBlock>>>(dA,dB,dC,Am,An,Bm,Bn);
// cudaThreadSynchronize();
// cudaMemcpy(dC,C,sizeof(double)*Am*Bn,cudaMemcpyDeviceToHost);
// cudaFree(dA);
// cudaFree(dB);
// cudaFree(dC);
// return(C);
}
int maxind(int k) {
int m = k+1;
for (int i = k+2; i < N; i++){
if (fabs(S[k*N+i]) > fabs(S[k*N+m])){
m = i;
}
}
return m;
}
void update(int k, double t) {
double ek_prev = e[k];
e[k] = ek_prev + t;
if (e[k] < 0) e[k] = 0;
if (changed[k] && fabs(ek_prev - e[k]) < JACOBI_UPDATE_TOLERANCE) {
changed[k] = false;
state = state - 1;
}
else if ((! changed[k]) && fabs(ek_prev - e[k]) > JACOBI_UPDATE_TOLERANCE) {
changed[k] = true;
state = state + 1;
}
}
void rotate(int k, int l, int i, int j, double c, double s,bool eigenvectors){
double mat1_00 , mat1_01;
double mat1_10 , mat1_11;
mat1_00 = c; mat1_01 = -s;
mat1_10 = s; mat1_11 = c;
double mat2_00 , mat2_10;
if(eigenvectors){
mat2_00 = E[i*N + k];
mat2_10 = E[i*N + l];
}
else{
mat2_00 = S[k*N + l];
mat2_10 = S[i*N + j];
}
double mat3_00;
double mat3_10;
mat3_00 = mat1_00*mat2_00 + mat1_01*mat2_10;
mat3_10 = mat1_10*mat2_00 + mat1_11*mat2_10;
if (eigenvectors){
E[i*N + k] = mat3_00;
E[i*N + l] = mat3_10;
}
else{
S[k*N + l] = mat3_00;
S[i*N + j] = mat3_10;
}
}
void print_matrix(double* A, int Am, int An) {
cout << "[";
for (int i=0; i<Am; i++){
if (i>0)
cout<<" ";
cout<<"[";
for (int j=0; j<An-1; j++){
cout << A[i*An+j] << ", ";
}
if (i < Am-1)
cout << A[i*An+An-1] << "]" << endl;
}
cout << A[(Am-1)*An+An-1] << "]]" << endl;
}
// void print_vector(double* A, int An) {
// cout << "[";
// for(int i=0; i<An-1; i++)
// cout << A[i] << ",";
// cout << A[An-1] << "]" << endl;
// }
void init_jacobi() {
E = (double*)malloc(sizeof(double)*N*N);
for (int i=0; i<N; i++){
for (int j=0; j<N; j++){
E[i*N+j] = 0;
}
E[i*N+i] = 1;
}
state = N;
e = (double*)malloc(sizeof(double)*N);
ind = (int*)malloc(__SIZEOF_INT__*N);
changed = (bool*)malloc(sizeof(bool)*N);
for (int k=0; k<N; k++){
ind[k] = maxind(k);
e[k] = S[k*N+k];
changed[k] = true;
}
}
void Jacobi(double *input_matrix, int n,double **eigenvalues, double **eigenvectors) {
N = n;
S = input_matrix;
init_jacobi();
while(state != 0){
int m = 0;
for (int k=1; k<N-1; k++){
if (fabs(S[k*n+ind[k]]) > fabs(S[m*n+ind[m]])){
m = k;
}
}
int k = m;
int l = ind[m];
double p = S[k*n+l];
double y = (e[l] - e[k]) / 2.0;
double d = fabs(y) + sqrt(p*p + y*y);
double r = sqrt(p*p + d*d);
double c = d / r;
double s = p / r;
double t = (p*p) / d;
if (y < 0.0) { s = -s; t = -t; }
S[k*n+l] = 0.0;
update(k, -t);
update(l, t);
for (int i=0; i<k; i++) { rotate(i, k, i, l, c, s, false); }
for (int i=k+1; i<l; i++){ rotate(k, i, i, l, c, s, false); }
for (int i=l+1; i<N; i++) { rotate(k, i, l, i, c, s, false); }
for (int i=0; i<N; i++){
rotate(k, l, i, i, c, s, true);
}
ind[k] = maxind(k);
ind[l] = maxind(l);
}
*eigenvalues = e;
*eigenvectors = E;
}
///////////////////////////////////////
// Descending Order
vector<size_t> sort_indices(double* v,int size) {
// initialize original index locations
vector<size_t> idx(size);
iota(idx.begin(), idx.end(), 0);
// sort indexes based on comparing values in v
sort(idx.begin(), idx.end(),
[v](size_t i1, size_t i2) {return v[i1] > v[i2];});
return idx;
}
double norm_row_vec(double* v,int n ){
double norm=0;
for(int i=0;i<n;i++){
norm+=v[i]*v[i];
}
norm=sqrt(norm);
return(norm);
}
double inner_product_rv(double* v1,double* v2,int n){
double inp=0;
for(int i=0;i< n;i++){
inp+=v1[i]*v2[i];
}
return(inp);
}
// void QRfactorisations(double* A,double* Q,double* R,int n){
// // Assume R is initialised to n X n zero matrix
// // A:dim N X N
// // Assume vectors are represented in col major form in Q,V
// // we will do calculations in row major
// double* V_T=(double*)(malloc(sizeof(double)*n*n));
// V_T = mat_transpose(A,n,n);
// // print_matrix(V_T,n,n);
// double* Q_T=(double*)(malloc(sizeof(double)*n*n));
// for (int i=0 ; i<n ; i++){
// R[n*i+i]=norm_row_vec(V_T+n*i,n);//R[i][i]
// for(int j=0;j<n;j++){
// Q_T[n*i+j] = V_T[n*i+j]/R[n*i+i];//R[i][i]
// }
// // #pragma omp parallel for
// for (int j=i+1;j<n;j++){
// R[n*i+j] = inner_product_rv(Q_T+n*i,V_T+n*j,n);
// for(int k=0;k<n;k++){
// V_T[n*j+k] = V_T[n*j+k] -R[n*i+j]*Q_T[n*i+k];
// }
// }
// }
// Q = mat_transpose(Q_T,n,n);
// // std::cout<<"R\n";
// // print_matrix(R,n,n);
// }
// void check_result(double *D,double* U,double* SIGMA,double* V_T,int M,int N){
// double* sig_m=(double*)(malloc(sizeof(double)*N*M));
// for (int i = 0; i < N; ++i) {
// for (int j = 0; j < M; ++j){
// sig_m[M*i+j]=0;
// }
// }
// for (int i = 0; i < N; ++i){
// sig_m[M*i+i]=SIGMA[i];
// }
// double* temp;//=(double*)(malloc(sizeof(double)*N*M));
// temp =cuda_matmul(U,sig_m,N,N,N,M);
// double* temp2;//=(double*)(malloc(sizeof(double)*N*M));
// temp2 =cuda_matmul(temp,V_T,N,M,M,M);
// std::cout<<"res \n";
// print_matrix(temp2,N,M);
// double* D_T;//=(double*)malloc(sizeof(double)*M*N);
// D_T = transpose(D,M,N);
// std::cout<<"D_T\n";
// print_matrix(D_T,N,M);
// }
// /*
// *****************************************************
// TODO -- You must implement this function
// *****************************************************
// */
void SVD(int M, int N, double* D, double** U, double** SIGMA,int SIGMAm,int SIGMAn,double** V_T){
// double* V_T_p=*V_T;
double* D_T;// =(double*)(malloc(sizeof(double)*N*M));
D_T = cuda_transpose(D,M,N);
double* A;//=(double*)(malloc(sizeof(double)*N*N));
A = cuda_matmul(D_T,D,N,M,M,N);
double *eigenvalues,*eigenvectors;
Jacobi(A,N,(double**)&eigenvalues,(double**)&eigenvectors);
for (int i = 0; i < N; ++i){
(*SIGMA)[i] = eigenvalues[i];
}
double* sig_temp=(double*)(malloc(sizeof(double)*N));
int idx=0;
std::vector<size_t> sort_idx = sort_indices((*SIGMA),N);
for(auto i: sort_idx){
sig_temp[idx]=(*SIGMA)[i];
idx+=1;
}
for (int i = 0; i < N; ++i){
(*SIGMA)[i]=sqrt(sig_temp[i]);
}
double* E_T;//=(double*)(malloc(sizeof(double)*N*N));
double* U_T=(double*)(malloc(sizeof(double)*N*N));
// pprint_matrix(*SIGMA,1,N);
E_T = cuda_transpose(eigenvectors,N,N);
idx=0;
for (auto i :sort_idx){
for (int j = 0; j < N; ++j){
(U_T)[N*idx+j]=E_T[N*i+j];
}
idx+=1;
}
*U = U_T;
double* sig_inv_ut=(double*)(malloc(sizeof(double)*M*N));//Sigma_inv.U_T
for (int i = 0; i < N; ++i){
for (int j = 0; j < N; ++j){
sig_inv_ut[N*i+j]=U_T[N*i+j]/(*SIGMA)[i];
}
}
for (int i = N; i < M; ++i){
for (int j = 0; j < N; ++j){
sig_inv_ut[N*i+j]=0;
}
}
(*V_T) = cuda_matmul(sig_inv_ut,D_T,M,N,N,M);
}
// /*
// *****************************************************
// TODO -- You must implement this function
// *****************************************************
// */
void PCA(int retention, int M, int N, double* D, double* U, double* SIGMA, double** D_HAT, int *K){
double* total =(double*)malloc(sizeof(double)*N);
total[0]=SIGMA[0]*SIGMA[0];
for (int i = 1; i < N; ++i){
total[i]=SIGMA[i]*SIGMA[i]+total[i-1];
}
*K=0;
for (int i = 0; i <N; ++i){
*K=1+*K;
if ((total[i]/total[N-1])*100>= retention){
break;
}
}
// *D_HAT=(double*)malloc(sizeof(double)*M*(*K));
double* U_temp=(double*)malloc(sizeof(double)*N*(*K));
// #pragma omp parallel for collapse(2)
for (int i = 0; i < N; ++i){
for (int j = 0; j < (*K); ++j){
U_temp[(*K)*i+j]=U[N*i+j];
}
}
(*D_HAT) = cuda_matmul(D,U_temp,M,N,N,*K);
// std::cout<<"U\n";
// print_matrix(U,N,N);
// std::cout<<"D_HAT\n";
// print_matrix((*D_HAT),M,*K);
}
// /*
// *****************************************************
// TODO -- You must implement this function
// *****************************************************
// */
void SVD_and_PCA (int M,
int N,
double* D,
double** U,
double** SIGMA,
double** V_T,
int* SIGMAm,
int* SIGMAn,
double** D_HAT,
int *K,
int retention) {
// write your code here
*SIGMA = (double*)malloc(sizeof(double)*N);
SVD(M, N, D,U, SIGMA,*SIGMAm,*SIGMAn,V_T);
PCA(retention, M, N, D, *U, *SIGMA, D_HAT,K);
}
|
14956aee705b6ece8442a2b209e1dea797ff59bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <time.h>
#include <vector>
#include <string>
#include <iostream>
#include <fstream>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <ctime>
//#include <dirent.h>
//#include <boost/lexical_cast.hpp>
//#include <boost/filesystem.hpp>
//#include <boost/algorithm/string.hpp>
#include "wstd/string.hpp"
#include "caffe/layers/denseblock_layer.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/util/cudnn.hpp"
namespace caffe {
bool dirExists_cu(string dirStr) {
/* const char* dirCStr = dirStr.c_str();
DIR* dir = opendir(dirCStr);
if (ENOENT == errno){
return false;
}
closedir(dir);*/
return true;
}
void tryCreateDirectory_cu(string fileName) {
/* vector<string> strVec;
boost::split(strVec,fileName,boost::is_any_of("/"));
string newStr="";
for (int i=0;i<strVec.size()-1;++i){
newStr += strVec[i] + (i==strVec.size()-2?"":"/");
}
boost::filesystem::path dirToCreate(newStr);
if (!dirExists_cu(newStr)){
boost::filesystem::create_directories(dirToCreate);
}*/
}
string itos_cu(int i) {
char buf[32] = "";
//_itoa(i, buf, 10);
sprintf(buf,"%d", i);
return buf;
}
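// scatter copy: each of numChunks contiguous chunks (chunkSize_input elements) of inPtr_gpu
// is written into outPtr_gpu at stride chunkStride_output, device to device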
template <typename Dtype>
void gpu_copy_one_to_many(const Dtype* inPtr_gpu, Dtype* outPtr_gpu, int numChunks, int chunkSize_input, int chunkStride_output) {
for (int chunkIdx = 0; chunkIdx < numChunks; ++chunkIdx) {
const Dtype* inPtr_local = inPtr_gpu + chunkIdx*chunkSize_input;
Dtype* outPtr_local = outPtr_gpu + chunkIdx*chunkStride_output;
//printf("inpointer %p\n",inPtr_gpu);
//printf("outpointer %p\n",outPtr_gpu);
CUDA_CHECK(hipMemcpy(outPtr_local, inPtr_local, chunkSize_input * sizeof(Dtype), hipMemcpyDeviceToDevice));
}
}
template <typename Dtype>
void gpu_copy_many_to_one(Dtype* inPtr_gpu, Dtype* outPtr_gpu, int numChunks, int chunkSize_output, int chunkStride_input) {
for (int chunkIdx = 0; chunkIdx < numChunks; ++chunkIdx) {
Dtype* inPtr_local = inPtr_gpu + chunkIdx*chunkStride_input;
Dtype* outPtr_local = outPtr_gpu + chunkIdx*chunkSize_output;
CUDA_CHECK(hipMemcpy(outPtr_local, inPtr_local, chunkSize_output * sizeof(Dtype), hipMemcpyDeviceToDevice));
}
}
template <typename Dtype>
void print_gpuPtr(Dtype* gpuPtr, int numValues) {
Dtype* cpuPtr = new Dtype[numValues];
hipMemcpy(cpuPtr, gpuPtr, numValues * sizeof(Dtype), hipMemcpyDeviceToHost);
for (int i = 0; i < numValues; ++i) {
std::cout << cpuPtr[i] << ",";
}
std::cout << std::endl;
}
template <typename Dtype>
void log_gpuPtr(Dtype* gpuPtr, int numValues, string fileName) {
Dtype* cpuPtr = new Dtype[numValues];
hipMemcpy(cpuPtr, gpuPtr, numValues * sizeof(Dtype), hipMemcpyDeviceToHost);
const char* fileName_cstr = fileName.c_str();
tryCreateDirectory_cu(fileName_cstr);
std::ofstream outWriter(fileName_cstr, std::ofstream::out);
for (int i = 0; i < numValues; ++i) {
outWriter << cpuPtr[i] << ",";
}
outWriter << std::endl;
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::logInternal_gpu(string dir, int TIdx, bool logDynamic, bool logDiff) {
string localDir = dir + "/gpu_" + itos_cu(this->logId) + "/";
if (logDynamic) {
int postBufferSize = this->N * (this->initChannel + this->growthRate * this->numTransition) * this->H * this->W;
int quadGBufferSize = N * 4 * growthRate*H*W;
if (logDiff) {
//postConv_grad_gpu
log_gpuPtr<Dtype>(this->postConv_grad_gpu, postBufferSize, localDir + "postConv_grad_gpu_transition" + itos_cu(TIdx));
//postBN_grad_gpu
log_gpuPtr<Dtype>(this->postBN_grad_gpu, postBufferSize, localDir + "postBN_grad_gpu_transition" + itos_cu(TIdx));
//postReLU_grad_gpu
log_gpuPtr<Dtype>(this->postReLU_grad_gpu, postBufferSize, localDir + "postReLU_grad_gpu_transition" + itos_cu(TIdx));
//BC
if (useBC) {
//postConv_4G_grad
log_gpuPtr<Dtype>(this->postConv_4G_grad, quadGBufferSize, localDir + "postConv_4G_grad_transition" + itos_cu(TIdx));
//postBN_4G_grad
log_gpuPtr<Dtype>(this->postBN_4G_grad, quadGBufferSize, localDir + "postBN_4G_grad_transition" + itos_cu(TIdx));
//postReLU_4G_grad
log_gpuPtr<Dtype>(this->postReLU_4G_grad, quadGBufferSize, localDir + "postReLU_4G_grad_transition" + itos_cu(TIdx));
}
}
else {
//postConv_data_gpu
log_gpuPtr<Dtype>(this->postConv_data_gpu, postBufferSize, localDir + "postConv_data_gpu_transition" + itos_cu(TIdx));
//postBN_data_gpu
log_gpuPtr<Dtype>(this->postBN_data_gpu, postBufferSize, localDir + "postBN_data_gpu_transition" + itos_cu(TIdx));
//postReLU_data_gpu
log_gpuPtr<Dtype>(this->postReLU_data_gpu, postBufferSize, localDir + "postReLU_data_gpu_transition" + itos_cu(TIdx));
if (useBC) {
//postConv_4G
if (BC_ultra_spaceEfficient) {
log_gpuPtr<Dtype>(this->postConv_4G, quadGBufferSize, localDir + "postConv_4G_data_transition" + itos_cu(TIdx));
}
else {
log_gpuPtr<Dtype>(this->postConv_4GVec[TIdx], quadGBufferSize, localDir + "postConv_4G_data_transition" + itos_cu(TIdx));
}
//postBN_4G
log_gpuPtr<Dtype>(this->postBN_4G, quadGBufferSize, localDir + "postBN_4G_data_transition" + itos_cu(TIdx));
//postReLU_4G
log_gpuPtr<Dtype>(this->postReLU_4G, quadGBufferSize, localDir + "postReLU_4G_data_transition" + itos_cu(TIdx));
}
}
}
else {
for (int transitionIdx = 0; transitionIdx < this->numTransition; ++transitionIdx) {
int numChannel_moreWide = this->initChannel + this->growthRate * transitionIdx;
int numChannel_quadG = 4 * growthRate;
//global Mean/Variance
log_gpuPtr<Dtype>(this->blobs_[3 * this->numTransition + transitionIdx]->mutable_gpu_data(), numChannel_moreWide, localDir + "globalMean_gpu_transition" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[4 * this->numTransition + transitionIdx]->mutable_gpu_data(), numChannel_moreWide, localDir + "globalVariance_gpu_transition" + itos_cu(transitionIdx));
//ResultSaveMean/InvVariance
log_gpuPtr<Dtype>(this->ResultSaveMean_gpu[transitionIdx], numChannel_moreWide, localDir + "ResultSaveMean_gpu_transition" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->ResultSaveInvVariance_gpu[transitionIdx], numChannel_moreWide, localDir + "ResultSaveInvVariance_gpu_transition" + itos_cu(transitionIdx));
if (useBC) {
//global BC Mean/Variance
log_gpuPtr<Dtype>(this->blobs_[8 * numTransition + transitionIdx]->mutable_gpu_data(), numChannel_quadG, localDir + "globalMean_BC_transition" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[9 * numTransition + transitionIdx]->mutable_gpu_data(), numChannel_quadG, localDir + "globalVar_BC_transition" + itos_cu(transitionIdx));
//ResultSave BC Mean/InvVariance
log_gpuPtr<Dtype>(this->ResultSaveMean_BC[transitionIdx], numChannel_quadG, localDir + "ResultSaveMean_BC_transition" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->ResultSaveInvVariance_BC[transitionIdx], numChannel_quadG, localDir + "ResultSaveInvVariance_BC_transition" + itos_cu(transitionIdx));
}
//Filter_data/grad_gpu
int filterSize;
if (useBC) {
filterSize = 4 * growthRate*growthRate * 3 * 3;
}
else {
filterSize = (this->initChannel + this->growthRate*transitionIdx) * this->growthRate * 3 * 3;
}
log_gpuPtr<Dtype>(this->blobs_[transitionIdx]->mutable_gpu_data(), filterSize, localDir + "Filter_data_gpu_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[transitionIdx]->mutable_gpu_diff(), filterSize, localDir + "Filter_grad_gpu_" + itos_cu(transitionIdx));
//Scaler_data/grad_gpu
log_gpuPtr<Dtype>(this->blobs_[transitionIdx + this->numTransition]->mutable_gpu_diff(), numChannel_moreWide, localDir + "Scaler_grad_gpu_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[transitionIdx + this->numTransition]->mutable_gpu_data(), numChannel_moreWide, localDir + "Scaler_data_gpu_" + itos_cu(transitionIdx));
//Bias_data/grad_gpu
log_gpuPtr<Dtype>(this->blobs_[transitionIdx + 2 * this->numTransition]->mutable_gpu_diff(), numChannel_moreWide, localDir + "Bias_grad_gpu_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[transitionIdx + 2 * this->numTransition]->mutable_gpu_data(), numChannel_moreWide, localDir + "Bias_data_gpu_" + itos_cu(transitionIdx));
if (useBC) {
//BC Filter
int filterBC_size = (initChannel + growthRate*transitionIdx) * 4 * growthRate * 1 * 1;
log_gpuPtr<Dtype>(this->blobs_[5 * numTransition + transitionIdx]->mutable_gpu_data(), filterBC_size, localDir + "Filter_data_BC_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[5 * numTransition + transitionIdx]->mutable_gpu_diff(), filterBC_size, localDir + "Filter_grad_BC_" + itos_cu(transitionIdx));
//BC scaler
log_gpuPtr<Dtype>(this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_diff(), numChannel_quadG, localDir + "Scaler_grad_BC_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_data(), numChannel_quadG, localDir + "Scaler_data_BC_" + itos_cu(transitionIdx));
//BC bias
log_gpuPtr<Dtype>(this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_diff(), numChannel_quadG, localDir + "Bias_grad_BC_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_data(), numChannel_quadG, localDir + "Bias_data_BC_" + itos_cu(transitionIdx));
}
}
}
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::GPU_Initialization() {
//std::cout<<"Pre DeviceSet"<<std::endl;
//CUDA_CHECK(hipSetDevice(1));
//std::cout<<"Post DeviceSet"<<std::endl;
//GPU intermediate ptrs
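	// wide intermediate buffers sized for the fully concatenated feature map,
	// N x (initChannel + numTransition*growthRate) x H x W, for conv/BN/ReLU data and gradients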
#if 1
int bufferSize_byte = this->N*(this->initChannel + this->growthRate*this->numTransition)*this->H*this->W * sizeof(Dtype);
CUDA_CHECK(hipMalloc(&this->postConv_data_gpu, bufferSize_byte));
if (useDropout) {
CUDA_CHECK(hipMalloc(&this->postDropout_data_gpu, bufferSize_byte));
}
CUDA_CHECK(hipMalloc(&this->postBN_data_gpu, bufferSize_byte));
CUDA_CHECK(hipMalloc(&this->postReLU_data_gpu, bufferSize_byte));
CUDA_CHECK(hipMalloc(&this->postConv_grad_gpu, bufferSize_byte));
if (useDropout) {
CUDA_CHECK(hipMalloc(&this->postDropout_grad_gpu, bufferSize_byte));
}
CUDA_CHECK(hipMalloc(&this->postBN_grad_gpu, bufferSize_byte));
CUDA_CHECK(hipMalloc(&this->postReLU_grad_gpu, bufferSize_byte));
hipMemset(this->postConv_data_gpu, 0, bufferSize_byte);
hipMemset(this->postBN_data_gpu, 0, bufferSize_byte);
hipMemset(this->postReLU_data_gpu, 0, bufferSize_byte);
hipMemset(this->postConv_grad_gpu, 0, bufferSize_byte);
hipMemset(this->postBN_grad_gpu, 0, bufferSize_byte);
hipMemset(this->postReLU_grad_gpu, 0, bufferSize_byte);
#endif
//workspace
CUDA_CHECK(hipMalloc(&this->workspace, this->workspace_size_bytes));
hipMemset(this->workspace, 0, this->workspace_size_bytes);
CUDA_CHECK(hipMalloc(&this->workspace2, this->workspace_size_bytes));
hipMemset(this->workspace2, 0, this->workspace_size_bytes);
//handles and descriptors
//cudnn handle
this->cudnnHandlePtr = new cudnnHandle_t;
cudaPrimalStream = new hipStream_t;
CUDNN_CHECK(cudnnCreate(this->cudnnHandlePtr));
CUDA_CHECK(hipStreamCreate(cudaPrimalStream));
//CUDNN_CHECK(cudnnSetStream(*cudnnHandlePtr,*cudaPrimalStream));
int extraHandle_num = 3;
for (int i = 0; i < extraHandle_num; ++i) {
cudnnHandle_t* localHandle = new cudnnHandle_t;
hipStream_t* localStream = new hipStream_t;
CUDNN_CHECK(cudnnCreate(localHandle));
CUDA_CHECK(hipStreamCreate(localStream));
CUDNN_CHECK(cudnnSetStream(*localHandle, *localStream));
extraHandles.push_back(localHandle);
extraStreams.push_back(localStream);
}
//ReLU Activation Descriptor
this->ReLUDesc = new cudnnActivationDescriptor_t;
cudnn::createActivationDescriptor<Dtype>(ReLUDesc, CUDNN_ACTIVATION_RELU);
//conv_y global tensor descriptor
this->tensorDescriptor_conv_y = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(this->tensorDescriptor_conv_y);
#if 1
cudnn::setTensor4dDesc<Dtype>(this->tensorDescriptor_conv_y, this->N, this->growthRate, this->H, this->W, (this->numTransition*this->growthRate + this->initChannel)*this->H*this->W, this->H*this->W, this->W, 1);
#endif
//BC
int quadG_numValues = 4 * N*growthRate*H*W;
int quadG_numBytes = quadG_numValues * sizeof(Dtype);
if (useBC) {
#if 1
CUDA_CHECK(hipMalloc(&postBN_4G, quadG_numBytes));
CUDA_CHECK(hipMalloc(&postBN_4G_grad, quadG_numBytes));
CUDA_CHECK(hipMalloc(&postReLU_4G, quadG_numBytes));
CUDA_CHECK(hipMalloc(&postReLU_4G_grad, quadG_numBytes));
CUDA_CHECK(hipMalloc(&postConv_4G_grad, quadG_numBytes));
hipMemset(postBN_4G, 0, quadG_numBytes);
hipMemset(postBN_4G_grad, 0, quadG_numBytes);
hipMemset(postReLU_4G, 0, quadG_numBytes);
hipMemset(postReLU_4G_grad, 0, quadG_numBytes);
hipMemset(postConv_4G_grad, 0, quadG_numBytes);
if (BC_ultra_spaceEfficient) {
CUDA_CHECK(hipMalloc(&postConv_4G, quadG_numBytes));
hipMemset(postConv_4G, 0, quadG_numBytes);
}
#endif
quadG_tensorDesc = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(quadG_tensorDesc);
#if 1
cudnn::setTensor4dDesc<Dtype>(quadG_tensorDesc, N, 4 * growthRate, H, W, 4 * growthRate*H*W, H*W, W, 1);
#endif
quadG_paramDesc = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(quadG_paramDesc);
cudnn::setTensor4dDesc<Dtype>(quadG_paramDesc, 1, 4 * growthRate, 1, 1, 4 * growthRate, 1, 1, 1);
convBC_Descriptor = new cudnnConvolutionDescriptor_t;
CUDNN_CHECK(cudnnCreateConvolutionDescriptor(convBC_Descriptor));
CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*convBC_Descriptor, 0, 0, 1, 1, 1, 1, CUDNN_CONVOLUTION, cudnn::dataType<Dtype>::type));
}
//per transition variables
for (int i = 0; i < this->numTransition; ++i) {
//Result Running/Saving Mean/Variance/InvVariance
int localChannel = this->initChannel + i * this->growthRate;
Dtype* local_SaveMean;
Dtype* local_SaveInvVar;
CUDA_CHECK(hipMalloc(&local_SaveMean, localChannel * sizeof(Dtype)));
CUDA_CHECK(hipMalloc(&local_SaveInvVar, localChannel * sizeof(Dtype)));
hipMemset(local_SaveMean, 0, localChannel * sizeof(Dtype));
hipMemset(local_SaveInvVar, 0, localChannel * sizeof(Dtype));
this->ResultSaveMean_gpu.push_back(local_SaveMean);
this->ResultSaveInvVariance_gpu.push_back(local_SaveInvVar);
//conv_x descriptor
int conv_x_channels = this->initChannel + this->growthRate * i;
cudnnTensorDescriptor_t * wide_Desc_local_x = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(wide_Desc_local_x);
#if 1
cudnn::setTensor4dDesc<Dtype>(wide_Desc_local_x, this->N, conv_x_channels, this->H, this->W, (this->numTransition*this->growthRate + this->initChannel)*this->H*this->W, this->H*this->W, this->W, 1);
this->tensorDescriptorVec_conv_x.push_back(wide_Desc_local_x);
#endif
//filter Descriptor for Convolution
if (!useBC) {
cudnnFilterDescriptor_t * localFilterDesc = new cudnnFilterDescriptor_t;
cudnn::createFilterDesc<Dtype>(localFilterDesc, growthRate, conv_x_channels, 3, 3);
this->filterDescriptorVec.push_back(localFilterDesc);
}
else {
//3*3 convolution filter desc
cudnnFilterDescriptor_t * localFilterDesc = new cudnnFilterDescriptor_t;
cudnn::createFilterDesc<Dtype>(localFilterDesc, growthRate, 4 * growthRate, 3, 3);
this->filterDescriptorVec.push_back(localFilterDesc);
//1*1 convolution filter desc
cudnnFilterDescriptor_t * localBottleneckFilterDesc = new cudnnFilterDescriptor_t;
cudnn::createFilterDesc<Dtype>(localBottleneckFilterDesc, 4 * growthRate, conv_x_channels, 1, 1);
this->BC_filterDescriptorVec.push_back(localBottleneckFilterDesc);
}
//BN channel-wise Descriptor
int channelsBefore_self = initChannel + growthRate*i;
cudnnTensorDescriptor_t * BNparam = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(BNparam);
cudnn::setTensor4dDesc<Dtype>(BNparam, 1, channelsBefore_self, 1, 1);
this->tensorDescriptor_BN.push_back(BNparam);
//Dropout Ptr and Descriptor
if (useDropout) {
size_t * sizeState = new size_t[1];
size_t * sizeReserve = new size_t[1];
CUDNN_CHECK(cudnnDropoutGetStatesSize((*cudnnHandlePtr), sizeState));
CUDNN_CHECK(cudnnDropoutGetReserveSpaceSize(*tensorDescriptor_conv_y, sizeReserve));
dropout_reserveSize.push_back(sizeReserve[0]);
dropout_stateSize.push_back(sizeState[0]);
void* localStatePtr;
void* localReservePtr;
CUDA_CHECK(hipMalloc(&localStatePtr, sizeState[0]));
CUDA_CHECK(hipMalloc(&localReservePtr, sizeReserve[0]));
dropout_state_gpu.push_back(localStatePtr);
dropout_reserve_gpu.push_back(localReservePtr);
cudnnDropoutDescriptor_t* localDropoutDesc = new cudnnDropoutDescriptor_t;
cudnnCreateDropoutDescriptor(localDropoutDesc);
cudnnSetDropoutDescriptor(*localDropoutDesc, *cudnnHandlePtr, dropoutAmount, localStatePtr, sizeState[0], DB_randomSeed);
dropoutDescriptorVec.push_back(localDropoutDesc);
DB_randomSeed += 1;
}
//BC
if (useBC && (!BC_ultra_spaceEfficient)) {
Dtype* local_BC4G;
CUDA_CHECK(hipMalloc(&local_BC4G, quadG_numValues * sizeof(Dtype)));
hipMemset(local_BC4G, 0, quadG_numBytes);
postConv_4GVec.push_back(local_BC4G);
}
if (useBC) {
Dtype* BC_tmpMeanLocal;
Dtype* BC_tmpVarLocal;
int numChannel_BC = 4 * growthRate;
int byteChannel_BC = numChannel_BC * sizeof(Dtype);
CUDA_CHECK(hipMalloc(&BC_tmpMeanLocal, numChannel_BC * sizeof(Dtype)));
CUDA_CHECK(hipMalloc(&BC_tmpVarLocal, numChannel_BC * sizeof(Dtype)));
hipMemset(BC_tmpMeanLocal, 0, byteChannel_BC);
hipMemset(BC_tmpVarLocal, 0, byteChannel_BC);
BC_MeanInfVec.push_back(BC_tmpMeanLocal);
BC_VarInfVec.push_back(BC_tmpVarLocal);
Dtype* BC_localSaveMean;
Dtype* BC_localSaveInvVar;
CUDA_CHECK(hipMalloc(&BC_localSaveMean, numChannel_BC * sizeof(Dtype)));
CUDA_CHECK(hipMalloc(&BC_localSaveInvVar, numChannel_BC * sizeof(Dtype)));
hipMemset(BC_localSaveMean, 0, byteChannel_BC);
hipMemset(BC_localSaveInvVar, 0, byteChannel_BC);
ResultSaveMean_BC.push_back(BC_localSaveMean);
ResultSaveInvVariance_BC.push_back(BC_localSaveInvVar);
}
}
//Conv Descriptor
this->conv_Descriptor = new cudnnConvolutionDescriptor_t;
CUDNN_CHECK(cudnnCreateConvolutionDescriptor(this->conv_Descriptor));
CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*this->conv_Descriptor, 1, 1, 1, 1, 1, 1, CUDNN_CONVOLUTION, cudnn::dataType<Dtype>::type));
//Mean and Var tmp
int totalNumChannel = this->initChannel + this->growthRate * this->numTransition;
CUDA_CHECK(hipMalloc(&this->Mean_tmp, totalNumChannel * sizeof(Dtype)));
CUDA_CHECK(hipMalloc(&this->Var_tmp, totalNumChannel * sizeof(Dtype)));
//Convolution Algorithms
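	// query cuDNN once per transition for forward / backward-filter / backward-data
	// algorithms that fit within the preallocated workspace limit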
for (int transitionIdx = 0; transitionIdx < numTransition; ++transitionIdx) {
cudnnTensorDescriptor_t conv_x_desc;
cudnnTensorDescriptor_t conv_y_desc;
cudnnFilterDescriptor_t conv_w_desc;
cudnnTensorDescriptor_t BC_x_desc;
cudnnTensorDescriptor_t BC_y_desc;
cudnnFilterDescriptor_t BC_w_desc;
if (useBC) {
conv_x_desc = *(quadG_tensorDesc);
conv_y_desc = *(tensorDescriptor_conv_y);
conv_w_desc = *(filterDescriptorVec[transitionIdx]);
BC_x_desc = *(tensorDescriptorVec_conv_x[transitionIdx]);
BC_y_desc = *(quadG_tensorDesc);
BC_w_desc = *(BC_filterDescriptorVec[transitionIdx]);
}
else {
conv_x_desc = *(tensorDescriptorVec_conv_x[transitionIdx]);
conv_y_desc = *(tensorDescriptor_conv_y);
conv_w_desc = *(filterDescriptorVec[transitionIdx]);
}
//Conv Fwd Algo
cudnnConvolutionFwdAlgo_t* conv_FwdAlgo_local = new cudnnConvolutionFwdAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(
*cudnnHandlePtr,
conv_x_desc, conv_w_desc, *conv_Descriptor, conv_y_desc,
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, conv_FwdAlgo_local
));
conv_FwdAlgoVec.push_back(conv_FwdAlgo_local);
//Conv Bwd Filter Algo
cudnnConvolutionBwdFilterAlgo_t* conv_BwdFilter_local = new cudnnConvolutionBwdFilterAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionBackwardFilterAlgorithm(
*cudnnHandlePtr,
conv_x_desc, conv_y_desc, *conv_Descriptor, conv_w_desc,
CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, conv_BwdFilter_local
));
conv_BwdFilterAlgoVec.push_back(conv_BwdFilter_local);
//Conv Bwd Data Algo
cudnnConvolutionBwdDataAlgo_t* conv_BwdData_local = new cudnnConvolutionBwdDataAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionBackwardDataAlgorithm(
*(this->extraHandles[0]),
conv_w_desc, conv_y_desc, *conv_Descriptor, conv_x_desc,
CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, conv_BwdData_local
));
conv_BwdDataAlgoVec.push_back(conv_BwdData_local);
//BC Convolution
if (useBC) {
//BC Fwd Algo
cudnnConvolutionFwdAlgo_t* BC_FwdAlgo_local = new cudnnConvolutionFwdAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(
*cudnnHandlePtr,
BC_x_desc, BC_w_desc, *convBC_Descriptor, BC_y_desc,
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, BC_FwdAlgo_local
));
BC_FwdAlgoVec.push_back(BC_FwdAlgo_local);
//BC Bwd Filter Algo
cudnnConvolutionBwdFilterAlgo_t* BC_BwdFilter_local = new cudnnConvolutionBwdFilterAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionBackwardFilterAlgorithm(
*cudnnHandlePtr,
BC_x_desc, BC_y_desc, *convBC_Descriptor, BC_w_desc,
CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, BC_BwdFilter_local
));
BC_BwdFilterAlgoVec.push_back(BC_BwdFilter_local);
//BC Bwd Data Algo
cudnnConvolutionBwdDataAlgo_t* BC_BwdData_local = new cudnnConvolutionBwdDataAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionBackwardDataAlgorithm(
*(this->extraHandles[0]),
BC_w_desc, BC_y_desc, *convBC_Descriptor, BC_x_desc,
CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, BC_BwdData_local
));
BC_BwdDataAlgoVec.push_back(BC_BwdData_local);
}
}
}
template <typename Dtype>
void cleanupBuffer(Dtype* ptr_gpu, int count) {
hipMemset(ptr_gpu, 0, count * sizeof(Dtype));
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::LoopEndCleanup_gpu() {
int valsBuffer = this->N * (this->initChannel + this->growthRate * this->numTransition) * this->H * this->W;
cleanupBuffer(this->postConv_data_gpu, valsBuffer);
cleanupBuffer(this->postConv_grad_gpu, valsBuffer);
if (useDropout) {
cleanupBuffer(this->postDropout_data_gpu, valsBuffer);
cleanupBuffer(this->postDropout_grad_gpu, valsBuffer);
}
cleanupBuffer(this->postBN_data_gpu, valsBuffer);
cleanupBuffer(this->postBN_grad_gpu, valsBuffer);
cleanupBuffer(this->postReLU_data_gpu, valsBuffer);
cleanupBuffer(this->postReLU_grad_gpu, valsBuffer);
int vals4G = N * 4 * growthRate*H*W;
if (useBC) {
cleanupBuffer(postConv_4G_grad, vals4G);
cleanupBuffer(postBN_4G_grad, vals4G);
cleanupBuffer(postReLU_4G_grad, vals4G);
}
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::resetDropoutDesc() {
for (int transitionIdx = 0; transitionIdx < numTransition; ++transitionIdx) {
std::cout << &(dropout_state_gpu[transitionIdx]) << "," << dropout_stateSize[transitionIdx] << std::endl;
CUDNN_CHECK(cudnnSetDropoutDescriptor(
*(dropoutDescriptorVec[transitionIdx]),
*(this->cudnnHandlePtr),
dropoutAmount,
dropout_state_gpu[transitionIdx],
dropout_stateSize[transitionIdx],
DB_randomSeed
));
DB_randomSeed++;
}
}
__global__ void sync_streams() {}
template <typename Dtype>
void DenseBlockLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
#if 0
if (!this->gpuInited) {
//std::cout<<"Initializing GPU local"<<std::endl;
this->GPU_Initialization();
this->gpuInited = true;
//std::cout<< "GPUInited"<< std::endl;
}
#endif
clock_t begin_fwd = std::clock();//timer
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
	//copy bottom_data into the strided buffer
int chunkSize_copy_init = this->initChannel * this->H * this->W;
int chunkStride_copy = (this->initChannel + this->growthRate * this->numTransition) * this->H * this->W;
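	// the working buffers hold the dense block's concatenated feature maps: the input occupies the
	// first initChannel channels and each transition appends growthRate channels, hence the wide stride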
if ((this->phase_ == TRAIN) && useDropout) {
gpu_copy_one_to_many<Dtype>(bottom_data, this->postDropout_data_gpu, this->N, chunkSize_copy_init, chunkStride_copy);
}
else {
gpu_copy_one_to_many<Dtype>(bottom_data, this->postConv_data_gpu, this->N, chunkSize_copy_init, chunkStride_copy);
}
int work_n = this->N * (this->initChannel + this->numTransition * this->growthRate) * this->H * this->W;
//work in the buffer, transition by transition
for (int transitionIdx = 0; transitionIdx < this->numTransition; ++transitionIdx) {
//BN Fwd
Dtype* BN_x_ptr;
if (this->phase_ == TRAIN && useDropout) {
BN_x_ptr = this->postDropout_data_gpu;
}
else {
BN_x_ptr = this->postConv_data_gpu;
}
Dtype* BN_y_ptr = this->postBN_data_gpu;
Dtype* BN_globalMean = this->blobs_[3 * this->numTransition + transitionIdx]->mutable_gpu_data();
Dtype* BN_globalVar = this->blobs_[4 * this->numTransition + transitionIdx]->mutable_gpu_data();
cudnnTensorDescriptor_t * BN_paramDesc = tensorDescriptor_BN[transitionIdx];
int numChannels = initChannel + growthRate*transitionIdx;
Dtype* local_MeanInf = this->Mean_tmp;
Dtype* local_VarInf = this->Var_tmp;
if (this->phase_ == TEST) {
CUDNN_CHECK(cudnnBatchNormalizationForwardInference(
*(this->cudnnHandlePtr), CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_x_ptr,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_y_ptr,
*BN_paramDesc,
this->blobs_[this->numTransition + transitionIdx]->gpu_data(),
this->blobs_[2 * this->numTransition + transitionIdx]->gpu_data(),
BN_globalMean, BN_globalVar, CUDNN_BN_MIN_EPSILON)
);
}
else {
Dtype* batchMean = this->ResultSaveMean_gpu[transitionIdx];
Dtype* batchInvVar = this->ResultSaveInvVariance_gpu[transitionIdx];
CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
*(this->cudnnHandlePtr), CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_x_ptr,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_y_ptr,
*BN_paramDesc,
this->blobs_[this->numTransition + transitionIdx]->mutable_gpu_data(),
this->blobs_[2 * this->numTransition + transitionIdx]->mutable_gpu_data(),
Dtype(1), local_MeanInf, local_VarInf, CUDNN_BN_MIN_EPSILON,
batchMean, batchInvVar)
);
//update global Mean/Var manually
//Mean:
caffe_gpu_axpby(numChannels, EMA_decay, local_MeanInf, Dtype(1.0 - EMA_decay), BN_globalMean);
//Var:
caffe_gpu_axpby(numChannels, EMA_decay, local_VarInf, Dtype(1.0 - EMA_decay), BN_globalVar);
}
//ReLU
Dtype* ReLU_x_ptr = this->postBN_data_gpu;
Dtype* ReLU_y_ptr = this->postReLU_data_gpu;
CUDNN_CHECK(cudnnActivationForward(*(this->cudnnHandlePtr), *ReLUDesc,
cudnn::dataType<Dtype>::one,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_x_ptr,
cudnn::dataType<Dtype>::zero,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_y_ptr)
);
if (useBC) {
//Convolution 1*1 kernel
Dtype* conv_x_4G = postReLU_data_gpu;
Dtype* conv_y_4G;
if (BC_ultra_spaceEfficient) {
conv_y_4G = postConv_4G;
}
else {
conv_y_4G = postConv_4GVec[transitionIdx];
}
//CONV_ALGO
CUDNN_CHECK(cudnnConvolutionForward(*(cudnnHandlePtr),
cudnn::dataType<Dtype>::one,
*this->tensorDescriptorVec_conv_x[transitionIdx], conv_x_4G,
*(BC_filterDescriptorVec[transitionIdx]),
this->blobs_[5 * numTransition + transitionIdx]->gpu_data(),
*(convBC_Descriptor), *BC_FwdAlgoVec[transitionIdx],
workspace, workspace_size_bytes, cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, conv_y_4G
));
//std::cout<<"BC Fwd Conv Done"<<std::endl;
//BN 4G Fwd
Dtype* BN_x_4G = BC_ultra_spaceEfficient ? postConv_4G : postConv_4GVec[transitionIdx];
Dtype* BN_y_4G = postBN_4G;
Dtype* BN_BC_globalMean = this->blobs_[8 * numTransition + transitionIdx]->mutable_gpu_data();
Dtype* BN_BC_globalVar = this->blobs_[9 * numTransition + transitionIdx]->mutable_gpu_data();
Dtype* localBC_MeanInf = BC_MeanInfVec[transitionIdx];
Dtype* localBC_VarInf = BC_VarInfVec[transitionIdx];
//std::cout<<"BC Fwd BN Prepared"<<std::endl;
if (this->phase_ == TEST) {
CUDNN_CHECK(cudnnBatchNormalizationForwardInference(
*cudnnHandlePtr, CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, BN_x_4G,
*quadG_tensorDesc, BN_y_4G,
*quadG_paramDesc,
this->blobs_[6 * numTransition + transitionIdx]->gpu_data(),
this->blobs_[7 * numTransition + transitionIdx]->gpu_data(),
BN_BC_globalMean, BN_BC_globalVar, CUDNN_BN_MIN_EPSILON)
);
}
else {
Dtype* BC_batchMean = ResultSaveMean_BC[transitionIdx];
Dtype* BC_batchInvVar = ResultSaveInvVariance_BC[transitionIdx];
CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
*cudnnHandlePtr, CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, BN_x_4G,
*quadG_tensorDesc, BN_y_4G,
*quadG_paramDesc,
this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_data(),
this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_data(),
Dtype(1), localBC_MeanInf, localBC_VarInf, CUDNN_BN_MIN_EPSILON,
BC_batchMean, BC_batchInvVar
));
caffe_gpu_axpby(4 * growthRate, EMA_decay, localBC_MeanInf, Dtype(1.0 - EMA_decay), BN_BC_globalMean);
caffe_gpu_axpby(4 * growthRate, EMA_decay, localBC_VarInf, Dtype(1.0 - EMA_decay), BN_BC_globalVar);
}
//std::cout<<"BC Fwd BN Done"<<std::endl;
//ReLU 4G Fwd
Dtype* ReLU_BC_x = postBN_4G;
Dtype* ReLU_BC_y = postReLU_4G;
CUDNN_CHECK(cudnnActivationForward(*cudnnHandlePtr, *ReLUDesc,
cudnn::dataType<Dtype>::one,
*quadG_tensorDesc, ReLU_BC_x,
cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, ReLU_BC_y
));
//std::cout<<"BC Fwd ReLU Done"<<std::endl;
}
//Convolution
int delayChannel = this->initChannel + this->growthRate * transitionIdx;
Dtype* conv_x_local;
cudnnTensorDescriptor_t* conv_x_localDesc;
if (useBC) {
conv_x_local = postReLU_4G;
conv_x_localDesc = quadG_tensorDesc;
}
else {
conv_x_local = postReLU_data_gpu;
conv_x_localDesc = tensorDescriptorVec_conv_x[transitionIdx];
}
Dtype* conv_y_local = this->postConv_data_gpu + delayChannel * this->H * this->W;
//CONV_ALGO
CUDNN_CHECK(cudnnConvolutionForward(*(this->cudnnHandlePtr),
cudnn::dataType<Dtype>::one,
*conv_x_localDesc, conv_x_local,
*(filterDescriptorVec[transitionIdx]),
this->blobs_[transitionIdx]->gpu_data(),
*conv_Descriptor, *conv_FwdAlgoVec[transitionIdx],
workspace, workspace_size_bytes, cudnn::dataType<Dtype>::zero,
*(tensorDescriptor_conv_y), conv_y_local
)
);
//Dropout
if ((this->phase_ == TRAIN) && useDropout) {
Dtype* dropout_x_local = postConv_data_gpu + delayChannel*H*W;
Dtype* dropout_y_local = postDropout_data_gpu + delayChannel*H*W;
CUDNN_CHECK(cudnnDropoutForward(*(this->cudnnHandlePtr),
*(dropoutDescriptorVec[transitionIdx]),
*tensorDescriptor_conv_y, dropout_x_local,
*tensorDescriptor_conv_y, dropout_y_local,
dropout_reserve_gpu[transitionIdx], dropout_reserveSize[transitionIdx]
));
}
//this->logInternal_gpu("TClogFwd",transitionIdx,true,false);
}
//deploy top data
if ((this->phase_ == TRAIN) && useDropout) {
hipMemcpy(top[0]->mutable_gpu_data(), postDropout_data_gpu, work_n * sizeof(Dtype), hipMemcpyDeviceToDevice);
}
else {
hipMemcpy(top[0]->mutable_gpu_data(), postConv_data_gpu, work_n * sizeof(Dtype), hipMemcpyDeviceToDevice);
}
//clock_t end_fwd = std::clock();
//double elapsed_fwd = double(end_fwd - begin_fwd) / CLOCKS_PER_SEC;
//std::cout<<"elapsed fwd gpu:"<<elapsed_fwd<<std::endl;
//this->logInternal_gpu("TClogFwd",-1,false,false);
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
#if 0
if (!this->gpuInited) {
this->GPU_Initialization();
this->gpuInited = true;
}
#endif
//clock_t begin_bwd = std::clock();
	//assuming the buffers hold the already-computed forward values; always propagate down
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int work_n = N * (initChannel + growthRate*numTransition) * H * W;
//deploy top diff
if (useDropout) {
hipMemcpy(postDropout_grad_gpu, top[0]->mutable_gpu_diff(), work_n * sizeof(Dtype), hipMemcpyDeviceToDevice);
}
else {
hipMemcpy(postConv_grad_gpu, top[0]->mutable_gpu_diff(), work_n * sizeof(Dtype), hipMemcpyDeviceToDevice);
}
//Backward, transition by transition
for (int transitionIdx = this->numTransition - 1; transitionIdx >= 0; --transitionIdx) {
int channelsBefore_self = this->initChannel + transitionIdx * this->growthRate;
//Using BN & ReLU Fwd to generate corresponding postBN,postReLU data for this transition
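		// the postBN/postReLU buffers are shared by all transitions, so the values belonging to this
		// transition are recomputed here instead of being kept from the forward pass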
//BN Fwd
Dtype* BN_x_ptr;
if (useDropout) {
BN_x_ptr = postDropout_data_gpu;
}
else {
BN_x_ptr = postConv_data_gpu;
}
Dtype* BN_y_ptr = postBN_data_gpu;
Dtype* BN_globalMean = this->blobs_[3 * this->numTransition + transitionIdx]->mutable_gpu_data();
Dtype* BN_globalVar = this->blobs_[4 * this->numTransition + transitionIdx]->mutable_gpu_data();
cudnnTensorDescriptor_t* BN_paramDesc = tensorDescriptor_BN[transitionIdx];
Dtype* local_MeanInf = Mean_tmp;
Dtype* local_VarInf = Var_tmp;
Dtype* batchMean = this->ResultSaveMean_gpu[transitionIdx];
Dtype* batchInvVar = this->ResultSaveInvVariance_gpu[transitionIdx];
CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
*(this->cudnnHandlePtr), CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_x_ptr,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_y_ptr,
*BN_paramDesc,
this->blobs_[this->numTransition + transitionIdx]->mutable_gpu_data(),
this->blobs_[2 * this->numTransition + transitionIdx]->mutable_gpu_data(),
Dtype(1), local_MeanInf, local_VarInf, CUDNN_BN_MIN_EPSILON,
batchMean, batchInvVar)
);
/*CUDNN_CHECK(cudnnBatchNormalizationForwardInference(
*(this->cudnnHandlePtr),CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one,cudnn::dataType<Dtype>::zero,
*(this->tensorDescriptorVec_conv_x[transitionIdx]),BN_x_ptr,
*(this->tensorDescriptorVec_conv_x[transitionIdx]),BN_y_ptr,
*BN_paramDesc,
this->blobs_[this->numTransition+transitionIdx]->gpu_data(),
this->blobs_[2*this->numTransition+transitionIdx]->gpu_data(),
local_MeanInf,local_VarInf,CUDNN_BN_MIN_EPSILON)
);*/
//ReLU Fwd
Dtype* ReLU_x_ptr = this->postBN_data_gpu;
Dtype* ReLU_y_ptr = this->postReLU_data_gpu;
CUDNN_CHECK(cudnnActivationForward(*(this->cudnnHandlePtr), *ReLUDesc,
cudnn::dataType<Dtype>::one,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_x_ptr,
cudnn::dataType<Dtype>::zero,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_y_ptr)
);
if (useBC) {
//Fwd phase
//If BC ultra space-efficient mode is on, the 1*1 conv output was not kept from Forward, so redo the 1*1 convolution Fwd here
//CONV_ALGO
if (BC_ultra_spaceEfficient) {
Dtype* conv_x_4G = postReLU_data_gpu;
Dtype* conv_y_4G = postConv_4G;
CUDNN_CHECK(cudnnConvolutionForward(*cudnnHandlePtr,
cudnn::dataType<Dtype>::one,
*this->tensorDescriptorVec_conv_x[transitionIdx], conv_x_4G,
*(BC_filterDescriptorVec[transitionIdx]),
this->blobs_[5 * numTransition + transitionIdx]->gpu_data(),
*(convBC_Descriptor), *BC_FwdAlgoVec[transitionIdx],
workspace, workspace_size_bytes, cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, conv_y_4G
));
}
//cudnnHandle_t * localFwdHandle = BC_ultra_spaceEfficient?cudnnHandlePtr:extraHandles[0];//TODO
cudnnHandle_t * localFwdHandle = cudnnHandlePtr;
//BC BN Fwd reconstruction
Dtype* BN_x_4G = BC_ultra_spaceEfficient ? postConv_4G : postConv_4GVec[transitionIdx];
Dtype* BN_y_4G = postBN_4G;
Dtype* localBC_MeanInf = BC_MeanInfVec[transitionIdx];
Dtype* localBC_VarInf = BC_VarInfVec[transitionIdx];
Dtype* BC_batchMean = ResultSaveMean_BC[transitionIdx];
Dtype* BC_batchInvVar = ResultSaveInvVariance_BC[transitionIdx];
CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
*cudnnHandlePtr, CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, BN_x_4G,
*quadG_tensorDesc, BN_y_4G,
*quadG_paramDesc,
this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_data(),
this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_data(),
Dtype(1), localBC_MeanInf, localBC_VarInf, CUDNN_BN_MIN_EPSILON,
BC_batchMean, BC_batchInvVar
));
/*CUDNN_CHECK(cudnnBatchNormalizationForwardInference(
*localFwdHandle,CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one,cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc,BN_x_4G,
*quadG_tensorDesc,BN_y_4G,
*quadG_paramDesc,
this->blobs_[6*numTransition+transitionIdx]->gpu_data(),
this->blobs_[7*numTransition+transitionIdx]->gpu_data(),
localBC_MeanInf,localBC_VarInf,CUDNN_BN_MIN_EPSILON
));*/
//BC ReLU Fwd reconstruction
Dtype* ReLU_BC_x = postBN_4G;
Dtype* ReLU_BC_y = postReLU_4G;
CUDNN_CHECK(cudnnActivationForward(*localFwdHandle, *ReLUDesc,
cudnn::dataType<Dtype>::one,
*quadG_tensorDesc, ReLU_BC_x,
cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, ReLU_BC_y
));
}
//CUDA_CHECK(hipStreamSynchronize(*(extraStreams[0])));
//sync_streams<<<1, 1>>>();
//Now do Bwd
//Dropout
if (useDropout) {
Dtype* dropout_dy_ptr = postDropout_grad_gpu + channelsBefore_self*H*W;
Dtype* dropout_dx_ptr = postConv_grad_gpu + channelsBefore_self*H*W;
CUDNN_CHECK(cudnnDropoutBackward(*(this->cudnnHandlePtr),
*(dropoutDescriptorVec[transitionIdx]),
*tensorDescriptor_conv_y, dropout_dy_ptr,
*tensorDescriptor_conv_y, dropout_dx_ptr,
dropout_reserve_gpu[transitionIdx], dropout_reserveSize[transitionIdx]
));
}
//Conv
Dtype* filterGrad_local = this->blobs_[transitionIdx]->mutable_gpu_diff();
Dtype* filterData_local = this->blobs_[transitionIdx]->mutable_gpu_data();
Dtype* conv_x_local = useBC ? postReLU_4G : postReLU_data_gpu;
Dtype* conv_dy_local = postConv_grad_gpu + channelsBefore_self * this->H * this->W;
Dtype* conv_dx_local = useBC ? postReLU_4G_grad : postReLU_grad_gpu;
cudnnTensorDescriptor_t * conv_x_localDesc = useBC ? quadG_tensorDesc : tensorDescriptorVec_conv_x[transitionIdx];
//Conv w.r.t. filter
//CONV_ALGO
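//beta is one here, so the filter gradient accumulates into the existing diff rather than overwriting it.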
CUDNN_CHECK(cudnnConvolutionBackwardFilter(*(this->cudnnHandlePtr),
cudnn::dataType<Dtype>::one,
*conv_x_localDesc, conv_x_local,
*(this->tensorDescriptor_conv_y), conv_dy_local,
*(this->conv_Descriptor), *conv_BwdFilterAlgoVec[transitionIdx],
this->workspace, this->workspace_size_bytes,
cudnn::dataType<Dtype>::one,
*(this->filterDescriptorVec[transitionIdx]), filterGrad_local
)
);
//Conv w.r.t. x
//CONV_ALGO
CUDNN_CHECK(cudnnConvolutionBackwardData(*(this->extraHandles[0]),
cudnn::dataType<Dtype>::one,
*(this->filterDescriptorVec[transitionIdx]), filterData_local,
*(this->tensorDescriptor_conv_y), conv_dy_local,
*(this->conv_Descriptor), *conv_BwdDataAlgoVec[transitionIdx],
this->workspace2, this->workspace_size_bytes,
cudnn::dataType<Dtype>::zero,
*conv_x_localDesc, conv_dx_local
)
);
sync_streams<<<1, 1>>>();
if (useBC) {
//BC ReLU Bwd
Dtype* BC_ReLU_y_local = postReLU_4G;
Dtype* BC_ReLU_dy_local = postReLU_4G_grad;
Dtype* BC_ReLU_x_local = postBN_4G;
Dtype* BC_ReLU_dx_local = postBN_4G_grad;
CUDNN_CHECK(cudnnActivationBackward(*cudnnHandlePtr, *ReLUDesc,
cudnn::dataType<Dtype>::one,
*quadG_tensorDesc, BC_ReLU_y_local,
*quadG_tensorDesc, BC_ReLU_dy_local,
*quadG_tensorDesc, BC_ReLU_x_local,
cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, BC_ReLU_dx_local
));
//BC BN Bwd
Dtype* BC_BN_x_local = BC_ultra_spaceEfficient ? postConv_4G : postConv_4GVec[transitionIdx];
Dtype* BC_BN_dx_local = postConv_4G_grad;
Dtype* BC_BN_dy_local = postBN_4G_grad;
Dtype* BC_saveMean_local = ResultSaveMean_BC[transitionIdx];
Dtype* BC_saveInvVar_local = ResultSaveInvVariance_BC[transitionIdx];
// CUDNN_CHECK(
cudnnStatus_t sta =
cudnnBatchNormalizationBackward(
*cudnnHandlePtr,
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one,
cudnn::dataType<Dtype>::zero,
#if CUDNN_VERSION >= 4005
cudnn::dataType<Dtype>::one,
cudnn::dataType<Dtype>::one,
#endif
*quadG_tensorDesc,
BC_BN_x_local,
*quadG_tensorDesc,
BC_BN_dy_local,
*quadG_tensorDesc,
BC_BN_dx_local,
*quadG_paramDesc,
this->blobs_[6 * numTransition + transitionIdx]->gpu_data(),
this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_diff(),
this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_diff(),
CUDNN_BN_MIN_EPSILON,
BC_saveMean_local,
BC_saveInvVar_local
);
//);
//BC Conv 1*1 Bwd
Dtype* BC_filterGrad = this->blobs_[5 * numTransition + transitionIdx]->mutable_gpu_diff();
Dtype* BC_filterData = this->blobs_[5 * numTransition + transitionIdx]->mutable_gpu_data();
Dtype* BC_conv_x_local = postReLU_data_gpu;
Dtype* BC_conv_dy_local = postConv_4G_grad;
Dtype* BC_conv_dx_local = postReLU_grad_gpu;
//Conv Bwd w.r.t. filter
//CONV_ALGO
CUDNN_CHECK(cudnnConvolutionBackwardFilter(*cudnnHandlePtr,
cudnn::dataType<Dtype>::one,
*tensorDescriptorVec_conv_x[transitionIdx], BC_conv_x_local,
*quadG_tensorDesc, BC_conv_dy_local,
*convBC_Descriptor, *BC_BwdFilterAlgoVec[transitionIdx],
workspace, workspace_size_bytes,
cudnn::dataType<Dtype>::one,
*BC_filterDescriptorVec[transitionIdx], BC_filterGrad
));
//Conv Bwd w.r.t. data
//CONV_ALGO
CUDNN_CHECK(cudnnConvolutionBackwardData(*(extraHandles[0]),
cudnn::dataType<Dtype>::one,
*BC_filterDescriptorVec[transitionIdx], BC_filterData,
*quadG_tensorDesc, BC_conv_dy_local,
*convBC_Descriptor, *BC_BwdDataAlgoVec[transitionIdx],
workspace2, workspace_size_bytes,
cudnn::dataType<Dtype>::zero,
*tensorDescriptorVec_conv_x[transitionIdx], BC_conv_dx_local
));
sync_streams<<<1, 1>>>();
}
//ReLU Bwd
Dtype* ReLU_y_local = postReLU_data_gpu;
Dtype* ReLU_x_local = postBN_data_gpu;
Dtype* ReLU_dy_local = postReLU_grad_gpu;
Dtype* ReLU_dx_local = postBN_grad_gpu;
CUDNN_CHECK(cudnnActivationBackward(*(this->cudnnHandlePtr), *ReLUDesc,
cudnn::dataType<Dtype>::one,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_y_local,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_dy_local,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_x_local,
cudnn::dataType<Dtype>::zero,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_dx_local)
);
//BN Bwd
Dtype* BN_x_local;
Dtype* BN_dx_local;
if (useDropout) {
BN_x_local = this->postDropout_data_gpu;
BN_dx_local = this->postDropout_grad_gpu;
}
else {
BN_x_local = this->postConv_data_gpu;
BN_dx_local = this->postConv_grad_gpu;
}
Dtype* BN_dy_local = this->postBN_grad_gpu;
Dtype* saveMean_local = this->ResultSaveMean_gpu[transitionIdx];
Dtype* saveInvVar_local = this->ResultSaveInvVariance_gpu[transitionIdx];
//CUDNN_CHECK(
cudnnBatchNormalizationBackward(*(this->cudnnHandlePtr),
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::one,
#if CUDNN_VERSION >= 4005
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::one,
#endif
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_x_local,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_dy_local,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_dx_local,
*BN_paramDesc,
this->blobs_[this->numTransition + transitionIdx]->gpu_data(),
this->blobs_[this->numTransition + transitionIdx]->mutable_gpu_diff(),
this->blobs_[2 * this->numTransition + transitionIdx]->mutable_gpu_diff(),
CUDNN_BN_MIN_EPSILON, saveMean_local, saveInvVar_local
);
//);
//this->logInternal_gpu("TClogBwd",transitionIdx,true,false);
//this->logInternal_gpu("TClogBwd",transitionIdx,true,true);
}
//deploy buffer to bottom diff
//this->logInternal_gpu("TClogBwd",-1,false,false);
int chunkSize_copy_init = this->initChannel * this->H * this->W;
int chunkStride_copy = (this->initChannel + this->numTransition * this->growthRate) * this->H * this->W;
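//Gather only the first initChannel channels of each sample from the strided wide buffer back into the contiguous bottom diff.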
if (useDropout) {
gpu_copy_many_to_one(postDropout_grad_gpu, bottom_diff, this->N, chunkSize_copy_init, chunkStride_copy);
//this->resetDropoutDesc();
}
else {
gpu_copy_many_to_one(postConv_grad_gpu, bottom_diff, this->N, chunkSize_copy_init, chunkStride_copy);
}
int numTotalChannels = initChannel + growthRate*numTransition;
cleanupBuffer(this->Mean_tmp, numTotalChannels);
cleanupBuffer(this->Var_tmp, numTotalChannels);
this->LoopEndCleanup_gpu();
//clock_t end_bwd = std::clock();
//double elapsed_bwd = double(end_bwd - begin_bwd) / CLOCKS_PER_SEC;
//std::cout<<"elapsed bwd time:"<<elapsed_bwd<<std::endl;
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::Forward_gpu_public(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
this->Forward_gpu(bottom, top);
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::Backward_gpu_public(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
this->Backward_gpu(top, propagate_down, bottom);
}
template <typename Dtype>
void ReallocCudaMem(Dtype** p, int size)
{
hipFree(*p); *p = 0;
CUDA_CHECK(hipMalloc(p, size));
hipMemset(*p, 0, size);
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::reshape_gpu_data(int oldh, int oldw,int oldn, int h, int w,int newn)
{
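//Buffers are reallocated only when the new shape needs more bytes than the old one; the tensor descriptors are always reset to the new n/h/w.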
int bufferSize_byte_old = oldn*(this->initChannel + this->growthRate*this->numTransition)*oldh*oldw * sizeof(Dtype);
int bufferSize_byte_new = newn*(this->initChannel + this->growthRate*this->numTransition)*h*w * sizeof(Dtype);
if (bufferSize_byte_new > bufferSize_byte_old)
{
int bufferSize_byte = bufferSize_byte_new;
ReallocCudaMem(&this->postConv_data_gpu, bufferSize_byte);
if (useDropout) {
ReallocCudaMem(&this->postDropout_data_gpu, bufferSize_byte);
}
ReallocCudaMem(&this->postBN_data_gpu, bufferSize_byte);
ReallocCudaMem(&this->postReLU_data_gpu, bufferSize_byte);
ReallocCudaMem(&this->postConv_grad_gpu, bufferSize_byte);
if (useDropout)
{
ReallocCudaMem(&this->postDropout_grad_gpu, bufferSize_byte);
}
ReallocCudaMem(&this->postBN_grad_gpu, bufferSize_byte);
ReallocCudaMem(&this->postReLU_grad_gpu, bufferSize_byte);
}
cudnn::setTensor4dDesc<Dtype>(this->tensorDescriptor_conv_y, newn, this->growthRate, h, w, (this->numTransition*this->growthRate + this->initChannel)*h*w, h*w, w, 1);
int quadG_numValues_old = 4 * oldn*growthRate*oldh*oldw; //old buffer size uses the old batch size
int quadG_numValues = 4 * newn*growthRate*h*w;
int quadG_numBytes = quadG_numValues * sizeof(Dtype);
if (quadG_numValues > quadG_numValues_old)
{
if (useBC)
{
ReallocCudaMem(&postBN_4G, quadG_numBytes);
ReallocCudaMem(&postBN_4G_grad, quadG_numBytes);
ReallocCudaMem(&postReLU_4G, quadG_numBytes);
ReallocCudaMem(&postReLU_4G_grad, quadG_numBytes);
ReallocCudaMem(&postConv_4G_grad, quadG_numBytes);
if (BC_ultra_spaceEfficient) {
ReallocCudaMem(&postConv_4G, quadG_numBytes);
}
}
}
if (useBC)
{
cudnn::setTensor4dDesc<Dtype>(quadG_tensorDesc, newn, 4 * growthRate, h, w, 4 * growthRate*h*w, h*w, w, 1);
}
for (int i = 0; i < this->numTransition; ++i)
{
int conv_x_channels = this->initChannel + this->growthRate * i;
cudnn::setTensor4dDesc<Dtype>(this->tensorDescriptorVec_conv_x[i], newn, conv_x_channels, h, w, (this->numTransition*this->growthRate + this->initChannel)*h*w, h*w, w, 1);
}
}
template void DenseBlockLayer<float>::reshape_gpu_data(int oldh, int oldw, int oldn, int h, int w, int newn);
template void DenseBlockLayer<double>::reshape_gpu_data(int oldh, int oldw, int oldn, int h, int w, int newn);
template void DenseBlockLayer<float>::GPU_Initialization();
template void DenseBlockLayer<double>::GPU_Initialization();
INSTANTIATE_LAYER_GPU_FUNCS(DenseBlockLayer);
} // namespace caffe
| 14956aee705b6ece8442a2b209e1dea797ff59bf.cu | #include <time.h>
#include <vector>
#include <string>
#include <iostream>
#include <fstream>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <ctime>
//#include <dirent.h>
//#include <boost/lexical_cast.hpp>
//#include <boost/filesystem.hpp>
//#include <boost/algorithm/string.hpp>
#include "wstd/string.hpp"
#include "caffe/layers/denseblock_layer.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/util/cudnn.hpp"
namespace caffe {
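//Filesystem helpers: the dirent/boost implementations are commented out, so dirExists_cu always returns true and tryCreateDirectory_cu is a no-op.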
bool dirExists_cu(string dirStr) {
/* const char* dirCStr = dirStr.c_str();
DIR* dir = opendir(dirCStr);
if (ENOENT == errno){
return false;
}
closedir(dir);*/
return true;
}
void tryCreateDirectory_cu(string fileName) {
/* vector<string> strVec;
boost::split(strVec,fileName,boost::is_any_of("/"));
string newStr="";
for (int i=0;i<strVec.size()-1;++i){
newStr += strVec[i] + (i==strVec.size()-2?"":"/");
}
boost::filesystem::path dirToCreate(newStr);
if (!dirExists_cu(newStr)){
boost::filesystem::create_directories(dirToCreate);
}*/
}
string itos_cu(int i) {
char buf[32] = "";
//_itoa(i, buf, 10);
sprintf(buf,"%d", i);
return buf;
}
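//Scatters numChunks contiguous chunks of chunkSize_input elements from inPtr_gpu into outPtr_gpu, spacing them chunkStride_output elements apart (one device-to-device copy per chunk).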
template <typename Dtype>
void gpu_copy_one_to_many(const Dtype* inPtr_gpu, Dtype* outPtr_gpu, int numChunks, int chunkSize_input, int chunkStride_output) {
for (int chunkIdx = 0; chunkIdx < numChunks; ++chunkIdx) {
const Dtype* inPtr_local = inPtr_gpu + chunkIdx*chunkSize_input;
Dtype* outPtr_local = outPtr_gpu + chunkIdx*chunkStride_output;
//printf("inpointer %p\n",inPtr_gpu);
//printf("outpointer %p\n",outPtr_gpu);
CUDA_CHECK(cudaMemcpy(outPtr_local, inPtr_local, chunkSize_input * sizeof(Dtype), cudaMemcpyDeviceToDevice));
}
}
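//Gathers numChunks chunks of chunkSize_output elements, spaced chunkStride_input elements apart in inPtr_gpu, into a contiguous outPtr_gpu.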
template <typename Dtype>
void gpu_copy_many_to_one(Dtype* inPtr_gpu, Dtype* outPtr_gpu, int numChunks, int chunkSize_output, int chunkStride_input) {
for (int chunkIdx = 0; chunkIdx < numChunks; ++chunkIdx) {
Dtype* inPtr_local = inPtr_gpu + chunkIdx*chunkStride_input;
Dtype* outPtr_local = outPtr_gpu + chunkIdx*chunkSize_output;
CUDA_CHECK(cudaMemcpy(outPtr_local, inPtr_local, chunkSize_output * sizeof(Dtype), cudaMemcpyDeviceToDevice));
}
}
template <typename Dtype>
void print_gpuPtr(Dtype* gpuPtr, int numValues) {
Dtype* cpuPtr = new Dtype[numValues];
cudaMemcpy(cpuPtr, gpuPtr, numValues * sizeof(Dtype), cudaMemcpyDeviceToHost);
for (int i = 0; i < numValues; ++i) {
std::cout << cpuPtr[i] << ",";
}
std::cout << std::endl;
delete[] cpuPtr;
}
template <typename Dtype>
void log_gpuPtr(Dtype* gpuPtr, int numValues, string fileName) {
Dtype* cpuPtr = new Dtype[numValues];
cudaMemcpy(cpuPtr, gpuPtr, numValues * sizeof(Dtype), cudaMemcpyDeviceToHost);
const char* fileName_cstr = fileName.c_str();
tryCreateDirectory_cu(fileName_cstr);
std::ofstream outWriter(fileName_cstr, std::ofstream::out);
for (int i = 0; i < numValues; ++i) {
outWriter << cpuPtr[i] << ",";
}
outWriter << std::endl;
delete[] cpuPtr;
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::logInternal_gpu(string dir, int TIdx, bool logDynamic, bool logDiff) {
string localDir = dir + "/gpu_" + itos_cu(this->logId) + "/";
if (logDynamic) {
int postBufferSize = this->N * (this->initChannel + this->growthRate * this->numTransition) * this->H * this->W;
int quadGBufferSize = N * 4 * growthRate*H*W;
if (logDiff) {
//postConv_grad_gpu
log_gpuPtr<Dtype>(this->postConv_grad_gpu, postBufferSize, localDir + "postConv_grad_gpu_transition" + itos_cu(TIdx));
//postBN_grad_gpu
log_gpuPtr<Dtype>(this->postBN_grad_gpu, postBufferSize, localDir + "postBN_grad_gpu_transition" + itos_cu(TIdx));
//postReLU_grad_gpu
log_gpuPtr<Dtype>(this->postReLU_grad_gpu, postBufferSize, localDir + "postReLU_grad_gpu_transition" + itos_cu(TIdx));
//BC
if (useBC) {
//postConv_4G_grad
log_gpuPtr<Dtype>(this->postConv_4G_grad, quadGBufferSize, localDir + "postConv_4G_grad_transition" + itos_cu(TIdx));
//postBN_4G_grad
log_gpuPtr<Dtype>(this->postBN_4G_grad, quadGBufferSize, localDir + "postBN_4G_grad_transition" + itos_cu(TIdx));
//postReLU_4G_grad
log_gpuPtr<Dtype>(this->postReLU_4G_grad, quadGBufferSize, localDir + "postReLU_4G_grad_transition" + itos_cu(TIdx));
}
}
else {
//postConv_data_gpu
log_gpuPtr<Dtype>(this->postConv_data_gpu, postBufferSize, localDir + "postConv_data_gpu_transition" + itos_cu(TIdx));
//postBN_data_gpu
log_gpuPtr<Dtype>(this->postBN_data_gpu, postBufferSize, localDir + "postBN_data_gpu_transition" + itos_cu(TIdx));
//postReLU_data_gpu
log_gpuPtr<Dtype>(this->postReLU_data_gpu, postBufferSize, localDir + "postReLU_data_gpu_transition" + itos_cu(TIdx));
if (useBC) {
//postConv_4G
if (BC_ultra_spaceEfficient) {
log_gpuPtr<Dtype>(this->postConv_4G, quadGBufferSize, localDir + "postConv_4G_data_transition" + itos_cu(TIdx));
}
else {
log_gpuPtr<Dtype>(this->postConv_4GVec[TIdx], quadGBufferSize, localDir + "postConv_4G_data_transition" + itos_cu(TIdx));
}
//postBN_4G
log_gpuPtr<Dtype>(this->postBN_4G, quadGBufferSize, localDir + "postBN_4G_data_transition" + itos_cu(TIdx));
//postReLU_4G
log_gpuPtr<Dtype>(this->postReLU_4G, quadGBufferSize, localDir + "postReLU_4G_data_transition" + itos_cu(TIdx));
}
}
}
else {
for (int transitionIdx = 0; transitionIdx < this->numTransition; ++transitionIdx) {
int numChannel_moreWide = this->initChannel + this->growthRate * transitionIdx;
int numChannel_quadG = 4 * growthRate;
//global Mean/Variance
log_gpuPtr<Dtype>(this->blobs_[3 * this->numTransition + transitionIdx]->mutable_gpu_data(), numChannel_moreWide, localDir + "globalMean_gpu_transition" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[4 * this->numTransition + transitionIdx]->mutable_gpu_data(), numChannel_moreWide, localDir + "globalVariance_gpu_transition" + itos_cu(transitionIdx));
//ResultSaveMean/InvVariance
log_gpuPtr<Dtype>(this->ResultSaveMean_gpu[transitionIdx], numChannel_moreWide, localDir + "ResultSaveMean_gpu_transition" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->ResultSaveInvVariance_gpu[transitionIdx], numChannel_moreWide, localDir + "ResultSaveInvVariance_gpu_transition" + itos_cu(transitionIdx));
if (useBC) {
//global BC Mean/Variance
log_gpuPtr<Dtype>(this->blobs_[8 * numTransition + transitionIdx]->mutable_gpu_data(), numChannel_quadG, localDir + "globalMean_BC_transition" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[9 * numTransition + transitionIdx]->mutable_gpu_data(), numChannel_quadG, localDir + "globalVar_BC_transition" + itos_cu(transitionIdx));
//ResultSave BC Mean/InvVariance
log_gpuPtr<Dtype>(this->ResultSaveMean_BC[transitionIdx], numChannel_quadG, localDir + "ResultSaveMean_BC_transition" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->ResultSaveInvVariance_BC[transitionIdx], numChannel_quadG, localDir + "ResultSaveInvVariance_BC_transition" + itos_cu(transitionIdx));
}
//Filter_data/grad_gpu
int filterSize;
if (useBC) {
filterSize = 4 * growthRate*growthRate * 3 * 3;
}
else {
filterSize = (this->initChannel + this->growthRate*transitionIdx) * this->growthRate * 3 * 3;
}
log_gpuPtr<Dtype>(this->blobs_[transitionIdx]->mutable_gpu_data(), filterSize, localDir + "Filter_data_gpu_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[transitionIdx]->mutable_gpu_diff(), filterSize, localDir + "Filter_grad_gpu_" + itos_cu(transitionIdx));
//Scaler_data/grad_gpu
log_gpuPtr<Dtype>(this->blobs_[transitionIdx + this->numTransition]->mutable_gpu_diff(), numChannel_moreWide, localDir + "Scaler_grad_gpu_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[transitionIdx + this->numTransition]->mutable_gpu_data(), numChannel_moreWide, localDir + "Scaler_data_gpu_" + itos_cu(transitionIdx));
//Bias_data/grad_gpu
log_gpuPtr<Dtype>(this->blobs_[transitionIdx + 2 * this->numTransition]->mutable_gpu_diff(), numChannel_moreWide, localDir + "Bias_grad_gpu_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[transitionIdx + 2 * this->numTransition]->mutable_gpu_data(), numChannel_moreWide, localDir + "Bias_data_gpu_" + itos_cu(transitionIdx));
if (useBC) {
//BC Filter
int filterBC_size = (initChannel + growthRate*transitionIdx) * 4 * growthRate * 1 * 1;
log_gpuPtr<Dtype>(this->blobs_[5 * numTransition + transitionIdx]->mutable_gpu_data(), filterBC_size, localDir + "Filter_data_BC_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[5 * numTransition + transitionIdx]->mutable_gpu_diff(), filterBC_size, localDir + "Filter_grad_BC_" + itos_cu(transitionIdx));
//BC scaler
log_gpuPtr<Dtype>(this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_diff(), numChannel_quadG, localDir + "Scaler_grad_BC_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_data(), numChannel_quadG, localDir + "Scaler_data_BC_" + itos_cu(transitionIdx));
//BC bias
log_gpuPtr<Dtype>(this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_diff(), numChannel_quadG, localDir + "Bias_grad_BC_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_data(), numChannel_quadG, localDir + "Bias_data_BC_" + itos_cu(transitionIdx));
}
}
}
}
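//Allocates the shared wide post-Conv/BN/ReLU/Dropout buffers, creates cuDNN handles, streams and descriptors, and selects convolution algorithms once per transition.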
template <typename Dtype>
void DenseBlockLayer<Dtype>::GPU_Initialization() {
//std::cout<<"Pre DeviceSet"<<std::endl;
//CUDA_CHECK(cudaSetDevice(1));
//std::cout<<"Post DeviceSet"<<std::endl;
//GPU intermediate ptrs
#if 1
int bufferSize_byte = this->N*(this->initChannel + this->growthRate*this->numTransition)*this->H*this->W * sizeof(Dtype);
CUDA_CHECK(cudaMalloc(&this->postConv_data_gpu, bufferSize_byte));
if (useDropout) {
CUDA_CHECK(cudaMalloc(&this->postDropout_data_gpu, bufferSize_byte));
}
CUDA_CHECK(cudaMalloc(&this->postBN_data_gpu, bufferSize_byte));
CUDA_CHECK(cudaMalloc(&this->postReLU_data_gpu, bufferSize_byte));
CUDA_CHECK(cudaMalloc(&this->postConv_grad_gpu, bufferSize_byte));
if (useDropout) {
CUDA_CHECK(cudaMalloc(&this->postDropout_grad_gpu, bufferSize_byte));
}
CUDA_CHECK(cudaMalloc(&this->postBN_grad_gpu, bufferSize_byte));
CUDA_CHECK(cudaMalloc(&this->postReLU_grad_gpu, bufferSize_byte));
cudaMemset(this->postConv_data_gpu, 0, bufferSize_byte);
cudaMemset(this->postBN_data_gpu, 0, bufferSize_byte);
cudaMemset(this->postReLU_data_gpu, 0, bufferSize_byte);
cudaMemset(this->postConv_grad_gpu, 0, bufferSize_byte);
cudaMemset(this->postBN_grad_gpu, 0, bufferSize_byte);
cudaMemset(this->postReLU_grad_gpu, 0, bufferSize_byte);
#endif
//workspace
CUDA_CHECK(cudaMalloc(&this->workspace, this->workspace_size_bytes));
cudaMemset(this->workspace, 0, this->workspace_size_bytes);
CUDA_CHECK(cudaMalloc(&this->workspace2, this->workspace_size_bytes));
cudaMemset(this->workspace2, 0, this->workspace_size_bytes);
//handles and descriptors
//cudnn handle
this->cudnnHandlePtr = new cudnnHandle_t;
cudaPrimalStream = new cudaStream_t;
CUDNN_CHECK(cudnnCreate(this->cudnnHandlePtr));
CUDA_CHECK(cudaStreamCreate(cudaPrimalStream));
//CUDNN_CHECK(cudnnSetStream(*cudnnHandlePtr,*cudaPrimalStream));
int extraHandle_num = 3;
for (int i = 0; i < extraHandle_num; ++i) {
cudnnHandle_t* localHandle = new cudnnHandle_t;
cudaStream_t* localStream = new cudaStream_t;
CUDNN_CHECK(cudnnCreate(localHandle));
CUDA_CHECK(cudaStreamCreate(localStream));
CUDNN_CHECK(cudnnSetStream(*localHandle, *localStream));
extraHandles.push_back(localHandle);
extraStreams.push_back(localStream);
}
//ReLU Activation Descriptor
this->ReLUDesc = new cudnnActivationDescriptor_t;
cudnn::createActivationDescriptor<Dtype>(ReLUDesc, CUDNN_ACTIVATION_RELU);
//conv_y global tensor descriptor
this->tensorDescriptor_conv_y = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(this->tensorDescriptor_conv_y);
#if 1
cudnn::setTensor4dDesc<Dtype>(this->tensorDescriptor_conv_y, this->N, this->growthRate, this->H, this->W, (this->numTransition*this->growthRate + this->initChannel)*this->H*this->W, this->H*this->W, this->W, 1);
#endif
//BC
int quadG_numValues = 4 * N*growthRate*H*W;
int quadG_numBytes = quadG_numValues * sizeof(Dtype);
if (useBC) {
#if 1
CUDA_CHECK(cudaMalloc(&postBN_4G, quadG_numBytes));
CUDA_CHECK(cudaMalloc(&postBN_4G_grad, quadG_numBytes));
CUDA_CHECK(cudaMalloc(&postReLU_4G, quadG_numBytes));
CUDA_CHECK(cudaMalloc(&postReLU_4G_grad, quadG_numBytes));
CUDA_CHECK(cudaMalloc(&postConv_4G_grad, quadG_numBytes));
cudaMemset(postBN_4G, 0, quadG_numBytes);
cudaMemset(postBN_4G_grad, 0, quadG_numBytes);
cudaMemset(postReLU_4G, 0, quadG_numBytes);
cudaMemset(postReLU_4G_grad, 0, quadG_numBytes);
cudaMemset(postConv_4G_grad, 0, quadG_numBytes);
if (BC_ultra_spaceEfficient) {
CUDA_CHECK(cudaMalloc(&postConv_4G, quadG_numBytes));
cudaMemset(postConv_4G, 0, quadG_numBytes);
}
#endif
quadG_tensorDesc = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(quadG_tensorDesc);
#if 1
cudnn::setTensor4dDesc<Dtype>(quadG_tensorDesc, N, 4 * growthRate, H, W, 4 * growthRate*H*W, H*W, W, 1);
#endif
quadG_paramDesc = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(quadG_paramDesc);
cudnn::setTensor4dDesc<Dtype>(quadG_paramDesc, 1, 4 * growthRate, 1, 1, 4 * growthRate, 1, 1, 1);
convBC_Descriptor = new cudnnConvolutionDescriptor_t;
CUDNN_CHECK(cudnnCreateConvolutionDescriptor(convBC_Descriptor));
CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*convBC_Descriptor, 0, 0, 1, 1, 1, 1, CUDNN_CONVOLUTION, cudnn::dataType<Dtype>::type));
}
//per transition variables
for (int i = 0; i < this->numTransition; ++i) {
//Result Running/Saving Mean/Variance/InvVariance
int localChannel = this->initChannel + i * this->growthRate;
Dtype* local_SaveMean;
Dtype* local_SaveInvVar;
CUDA_CHECK(cudaMalloc(&local_SaveMean, localChannel * sizeof(Dtype)));
CUDA_CHECK(cudaMalloc(&local_SaveInvVar, localChannel * sizeof(Dtype)));
cudaMemset(local_SaveMean, 0, localChannel * sizeof(Dtype));
cudaMemset(local_SaveInvVar, 0, localChannel * sizeof(Dtype));
this->ResultSaveMean_gpu.push_back(local_SaveMean);
this->ResultSaveInvVariance_gpu.push_back(local_SaveInvVar);
//conv_x descriptor
int conv_x_channels = this->initChannel + this->growthRate * i;
cudnnTensorDescriptor_t * wide_Desc_local_x = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(wide_Desc_local_x);
#if 1
cudnn::setTensor4dDesc<Dtype>(wide_Desc_local_x, this->N, conv_x_channels, this->H, this->W, (this->numTransition*this->growthRate + this->initChannel)*this->H*this->W, this->H*this->W, this->W, 1);
this->tensorDescriptorVec_conv_x.push_back(wide_Desc_local_x);
#endif
//filter Descriptor for Convolution
if (!useBC) {
cudnnFilterDescriptor_t * localFilterDesc = new cudnnFilterDescriptor_t;
cudnn::createFilterDesc<Dtype>(localFilterDesc, growthRate, conv_x_channels, 3, 3);
this->filterDescriptorVec.push_back(localFilterDesc);
}
else {
//3*3 convolution filter desc
cudnnFilterDescriptor_t * localFilterDesc = new cudnnFilterDescriptor_t;
cudnn::createFilterDesc<Dtype>(localFilterDesc, growthRate, 4 * growthRate, 3, 3);
this->filterDescriptorVec.push_back(localFilterDesc);
//1*1 convolution filter desc
cudnnFilterDescriptor_t * localBottleneckFilterDesc = new cudnnFilterDescriptor_t;
cudnn::createFilterDesc<Dtype>(localBottleneckFilterDesc, 4 * growthRate, conv_x_channels, 1, 1);
this->BC_filterDescriptorVec.push_back(localBottleneckFilterDesc);
}
//BN channel-wise Descriptor
int channelsBefore_self = initChannel + growthRate*i;
cudnnTensorDescriptor_t * BNparam = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(BNparam);
cudnn::setTensor4dDesc<Dtype>(BNparam, 1, channelsBefore_self, 1, 1);
this->tensorDescriptor_BN.push_back(BNparam);
//Dropout Ptr and Descriptor
if (useDropout) {
size_t * sizeState = new size_t[1];
size_t * sizeReserve = new size_t[1];
CUDNN_CHECK(cudnnDropoutGetStatesSize((*cudnnHandlePtr), sizeState));
CUDNN_CHECK(cudnnDropoutGetReserveSpaceSize(*tensorDescriptor_conv_y, sizeReserve));
dropout_reserveSize.push_back(sizeReserve[0]);
dropout_stateSize.push_back(sizeState[0]);
void* localStatePtr;
void* localReservePtr;
CUDA_CHECK(cudaMalloc(&localStatePtr, sizeState[0]));
CUDA_CHECK(cudaMalloc(&localReservePtr, sizeReserve[0]));
dropout_state_gpu.push_back(localStatePtr);
dropout_reserve_gpu.push_back(localReservePtr);
cudnnDropoutDescriptor_t* localDropoutDesc = new cudnnDropoutDescriptor_t;
cudnnCreateDropoutDescriptor(localDropoutDesc);
cudnnSetDropoutDescriptor(*localDropoutDesc, *cudnnHandlePtr, dropoutAmount, localStatePtr, sizeState[0], DB_randomSeed);
dropoutDescriptorVec.push_back(localDropoutDesc);
DB_randomSeed += 1;
}
//BC
if (useBC && (!BC_ultra_spaceEfficient)) {
Dtype* local_BC4G;
CUDA_CHECK(cudaMalloc(&local_BC4G, quadG_numValues * sizeof(Dtype)));
cudaMemset(local_BC4G, 0, quadG_numBytes);
postConv_4GVec.push_back(local_BC4G);
}
if (useBC) {
Dtype* BC_tmpMeanLocal;
Dtype* BC_tmpVarLocal;
int numChannel_BC = 4 * growthRate;
int byteChannel_BC = numChannel_BC * sizeof(Dtype);
CUDA_CHECK(cudaMalloc(&BC_tmpMeanLocal, numChannel_BC * sizeof(Dtype)));
CUDA_CHECK(cudaMalloc(&BC_tmpVarLocal, numChannel_BC * sizeof(Dtype)));
cudaMemset(BC_tmpMeanLocal, 0, byteChannel_BC);
cudaMemset(BC_tmpVarLocal, 0, byteChannel_BC);
BC_MeanInfVec.push_back(BC_tmpMeanLocal);
BC_VarInfVec.push_back(BC_tmpVarLocal);
Dtype* BC_localSaveMean;
Dtype* BC_localSaveInvVar;
CUDA_CHECK(cudaMalloc(&BC_localSaveMean, numChannel_BC * sizeof(Dtype)));
CUDA_CHECK(cudaMalloc(&BC_localSaveInvVar, numChannel_BC * sizeof(Dtype)));
cudaMemset(BC_localSaveMean, 0, byteChannel_BC);
cudaMemset(BC_localSaveInvVar, 0, byteChannel_BC);
ResultSaveMean_BC.push_back(BC_localSaveMean);
ResultSaveInvVariance_BC.push_back(BC_localSaveInvVar);
}
}
//Conv Descriptor
this->conv_Descriptor = new cudnnConvolutionDescriptor_t;
CUDNN_CHECK(cudnnCreateConvolutionDescriptor(this->conv_Descriptor));
CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*this->conv_Descriptor, 1, 1, 1, 1, 1, 1, CUDNN_CONVOLUTION, cudnn::dataType<Dtype>::type));
//Mean and Var tmp
int totalNumChannel = this->initChannel + this->growthRate * this->numTransition;
CUDA_CHECK(cudaMalloc(&this->Mean_tmp, totalNumChannel * sizeof(Dtype)));
CUDA_CHECK(cudaMalloc(&this->Var_tmp, totalNumChannel * sizeof(Dtype)));
//Convolution Algorithms
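//Query cuDNN heuristics once per transition for forward, backward-filter and backward-data algorithms under the workspace_size_bytes limit; the BC 1*1 convolutions get their own algorithm set.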
for (int transitionIdx = 0; transitionIdx < numTransition; ++transitionIdx) {
cudnnTensorDescriptor_t conv_x_desc;
cudnnTensorDescriptor_t conv_y_desc;
cudnnFilterDescriptor_t conv_w_desc;
cudnnTensorDescriptor_t BC_x_desc;
cudnnTensorDescriptor_t BC_y_desc;
cudnnFilterDescriptor_t BC_w_desc;
if (useBC) {
conv_x_desc = *(quadG_tensorDesc);
conv_y_desc = *(tensorDescriptor_conv_y);
conv_w_desc = *(filterDescriptorVec[transitionIdx]);
BC_x_desc = *(tensorDescriptorVec_conv_x[transitionIdx]);
BC_y_desc = *(quadG_tensorDesc);
BC_w_desc = *(BC_filterDescriptorVec[transitionIdx]);
}
else {
conv_x_desc = *(tensorDescriptorVec_conv_x[transitionIdx]);
conv_y_desc = *(tensorDescriptor_conv_y);
conv_w_desc = *(filterDescriptorVec[transitionIdx]);
}
//Conv Fwd Algo
cudnnConvolutionFwdAlgo_t* conv_FwdAlgo_local = new cudnnConvolutionFwdAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(
*cudnnHandlePtr,
conv_x_desc, conv_w_desc, *conv_Descriptor, conv_y_desc,
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, conv_FwdAlgo_local
));
conv_FwdAlgoVec.push_back(conv_FwdAlgo_local);
//Conv Bwd Filter Algo
cudnnConvolutionBwdFilterAlgo_t* conv_BwdFilter_local = new cudnnConvolutionBwdFilterAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionBackwardFilterAlgorithm(
*cudnnHandlePtr,
conv_x_desc, conv_y_desc, *conv_Descriptor, conv_w_desc,
CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, conv_BwdFilter_local
));
conv_BwdFilterAlgoVec.push_back(conv_BwdFilter_local);
//Conv Bwd Data Algo
cudnnConvolutionBwdDataAlgo_t* conv_BwdData_local = new cudnnConvolutionBwdDataAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionBackwardDataAlgorithm(
*(this->extraHandles[0]),
conv_w_desc, conv_y_desc, *conv_Descriptor, conv_x_desc,
CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, conv_BwdData_local
));
conv_BwdDataAlgoVec.push_back(conv_BwdData_local);
//BC Convolution
if (useBC) {
//BC Fwd Algo
cudnnConvolutionFwdAlgo_t* BC_FwdAlgo_local = new cudnnConvolutionFwdAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(
*cudnnHandlePtr,
BC_x_desc, BC_w_desc, *convBC_Descriptor, BC_y_desc,
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, BC_FwdAlgo_local
));
BC_FwdAlgoVec.push_back(BC_FwdAlgo_local);
//BC Bwd Filter Algo
cudnnConvolutionBwdFilterAlgo_t* BC_BwdFilter_local = new cudnnConvolutionBwdFilterAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionBackwardFilterAlgorithm(
*cudnnHandlePtr,
BC_x_desc, BC_y_desc, *convBC_Descriptor, BC_w_desc,
CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, BC_BwdFilter_local
));
BC_BwdFilterAlgoVec.push_back(BC_BwdFilter_local);
//BC Bwd Data Algo
cudnnConvolutionBwdDataAlgo_t* BC_BwdData_local = new cudnnConvolutionBwdDataAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionBackwardDataAlgorithm(
*(this->extraHandles[0]),
BC_w_desc, BC_y_desc, *convBC_Descriptor, BC_x_desc,
CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, BC_BwdData_local
));
BC_BwdDataAlgoVec.push_back(BC_BwdData_local);
}
}
}
template <typename Dtype>
void cleanupBuffer(Dtype* ptr_gpu, int count) {
cudaMemset(ptr_gpu, 0, count * sizeof(Dtype));
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::LoopEndCleanup_gpu() {
int valsBuffer = this->N * (this->initChannel + this->growthRate * this->numTransition) * this->H * this->W;
cleanupBuffer(this->postConv_data_gpu, valsBuffer);
cleanupBuffer(this->postConv_grad_gpu, valsBuffer);
if (useDropout) {
cleanupBuffer(this->postDropout_data_gpu, valsBuffer);
cleanupBuffer(this->postDropout_grad_gpu, valsBuffer);
}
cleanupBuffer(this->postBN_data_gpu, valsBuffer);
cleanupBuffer(this->postBN_grad_gpu, valsBuffer);
cleanupBuffer(this->postReLU_data_gpu, valsBuffer);
cleanupBuffer(this->postReLU_grad_gpu, valsBuffer);
int vals4G = N * 4 * growthRate*H*W;
if (useBC) {
cleanupBuffer(postConv_4G_grad, vals4G);
cleanupBuffer(postBN_4G_grad, vals4G);
cleanupBuffer(postReLU_4G_grad, vals4G);
}
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::resetDropoutDesc() {
for (int transitionIdx = 0; transitionIdx < numTransition; ++transitionIdx) {
std::cout << &(dropout_state_gpu[transitionIdx]) << "," << dropout_stateSize[transitionIdx] << std::endl;
CUDNN_CHECK(cudnnSetDropoutDescriptor(
*(dropoutDescriptorVec[transitionIdx]),
*(this->cudnnHandlePtr),
dropoutAmount,
dropout_state_gpu[transitionIdx],
dropout_stateSize[transitionIdx],
DB_randomSeed
));
DB_randomSeed++;
}
}
__global__ void sync_streams() {}
template <typename Dtype>
void DenseBlockLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
#if 0
if (!this->gpuInited) {
//std::cout<<"Initializing GPU local"<<std::endl;
this->GPU_Initialization();
this->gpuInited = true;
//std::cout<< "GPUInited"<< std::endl;
}
#endif
clock_t begin_fwd = std::clock();//timer
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
//copy bottom_data into the strided buffer
int chunkSize_copy_init = this->initChannel * this->H * this->W;
int chunkStride_copy = (this->initChannel + this->growthRate * this->numTransition) * this->H * this->W;
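//All transitions write into one wide N x (initChannel + numTransition*growthRate) x H x W buffer, so the DenseNet concatenation is implicit in the channel stride.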
if ((this->phase_ == TRAIN) && useDropout) {
gpu_copy_one_to_many<Dtype>(bottom_data, this->postDropout_data_gpu, this->N, chunkSize_copy_init, chunkStride_copy);
}
else {
gpu_copy_one_to_many<Dtype>(bottom_data, this->postConv_data_gpu, this->N, chunkSize_copy_init, chunkStride_copy);
}
int work_n = this->N * (this->initChannel + this->numTransition * this->growthRate) * this->H * this->W;
//work in the buffer, transition by transition
for (int transitionIdx = 0; transitionIdx < this->numTransition; ++transitionIdx) {
//BN Fwd
Dtype* BN_x_ptr;
if (this->phase_ == TRAIN && useDropout) {
BN_x_ptr = this->postDropout_data_gpu;
}
else {
BN_x_ptr = this->postConv_data_gpu;
}
Dtype* BN_y_ptr = this->postBN_data_gpu;
Dtype* BN_globalMean = this->blobs_[3 * this->numTransition + transitionIdx]->mutable_gpu_data();
Dtype* BN_globalVar = this->blobs_[4 * this->numTransition + transitionIdx]->mutable_gpu_data();
cudnnTensorDescriptor_t * BN_paramDesc = tensorDescriptor_BN[transitionIdx];
int numChannels = initChannel + growthRate*transitionIdx;
Dtype* local_MeanInf = this->Mean_tmp;
Dtype* local_VarInf = this->Var_tmp;
if (this->phase_ == TEST) {
CUDNN_CHECK(cudnnBatchNormalizationForwardInference(
*(this->cudnnHandlePtr), CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_x_ptr,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_y_ptr,
*BN_paramDesc,
this->blobs_[this->numTransition + transitionIdx]->gpu_data(),
this->blobs_[2 * this->numTransition + transitionIdx]->gpu_data(),
BN_globalMean, BN_globalVar, CUDNN_BN_MIN_EPSILON)
);
}
else {
Dtype* batchMean = this->ResultSaveMean_gpu[transitionIdx];
Dtype* batchInvVar = this->ResultSaveInvVariance_gpu[transitionIdx];
CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
*(this->cudnnHandlePtr), CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_x_ptr,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_y_ptr,
*BN_paramDesc,
this->blobs_[this->numTransition + transitionIdx]->mutable_gpu_data(),
this->blobs_[2 * this->numTransition + transitionIdx]->mutable_gpu_data(),
Dtype(1), local_MeanInf, local_VarInf, CUDNN_BN_MIN_EPSILON,
batchMean, batchInvVar)
);
//update global Mean/Var manually
//Mean:
caffe_gpu_axpby(numChannels, EMA_decay, local_MeanInf, Dtype(1.0 - EMA_decay), BN_globalMean);
//Var:
caffe_gpu_axpby(numChannels, EMA_decay, local_VarInf, Dtype(1.0 - EMA_decay), BN_globalVar);
}
//ReLU
Dtype* ReLU_x_ptr = this->postBN_data_gpu;
Dtype* ReLU_y_ptr = this->postReLU_data_gpu;
CUDNN_CHECK(cudnnActivationForward(*(this->cudnnHandlePtr), *ReLUDesc,
cudnn::dataType<Dtype>::one,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_x_ptr,
cudnn::dataType<Dtype>::zero,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_y_ptr)
);
if (useBC) {
//Convolution 1*1 kernel
Dtype* conv_x_4G = postReLU_data_gpu;
Dtype* conv_y_4G;
if (BC_ultra_spaceEfficient) {
conv_y_4G = postConv_4G;
}
else {
conv_y_4G = postConv_4GVec[transitionIdx];
}
//CONV_ALGO
CUDNN_CHECK(cudnnConvolutionForward(*(cudnnHandlePtr),
cudnn::dataType<Dtype>::one,
*this->tensorDescriptorVec_conv_x[transitionIdx], conv_x_4G,
*(BC_filterDescriptorVec[transitionIdx]),
this->blobs_[5 * numTransition + transitionIdx]->gpu_data(),
*(convBC_Descriptor), *BC_FwdAlgoVec[transitionIdx],
workspace, workspace_size_bytes, cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, conv_y_4G
));
//std::cout<<"BC Fwd Conv Done"<<std::endl;
//BN 4G Fwd
Dtype* BN_x_4G = BC_ultra_spaceEfficient ? postConv_4G : postConv_4GVec[transitionIdx];
Dtype* BN_y_4G = postBN_4G;
Dtype* BN_BC_globalMean = this->blobs_[8 * numTransition + transitionIdx]->mutable_gpu_data();
Dtype* BN_BC_globalVar = this->blobs_[9 * numTransition + transitionIdx]->mutable_gpu_data();
Dtype* localBC_MeanInf = BC_MeanInfVec[transitionIdx];
Dtype* localBC_VarInf = BC_VarInfVec[transitionIdx];
//std::cout<<"BC Fwd BN Prepared"<<std::endl;
if (this->phase_ == TEST) {
CUDNN_CHECK(cudnnBatchNormalizationForwardInference(
*cudnnHandlePtr, CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, BN_x_4G,
*quadG_tensorDesc, BN_y_4G,
*quadG_paramDesc,
this->blobs_[6 * numTransition + transitionIdx]->gpu_data(),
this->blobs_[7 * numTransition + transitionIdx]->gpu_data(),
BN_BC_globalMean, BN_BC_globalVar, CUDNN_BN_MIN_EPSILON)
);
}
else {
Dtype* BC_batchMean = ResultSaveMean_BC[transitionIdx];
Dtype* BC_batchInvVar = ResultSaveInvVariance_BC[transitionIdx];
CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
*cudnnHandlePtr, CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, BN_x_4G,
*quadG_tensorDesc, BN_y_4G,
*quadG_paramDesc,
this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_data(),
this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_data(),
Dtype(1), localBC_MeanInf, localBC_VarInf, CUDNN_BN_MIN_EPSILON,
BC_batchMean, BC_batchInvVar
));
caffe_gpu_axpby(4 * growthRate, EMA_decay, localBC_MeanInf, Dtype(1.0 - EMA_decay), BN_BC_globalMean);
caffe_gpu_axpby(4 * growthRate, EMA_decay, localBC_VarInf, Dtype(1.0 - EMA_decay), BN_BC_globalVar);
}
//std::cout<<"BC Fwd BN Done"<<std::endl;
//ReLU 4G Fwd
Dtype* ReLU_BC_x = postBN_4G;
Dtype* ReLU_BC_y = postReLU_4G;
CUDNN_CHECK(cudnnActivationForward(*cudnnHandlePtr, *ReLUDesc,
cudnn::dataType<Dtype>::one,
*quadG_tensorDesc, ReLU_BC_x,
cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, ReLU_BC_y
));
//std::cout<<"BC Fwd ReLU Done"<<std::endl;
}
//Convolution
int delayChannel = this->initChannel + this->growthRate * transitionIdx;
Dtype* conv_x_local;
cudnnTensorDescriptor_t* conv_x_localDesc;
if (useBC) {
conv_x_local = postReLU_4G;
conv_x_localDesc = quadG_tensorDesc;
}
else {
conv_x_local = postReLU_data_gpu;
conv_x_localDesc = tensorDescriptorVec_conv_x[transitionIdx];
}
Dtype* conv_y_local = this->postConv_data_gpu + delayChannel * this->H * this->W;
//CONV_ALGO
CUDNN_CHECK(cudnnConvolutionForward(*(this->cudnnHandlePtr),
cudnn::dataType<Dtype>::one,
*conv_x_localDesc, conv_x_local,
*(filterDescriptorVec[transitionIdx]),
this->blobs_[transitionIdx]->gpu_data(),
*conv_Descriptor, *conv_FwdAlgoVec[transitionIdx],
workspace, workspace_size_bytes, cudnn::dataType<Dtype>::zero,
*(tensorDescriptor_conv_y), conv_y_local
)
);
//Dropout
if ((this->phase_ == TRAIN) && useDropout) {
Dtype* dropout_x_local = postConv_data_gpu + delayChannel*H*W;
Dtype* dropout_y_local = postDropout_data_gpu + delayChannel*H*W;
CUDNN_CHECK(cudnnDropoutForward(*(this->cudnnHandlePtr),
*(dropoutDescriptorVec[transitionIdx]),
*tensorDescriptor_conv_y, dropout_x_local,
*tensorDescriptor_conv_y, dropout_y_local,
dropout_reserve_gpu[transitionIdx], dropout_reserveSize[transitionIdx]
));
}
//this->logInternal_gpu("TClogFwd",transitionIdx,true,false);
}
//deploy top data
if ((this->phase_ == TRAIN) && useDropout) {
cudaMemcpy(top[0]->mutable_gpu_data(), postDropout_data_gpu, work_n * sizeof(Dtype), cudaMemcpyDeviceToDevice);
}
else {
cudaMemcpy(top[0]->mutable_gpu_data(), postConv_data_gpu, work_n * sizeof(Dtype), cudaMemcpyDeviceToDevice);
}
//clock_t end_fwd = std::clock();
//double elapsed_fwd = double(end_fwd - begin_fwd) / CLOCKS_PER_SEC;
//std::cout<<"elapsed fwd gpu:"<<elapsed_fwd<<std::endl;
//this->logInternal_gpu("TClogFwd",-1,false,false);
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
#if 0
if (!this->gpuInited) {
this->GPU_Initialization();
this->gpuInited = true;
}
#endif
//clock_t begin_bwd = std::clock();
//assuming the buffers already hold the values computed during Forward; always propagate down
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int work_n = N * (initChannel + growthRate*numTransition) * H * W;
//deploy top diff
if (useDropout) {
cudaMemcpy(postDropout_grad_gpu, top[0]->mutable_gpu_diff(), work_n * sizeof(Dtype), cudaMemcpyDeviceToDevice);
}
else {
cudaMemcpy(postConv_grad_gpu, top[0]->mutable_gpu_diff(), work_n * sizeof(Dtype), cudaMemcpyDeviceToDevice);
}
//Backward, transition by transition
for (int transitionIdx = this->numTransition - 1; transitionIdx >= 0; --transitionIdx) {
int channelsBefore_self = this->initChannel + transitionIdx * this->growthRate;
//Using BN & ReLU Fwd to generate corresponding postBN,postReLU data for this transition
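//Recompute this transition's postBN/postReLU activations (the shared buffers no longer hold them) before running the backward passes below.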
//BN Fwd
Dtype* BN_x_ptr;
if (useDropout) {
BN_x_ptr = postDropout_data_gpu;
}
else {
BN_x_ptr = postConv_data_gpu;
}
Dtype* BN_y_ptr = postBN_data_gpu;
Dtype* BN_globalMean = this->blobs_[3 * this->numTransition + transitionIdx]->mutable_gpu_data();
Dtype* BN_globalVar = this->blobs_[4 * this->numTransition + transitionIdx]->mutable_gpu_data();
cudnnTensorDescriptor_t* BN_paramDesc = tensorDescriptor_BN[transitionIdx];
Dtype* local_MeanInf = Mean_tmp;
Dtype* local_VarInf = Var_tmp;
Dtype* batchMean = this->ResultSaveMean_gpu[transitionIdx];
Dtype* batchInvVar = this->ResultSaveInvVariance_gpu[transitionIdx];
CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
*(this->cudnnHandlePtr), CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_x_ptr,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_y_ptr,
*BN_paramDesc,
this->blobs_[this->numTransition + transitionIdx]->mutable_gpu_data(),
this->blobs_[2 * this->numTransition + transitionIdx]->mutable_gpu_data(),
Dtype(1), local_MeanInf, local_VarInf, CUDNN_BN_MIN_EPSILON,
batchMean, batchInvVar)
);
/*CUDNN_CHECK(cudnnBatchNormalizationForwardInference(
*(this->cudnnHandlePtr),CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one,cudnn::dataType<Dtype>::zero,
*(this->tensorDescriptorVec_conv_x[transitionIdx]),BN_x_ptr,
*(this->tensorDescriptorVec_conv_x[transitionIdx]),BN_y_ptr,
*BN_paramDesc,
this->blobs_[this->numTransition+transitionIdx]->gpu_data(),
this->blobs_[2*this->numTransition+transitionIdx]->gpu_data(),
local_MeanInf,local_VarInf,CUDNN_BN_MIN_EPSILON)
);*/
//ReLU Fwd
Dtype* ReLU_x_ptr = this->postBN_data_gpu;
Dtype* ReLU_y_ptr = this->postReLU_data_gpu;
CUDNN_CHECK(cudnnActivationForward(*(this->cudnnHandlePtr), *ReLUDesc,
cudnn::dataType<Dtype>::one,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_x_ptr,
cudnn::dataType<Dtype>::zero,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_y_ptr)
);
if (useBC) {
//Fwd phase
//If BC ultra space-efficient mode is on, the 1*1 conv output was not kept from Forward, so redo the 1*1 convolution Fwd here
//CONV_ALGO
if (BC_ultra_spaceEfficient) {
Dtype* conv_x_4G = postReLU_data_gpu;
Dtype* conv_y_4G = postConv_4G;
CUDNN_CHECK(cudnnConvolutionForward(*cudnnHandlePtr,
cudnn::dataType<Dtype>::one,
*this->tensorDescriptorVec_conv_x[transitionIdx], conv_x_4G,
*(BC_filterDescriptorVec[transitionIdx]),
this->blobs_[5 * numTransition + transitionIdx]->gpu_data(),
*(convBC_Descriptor), *BC_FwdAlgoVec[transitionIdx],
workspace, workspace_size_bytes, cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, conv_y_4G
));
}
//cudnnHandle_t * localFwdHandle = BC_ultra_spaceEfficient?cudnnHandlePtr:extraHandles[0];//TODO
cudnnHandle_t * localFwdHandle = cudnnHandlePtr;
//BC BN Fwd reconstruction
Dtype* BN_x_4G = BC_ultra_spaceEfficient ? postConv_4G : postConv_4GVec[transitionIdx];
Dtype* BN_y_4G = postBN_4G;
Dtype* localBC_MeanInf = BC_MeanInfVec[transitionIdx];
Dtype* localBC_VarInf = BC_VarInfVec[transitionIdx];
Dtype* BC_batchMean = ResultSaveMean_BC[transitionIdx];
Dtype* BC_batchInvVar = ResultSaveInvVariance_BC[transitionIdx];
CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
*cudnnHandlePtr, CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, BN_x_4G,
*quadG_tensorDesc, BN_y_4G,
*quadG_paramDesc,
this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_data(),
this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_data(),
Dtype(1), localBC_MeanInf, localBC_VarInf, CUDNN_BN_MIN_EPSILON,
BC_batchMean, BC_batchInvVar
));
/*CUDNN_CHECK(cudnnBatchNormalizationForwardInference(
*localFwdHandle,CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one,cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc,BN_x_4G,
*quadG_tensorDesc,BN_y_4G,
*quadG_paramDesc,
this->blobs_[6*numTransition+transitionIdx]->gpu_data(),
this->blobs_[7*numTransition+transitionIdx]->gpu_data(),
localBC_MeanInf,localBC_VarInf,CUDNN_BN_MIN_EPSILON
));*/
//BC ReLU Fwd reconstruction
Dtype* ReLU_BC_x = postBN_4G;
Dtype* ReLU_BC_y = postReLU_4G;
CUDNN_CHECK(cudnnActivationForward(*localFwdHandle, *ReLUDesc,
cudnn::dataType<Dtype>::one,
*quadG_tensorDesc, ReLU_BC_x,
cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, ReLU_BC_y
));
}
//CUDA_CHECK(cudaStreamSynchronize(*(extraStreams[0])));
//sync_streams<<<1, 1>>>();
//Now do Bwd
//Dropout
if (useDropout) {
Dtype* dropout_dy_ptr = postDropout_grad_gpu + channelsBefore_self*H*W;
Dtype* dropout_dx_ptr = postConv_grad_gpu + channelsBefore_self*H*W;
CUDNN_CHECK(cudnnDropoutBackward(*(this->cudnnHandlePtr),
*(dropoutDescriptorVec[transitionIdx]),
*tensorDescriptor_conv_y, dropout_dy_ptr,
*tensorDescriptor_conv_y, dropout_dx_ptr,
dropout_reserve_gpu[transitionIdx], dropout_reserveSize[transitionIdx]
));
}
//Conv
Dtype* filterGrad_local = this->blobs_[transitionIdx]->mutable_gpu_diff();
Dtype* filterData_local = this->blobs_[transitionIdx]->mutable_gpu_data();
Dtype* conv_x_local = useBC ? postReLU_4G : postReLU_data_gpu;
Dtype* conv_dy_local = postConv_grad_gpu + channelsBefore_self * this->H * this->W;
Dtype* conv_dx_local = useBC ? postReLU_4G_grad : postReLU_grad_gpu;
cudnnTensorDescriptor_t * conv_x_localDesc = useBC ? quadG_tensorDesc : tensorDescriptorVec_conv_x[transitionIdx];
//Conv w.r.t. filter
//CONV_ALGO
CUDNN_CHECK(cudnnConvolutionBackwardFilter(*(this->cudnnHandlePtr),
cudnn::dataType<Dtype>::one,
*conv_x_localDesc, conv_x_local,
*(this->tensorDescriptor_conv_y), conv_dy_local,
*(this->conv_Descriptor), *conv_BwdFilterAlgoVec[transitionIdx],
this->workspace, this->workspace_size_bytes,
cudnn::dataType<Dtype>::one,
*(this->filterDescriptorVec[transitionIdx]), filterGrad_local
)
);
//Conv w.r.t. x
//CONV_ALGO
CUDNN_CHECK(cudnnConvolutionBackwardData(*(this->extraHandles[0]),
cudnn::dataType<Dtype>::one,
*(this->filterDescriptorVec[transitionIdx]), filterData_local,
*(this->tensorDescriptor_conv_y), conv_dy_local,
*(this->conv_Descriptor), *conv_BwdDataAlgoVec[transitionIdx],
this->workspace2, this->workspace_size_bytes,
cudnn::dataType<Dtype>::zero,
*conv_x_localDesc, conv_dx_local
)
);
sync_streams<<<1, 1>>>();
if (useBC) {
//BC ReLU Bwd
Dtype* BC_ReLU_y_local = postReLU_4G;
Dtype* BC_ReLU_dy_local = postReLU_4G_grad;
Dtype* BC_ReLU_x_local = postBN_4G;
Dtype* BC_ReLU_dx_local = postBN_4G_grad;
CUDNN_CHECK(cudnnActivationBackward(*cudnnHandlePtr, *ReLUDesc,
cudnn::dataType<Dtype>::one,
*quadG_tensorDesc, BC_ReLU_y_local,
*quadG_tensorDesc, BC_ReLU_dy_local,
*quadG_tensorDesc, BC_ReLU_x_local,
cudnn::dataType<Dtype>::zero,
*quadG_tensorDesc, BC_ReLU_dx_local
));
//BC BN Bwd
Dtype* BC_BN_x_local = BC_ultra_spaceEfficient ? postConv_4G : postConv_4GVec[transitionIdx];
Dtype* BC_BN_dx_local = postConv_4G_grad;
Dtype* BC_BN_dy_local = postBN_4G_grad;
Dtype* BC_saveMean_local = ResultSaveMean_BC[transitionIdx];
Dtype* BC_saveInvVar_local = ResultSaveInvVariance_BC[transitionIdx];
// CUDNN_CHECK(
cudnnStatus_t sta =
cudnnBatchNormalizationBackward(
*cudnnHandlePtr,
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one,
cudnn::dataType<Dtype>::zero,
#if CUDNN_VERSION >= 4005
cudnn::dataType<Dtype>::one,
cudnn::dataType<Dtype>::one,
#endif
*quadG_tensorDesc,
BC_BN_x_local,
*quadG_tensorDesc,
BC_BN_dy_local,
*quadG_tensorDesc,
BC_BN_dx_local,
*quadG_paramDesc,
this->blobs_[6 * numTransition + transitionIdx]->gpu_data(),
this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_diff(),
this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_diff(),
CUDNN_BN_MIN_EPSILON,
BC_saveMean_local,
BC_saveInvVar_local
);
//);
//BC Conv 1*1 Bwd
Dtype* BC_filterGrad = this->blobs_[5 * numTransition + transitionIdx]->mutable_gpu_diff();
Dtype* BC_filterData = this->blobs_[5 * numTransition + transitionIdx]->mutable_gpu_data();
Dtype* BC_conv_x_local = postReLU_data_gpu;
Dtype* BC_conv_dy_local = postConv_4G_grad;
Dtype* BC_conv_dx_local = postReLU_grad_gpu;
//Conv Bwd w.r.t. filter
//CONV_ALGO
CUDNN_CHECK(cudnnConvolutionBackwardFilter(*cudnnHandlePtr,
cudnn::dataType<Dtype>::one,
*tensorDescriptorVec_conv_x[transitionIdx], BC_conv_x_local,
*quadG_tensorDesc, BC_conv_dy_local,
*convBC_Descriptor, *BC_BwdFilterAlgoVec[transitionIdx],
workspace, workspace_size_bytes,
cudnn::dataType<Dtype>::one,
*BC_filterDescriptorVec[transitionIdx], BC_filterGrad
));
//Conv Bwd w.r.t. data
//CONV_ALGO
CUDNN_CHECK(cudnnConvolutionBackwardData(*(extraHandles[0]),
cudnn::dataType<Dtype>::one,
*BC_filterDescriptorVec[transitionIdx], BC_filterData,
*quadG_tensorDesc, BC_conv_dy_local,
*convBC_Descriptor, *BC_BwdDataAlgoVec[transitionIdx],
workspace2, workspace_size_bytes,
cudnn::dataType<Dtype>::zero,
*tensorDescriptorVec_conv_x[transitionIdx], BC_conv_dx_local
));
sync_streams<<<1, 1>>>();
}
//ReLU Bwd
Dtype* ReLU_y_local = postReLU_data_gpu;
Dtype* ReLU_x_local = postBN_data_gpu;
Dtype* ReLU_dy_local = postReLU_grad_gpu;
Dtype* ReLU_dx_local = postBN_grad_gpu;
CUDNN_CHECK(cudnnActivationBackward(*(this->cudnnHandlePtr), *ReLUDesc,
cudnn::dataType<Dtype>::one,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_y_local,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_dy_local,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_x_local,
cudnn::dataType<Dtype>::zero,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_dx_local)
);
//BN Bwd
Dtype* BN_x_local;
Dtype* BN_dx_local;
if (useDropout) {
BN_x_local = this->postDropout_data_gpu;
BN_dx_local = this->postDropout_grad_gpu;
}
else {
BN_x_local = this->postConv_data_gpu;
BN_dx_local = this->postConv_grad_gpu;
}
Dtype* BN_dy_local = this->postBN_grad_gpu;
Dtype* saveMean_local = this->ResultSaveMean_gpu[transitionIdx];
Dtype* saveInvVar_local = this->ResultSaveInvVariance_gpu[transitionIdx];
//CUDNN_CHECK(
cudnnBatchNormalizationBackward(*(this->cudnnHandlePtr),
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::one,
#if CUDNN_VERSION >= 4005
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::one,
#endif
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_x_local,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_dy_local,
*(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_dx_local,
*BN_paramDesc,
this->blobs_[this->numTransition + transitionIdx]->gpu_data(),
this->blobs_[this->numTransition + transitionIdx]->mutable_gpu_diff(),
this->blobs_[2 * this->numTransition + transitionIdx]->mutable_gpu_diff(),
CUDNN_BN_MIN_EPSILON, saveMean_local, saveInvVar_local
);
//);
//this->logInternal_gpu("TClogBwd",transitionIdx,true,false);
//this->logInternal_gpu("TClogBwd",transitionIdx,true,true);
}
//deploy buffer to bottom diff
//this->logInternal_gpu("TClogBwd",-1,false,false);
int chunkSize_copy_init = this->initChannel * this->H * this->W;
int chunkStride_copy = (this->initChannel + this->numTransition * this->growthRate) * this->H * this->W;
if (useDropout) {
gpu_copy_many_to_one(postDropout_grad_gpu, bottom_diff, this->N, chunkSize_copy_init, chunkStride_copy);
//this->resetDropoutDesc();
}
else {
gpu_copy_many_to_one(postConv_grad_gpu, bottom_diff, this->N, chunkSize_copy_init, chunkStride_copy);
}
int numTotalChannels = initChannel + growthRate*numTransition;
cleanupBuffer(this->Mean_tmp, numTotalChannels);
cleanupBuffer(this->Var_tmp, numTotalChannels);
this->LoopEndCleanup_gpu();
//clock_t end_bwd = std::clock();
//double elapsed_bwd = double(end_bwd - begin_bwd) / CLOCKS_PER_SEC;
//std::cout<<"elapsed bwd time:"<<elapsed_bwd<<std::endl;
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::Forward_gpu_public(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
this->Forward_gpu(bottom, top);
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::Backward_gpu_public(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
this->Backward_gpu(top, propagate_down, bottom);
}
template <typename Dtype>
void ReallocCudaMem(Dtype** p, int size)
{
hipFree(*p); *p = 0;
CUDA_CHECK(hipMalloc(p, size));
hipMemset(*p, 0, size);
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::reshape_gpu_data(int oldh, int oldw,int oldn, int h, int w,int newn)
{
int bufferSize_byte_old = oldn*(this->initChannel + this->growthRate*this->numTransition)*oldh*oldw * sizeof(Dtype);
int bufferSize_byte_new = newn*(this->initChannel + this->growthRate*this->numTransition)*h*w * sizeof(Dtype);
if (bufferSize_byte_new > bufferSize_byte_old)
{
int bufferSize_byte = bufferSize_byte_new;
ReallocCudaMem(&this->postConv_data_gpu, bufferSize_byte);
if (useDropout) {
ReallocCudaMem(&this->postDropout_data_gpu, bufferSize_byte);
}
ReallocCudaMem(&this->postBN_data_gpu, bufferSize_byte);
ReallocCudaMem(&this->postReLU_data_gpu, bufferSize_byte);
ReallocCudaMem(&this->postConv_grad_gpu, bufferSize_byte);
if (useDropout)
{
ReallocCudaMem(&this->postDropout_grad_gpu, bufferSize_byte);
}
ReallocCudaMem(&this->postBN_grad_gpu, bufferSize_byte);
ReallocCudaMem(&this->postReLU_grad_gpu, bufferSize_byte);
}
cudnn::setTensor4dDesc<Dtype>(this->tensorDescriptor_conv_y, newn, this->growthRate, h, w, (this->numTransition*this->growthRate + this->initChannel)*h*w, h*w, w, 1);
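// conv_y writes into the shared concatenated buffer, so its descriptor keeps the full
// per-sample channel stride (numTransition*growthRate + initChannel)*h*w instead of a
// densely packed growthRate*h*w.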
int quadG_numValues_old = 4 * newn*growthRate*oldh*oldw;
int quadG_numValues = 4 * newn*growthRate*h*w;
int quadG_numBytes = quadG_numValues * sizeof(Dtype);
if (quadG_numValues > quadG_numValues_old)
{
if (useBC)
{
ReallocCudaMem(&postBN_4G, quadG_numBytes);
ReallocCudaMem(&postBN_4G_grad, quadG_numBytes);
ReallocCudaMem(&postReLU_4G, quadG_numBytes);
ReallocCudaMem(&postReLU_4G_grad, quadG_numBytes);
ReallocCudaMem(&postConv_4G_grad, quadG_numBytes);
if (BC_ultra_spaceEfficient) {
ReallocCudaMem(&postConv_4G, quadG_numBytes);
}
}
}
if (useBC)
{
cudnn::setTensor4dDesc<Dtype>(quadG_tensorDesc, newn, 4 * growthRate, h, w, 4 * growthRate*h*w, h*w, w, 1);
}
for (int i = 0; i < this->numTransition; ++i)
{
int conv_x_channels = this->initChannel + this->growthRate * i;
cudnn::setTensor4dDesc<Dtype>(this->tensorDescriptorVec_conv_x[i], newn, conv_x_channels, h, w, (this->numTransition*this->growthRate + this->initChannel)*h*w, h*w, w, 1);
}
}
template void DenseBlockLayer<float>::reshape_gpu_data(int oldh, int oldw, int oldn, int h, int w, int newn);
template void DenseBlockLayer<double>::reshape_gpu_data(int oldh, int oldw, int oldn, int h, int w, int newn);
template void DenseBlockLayer<float>::GPU_Initialization();
template void DenseBlockLayer<double>::GPU_Initialization();
INSTANTIATE_LAYER_GPU_FUNCS(DenseBlockLayer);
} // namespace caffe
|
c6e827e5770b67e49cdb0b3851473064b0749cfa.hip | // !!! This is a file automatically generated by hipify!!!
/**
* CUDA Point Alignment
* George Stathopoulos, Jenny Lee, Mary Giambrone, 2019*/
#include <cstdio>
#include <stdio.h>
#include <fstream>
#include <hip/hip_runtime.h>
#include <hipblas.h>
#include <hipsolver.h>
#include "helper_cuda.h"
#include <string>
#include <fstream>
#include "obj_structures.h"
// helper_cuda.h contains the error checking macros. note that they're called
// CUDA_CALL, CUBLAS_CALL, and CUSOLVER_CALL instead of the previous names
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
int main(int argc, char *argv[]) {
if (argc != 4)
{
printf("Usage: ./point_alignment [file1.obj] [file2.obj] [output.obj]\n");
return 1;
}
std::string filename, filename2, output_filename;
filename = argv[1];
filename2 = argv[2];
output_filename = argv[3];
std::cout << "Aligning " << filename << " with " << filename2 << std::endl;
Object obj1 = read_obj_file(filename);
std::cout << "Reading " << filename << ", which has " << obj1.vertices.size() << " vertices" << std::endl;
Object obj2 = read_obj_file(filename2);
std::cout << "Reading " << filename2 << ", which has " << obj2.vertices.size() << " vertices" << std::endl;
if (obj1.vertices.size() != obj2.vertices.size())
{
printf("Error: number of vertices in the obj files do not match.\n");
return 1;
}
///////////////////////////////////////////////////////////////////////////
// Loading in obj into vertex Array
///////////////////////////////////////////////////////////////////////////
int point_dim = 4; // 3 spatial + 1 homogeneous
int num_points = obj1.vertices.size();
// in col-major
float * x1mat = vertex_array_from_obj(obj1);
float * x2mat = vertex_array_from_obj(obj2);
///////////////////////////////////////////////////////////////////////////
// Point Alignment
///////////////////////////////////////////////////////////////////////////
// TODO: Initialize cublas handle
hipblasHandle_t handle;
CUBLAS_CALL(hipblasCreate(&handle));
float * dev_x1mat;
float * dev_x2mat;
float * dev_xx4x4;
float * dev_x1Tx2;
// TODO: Allocate device memory and copy over the data onto the device
// Hint: Use hipblasSetMatrix() for copying
// Leading dimensions
int ld1 = num_points; // leading dimension of x1 column-major
int ld2 = num_points; // leading dimension of x2 column-major
int ld3 = point_dim; // leading dimension of x1^(T)*x1 column-major
int ld4 = point_dim; // leading dimension of x1^(T)*x2 column-major
// Allocate the memory
CUDA_CALL(hipMalloc((void**)&dev_x1mat, ld1 * point_dim * sizeof(float))); // n by 4
CUDA_CALL(hipMalloc((void**)&dev_x2mat, ld2 * point_dim * sizeof(float))); // n by 4
CUDA_CALL(hipMalloc((void**)&dev_xx4x4, point_dim * point_dim * sizeof(float))); // 4 by 4
CUDA_CALL(hipMalloc((void**)&dev_x1Tx2, point_dim * point_dim * sizeof(float))); // 4 by 4
CUBLAS_CALL(hipblasSetMatrix(num_points, point_dim, sizeof(float), x1mat, ld1, dev_x1mat, ld1));
CUBLAS_CALL(hipblasSetMatrix(num_points, point_dim, sizeof(float), x2mat, ld2, dev_x2mat, ld2));
// Now, proceed with the computations necessary to solve for the linear
// transformation.
float one = 1;
float zero = 0;
// cuBLAS transpose or no transpose operations.
hipblasOperation_t transOn = HIPBLAS_OP_T;
hipblasOperation_t transOff = HIPBLAS_OP_N;
// TODO: First calculate xx4x4 and x1Tx2
// Following two calls should correspond to:
// xx4x4 = Transpose[x1mat] . x1mat
// In English...
// Simple matrix matrix multiplication sgemm(handle, transpose condition, transpose condition, output m, output n,
// inner dimension, one = no addition of C afterwards, x1 data, leading dim x1, x1 data, leading dim x1,
// no addition of x1Tx1, leading dim x1Tx1).
CUBLAS_CALL(hipblasSgemm(handle, transOn, transOff, point_dim, point_dim, num_points,
&one, dev_x1mat, ld1, dev_x1mat, ld1, &zero, dev_xx4x4, ld3));
// x1Tx2 = Transpose[x1mat] . x2mat
CUBLAS_CALL(hipblasSgemm(handle, transOn, transOff, point_dim, point_dim, num_points,
&one, dev_x1mat, ld1, dev_x2mat, ld2, &zero, dev_x1Tx2, ld4));
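// dev_xx4x4 now holds X1^T.X1 and dev_x1Tx2 holds X1^T.X2, i.e. the normal equations
// (X1^T.X1).M^T = X1^T.X2 of the least-squares fit X1.M^T ~= X2.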
// TODO: Finally, solve the system using LU-factorization! We're solving
// xx4x4 . m4x4mat.T = x1Tx2 i.e. m4x4mat.T = Inverse[xx4x4] . x1Tx2
//
// Factorize xx4x4 into an L and U matrix, ie. xx4x4 = LU
//
// Then, solve the following two systems at once using cusolver's getrs
// L . temp = P . x1Tx2
// And then,
// U . m4x4mat = temp
//
// Generally, pre-factoring a matrix is a very good strategy when
// it is needed for repeated solves.
// TODO: Make handle for cuSolver
hipsolverDnHandle_t solver_handle;
CUSOLVER_CALL(hipsolverDnCreate(&solver_handle));
// TODO: Initialize work buffer using hipsolverDnSgetrf_bufferSize
float * work;
int Lwork;
CUSOLVER_CALL(hipsolverDnSgetrf_bufferSize(solver_handle, point_dim, point_dim, dev_xx4x4, point_dim, &Lwork));
// TODO: compute buffer size and prepare memory
CUDA_CALL(hipMalloc((void**)&work, Lwork * sizeof(float)));
// TODO: Initialize memory for pivot array, with a size of point_dim
int * pivots;
CUDA_CALL(hipMalloc((void**)&pivots, point_dim * sizeof(int))); // 4
int * info;
CUDA_CALL(hipMalloc((void**)&info, sizeof(int))); // 1
// TODO: Now, call the factorizer hipsolverDnSgetrf, using the above initialized data
CUSOLVER_CALL(hipsolverDnSgetrf(solver_handle, point_dim, point_dim, dev_xx4x4, point_dim,
work, pivots, info));
// TODO: Finally, solve the factorized version using a direct call to hipsolverDnSgetrs
CUSOLVER_CALL(hipsolverDnSgetrs(solver_handle, transOff, point_dim, point_dim, dev_xx4x4, point_dim,
pivots, dev_x1Tx2, point_dim, info));
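// getrs solves in place: dev_x1Tx2 is overwritten with the 4x4 solution M^T
// (column-major).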
// TODO: Destroy the cuSolver handle
CUSOLVER_CALL(hipsolverDnDestroy(solver_handle));
CUDA_CALL(hipFree(work));
CUDA_CALL(hipFree(pivots));
CUDA_CALL(hipFree(info));
// TODO: Copy final transformation back to host. Note that at this point
// the transformation matrix is transposed
float * out_transformation = (float *)malloc(point_dim * point_dim * sizeof(float));
CUBLAS_CALL(hipblasGetVector(point_dim * point_dim, sizeof(float), dev_x1Tx2, 1, out_transformation, 1));
// CUDA_CALL(hipMemcpy(out_transformation, dev_x1Tx2, sizeof(float) * point_dim * point_dim,
// hipMemcpyDeviceToHost));
// TODO: Don't forget to set the bottom row of the final transformation
// to [0,0,0,1] (right-most column of the transposed matrix)
for (int i = 0; i < 3; i++) {
out_transformation[IDX2C(i,3,4)] = 0;
}
out_transformation[IDX2C(3,3,4)] = 1;
// Print transformation in row order.
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
std::cout << out_transformation[i * point_dim + j] << " ";
}
std::cout << "\n";
}
///////////////////////////////////////////////////////////////////////////
// Transform point and print output object file
///////////////////////////////////////////////////////////////////////////
std::cout << "check 1 " << point_dim << " " << num_points << std::endl;
// TODO Allocate and Initialize data matrix
float * dev_pt;
CUDA_CALL(hipMalloc((void**)&dev_pt, point_dim * num_points * sizeof(float))); // n by 4
CUBLAS_CALL(hipblasSetMatrix(num_points, point_dim, sizeof(float), x1mat, num_points, dev_pt, num_points));
std::cout << "check 1" << std::endl;
// TODO Allocate and Initialize transformation matrix
float * dev_trans_mat;
CUDA_CALL(hipMalloc((void**)&dev_trans_mat, point_dim * point_dim * sizeof(float))); // 4 by 4
CUBLAS_CALL(hipblasSetMatrix(point_dim, point_dim, sizeof(float),
out_transformation, point_dim, dev_trans_mat, point_dim));
std::cout << "check 1" << std::endl;
// TODO Allocate and Initialize transformed points
float * dev_trans_pt;
CUDA_CALL(hipMalloc((void**)&dev_trans_pt, point_dim * num_points * sizeof(float))); // n by 4
std::cout << "check 1" << std::endl;
float one_d = 1;
float zero_d = 0;
// TODO Transform point matrix
// (4x4 trans_mat) . (nx4 point matrix)^T = (4xn transformed points)
CUBLAS_CALL(hipblasSgemm(handle, transOn, transOn, point_dim, num_points, point_dim,
&one_d, dev_trans_mat, point_dim, dev_pt, num_points, &zero_d, dev_trans_pt, point_dim));
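// Both operands are marked transposed: dev_trans_mat stores M^T so op(A) = M, and
// dev_pt stores the n x 4 point matrix so op(B) = X1^T, giving the 4 x n result.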
std::cout << "check 1" << std::endl;
// So now dev_trans_pt has shape (4 x n)
float * trans_pt = (float *)malloc(num_points * point_dim * sizeof(float));
CUDA_CALL(hipMemcpy(trans_pt, dev_trans_pt, sizeof(float) * num_points * point_dim,
hipMemcpyDeviceToHost));
std::cout << "check 1" << std::endl;
// get Object from transformed vertex matrix
Object trans_obj = obj_from_vertex_array(trans_pt, num_points, point_dim, obj1);
// print Object to output file
std::ofstream obj_file (output_filename);
print_obj_data(trans_obj, obj_file);
// free CPU memory
free(trans_pt);
///////////////////////////////////////////////////////////////////////////
// Free Memory
///////////////////////////////////////////////////////////////////////////
// TODO: Free GPU memory
CUDA_CALL(hipFree(dev_x1mat));
CUDA_CALL(hipFree(dev_x2mat));
CUDA_CALL(hipFree(dev_xx4x4));
CUDA_CALL(hipFree(dev_x1Tx2));
CUDA_CALL(hipFree(dev_pt));
CUDA_CALL(hipFree(dev_trans_mat));
CUDA_CALL(hipFree(dev_trans_pt));
CUBLAS_CALL(hipblasDestroy(handle));
// TODO: Free CPU memory
free(out_transformation);
free(x1mat);
free(x2mat);
}
| c6e827e5770b67e49cdb0b3851473064b0749cfa.cu | /**
* CUDA Point Alignment
* George Stathopoulos, Jenny Lee, Mary Giambrone, 2019*/
#include <cstdio>
#include <stdio.h>
#include <fstream>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "helper_cuda.h"
#include <string>
#include <fstream>
#include "obj_structures.h"
// helper_cuda.h contains the error checking macros. note that they're called
// CUDA_CALL, CUBLAS_CALL, and CUSOLVER_CALL instead of the previous names
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
int main(int argc, char *argv[]) {
if (argc != 4)
{
printf("Usage: ./point_alignment [file1.obj] [file2.obj] [output.obj]\n");
return 1;
}
std::string filename, filename2, output_filename;
filename = argv[1];
filename2 = argv[2];
output_filename = argv[3];
std::cout << "Aligning " << filename << " with " << filename2 << std::endl;
Object obj1 = read_obj_file(filename);
std::cout << "Reading " << filename << ", which has " << obj1.vertices.size() << " vertices" << std::endl;
Object obj2 = read_obj_file(filename2);
std::cout << "Reading " << filename2 << ", which has " << obj2.vertices.size() << " vertices" << std::endl;
if (obj1.vertices.size() != obj2.vertices.size())
{
printf("Error: number of vertices in the obj files do not match.\n");
return 1;
}
///////////////////////////////////////////////////////////////////////////
// Loading in obj into vertex Array
///////////////////////////////////////////////////////////////////////////
int point_dim = 4; // 3 spatial + 1 homogeneous
int num_points = obj1.vertices.size();
// in col-major
float * x1mat = vertex_array_from_obj(obj1);
float * x2mat = vertex_array_from_obj(obj2);
///////////////////////////////////////////////////////////////////////////
// Point Alignment
///////////////////////////////////////////////////////////////////////////
// TODO: Initialize cublas handle
cublasHandle_t handle;
CUBLAS_CALL(cublasCreate(&handle));
float * dev_x1mat;
float * dev_x2mat;
float * dev_xx4x4;
float * dev_x1Tx2;
// TODO: Allocate device memory and copy over the data onto the device
// Hint: Use cublasSetMatrix() for copying
// Leading dimensions
int ld1 = num_points; // leading dimension of x1 column-major
int ld2 = num_points; // leading dimension of x2 column-major
int ld3 = point_dim; // leading dimension of x1^(T)*x1 column-major
int ld4 = point_dim; // leading dimension of x1^(T)*x2 column-major
// Allocate the memory
CUDA_CALL(cudaMalloc((void**)&dev_x1mat, ld1 * point_dim * sizeof(float))); // n by 4
CUDA_CALL(cudaMalloc((void**)&dev_x2mat, ld2 * point_dim * sizeof(float))); // n by 4
CUDA_CALL(cudaMalloc((void**)&dev_xx4x4, point_dim * point_dim * sizeof(float))); // 4 by 4
CUDA_CALL(cudaMalloc((void**)&dev_x1Tx2, point_dim * point_dim * sizeof(float))); // 4 by 4
CUBLAS_CALL(cublasSetMatrix(num_points, point_dim, sizeof(float), x1mat, ld1, dev_x1mat, ld1));
CUBLAS_CALL(cublasSetMatrix(num_points, point_dim, sizeof(float), x2mat, ld2, dev_x2mat, ld2));
// Now, proceed with the computations necessary to solve for the linear
// transformation.
float one = 1;
float zero = 0;
// cuBLAS transpose or no transpose operations.
cublasOperation_t transOn = CUBLAS_OP_T;
cublasOperation_t transOff = CUBLAS_OP_N;
// TODO: First calculate xx4x4 and x1Tx2
// Following two calls should correspond to:
// xx4x4 = Transpose[x1mat] . x1mat
// In English...
// Simple matrix matrix multiplication sgemm(handle, transpose condition, transpose condition, output m, output n,
// inner dimension, one = no addition of C afterwards, x1 data, leading dim x1, x1 data, leading dim x1,
// no addition of x1Tx1, leading dim x1Tx1).
CUBLAS_CALL(cublasSgemm(handle, transOn, transOff, point_dim, point_dim, num_points,
&one, dev_x1mat, ld1, dev_x1mat, ld1, &zero, dev_xx4x4, ld3));
// x1Tx2 = Transpose[x1mat] . x2mat
CUBLAS_CALL(cublasSgemm(handle, transOn, transOff, point_dim, point_dim, num_points,
&one, dev_x1mat, ld1, dev_x2mat, ld2, &zero, dev_x1Tx2, ld4));
// TODO: Finally, solve the system using LU-factorization! We're solving
// xx4x4 . m4x4mat.T = x1Tx2 i.e. m4x4mat.T = Inverse[xx4x4] . x1Tx2
//
// Factorize xx4x4 into an L and U matrix, ie. xx4x4 = LU
//
// Then, solve the following two systems at once using cusolver's getrs
// L . temp = P . x1Tx2
// And then,
// U . m4x4mat = temp
//
// Generally, pre-factoring a matrix is a very good strategy when
// it is needed for repeated solves.
// TODO: Make handle for cuSolver
cusolverDnHandle_t solver_handle;
CUSOLVER_CALL(cusolverDnCreate(&solver_handle));
// TODO: Initialize work buffer using cusolverDnSgetrf_bufferSize
float * work;
int Lwork;
CUSOLVER_CALL(cusolverDnSgetrf_bufferSize(solver_handle, point_dim, point_dim, dev_xx4x4, point_dim, &Lwork));
// TODO: compute buffer size and prepare memory
CUDA_CALL(cudaMalloc((void**)&work, Lwork * sizeof(float)));
// TODO: Initialize memory for pivot array, with a size of point_dim
int * pivots;
CUDA_CALL(cudaMalloc((void**)&pivots, point_dim * sizeof(int))); // 4
int * info;
CUDA_CALL(cudaMalloc((void**)&info, sizeof(int))); // 1
// TODO: Now, call the factorizer cusolverDnSgetrf, using the above initialized data
CUSOLVER_CALL(cusolverDnSgetrf(solver_handle, point_dim, point_dim, dev_xx4x4, point_dim,
work, pivots, info));
// TODO: Finally, solve the factorized version using a direct call to cusolverDnSgetrs
CUSOLVER_CALL(cusolverDnSgetrs(solver_handle, transOff, point_dim, point_dim, dev_xx4x4, point_dim,
pivots, dev_x1Tx2, point_dim, info));
// TODO: Destroy the cuSolver handle
CUSOLVER_CALL(cusolverDnDestroy(solver_handle));
CUDA_CALL(cudaFree(work));
CUDA_CALL(cudaFree(pivots));
CUDA_CALL(cudaFree(info));
// TODO: Copy final transformation back to host. Note that at this point
// the transformation matrix is transposed
float * out_transformation = (float *)malloc(point_dim * point_dim * sizeof(float));
CUBLAS_CALL(cublasGetVector(point_dim * point_dim, sizeof(float), dev_x1Tx2, 1, out_transformation, 1));
// CUDA_CALL(cudaMemcpy(out_transformation, dev_x1Tx2, sizeof(float) * point_dim * point_dim,
// cudaMemcpyDeviceToHost));
// TODO: Don't forget to set the bottom row of the final transformation
// to [0,0,0,1] (right-most column of the transposed matrix)
for (int i = 0; i < 3; i++) {
out_transformation[IDX2C(i,3,4)] = 0;
}
out_transformation[IDX2C(3,3,4)] = 1;
// Print transformation in row order.
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
std::cout << out_transformation[i * point_dim + j] << " ";
}
std::cout << "\n";
}
///////////////////////////////////////////////////////////////////////////
// Transform point and print output object file
///////////////////////////////////////////////////////////////////////////
std::cout << "check 1 " << point_dim << " " << num_points << std::endl;
// TODO Allocate and Initialize data matrix
float * dev_pt;
CUDA_CALL(cudaMalloc((void**)&dev_pt, point_dim * num_points * sizeof(float))); // n by 4
CUBLAS_CALL(cublasSetMatrix(num_points, point_dim, sizeof(float), x1mat, num_points, dev_pt, num_points));
std::cout << "check 1" << std::endl;
// TODO Allocate and Initialize transformation matrix
float * dev_trans_mat;
CUDA_CALL(cudaMalloc((void**)&dev_trans_mat, point_dim * point_dim * sizeof(float))); // 4 by 4
CUBLAS_CALL(cublasSetMatrix(point_dim, point_dim, sizeof(float),
out_transformation, point_dim, dev_trans_mat, point_dim));
std::cout << "check 1" << std::endl;
// TODO Allocate and Initialize transformed points
float * dev_trans_pt;
CUDA_CALL(cudaMalloc((void**)&dev_trans_pt, point_dim * num_points * sizeof(float))); // n by 4
std::cout << "check 1" << std::endl;
float one_d = 1;
float zero_d = 0;
// TODO Transform point matrix
// (4x4 trans_mat) . (nx4 point matrix)^T = (4xn transformed points)
CUBLAS_CALL(cublasSgemm(handle, transOn, transOn, point_dim, num_points, point_dim,
&one_d, dev_trans_mat, point_dim, dev_pt, num_points, &zero_d, dev_trans_pt, point_dim));
std::cout << "check 1" << std::endl;
// So now dev_trans_pt has shape (4 x n)
float * trans_pt = (float *)malloc(num_points * point_dim * sizeof(float));
CUDA_CALL(cudaMemcpy(trans_pt, dev_trans_pt, sizeof(float) * num_points * point_dim,
cudaMemcpyDeviceToHost));
std::cout << "check 1" << std::endl;
// get Object from transformed vertex matrix
Object trans_obj = obj_from_vertex_array(trans_pt, num_points, point_dim, obj1);
// print Object to output file
std::ofstream obj_file (output_filename);
print_obj_data(trans_obj, obj_file);
// free CPU memory
free(trans_pt);
///////////////////////////////////////////////////////////////////////////
// Free Memory
///////////////////////////////////////////////////////////////////////////
// TODO: Free GPU memory
CUDA_CALL(cudaFree(dev_x1mat));
CUDA_CALL(cudaFree(dev_x2mat));
CUDA_CALL(cudaFree(dev_xx4x4));
CUDA_CALL(cudaFree(dev_x1Tx2));
CUDA_CALL(cudaFree(dev_pt));
CUDA_CALL(cudaFree(dev_trans_mat));
CUDA_CALL(cudaFree(dev_trans_pt));
CUBLAS_CALL(cublasDestroy(handle));
// TODO: Free CPU memory
free(out_transformation);
free(x1mat);
free(x2mat);
}
|
ceaa4e89c71ded369a56bf4d4049f292432de13a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
#include "../shape/head.h"
}
__global__ void dbg_copy_facet_normals_krnl(struct mod_t *dmod, int nf, float3 *dnormals)
{
/* nf-threaded kernel */
int f = blockIdx.x * blockDim.x + threadIdx.x;
if (f<nf) {
dnormals[f].x = __double2float_rn(dmod->shape.comp[0].real.f[f].n[0]);
dnormals[f].y = __double2float_rn(dmod->shape.comp[0].real.f[f].n[1]);
dnormals[f].z = __double2float_rn(dmod->shape.comp[0].real.f[f].n[2]);
}
}
__host__ void dbg_print_facet_normals_host(struct mod_t *mod, const char *fn) {
/* This debug function prints all facet normals in a given model */
int nf;
FILE *fp_n;
nf = mod->shape.comp[0].real.nf;
fp_n = fopen(fn, "w+");
/* Print top row */
fprintf(fp_n, ", value, \n");
for (int f=0; f<nf; f++) {
fprintf(fp_n, "%i, %g, \n", f, mod->shape.comp[0].real.f[f].n[0]);
fprintf(fp_n, "%i, %g, \n", f, mod->shape.comp[0].real.f[f].n[1]);
fprintf(fp_n, "%i, %g, \n", f, mod->shape.comp[0].real.f[f].n[2]);
}
fclose(fp_n);
}
__host__ void dbg_print_facet_normals(struct mod_t *dmod, int nf, const char *fn) {
/* This debug function prints all facet normals in a given model */
FILE *fp_n;
float3 *dnormals, *hnormals;
dim3 BLK,THD;
fp_n = fopen(fn, "w+");
/* Allocate memory */
gpuErrchk(hipMalloc((void**)&dnormals, sizeof(float3) * nf));
hnormals = (float3 *) malloc(nf*sizeof(float3));
THD.x = maxThreadsPerBlock;
BLK.x = floor((THD.x - 1 + nf)/THD.x);
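// Ceiling division: enough blocks so that BLK.x * THD.x >= nf, one thread per facet.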
hipLaunchKernelGGL(( dbg_copy_facet_normals_krnl), dim3(BLK),dim3(THD), 0, 0, dmod, nf, dnormals);
checkErrorAfterKernelLaunch("copy_facet_normals_krnl");
gpuErrchk(hipMemcpy(hnormals, dnormals, sizeof(float3)*nf, hipMemcpyDeviceToHost));
/* Print top row */
fprintf(fp_n, ", value, \n");
for (int f=0; f<nf; f++) {
fprintf(fp_n, "%i, %g, \n", f, hnormals[f].x);
fprintf(fp_n, "%i, %g, \n", f, hnormals[f].y);
fprintf(fp_n, "%i, %g, \n", f, hnormals[f].z);
}
fclose(fp_n);
}
__host__ void dbg_print_facet_normals_dbl3(double3 *normals, int nf, const char *fn) {
/* This debug function prints all facet normals in a given model */
FILE *fp_n;
fp_n = fopen(fn, "w+");
/* Print top row */
fprintf(fp_n, ", value, \n");
for (int f=0; f<nf; f++) {
fprintf(fp_n, "%i, %g, \n", f, normals[f].x);
fprintf(fp_n, "%i, %g, \n", f, normals[f].y);
fprintf(fp_n, "%i, %g, \n", f, normals[f].z);
}
fclose(fp_n);
}
| ceaa4e89c71ded369a56bf4d4049f292432de13a.cu |
extern "C" {
#include "../shape/head.h"
}
__global__ void dbg_copy_facet_normals_krnl(struct mod_t *dmod, int nf, float3 *dnormals)
{
/* nf-threaded kernel */
int f = blockIdx.x * blockDim.x + threadIdx.x;
if (f<nf) {
dnormals[f].x = __double2float_rn(dmod->shape.comp[0].real.f[f].n[0]);
dnormals[f].y = __double2float_rn(dmod->shape.comp[0].real.f[f].n[1]);
dnormals[f].z = __double2float_rn(dmod->shape.comp[0].real.f[f].n[2]);
}
}
__host__ void dbg_print_facet_normals_host(struct mod_t *mod, const char *fn) {
/* This debug function prints all facet normals in a given model */
int nf;
FILE *fp_n;
nf = mod->shape.comp[0].real.nf;
fp_n = fopen(fn, "w+");
/* Print top row */
fprintf(fp_n, ", value, \n");
for (int f=0; f<nf; f++) {
fprintf(fp_n, "%i, %g, \n", f, mod->shape.comp[0].real.f[f].n[0]);
fprintf(fp_n, "%i, %g, \n", f, mod->shape.comp[0].real.f[f].n[1]);
fprintf(fp_n, "%i, %g, \n", f, mod->shape.comp[0].real.f[f].n[2]);
}
fclose(fp_n);
}
__host__ void dbg_print_facet_normals(struct mod_t *dmod, int nf, const char *fn) {
/* This debug function prints all facet normals in a given model */
FILE *fp_n;
float3 *dnormals, *hnormals;
dim3 BLK,THD;
fp_n = fopen(fn, "w+");
/* Allocate memory */
gpuErrchk(cudaMalloc((void**)&dnormals, sizeof(float3) * nf));
hnormals = (float3 *) malloc(nf*sizeof(float3));
THD.x = maxThreadsPerBlock;
BLK.x = floor((THD.x - 1 + nf)/THD.x);
dbg_copy_facet_normals_krnl<<<BLK,THD>>>(dmod, nf, dnormals);
checkErrorAfterKernelLaunch("copy_facet_normals_krnl");
gpuErrchk(cudaMemcpy(hnormals, dnormals, sizeof(float3)*nf, cudaMemcpyDeviceToHost));
/* Print top row */
fprintf(fp_n, ", value, \n");
for (int f=0; f<nf; f++) {
fprintf(fp_n, "%i, %g, \n", f, hnormals[f].x);
fprintf(fp_n, "%i, %g, \n", f, hnormals[f].y);
fprintf(fp_n, "%i, %g, \n", f, hnormals[f].z);
}
fclose(fp_n);
}
__host__ void dbg_print_facet_normals_dbl3(double3 *normals, int nf, const char *fn) {
/* This debug function prints all facet normals in a given model */
FILE *fp_n;
fp_n = fopen(fn, "w+");
/* Print top row */
fprintf(fp_n, ", value, \n");
for (int f=0; f<nf; f++) {
fprintf(fp_n, "%i, %g, \n", f, normals[f].x);
fprintf(fp_n, "%i, %g, \n", f, normals[f].y);
fprintf(fp_n, "%i, %g, \n", f, normals[f].z);
}
fclose(fp_n);
}
|
4db36d66e205d59e10d7e50cbc74b8c8a363c5b6.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <chrono>
#include <hip/hip_runtime.h>
#define TILE_SIZE 5900
#define NTHREADS 256
// 1,2,3,4,5,6 -> 2,3,4,6,1,5
static const int d1 = 41, d2 = 13, d3 = 11, d4 = 9, d5 = 76, d6 = 50;
static const int data_size = d1 * d2 * d3 * d4 * d5 * d6;
static int repeat = 1;
static const int shape_output[] = {d2, d3, d1};
static const int shape_input[] = {d4, d5, d6};
static const float shape_output_r[] = {1.f / d2, 1.f / d3, 1.f / d1};
static const float shape_input_r[] = {1.f / d4, 1.f / d5, 1.f / d6};
static const int stride_output_local[] = {d1, d1 * d2, 1};
static const int stride_output_global[] = {1, d2, d2 * d3 * d4 * d6};
static const int stride_input[] = {d2 * d3, d2 * d3 * d4 * d6 * d1, d2 * d3 * d4};
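// Layout for the permutation (1,2,3,4,5,6) -> (2,3,4,6,1,5): each block stages one
// contiguous d1*d2*d3 tile (indexed by the d4,d5,d6 coordinates) in shared memory and
// scatters it to the output using the stride tables above.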
void verify(double *input, double *output) {
int input_offset = 2 + d1 * (2 + d2 * (2 + d3 * (2 + d4 * (0 + 2 * d5))));
int output_offset = 2 + d2 * (2 + d3 * (2 + d4 * (2 + d6 * (2 + 0 * d1))));
bool error = false;
for (size_t i = 0; i < d5; i++) {
if (input[input_offset + i * d1 * d2 * d3 * d4] !=
output[output_offset + i * d2 * d3 * d4 * d6 * d1]) {
printf("FAIL\n");
error = true;
break;
}
}
if (!error) printf("PASS\n");
}
__global__ void tensor_transpose(
const int dim_input,
const int dim_output,
const int nblocks,
const int tile_size,
const int *shape_input,
const int *shape_output,
const float *shape_input_r,
const float *shape_output_r,
const int *stride_input,
const int *stride_output_local,
const int *stride_output_global,
const double *input,
double *output)
{
__shared__ double tile[TILE_SIZE];
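// TILE_SIZE (5900) must cover one block's tile of d1*d2*d3 = 41*13*11 = 5863 elements.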
for (int block_idx = blockIdx.x; block_idx < nblocks; block_idx += gridDim.x) {
int it = block_idx, im = 0, offset1 = 0;
for (int i = 0; i < dim_input; i++) {
im = it * shape_input_r[i]; // replace division with multiplication
offset1 += stride_input[i] * (it - im * shape_input[i]);
it = im;
}
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
tile[i] = input[i + block_idx * tile_size];
}
__syncthreads();
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
it = i;
int offset2 = 0, local_offset = 0;
for (int j = 0; j < dim_output; j++) {
im = it * shape_output_r[j]; // replace division with multiplication
int tmp = it - im * shape_output[j];
offset2 += stride_output_global[j] * tmp;
local_offset += stride_output_local[j] * tmp;
it = im;
}
output[offset1 + offset2] = tile[local_offset];
}
__syncthreads();
}
}
int main(int argc, char **argv) {
if (argc > 1) {
repeat = atoi(argv[1]);
}
double *input = new double[data_size]();
double *output = new double[data_size]();
for (size_t i = 0; i < data_size; i++) {
input[i] = i;
}
const int nblocks = d4 * d5 * d6;
const int tile_size = d1 * d2 * d3;
const int dim_output = 3;
const int dim_input = 3;
double *d_output, *d_input;
int *d_shape_input, *d_shape_output;
float *d_shape_input_r, *d_shape_output_r;
int *d_stride_output_local, *d_stride_output_global;
int *d_stride_input;
hipMalloc(&d_output, data_size * sizeof(double));
hipMalloc(&d_input, data_size * sizeof(double));
hipMalloc(&d_shape_input, dim_input * sizeof(int));
hipMalloc(&d_shape_input_r, dim_input * sizeof(float));
hipMalloc(&d_shape_output, dim_output * sizeof(int));
hipMalloc(&d_shape_output_r, dim_output * sizeof(float));
hipMalloc(&d_stride_input, dim_input * sizeof(int));
hipMalloc(&d_stride_output_local, dim_output * sizeof(int));
hipMalloc(&d_stride_output_global, dim_output * sizeof(int));
hipMemcpy(d_input, input,
data_size * sizeof(double), hipMemcpyHostToDevice );
hipMemcpy(d_shape_input, shape_input,
dim_input * sizeof(int), hipMemcpyHostToDevice );
hipMemcpy(d_shape_input_r, shape_input_r,
dim_input * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy(d_shape_output, shape_output,
dim_output * sizeof(int), hipMemcpyHostToDevice );
hipMemcpy(d_shape_output_r, shape_output_r,
dim_output * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy(d_stride_input, stride_input,
dim_input * sizeof(int), hipMemcpyHostToDevice );
hipMemcpy(d_stride_output_local, stride_output_local,
dim_output * sizeof(int), hipMemcpyHostToDevice );
hipMemcpy(d_stride_output_global, stride_output_global,
dim_output * sizeof(int), hipMemcpyHostToDevice );
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; ++i) {
hipLaunchKernelGGL(( tensor_transpose), dim3(nblocks), dim3(NTHREADS), 0, 0, dim_input,
dim_output,
nblocks,
tile_size,
d_shape_input,
d_shape_output,
d_shape_input_r,
d_shape_output_r,
d_stride_input,
d_stride_output_local,
d_stride_output_global,
d_input, d_output);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (ms)\n", (time * 1e-6f) / repeat);
hipMemcpy(output, d_output, data_size * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_output);
hipFree(d_input);
hipFree(d_shape_input);
hipFree(d_shape_input_r);
hipFree(d_shape_output);
hipFree(d_shape_output_r);
hipFree(d_stride_input);
hipFree(d_stride_output_local);
hipFree(d_stride_output_global);
verify(input, output);
delete [] input;
delete [] output;
return 0;
}
| 4db36d66e205d59e10d7e50cbc74b8c8a363c5b6.cu | #include <cstdio>
#include <cstdlib>
#include <chrono>
#include <cuda.h>
#define TILE_SIZE 5900
#define NTHREADS 256
// 1,2,3,4,5,6 -> 2,3,4,6,1,5
static const int d1 = 41, d2 = 13, d3 = 11, d4 = 9, d5 = 76, d6 = 50;
static const int data_size = d1 * d2 * d3 * d4 * d5 * d6;
static int repeat = 1;
static const int shape_output[] = {d2, d3, d1};
static const int shape_input[] = {d4, d5, d6};
static const float shape_output_r[] = {1.f / d2, 1.f / d3, 1.f / d1};
static const float shape_input_r[] = {1.f / d4, 1.f / d5, 1.f / d6};
static const int stride_output_local[] = {d1, d1 * d2, 1};
static const int stride_output_global[] = {1, d2, d2 * d3 * d4 * d6};
static const int stride_input[] = {d2 * d3, d2 * d3 * d4 * d6 * d1, d2 * d3 * d4};
void verify(double *input, double *output) {
int input_offset = 2 + d1 * (2 + d2 * (2 + d3 * (2 + d4 * (0 + 2 * d5))));
int output_offset = 2 + d2 * (2 + d3 * (2 + d4 * (2 + d6 * (2 + 0 * d1))));
bool error = false;
for (size_t i = 0; i < d5; i++) {
if (input[input_offset + i * d1 * d2 * d3 * d4] !=
output[output_offset + i * d2 * d3 * d4 * d6 * d1]) {
printf("FAIL\n");
error = true;
break;
}
}
if (!error) printf("PASS\n");
}
__global__ void tensor_transpose(
const int dim_input,
const int dim_output,
const int nblocks,
const int tile_size,
const int *shape_input,
const int *shape_output,
const float *shape_input_r,
const float *shape_output_r,
const int *stride_input,
const int *stride_output_local,
const int *stride_output_global,
const double *input,
double *output)
{
__shared__ double tile[TILE_SIZE];
for (int block_idx = blockIdx.x; block_idx < nblocks; block_idx += gridDim.x) {
int it = block_idx, im = 0, offset1 = 0;
for (int i = 0; i < dim_input; i++) {
im = it * shape_input_r[i]; // replace division with multiplication
offset1 += stride_input[i] * (it - im * shape_input[i]);
it = im;
}
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
tile[i] = input[i + block_idx * tile_size];
}
__syncthreads();
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
it = i;
int offset2 = 0, local_offset = 0;
for (int j = 0; j < dim_output; j++) {
im = it * shape_output_r[j]; // replace division with multiplication
int tmp = it - im * shape_output[j];
offset2 += stride_output_global[j] * tmp;
local_offset += stride_output_local[j] * tmp;
it = im;
}
output[offset1 + offset2] = tile[local_offset];
}
__syncthreads();
}
}
int main(int argc, char **argv) {
if (argc > 1) {
repeat = atoi(argv[1]);
}
double *input = new double[data_size]();
double *output = new double[data_size]();
for (size_t i = 0; i < data_size; i++) {
input[i] = i;
}
const int nblocks = d4 * d5 * d6;
const int tile_size = d1 * d2 * d3;
const int dim_output = 3;
const int dim_input = 3;
double *d_output, *d_input;
int *d_shape_input, *d_shape_output;
float *d_shape_input_r, *d_shape_output_r;
int *d_stride_output_local, *d_stride_output_global;
int *d_stride_input;
cudaMalloc(&d_output, data_size * sizeof(double));
cudaMalloc(&d_input, data_size * sizeof(double));
cudaMalloc(&d_shape_input, dim_input * sizeof(int));
cudaMalloc(&d_shape_input_r, dim_input * sizeof(float));
cudaMalloc(&d_shape_output, dim_output * sizeof(int));
cudaMalloc(&d_shape_output_r, dim_output * sizeof(float));
cudaMalloc(&d_stride_input, dim_input * sizeof(int));
cudaMalloc(&d_stride_output_local, dim_output * sizeof(int));
cudaMalloc(&d_stride_output_global, dim_output * sizeof(int));
cudaMemcpy(d_input, input,
data_size * sizeof(double), cudaMemcpyHostToDevice );
cudaMemcpy(d_shape_input, shape_input,
dim_input * sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy(d_shape_input_r, shape_input_r,
dim_input * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy(d_shape_output, shape_output,
dim_output * sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy(d_shape_output_r, shape_output_r,
dim_output * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy(d_stride_input, stride_input,
dim_input * sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy(d_stride_output_local, stride_output_local,
dim_output * sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy(d_stride_output_global, stride_output_global,
dim_output * sizeof(int), cudaMemcpyHostToDevice );
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; ++i) {
tensor_transpose<<<nblocks, NTHREADS>>>(dim_input,
dim_output,
nblocks,
tile_size,
d_shape_input,
d_shape_output,
d_shape_input_r,
d_shape_output_r,
d_stride_input,
d_stride_output_local,
d_stride_output_global,
d_input, d_output);
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (ms)\n", (time * 1e-6f) / repeat);
cudaMemcpy(output, d_output, data_size * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_output);
cudaFree(d_input);
cudaFree(d_shape_input);
cudaFree(d_shape_input_r);
cudaFree(d_shape_output);
cudaFree(d_shape_output_r);
cudaFree(d_stride_input);
cudaFree(d_stride_output_local);
cudaFree(d_stride_output_global);
verify(input, output);
delete [] input;
delete [] output;
return 0;
}
|
67675005b4d1c286a9097edb233f9eb398669d98.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void xMinDeltaIntegralReplicateKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *yMin, const float *yMax, const int strideH, const int strideW) {
// TODO: use block dim instead
const int hOut = (h + strideH - 1) / strideH;
const int wOut = (w + strideW - 1) / strideW;
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int yOut = id % wOut; id /= wOut; // 0-indexed
const int xOut = id % hOut; id /= hOut; // 0-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and xOut < hOut and yOut < wOut) {
const int x = xOut*strideH + 1;
const int y = yOut*strideW + 1;
tmpArray += windowIdx * hOut * wOut;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
// const int xMaxInt = (int)floor(xMax[windowIdx]);
const int yMaxInt = (int)floor(yMax[windowIdx]);
float delta = 0;
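// Four-corner inclusion-exclusion on the integral image: the terms below sum the
// single input row x+xMinInt over columns (y+yMinInt, y+yMaxInt], with indices
// clamped to the border (replicate padding).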
delta +=
intData[max(0,min(x+xMinInt , h-1))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h ))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h-1))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h ))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMinInt >= 1 and x+xMinInt < h);
tmpArray[xOut*wOut + yOut] *= -delta;
}
} | 67675005b4d1c286a9097edb233f9eb398669d98.cu | #include "includes.h"
__global__ void xMinDeltaIntegralReplicateKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *yMin, const float *yMax, const int strideH, const int strideW) {
// TODO: use block dim instead
const int hOut = (h + strideH - 1) / strideH;
const int wOut = (w + strideW - 1) / strideW;
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int yOut = id % wOut; id /= wOut; // 0-indexed
const int xOut = id % hOut; id /= hOut; // 0-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and xOut < hOut and yOut < wOut) {
const int x = xOut*strideH + 1;
const int y = yOut*strideW + 1;
tmpArray += windowIdx * hOut * wOut;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
// const int xMaxInt = (int)floor(xMax[windowIdx]);
const int yMaxInt = (int)floor(yMax[windowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMinInt , h-1))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h ))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h-1))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h ))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMinInt >= 1 and x+xMinInt < h);
tmpArray[xOut*wOut + yOut] *= -delta;
}
} |
810436f8ec30a2bb797f3d80f420cd082ed69285.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <torch/types.h>
__device__ __forceinline__ float sum_reduce(float acc, float x) {
return acc + x;
}
__device__ __forceinline__ float sum_init() {
return 0;
}
__global__ void topoCacheCoarsenSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
extern __shared__ int sh[];
int sm_offset = (threadIdx.y<<5);
int thread_idx = sm_offset+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int cid = (blockIdx.y<<6)+threadIdx.x;
int lb = A_indptr[rid];
int hb = A_indptr[rid+1];
int ptr = lb+threadIdx.x;
int offset;
float acc1 = sum_init();
float acc2 = sum_init();
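// Each 32-thread warp caches up to 32 column indices of CSR row rid in shared memory,
// then every thread accumulates two output features (cid and cid+32): a column
// coarsening factor of 2, hence the (blockIdx.y<<6) column base.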
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
acc1 = sum_reduce(acc1, B[offset]);
acc2 = sum_reduce(acc2, B[(offset+32)]);
// acc1 = sum_reduce(acc1, __ldg(B+offset));
// acc2 = sum_reduce(acc2, __ldg(B+offset+32));
}
__syncwarp();
}
offset = rid*k+cid;
C[offset] = acc1;
C[offset+32] = acc2;
}
else { // threadIdx.y==blockDim.y-1
int nout = (k-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
if (nout>0) {
acc1 = sum_reduce(acc1, B[offset]);}
// acc1 = sum_reduce(acc1, __ldg(B+offset)); }
if (nout>1) {
acc2 = sum_reduce(acc2, B[(offset+32)]);}
// acc2 = sum_reduce(acc2, __ldg(B+offset+32));}
}
__syncwarp();
}
offset = rid*k+cid;
if (nout>0) {
C[offset] = acc1;}
if (nout>1) {
C[offset+32] = acc2;}
}
}
}
__global__ void topoCacheSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
extern __shared__ int sh[];
int sm_offset = (threadIdx.y<<5);
int thread_idx = sm_offset + threadIdx.x;
int cid = (blockIdx.y<<5)+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int lb = A_indptr[rid];
int hb = A_indptr[(rid+1)];
int offset;
int ptr = lb+threadIdx.x;
float acc1 = sum_init();
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[sm_offset+kk]+cid;
acc1 = sum_reduce(acc1, B[offset]);
// acc1 = sum_reduce(acc1, __ldg(B+offset));
}
__syncwarp();
}
offset = rid*k+cid;
C[offset] = acc1;
}
else { // threadIdx.y==blockDim.y-1
int nout = (k-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
if (nout>0) {
acc1 = sum_reduce(acc1, B[offset]);}
// acc1 = sum_reduce(acc1, __ldg(B+offset)); }
}
__syncwarp();
}
offset = rid*k+cid;
if (nout>0) {
C[offset] = acc1;}
}
}
}
__global__ void topoSimpleSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int lb = A_indptr[rid];
int hb = A_indptr[(rid+1)];
float acc1 = sum_init();
int offset;
for (int ptr=lb; ptr<hb; ptr++) {
// offset = __ldg(A_indices+ptr)*k+threadIdx.x;
// acc1 = sum_reduce(acc1, __ldg(B+offset));
offset = A_indices[ptr]*k+threadIdx.x;
acc1 = sum_reduce(acc1, B[offset]);
}
C[(rid*k+threadIdx.x)] = acc1;
}
}
torch::Tensor spmm_cuda(
torch::Tensor rowptr,
torch::Tensor colind,
torch::Tensor dense
) {
const auto m = rowptr.size(0)-1;
const auto k = dense.size(1);
auto devid = dense.device().index();
auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid);
auto out = torch::empty({m,k}, options);
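// Pick a kernel by feature width k: a simple one-thread-per-output-element kernel for
// k < 32, the shared-memory caching kernel for k < 64, and the 2x column-coarsened
// kernel otherwise.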
if (k<32) {
const int row_per_block = 128/k;
const int n_block = (m+row_per_block-1)/row_per_block;
hipLaunchKernelGGL(( topoSimpleSPMMKernel), dim3(n_block,1,1), dim3(k, row_per_block, 1), 0, 0,
m, k, rowptr.data<int>(), colind.data<int>(), dense.data<float>(), out.data<float>());
return out;
}
if (k<64) {
const int tile_k = (k+31)/32;
const int n_block = (m+3)/4;
hipLaunchKernelGGL(( topoCacheSPMMKernel), dim3(n_block,tile_k,1), dim3(32,4,1), 128*sizeof(int), 0,
m, k, rowptr.data<int>(), colind.data<int>(), dense.data<float>(), out.data<float>());
return out;
}
else {
const int tile_k = (k+63)/64;
const int n_block = (m+8-1)/8;
hipLaunchKernelGGL(( topoCacheCoarsenSPMMKernel), dim3(n_block,tile_k,1), dim3(32,8,1), 8*32*sizeof(int), 0,
m, k, rowptr.data<int>(), colind.data<int>(), dense.data<float>(), out.data<float>());
return out;
}
}
| 810436f8ec30a2bb797f3d80f420cd082ed69285.cu | #include <cuda.h>
#include <torch/types.h>
__device__ __forceinline__ float sum_reduce(float acc, float x) {
return acc + x;
}
__device__ __forceinline__ float sum_init() {
return 0;
}
__global__ void topoCacheCoarsenSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
extern __shared__ int sh[];
int sm_offset = (threadIdx.y<<5);
int thread_idx = sm_offset+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int cid = (blockIdx.y<<6)+threadIdx.x;
int lb = A_indptr[rid];
int hb = A_indptr[rid+1];
int ptr = lb+threadIdx.x;
int offset;
float acc1 = sum_init();
float acc2 = sum_init();
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
acc1 = sum_reduce(acc1, B[offset]);
acc2 = sum_reduce(acc2, B[(offset+32)]);
// acc1 = sum_reduce(acc1, __ldg(B+offset));
// acc2 = sum_reduce(acc2, __ldg(B+offset+32));
}
__syncwarp();
}
offset = rid*k+cid;
C[offset] = acc1;
C[offset+32] = acc2;
}
else { // threadIdx.y==blockDim.y-1
int nout = (k-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
if (nout>0) {
acc1 = sum_reduce(acc1, B[offset]);}
// acc1 = sum_reduce(acc1, __ldg(B+offset)); }
if (nout>1) {
acc2 = sum_reduce(acc2, B[(offset+32)]);}
// acc2 = sum_reduce(acc2, __ldg(B+offset+32));}
}
__syncwarp();
}
offset = rid*k+cid;
if (nout>0) {
C[offset] = acc1;}
if (nout>1) {
C[offset+32] = acc2;}
}
}
}
__global__ void topoCacheSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
extern __shared__ int sh[];
int sm_offset = (threadIdx.y<<5);
int thread_idx = sm_offset + threadIdx.x;
int cid = (blockIdx.y<<5)+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int lb = A_indptr[rid];
int hb = A_indptr[(rid+1)];
int offset;
int ptr = lb+threadIdx.x;
float acc1 = sum_init();
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[sm_offset+kk]+cid;
acc1 = sum_reduce(acc1, B[offset]);
// acc1 = sum_reduce(acc1, __ldg(B+offset));
}
__syncwarp();
}
offset = rid*k+cid;
C[offset] = acc1;
}
else { // threadIdx.y==blockDim.y-1
int nout = (k-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
if (nout>0) {
acc1 = sum_reduce(acc1, B[offset]);}
// acc1 = sum_reduce(acc1, __ldg(B+offset)); }
}
__syncwarp();
}
offset = rid*k+cid;
if (nout>0) {
C[offset] = acc1;}
}
}
}
__global__ void topoSimpleSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int lb = A_indptr[rid];
int hb = A_indptr[(rid+1)];
float acc1 = sum_init();
int offset;
for (int ptr=lb; ptr<hb; ptr++) {
// offset = __ldg(A_indices+ptr)*k+threadIdx.x;
// acc1 = sum_reduce(acc1, __ldg(B+offset));
offset = A_indices[ptr]*k+threadIdx.x;
acc1 = sum_reduce(acc1, B[offset]);
}
C[(rid*k+threadIdx.x)] = acc1;
}
}
torch::Tensor spmm_cuda(
torch::Tensor rowptr,
torch::Tensor colind,
torch::Tensor dense
) {
const auto m = rowptr.size(0)-1;
const auto k = dense.size(1);
auto devid = dense.device().index();
auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid);
auto out = torch::empty({m,k}, options);
if (k<32) {
const int row_per_block = 128/k;
const int n_block = (m+row_per_block-1)/row_per_block;
topoSimpleSPMMKernel<<< dim3(n_block,1,1),dim3(k, row_per_block, 1)>>>(
m, k, rowptr.data<int>(), colind.data<int>(), dense.data<float>(), out.data<float>());
return out;
}
if (k<64) {
const int tile_k = (k+31)/32;
const int n_block = (m+3)/4;
topoCacheSPMMKernel<<< dim3(n_block,tile_k,1), dim3(32,4,1), 128*sizeof(int)>>>(
m, k, rowptr.data<int>(), colind.data<int>(), dense.data<float>(), out.data<float>());
return out;
}
else {
const int tile_k = (k+63)/64;
const int n_block = (m+8-1)/8;
topoCacheCoarsenSPMMKernel<<< dim3(n_block,tile_k,1), dim3(32,8,1), 8*32*sizeof(int)>>>(
m, k, rowptr.data<int>(), colind.data<int>(), dense.data<float>(), out.data<float>());
return out;
}
}
|
0c842dd378fc5b78704ce536e7df66413cd50206.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/TemporalMaxPooling.hip"
#else
static inline void THNN_(TemporalMaxPooling_shapeCheck)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCIndexTensor *indices,
int kW, int dW) {
int dimT = 0; // Temporal dimension
int dimF = 1; // Feature dimension
int input_w;
int input_n;
int output_w;
int ndims = input->dim();
if (ndims == 3)
{
dimT = 1;
dimF = 2;
}
THArgCheck(kW > 0, 5,
"kernel size should be greater than zero, but got kW: %d", kW);
THArgCheck(dW > 0, 6,
"stride should be greater than zero, but got dW: %d", dW);
THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 2 || input->dim() == 3), 2, input,
"non-empty 2D or 3D (batch mode) tensor expected for input, but got: %s");
THArgCheck(input->size(dimT) >= kW, 2,
"input sequence smaller than kernel size. Got: %d, Expected: %d",
input->size(dimT), kW);
input_w = input->size(dimT);
input_n = input->size(dimF);
output_w = (input_w - kW) / dW + 1;
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, ndims, dimT, output_w);
THCUNN_check_dim_size(state, gradOutput, ndims, dimF, input_n)
}
if (indices != NULL) {
THCUNN_check_dim_size_indices(state, indices, ndims, dimT, output_w);
THCUNN_check_dim_size_indices(state, indices, ndims, dimF, input_n);
}
}
void THNN_(TemporalMaxPooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int kW, int dW) {
int dimT = 0; // Temporal dimension
int dimF = 1; // Feature dimension
int batch = 1;
int input_w;
int input_n;
int output_w;
int nthreads;
scalar_t *input_data;
scalar_t *output_data;
THCIndex_t *indices_data;
THCUNN_assertSameGPU(state, 3, input, output, indices);
THNN_(TemporalMaxPooling_shapeCheck)(state, input, NULL, NULL, kW, dW);
if (input->dim() == 3)
{
dimT = 1;
dimF = 2;
batch = input->size(0);
}
input = THCTensor_(newContiguous)(state, input);
input_w = input->size(dimT);
input_n = input->size(dimF);
output_w = (input_w - kW) / dW + 1;
if (input->dim() == 2)
{
THCTensor_(resize2d)(state, output, output_w, input->size(dimF));
THCIndexTensor_(resize2d)(state, indices, output_w, input->size(dimF));
}
else
{
THCTensor_(resize3d)(state, output, batch, output_w, input->size(dimF));
THCIndexTensor_(resize3d)(state, indices, batch, output_w, input->size(dimF));
}
input_data = THCTensor_(data)(state, input);
output_data = THCTensor_(data)(state, output);
indices_data = THCIndexTensor_(data)(state, indices);
dim3 blocks(batch);
nthreads = (output_w / 32) * 32;
if (output_w % 32 > 0) {
nthreads += 32;
}
if (nthreads > TEMPORAL_MAX_POOLING_THREADS) {
blocks.y = nthreads / TEMPORAL_MAX_POOLING_THREADS;
if (nthreads % TEMPORAL_MAX_POOLING_THREADS > 0) {
blocks.y += 1;
}
nthreads = TEMPORAL_MAX_POOLING_THREADS;
}
dim3 threads(nthreads);
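// Launch config: one block per batch element in x; output_w is rounded up to a
// multiple of 32 and, if it exceeds TEMPORAL_MAX_POOLING_THREADS, the surplus is
// spread across blocks.y.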
hipLaunchKernelGGL(( cunn_TemporalMaxPooling_updateOutputKernel) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) ,
input_data, output_data, indices_data, input_w, input_n, output_w, kW, dW);
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
}
void THNN_(TemporalMaxPooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices,
int kW, int dW) {
int dimT = 0; // Temporal dimension
int dimF = 1; // Feature dimension
int batch = 1;
int input_w;
int input_n;
int output_w;
int nthreads;
scalar_t *gradInput_data;
scalar_t *gradOutput_data;
THCIndex_t *indices_data;
THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, indices);
THNN_(TemporalMaxPooling_shapeCheck)(state, input, gradOutput, indices, kW, dW);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
if (input->dim() == 3)
{
dimT = 1;
dimF = 2;
batch = input->size(0);
}
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
input_w = input->size(dimT);
input_n = input->size(dimF);
output_w = (input_w - kW) / dW + 1;
gradInput_data = THCTensor_(data)(state, gradInput);
gradOutput_data = THCTensor_(data)(state, gradOutput);
indices_data = THCIndexTensor_(data)(state, indices);
dim3 blocks(batch);
nthreads = (output_w / 32) * 32;
if (output_w % 32 > 0) {
nthreads += 32;
}
if (nthreads > TEMPORAL_MAX_POOLING_THREADS) {
blocks.y = nthreads / TEMPORAL_MAX_POOLING_THREADS;
if (nthreads % TEMPORAL_MAX_POOLING_THREADS > 0) {
blocks.y += 1;
}
nthreads = TEMPORAL_MAX_POOLING_THREADS;
}
dim3 threads(nthreads);
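// When kW > dW the pooling windows overlap, so several output positions can scatter
// gradient into the same input element; the atomic variant below avoids the race.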
if (kW <= dW) {
hipLaunchKernelGGL(( cunn_TemporalMaxPooling_updateGradInputKernel) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) ,
gradInput_data, gradOutput_data, indices_data, input_w, input_n, output_w, kW, dW);
} else {
hipLaunchKernelGGL(( cunn_TemporalMaxPooling_updateGradInputKernelAtomic) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) ,
gradInput_data, gradOutput_data, indices_data, input_w, input_n, output_w, kW, dW);
}
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, gradOutput);
}
#endif
| 0c842dd378fc5b78704ce536e7df66413cd50206.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/TemporalMaxPooling.cu"
#else
static inline void THNN_(TemporalMaxPooling_shapeCheck)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCIndexTensor *indices,
int kW, int dW) {
int dimT = 0; // Temporal dimension
int dimF = 1; // Feature dimension
int input_w;
int input_n;
int output_w;
int ndims = input->dim();
if (ndims == 3)
{
dimT = 1;
dimF = 2;
}
THArgCheck(kW > 0, 5,
"kernel size should be greater than zero, but got kW: %d", kW);
THArgCheck(dW > 0, 6,
"stride should be greater than zero, but got dW: %d", dW);
THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 2 || input->dim() == 3), 2, input,
"non-empty 2D or 3D (batch mode) tensor expected for input, but got: %s");
THArgCheck(input->size(dimT) >= kW, 2,
"input sequence smaller than kernel size. Got: %d, Expected: %d",
input->size(dimT), kW);
input_w = input->size(dimT);
input_n = input->size(dimF);
output_w = (input_w - kW) / dW + 1;
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, ndims, dimT, output_w);
THCUNN_check_dim_size(state, gradOutput, ndims, dimF, input_n)
}
if (indices != NULL) {
THCUNN_check_dim_size_indices(state, indices, ndims, dimT, output_w);
THCUNN_check_dim_size_indices(state, indices, ndims, dimF, input_n);
}
}
void THNN_(TemporalMaxPooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int kW, int dW) {
int dimT = 0; // Temporal dimension
int dimF = 1; // Feature dimension
int batch = 1;
int input_w;
int input_n;
int output_w;
int nthreads;
scalar_t *input_data;
scalar_t *output_data;
THCIndex_t *indices_data;
THCUNN_assertSameGPU(state, 3, input, output, indices);
THNN_(TemporalMaxPooling_shapeCheck)(state, input, NULL, NULL, kW, dW);
if (input->dim() == 3)
{
dimT = 1;
dimF = 2;
batch = input->size(0);
}
input = THCTensor_(newContiguous)(state, input);
input_w = input->size(dimT);
input_n = input->size(dimF);
output_w = (input_w - kW) / dW + 1;
if (input->dim() == 2)
{
THCTensor_(resize2d)(state, output, output_w, input->size(dimF));
THCIndexTensor_(resize2d)(state, indices, output_w, input->size(dimF));
}
else
{
THCTensor_(resize3d)(state, output, batch, output_w, input->size(dimF));
THCIndexTensor_(resize3d)(state, indices, batch, output_w, input->size(dimF));
}
input_data = THCTensor_(data)(state, input);
output_data = THCTensor_(data)(state, output);
indices_data = THCIndexTensor_(data)(state, indices);
dim3 blocks(batch);
nthreads = (output_w / 32) * 32;
if (output_w % 32 > 0) {
nthreads += 32;
}
if (nthreads > TEMPORAL_MAX_POOLING_THREADS) {
blocks.y = nthreads / TEMPORAL_MAX_POOLING_THREADS;
if (nthreads % TEMPORAL_MAX_POOLING_THREADS > 0) {
blocks.y += 1;
}
nthreads = TEMPORAL_MAX_POOLING_THREADS;
}
dim3 threads(nthreads);
cunn_TemporalMaxPooling_updateOutputKernel <<< blocks, threads, 0, THCState_getCurrentStream(state) >>>(
input_data, output_data, indices_data, input_w, input_n, output_w, kW, dW);
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
}
void THNN_(TemporalMaxPooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices,
int kW, int dW) {
int dimT = 0; // Temporal dimension
int dimF = 1; // Feature dimension
int batch = 1;
int input_w;
int input_n;
int output_w;
int nthreads;
scalar_t *gradInput_data;
scalar_t *gradOutput_data;
THCIndex_t *indices_data;
THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, indices);
THNN_(TemporalMaxPooling_shapeCheck)(state, input, gradOutput, indices, kW, dW);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
if (input->dim() == 3)
{
dimT = 1;
dimF = 2;
batch = input->size(0);
}
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
input_w = input->size(dimT);
input_n = input->size(dimF);
output_w = (input_w - kW) / dW + 1;
gradInput_data = THCTensor_(data)(state, gradInput);
gradOutput_data = THCTensor_(data)(state, gradOutput);
indices_data = THCIndexTensor_(data)(state, indices);
dim3 blocks(batch);
nthreads = (output_w / 32) * 32;
if (output_w % 32 > 0) {
nthreads += 32;
}
if (nthreads > TEMPORAL_MAX_POOLING_THREADS) {
blocks.y = nthreads / TEMPORAL_MAX_POOLING_THREADS;
if (nthreads % TEMPORAL_MAX_POOLING_THREADS > 0) {
blocks.y += 1;
}
nthreads = TEMPORAL_MAX_POOLING_THREADS;
}
dim3 threads(nthreads);
if (kW <= dW) {
cunn_TemporalMaxPooling_updateGradInputKernel <<< blocks, threads, 0, THCState_getCurrentStream(state) >>>(
gradInput_data, gradOutput_data, indices_data, input_w, input_n, output_w, kW, dW);
} else {
cunn_TemporalMaxPooling_updateGradInputKernelAtomic <<< blocks, threads, 0, THCState_getCurrentStream(state) >>>(
gradInput_data, gradOutput_data, indices_data, input_w, input_n, output_w, kW, dW);
}
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, gradOutput);
}
#endif
|
465eec17c56e073ba59a9d5832bb6a5700e589c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "roi_pool_op.h"
namespace caffe2 {
namespace {
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <typename T>
__global__ void ROIPoolForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* bottom_rois,
T* top_data,
int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (offset_bottom_data[bottom_index] > maxval) {
maxval = offset_bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
if (argmax_data) {
argmax_data[index] = maxidx;
}
}
}
template <typename T>
__global__ void ROIPoolBackward(
const int nthreads,
const T* top_diff,
const int* argmax_data,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int bottom_offset = (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
T* offset_bottom_diff = bottom_diff + bottom_offset;
const int* offset_argmax_data = argmax_data + top_offset;
int argmax = offset_argmax_data[ph * pooled_width + pw];
if (argmax != -1) {
gpu_atomic_add(
static_cast<T>(offset_top_diff[ph * pooled_width + pw]),
offset_bottom_diff + argmax);
}
}
}
} // namespace
template <>
bool RoIPoolOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto* Y = Output(0); // RoI pooled data
auto* A = is_test_ ? nullptr : Output(1); // argmaxes
// Handle empty rois
if (R.size() == 0) {
Y->Resize(0, X.dim32(1), pooled_height_, pooled_width_);
// mutable_data calls are needed to allocate the tensors
Y->mutable_data<float>();
if (!is_test_) {
A->Resize(Y->dims());
A->mutable_data<int>();
}
return true;
}
Y->Resize(R.dim32(0), X.dim32(1), pooled_height_, pooled_width_);
if (!is_test_) {
A->Resize(Y->dims());
}
int output_size = Y->size();
int* argmax_data = is_test_ ? nullptr : A->mutable_data<int>();
hipLaunchKernelGGL(( ROIPoolForward<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
X.data<float>(),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
R.data<float>(),
Y->mutable_data<float>(),
argmax_data);
return true;
}
template <>
bool RoIPoolGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& A = Input(2); // argmaxes
auto& dY = Input(3); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(0); // Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
dX->ResizeLike(X);
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);
if (dY.size() > 0) { // Handle possibly empty gradient if there were no rois
hipLaunchKernelGGL(( ROIPoolBackward<float>),
dim3(CAFFE_GET_BLOCKS(dY.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
dY.size(),
dY.data<float>(),
A.data<int>(),
R.dim32(0),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
dX->mutable_data<float>(),
R.data<float>());
}
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(RoIPool, RoIPoolOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(RoIPoolGradient, RoIPoolGradientOp<float, CUDAContext>);
} // namespace
} // namespace caffe2
| 465eec17c56e073ba59a9d5832bb6a5700e589c4.cu | #include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "roi_pool_op.h"
namespace caffe2 {
namespace {
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <typename T>
__global__ void ROIPoolForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* bottom_rois,
T* top_data,
int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (offset_bottom_data[bottom_index] > maxval) {
maxval = offset_bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
if (argmax_data) {
argmax_data[index] = maxidx;
}
}
}
template <typename T>
__global__ void ROIPoolBackward(
const int nthreads,
const T* top_diff,
const int* argmax_data,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int bottom_offset = (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
T* offset_bottom_diff = bottom_diff + bottom_offset;
const int* offset_argmax_data = argmax_data + top_offset;
int argmax = offset_argmax_data[ph * pooled_width + pw];
if (argmax != -1) {
gpu_atomic_add(
static_cast<T>(offset_top_diff[ph * pooled_width + pw]),
offset_bottom_diff + argmax);
}
}
}
} // namespace
template <>
bool RoIPoolOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto* Y = Output(0); // RoI pooled data
auto* A = is_test_ ? nullptr : Output(1); // argmaxes
// Handle empty rois
if (R.size() == 0) {
Y->Resize(0, X.dim32(1), pooled_height_, pooled_width_);
// mutable_data calls are needed to allocate the tensors
Y->mutable_data<float>();
if (!is_test_) {
A->Resize(Y->dims());
A->mutable_data<int>();
}
return true;
}
Y->Resize(R.dim32(0), X.dim32(1), pooled_height_, pooled_width_);
if (!is_test_) {
A->Resize(Y->dims());
}
int output_size = Y->size();
int* argmax_data = is_test_ ? nullptr : A->mutable_data<int>();
ROIPoolForward<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
X.data<float>(),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
R.data<float>(),
Y->mutable_data<float>(),
argmax_data);
return true;
}
template <>
bool RoIPoolGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& A = Input(2); // argmaxes
auto& dY = Input(3); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(0); // Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
dX->ResizeLike(X);
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);
if (dY.size() > 0) { // Handle possibly empty gradient if there were no rois
ROIPoolBackward<float><<<
CAFFE_GET_BLOCKS(dY.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
dY.size(),
dY.data<float>(),
A.data<int>(),
R.dim32(0),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
dX->mutable_data<float>(),
R.data<float>());
}
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(RoIPool, RoIPoolOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(RoIPoolGradient, RoIPoolGradientOp<float, CUDAContext>);
} // namespace
} // namespace caffe2
|
a47c9846f3e1a68376e0378524bf37d57eea7e06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include "ShaderStructs.h"
#include "helper_cuda.h"
#include "sinewave_cuda.h"
__global__ void sinewave_gen_kernel(Vertex *vertices, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
if (y < height && x < width)
{
// write output vertex
vertices[y*width+x].position.x = u;
vertices[y*width+x].position.y = w;
vertices[y*width+x].position.z = v;
vertices[y*width+x].color.x = 1.0f;
vertices[y*width+x].color.y = 0.0f;
vertices[y*width+x].color.z = 0.0f;
vertices[y*width + x].color.w = 0.0f;
}
}
Vertex* cudaImportVertexBuffer(void*sharedHandle, cudaExternalMemory_t &externalMemory, int meshWidth, int meshHeight)
{
cudaExternalMemoryHandleDesc externalMemoryHandleDesc;
memset(&externalMemoryHandleDesc, 0, sizeof(externalMemoryHandleDesc));
externalMemoryHandleDesc.type = cudaExternalMemoryHandleTypeD3D11ResourceKmt;
externalMemoryHandleDesc.size = sizeof(Vertex) * meshHeight * meshWidth;
externalMemoryHandleDesc.flags = cudaExternalMemoryDedicated;
externalMemoryHandleDesc.handle.win32.handle = sharedHandle;
checkCudaErrors(cudaImportExternalMemory(&externalMemory, &externalMemoryHandleDesc));
cudaExternalMemoryBufferDesc externalMemoryBufferDesc;
memset(&externalMemoryBufferDesc, 0, sizeof(externalMemoryBufferDesc));
externalMemoryBufferDesc.offset = 0;
externalMemoryBufferDesc.size = sizeof(Vertex) * meshHeight * meshWidth;
externalMemoryBufferDesc.flags = 0;
Vertex* cudaDevVertptr = NULL;
checkCudaErrors(cudaExternalMemoryGetMappedBuffer((void**)&cudaDevVertptr, externalMemory, &externalMemoryBufferDesc));
return cudaDevVertptr;
}
void cudaImportKeyedMutex(void*sharedHandle, cudaExternalSemaphore_t &extSemaphore)
{
cudaExternalSemaphoreHandleDesc extSemaDesc;
memset(&extSemaDesc, 0, sizeof(extSemaDesc));
extSemaDesc.type = cudaExternalSemaphoreHandleTypeKeyedMutexKmt;
extSemaDesc.handle.win32.handle = sharedHandle;
extSemaDesc.flags = 0;
checkCudaErrors(cudaImportExternalSemaphore(&extSemaphore, &extSemaDesc));
}
void cudaAcquireSync(cudaExternalSemaphore_t &extSemaphore, uint64_t key, unsigned int timeoutMs, hipStream_t streamToRun)
{
cudaExternalSemaphoreWaitParams extSemWaitParams;
memset(&extSemWaitParams, 0, sizeof(extSemWaitParams));
extSemWaitParams.params.keyedMutex.key = key;
extSemWaitParams.params.keyedMutex.timeoutMs = timeoutMs;
checkCudaErrors(cudaWaitExternalSemaphoresAsync(&extSemaphore, &extSemWaitParams, 1, streamToRun));
}
void cudaReleaseSync(cudaExternalSemaphore_t &extSemaphore, uint64_t key, hipStream_t streamToRun)
{
cudaExternalSemaphoreSignalParams extSemSigParams;
memset(&extSemSigParams, 0, sizeof(extSemSigParams));
extSemSigParams.params.keyedMutex.key = key;
checkCudaErrors(cudaSignalExternalSemaphoresAsync(&extSemaphore, &extSemSigParams, 1, streamToRun));
}
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation
////////////////////////////////////////////////////////////////////////////////
void RunSineWaveKernel(cudaExternalSemaphore_t &extSemaphore, uint64_t &key, unsigned int timeoutMs,
unsigned int mesh_width, unsigned int mesh_height, Vertex *cudaDevVertptr, hipStream_t streamToRun)
{
static float t = 0.0f;
cudaAcquireSync(extSemaphore, key++, timeoutMs, streamToRun);
dim3 block(16, 16, 1);
dim3 grid(mesh_width / 16, mesh_height / 16, 1);
hipLaunchKernelGGL(( sinewave_gen_kernel), dim3(grid), dim3(block), 0, streamToRun , cudaDevVertptr, mesh_width, mesh_height, t);
getLastCudaError("sinewave_gen_kernel execution failed.\n");
cudaReleaseSync(extSemaphore, key, streamToRun);
t += 0.01f;
}
| a47c9846f3e1a68376e0378524bf37d57eea7e06.cu | /*
* Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include "ShaderStructs.h"
#include "helper_cuda.h"
#include "sinewave_cuda.h"
__global__ void sinewave_gen_kernel(Vertex *vertices, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
if (y < height && x < width)
{
// write output vertex
vertices[y*width+x].position.x = u;
vertices[y*width+x].position.y = w;
vertices[y*width+x].position.z = v;
vertices[y*width+x].color.x = 1.0f;
vertices[y*width+x].color.y = 0.0f;
vertices[y*width+x].color.z = 0.0f;
vertices[y*width + x].color.w = 0.0f;
}
}
Vertex* cudaImportVertexBuffer(void*sharedHandle, cudaExternalMemory_t &externalMemory, int meshWidth, int meshHeight)
{
cudaExternalMemoryHandleDesc externalMemoryHandleDesc;
memset(&externalMemoryHandleDesc, 0, sizeof(externalMemoryHandleDesc));
externalMemoryHandleDesc.type = cudaExternalMemoryHandleTypeD3D11ResourceKmt;
externalMemoryHandleDesc.size = sizeof(Vertex) * meshHeight * meshWidth;
externalMemoryHandleDesc.flags = cudaExternalMemoryDedicated;
externalMemoryHandleDesc.handle.win32.handle = sharedHandle;
checkCudaErrors(cudaImportExternalMemory(&externalMemory, &externalMemoryHandleDesc));
cudaExternalMemoryBufferDesc externalMemoryBufferDesc;
memset(&externalMemoryBufferDesc, 0, sizeof(externalMemoryBufferDesc));
externalMemoryBufferDesc.offset = 0;
externalMemoryBufferDesc.size = sizeof(Vertex) * meshHeight * meshWidth;
externalMemoryBufferDesc.flags = 0;
Vertex* cudaDevVertptr = NULL;
checkCudaErrors(cudaExternalMemoryGetMappedBuffer((void**)&cudaDevVertptr, externalMemory, &externalMemoryBufferDesc));
return cudaDevVertptr;
}
void cudaImportKeyedMutex(void*sharedHandle, cudaExternalSemaphore_t &extSemaphore)
{
cudaExternalSemaphoreHandleDesc extSemaDesc;
memset(&extSemaDesc, 0, sizeof(extSemaDesc));
extSemaDesc.type = cudaExternalSemaphoreHandleTypeKeyedMutexKmt;
extSemaDesc.handle.win32.handle = sharedHandle;
extSemaDesc.flags = 0;
checkCudaErrors(cudaImportExternalSemaphore(&extSemaphore, &extSemaDesc));
}
void cudaAcquireSync(cudaExternalSemaphore_t &extSemaphore, uint64_t key, unsigned int timeoutMs, cudaStream_t streamToRun)
{
cudaExternalSemaphoreWaitParams extSemWaitParams;
memset(&extSemWaitParams, 0, sizeof(extSemWaitParams));
extSemWaitParams.params.keyedMutex.key = key;
extSemWaitParams.params.keyedMutex.timeoutMs = timeoutMs;
checkCudaErrors(cudaWaitExternalSemaphoresAsync(&extSemaphore, &extSemWaitParams, 1, streamToRun));
}
void cudaReleaseSync(cudaExternalSemaphore_t &extSemaphore, uint64_t key, cudaStream_t streamToRun)
{
cudaExternalSemaphoreSignalParams extSemSigParams;
memset(&extSemSigParams, 0, sizeof(extSemSigParams));
extSemSigParams.params.keyedMutex.key = key;
checkCudaErrors(cudaSignalExternalSemaphoresAsync(&extSemaphore, &extSemSigParams, 1, streamToRun));
}
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation
////////////////////////////////////////////////////////////////////////////////
void RunSineWaveKernel(cudaExternalSemaphore_t &extSemaphore, uint64_t &key, unsigned int timeoutMs,
unsigned int mesh_width, unsigned int mesh_height, Vertex *cudaDevVertptr, cudaStream_t streamToRun)
{
static float t = 0.0f;
cudaAcquireSync(extSemaphore, key++, timeoutMs, streamToRun);
dim3 block(16, 16, 1);
dim3 grid(mesh_width / 16, mesh_height / 16, 1);
sinewave_gen_kernel<<< grid, block, 0, streamToRun >>>(cudaDevVertptr, mesh_width, mesh_height, t);
getLastCudaError("sinewave_gen_kernel execution failed.\n");
cudaReleaseSync(extSemaphore, key, streamToRun);
t += 0.01f;
}
|
adac791113b31d4b47545833ea20676f38c8018a.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "hiprand/hiprand_kernel.h"
#include "math.h"
__inline__ __device__ void randomUniform(hiprandState_t* state, double* randNum)
{
*randNum = hiprand_uniform_double(state);
}
__inline__ __device__ void randomUniform(hiprandState_t* state, float* randNum)
{
*randNum = hiprand_uniform(state);
}
__inline__ __device__ void randomNormal2(hiprandState_t* state, double2* randNum)
{
*randNum = hiprand_normal2_double(state);
}
__inline__ __device__ void randomNormal2(hiprandState_t* state, float2* randNum)
{
*randNum = hiprand_normal2(state);
}
__inline__ __device__ void randomNormal2(hiprandState_t* state, double3* randNum)
{
double2 tmp = hiprand_normal2_double(state);
randNum->x = tmp.x;
randNum->y = tmp.y;
}
__inline__ __device__ void randomNormal2(hiprandState_t* state, float3* randNum)
{
float2 tmp = hiprand_normal2(state);
randNum->x = tmp.x;
randNum->y = tmp.y;
}
__inline__ __device__ void randomNormal(hiprandState_t* state, double* randNum)
{
*randNum = hiprand_normal_double(state);
}
__inline__ __device__ void randomNormal(hiprandState_t* state, float* randNum)
{
*randNum = hiprand_normal(state);
}
template<typename T, typename T3>
__device__ T3 randomDirection3(hiprandState_t* state)
{
T divFactor = 0.0;
T r_divFactor;
T3 w;
while(divFactor < 1e-5)
{
randomNormal(state,&w.x);
randomNormal(state,&w.y);
randomNormal(state,&w.z);
divFactor = norm3d(w.x,w.y,w.z);
}
r_divFactor = 1 / divFactor;
w.x *= r_divFactor;
w.y *= r_divFactor;
w.z *= r_divFactor;
return w;
}
template<typename T, typename T2>
__device__ T2 randomDirection2(hiprandState_t* state)
{
T divFactor = 0.0;
T r_divFactor;
T2 w;
while(divFactor < 1e-5)
{
randomNormal2(state,&w);
divFactor = hypot(w.x,w.y);
}
r_divFactor = 1 / divFactor;
w.x *= r_divFactor;
w.y *= r_divFactor;
return w;
}
template<typename T, typename T2, typename T3>
__device__ T3 random_vMF_direction(T3 center, T kappa, hiprandState_t* state)
{
T b = -kappa + hypot(kappa,(T)1.0);
T dir_x0 = (1.0-b)/(1.0+b);
T s_c = kappa * dir_x0 + 2.0 * log(1.0 - dir_x0 * dir_x0);
T z, u, w_z, t;
do
{
randomUniform(state,&z); // beta(1,1) is uniform
randomUniform(state,&u);
w_z = (1.0 - (1.0+b)*z)/(1.0 - (1.0-b)*z);
t = kappa * w_z + 2.0 * log(1.0 - dir_x0 * w_z) - s_c;
}
while(t < log(u));
T2 v = randomDirection2<T,T2>(state);
T3 sample;
T sqrt_w = sqrt(abs(1.0 - w_z * w_z));
sample.x = sqrt_w * v.x;
sample.y = sqrt_w * v.y;
sample.z = w_z;
// house the center
T s = center.x * center.x + center.y * center.y;
T3 mu_housed;
mu_housed.x = center.x;
mu_housed.y = center.y;
mu_housed.z = 1.0;
if(abs(s) < 1e-8)
{
b = 0.0;
}
else
{
mu_housed.z = (center.z <= 0.0 ? center.z - 1.0 : -s/(center.z + 1.0));
b = 2.0 * (mu_housed.z * mu_housed.z) /(s + mu_housed.z * mu_housed.z);
mu_housed.x = mu_housed.x / mu_housed.z;
mu_housed.y = mu_housed.y / mu_housed.z;
mu_housed.z = 1.0;
}
T3 sample_housed;
sample_housed.x = (1.0 - b * mu_housed.x * mu_housed.x) * sample.x +
( - b * mu_housed.x * mu_housed.y) * sample.y +
( - b * mu_housed.x * mu_housed.z) * sample.z;
sample_housed.y = ( - b * mu_housed.y * mu_housed.x) * sample.x +
(1.0 - b * mu_housed.y * mu_housed.y) * sample.y +
( - b * mu_housed.y * mu_housed.z) * sample.z;
sample_housed.z = ( - b * mu_housed.z * mu_housed.x) * sample.x +
( - b * mu_housed.z * mu_housed.y) * sample.y +
(1.0 - b * mu_housed.z * mu_housed.z) * sample.z;
return sample_housed;
}
| adac791113b31d4b47545833ea20676f38c8018a.cu | #pragma once
#include "cuda_runtime.h"
#include "curand.h"
#include "curand_kernel.h"
#include "math.h"
__inline__ __device__ void randomUniform(curandState_t* state, double* randNum)
{
*randNum = curand_uniform_double(state);
}
__inline__ __device__ void randomUniform(curandState_t* state, float* randNum)
{
*randNum = curand_uniform(state);
}
__inline__ __device__ void randomNormal2(curandState_t* state, double2* randNum)
{
*randNum = curand_normal2_double(state);
}
__inline__ __device__ void randomNormal2(curandState_t* state, float2* randNum)
{
*randNum = curand_normal2(state);
}
__inline__ __device__ void randomNormal2(curandState_t* state, double3* randNum)
{
double2 tmp = curand_normal2_double(state);
randNum->x = tmp.x;
randNum->y = tmp.y;
}
__inline__ __device__ void randomNormal2(curandState_t* state, float3* randNum)
{
float2 tmp = curand_normal2(state);
randNum->x = tmp.x;
randNum->y = tmp.y;
}
__inline__ __device__ void randomNormal(curandState_t* state, double* randNum)
{
*randNum = curand_normal_double(state);
}
__inline__ __device__ void randomNormal(curandState_t* state, float* randNum)
{
*randNum = curand_normal(state);
}
template<typename T, typename T3>
__device__ T3 randomDirection3(curandState_t* state)
{
T divFactor = 0.0;
T r_divFactor;
T3 w;
while(divFactor < 1e-5)
{
randomNormal(state,&w.x);
randomNormal(state,&w.y);
randomNormal(state,&w.z);
divFactor = norm3d(w.x,w.y,w.z);
}
r_divFactor = 1 / divFactor;
w.x *= r_divFactor;
w.y *= r_divFactor;
w.z *= r_divFactor;
return w;
}
template<typename T, typename T2>
__device__ T2 randomDirection2(curandState_t* state)
{
T divFactor = 0.0;
T r_divFactor;
T2 w;
while(divFactor < 1e-5)
{
randomNormal2(state,&w);
divFactor = hypot(w.x,w.y);
}
r_divFactor = 1 / divFactor;
w.x *= r_divFactor;
w.y *= r_divFactor;
return w;
}
template<typename T, typename T2, typename T3>
__device__ T3 random_vMF_direction(T3 center, T kappa, curandState_t* state)
{
T b = -kappa + hypot(kappa,(T)1.0);
T dir_x0 = (1.0-b)/(1.0+b);
T s_c = kappa * dir_x0 + 2.0 * log(1.0 - dir_x0 * dir_x0);
T z, u, w_z, t;
do
{
randomUniform(state,&z); // beta(1,1) is uniform
randomUniform(state,&u);
w_z = (1.0 - (1.0+b)*z)/(1.0 - (1.0-b)*z);
t = kappa * w_z + 2.0 * log(1.0 - dir_x0 * w_z) - s_c;
}
while(t < log(u));
T2 v = randomDirection2<T,T2>(state);
T3 sample;
T sqrt_w = sqrt(abs(1.0 - w_z * w_z));
sample.x = sqrt_w * v.x;
sample.y = sqrt_w * v.y;
sample.z = w_z;
// house the center
T s = center.x * center.x + center.y * center.y;
T3 mu_housed;
mu_housed.x = center.x;
mu_housed.y = center.y;
mu_housed.z = 1.0;
if(abs(s) < 1e-8)
{
b = 0.0;
}
else
{
mu_housed.z = (center.z <= 0.0 ? center.z - 1.0 : -s/(center.z + 1.0));
b = 2.0 * (mu_housed.z * mu_housed.z) /(s + mu_housed.z * mu_housed.z);
mu_housed.x = mu_housed.x / mu_housed.z;
mu_housed.y = mu_housed.y / mu_housed.z;
mu_housed.z = 1.0;
}
T3 sample_housed;
sample_housed.x = (1.0 - b * mu_housed.x * mu_housed.x) * sample.x +
( - b * mu_housed.x * mu_housed.y) * sample.y +
( - b * mu_housed.x * mu_housed.z) * sample.z;
sample_housed.y = ( - b * mu_housed.y * mu_housed.x) * sample.x +
(1.0 - b * mu_housed.y * mu_housed.y) * sample.y +
( - b * mu_housed.y * mu_housed.z) * sample.z;
sample_housed.z = ( - b * mu_housed.z * mu_housed.x) * sample.x +
( - b * mu_housed.z * mu_housed.y) * sample.y +
(1.0 - b * mu_housed.z * mu_housed.z) * sample.z;
return sample_housed;
}
|
ef4f8feaf072fa7f5077b9983f4da5472392293c.hip | // !!! This is a file automatically generated by hipify!!!
#include <gauge_field.h>
#include <color_spinor_field.h>
#include <clover_field.h>
#include <dslash.h>
#include <worker.h>
#include <dslash_policy.cuh>
#include <kernels/dslash_twisted_clover_preconditioned.cuh>
/**
This is the preconditioned gauged twisted-mass operator
*/
namespace quda
{
template <typename Arg> class TwistedCloverPreconditioned : public Dslash<twistedCloverPreconditioned, Arg>
{
using Dslash = Dslash<twistedCloverPreconditioned, Arg>;
using Dslash::arg;
using Dslash::in;
public:
TwistedCloverPreconditioned(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) :
Dslash(arg, out, in)
{
}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
Dslash::setParam(tp);
// specialize here to constrain the template instantiation
if (arg.nParity == 1) {
if (arg.xpay) {
if (arg.dagger) errorQuda("xpay operator only defined for not dagger");
Dslash::template instantiate<packShmem, 1, false, true>(tp, stream);
} else {
if (arg.dagger)
Dslash::template instantiate<packShmem, 1, true, false>(tp, stream);
else
Dslash::template instantiate<packShmem, 1, false, false>(tp, stream);
}
} else {
errorQuda("Preconditioned twisted-clover operator not defined nParity=%d", arg.nParity);
}
}
long long flops() const
{
int clover_flops = 504 + 48;
long long flops = Dslash::flops();
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T: flops += clover_flops * 2 * in.GhostFace()[arg.kernel_type]; break;
case EXTERIOR_KERNEL_ALL:
flops += clover_flops * 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
flops += clover_flops * in.Volume();
if (arg.kernel_type == KERNEL_POLICY) break;
// now correct for flops done by exterior kernel
long long ghost_sites = 0;
for (int d = 0; d < 4; d++)
if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
flops -= clover_flops * ghost_sites;
break;
}
return flops;
}
long long bytes() const
{
bool isFixed = (in.Precision() == sizeof(short) || in.Precision() == sizeof(char)) ? true : false;
int clover_bytes = 72 * in.Precision() + (isFixed ? 2 * sizeof(float) : 0);
if (!arg.dynamic_clover) clover_bytes *= 2;
long long bytes = Dslash::bytes();
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T: bytes += clover_bytes * 2 * in.GhostFace()[arg.kernel_type]; break;
case EXTERIOR_KERNEL_ALL:
bytes += clover_bytes * 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
bytes += clover_bytes * in.Volume();
if (arg.kernel_type == KERNEL_POLICY) break;
// now correct for bytes done by exterior kernel
long long ghost_sites = 0;
for (int d = 0; d < 4; d++)
if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
bytes -= clover_bytes * ghost_sites;
break;
}
return bytes;
}
};
template <typename Float, int nColor, QudaReconstructType recon> struct TwistedCloverPreconditionedApply {
inline TwistedCloverPreconditionedApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
const CloverField &C, double a, double b, bool xpay, const ColorSpinorField &x, int parity, bool dagger,
const int *comm_override, TimeProfile &profile)
{
constexpr int nDim = 4;
TwistedCloverArg<Float, nColor, nDim, recon> arg(out, in, U, C, a, b, xpay, x, parity, dagger, comm_override);
TwistedCloverPreconditioned<decltype(arg)> twisted(arg, out, in);
dslash::DslashPolicyTune<decltype(twisted)> policy(twisted,
const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(),
in.GhostFaceCB(), profile);
policy.apply(0);
checkCudaError();
}
};
/*
Apply the preconditioned twisted-mass Dslash operator
out = x + a*A^{-1} D * in = x + a*(C + i*b*gamma_5)^{-1}*\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu)
*/
void ApplyTwistedCloverPreconditioned(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
const CloverField &C, double a, double b, bool xpay, const ColorSpinorField &x, int parity, bool dagger,
const int *comm_override, TimeProfile &profile)
{
#ifdef GPU_TWISTED_CLOVER_DIRAC
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, U, C);
// check all locations match
checkLocation(out, in, U, C);
instantiate<TwistedCloverPreconditionedApply>(out, in, U, C, a, b, xpay, x, parity, dagger, comm_override, profile);
#else
errorQuda("Twisted-clover dslash has not been built");
#endif // GPU_TWISTED_CLOVER_DIRAC
}
} // namespace quda
| ef4f8feaf072fa7f5077b9983f4da5472392293c.cu | #include <gauge_field.h>
#include <color_spinor_field.h>
#include <clover_field.h>
#include <dslash.h>
#include <worker.h>
#include <dslash_policy.cuh>
#include <kernels/dslash_twisted_clover_preconditioned.cuh>
/**
This is the preconditioned gauged twisted-mass operator
*/
namespace quda
{
template <typename Arg> class TwistedCloverPreconditioned : public Dslash<twistedCloverPreconditioned, Arg>
{
using Dslash = Dslash<twistedCloverPreconditioned, Arg>;
using Dslash::arg;
using Dslash::in;
public:
TwistedCloverPreconditioned(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) :
Dslash(arg, out, in)
{
}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
Dslash::setParam(tp);
// specialize here to constrain the template instantiation
if (arg.nParity == 1) {
if (arg.xpay) {
if (arg.dagger) errorQuda("xpay operator only defined for not dagger");
Dslash::template instantiate<packShmem, 1, false, true>(tp, stream);
} else {
if (arg.dagger)
Dslash::template instantiate<packShmem, 1, true, false>(tp, stream);
else
Dslash::template instantiate<packShmem, 1, false, false>(tp, stream);
}
} else {
errorQuda("Preconditioned twisted-clover operator not defined nParity=%d", arg.nParity);
}
}
long long flops() const
{
int clover_flops = 504 + 48;
long long flops = Dslash::flops();
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T: flops += clover_flops * 2 * in.GhostFace()[arg.kernel_type]; break;
case EXTERIOR_KERNEL_ALL:
flops += clover_flops * 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
flops += clover_flops * in.Volume();
if (arg.kernel_type == KERNEL_POLICY) break;
// now correct for flops done by exterior kernel
long long ghost_sites = 0;
for (int d = 0; d < 4; d++)
if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
flops -= clover_flops * ghost_sites;
break;
}
return flops;
}
long long bytes() const
{
bool isFixed = (in.Precision() == sizeof(short) || in.Precision() == sizeof(char)) ? true : false;
int clover_bytes = 72 * in.Precision() + (isFixed ? 2 * sizeof(float) : 0);
if (!arg.dynamic_clover) clover_bytes *= 2;
long long bytes = Dslash::bytes();
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T: bytes += clover_bytes * 2 * in.GhostFace()[arg.kernel_type]; break;
case EXTERIOR_KERNEL_ALL:
bytes += clover_bytes * 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
bytes += clover_bytes * in.Volume();
if (arg.kernel_type == KERNEL_POLICY) break;
// now correct for bytes done by exterior kernel
long long ghost_sites = 0;
for (int d = 0; d < 4; d++)
if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
bytes -= clover_bytes * ghost_sites;
break;
}
return bytes;
}
};
template <typename Float, int nColor, QudaReconstructType recon> struct TwistedCloverPreconditionedApply {
inline TwistedCloverPreconditionedApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
const CloverField &C, double a, double b, bool xpay, const ColorSpinorField &x, int parity, bool dagger,
const int *comm_override, TimeProfile &profile)
{
constexpr int nDim = 4;
TwistedCloverArg<Float, nColor, nDim, recon> arg(out, in, U, C, a, b, xpay, x, parity, dagger, comm_override);
TwistedCloverPreconditioned<decltype(arg)> twisted(arg, out, in);
dslash::DslashPolicyTune<decltype(twisted)> policy(twisted,
const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(),
in.GhostFaceCB(), profile);
policy.apply(0);
checkCudaError();
}
};
/*
Apply the preconditioned twisted-mass Dslash operator
out = x + a*A^{-1} D * in = x + a*(C + i*b*gamma_5)^{-1}*\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu)
*/
void ApplyTwistedCloverPreconditioned(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
const CloverField &C, double a, double b, bool xpay, const ColorSpinorField &x, int parity, bool dagger,
const int *comm_override, TimeProfile &profile)
{
#ifdef GPU_TWISTED_CLOVER_DIRAC
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, U, C);
// check all locations match
checkLocation(out, in, U, C);
instantiate<TwistedCloverPreconditionedApply>(out, in, U, C, a, b, xpay, x, parity, dagger, comm_override, profile);
#else
errorQuda("Twisted-clover dslash has not been built");
#endif // GPU_TWISTED_CLOVER_DIRAC
}
} // namespace quda
|
b0c7440df21fb79a38a509ca2ff07d88970d7744.hip | // !!! This is a file automatically generated by hipify!!!
//P2P transfers tested and pinned memory transfers tested for coupled overlapping Halo Exchanges. P2P works better in this case
#include <omp.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "testMultiGPU_Jacobi2D_Decom.cuh"
#include <iostream>
#include <chrono>
#include <memory>
#include <vector>
#include <fstream>
using namespace std;
using namespace std::chrono;
#define IMUL(a,b) __mul24(a,b)
//hipError_t performMultiGPUJacobi();
//Backport of std::make_unique for compilers below C++14 on *nix
template<typename T, typename ...Args>
std::unique_ptr<T> make_unique(Args&& ...args)
{
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
struct create_Device
{
int deviceID;
//In a GPU topology set the GPU position
int devicePosition_X;
int devicePosition_Y;
int devicePosition_Z;
vector<float> eHalo;
vector<float> wHalo;
vector<float> nHalo;
vector<float> sHalo;
//Flags check the halos needed by the device
int eHalo_flag = 0;
int wHalo_flag = 0;
int nHalo_flag = 0;
int sHalo_flag = 0;
};
//Simple Jacobi iteration
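//A sketch of the five-point stencil update the kernel below applies at each grid point
//(read off from the subtract-and-divide steps in the kernel body; halo values stand in for
//the missing neighbours at chunk boundaries):
//  x_out[i] = ( rhs[i] - A0[i]*x_in[i - dim_x] - A1[i]*x_in[i - 1]
//                      - A3[i]*x_in[i + 1]     - A4[i]*x_in[i + dim_x] ) / A2[i]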
__global__ void jacobi_Simple(const float *A0, const float *A1, const float *A2, const float *A3, const float *A4, float *x_in, float *x_out, const float *rhs, const int ehalo_flag, const int whalo_flag, const int nhalo_flag, const int shalo_flag, float *ehalo, float *whalo, float *nhalo, float *shalo, const int deviceID, const int numDevices, const int domain_Decom)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
float result = rhs[index];
int dim_x = blockDim.x;// dim across x
int dim_y = gridDim.x;
int x_pos = blockIdx.x;
int y_pos = threadIdx.x;
//result = nhalo[y_pos];
//x_out[index] = result;
//Get the boundaries
int leftBoundaryElem = x_pos * (dim_x);
int rightBoundaryElem = (x_pos * dim_x) + (dim_x - 1);
int topBoundaryElem = y_pos + ((dim_y - 1) * (dim_x));
int bottomBoundaryElem = y_pos;
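//For example, with a hypothetical 4 x 4 chunk (dim_x = dim_y = 4), the thread with x_pos = 1 and
//y_pos = 2 (global index 6) gets leftBoundaryElem = 4, rightBoundaryElem = 7,
//bottomBoundaryElem = 2 and topBoundaryElem = 14 from the formulas above.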
//Halo computation for 1D decomposition: for the first and last GPU, halo computation is not needed on both sides (nhalo and shalo are not both needed)
if (domain_Decom == 1)
{
if (numDevices > 1)
{
//First GPU
if (deviceID == 0) {
//We need to use nhalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
//The top boundary needs element from nhalo
if (index == topBoundaryElem)
//nHalos
result -= A4[index] * nhalo[y_pos];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == topBoundaryElem)
//nHalos updated
nhalo[y_pos] = result;
return;
}
//Last GPU
else if (deviceID == (numDevices - 1)) {
//We need to use shalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
//The Bottom boundary needs elements from shalo
if (index == bottomBoundaryElem)
//sHalos
result -= A0[index] * shalo[y_pos];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == bottomBoundaryElem)
//sHalos updated
shalo[y_pos] = result;
return;
}
//For all the middle GPUs
else
{
//We need to use both shalos and nhalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
//The Bottom boundary needs elements from shalo
if (index == bottomBoundaryElem)
//sHalos
result -= A0[index] * shalo[y_pos];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
//The top boundary needs element from nhalo
if (index == topBoundaryElem)
//nHalos
result -= A4[index] * nhalo[y_pos];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == bottomBoundaryElem)
//sHalos updated
shalo[y_pos] = result;
//Update Halo at the end of computation
if (index == topBoundaryElem)
//nHalos updated
nhalo[y_pos] = result;
return;
}
}
}
else if (domain_Decom == 2) {
//======Left Boundary Elem
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
//Computation using the Halos
if (index == leftBoundaryElem) {
if (whalo_flag == 1) {
result -= A1[index] * whalo[x_pos];
}
}
//======Right Boundary Elem
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index == rightBoundaryElem) {
if (ehalo_flag == 1) {
result -= A3[index] * ehalo[x_pos];
}
}
//======Bottom Boundary Elem
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index == bottomBoundaryElem) {
if (shalo_flag == 1) {
result -= A0[index] * shalo[y_pos];
}
}
//======Top Boundary Elem
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
if (index == topBoundaryElem) {
if (nhalo_flag == 1) {
result -= A4[index] * nhalo[y_pos];
}
}
result /= A2[index];
x_out[index] = result;
//Updating Halos at the End of the computation
if (index == topBoundaryElem) {
if (nhalo_flag == 1) {
nhalo[y_pos] = result;
}
}
if (index == bottomBoundaryElem) {
if (shalo_flag == 1) {
shalo[y_pos] = result;
}
}
if (index == leftBoundaryElem) {
if (whalo_flag == 1) {
whalo[x_pos] = result;
}
}
if (index == rightBoundaryElem) {
if (ehalo_flag == 1) {
ehalo[x_pos] = result;
}
}
return;
}
//For computations on a Machine with a single GPU
else
{
{//For some reason the order of computation (left, right, top and bottom) gives a different result
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
result /= A2[index];
x_out[index] = result;
return;
}
}
}
//========================MultiGPU utility functions============================================================================
void checkP2Paccess(int numGPUs)
{
for (int i = 0; i < numGPUs; i++)
{
hipSetDevice(i);
for (int j = 0; j < numGPUs; j++)
{
int access;
if (i != j)
{
hipDeviceCanAccessPeer(&access, i, j);
if (auto err = hipGetLastError())
{
cout << "P2P Operations failed : " << hipGetErrorString(err) << endl;
return;
}
}
}
}
cout << "\n***NOTE: In case a device doesn't have P2P access to other one, it falls back to normal memcopy procedure.\nSo you can see lesser Bandwidth (GB/s) in those cases.\n\n";
}
bool enableP2P(int numGPUs)
{
for (int i = 0; i < numGPUs; i++)
{
hipSetDevice(i);
for (int j = 0; j < numGPUs; j++)
{
int access;
hipDeviceCanAccessPeer(&access, i, j);
if (auto err = hipGetLastError())
{
cout << "P2P Operations failed while enabling: " << hipGetErrorString(err) << endl;
return false;
}
if (access)
{
hipDeviceEnablePeerAccess(j, 0);
if (auto err = hipGetLastError())
{
cout << "P2P Operations failed while enabling: " << hipGetErrorString(err) << endl;
return false;
}
}
}
}
return true;
}
void disableP2P(int numGPUs)
{
for (int i = 0; i < numGPUs; i++)
{
hipSetDevice(i);
for (int j = 0; j < numGPUs; j++)
{
int access;
hipDeviceCanAccessPeer(&access, i, j);
if (auto err = hipGetLastError())
{
cout << "P2P Operations failed while disabling : " << hipGetErrorString(err) << endl;
return;
}
if (access)
{
hipDeviceDisablePeerAccess(j);
if (auto err = hipGetLastError())
{
cout << "P2P Operations failed while disabling: " << hipGetErrorString(err) << endl;
return;
}
}
}
}
}
//===============================================================================================================================
//====================================Creating Topology with the number of Devices available====================================
void generateGPUGRID(int numDevices, int &numberOfDevicesAlong_X, int &numberOfDevicesAlong_Y)
{
//Finding GPU topology along x and y
//Assuming the total number of devices is a perfect square (to be changed later)
numberOfDevicesAlong_X = (int)sqrt(numDevices);
numberOfDevicesAlong_Y = (int)numberOfDevicesAlong_X;
}
/* Creates a topology for a number of devices in a system,
for ex. the devices are aware of their left, right, top and bottom neighbours in 2D.
1. It also decides the chunk per device by determining the x-dimension and y-dimension of the chunk of data per device.
2. It also initializes halos for each device which can be exchanged with the neighbours
*/
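/* For example, with a hypothetical numDevices = 4 the loops below produce a 2 x 2 grid:
   deviceID 0 -> (devicePosition_X, devicePosition_Y) = (0, 0)
   deviceID 1 -> (0, 1)
   deviceID 2 -> (1, 0)
   deviceID 3 -> (1, 1)
   i.e. the inverse of getDeviceIDfromCoord(x, y) = (x * numberOfDevicesAlong_X) + y defined below. */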
void createTopology(int numDevices, vector<create_Device> &deviceArray, int numberOfDevicesAlong_X, int numberOfDevicesAlong_Y)
{
deviceArray.resize(numDevices);
unsigned int deviceCount = 0;
for (int gridCount_X = 0; gridCount_X < numberOfDevicesAlong_X; gridCount_X++) {
for (int gridCount_Y = 0; gridCount_Y < numberOfDevicesAlong_Y; gridCount_Y++) {
deviceArray[deviceCount].deviceID = deviceCount;
deviceArray[deviceCount].devicePosition_X = gridCount_X;
deviceArray[deviceCount].devicePosition_Y = gridCount_Y;
//devicePosition_Z to be changed later
deviceArray[deviceCount].devicePosition_Z = 1;
deviceCount++;
}
}
}
//==============================================================================================================================
//Init Halos: In 1D decomposition only North and South Halos are used. In 2D decomposition North, South, East and West Halos need to be initialized and computed
//TODO: Create a Halo Exchange Mechanism for 2D Multi GPU topology
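//Which halos a chunk gets follows from its grid position (the checks below): a device allocates a
//wHalo when devicePosition_Y > 0, an eHalo when devicePosition_Y + 1 is still inside the grid,
//an sHalo when devicePosition_X > 0 and an nHalo when devicePosition_X + 1 is still inside the grid,
//so interior devices carry all four halos.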
void initHalos2D(create_Device &device, int chunk_X, int chunk_Y, float *vec_in, int maxdevicesAlong_X, int maxDevicesAlong_Y, int rowStartPos, int rowEndPos, int dim) {
/*cout << endl << "Inside Halo Computation 2D. printing Details";
cout << endl << "Device ID " << device.deviceID;
cout << endl << "Device position X " << device.devicePosition_X;
cout << endl << "Device position Y " << device.devicePosition_Y;
cout << endl << "Row Start " << rowStartPos;
cout << endl << "Row End " << rowEndPos;*/
//Assigning a separate counter for each individual halo, to prevent updating the same counter
//int rowStartPosEast = rowStartPos;
int rowStartPosWest = rowStartPos;
int rowStartPosNorth = rowStartPos;
int rowStartPosSouth = rowStartPos;
int rowEndPosEast = rowEndPos;
//int rowEndPosWest = rowEndPos;
//int rowEndPosNorth = rowEndPos;
//int rowEndPosSouth = rowEndPos;
//Checks provided for Boundary devices in GPU topology
if ((device.devicePosition_Y - 1) >= 0) {
//cout << "West Halo needed ";
device.wHalo_flag = 1;
device.wHalo.resize(chunk_Y);
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
device.wHalo[rowNum] = vec_in[rowStartPosWest];
//cout << rowStartPosWest << " ";
rowStartPosWest += dim;
}
}
if ((device.devicePosition_Y + 1) < maxdevicesAlong_X) {
//cout << "East Halo needed ";
device.eHalo_flag = 1;
device.eHalo.resize(chunk_Y);
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
device.eHalo[rowNum] = vec_in[rowEndPosEast];
//cout << rowEndPosEast << " ";
rowEndPosEast += dim;
}
}
if ((device.devicePosition_X - 1) >= 0) {
//cout << "South Halo needed ";
device.sHalo_flag = 1;
device.sHalo.resize(chunk_X);
for (int rowNum = 0; rowNum < chunk_X; rowNum++)
{
device.sHalo[rowNum] = vec_in[rowStartPosSouth];
//cout << rowStartPosSouth << " ";
rowStartPosSouth++;
}
}
if ((device.devicePosition_X + 1) < maxDevicesAlong_Y) {
//cout << "North Halo needed ";
device.nHalo_flag = 1;
device.nHalo.resize(chunk_X);
rowStartPosNorth = rowStartPosNorth + (dim * (chunk_Y - 1));
for (int rowNum = 0; rowNum < chunk_X; rowNum++)
{
device.nHalo[rowNum] = vec_in[rowStartPosNorth];
//cout << rowStartPosNorth << " ";
rowStartPosNorth++;
}
}
}
//======================================Exchange Halos: on Host==============================================
int getDeviceIDfromCoord(int devCoord_x, int devCoord_y, int numberofDevicesAlong_X) {
int devID = (devCoord_x * numberofDevicesAlong_X) + devCoord_y;
return devID;
}
void exchangehalos_onHost(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X)
{
//Halos exist in pairs so:
//Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. Not South-to-North pairs and West-to-East pairs
//That way the number of exchanges is kept to a minimum
for (int dev = 0; dev < numDevices; dev++)
{
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1) {
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberofDevicesAlong_X);
//Exchange Halos
(deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1) {
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberofDevicesAlong_X);
//Exchange Halos
(deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo);
}
}
}
bool exchangehalos_onHostPinned(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X, vector<float*> &nHalosPinned, vector<float*> &sHalosPinned, vector<float*> &eHalosPinned, vector<float*> &wHalosPinned)
{
//Halos exist in pairs so:
//Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. Not South-to-North pairs and West-to-East pairs
//That way the number of exchanges is kept to a minimum
for (int dev = 0; dev < numDevices; dev++)
{
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1) {
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberofDevicesAlong_X);
//Exchange Halos
//(deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo);
swap(nHalosPinned[dev], sHalosPinned[devIDtoNorth]);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1) {
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberofDevicesAlong_X);
//Exchange Halos
//(deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo);
swap(eHalosPinned[dev], wHalosPinned[devIDtoEast]);
}
}
return true;
}
//===========================Exchange Halos: on Host Ends=====================================================
//Init matrix Diagonals A0, A1, A2, A3, A4
void copyValues(float *A0, float *A1, float *A2, float *A3, float *A4, float *rhs, float *vec_in, float *vec_out, int dim, float *val_A0, float *val_A1, float *val_A2, float *val_A3, float *val_A4, float *val_rhs, float *val_x_in)
{
unsigned int size = dim * dim;
for (unsigned int i = 0; i < size; i++)
{
A0[i] = val_A0[i];
A1[i] = val_A1[i];
A2[i] = val_A2[i];
A3[i] = val_A3[i];
A4[i] = val_A4[i];
rhs[i] = val_rhs[i];
vec_in[i] = val_x_in[i];
vec_out[i] = 0.0f;
}
}
void getAllDeviceProperties() {
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
cout << " Device Number: " << i << endl;
cout << " Device name: " << prop.name << endl;
cout << " Memory Clock Rate (KHz): " << prop.memoryClockRate << endl;
cout << " Memory Bus Width (bits): " << prop.memoryBusWidth << endl;;
cout << " Peak Memory Bandwidth (GB/s): " << 2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6 << endl << endl << endl;
}
}
/* Prints an output file for checking results */
void sendToPrint(float *partial_result, int devicePosition_X, int devicePosition_Y, int numberOfDevicesAlong_X, int chunk_X, int chunk_Y, int dim, int totalSize, vector<float> &result, int numDevices, int currentIteration, int numberOfTotalIterations) {
int devicePosX = devicePosition_X;
int devicePosY = devicePosition_Y;
//Calculating data position based on device coords
//numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated
int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X);
int dataEndPos_X = dataStartPos_X + chunk_X;
//One complete row across all GPUs is dim elements, so to get the element above a given element we add dim (currentPosition + dim)
int rowStartPos = dataStartPos_X;
int rowEndPos = dataEndPos_X;
int indexCounter = 0;
//cout << endl;
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
//Get one complete row for the GPU
for (int pos = rowStartPos; pos < rowEndPos; pos++)
{
result[pos] = partial_result[indexCounter];
indexCounter++;
}
//cout << endl;
rowStartPos += dim;
rowEndPos += dim;
}
//Printing only when the last device's computation is done: remove the check to inspect the computation for each device
int deviceID = getDeviceIDfromCoord(devicePosition_X, devicePosition_Y, numberOfDevicesAlong_X);
if ((deviceID == (numDevices - 1)) && (currentIteration == (numberOfTotalIterations - 1)))
{
ofstream myfile;
myfile.open("data2.txt");
//Printing the values here
for (int i = totalSize; i > 0; i--) {
if (i%dim == 0) {
myfile << endl;
}
myfile << result[i - 1] << " ";
}
myfile.close();
}
}
hipError_t performMultiGPUJacobi(unsigned int val_dim, unsigned int numJacobiIt, float* val_A0, float* val_A1, float* val_A2, float* val_A3, float* val_A4, float* val_rhs, float* val_x_in)
{
//Fixed value changed later
int dim = 8;
if (val_dim != 0) {
dim = val_dim;
}
//TODO: write a 2D domain decomposition method for more than 2 GPUs
int size = dim * dim;
//auto result = make_unique<float[]>(size);
//Create Diagonal Vectors
std::vector<float> a0(size);
std::vector<float> a1(size);
std::vector<float> a2(size);
std::vector<float> a3(size);
std::vector<float> a4(size);
std::vector<float> vec_in(size);
std::vector<float> vec_out(size);
std::vector<float> rhs(size);
std::vector<float> result(size);
//Get the total number of devices
int numDevices;
hipGetDeviceCount(&numDevices);
cout << endl << "Total number of Devices in the System are : " << numDevices << endl;
getAllDeviceProperties();
//Enable Peer-to-Peer access across all GPUs : Done on phase 2 of development
bool p2penabled = false;
p2penabled = enableP2P(numDevices);
//Configuring the number of GPU's manually
//numDevices=2;
copyValues(&a0[0], &a1[0], &a2[0], &a3[0], &a4[0], &rhs[0], &vec_in[0], &vec_out[0], dim, &val_A0[0], &val_A1[0], &val_A2[0], &val_A3[0], &val_A4[0], &val_rhs[0], &val_x_in[0]);
vector<create_Device> deviceArray;
/* Distributed computation using Halos: Algorithm
1. Init Halos.
1.a) In 1D decomposition nhalo and shalo are initialized from vector x_in
1.b) In 2D decomposition nhalo, shalo, ehalo and whalo are initialized from vector x_in
2. Pass the halos to the Jacobi kernel.
3. Store the result computed at the boundary into the halo boundary positions.
4. Swap nhalo and shalo pairs in 1D decomposition. Swap (nhalo,shalo) and (ehalo,whalo) in 2D.
*/
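/* Worked example of the exchange pattern (derived from getDeviceIDfromCoord and createTopology), assuming 4 GPUs in a 2 x 2 grid:
   device (X,Y) maps to ID = X * numberOfDevicesAlong_X + Y, i.e. (0,0)->0, (0,1)->1, (1,0)->2, (1,1)->3.
   A device owning a north halo swaps it with the south halo of the device at (X+1,Y), and a device owning an east halo
   swaps it with the west halo of the device at (X,Y+1), giving the pairs 0<->2 and 1<->3 (north/south) and 0<->1 and 2<->3 (east/west). */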
//=================================Domain Decomposition Logic Starts=================================================================
/*Generating a GPU Grid with multiple GPUs and creating a Topology*/
int numberOfDevicesAlong_X = 1;
int numberOfDevicesAlong_Y = 1;
generateGPUGRID(numDevices, numberOfDevicesAlong_X, numberOfDevicesAlong_Y);
cout << "GPU grid structure is : " << numberOfDevicesAlong_X << " X " << numberOfDevicesAlong_Y << endl;
//Set decomposition dimension to 1D or 2D; when decomposition is 0, computation happens on a single GPU
int decom_Dim = 2;
//Total elements along each dim in 2D
int chunk_X = dim / numberOfDevicesAlong_X;
int chunk_Y = dim / numberOfDevicesAlong_Y;
/* Creating a GPU topology with multiple devices*/
createTopology(numDevices, deviceArray, numberOfDevicesAlong_X, numberOfDevicesAlong_Y);
//Let the total number of GPU be 2 : has to be changed later
//Computation divided into (size/2) on first and size-(size/2) on second
std::vector<int> domainDivision(numDevices);
//Logic for total chunk per device (Domain distribution)
for (int i = 0; i < numDevices; i++) {
//Chunk per GPU will be the same irrespective of 1D or 2D decomposition
domainDivision[i] = size / numDevices;
}
//For use on Device
std::vector<float*>d_A0(numDevices);
std::vector<float*>d_A1(numDevices);
std::vector<float*>d_A2(numDevices);
std::vector<float*>d_A3(numDevices);
std::vector<float*>d_A4(numDevices);
std::vector<float*>d_Vec_In(numDevices);
std::vector<float*>d_Vec_Out(numDevices);
std::vector<float*>d_nhalos(numDevices);
std::vector<float*>d_shalos(numDevices);
std::vector<float*>d_ehalos(numDevices);
std::vector<float*>d_whalos(numDevices);
std::vector<float*>d_Rhs(numDevices);
std::vector<float*>x_buffer(numDevices);
std::vector<float*>y_buffer(numDevices);
//Note: Using Pinned memory on Host for Halos -> Performance Approach 1
vector<float*>nHalo_pinned(numDevices);
vector<float*>sHalo_pinned(numDevices);
vector<float*>wHalo_pinned(numDevices);
vector<float*>eHalo_pinned(numDevices);
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(dev);
hipHostMalloc((void**)&nHalo_pinned[dev], (chunk_X) * sizeof(float));
hipHostMalloc((void**)&sHalo_pinned[dev], (chunk_X) * sizeof(float));
hipHostMalloc((void**)&wHalo_pinned[dev], (chunk_Y) * sizeof(float));
hipHostMalloc((void**)&eHalo_pinned[dev], (chunk_Y) * sizeof(float));
}
for (int dev = 0; dev < numDevices; dev++)
{
//Setting the device before allocation
hipSetDevice(dev);
//cudamalloc the Diagonals
hipMalloc((void**)&d_A0[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A1[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A2[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A3[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A4[dev], domainDivision[dev] * sizeof(float));
//Using pinned memory as part of performance upgrade- Phase 2 of development
//cudamalloc the Input Vector and Result vector
hipMalloc((void**)&d_Vec_In[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_Vec_Out[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_Rhs[dev], domainDivision[dev] * sizeof(float));
//hipMalloc Halos: North and South--1D. TODO: East and West for 2D
hipMalloc((void**)&d_nhalos[dev], chunk_X * sizeof(float));
hipMalloc((void**)&d_shalos[dev], chunk_X * sizeof(float));
hipMalloc((void**)&d_ehalos[dev], chunk_Y * sizeof(float));
hipMalloc((void**)&d_whalos[dev], chunk_Y * sizeof(float));
//Buffer memory used for p2p exchange
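//x_buffer holds one incoming north/south halo row (chunk_X floats) and y_buffer one incoming east/west halo column (chunk_Y floats);
//they act as staging space for the peer-to-peer halo swap further below, so a halo is not overwritten before it has been forwarded.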
hipMalloc((void**)&x_buffer[dev], chunk_X * sizeof(float));
hipMalloc((void**)&y_buffer[dev], chunk_Y * sizeof(float));
}
/* The transfer of Data from Host to Device : Domain Decomposition in 2D*/
if (decom_Dim == 2) {
//Create Partial Diagonal Vectors
//Size per GPU will be
int chunkSize = chunk_X * chunk_Y;
std::vector<float> partial_a0(chunkSize);
std::vector<float> partial_a1(chunkSize);
std::vector<float> partial_a2(chunkSize);
std::vector<float> partial_a3(chunkSize);
std::vector<float> partial_a4(chunkSize);
std::vector<float> partial_vec_in(chunkSize);
std::vector<float> partial_vec_out(chunkSize);
std::vector<float> partial_rhs(chunkSize);
std::vector<float> partial_result(chunkSize);
for (int dev = 0; dev < numDevices; dev++)
{
//Test the properties of the device assigned
//cout << endl << "New Logical Device created " << deviceArray[dev].deviceID;
//cout << endl << "New Logical Device (X,Y) coord (" << deviceArray[dev].devicePosition_X << "," << deviceArray[dev].devicePosition_Y << ")";
//==========Important: Logic for creation of Chunks to be allocated to GPUs==========================================
//Important : Mention about the correlation between the topology and data position in the thesis
int devicePosX = deviceArray[dev].devicePosition_X;
int devicePosY = deviceArray[dev].devicePosition_Y;
//Calculating data position based on device coords
//numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated
int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X);
int dataEndPos_X = dataStartPos_X + chunk_X;
//One complete row across all GPUs is dim; to get the element directly above a given element we add dim to its position (currentPosition + dim)
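//Worked example of the index math above (assuming dim = 8 and a 2 x 2 grid, so chunk_X = chunk_Y = 4):
//device (1,0) starts at (1*2*4*4) + (0*4) = 32 and device (0,1) at (0*2*4*4) + (1*4) = 4; each of the chunk_Y rows then advances by dim = 8.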
int rowStartPos = dataStartPos_X;
int rowEndPos = dataEndPos_X;
int indexCounter = 0;
//Initialize Halos
initHalos2D(deviceArray[dev], chunk_X, chunk_Y, &vec_in[0], numberOfDevicesAlong_X, numberOfDevicesAlong_Y, rowStartPos, rowEndPos - 1, dim);
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
//Get one complete row for the GPU
for (int pos = rowStartPos; pos < rowEndPos; pos++)
{
partial_a0[indexCounter] = a0[pos];
partial_a1[indexCounter] = a1[pos];
partial_a2[indexCounter] = a2[pos];
partial_a3[indexCounter] = a3[pos];
partial_a4[indexCounter] = a4[pos];
partial_vec_in[indexCounter] = vec_in[pos];
partial_vec_out[indexCounter] = vec_out[pos];
partial_rhs[indexCounter] = rhs[pos];
partial_result[indexCounter] = result[pos];
indexCounter++;
}
rowStartPos += dim;
rowEndPos += dim;
}
//==========Important: Logic for creation of Chunks to be allocated to GPUs Ends ==========================================
//Setting Cuda device
hipSetDevice(dev);
//Copy the diagonals from host to device : calling all at once instead of putting inside the for loop
hipMemcpy(d_A0[dev], &partial_a0[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A1[dev], &partial_a1[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A2[dev], &partial_a2[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A3[dev], &partial_a3[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A4[dev], &partial_a4[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
//Copy in and out vectors and RHS
hipMemcpy(d_Vec_In[dev], &partial_vec_in[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Vec_Out[dev], &partial_vec_out[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Rhs[dev], &partial_rhs[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
}
if (auto err = hipGetLastError())
{
cout << "Data copy failed 1: " << hipGetErrorString(err) << endl;
return err;
}
//Copy initial Halos in 2D
//Initial Exchange Halos: then do the initial memcpy calls
exchangehalos_onHost(numDevices, deviceArray, numberOfDevicesAlong_X);
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(dev);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
hipMemcpy(d_nhalos[dev], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].sHalo_flag == 1)
{
hipMemcpy(d_shalos[dev], &deviceArray[dev].sHalo[0], chunk_X * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].eHalo_flag == 1)
{
hipMemcpy(d_ehalos[dev], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].wHalo_flag == 1)
{
hipMemcpy(d_whalos[dev], &deviceArray[dev].wHalo[0], chunk_Y * sizeof(float), hipMemcpyHostToDevice);
}
}
//Development phase 2 changes: initialize the buffers used for p2p communication
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(dev);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
hipMemcpy(x_buffer[dev], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].wHalo_flag == 1)
{
hipMemcpy(y_buffer[dev], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), hipMemcpyHostToDevice);
}
}
}
//=================================Domain Decomposition Logic Ends =================================================================
int blocksize = chunk_X;
int threads = chunk_Y;
//cout << endl<<"blocksize" << blocksize;
//cout << endl<<"thread" << threads;
//Call to kernel
int iterations = 0;
if (numJacobiIt != 0) {
iterations = numJacobiIt;
}
else
{
cout << endl << " No. of iterations is zero exiting... ";
//return;
}
//===========================================CUDA Stream implementation for performance. Phase 2 of Development ====================================================
//===========Algorithm Improvement: Identify the neighbours so that they can be launched together and the exchange can take place without having to wait for computation across all devices============================
hipStream_t streams[4];//Possible to declare it dynamically ? Yes. Using Vectors.
hipStream_t streamsforHaloExcahnge[4];
//Note: The default stream of a device is always synchronizing, so separate streams are created for each device
for (int i = 0; i < numDevices; i++)
{
hipSetDevice(i);
hipStreamCreate(&streams[i]);
if (p2penabled) {
hipStreamCreate(&streamsforHaloExcahnge[i]);
}
}
/*Using a pageable memory first*/
//std::vector<float> partial_resultOnHost(chunk_X * chunk_Y);
/*Using a pinned(page locked) memory for performance*/
vector<float*>partial_resultOnHost(numDevices);
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(dev);
hipHostMalloc((void**)&partial_resultOnHost[dev], (chunk_X * chunk_Y) * sizeof(float));
}
//For testing with and without p2p
//p2penabled = false;
//Check performance
hipError_t status = hipGetLastError();
//Multithreaded host to minimize kernel launch latency: using OpenMP
high_resolution_clock::time_point t1 = high_resolution_clock::now();
#pragma omp parallel num_threads(numDevices)
{
int dev = omp_get_thread_num();
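//Each OpenMP thread drives exactly one GPU (thread id == device id) and only issues work into that device's stream;
//the #pragma omp barrier calls in the p2p path below keep the devices in lock-step between iterations.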
hipSetDevice(dev);
//hipSetDevice(omp_get_thread_num());
//#pragma omp barrier
for (int i = 0; i < iterations; i++)
{
jacobi_Simple <<<blocksize, threads, 0, streams[dev] >> > (d_A0[dev], d_A1[dev], d_A2[dev], d_A3[dev], d_A4[dev], d_Vec_In[dev], d_Vec_Out[dev], d_Rhs[dev], deviceArray[dev].eHalo_flag, deviceArray[dev].wHalo_flag, deviceArray[dev].nHalo_flag, deviceArray[dev].sHalo_flag, d_ehalos[dev], d_whalos[dev], d_nhalos[dev], d_shalos[dev], deviceArray[dev].deviceID, numDevices, decom_Dim);
if (auto err = hipGetLastError())
{
cout << "Kernal Execution failed: " << hipGetErrorString(err) << " Iteration :" << i << endl;
//return err;
}
if (i == (iterations - 1))//Copy the results just for the final iteration
{
hipMemcpyAsync(&partial_resultOnHost[dev][0], d_Vec_Out[dev], domainDivision[dev] * sizeof(float), hipMemcpyDeviceToHost, streams[dev]);
continue;
}
//Store Halo positions after iteration for exchanging
if (!p2penabled) {
if (numDevices > 1)
{
if (deviceArray[dev].nHalo_flag == 1)
{
hipMemcpyAsync(nHalo_pinned[dev], d_nhalos[dev], chunk_X * sizeof(float), hipMemcpyDeviceToHost, streams[dev]);
if (auto err = hipGetLastError())
{
cout << "d_nhalos copy failed D2H: " << hipGetErrorString(err) << endl;
//return err;
}
}
if (deviceArray[dev].sHalo_flag == 1)
{
hipMemcpyAsync(sHalo_pinned[dev], d_shalos[dev], chunk_X * sizeof(float), hipMemcpyDeviceToHost, streams[dev]);
if (auto err = hipGetLastError())
{
cout << "d_shalos copy failed D2H: " << hipGetErrorString(err) << endl;
//return err;
}
}
if (deviceArray[dev].eHalo_flag == 1)
{
hipMemcpyAsync(eHalo_pinned[dev], d_ehalos[dev], chunk_Y * sizeof(float), hipMemcpyDeviceToHost, streams[dev]);
if (auto err = hipGetLastError())
{
cout << "d_ehalos copy failed D2H: " << hipGetErrorString(err) << endl;
//return err;
}
}
if (deviceArray[dev].wHalo_flag == 1)
{
hipMemcpyAsync(wHalo_pinned[dev], d_whalos[dev], chunk_Y * sizeof(float), hipMemcpyDeviceToHost, streams[dev]);
if (auto err = hipGetLastError())
{
cout << "d_whalos copy failed D2H " << hipGetErrorString(err) << endl;
//return err;
}
}
}
}
if (auto err = hipGetLastError())
{
cout << "Data copy failed 2: " << hipGetErrorString(err) << endl;
//return err;
}
//Exchange Halos after each iteration except the last iteration
if ((i < (iterations - 1)))
{
//Synchronize streams from each device
hipStreamSynchronize(streams[dev]);
if (auto err = hipGetLastError())
{
cout << "Stream " << dev << " synchronize error for iteration : " << i << ". ERROR IS: " << hipGetErrorString(err) << endl;
//return err;
}
if ((!p2penabled)) {
bool exchangeComplete = false;
//Note: Using Pinned memory on Host for Halos -> Performance Approach 1
//exchangehalos_onHost(numDevices, deviceArray, numberOfDevicesAlong_X);
exchangeComplete = exchangehalos_onHostPinned(numDevices, deviceArray, numberOfDevicesAlong_X, nHalo_pinned, sHalo_pinned, eHalo_pinned, wHalo_pinned);
if (exchangeComplete) {
//Swap input output vectors for all devices
swap(d_Vec_In[dev], d_Vec_Out[dev]);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
hipMemcpyAsync(d_nhalos[dev], nHalo_pinned[dev], chunk_X * sizeof(float), hipMemcpyHostToDevice, streams[dev]);
}
if (auto err = hipGetLastError())
{
cout << "d_nhalos copy failed H2D: " << hipGetErrorString(err) << endl;
//return err;
}
if (deviceArray[dev].sHalo_flag == 1)
{
hipMemcpyAsync(d_shalos[dev], sHalo_pinned[dev], chunk_X * sizeof(float), hipMemcpyHostToDevice, streams[dev]);
}
if (auto err = hipGetLastError())
{
cout << "d_shalos copy failed H2D: " << hipGetErrorString(err) << endl;
//return err;
}
if (deviceArray[dev].eHalo_flag == 1)
{
hipMemcpyAsync(d_ehalos[dev], eHalo_pinned[dev], chunk_Y * sizeof(float), hipMemcpyHostToDevice, streams[dev]);
}
if (auto err = hipGetLastError())
{
cout << "d_ehalos copy failed H2D: " << hipGetErrorString(err) << endl;
//return err;
}
if (deviceArray[dev].wHalo_flag == 1)
{
hipMemcpyAsync(d_whalos[dev], wHalo_pinned[dev], chunk_Y * sizeof(float), hipMemcpyHostToDevice, streams[dev]);
}
if (auto err = hipGetLastError())
{
cout << "d_whalos copy failed H2D: " << hipGetErrorString(err) << endl;
//return err;
}
}
}
else {
#pragma omp barrier
//Swap input output vectors for all devices
swap(d_Vec_In[dev], d_Vec_Out[dev]);
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1)
{
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberOfDevicesAlong_X);
//Exchange Halos
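//Buffered swap: the neighbour's south halo is first staged in x_buffer, then this device's north halo is pushed to the
//neighbour, and finally the staged copy replaces the local north halo - the staging buffer is what makes the in-place swap possible.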
//Receive the neighbour's south halo into the staging buffer
hipMemcpyPeerAsync(x_buffer[dev], dev, d_shalos[devIDtoNorth], devIDtoNorth, chunk_X * sizeof(float), streams[dev]);
//Send this device's north halo to the neighbour
hipMemcpyPeerAsync(d_shalos[devIDtoNorth], devIDtoNorth, d_nhalos[dev], dev, chunk_X * sizeof(float), streams[dev]);
hipMemcpyAsync(d_nhalos[dev], x_buffer[dev], chunk_X * sizeof(float), hipMemcpyDeviceToDevice, streams[dev]);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1) {
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberOfDevicesAlong_Y);
//Exchange Halos
//Receive the neighbour's west halo into the staging buffer
hipMemcpyPeerAsync(y_buffer[dev], dev, d_whalos[devIDtoEast], devIDtoEast, chunk_Y * sizeof(float), streams[dev]);
//Send this device's east halo to the neighbour
hipMemcpyPeerAsync(d_whalos[devIDtoEast], devIDtoEast, d_ehalos[dev], dev, chunk_Y * sizeof(float), streams[dev]);
hipMemcpyAsync(d_ehalos[dev], y_buffer[dev], chunk_Y * sizeof(float), hipMemcpyDeviceToDevice, streams[dev]);
}
}
//hipStreamSynchronize(streams[dev]);
hipDeviceSynchronize();
#pragma omp barrier
}
//==================================CPU Side computation Ends=================================================================================
}
}
//cout << "No if threads currently: " << omp_get_num_threads() << endl;
if (auto err = hipGetLastError())
{
cout << "Data copy failed 3: " << hipGetErrorString(err) << endl;
return err;
}
high_resolution_clock::time_point t2 = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(t2 - t1).count();
cout << endl << "Iterations successful. Time taken in microseconds :" << duration << endl;
//Sync and Destroy streams and events
for (int i = 0; i < numDevices; ++i)
{
hipSetDevice(i);
//Destroy Events
//Synchronize the streams
hipStreamSynchronize(streams[i]);
hipStreamDestroy(streams[i]);
hipStreamSynchronize(streamsforHaloExcahnge[i]);
hipStreamDestroy(streamsforHaloExcahnge[i]);
}
//Results copied to disk
for (int dev = 0; dev < numDevices; dev++)
{
sendToPrint(&partial_resultOnHost[dev][0], deviceArray[dev].devicePosition_X, deviceArray[dev].devicePosition_Y, numberOfDevicesAlong_X, chunk_X, chunk_Y, dim, size, result, numDevices, iterations - 1, iterations);
}
//==========================================Performance using CUDA stream ends===========================================================================
//Done in phase 2 of development: Disable P2P across devices
if (p2penabled) {
disableP2P(numDevices);
}
//Free memory on device
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(dev);
hipFree(d_A0[dev]);
hipFree(d_A1[dev]);
hipFree(d_A2[dev]);
hipFree(d_A3[dev]);
hipFree(d_A4[dev]);
hipFree(d_Vec_In[dev]);
hipFree(d_Vec_Out[dev]);
hipFree(d_nhalos[dev]);
hipFree(d_shalos[dev]);
hipFree(d_ehalos[dev]);
hipFree(d_whalos[dev]);
hipFree(d_Rhs[dev]);
hipHostFree(partial_resultOnHost[dev]);
hipHostFree(nHalo_pinned[dev]);
hipHostFree(sHalo_pinned[dev]);
hipHostFree(wHalo_pinned[dev]);
hipHostFree(eHalo_pinned[dev]);
hipDeviceReset();
}
cout << endl << "Device Memory free successful." << endl;
//Take care of dynamic mem location
//delete[] domainDivision;
return hipSuccess;
}
int performJacobi_MultiGPU2D_Decom(unsigned int dim, unsigned int numJacobiIt, float* A0, float* A1, float* A2, float* A3, float* A4, float* rhs, float* x_in)
{
hipError_t cudaStatus = performMultiGPUJacobi(dim, numJacobiIt, &A0[0], &A1[0], &A2[0], &A3[0], &A4[0], &rhs[0], &x_in[0]);
/*
//Testing OpenMP here
//Fork a team of threads giving them their own copies of variables
#pragma omp parallel for num_threads(4)
for (int i=0; i < 4; i++)
{
// Obtain thread number
int tid = omp_get_thread_num();
printf("Hello World from thread = %d\n", tid);
// Only master thread does this
if (tid == 0)
{
int nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
}
//All threads join master thread and disband
*/
if (cudaStatus != hipSuccess) {
cout << "Computation failed: " << endl;
return 1;
}
if (cudaStatus != hipSuccess) {
cout << "Cuda Device Reset failed: " << endl;
return 1;
}
return 0;
}
| b0c7440df21fb79a38a509ca2ff07d88970d7744.cu | //P2P transfers tested and pinned memory transfers tested for coupled overlapping Halo Exchanges. P2P works better in this case
#include <omp.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "testMultiGPU_Jacobi2D_Decom.cuh"
#include <iostream>
#include <chrono>
#include <memory>
#include <vector>
#include <fstream>
using namespace std;
using namespace std::chrono;
#define IMUL(a,b) __mul24(a,b)
//cudaError_t performMultiGPUJacobi();
//Support for pre-C++14 compilers on *nix (provides a make_unique fallback)
template<typename T, typename ...Args>
std::unique_ptr<T> make_unique(Args&& ...args)
{
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
struct create_Device
{
int deviceID;
//In a GPU topology set the GPU position
int devicePosition_X;
int devicePosition_Y;
int devicePosition_Z;
vector<float> eHalo;
vector<float> wHalo;
vector<float> nHalo;
vector<float> sHalo;
//Flags check the halos needed by the device
int eHalo_flag = 0;
int wHalo_flag = 0;
int nHalo_flag = 0;
int sHalo_flag = 0;
};
//Simple Jacobi iteration
__global__ void jacobi_Simple(const float *A0, const float *A1, const float *A2, const float *A3, const float *A4, float *x_in, float *x_out, const float *rhs, const int ehalo_flag, const int whalo_flag, const int nhalo_flag, const int shalo_flag, float *ehalo, float *whalo, float *nhalo, float *shalo, const int deviceID, const int numDevices, const int domain_Decom)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
float result = rhs[index];
int dim_x = blockDim.x;// dim across x
int dim_y = gridDim.x;
int x_pos = blockIdx.x;
int y_pos = threadIdx.x;
//result = nhalo[y_pos];
//x_out[index] = result;
//Get the boundaries
int leftBoundaryElem = x_pos * (dim_x);
int rightBoundaryElem = (x_pos * dim_x) + (dim_x - 1);
int topBoundaryElem = y_pos + ((dim_y - 1) * (dim_x));
int bottomBoundaryElem = y_pos;
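//Worked example (assuming a 4 x 4 chunk launched as <<<4, 4>>>): for blockIdx.x = 1, threadIdx.x = 2 we get index = 6,
//leftBoundaryElem = 4, rightBoundaryElem = 7, bottomBoundaryElem = 2 and topBoundaryElem = 14; index matches none of them,
//so the element is interior and reads all four neighbours from x_in instead of a halo.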
//Halo computation for 1D decomposition: the first and last GPUs do not need halos on both sides (the first GPU needs no shalo and the last GPU needs no nhalo)
if (domain_Decom == 1)
{
if (numDevices > 1)
{
//First GPU
if (deviceID == 0) {
//We need to use nhalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
//The top boundary needs element from nhalo
if (index == topBoundaryElem)
//nHalos
result -= A4[index] * nhalo[y_pos];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == topBoundaryElem)
//nHalos updated
nhalo[y_pos] = result;
return;
}
//Last GPU
else if (deviceID == (numDevices - 1)) {
//We need to use shalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
//The Bottom boundary needs elements from shalo
if (index == bottomBoundaryElem)
//sHalos
result -= A0[index] * shalo[y_pos];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == bottomBoundaryElem)
//sHalos updated
shalo[y_pos] = result;
return;
}
//For all the middle GPUs
else
{
//We need to use both shalos and nhalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
//The Bottom boundary needs elements from shalo
if (index == bottomBoundaryElem)
//sHalos
result -= A0[index] * shalo[y_pos];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
//The top boundary needs element from nhalo
if (index == topBoundaryElem)
//nHalos
result -= A4[index] * nhalo[y_pos];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == bottomBoundaryElem)
//sHalos updated
shalo[y_pos] = result;
//Update Halo at the end of computation
if (index == topBoundaryElem)
//nHalos updated
nhalo[y_pos] = result;
return;
}
}
}
else if (domain_Decom == 2) {
//======Left Bounday Elem
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
//Computation using the Halos
if (index == leftBoundaryElem) {
if (whalo_flag == 1) {
result -= A1[index] * whalo[x_pos];
}
}
//======Right Bounday Elem
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index == rightBoundaryElem) {
if (ehalo_flag == 1) {
result -= A3[index] * ehalo[x_pos];
}
}
//======Bottom Bounday Elem
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index == bottomBoundaryElem) {
if (shalo_flag == 1) {
result -= A0[index] * shalo[y_pos];
}
}
//======Top Bounday Elem
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
if (index == topBoundaryElem) {
if (nhalo_flag == 1) {
result -= A4[index] * nhalo[y_pos];
}
}
result /= A2[index];
x_out[index] = result;
//Updating Halos at the End of the computation
if (index == topBoundaryElem) {
if (nhalo_flag == 1) {
nhalo[y_pos] = result;
}
}
if (index == bottomBoundaryElem) {
if (shalo_flag == 1) {
shalo[y_pos] = result;
}
}
if (index == leftBoundaryElem) {
if (whalo_flag == 1) {
whalo[x_pos] = result;
}
}
if (index == rightBoundaryElem) {
if (ehalo_flag == 1) {
ehalo[x_pos] = result;
}
}
return;
}
//For computations on a Machine with a single GPU
else
{
{//For some reason order of computation (left,right,top and bottom) gives a different result
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
result /= A2[index];
x_out[index] = result;
return;
}
}
}
//========================MultiGPU utility functions============================================================================
void checkP2Paccess(int numGPUs)
{
for (int i = 0; i < numGPUs; i++)
{
cudaSetDevice(i);
for (int j = 0; j < numGPUs; j++)
{
int access;
if (i != j)
{
cudaDeviceCanAccessPeer(&access, i, j);
if (auto err = cudaGetLastError())
{
cout << "P2P Operations failed : " << cudaGetErrorString(err) << endl;
return;
}
}
}
}
cout << "\n***NOTE: In case a device doesn't have P2P access to other one, it falls back to normal memcopy procedure.\nSo you can see lesser Bandwidth (GB/s) in those cases.\n\n";
}
bool enableP2P(int numGPUs)
{
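//Note: cudaDeviceEnablePeerAccess only enables access from the currently set device towards the peer,
//so the double loop below (cudaSetDevice(i), then enabling every accessible j) is what makes the access bidirectional.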
for (int i = 0; i < numGPUs; i++)
{
cudaSetDevice(i);
for (int j = 0; j < numGPUs; j++)
{
int access;
cudaDeviceCanAccessPeer(&access, i, j);
if (auto err = cudaGetLastError())
{
cout << "P2P Operations failed while enabling: " << cudaGetErrorString(err) << endl;
return false;
}
if (access)
{
cudaDeviceEnablePeerAccess(j, 0);
if (auto err = cudaGetLastError())
{
cout << "P2P Operations failed while enabling: " << cudaGetErrorString(err) << endl;
return false;
}
}
}
}
return true;
}
void disableP2P(int numGPUs)
{
for (int i = 0; i < numGPUs; i++)
{
cudaSetDevice(i);
for (int j = 0; j < numGPUs; j++)
{
int access;
cudaDeviceCanAccessPeer(&access, i, j);
if (auto err = cudaGetLastError())
{
cout << "P2P Operations failed while disabling : " << cudaGetErrorString(err) << endl;
return;
}
if (access)
{
cudaDeviceDisablePeerAccess(j);
if (auto err = cudaGetLastError())
{
cout << "P2P Operations failed while disabling: " << cudaGetErrorString(err) << endl;
return;
}
}
}
}
}
//===============================================================================================================================
//====================================Creating Topology with the number of Devices available====================================
void generateGPUGRID(int numDevices, int &numberOfDevicesAlong_X, int &numberOfDevicesAlong_Y)
{
//Finding GPU topology along x and y
//Assuming the total number of devices is a perfect square (to be changed later)
numberOfDevicesAlong_X = (int)sqrt(numDevices);
numberOfDevicesAlong_Y = (int)numberOfDevicesAlong_X;
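//Example: 4 devices give a 2 x 2 grid and 9 devices a 3 x 3 grid; for a non-square count such as 2,
//(int)sqrt(2) truncates to 1 and the grid degenerates to 1 x 1 - hence the "to be changed later" note above.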
}
/* Creates a topology for a number of devices in a system
for ex. the devices are aware of their left, right, top and bottom neighbours in 2D
1. It also decides the chunk per device by determining the x and y dimensions of the chunk of data per device.
2. It also initializes the halos for each device, which can be exchanged with the neighbours
*/
void createTopology(int numDevices, vector<create_Device> &deviceArray, int numberOfDevicesAlong_X, int numberOfDevicesAlong_Y)
{
deviceArray.resize(numDevices);
unsigned int deviceCount = 0;
for (int gridCount_X = 0; gridCount_X < numberOfDevicesAlong_X; gridCount_X++) {
for (int gridCount_Y = 0; gridCount_Y < numberOfDevicesAlong_Y; gridCount_Y++) {
deviceArray[deviceCount].deviceID = deviceCount;
deviceArray[deviceCount].devicePosition_X = gridCount_X;
deviceArray[deviceCount].devicePosition_Y = gridCount_Y;
//devicePosition_Z to be changed later
deviceArray[deviceCount].devicePosition_Z = 1;
deviceCount++;
}
}
}
//==============================================================================================================================
//Init Halos: In 1D decomposition only North and South Halos are used. In 2D decomposition North, South, East and West Halos need to be initialized and computed
//TODO: Create a Halo Exchange Mechanism for 2D Multi GPU topology
void initHalos2D(create_Device &device, int chunk_X, int chunk_Y, float *vec_in, int maxdevicesAlong_X, int maxDevicesAlong_Y, int rowStartPos, int rowEndPos, int dim) {
/*cout << endl << "Inside Halo Computation 2D. printing Details";
cout << endl << "Device ID " << device.deviceID;
cout << endl << "Device position X " << device.devicePosition_X;
cout << endl << "Device position Y " << device.devicePosition_Y;
cout << endl << "Row Start " << rowStartPos;
cout << endl << "Row End " << rowEndPos;*/
//Assigning a separate counter for each individual halo to prevent updating the same counter
//int rowStartPosEast = rowStartPos;
int rowStartPosWest = rowStartPos;
int rowStartPosNorth = rowStartPos;
int rowStartPosSouth = rowStartPos;
int rowEndPosEast = rowEndPos;
//int rowEndPosWest = rowEndPos;
//int rowEndPosNorth = rowEndPos;
//int rowEndPosSouth = rowEndPos;
//Checks provided for Boundary devices in GPU topology
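//A device gets a west halo only if a neighbour exists at (X, Y-1), an east halo for (X, Y+1), a south halo for (X-1, Y)
//and a north halo for (X+1, Y); interior devices therefore carry all four halos, while in a 2 x 2 grid every device carries exactly two.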
if ((device.devicePosition_Y - 1) >= 0) {
//cout << "West Halo needed ";
device.wHalo_flag = 1;
device.wHalo.resize(chunk_Y);
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
device.wHalo[rowNum] = vec_in[rowStartPosWest];
//cout << rowStartPosWest << " ";
rowStartPosWest += dim;
}
}
if ((device.devicePosition_Y + 1) < maxdevicesAlong_X) {
//cout << "East Halo needed ";
device.eHalo_flag = 1;
device.eHalo.resize(chunk_Y);
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
device.eHalo[rowNum] = vec_in[rowEndPosEast];
//cout << rowEndPosEast << " ";
rowEndPosEast += dim;
}
}
if ((device.devicePosition_X - 1) >= 0) {
//cout << "South Halo needed ";
device.sHalo_flag = 1;
device.sHalo.resize(chunk_X);
for (int rowNum = 0; rowNum < chunk_X; rowNum++)
{
device.sHalo[rowNum] = vec_in[rowStartPosSouth];
//cout << rowStartPosSouth << " ";
rowStartPosSouth++;
}
}
if ((device.devicePosition_X + 1) < maxDevicesAlong_Y) {
//cout << "North Halo needed ";
device.nHalo_flag = 1;
device.nHalo.resize(chunk_X);
rowStartPosNorth = rowStartPosNorth + (dim * (chunk_Y - 1));
for (int rowNum = 0; rowNum < chunk_X; rowNum++)
{
device.nHalo[rowNum] = vec_in[rowStartPosNorth];
//cout << rowStartPosNorth << " ";
rowStartPosNorth++;
}
}
}
//======================================Exchange Halos: on Host==============================================
int getDeviceIDfromCoord(int devCoord_x, int devCoord_y, int numberofDevicesAlong_X) {
int devID = (devCoord_x * numberofDevicesAlong_X) + devCoord_y;
return devID;
}
void exchangehalos_onHost(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X)
{
//Halos exist in pairs so:
//Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. Not South-to-North pairs and West-to-East pairs
//That way the number of exchanges is kept to a minimum
for (int dev = 0; dev < numDevices; dev++)
{
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1) {
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberofDevicesAlong_X);
//Exchange Halos
(deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1) {
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberofDevicesAlong_X);
//Exchange Halos
(deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo);
}
}
}
bool exchangehalos_onHostPinned(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X, vector<float*> &nHalosPinned, vector<float*> &sHalosPinned, vector<float*> &eHalosPinned, vector<float*> &wHalosPinned)
{
//Halos exist in pairs so:
//Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. Not South-to-North pairs and West-to-East pairs
//That way the number of exchanges is kept to a minimum
for (int dev = 0; dev < numDevices; dev++)
{
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1) {
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberofDevicesAlong_X);
//Exchange Halos
//(deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo);
swap(nHalosPinned[dev], sHalosPinned[devIDtoNorth]);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1) {
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberofDevicesAlong_X);
//Exchange Halos
//(deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo);
swap(eHalosPinned[dev], wHalosPinned[devIDtoEast]);
}
}
return true;
}
//===========================Exchange Halos: on Host Ends=====================================================
//Init matrix Diagonals A0, A1, A2, A3, A4
void copyValues(float *A0, float *A1, float *A2, float *A3, float *A4, float *rhs, float *vec_in, float *vec_out, int dim, float *val_A0, float *val_A1, float *val_A2, float *val_A3, float *val_A4, float *val_rhs, float *val_x_in)
{
unsigned int size = dim * dim;
for (unsigned int i = 0; i < size; i++)
{
A0[i] = val_A0[i];
A1[i] = val_A1[i];
A2[i] = val_A2[i];
A3[i] = val_A3[i];
A4[i] = val_A4[i];
rhs[i] = val_rhs[i];
vec_in[i] = val_x_in[i];
vec_out[i] = 0.0f;
}
}
void getAllDeviceProperties() {
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
cout << " Device Number: " << i << endl;
cout << " Device name: " << prop.name << endl;
cout << " Memory Clock Rate (KHz): " << prop.memoryClockRate << endl;
cout << " Memory Bus Width (bits): " << prop.memoryBusWidth << endl;;
cout << " Peak Memory Bandwidth (GB/s): " << 2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6 << endl << endl << endl;
}
}
/* Prints an output file for checking results */
void sendToPrint(float *partial_result, int devicePosition_X, int devicePosition_Y, int numberOfDevicesAlong_X, int chunk_X, int chunk_Y, int dim, int totalSize, vector<float> &result, int numDevices, int currentIteration, int numberOfTotalIterations) {
int devicePosX = devicePosition_X;
int devicePosY = devicePosition_Y;
//Calculating data position based on device coords
//numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated
int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X);
int dataEndPos_X = dataStartPos_X + chunk_X;
//One complete row across all GPUs is dim; to get the element directly above a given element we add dim to its position (currentPosition + dim)
int rowStartPos = dataStartPos_X;
int rowEndPos = dataEndPos_X;
int indexCounter = 0;
//cout << endl;
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
//Get one complete row for the GPU
for (int pos = rowStartPos; pos < rowEndPos; pos++)
{
result[pos] = partial_result[indexCounter];
indexCounter++;
}
//cout << endl;
rowStartPos += dim;
rowEndPos += dim;
}
//Printing when the last device computation is done: Remove the check to check computation for each device
int deviceID = getDeviceIDfromCoord(devicePosition_X, devicePosition_Y, numberOfDevicesAlong_X);
if ((deviceID == (numDevices - 1)) && (currentIteration == (numberOfTotalIterations - 1)))
{
ofstream myfile;
myfile.open("data2.txt");
//Printing the values here
for (int i = totalSize; i > 0; i--) {
if (i%dim == 0) {
myfile << endl;
}
myfile << result[i - 1] << " ";
}
myfile.close();
}
}
cudaError_t performMultiGPUJacobi(unsigned int val_dim, unsigned int numJacobiIt, float* val_A0, float* val_A1, float* val_A2, float* val_A3, float* val_A4, float* val_rhs, float* val_x_in)
{
//Fixed value changed later
int dim = 8;
if (val_dim != 0) {
dim = val_dim;
}
//TODO: write a 2D domain decomposition method for more than 2 GPUs
int size = dim * dim;
//auto result = make_unique<float[]>(size);
//Create Diagonal Vectors
std::vector<float> a0(size);
std::vector<float> a1(size);
std::vector<float> a2(size);
std::vector<float> a3(size);
std::vector<float> a4(size);
std::vector<float> vec_in(size);
std::vector<float> vec_out(size);
std::vector<float> rhs(size);
std::vector<float> result(size);
//Get the total number of devices
int numDevices;
cudaGetDeviceCount(&numDevices);
cout << endl << "Total number of Devices in the System are : " << numDevices << endl;
getAllDeviceProperties();
//Enable Peer-to-Peer access across all GPUs : Done on phase 2 of development
bool p2penabled = false;
p2penabled = enableP2P(numDevices);
//Configuring the number of GPU's manually
//numDevices=2;
copyValues(&a0[0], &a1[0], &a2[0], &a3[0], &a4[0], &rhs[0], &vec_in[0], &vec_out[0], dim, &val_A0[0], &val_A1[0], &val_A2[0], &val_A3[0], &val_A4[0], &val_rhs[0], &val_x_in[0]);
vector<create_Device> deviceArray;
/* Distributed computation using Halos: Algorithm
1. Init Halos.
1.a) In 1D decomposition nhalo and shalo are initialized from vector x_in
1.b) In 2D decomposition nhalo, shalo, ehalo and whalo are initialized from vector x_in
2. Pass the halos to the Jacobi kernel.
3. Store the result computed at the boundary into the halo boundary positions.
4. Swap nhalo and shalo pairs in 1D decomposition. Swap (nhalo,shalo) and (ehalo,whalo) in 2D.
*/
//=================================Domain Decomposition Logic Starts=================================================================
/*Generating a GPU Grid with multiple GPUs and creating a Topology*/
int numberOfDevicesAlong_X = 1;
int numberOfDevicesAlong_Y = 1;
generateGPUGRID(numDevices, numberOfDevicesAlong_X, numberOfDevicesAlong_Y);
cout << "GPU grid structure is : " << numberOfDevicesAlong_X << " X " << numberOfDevicesAlong_Y << endl;
//Set decomposition dimension to 1D or 2D; when decomposition is 0, computation happens on a single GPU
int decom_Dim = 2;
//Total elements along each dim in 2D
int chunk_X = dim / numberOfDevicesAlong_X;
int chunk_Y = dim / numberOfDevicesAlong_Y;
/* Creating a GPU topology with multiple devices*/
createTopology(numDevices, deviceArray, numberOfDevicesAlong_X, numberOfDevicesAlong_Y);
//Let the total number of GPU be 2 : has to be changed later
//Computation divided into (size/2) on first and size-(size/2) on second
std::vector<int> domainDivision(numDevices);
//Logic for total chunk per device (Domain distribution)
for (int i = 0; i < numDevices; i++) {
//Chunk per GPU will be the same irrespective of 1D or 2D decomposition
domainDivision[i] = size / numDevices;
}
//For use on Device
std::vector<float*>d_A0(numDevices);
std::vector<float*>d_A1(numDevices);
std::vector<float*>d_A2(numDevices);
std::vector<float*>d_A3(numDevices);
std::vector<float*>d_A4(numDevices);
std::vector<float*>d_Vec_In(numDevices);
std::vector<float*>d_Vec_Out(numDevices);
std::vector<float*>d_nhalos(numDevices);
std::vector<float*>d_shalos(numDevices);
std::vector<float*>d_ehalos(numDevices);
std::vector<float*>d_whalos(numDevices);
std::vector<float*>d_Rhs(numDevices);
std::vector<float*>x_buffer(numDevices);
std::vector<float*>y_buffer(numDevices);
//Note: Using Pinned memory on Host for Halos -> Performance Approach 1
vector<float*>nHalo_pinned(numDevices);
vector<float*>sHalo_pinned(numDevices);
vector<float*>wHalo_pinned(numDevices);
vector<float*>eHalo_pinned(numDevices);
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(dev);
cudaMallocHost((void**)&nHalo_pinned[dev], (chunk_X) * sizeof(float));
cudaMallocHost((void**)&sHalo_pinned[dev], (chunk_X) * sizeof(float));
cudaMallocHost((void**)&wHalo_pinned[dev], (chunk_Y) * sizeof(float));
cudaMallocHost((void**)&eHalo_pinned[dev], (chunk_Y) * sizeof(float));
}
for (int dev = 0; dev < numDevices; dev++)
{
//Setting the device before allocation
cudaSetDevice(dev);
//cudamalloc the Diagonals
cudaMalloc((void**)&d_A0[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A1[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A2[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A3[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A4[dev], domainDivision[dev] * sizeof(float));
//Using pinned memory as part of performance upgrade- Phase 2 of development
//cudamalloc the Input Vector and Result vector
cudaMalloc((void**)&d_Vec_In[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_Vec_Out[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_Rhs[dev], domainDivision[dev] * sizeof(float));
//cudaMalloc Halos: North and South--1D. TODO: East and West for 2D
cudaMalloc((void**)&d_nhalos[dev], chunk_X * sizeof(float));
cudaMalloc((void**)&d_shalos[dev], chunk_X * sizeof(float));
cudaMalloc((void**)&d_ehalos[dev], chunk_Y * sizeof(float));
cudaMalloc((void**)&d_whalos[dev], chunk_Y * sizeof(float));
//Buffer memory used for p2p exchange
cudaMalloc((void**)&x_buffer[dev], chunk_X * sizeof(float));
cudaMalloc((void**)&y_buffer[dev], chunk_Y * sizeof(float));
}
/* The transfer of Data from Host to Device : Domain Decomposition in 2D*/
if (decom_Dim == 2) {
//Create Partial Diagonal Vectors
//Size per GPU will be
int chunkSize = chunk_X * chunk_Y;
std::vector<float> partial_a0(chunkSize);
std::vector<float> partial_a1(chunkSize);
std::vector<float> partial_a2(chunkSize);
std::vector<float> partial_a3(chunkSize);
std::vector<float> partial_a4(chunkSize);
std::vector<float> partial_vec_in(chunkSize);
std::vector<float> partial_vec_out(chunkSize);
std::vector<float> partial_rhs(chunkSize);
std::vector<float> partial_result(chunkSize);
for (int dev = 0; dev < numDevices; dev++)
{
//Test the properties of the device assigned
//cout << endl << "New Logical Device created " << deviceArray[dev].deviceID;
//cout << endl << "New Logical Device (X,Y) coord (" << deviceArray[dev].devicePosition_X << "," << deviceArray[dev].devicePosition_Y << ")";
//==========Important: Logic for creation of Chunks to be allocated to GPUs==========================================
//Important : Mention about the correlation between the topology and data position in the thesis
int devicePosX = deviceArray[dev].devicePosition_X;
int devicePosY = deviceArray[dev].devicePosition_Y;
//Calculating data position based on device coords
//numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated
int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X);
int dataEndPos_X = dataStartPos_X + chunk_X;
//One complete row across all GPUs is dim; to get the element directly above a given element we add dim to its position (currentPosition + dim)
int rowStartPos = dataStartPos_X;
int rowEndPos = dataEndPos_X;
int indexCounter = 0;
//Initialize Halos
initHalos2D(deviceArray[dev], chunk_X, chunk_Y, &vec_in[0], numberOfDevicesAlong_X, numberOfDevicesAlong_Y, rowStartPos, rowEndPos - 1, dim);
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
//Get one complete row for the GPU
for (int pos = rowStartPos; pos < rowEndPos; pos++)
{
partial_a0[indexCounter] = a0[pos];
partial_a1[indexCounter] = a1[pos];
partial_a2[indexCounter] = a2[pos];
partial_a3[indexCounter] = a3[pos];
partial_a4[indexCounter] = a4[pos];
partial_vec_in[indexCounter] = vec_in[pos];
partial_vec_out[indexCounter] = vec_out[pos];
partial_rhs[indexCounter] = rhs[pos];
partial_result[indexCounter] = result[pos];
indexCounter++;
}
rowStartPos += dim;
rowEndPos += dim;
}
//==========Important: Logic for creation of Chunks to be allocated to GPUs Ends ==========================================
//Setting Cuda device
cudaSetDevice(dev);
//Copy the diagonals from host to device : calling all at once instead of putting inside the for loop
cudaMemcpy(d_A0[dev], &partial_a0[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A1[dev], &partial_a1[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A2[dev], &partial_a2[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A3[dev], &partial_a3[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A4[dev], &partial_a4[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
//Copy in and out vectors and RHS
cudaMemcpy(d_Vec_In[dev], &partial_vec_in[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Vec_Out[dev], &partial_vec_out[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Rhs[dev], &partial_rhs[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
}
if (auto err = cudaGetLastError())
{
cout << "Data copy failed 1: " << cudaGetErrorString(err) << endl;
return err;
}
//Copy initial Halos in 2D
//Initial Exchange Halos: then do the initial cudaMemcpy calls
exchangehalos_onHost(numDevices, deviceArray, numberOfDevicesAlong_X);
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(dev);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
cudaMemcpy(d_nhalos[dev], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].sHalo_flag == 1)
{
cudaMemcpy(d_shalos[dev], &deviceArray[dev].sHalo[0], chunk_X * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].eHalo_flag == 1)
{
cudaMemcpy(d_ehalos[dev], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].wHalo_flag == 1)
{
cudaMemcpy(d_whalos[dev], &deviceArray[dev].wHalo[0], chunk_Y * sizeof(float), cudaMemcpyHostToDevice);
}
}
//Development phase 2 changes: initialize the buffers used for p2p communication
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(dev);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
cudaMemcpy(x_buffer[dev], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].wHalo_flag == 1)
{
cudaMemcpy(y_buffer[dev], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), cudaMemcpyHostToDevice);
}
}
}
//=================================Domain Decomposition Logic Ends =================================================================
int blocksize = chunk_X;
int threads = chunk_Y;
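//Launch note: the kernel below is launched as <<<blocksize, threads>>> = <<<chunk_X, chunk_Y>>>, so inside jacobi_Simple
//gridDim.x = chunk_X and blockDim.x = chunk_Y; blockIdx.x selects the row of the per-device chunk and threadIdx.x the column
//(index = threadIdx.x + blockDim.x * blockIdx.x, matching the indexing at the top of the kernel).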
//cout << endl<<"blocksize" << blocksize;
//cout << endl<<"thread" << threads;
//Call to kernel
int iterations = 0;
if (numJacobiIt != 0) {
iterations = numJacobiIt;
}
else
{
cout << endl << " No. of iterations is zero exiting... ";
//return;
}
//===========================================CUDA Stream implementation for performance. Phase 2 of Development ====================================================
//===========Algorithm Improvement: Identify the neighbours so that they can be launched together and the exchange can take place without having to wait for computation across all devices============================
cudaStream_t streams[4];//Possible to declare it dynamically ? Yes. Using Vectors.
cudaStream_t streamsforHaloExcahnge[4];
//Note: The default stream of a device is always synchronizing, so separate streams are created for each device
for (int i = 0; i < numDevices; i++)
{
cudaSetDevice(i);
cudaStreamCreate(&streams[i]);
if (p2penabled) {
cudaStreamCreate(&streamsforHaloExcahnge[i]);
}
}
/*Using a pageable memory first*/
//std::vector<float> partial_resultOnHost(chunk_X * chunk_Y);
/*Using a pinned(page locked) memory for performance*/
vector<float*>partial_resultOnHost(numDevices);
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(dev);
cudaMallocHost((void**)&partial_resultOnHost[dev], (chunk_X * chunk_Y) * sizeof(float));
}
//For testing with and without p2p
//p2penabled = false;
//Check performance
cudaError_t status = cudaGetLastError();
//Multithreaded host to minimize kernel launch latency: using OpenMP
high_resolution_clock::time_point t1 = high_resolution_clock::now();
#pragma omp parallel num_threads(numDevices)
{
int dev = omp_get_thread_num();
cudaSetDevice(dev);
//cudaSetDevice(omp_get_thread_num());
//#pragma omp barrier
for (int i = 0; i < iterations; i++)
{
jacobi_Simple <<<blocksize, threads, 0, streams[dev] >> > (d_A0[dev], d_A1[dev], d_A2[dev], d_A3[dev], d_A4[dev], d_Vec_In[dev], d_Vec_Out[dev], d_Rhs[dev], deviceArray[dev].eHalo_flag, deviceArray[dev].wHalo_flag, deviceArray[dev].nHalo_flag, deviceArray[dev].sHalo_flag, d_ehalos[dev], d_whalos[dev], d_nhalos[dev], d_shalos[dev], deviceArray[dev].deviceID, numDevices, decom_Dim);
if (auto err = cudaGetLastError())
{
cout << "Kernal Execution failed: " << cudaGetErrorString(err) << " Iteration :" << i << endl;
//return err;
}
if (i == (iterations - 1))//Copy the results just for the final iteration
{
cudaMemcpyAsync(&partial_resultOnHost[dev][0], d_Vec_Out[dev], domainDivision[dev] * sizeof(float), cudaMemcpyDeviceToHost, streams[dev]);
continue;
}
//Store Halo positions after iteration for exchanging
if (!p2penabled) {
if (numDevices > 1)
{
if (deviceArray[dev].nHalo_flag == 1)
{
cudaMemcpyAsync(nHalo_pinned[dev], d_nhalos[dev], chunk_X * sizeof(float), cudaMemcpyDeviceToHost, streams[dev]);
if (auto err = cudaGetLastError())
{
cout << "d_nhalos copy failed D2H: " << cudaGetErrorString(err) << endl;
//return err;
}
}
if (deviceArray[dev].sHalo_flag == 1)
{
cudaMemcpyAsync(sHalo_pinned[dev], d_shalos[dev], chunk_X * sizeof(float), cudaMemcpyDeviceToHost, streams[dev]);
if (auto err = cudaGetLastError())
{
cout << "d_shalos copy failed D2H: " << cudaGetErrorString(err) << endl;
//return err;
}
}
if (deviceArray[dev].eHalo_flag == 1)
{
cudaMemcpyAsync(eHalo_pinned[dev], d_ehalos[dev], chunk_Y * sizeof(float), cudaMemcpyDeviceToHost, streams[dev]);
if (auto err = cudaGetLastError())
{
cout << "d_ehalos copy failed D2H: " << cudaGetErrorString(err) << endl;
//return err;
}
}
if (deviceArray[dev].wHalo_flag == 1)
{
cudaMemcpyAsync(wHalo_pinned[dev], d_whalos[dev], chunk_Y * sizeof(float), cudaMemcpyDeviceToHost, streams[dev]);
if (auto err = cudaGetLastError())
{
cout << "d_whalos copy failed D2H " << cudaGetErrorString(err) << endl;
//return err;
}
}
}
}
if (auto err = cudaGetLastError())
{
cout << "Data copy failed 2: " << cudaGetErrorString(err) << endl;
//return err;
}
//Exchange Halos after each iteration except the last iteration
if ((i < (iterations - 1)))
{
//Synchronize streams from each device
cudaStreamSynchronize(streams[dev]);
if (auto err = cudaGetLastError())
{
cout << "Stream " << dev << " synchronize error for iteration : " << i << ". ERROR IS: " << cudaGetErrorString(err) << endl;
//return err;
}
if ((!p2penabled)) {
bool exchangeComplete = false;
//Note: Using Pinned memory on Host for Halos -> Performance Approach 1
//exchangehalos_onHost(numDevices, deviceArray, numberOfDevicesAlong_X);
exchangeComplete = exchangehalos_onHostPinned(numDevices, deviceArray, numberOfDevicesAlong_X, nHalo_pinned, sHalo_pinned, eHalo_pinned, wHalo_pinned);
if (exchangeComplete) {
//Swap input output vectors for all devices
swap(d_Vec_In[dev], d_Vec_Out[dev]);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
cudaMemcpyAsync(d_nhalos[dev], nHalo_pinned[dev], chunk_X * sizeof(float), cudaMemcpyHostToDevice, streams[dev]);
}
if (auto err = cudaGetLastError())
{
cout << "d_nhalos copy failed H2D: " << cudaGetErrorString(err) << endl;
//return err;
}
if (deviceArray[dev].sHalo_flag == 1)
{
cudaMemcpyAsync(d_shalos[dev], sHalo_pinned[dev], chunk_X * sizeof(float), cudaMemcpyHostToDevice, streams[dev]);
}
if (auto err = cudaGetLastError())
{
cout << "d_shalos copy failed H2D: " << cudaGetErrorString(err) << endl;
//return err;
}
if (deviceArray[dev].eHalo_flag == 1)
{
cudaMemcpyAsync(d_ehalos[dev], eHalo_pinned[dev], chunk_Y * sizeof(float), cudaMemcpyHostToDevice, streams[dev]);
}
if (auto err = cudaGetLastError())
{
cout << "d_ehalos copy failed H2D: " << cudaGetErrorString(err) << endl;
//return err;
}
if (deviceArray[dev].wHalo_flag == 1)
{
cudaMemcpyAsync(d_whalos[dev], wHalo_pinned[dev], chunk_Y * sizeof(float), cudaMemcpyHostToDevice, streams[dev]);
}
if (auto err = cudaGetLastError())
{
cout << "d_whalos copy failed H2D: " << cudaGetErrorString(err) << endl;
//return err;
}
}
}
else {
#pragma omp barrier
//Swap input output vectors for all devices
swap(d_Vec_In[dev], d_Vec_Out[dev]);
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1)
{
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberOfDevicesAlong_X);
//Exchange Halos
//Receive the neighbour's south halo into the staging buffer
cudaMemcpyPeerAsync(x_buffer[dev], dev, d_shalos[devIDtoNorth], devIDtoNorth, chunk_X * sizeof(float), streams[dev]);
//Send this device's north halo to the neighbour
cudaMemcpyPeerAsync(d_shalos[devIDtoNorth], devIDtoNorth, d_nhalos[dev], dev, chunk_X * sizeof(float), streams[dev]);
cudaMemcpyAsync(d_nhalos[dev], x_buffer[dev], chunk_X * sizeof(float), cudaMemcpyDeviceToDevice, streams[dev]);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1) {
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberOfDevicesAlong_Y);
//Exchange Halos
//Receive the neighbour's west halo into the staging buffer
cudaMemcpyPeerAsync(y_buffer[dev], dev, d_whalos[devIDtoEast], devIDtoEast, chunk_Y * sizeof(float), streams[dev]);
//Send this device's east halo to the neighbour
cudaMemcpyPeerAsync(d_whalos[devIDtoEast], devIDtoEast, d_ehalos[dev], dev, chunk_Y * sizeof(float), streams[dev]);
cudaMemcpyAsync(d_ehalos[dev], y_buffer[dev], chunk_Y * sizeof(float), cudaMemcpyDeviceToDevice, streams[dev]);
}
}
//cudaStreamSynchronize(streams[dev]);
cudaDeviceSynchronize();
#pragma omp barrier
}
//==================================CPU Side computation Ends=================================================================================
}
}
//cout << "No if threads currently: " << omp_get_num_threads() << endl;
if (auto err = cudaGetLastError())
{
cout << "Data copy failed 3: " << cudaGetErrorString(err) << endl;
return err;
}
high_resolution_clock::time_point t2 = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(t2 - t1).count();
cout << endl << "Iterations successful. Time taken in microseconds :" << duration << endl;
//Sync and Destroy streams and events
for (int i = 0; i < numDevices; ++i)
{
cudaSetDevice(i);
//Destroy Events
//Synchronize the streams
cudaStreamSynchronize(streams[i]);
cudaStreamDestroy(streams[i]);
cudaStreamSynchronize(streamsforHaloExcahnge[i]);
cudaStreamDestroy(streamsforHaloExcahnge[i]);
}
//Results copied to disk
for (int dev = 0; dev < numDevices; dev++)
{
sendToPrint(&partial_resultOnHost[dev][0], deviceArray[dev].devicePosition_X, deviceArray[dev].devicePosition_Y, numberOfDevicesAlong_X, chunk_X, chunk_Y, dim, size, result, numDevices, iterations - 1, iterations);
}
//==========================================Performance using CUDA stream ends===========================================================================
//Done in phase 2 of development: Disable P2P across devices
if (p2penabled) {
disableP2P(numDevices);
}
//Free memory on device
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(dev);
cudaFree(d_A0[dev]);
cudaFree(d_A1[dev]);
cudaFree(d_A2[dev]);
cudaFree(d_A3[dev]);
cudaFree(d_A4[dev]);
cudaFree(d_Vec_In[dev]);
cudaFree(d_Vec_Out[dev]);
cudaFree(d_nhalos[dev]);
cudaFree(d_shalos[dev]);
cudaFree(d_ehalos[dev]);
cudaFree(d_whalos[dev]);
cudaFree(d_Rhs[dev]);
cudaFreeHost(partial_resultOnHost[dev]);
cudaFreeHost(nHalo_pinned[dev]);
cudaFreeHost(sHalo_pinned[dev]);
cudaFreeHost(wHalo_pinned[dev]);
cudaFreeHost(eHalo_pinned[dev]);
cudaDeviceReset();
}
cout << endl << "Device Memory free successful." << endl;
//Take care of dynamic mem location
//delete[] domainDivision;
return cudaSuccess;
}
int performJacobi_MultiGPU2D_Decom(unsigned int dim, unsigned int numJacobiIt, float* A0, float* A1, float* A2, float* A3, float* A4, float* rhs, float* x_in)
{
cudaError_t cudaStatus = performMultiGPUJacobi(dim, numJacobiIt, &A0[0], &A1[0], &A2[0], &A3[0], &A4[0], &rhs[0], &x_in[0]);
/*
//Testing OpenMP here
//Fork a team of threads giving them their own copies of variables
#pragma omp parallel for num_threads(4)
for (int i=0; i < 4; i++)
{
// Obtain thread number
int tid = omp_get_thread_num();
printf("Hello World from thread = %d\n", tid);
// Only master thread does this
if (tid == 0)
{
int nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
}
//All threads join master thread and disband
*/
if (cudaStatus != cudaSuccess) {
cout << "Computation failed: " << endl;
return 1;
}
if (cudaStatus != cudaSuccess) {
cout << "Cuda Device Reset failed: " << endl;
return 1;
}
return 0;
}
|
0ba91a687c0c7be19e7037d742dd3030b32cd5d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Noel Lopes is an Assistant Professor at the Polytechnic Institute of Guarda, Portugal
Copyright (C) 2009, 2010, 2011, 2012 Noel de Jesus Mendonça Lopes
This file is part of GPUMLib.
GPUMLib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "MBPkernels.h"
#define OUTPUT_NEURON threadIdx.x
#define OUTPUT_INCLUDING_BIAS (threadIdx.x + 1)
#define NUM_OUTPUTS blockDim.x
#define NEURON threadIdx.y
#define NUM_NEURONS blockDim.y
#define NUM_INPUTS_OUTPUT_NEURON (NUM_NEURONS + 1)
#define SAMPLE blockIdx.x
namespace GPUMLib {
KERNEL CalcLocalGradSelectiveInputs(cudafloat * rmsF, cudafloat * bestRMS, cudafloat maxErrorGrowth, cudafloat * inputs, cudafloat * selectiveNeuronsWeights, cudafloat * selectiveNeuronsBias, cudafloat * weights, cudafloat * localGradientNextLayer, cudafloat * localGradient) {
extern __shared__ cudafloat lg[];
if (bestRMS != nullptr) {
__shared__ cudafloat rms;
__shared__ cudafloat bRMS;
rms = *rmsF;
bRMS = *bestRMS;
if (rms >= bRMS * maxErrorGrowth) return;
}
cudafloat * lgNextLayer = (lg + (NUM_OUTPUTS * NUM_NEURONS));
if (NEURON == 0) lgNextLayer[OUTPUT_NEURON] = localGradientNextLayer[SAMPLE * NUM_OUTPUTS + OUTPUT_NEURON];
int connection = OUTPUT_NEURON * NUM_INPUTS_OUTPUT_NEURON + NEURON + 1;
int threadId = (NEURON * NUM_OUTPUTS + OUTPUT_NEURON);
__syncthreads();
lg[threadId] = weights[connection] * lgNextLayer[OUTPUT_NEURON];
__syncthreads();
int numberElemSum = NUM_OUTPUTS;
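// Pairwise tree reduction of the partial products across output neurons; odd element counts are handled by rounding the next stride up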
for(int sumUpTo = (numberElemSum >> 1); numberElemSum > 1; sumUpTo = (numberElemSum >> 1)) {
int nextNumberElemSum = sumUpTo;
if (numberElemSum & 1) nextNumberElemSum++;
if (OUTPUT_NEURON < sumUpTo) lg[threadId] += lg[threadId + nextNumberElemSum];
numberElemSum = nextNumberElemSum;
__syncthreads();
}
if (OUTPUT_NEURON == 0) {
cudafloat lgn = CUDA_VALUE(0.0);
int n = SAMPLE * NUM_NEURONS + NEURON;
cudafloat i = inputs[n];
if (!IsInfOrNaN(i)) {
cudafloat w = selectiveNeuronsWeights[NEURON];
cudafloat b = selectiveNeuronsBias[NEURON];
if (w != CUDA_VALUE(0.0) || b != CUDA_VALUE(0.0)) { // input may have missing values
cudafloat coshfx = CUDA_COSH(i * w + b);
lgn = lg[threadId] / (coshfx * coshfx); // derivative = 1 / (coshfx * coshfx)
}
}
localGradient[n] = lgn;
}
}
} | 0ba91a687c0c7be19e7037d742dd3030b32cd5d1.cu | /*
Noel Lopes is an Assistant Professor at the Polytechnic Institute of Guarda, Portugal
Copyright (C) 2009, 2010, 2011, 2012 Noel de Jesus Mendonça Lopes
This file is part of GPUMLib.
GPUMLib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "MBPkernels.h"
#define OUTPUT_NEURON threadIdx.x
#define OUTPUT_INCLUDING_BIAS (threadIdx.x + 1)
#define NUM_OUTPUTS blockDim.x
#define NEURON threadIdx.y
#define NUM_NEURONS blockDim.y
#define NUM_INPUTS_OUTPUT_NEURON (NUM_NEURONS + 1)
#define SAMPLE blockIdx.x
namespace GPUMLib {
KERNEL CalcLocalGradSelectiveInputs(cudafloat * rmsF, cudafloat * bestRMS, cudafloat maxErrorGrowth, cudafloat * inputs, cudafloat * selectiveNeuronsWeights, cudafloat * selectiveNeuronsBias, cudafloat * weights, cudafloat * localGradientNextLayer, cudafloat * localGradient) {
extern __shared__ cudafloat lg[];
if (bestRMS != nullptr) {
__shared__ cudafloat rms;
__shared__ cudafloat bRMS;
rms = *rmsF;
bRMS = *bestRMS;
if (rms >= bRMS * maxErrorGrowth) return;
}
cudafloat * lgNextLayer = (lg + (NUM_OUTPUTS * NUM_NEURONS));
if (NEURON == 0) lgNextLayer[OUTPUT_NEURON] = localGradientNextLayer[SAMPLE * NUM_OUTPUTS + OUTPUT_NEURON];
int connection = OUTPUT_NEURON * NUM_INPUTS_OUTPUT_NEURON + NEURON + 1;
int threadId = (NEURON * NUM_OUTPUTS + OUTPUT_NEURON);
__syncthreads();
lg[threadId] = weights[connection] * lgNextLayer[OUTPUT_NEURON];
__syncthreads();
int numberElemSum = NUM_OUTPUTS;
for(int sumUpTo = (numberElemSum >> 1); numberElemSum > 1; sumUpTo = (numberElemSum >> 1)) {
int nextNumberElemSum = sumUpTo;
if (numberElemSum & 1) nextNumberElemSum++;
if (OUTPUT_NEURON < sumUpTo) lg[threadId] += lg[threadId + nextNumberElemSum];
numberElemSum = nextNumberElemSum;
__syncthreads();
}
if (OUTPUT_NEURON == 0) {
cudafloat lgn = CUDA_VALUE(0.0);
int n = SAMPLE * NUM_NEURONS + NEURON;
cudafloat i = inputs[n];
if (!IsInfOrNaN(i)) {
cudafloat w = selectiveNeuronsWeights[NEURON];
cudafloat b = selectiveNeuronsBias[NEURON];
if (w != CUDA_VALUE(0.0) || b != CUDA_VALUE(0.0)) { // input may have missing values
cudafloat coshfx = CUDA_COSH(i * w + b);
lgn = lg[threadId] / (coshfx * coshfx); // derivative = 1 / (coshfx * coshfx)
}
}
localGradient[n] = lgn;
}
}
} |
1ac7999bc3b23778bc460838525eefc1e0fabf55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/gpu_002_cost_mapping.cuh"
__device__ inline Point2I32 threadOnGlobalMap(int idx, int idy, int pose_x, int pose_y, int cmap_refresh_radius_pix)
{
Point2I32 point;
point.x = pose_x - cmap_refresh_radius_pix + idx;
point.y = pose_y - cmap_refresh_radius_pix + idy;
return point;
}
__device__ inline int threadGlobalSector1(Point2I32 pix, int block_x, int block_y, int map_y, int cost_mask_radius)
{
Point2I32 point;
point.x = pix.x - cost_mask_radius;
point.y = pix.y - cost_mask_radius;
return point.x * map_y + point.y;
}
__device__ inline int threadGlobalSector2(Point2I32 pix, int block_x, int block_y, int map_y, int cost_mask_radius)
{
Point2I32 point;
point.x = pix.x - cost_mask_radius + block_x;
point.y = pix.y - cost_mask_radius;
return point.x * map_y + point.y;
}
__device__ inline int threadGlobalSector3(Point2I32 pix, int block_x, int block_y, int map_y, int cost_mask_radius)
{
Point2I32 point;
point.x = pix.x - cost_mask_radius;
point.y = pix.y - cost_mask_radius + block_y;
return point.x * map_y + point.y;
}
__device__ inline int threadGlobalSector4(Point2I32 pix, int block_x, int block_y, int map_y, int cost_mask_radius)
{
Point2I32 point;
point.x = pix.x - cost_mask_radius + block_x;
point.y = pix.y - cost_mask_radius + block_y;
return point.x * map_y + point.y;
}
__device__ inline int threadSharedSector1(int sidx, int sidy, int block_x, int block_y, int shared_dim_y)
{
return sidx * shared_dim_y + sidy;
}
__device__ inline int threadSharedSector2(int sidx, int sidy, int block_x, int block_y, int shared_dim_y)
{
return (sidx + block_x) * shared_dim_y + sidy;
}
__device__ inline int threadSharedSector3(int sidx, int sidy, int block_x, int block_y, int shared_dim_y)
{
return sidx * shared_dim_y + (sidy + block_y);
}
__device__ inline int threadSharedSector4(int sidx, int sidy, int block_x, int block_y, int shared_dim_y)
{
return (sidx + block_x) * shared_dim_y + (sidy + block_y);
}
__device__ inline void loadMapToShared(
const int16_t* heightmap,
int16_t* costmap,
int16_t* smem_hmap,
const int cost_mask_radius,
const int sidx,
const int sidy,
const int shared_dim_x,
const int shared_dim_y,
const Point2I32 pix,
const int block_x,
const int block_y,
const int map_y,
const int cmap_refresh_radius_pix
)
{
int offset = 2 * cost_mask_radius - 2;
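// Each thread loads its own cell plus, near the block edges, extra cells so the shared tile covers the block and a (cost_mask_radius - 1)-wide apron on every side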
int gid_1 = threadGlobalSector1(pix, block_x, block_y, map_y, cost_mask_radius);
int gid_2 = threadGlobalSector2(pix, block_x, block_y, map_y, cost_mask_radius);
int gid_3 = threadGlobalSector3(pix, block_x, block_y, map_y, cost_mask_radius);
int gid_4 = threadGlobalSector4(pix, block_x, block_y, map_y, cost_mask_radius);
int sid_1 = threadSharedSector1(sidx, sidy, block_x, block_y, shared_dim_y);
int sid_2 = threadSharedSector2(sidx, sidy, block_x, block_y, shared_dim_y);
int sid_3 = threadSharedSector3(sidx, sidy, block_x, block_y, shared_dim_y);
int sid_4 = threadSharedSector4(sidx, sidy, block_x, block_y, shared_dim_y);
// 1
smem_hmap[sid_1] = heightmap[gid_1];
// costmap[gid_1] = smem_hmap[sid_1];
// 2
if(sidx < offset)
{
smem_hmap[sid_2] = heightmap[gid_2];
// costmap[gid_2] = smem_hmap[sid_2];
}
//3
if(sidy < offset)
{
smem_hmap[sid_3] = heightmap[gid_3];
// costmap[gid_3] = smem_hmap[sid_3];
}
// 4
if(sidx < offset && sidy < offset)
{
smem_hmap[sid_4] = heightmap[gid_4];
// costmap[gid_4] = smem_hmap[sid_4];
}
}
__device__ void calcCostVariance(
const int16_t *smem_hmap,
int16_t* costmap,
const int cost_mask_radius,
const Point2I32 pix,
const int pix_id,
const int map_x,
const int map_y,
const int stid,
const int sidx,
const int sidy,
const int shared_dim_y,
const int unknown_field_cost
)
{
float avrg = 0;
int mask_dim = (2 * cost_mask_radius) - 1;
int smem_idx = sidx * shared_dim_y + sidy;
int known_heights_conuter = 0;
int unknown_heights_conuter = 0;
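// Cost = sqrt of the summed squared deviations from the mean of the known heights, plus unknown_field_cost for every unknown cell in the mask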
for(int x = 0; x < mask_dim; x++)
{
for(int y = 0; y < mask_dim; y++)
{
if(smem_hmap[smem_idx + x*shared_dim_y + y] != UNKNOWN)
{
avrg += (float) smem_hmap[smem_idx + x*shared_dim_y + y];
known_heights_conuter++;
}
else
{
unknown_heights_conuter++;
}
}
}
avrg /= (float) known_heights_conuter;
float variance = 0;
for(int x = 0; x < mask_dim; x++)
{
for(int y = 0; y < mask_dim; y++)
{
if(smem_hmap[smem_idx + x*shared_dim_y + y] != UNKNOWN)
{
float diff = (float) (avrg - smem_hmap[smem_idx + x*shared_dim_y + y]);
variance += diff * diff;
}
}
}
variance = sqrtf(variance);
int pixel_cost = variance + unknown_heights_conuter * unknown_field_cost;
costmap[pix_id] = (int16_t) (pixel_cost);
}
__global__ void costMappingKernel(
const int16_t *heightmap,
int16_t* costmap,
const int map_x,
const int map_y,
const int pose_x,
const int pose_y,
const int shared_dim_x,
const int shared_dim_y,
const int cmap_refresh_radius_pix,
const int cost_mask_radius,
const int unknown_field_cost)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int tid = idy + idx * gridDim.y * blockDim.y;
int sidx = threadIdx.x;
int sidy = threadIdx.y;
int stid = sidx * blockDim.y +sidy;
// TRANSFORMING LOCAL THREAD IDX TO PIXEL IDX IN GLOBAL MAP FRAME
// (FOR THIS PIXEL COST WILL BE CALCULATED)
Point2I32 pix = threadOnGlobalMap(idx, idy, pose_x, pose_y, cmap_refresh_radius_pix);
// INDEX OF PIXEL IN GLOBAL MAP
int pix_id = pix.y + pix.x * map_y;
// SHARED MEMORY FOR STORING THE PART OF THE HEIGHTMAP NEEDED BY THIS BLOCK
// ARRAY IS ALLOCATED WITH DYNAMIC SIZE (NOT KNOWN AT COMPILE TIME - extern)
extern __shared__ int16_t smem_hmap[];
loadMapToShared(
heightmap,
costmap,
smem_hmap,
cost_mask_radius,
sidx,
sidy,
shared_dim_x,
shared_dim_y,
pix,
blockDim.x,
blockDim.y,
map_y,
cmap_refresh_radius_pix
);
__syncthreads();
// CHECKING IF THE PIXEL IS INSIDE THE GPU MAP AND NOT CLOSER TO THE EDGE THAN "cost_mask_radius"
if(
pix.x >= cost_mask_radius &&
pix.x <= map_x - cost_mask_radius &&
pix.y >= cost_mask_radius &&
pix.y <= map_y - cost_mask_radius
)
{
// costmap[pix_id] = 100;
//
int dist_x = (pix.x - pose_x);
int dist_y = (pix.y - pose_y);
float dist_from_center = sqrtf((float)dist_x*dist_x + (float)dist_y*dist_y);
if(dist_from_center <= cmap_refresh_radius_pix)
{
calcCostVariance(
smem_hmap,
costmap,
cost_mask_radius,
pix,
pix_id,
map_x,
map_y,
stid,
sidx,
sidy,
shared_dim_y,
unknown_field_cost
);
}
}
// costmap[pix_id] = smem_hmap[stid];
}
GpuCostMapping::GpuCostMapping(_RobotPlannerMaps *_rpm, _ROSBuffor *_ros)
{
this->_rpm = _rpm;
this->_ros = _ros;
}
void GpuCostMapping::drawInitialCostmapBorders()
{
_rpm->dev_costmap.drawBorders(costmap_borders_value, cost_mask_radius);
}
void GpuCostMapping::copyInputToDevice()
{
}
void GpuCostMapping::executeKernel()
{
int block_x = 32;
int block_y = 32;
int size_x = 2*cmap_refresh_radius_pix;
int size_y = 2*cmap_refresh_radius_pix;
int grid_x = (size_x + block_x - 1) / block_x;
int grid_y = (size_y + block_y - 1) / block_y;
dim3 grid(grid_x, grid_y, 1);
dim3 block(block_x, block_y, 1);
int shared_dim_x = block_x + 2 * cost_mask_radius - 2;
int shared_dim_y = block_y + 2 * cost_mask_radius - 2;
int shared_size = shared_dim_x * shared_dim_y * sizeof(int16_t);
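// One thread per output pixel over a (2*cmap_refresh_radius_pix)^2 window around the robot; the dynamic shared allocation holds the block tile plus its apron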
hipLaunchKernelGGL(( costMappingKernel) , dim3(grid), dim3(block), shared_size , 0,
_rpm->dev_heightmap.data,
_rpm->dev_costmap.data,
_rpm->dev_costmap.size_x,
_rpm->dev_costmap.size_y,
_rpm->robot_onmap_x,
_rpm->robot_onmap_y,
shared_dim_x,
shared_dim_y,
this->cmap_refresh_radius_pix,
this->cost_mask_radius,
this->unknown_field_cost);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
}
void GpuCostMapping::copyOutputToHost()
{
gpuErrchk( hipMemcpy(_rpm->host_costmap.data, _rpm->dev_costmap.data, _rpm->dev_costmap.size() * sizeof(int16_t), hipMemcpyDeviceToHost) );
}
void GpuCostMapping::display()
{
_rpm->host_costmap.display("costmap");
}
| 1ac7999bc3b23778bc460838525eefc1e0fabf55.cu | #include "../include/gpu_002_cost_mapping.cuh"
__device__ inline Point2I32 threadOnGlobalMap(int idx, int idy, int pose_x, int pose_y, int cmap_refresh_radius_pix)
{
Point2I32 point;
point.x = pose_x - cmap_refresh_radius_pix + idx;
point.y = pose_y - cmap_refresh_radius_pix + idy;
return point;
}
__device__ inline int threadGlobalSector1(Point2I32 pix, int block_x, int block_y, int map_y, int cost_mask_radius)
{
Point2I32 point;
point.x = pix.x - cost_mask_radius;
point.y = pix.y - cost_mask_radius;
return point.x * map_y + point.y;
}
__device__ inline int threadGlobalSector2(Point2I32 pix, int block_x, int block_y, int map_y, int cost_mask_radius)
{
Point2I32 point;
point.x = pix.x - cost_mask_radius + block_x;
point.y = pix.y - cost_mask_radius;
return point.x * map_y + point.y;
}
__device__ inline int threadGlobalSector3(Point2I32 pix, int block_x, int block_y, int map_y, int cost_mask_radius)
{
Point2I32 point;
point.x = pix.x - cost_mask_radius;
point.y = pix.y - cost_mask_radius + block_y;
return point.x * map_y + point.y;
}
__device__ inline int threadGlobalSector4(Point2I32 pix, int block_x, int block_y, int map_y, int cost_mask_radius)
{
Point2I32 point;
point.x = pix.x - cost_mask_radius + block_x;
point.y = pix.y - cost_mask_radius + block_y;
return point.x * map_y + point.y;
}
__device__ inline int threadSharedSector1(int sidx, int sidy, int block_x, int block_y, int shared_dim_y)
{
return sidx * shared_dim_y + sidy;
}
__device__ inline int threadSharedSector2(int sidx, int sidy, int block_x, int block_y, int shared_dim_y)
{
return (sidx + block_x) * shared_dim_y + sidy;
}
__device__ inline int threadSharedSector3(int sidx, int sidy, int block_x, int block_y, int shared_dim_y)
{
return sidx * shared_dim_y + (sidy + block_y);
}
__device__ inline int threadSharedSector4(int sidx, int sidy, int block_x, int block_y, int shared_dim_y)
{
return (sidx + block_x) * shared_dim_y + (sidy + block_y);
}
__device__ inline void loadMapToShared(
const int16_t* heightmap,
int16_t* costmap,
int16_t* smem_hmap,
const int cost_mask_radius,
const int sidx,
const int sidy,
const int shared_dim_x,
const int shared_dim_y,
const Point2I32 pix,
const int block_x,
const int block_y,
const int map_y,
const int cmap_refresh_radius_pix
)
{
int offset = 2 * cost_mask_radius - 2;
int gid_1 = threadGlobalSector1(pix, block_x, block_y, map_y, cost_mask_radius);
int gid_2 = threadGlobalSector2(pix, block_x, block_y, map_y, cost_mask_radius);
int gid_3 = threadGlobalSector3(pix, block_x, block_y, map_y, cost_mask_radius);
int gid_4 = threadGlobalSector4(pix, block_x, block_y, map_y, cost_mask_radius);
int sid_1 = threadSharedSector1(sidx, sidy, block_x, block_y, shared_dim_y);
int sid_2 = threadSharedSector2(sidx, sidy, block_x, block_y, shared_dim_y);
int sid_3 = threadSharedSector3(sidx, sidy, block_x, block_y, shared_dim_y);
int sid_4 = threadSharedSector4(sidx, sidy, block_x, block_y, shared_dim_y);
// 1
smem_hmap[sid_1] = heightmap[gid_1];
// costmap[gid_1] = smem_hmap[sid_1];
// 2
if(sidx < offset)
{
smem_hmap[sid_2] = heightmap[gid_2];
// costmap[gid_2] = smem_hmap[sid_2];
}
//3
if(sidy < offset)
{
smem_hmap[sid_3] = heightmap[gid_3];
// costmap[gid_3] = smem_hmap[sid_3];
}
// 4
if(sidx < offset && sidy < offset)
{
smem_hmap[sid_4] = heightmap[gid_4];
// costmap[gid_4] = smem_hmap[sid_4];
}
}
__device__ void calcCostVariance(
const int16_t *smem_hmap,
int16_t* costmap,
const int cost_mask_radius,
const Point2I32 pix,
const int pix_id,
const int map_x,
const int map_y,
const int stid,
const int sidx,
const int sidy,
const int shared_dim_y,
const int unknown_field_cost
)
{
float avrg = 0;
int mask_dim = (2 * cost_mask_radius) - 1;
int smem_idx = sidx * shared_dim_y + sidy;
int known_heights_conuter = 0;
int unknown_heights_conuter = 0;
for(int x = 0; x < mask_dim; x++)
{
for(int y = 0; y < mask_dim; y++)
{
if(smem_hmap[smem_idx + x*shared_dim_y + y] != UNKNOWN)
{
avrg += (float) smem_hmap[smem_idx + x*shared_dim_y + y];
known_heights_conuter++;
}
else
{
unknown_heights_conuter++;
}
}
}
avrg /= (float) known_heights_conuter;
float variance = 0;
for(int x = 0; x < mask_dim; x++)
{
for(int y = 0; y < mask_dim; y++)
{
if(smem_hmap[smem_idx + x*shared_dim_y + y] != UNKNOWN)
{
float diff = (float) (avrg - smem_hmap[smem_idx + x*shared_dim_y + y]);
variance += diff * diff;
}
}
}
variance = sqrtf(variance);
int pixel_cost = variance + unknown_heights_conuter * unknown_field_cost;
costmap[pix_id] = (int16_t) (pixel_cost);
}
__global__ void costMappingKernel(
const int16_t *heightmap,
int16_t* costmap,
const int map_x,
const int map_y,
const int pose_x,
const int pose_y,
const int shared_dim_x,
const int shared_dim_y,
const int cmap_refresh_radius_pix,
const int cost_mask_radius,
const int unknown_field_cost)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int tid = idy + idx * gridDim.y * blockDim.y;
int sidx = threadIdx.x;
int sidy = threadIdx.y;
int stid = sidx * blockDim.y +sidy;
// TRANSFORMING LOCAL THREAD IDX TO PIXEL IDX IN GLOBAL MAP FRAME
// (FOR THIS PIXEL COST WILL BE CALCULATED)
Point2I32 pix = threadOnGlobalMap(idx, idy, pose_x, pose_y, cmap_refresh_radius_pix);
// INDEX OF PIXEL IN GLOBAL MAP
int pix_id = pix.y + pix.x * map_y;
// SHARED MEMORY FOR STORING THE PART OF THE HEIGHTMAP NEEDED BY THIS BLOCK
// ARRAY IS ALLOCATED WITH DYNAMIC SIZE (NOT KNOWN AT COMPILE TIME - extern)
extern __shared__ int16_t smem_hmap[];
loadMapToShared(
heightmap,
costmap,
smem_hmap,
cost_mask_radius,
sidx,
sidy,
shared_dim_x,
shared_dim_y,
pix,
blockDim.x,
blockDim.y,
map_y,
cmap_refresh_radius_pix
);
__syncthreads();
// CHECKING IF THE PIXEL IS INSIDE THE GPU MAP AND NOT CLOSER TO THE EDGE THAN "cost_mask_radius"
if(
pix.x >= cost_mask_radius &&
pix.x <= map_x - cost_mask_radius &&
pix.y >= cost_mask_radius &&
pix.y <= map_y - cost_mask_radius
)
{
// costmap[pix_id] = 100;
//
int dist_x = (pix.x - pose_x);
int dist_y = (pix.y - pose_y);
float dist_from_center = sqrtf((float)dist_x*dist_x + (float)dist_y*dist_y);
if(dist_from_center <= cmap_refresh_radius_pix)
{
calcCostVariance(
smem_hmap,
costmap,
cost_mask_radius,
pix,
pix_id,
map_x,
map_y,
stid,
sidx,
sidy,
shared_dim_y,
unknown_field_cost
);
}
}
// costmap[pix_id] = smem_hmap[stid];
}
GpuCostMapping::GpuCostMapping(_RobotPlannerMaps *_rpm, _ROSBuffor *_ros)
{
this->_rpm = _rpm;
this->_ros = _ros;
}
void GpuCostMapping::drawInitialCostmapBorders()
{
_rpm->dev_costmap.drawBorders(costmap_borders_value, cost_mask_radius);
}
void GpuCostMapping::copyInputToDevice()
{
}
void GpuCostMapping::executeKernel()
{
int block_x = 32;
int block_y = 32;
int size_x = 2*cmap_refresh_radius_pix;
int size_y = 2*cmap_refresh_radius_pix;
int grid_x = (size_x + block_x - 1) / block_x;
int grid_y = (size_y + block_y - 1) / block_y;
dim3 grid(grid_x, grid_y, 1);
dim3 block(block_x, block_y, 1);
int shared_dim_x = block_x + 2 * cost_mask_radius - 2;
int shared_dim_y = block_y + 2 * cost_mask_radius - 2;
int shared_size = shared_dim_x * shared_dim_y * sizeof(int16_t);
costMappingKernel <<< grid, block, shared_size >>> (
_rpm->dev_heightmap.data,
_rpm->dev_costmap.data,
_rpm->dev_costmap.size_x,
_rpm->dev_costmap.size_y,
_rpm->robot_onmap_x,
_rpm->robot_onmap_y,
shared_dim_x,
shared_dim_y,
this->cmap_refresh_radius_pix,
this->cost_mask_radius,
this->unknown_field_cost);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
void GpuCostMapping::copyOutputToHost()
{
gpuErrchk( cudaMemcpy(_rpm->host_costmap.data, _rpm->dev_costmap.data, _rpm->dev_costmap.size() * sizeof(int16_t), cudaMemcpyDeviceToHost) );
}
void GpuCostMapping::display()
{
_rpm->host_costmap.display("costmap");
}
|
cfbef6c21df054d33614511f3477ccd3a6607e5a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string>
#include <iostream>
#include "open3d/Open3D.h"
#include <stdio.h>
__global__
void outputFromGPU(){
int i = threadIdx.x; // ID of thread 0-31 in this example
printf("Hello world from GPU this is thread #%d\n",i);
}
int main(int argc, char ** argv) {
// default values for command line options
hipLaunchKernelGGL(( outputFromGPU), dim3(1),dim3(32), 0, 0, );
char* path_file=NULL;
// parse command line
for (int i=1; i<argc; i++) {
if (0 == strcmp(argv[i], "-i")) {
i++;
if (i<argc) path_file = argv[i];
}
/*else if (0 == strcmp(argv[i], "-dx")) {
i++;
if (i<argc) opt_dx = atof(argv[i]);
}*/
}
std::cout<<path_file<<"\n";
auto pcd = open3d::io::CreatePointCloudFromFile(path_file);
std::cout<<pcd<<"\n";
std::cout<<pcd->GetCenter()<<"\n";
open3d::visualization::Visualizer visualizer;
std::shared_ptr<open3d::geometry::PointCloud> pcl_ptr(new open3d::geometry::PointCloud);
*pcl_ptr = *pcd;
pcl_ptr->NormalizeNormals();
visualizer.CreateVisualizerWindow("Open3D", 1600, 900);
visualizer.AddGeometry(pcl_ptr);
std::cout<<"RUN WIN"<<"\n";
visualizer.Run();
visualizer.DestroyVisualizerWindow();
std::cout<<"HELLO"<<"\n";
return 0;
} | cfbef6c21df054d33614511f3477ccd3a6607e5a.cu | #include <string>
#include <iostream>
#include "open3d/Open3D.h"
#include <stdio.h>
__global__
void outputFromGPU(){
int i = threadIdx.x; // ID of thread 0-31 in this example
printf("Hello world from GPU this is thread #%d\n",i);
}
int main(int argc, char ** argv) {
// default values for command line options
outputFromGPU<<<1,32>>>();
char* path_file=NULL;
// parse command line
for (int i=1; i<argc; i++) {
if (0 == strcmp(argv[i], "-i")) {
i++;
if (i<argc) path_file = argv[i];
}
/*else if (0 == strcmp(argv[i], "-dx")) {
i++;
if (i<argc) opt_dx = atof(argv[i]);
}*/
}
std::cout<<path_file<<"\n";
auto pcd = open3d::io::CreatePointCloudFromFile(path_file);
std::cout<<pcd<<"\n";
std::cout<<pcd->GetCenter()<<"\n";
open3d::visualization::Visualizer visualizer;
std::shared_ptr<open3d::geometry::PointCloud> pcl_ptr(new open3d::geometry::PointCloud);
*pcl_ptr = *pcd;
pcl_ptr->NormalizeNormals();
visualizer.CreateVisualizerWindow("Open3D", 1600, 900);
visualizer.AddGeometry(pcl_ptr);
std::cout<<"RUN WIN"<<"\n";
visualizer.Run();
visualizer.DestroyVisualizerWindow();
std::cout<<"HELLO"<<"\n";
return 0;
} |
ef58322baf926f57b77b6e3a75f72341bb3891b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <THHUNN/THHUNN.h>
#include <THH/THHTensor.hpp>
#include <THHUNN/common.h>
#include <THHUNN/upsampling.h>
#include <THH/THHDeviceTensor.cuh>
#include <THH/THHDeviceTensorUtils.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <TH/THHalf.h>
#include <THHUNN/THHHalfAutoNumerics.cuh>
#include <THH/THHAtomics.cuh>
#include <c10/macros/Macros.h>
template<typename Dtype, typename Acctype>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void caffe_gpu_interp2_kernel(const int n,
const Acctype rdepth, const Acctype rheight, const Acctype rwidth, const bool align_corners,
const THCDeviceTensor<Dtype, 5> data1, THCDeviceTensor<Dtype, 5> data2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int depth1 = data1.getSize(2);
const int height1 = data1.getSize(3);
const int width1 = data1.getSize(4);
const int depth2 = data2.getSize(2);
const int height2 = data2.getSize(3);
const int width2 = data2.getSize(4);
if (index < n) {
const int w2 = (index % (height2*width2)) % width2; // 0:width2-1
const int h2 = (index % (height2*width2)) / width2; // 0:height2-1
const int t2 = index / (height2*width2); // 0:depth2-1
// special case: just copy
if (depth1 == depth2 && height1 == height2 && width1 == width2) {
const int t1 = t2;
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data1[n][c][t1][h1][w1];
data2[n][c][t2][h2][w2] = val;
}
}
return;
}
//
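// Map the output voxel to fractional source coordinates and build the linear interpolation weights along depth (t), height (h) and width (w)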
const Acctype t1r = linear_upsampling_compute_source_index<Acctype>(rdepth, t2, align_corners);
const int t1 = t1r;
const int t1p = (t1 < depth1 - 1) ? 1 : 0;
const Acctype t1lambda = t1r - t1;
const Acctype t0lambda = Acctype(1) - t1lambda;
//
const Acctype h1r = linear_upsampling_compute_source_index<Acctype>(rheight, h2, align_corners);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Acctype h1lambda = h1r - h1;
const Acctype h0lambda = Acctype(1) - h1lambda;
//
const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Acctype w1lambda = w1r - w1;
const Acctype w0lambda = Acctype(1) - w1lambda;
//
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Acctype val = t0lambda * (h0lambda * (w0lambda * data1[n][c][t1][h1][w1]
+ w1lambda * data1[n][c][t1][h1][w1+w1p])
+ h1lambda * (w0lambda * data1[n][c][t1][h1+h1p][w1]
+ w1lambda * data1[n][c][t1][h1+h1p][w1+w1p]))
+ t1lambda * (h0lambda * (w0lambda * data1[n][c][t1+t1p][h1][w1]
+ w1lambda * data1[n][c][t1+t1p][h1][w1+w1p])
+ h1lambda * (w0lambda * data1[n][c][t1+t1p][h1+h1p][w1]
+ w1lambda * data1[n][c][t1+t1p][h1+h1p][w1+w1p]));
data2[n][c][t2][h2][w2] = ScalarConvert<Acctype, Dtype>::to(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename Dtype, typename Acctype>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void caffe_gpu_interp2_kernel_backward(const int n,
const Acctype rdepth, const Acctype rheight, const Acctype rwidth, const bool align_corners,
THCDeviceTensor<Dtype, 5> data1, const THCDeviceTensor<Dtype, 5> data2){
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int depth1 = data1.getSize(2);
const int height1 = data1.getSize(3);
const int width1 = data1.getSize(4);
const int depth2 = data2.getSize(2);
const int height2 = data2.getSize(3);
const int width2 = data2.getSize(4);
if (index < n) {
const int w2 = (index % (height2*width2)) % width2; // 0:width2-1
const int h2 = (index % (height2*width2)) / width2; // 0:height2-1
const int t2 = index / (height2*width2); // 0:depth2-1
// special case: just copy
if (depth1 == depth2 && height1 == height2 && width1 == width2) {
const int t1 = t2;
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data2[n][c][t1][h1][w1];
data1[n][c][t2][h2][w2] += val;
}
}
return;
}
//
const Acctype t1r = linear_upsampling_compute_source_index<Acctype>(rdepth, t2, align_corners);
const int t1 = t1r;
const int t1p = (t1 < depth1 - 1) ? 1 : 0;
const Acctype t1lambda = t1r - t1;
const Acctype t0lambda = Acctype(1) - t1lambda;
//
const Acctype h1r = linear_upsampling_compute_source_index<Acctype>(rheight, h2, align_corners);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Acctype h1lambda = h1r - h1;
const Acctype h0lambda = Acctype(1) - h1lambda;
//
const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Acctype w1lambda = w1r - w1;
const Acctype w0lambda = Acctype(1) - w1lambda;
//
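// Scatter the upstream gradient to the eight neighbouring source voxels; atomicAdd is required because several output voxels can map to the same source voxel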
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype d2val = data2[n][c][t2][h2][w2];
atomicAdd(data1[n][c][t1][h1][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h0lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1][h1][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h0lambda * w1lambda * d2val));
atomicAdd(data1[n][c][t1][h1+h1p][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h1lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1][h1+h1p][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h1lambda * w1lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h0lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h0lambda * w1lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1+h1p][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h1lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1+h1p][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h1lambda * w1lambda * d2val));
}
}
}
/////////////////////////////////////////////////////////
}
#include <THHUNN/generic/VolumetricUpSamplingTrilinear.hip>
#include <THH/THHGenerateFloatTypes.h>
| ef58322baf926f57b77b6e3a75f72341bb3891b8.cu | // Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <THCUNN/THCUNN.h>
#include <THC/THCTensor.hpp>
#include <THCUNN/common.h>
#include <THCUNN/upsampling.h>
#include <THC/THCDeviceTensor.cuh>
#include <THC/THCDeviceTensorUtils.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <TH/THHalf.h>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <THC/THCAtomics.cuh>
#include <c10/macros/Macros.h>
template<typename Dtype, typename Acctype>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void caffe_gpu_interp2_kernel(const int n,
const Acctype rdepth, const Acctype rheight, const Acctype rwidth, const bool align_corners,
const THCDeviceTensor<Dtype, 5> data1, THCDeviceTensor<Dtype, 5> data2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int depth1 = data1.getSize(2);
const int height1 = data1.getSize(3);
const int width1 = data1.getSize(4);
const int depth2 = data2.getSize(2);
const int height2 = data2.getSize(3);
const int width2 = data2.getSize(4);
if (index < n) {
const int w2 = (index % (height2*width2)) % width2; // 0:width2-1
const int h2 = (index % (height2*width2)) / width2; // 0:height2-1
const int t2 = index / (height2*width2); // 0:depth2-1
// special case: just copy
if (depth1 == depth2 && height1 == height2 && width1 == width2) {
const int t1 = t2;
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data1[n][c][t1][h1][w1];
data2[n][c][t2][h2][w2] = val;
}
}
return;
}
//
const Acctype t1r = linear_upsampling_compute_source_index<Acctype>(rdepth, t2, align_corners);
const int t1 = t1r;
const int t1p = (t1 < depth1 - 1) ? 1 : 0;
const Acctype t1lambda = t1r - t1;
const Acctype t0lambda = Acctype(1) - t1lambda;
//
const Acctype h1r = linear_upsampling_compute_source_index<Acctype>(rheight, h2, align_corners);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Acctype h1lambda = h1r - h1;
const Acctype h0lambda = Acctype(1) - h1lambda;
//
const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Acctype w1lambda = w1r - w1;
const Acctype w0lambda = Acctype(1) - w1lambda;
//
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Acctype val = t0lambda * (h0lambda * (w0lambda * data1[n][c][t1][h1][w1]
+ w1lambda * data1[n][c][t1][h1][w1+w1p])
+ h1lambda * (w0lambda * data1[n][c][t1][h1+h1p][w1]
+ w1lambda * data1[n][c][t1][h1+h1p][w1+w1p]))
+ t1lambda * (h0lambda * (w0lambda * data1[n][c][t1+t1p][h1][w1]
+ w1lambda * data1[n][c][t1+t1p][h1][w1+w1p])
+ h1lambda * (w0lambda * data1[n][c][t1+t1p][h1+h1p][w1]
+ w1lambda * data1[n][c][t1+t1p][h1+h1p][w1+w1p]));
data2[n][c][t2][h2][w2] = ScalarConvert<Acctype, Dtype>::to(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename Dtype, typename Acctype>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void caffe_gpu_interp2_kernel_backward(const int n,
const Acctype rdepth, const Acctype rheight, const Acctype rwidth, const bool align_corners,
THCDeviceTensor<Dtype, 5> data1, const THCDeviceTensor<Dtype, 5> data2){
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int depth1 = data1.getSize(2);
const int height1 = data1.getSize(3);
const int width1 = data1.getSize(4);
const int depth2 = data2.getSize(2);
const int height2 = data2.getSize(3);
const int width2 = data2.getSize(4);
if (index < n) {
const int w2 = (index % (height2*width2)) % width2; // 0:width2-1
const int h2 = (index % (height2*width2)) / width2; // 0:height2-1
const int t2 = index / (height2*width2); // 0:depth2-1
// special case: just copy
if (depth1 == depth2 && height1 == height2 && width1 == width2) {
const int t1 = t2;
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data2[n][c][t1][h1][w1];
data1[n][c][t2][h2][w2] += val;
}
}
return;
}
//
const Acctype t1r = linear_upsampling_compute_source_index<Acctype>(rdepth, t2, align_corners);
const int t1 = t1r;
const int t1p = (t1 < depth1 - 1) ? 1 : 0;
const Acctype t1lambda = t1r - t1;
const Acctype t0lambda = Acctype(1) - t1lambda;
//
const Acctype h1r = linear_upsampling_compute_source_index<Acctype>(rheight, h2, align_corners);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Acctype h1lambda = h1r - h1;
const Acctype h0lambda = Acctype(1) - h1lambda;
//
const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Acctype w1lambda = w1r - w1;
const Acctype w0lambda = Acctype(1) - w1lambda;
//
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype d2val = data2[n][c][t2][h2][w2];
atomicAdd(data1[n][c][t1][h1][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h0lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1][h1][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h0lambda * w1lambda * d2val));
atomicAdd(data1[n][c][t1][h1+h1p][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h1lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1][h1+h1p][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h1lambda * w1lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h0lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h0lambda * w1lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1+h1p][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h1lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1+h1p][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h1lambda * w1lambda * d2val));
}
}
}
/////////////////////////////////////////////////////////
}
#include <THCUNN/generic/VolumetricUpSamplingTrilinear.cu>
#include <THC/THCGenerateFloatTypes.h>
|
afbaee60c18689ca75f5e931de0d5bf0ead8616a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "real.h"
#include "math.h"
#define SECTION_SIZE 512
//ACTUALLY THIS SEEMS WRONG: WE DO NOT KNOW THE ORDER OF OPERATIONS OF THE ADDING. NEED TO DOUBLE BUFFER THE ARRAY XY TO GUARANTEE THAT THIS WORKS.
__global__ void ksscan_kernel(real* X, real* Y, int inputsize){
__shared__ real XY[SECTION_SIZE];
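//Kogge-Stone style inclusive scan over one block's section; as noted above, XY is read and written in the same iteration, so the result is only well defined with double buffering or a second barrier between the read and the write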
int i =blockIdx.x*blockDim.x+threadIdx.x;
if (i < inputsize){
XY[threadIdx.x]=X[i];
for (int stride=1; stride<blockDim.x; stride*=2 ){
__syncthreads();
if (threadIdx.x >= stride) XY[threadIdx.x]+= XY[threadIdx.x-stride];
}
Y[i]=XY[threadIdx.x];
}
}
void ksscan(real* d_X, real* d_Y,int inputsize){
hipLaunchKernelGGL(( ksscan_kernel), dim3(ceil(inputsize/ (real) SECTION_SIZE)),dim3(SECTION_SIZE), 0, 0, d_X,d_Y,inputsize);
}
| afbaee60c18689ca75f5e931de0d5bf0ead8616a.cu | #include "real.h"
#include "math.h"
#define SECTION_SIZE 512
//ACTUALLY THIS SEEMS WRONG: WE DO NOT KNOW THE ORDER OF OPERATIONS OF THE ADDING. NEED TO DOUBLE BUFFER THE ARRAY XY TO GUARANTEE THAT THIS WORKS.
__global__ void ksscan_kernel(real* X, real* Y, int inputsize){
__shared__ real XY[SECTION_SIZE];
int i =blockIdx.x*blockDim.x+threadIdx.x;
if (i < inputsize){
XY[threadIdx.x]=X[i];
for (int stride=1; stride<blockDim.x; stride*=2 ){
__syncthreads();
if (threadIdx.x >= stride) XY[threadIdx.x]+= XY[threadIdx.x-stride];
}
Y[i]=XY[threadIdx.x];
}
}
void ksscan(real* d_X, real* d_Y,int inputsize){
ksscan_kernel<<<ceil(inputsize/ (real) SECTION_SIZE),SECTION_SIZE>>>(d_X,d_Y,inputsize);
}
|
b7d30313bcf83180a6e05b7d38cee1b52a47dd8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels_common.h"
__global__ void create_beta_gk_gpu_kernel
(
int num_gkvec__,
int const* beta_desc__,
hipDoubleComplex const* beta_gk_t,
double const* gkvec,
double const* atom_pos,
hipDoubleComplex* beta_gk
)
{
int ia = blockIdx.y;
int igk = blockDim.x * blockIdx.x + threadIdx.x;
int nbf = beta_desc__[array2D_offset(0, ia, 4)];
int offset_beta_gk = beta_desc__[array2D_offset(1, ia, 4)];
int offset_beta_gk_t = beta_desc__[array2D_offset(2, ia, 4)];
if (igk < num_gkvec__)
{
double p = 0;
for (int x = 0; x < 3; x++) p += atom_pos[array2D_offset(x, ia, 3)] * gkvec[array2D_offset(x, igk, 3)];
p *= twopi;
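// Multiply the atom-type projector by the per-atom phase factor cos(p) - i*sin(p) = exp(-i * 2*pi * (G+k) . r_atom)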
double sinp = sin(p);
double cosp = cos(p);
for (int xi = 0; xi < nbf; xi++)
{
beta_gk[array2D_offset(igk, offset_beta_gk + xi, num_gkvec__)] =
cuCmul(beta_gk_t[array2D_offset(igk, offset_beta_gk_t + xi, num_gkvec__)],
make_cuDoubleComplex(cosp, -sinp));
}
}
}
extern "C" void create_beta_gk_gpu(int num_atoms,
int num_gkvec,
int const* beta_desc,
hipDoubleComplex const* beta_gk_t,
double const* gkvec,
double const* atom_pos,
hipDoubleComplex* beta_gk)
{
CUDA_timer t("create_beta_gk_gpu");
dim3 grid_t(64);
dim3 grid_b(num_blocks(num_gkvec, grid_t.x), num_atoms);
hipLaunchKernelGGL(( create_beta_gk_gpu_kernel) , dim3(grid_b), dim3(grid_t), 0, 0,
num_gkvec,
beta_desc,
beta_gk_t,
gkvec,
atom_pos,
beta_gk
);
}
| b7d30313bcf83180a6e05b7d38cee1b52a47dd8f.cu | #include "kernels_common.h"
__global__ void create_beta_gk_gpu_kernel
(
int num_gkvec__,
int const* beta_desc__,
cuDoubleComplex const* beta_gk_t,
double const* gkvec,
double const* atom_pos,
cuDoubleComplex* beta_gk
)
{
int ia = blockIdx.y;
int igk = blockDim.x * blockIdx.x + threadIdx.x;
int nbf = beta_desc__[array2D_offset(0, ia, 4)];
int offset_beta_gk = beta_desc__[array2D_offset(1, ia, 4)];
int offset_beta_gk_t = beta_desc__[array2D_offset(2, ia, 4)];
if (igk < num_gkvec__)
{
double p = 0;
for (int x = 0; x < 3; x++) p += atom_pos[array2D_offset(x, ia, 3)] * gkvec[array2D_offset(x, igk, 3)];
p *= twopi;
double sinp = sin(p);
double cosp = cos(p);
for (int xi = 0; xi < nbf; xi++)
{
beta_gk[array2D_offset(igk, offset_beta_gk + xi, num_gkvec__)] =
cuCmul(beta_gk_t[array2D_offset(igk, offset_beta_gk_t + xi, num_gkvec__)],
make_cuDoubleComplex(cosp, -sinp));
}
}
}
extern "C" void create_beta_gk_gpu(int num_atoms,
int num_gkvec,
int const* beta_desc,
cuDoubleComplex const* beta_gk_t,
double const* gkvec,
double const* atom_pos,
cuDoubleComplex* beta_gk)
{
CUDA_timer t("create_beta_gk_gpu");
dim3 grid_t(64);
dim3 grid_b(num_blocks(num_gkvec, grid_t.x), num_atoms);
create_beta_gk_gpu_kernel <<<grid_b, grid_t>>>
(
num_gkvec,
beta_desc,
beta_gk_t,
gkvec,
atom_pos,
beta_gk
);
}
|
8cd2b06eca9b7643793c189f64b02254881e601c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "parrots_cuda_helper.hpp"
#include "sigmoid_focal_loss_cuda_kernel.cuh"
#include "softmax_focal_loss_cuda_kernel.cuh"
void SigmoidFocalLossForwardCUDAKernelLauncher(
const DArrayLite input, const DArrayLite target, const DArrayLite weight,
DArrayLite output, float gamma, float alpha, hipStream_t stream) {
int output_size = output.size();
int num_classes = input.dim(1);
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
input.elemType().prim(), ([&] {
hipLaunchKernelGGL(( sigmoid_focal_loss_forward_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, input.ptr<scalar_t>(), target.ptr<int64_t>(),
weight.ptr<scalar_t>(), output.ptr<scalar_t>(), gamma, alpha,
num_classes);
}));
PARROTS_CUDA_CHECK(hipGetLastError());
}
void SigmoidFocalLossBackwardCUDAKernelLauncher(
const DArrayLite input, const DArrayLite target, const DArrayLite weight,
DArrayLite grad_input, float gamma, float alpha, hipStream_t stream) {
int output_size = grad_input.size();
int num_classes = input.dim(1);
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
input.elemType().prim(), ([&] {
hipLaunchKernelGGL(( sigmoid_focal_loss_backward_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, input.ptr<scalar_t>(), target.ptr<int64_t>(),
weight.ptr<scalar_t>(), grad_input.ptr<scalar_t>(), gamma,
alpha, num_classes);
}));
PARROTS_CUDA_CHECK(hipGetLastError());
}
void SoftmaxFocalLossForwardCUDAKernelLauncher(
const DArrayLite softmax, const DArrayLite target, const DArrayLite weight,
DArrayLite output, float gamma, float alpha, hipStream_t stream) {
int output_size = output.size();
int num_classes = softmax.dim(1);
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
softmax.elemType().prim(), ([&] {
hipLaunchKernelGGL(( softmax_focal_loss_forward_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, softmax.ptr<scalar_t>(), target.ptr<int64_t>(),
weight.ptr<scalar_t>(), output.ptr<scalar_t>(), gamma, alpha,
num_classes);
}));
PARROTS_CUDA_CHECK(hipGetLastError());
}
void SoftmaxFocalLossBackwardCUDAKernelLauncher(
const DArrayLite softmax, const DArrayLite target, const DArrayLite weight,
DArrayLite buff, DArrayLite grad_input, float gamma, float alpha,
hipStream_t stream) {
int output_size = buff.size();
int num_classes = softmax.dim(1);
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_input.elemType().prim(), ([&] {
hipLaunchKernelGGL(( softmax_focal_loss_backward_cuda1_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, softmax.ptr<scalar_t>(), target.ptr<int64_t>(),
weight.ptr<scalar_t>(), buff.ptr<scalar_t>(), gamma, alpha,
num_classes);
}));
PARROTS_CUDA_CHECK(hipGetLastError());
output_size = grad_input.size();
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_input.elemType().prim(), ([&] {
hipLaunchKernelGGL(( softmax_focal_loss_backward_cuda2_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, softmax.ptr<scalar_t>(), target.ptr<int64_t>(),
buff.ptr<scalar_t>(), grad_input.ptr<scalar_t>(), num_classes);
}));
PARROTS_CUDA_CHECK(hipGetLastError());
}
| 8cd2b06eca9b7643793c189f64b02254881e601c.cu | #include "parrots_cuda_helper.hpp"
#include "sigmoid_focal_loss_cuda_kernel.cuh"
#include "softmax_focal_loss_cuda_kernel.cuh"
void SigmoidFocalLossForwardCUDAKernelLauncher(
const DArrayLite input, const DArrayLite target, const DArrayLite weight,
DArrayLite output, float gamma, float alpha, cudaStream_t stream) {
int output_size = output.size();
int num_classes = input.dim(1);
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
input.elemType().prim(), ([&] {
sigmoid_focal_loss_forward_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, input.ptr<scalar_t>(), target.ptr<int64_t>(),
weight.ptr<scalar_t>(), output.ptr<scalar_t>(), gamma, alpha,
num_classes);
}));
PARROTS_CUDA_CHECK(cudaGetLastError());
}
void SigmoidFocalLossBackwardCUDAKernelLauncher(
const DArrayLite input, const DArrayLite target, const DArrayLite weight,
DArrayLite grad_input, float gamma, float alpha, cudaStream_t stream) {
int output_size = grad_input.size();
int num_classes = input.dim(1);
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
input.elemType().prim(), ([&] {
sigmoid_focal_loss_backward_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, input.ptr<scalar_t>(), target.ptr<int64_t>(),
weight.ptr<scalar_t>(), grad_input.ptr<scalar_t>(), gamma,
alpha, num_classes);
}));
PARROTS_CUDA_CHECK(cudaGetLastError());
}
void SoftmaxFocalLossForwardCUDAKernelLauncher(
const DArrayLite softmax, const DArrayLite target, const DArrayLite weight,
DArrayLite output, float gamma, float alpha, cudaStream_t stream) {
int output_size = output.size();
int num_classes = softmax.dim(1);
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
softmax.elemType().prim(), ([&] {
softmax_focal_loss_forward_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, softmax.ptr<scalar_t>(), target.ptr<int64_t>(),
weight.ptr<scalar_t>(), output.ptr<scalar_t>(), gamma, alpha,
num_classes);
}));
PARROTS_CUDA_CHECK(cudaGetLastError());
}
void SoftmaxFocalLossBackwardCUDAKernelLauncher(
const DArrayLite softmax, const DArrayLite target, const DArrayLite weight,
DArrayLite buff, DArrayLite grad_input, float gamma, float alpha,
cudaStream_t stream) {
int output_size = buff.size();
int num_classes = softmax.dim(1);
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_input.elemType().prim(), ([&] {
softmax_focal_loss_backward_cuda1_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, softmax.ptr<scalar_t>(), target.ptr<int64_t>(),
weight.ptr<scalar_t>(), buff.ptr<scalar_t>(), gamma, alpha,
num_classes);
}));
PARROTS_CUDA_CHECK(cudaGetLastError());
output_size = grad_input.size();
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_input.elemType().prim(), ([&] {
softmax_focal_loss_backward_cuda2_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, softmax.ptr<scalar_t>(), target.ptr<int64_t>(),
buff.ptr<scalar_t>(), grad_input.ptr<scalar_t>(), num_classes);
}));
PARROTS_CUDA_CHECK(cudaGetLastError());
}
|
6fd2f028fd8aa93b9ab05844972333b230eb511e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2021 Pierre Talbot, Frédéric Pinel
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <iostream>
#include <algorithm>
#include <cstdio>
#include <chrono>
#include <thread>
#include "solver.cuh"
#include "vstore.cuh"
#include "propagators.cuh"
#include "cuda_helper.hpp"
#include "statistics.cuh"
#include "status.cuh"
#include "search.cuh"
__device__ int decomposition = 0;
// #define SHMEM_SIZE 65536
#define SHMEM_SIZE 44000
#define IN_GLOBAL_MEMORY
CUDA_GLOBAL void search_k(
Array<Pointer<TreeAndPar>>* trees,
VStore* root,
Array<Pointer<Propagator>>* props,
Array<Var>* branching_vars,
Pointer<Interval>* best_bound,
Array<VStore>* best_sols,
Var minimize_x,
Array<Statistics>* blocks_stats,
int subproblems_power,
bool* stop)
{
#ifndef IN_GLOBAL_MEMORY
extern __shared__ int shmem[];
const int n = SHMEM_SIZE;
#endif
int tid = threadIdx.x;
int nodeid = blockIdx.x;
int stride = blockDim.x;
__shared__ int curr_decomposition;
__shared__ int decomposition_size;
int subproblems = pow(2, subproblems_power);
if (tid == 0) {
decomposition_size = subproblems_power;
INFO(printf("decomposition = %d, %d\n", decomposition_size, subproblems));
#ifdef IN_GLOBAL_MEMORY
GlobalAllocator allocator;
#else
SharedAllocator allocator(shmem, n);
#endif
(*trees)[nodeid].reset(new(allocator) TreeAndPar(
*root, *props, *branching_vars, **best_bound, minimize_x, allocator));
curr_decomposition = atomicAdd(&decomposition, 1);
}
__syncthreads();
while(curr_decomposition < subproblems && !(*stop)) {
INFO(if(tid == 0) printf("Block %d with decomposition %d.\n", nodeid, curr_decomposition));
(*trees)[nodeid]->search(tid, stride, *root, curr_decomposition, decomposition_size, *stop);
if (tid == 0) {
Statistics latest = (*trees)[nodeid]->statistics();
if(latest.best_bound != -1 && latest.best_bound < (*blocks_stats)[nodeid].best_bound) {
(*best_sols)[nodeid].reset((*trees)[nodeid]->best());
}
(*blocks_stats)[nodeid].join(latest);
curr_decomposition = atomicAdd(&decomposition, 1);
}
__syncthreads();
}
INFO(if(tid == 0) printf("Block %d quits %d.\n", nodeid, (*blocks_stats)[nodeid].best_bound));
// if(tid == 0)
// printf("%d: Block %d quits %d.\n", tid, nodeid, (*blocks_stats)[nodeid].best_bound);
}
// Inspired by https://stackoverflow.com/questions/39513830/launch-cuda-kernel-with-a-timeout/39514902
// Timeout expected in seconds.
void guard_timeout(int timeout, bool& stop) {
int progressed = 0;
while (!stop) {
std::this_thread::sleep_for(std::chrono::seconds(1));
progressed += 1;
if (progressed >= timeout) {
stop = true;
}
}
}
void solve(VStore* vstore, Constraints constraints, Var minimize_x, Configuration config)
{
// INFO(constraints.print(*vstore));
Array<Var>* branching_vars = constraints.branching_vars();
LOG(std::cout << "Start transferring propagators to device memory." << std::endl);
auto t1 = std::chrono::high_resolution_clock::now();
Array<Pointer<Propagator>>* props = new(managed_allocator) Array<Pointer<Propagator>>(constraints.size());
LOG(std::cout << "props created " << props->size() << std::endl);
for (auto p : constraints.propagators) {
LOG(p->print(*vstore));
LOG(std::cout << std::endl);
(*props)[p->uid].reset(p->to_device());
}
auto t2 = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count();
LOG(std::cout << "Finish transferring propagators to device memory (" << duration << " ms)" << std::endl);
t1 = std::chrono::high_resolution_clock::now();
Array<Pointer<TreeAndPar>>* trees = new(managed_allocator) Array<Pointer<TreeAndPar>>(config.or_nodes);
Pointer<Interval>* best_bound = new(managed_allocator) Pointer<Interval>(Interval());
Array<VStore>* best_sols = new(managed_allocator) Array<VStore>(*vstore, config.or_nodes);
Array<Statistics>* blocks_stats = new(managed_allocator) Array<Statistics>(config.or_nodes);
bool* stop = new(managed_allocator) bool(false);
// hipFuncSetAttribute(search_k, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SIZE);
int and_nodes = min((int)props->size(), config.and_nodes);
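// One block per OR-node; and_nodes threads per block (capped by the number of propagators) cooperate on the search inside a node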
hipLaunchKernelGGL(( search_k), dim3(config.or_nodes), and_nodes
#ifndef IN_GLOBAL_MEMORY
, SHMEM_SIZE
#endif
, 0, trees, vstore, props, branching_vars, best_bound, best_sols, minimize_x, blocks_stats, config.subproblems_power, stop);
std::thread timeout_thread(guard_timeout, config.timeout, std::ref(*stop));
CUDIE(hipDeviceSynchronize());
*stop = true;
t2 = std::chrono::high_resolution_clock::now();
duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count();
timeout_thread.join();
Statistics statistics;
for(int i = 0; i < blocks_stats->size(); ++i) {
statistics.join((*blocks_stats)[i]);
}
GlobalStatistics gstats(vstore->size(), constraints.size(), duration, statistics);
gstats.print();
operator delete(best_bound, managed_allocator);
operator delete(props, managed_allocator);
operator delete(trees, managed_allocator);
operator delete(branching_vars, managed_allocator);
operator delete(best_bound, managed_allocator);
operator delete(best_sols, managed_allocator);
}
| 6fd2f028fd8aa93b9ab05844972333b230eb511e.cu | // Copyright 2021 Pierre Talbot, Frédéric Pinel
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <iostream>
#include <algorithm>
#include <cstdio>
#include <chrono>
#include <thread>
#include "solver.cuh"
#include "vstore.cuh"
#include "propagators.cuh"
#include "cuda_helper.hpp"
#include "statistics.cuh"
#include "status.cuh"
#include "search.cuh"
__device__ int decomposition = 0;
// #define SHMEM_SIZE 65536
#define SHMEM_SIZE 44000
#define IN_GLOBAL_MEMORY
CUDA_GLOBAL void search_k(
Array<Pointer<TreeAndPar>>* trees,
VStore* root,
Array<Pointer<Propagator>>* props,
Array<Var>* branching_vars,
Pointer<Interval>* best_bound,
Array<VStore>* best_sols,
Var minimize_x,
Array<Statistics>* blocks_stats,
int subproblems_power,
bool* stop)
{
#ifndef IN_GLOBAL_MEMORY
extern __shared__ int shmem[];
const int n = SHMEM_SIZE;
#endif
int tid = threadIdx.x;
int nodeid = blockIdx.x;
int stride = blockDim.x;
__shared__ int curr_decomposition;
__shared__ int decomposition_size;
int subproblems = pow(2, subproblems_power);
if (tid == 0) {
decomposition_size = subproblems_power;
INFO(printf("decomposition = %d, %d\n", decomposition_size, subproblems));
#ifdef IN_GLOBAL_MEMORY
GlobalAllocator allocator;
#else
SharedAllocator allocator(shmem, n);
#endif
(*trees)[nodeid].reset(new(allocator) TreeAndPar(
*root, *props, *branching_vars, **best_bound, minimize_x, allocator));
curr_decomposition = atomicAdd(&decomposition, 1);
}
__syncthreads();
while(curr_decomposition < subproblems && !(*stop)) {
INFO(if(tid == 0) printf("Block %d with decomposition %d.\n", nodeid, curr_decomposition));
(*trees)[nodeid]->search(tid, stride, *root, curr_decomposition, decomposition_size, *stop);
if (tid == 0) {
Statistics latest = (*trees)[nodeid]->statistics();
if(latest.best_bound != -1 && latest.best_bound < (*blocks_stats)[nodeid].best_bound) {
(*best_sols)[nodeid].reset((*trees)[nodeid]->best());
}
(*blocks_stats)[nodeid].join(latest);
curr_decomposition = atomicAdd(&decomposition, 1);
}
__syncthreads();
}
INFO(if(tid == 0) printf("Block %d quits %d.\n", nodeid, (*blocks_stats)[nodeid].best_bound));
// if(tid == 0)
// printf("%d: Block %d quits %d.\n", tid, nodeid, (*blocks_stats)[nodeid].best_bound);
}
// Inspired by https://stackoverflow.com/questions/39513830/launch-cuda-kernel-with-a-timeout/39514902
// Timeout expected in seconds.
void guard_timeout(int timeout, bool& stop) {
int progressed = 0;
while (!stop) {
std::this_thread::sleep_for(std::chrono::seconds(1));
progressed += 1;
if (progressed >= timeout) {
stop = true;
}
}
}
void solve(VStore* vstore, Constraints constraints, Var minimize_x, Configuration config)
{
// INFO(constraints.print(*vstore));
Array<Var>* branching_vars = constraints.branching_vars();
LOG(std::cout << "Start transfering propagator to device memory." << std::endl);
auto t1 = std::chrono::high_resolution_clock::now();
Array<Pointer<Propagator>>* props = new(managed_allocator) Array<Pointer<Propagator>>(constraints.size());
LOG(std::cout << "props created " << props->size() << std::endl);
for (auto p : constraints.propagators) {
LOG(p->print(*vstore));
LOG(std::cout << std::endl);
(*props)[p->uid].reset(p->to_device());
}
auto t2 = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count();
LOG(std::cout << "Finish transfering propagators to device memory (" << duration << " ms)" << std::endl);
t1 = std::chrono::high_resolution_clock::now();
Array<Pointer<TreeAndPar>>* trees = new(managed_allocator) Array<Pointer<TreeAndPar>>(config.or_nodes);
Pointer<Interval>* best_bound = new(managed_allocator) Pointer<Interval>(Interval());
Array<VStore>* best_sols = new(managed_allocator) Array<VStore>(*vstore, config.or_nodes);
Array<Statistics>* blocks_stats = new(managed_allocator) Array<Statistics>(config.or_nodes);
bool* stop = new(managed_allocator) bool(false);
// cudaFuncSetAttribute(search_k, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SIZE);
int and_nodes = min((int)props->size(), config.and_nodes);
search_k<<<config.or_nodes, and_nodes
#ifndef IN_GLOBAL_MEMORY
, SHMEM_SIZE
#endif
>>>(trees, vstore, props, branching_vars, best_bound, best_sols, minimize_x, blocks_stats, config.subproblems_power, stop);
std::thread timeout_thread(guard_timeout, config.timeout, std::ref(*stop));
CUDIE(cudaDeviceSynchronize());
*stop = true;
t2 = std::chrono::high_resolution_clock::now();
duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count();
timeout_thread.join();
Statistics statistics;
for(int i = 0; i < blocks_stats->size(); ++i) {
statistics.join((*blocks_stats)[i]);
}
GlobalStatistics gstats(vstore->size(), constraints.size(), duration, statistics);
gstats.print();
operator delete(best_bound, managed_allocator);
operator delete(props, managed_allocator);
operator delete(trees, managed_allocator);
operator delete(branching_vars, managed_allocator);
  operator delete(blocks_stats, managed_allocator);
operator delete(best_sols, managed_allocator);
}
|
72a79902557fdcf97c956ffad1fe805ffcdb1f68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cassert>
#include <iostream>
#include "../include/stockham_mont.h"
#include "../include/defines.h"
#include "../include/montmulmod.h"
#include "../include/cudautils.h"
/**
* The Stockham FFT can be computed with the following sequence of operations
*
* (DFT_2 @ I_{2^{k-1}}) (D_{2, 2^{k-i-1}}@I_{2^i}) (L_2^{2^{k-i}}@I_{2^i})
*
* for i from k - 1 down to 0.
*/
///////////////////////////////////////////////////////////////////////////////
/**
* @fp, the Fourier prime struct used in this file
*/
__constant__ fprime_t fp_d;
/**
* Initialize of fourier prime structure
*/
static inline void setup_const(const fprime_t * const fpp) {
    hipMemcpyToSymbol(HIP_SYMBOL(fp_d), fpp, sizeof(fprime_t));
}
///////////////////////////////////////////////////////////////////////////////
// Implementation of stride permutation, device version //
///////////////////////////////////////////////////////////////////////////////
// each thread block consists of N_THD number of threads
#define E_THD (7)
#define N_THD (1 << E_THD)
// each thread block uses N_SHD integers in the shared memory
#define E_SHD (1 + E_THD)
#define N_SHD (1 << E_SHD)
// the number of data each thread moves
#define N_DAT (1 << (E_SHD - E_THD))
/**
* @X, input array of length n = 2^k
* @Y, output array of length n = 2^k
* @e, index from 0 to k - 1
*
* Compute
*
* Y = (L_2^{2^{k-e}}@I_{2^e}) X
*
* with the case that
*
* s = 2^e >= N_SHD
*
 * i.e. moving one stride of s elements takes at least one whole thread block.
*
*/
__global__
void stride_transpose2_kernel_a(sfixn *Y, const sfixn * const X, sfixn k, sfixn e)
{
__shared__ sfixn block[N_SHD];
// block index in the kernel
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// delta = s / N_SHD;
sfixn exp_delta = e - E_SHD;
// iq = quo(bid, delta) and ir = rem(bid, delta)
sfixn iq = bid >> exp_delta;
sfixn ir = bid & ((1 << exp_delta) - 1);
// iqq = quo(iq, 2) and iqr = rem(iq, 2)
sfixn iqq = (iq >> 1);
sfixn iqr = (iq & 1);
// read in data from X
// the input offset for this block is iq * s + ir * N_SHD
sfixn i;
sfixn *shared = block;
const sfixn *din = X + (iq << e) + (ir << E_SHD);
#pragma unroll
for (i = 0; i < N_DAT; ++i) {
shared[threadIdx.x] = din[threadIdx.x];
din += N_THD;
shared += N_THD;
}
__syncthreads();
// write data out to Y
//
// the output offset for this block is
// rem(iq, 2) * n / 2 + quo(iq, 2) * s + ir * N_SHD
sfixn *dout = Y + (iqr << (k - 1)) + (iqq << e) + (ir << E_SHD);
shared = block;
#pragma unroll
for (i = 0; i < N_DAT; ++i) {
dout[threadIdx.x] = shared[threadIdx.x];
dout += N_THD;
shared += N_THD;
}
__syncthreads();
}
/**
* @X, input array of length n = 2^k
* @Y, output array of length n = 2^k
* @e, index from 0 to k - 1
*
* Compute
*
* Y = (L_2^{2^{k-e}}@I_{2^e}) X
*
* with the case that
*
* s = 2^e < N_SHD
*
 * i.e. one thread block moves more than one stride of s elements.
*
*/
__global__ void
stride_transpose2_kernel_b(sfixn *Y, const sfixn * const X, sfixn k, sfixn e)
{
__shared__ sfixn block[N_SHD];
// block index in the kernel
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// read in data from X
// the input offset for this block is bid * N_SHD
sfixn i;
sfixn *shared = block;
const sfixn *din = X + (bid << E_SHD);
#pragma unroll
for (i = 0; i < N_DAT; ++i) {
shared[threadIdx.x] = din[threadIdx.x];
din += N_THD;
shared += N_THD;
}
__syncthreads();
// offset0 = bid * N_SHD / 2
// offset1 = bid * N_SHD / 2 + n / 2
// base = Y + offset0
sfixn *base = Y + (bid << (E_SHD - 1));
sfixn *dout;
sfixn tid, iq, ir, fi;
#pragma unroll
for (i = 0; i < N_DAT; ++i) {
// virtual thread id in each loop
tid = (i << E_THD) + threadIdx.x;
// iq = quo(tid, s) and ir = rem(tid, s);
iq = tid >> e;
ir = tid & ((1 << e) - 1);
// f(i) = (rem(2iq, N_SHD/s) + quo(2iq, N_SHD/s)) * s + ir
fi = (iq << 1) >> (E_SHD - e);
fi += (iq << 1) & ((1 << (E_SHD - e)) - 1);
fi <<= e;
fi += ir;
//if (tid < N_SHD/2)
// dout[tid] = block[fi];
//else
// dout[tid - N_SHD / 2 + (1 << (k-1))] = block[fi];
dout = base + (tid >> (E_SHD-1)) * ((1 << (k-1)) - (1 << (E_SHD-1)));
dout[tid] = block[fi];
}
}
/**
* @X, device array of length n = 2^k
* @Y, device array of length n = 2^k (output)
* @i, the exponent of the stride s = 2^i
*
* Require 0 <= i <= k - 2
*/
void stride_transpose2(sfixn *Y, const sfixn * const X, sfixn k, sfixn i)
{
if (DEBUG) assert((i >= 0) && (i < k - 1));
sfixn nThread = N_THD;
sfixn nb = ((sfixn)1 << (k - E_SHD));
dim3 nBlock(nb, 1, 1);
// the maximal possible dimension is 2^15 = 32768 < 65535
if (nb > (1 << 15)) { nBlock.x = (1 << 15); nBlock.y = (nb >> 15); }
if (i >= E_SHD) {
hipLaunchKernelGGL(( stride_transpose2_kernel_a), dim3(nBlock), dim3(nThread), 0, 0, Y, X, k, i);
} else {
hipLaunchKernelGGL(( stride_transpose2_kernel_b), dim3(nBlock), dim3(nThread), 0, 0, Y, X, k, i);
}
hipDeviceSynchronize();
}
#undef E_THD
#undef N_THD
#undef E_SHD
#undef N_SHD
#undef N_DAT
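/**
 * [Editor's note] Illustrative host-side reference, not part of the original
 * file: a plain CPU version of Y = (L_2^{2^{k-e}} @ I_{2^e}) X against which
 * the two kernels above can be checked on small inputs. Even-numbered strides
 * of length s = 2^e land in the first half of Y, odd-numbered ones in the
 * second half. The function name is this note's own; sfixn is assumed to be a
 * plain signed integer type from defines.h.
 */
static void stride_transpose2_host(sfixn *Y, const sfixn *X, sfixn k, sfixn e)
{
    sfixn n = (sfixn)1 << k;
    sfixn s = (sfixn)1 << e;       // stride length
    sfixn m = n / (2 * s);         // number of (even, odd) stride pairs
    for (sfixn j = 0; j < m; ++j) {
        for (sfixn r = 0; r < s; ++r) {
            Y[j * s + r] = X[(2 * j) * s + r];
            Y[(n / 2) + j * s + r] = X[(2 * j + 1) * s + r];
        }
    }
}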
///////////////////////////////////////////////////////////////////////////////
// Implementation of the twiddle matrix multiply //
///////////////////////////////////////////////////////////////////////////////
/**
* @ compute x[i] = frep(w^i) for i from 0 to n - 1
*/
__device__ __host__ inline
void get_mont_root_power(sfixn w, sfixn *X, sfixn n, const fprime_t * const fpp)
{
X[0] = fpp->r;
X[1] = w = frep(w, fpp);
for (sfixn i = 2; i < n; ++i) {
X[i] = fourier_reduction(X[i-1], w, fpp);
}
}
#define E_THD (7)
#define N_THD (1 << E_THD)
#define E_DAT (0)
#define N_DAT (1 << E_DAT)
#define E_ELE (E_THD + E_DAT)
#define N_ELE (1 << E_ELE)
/**
* @X, input/output array of length n = 2^k
* @W, array of primitive roots
* @i, step index
* @fpp, prime structure
*
* Compute X = (D_{2, 2^{k - i - 1}} @ I_{2^i}) X, where
*
* D_{2, 2^{k - i - 1}} is a matrix of size 2^{k-i} X 2^{k-i}
*
* ------------------------------------------------------------
*
* For example, let w^8 = -1 be a 16-th primitive root of unity
*
* i = 0, D_{2, 8} =
*
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ w ]
* [ w^2 ]
* [ w^3 ]
* [ w^4 ]
* [ w^5 ]
* [ w^6 ]
* [ w^7 ]
*
* i = 1, D_{2, 4} =
*
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ w^2 ]
* [ w^4 ]
* [ w^6 ]
*
* and i = 2, D_{2, 2} =
*
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ w^4 ]
*
* Hence the primitive root of unity will be used as follows:
*
* i = 2, [1, w^4]
* i = 1, [1, w^2, w^4, w^6]
* i = 0, [1, w, w^2, w^3, w^4, w^5, w^6, w^7]
*
* ------------------------------------------------------------
*
* D_{2, 2^{k - i - 1}} @ I_{2^i} is the diagonal matrix with each entry
* repeated 2^i times, resulting a matrix of size 2^k X 2^k
*
* Each time, only half of data will be touched since the first half
* of the diagonal matrix consists of 1 only. The total number of blocks
* is 2^{k-1} / N_ELE.
*
*/
/**
* stride_twiddle_kernel_a, multiple thread blocks (>1) handle a stride.
*
* the number of blocks is s / N_ELE
*
* Require k > i > E_ELE.
*
*/
__global__
void stride_twiddle_kernel_a(sfixn *X, const sfixn * const W, sfixn k, sfixn i)
{
// block index
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// root used for the thread block is wn^(s*e), with e = quo(bid, s/N_ELE)
sfixn w = W[(bid >> (i - E_ELE)) << i];
// starting position for this block
// the first 2^(k - i - 1) * 2^i = 2^(k - 1) elements will be unchanged.
sfixn *base = X + ((sfixn)1 << (k - 1)) + (bid << E_ELE);
// virtual thread id
sfixn tid;
#pragma unroll
for (sfixn j = 0; j < N_DAT; ++j) {
tid = (j << E_THD) + threadIdx.x;
base[tid] = fourier_reduction(w, base[tid], &fp_d);
}
}
/**
* stride_twiddle_kernel_b, one thread blocks handles multiple strides.
*
* the number of strides is N_ELE / s
*
* Require 0 <= i <= E_ELE.
*
*/
__global__ void
stride_twiddle_kernel_b(sfixn *X, const sfixn * const W, sfixn k, sfixn i)
{
// block index
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// starting position for this block
// the first 2^(k - i - 1) * 2^i = 2^(k - 1) elements will be unchanged.
sfixn *base = X + ((sfixn)1 << (k - 1)) + (bid << E_ELE);
// virtual thread id
sfixn tid;
// the starting root for the thread block is wn^(e * s)
// with e = bid * (N_ELE / s). Thus e * s = bid * E_ELE.
sfixn iq, iq_base = (bid << E_ELE);
#pragma unroll
for (sfixn j = 0; j < N_DAT; ++j) {
tid = (j << E_THD) + threadIdx.x;
// the exponent is iq_base + s * quo(tid, s)
iq = iq_base + ((tid >> i) << i);
base[tid] = fourier_reduction(W[iq], base[tid], &fp_d);
}
}
/**
* @X, input/output array of length 2^k
* @W, array of primitive roots
* @i, step index
*
* Require 0 <= i <= k - 2
*/
void stride_twiddle(sfixn *X, const sfixn * const W, sfixn k, sfixn i,
sfixn p, bool mont)
{
if (DEBUG) assert((i >= 0) && (i < k - 1) && (k > E_ELE));
sfixn nThread = (1 << E_THD);
// nblock is 2^{k-1} / N_ELE
sfixn nb = (1 << (k - 1 - E_ELE));
dim3 nBlock(nb, 1, 1);
// the maximal possible dimension is 2^15 = 32768 < 65535
if (nb > (1 << 15)) { nBlock.x = (1 << 15); nBlock.y = (nb >> 15); }
if (i > E_ELE) {
hipLaunchKernelGGL(( stride_twiddle_kernel_a), dim3(nBlock), dim3(nThread), 0, 0, X, W, k, i);
} else {
hipLaunchKernelGGL(( stride_twiddle_kernel_b), dim3(nBlock), dim3(nThread), 0, 0, X, W, k, i);
}
hipDeviceSynchronize();
}
#undef E_THD
#undef N_THD
#undef E_DAT
#undef N_DAT
#undef E_ELE
#undef N_ELE
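/**
 * [Editor's note] Illustrative host-side reference, not part of the original
 * file: X = (D_{2, 2^{k-i-1}} @ I_{2^i}) X computed on the CPU. Only the
 * second half of X is touched; entry j of that half is scaled by
 * W[s * quo(j, s)] with s = 2^i, matching both kernels above. It assumes
 * fourier_reduction is host-callable, which the __device__ __host__
 * get_mont_root_power above already relies on.
 */
static void stride_twiddle_host(sfixn *X, const sfixn *W, sfixn k, sfixn i,
                                const fprime_t *fpp)
{
    sfixn half = (sfixn)1 << (k - 1);
    for (sfixn j = 0; j < half; ++j) {
        sfixn e = (j >> i) << i;   // s * quo(j, s)
        X[half + j] = fourier_reduction(W[e], X[half + j], fpp);
    }
}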
///////////////////////////////////////////////////////////////////////////////
// Implementation of the butterfly operations //
///////////////////////////////////////////////////////////////////////////////
// each thread block consists of N_THD number of threads
#define E_THD (7)
#define N_THD (1 << E_THD)
// the number of butterflyers each thread computes
#define E_BUT (0)
#define N_BUT (1 << E_BUT)
// the number of butterflyers each thread block handles
#define E_ELE (E_THD + E_BUT)
#define N_ELE (1 << E_ELE)
// a butterfly operation is defined as
//
// x0 y0
// \/
// /\
// xs ys
//
// with y0 = x0 + xs and ys = x0 - xs.
// In total, 2 + 2 elements are involved for each butterfly
/**
* @X, device array of length n = 2^k
* @Y, device array of length n = 2^k (output)
*
* DFT2 @ I_{2^{k - 1}}
*
*/
__global__
void butterfly_kernel(sfixn *Y, const sfixn * const X, sfixn k, sfixn p) {
// block id,
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
sfixn tid;
sfixn halfn = ((sfixn )1 << (k - 1));
sfixn *B = Y + (bid << E_ELE);
const sfixn *A = X + (bid << E_ELE);
#pragma unroll
for (sfixn i = 0; i < N_BUT; ++i) {
// virtual thread id
tid = (i << E_THD) + threadIdx.x;
B[tid] = add_mod(A[tid], A[tid + halfn], p);
B[tid + halfn] = sub_mod(A[tid], A[tid + halfn], p);
}
}
void butterfly(sfixn *Y, const sfixn * const X, sfixn k, sfixn p) {
sfixn nThread = ((sfixn)1 << (E_THD));
sfixn nb = ((sfixn)1 << (k - E_ELE - 1));
dim3 nBlock(nb, 1, 1);
if (nb > (1 << 15)) { nBlock.x = (1 << 15); nBlock.y = (nb >> 15); }
if (DEBUG) assert(k >= E_ELE + 1);
hipLaunchKernelGGL(( butterfly_kernel), dim3(nBlock), dim3(nThread), 0, 0, Y, X, k, p);
hipDeviceSynchronize();
}
#undef E_THD
#undef N_THD
#undef E_BUT
#undef N_BUT
#undef E_ELE
#undef N_ELE
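/**
 * [Editor's note] Illustrative host-side reference, not part of the original
 * file: Y = (DFT_2 @ I_{2^{k-1}}) X on the CPU, assuming the add_mod/sub_mod
 * helpers used by the kernel are host-callable inline functions.
 */
static void butterfly_host(sfixn *Y, const sfixn *X, sfixn k, sfixn p)
{
    sfixn half = (sfixn)1 << (k - 1);
    for (sfixn t = 0; t < half; ++t) {
        Y[t] = add_mod(X[t], X[t + half], p);
        Y[t + half] = sub_mod(X[t], X[t + half], p);
    }
}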
/////////////////////////////
// Stockham FFT //
/////////////////////////////
/**
* @X, input data array of length n = 2^k residing in the device
* @w, n-th primitive root of unity
* @p, fourier prime number
*
* X will be filled by DFT_n(X)
*
* Montgomery's reduction will be used for modular multiplications
*
*/
void stockham_mont(sfixn *X_d, sfixn n, sfixn k, sfixn w, sfixn p)
{
// initialize fourier prime structure
fprime_t fp_h, *fpp = &fp_h;
init_fourier_prime(fpp, p);
setup_const(fpp);
// initialize the primitive roots
sfixn *W_h = new sfixn[n];
get_mont_root_power(w, W_h, n / 2, fpp);
sfixn *W_d;
hipMalloc((void **)&W_d, sizeof(sfixn) * n / 2);
hipMemcpy(W_d, W_h, sizeof(sfixn) * n / 2, hipMemcpyHostToDevice);
// sequence of applications
sfixn *Y_d;
hipMalloc((void **)&Y_d, sizeof(sfixn) * n);
butterfly(Y_d, X_d, k, p);
for (sfixn i = k - 2; i >= 0; --i) {
stride_transpose2(X_d, Y_d, k, i);
stride_twiddle(X_d, W_d, k, i, p, true);
butterfly(Y_d, X_d, k, p);
}
hipMemcpy(X_d, Y_d, sizeof(sfixn)*n, hipMemcpyDeviceToDevice);
delete [] W_h;
hipFree(W_d);
hipFree(Y_d);
}
/**
* @X, input data array of length n = 2^k residing in the host
* @w, n-th primitive root of unity
* @p, fourier prime number
*
* X will be filled by DFT_n(X)
*
*/
void stockham_mont_host(sfixn *X, sfixn n, sfixn k, sfixn w, sfixn p) {
sfixn *X_d;
hipMalloc((void **)&X_d, sizeof(sfixn) * n);
hipMemcpy(X_d, X, sizeof(sfixn) * n, hipMemcpyHostToDevice);
float elapsedTime;
start_timer(0);
///////////////////////////////////////
stockham_mont(X_d, n, k, w, p);
///////////////////////////////////////
stop_timer(0, elapsedTime);
printf("%2d\tmont_fft_no_transfer\t%8.3f\t", k, elapsedTime);
hipMemcpy(X, X_d, sizeof(sfixn) * n, hipMemcpyDeviceToHost);
hipFree(X_d);
if (DEBUG) checkCudaError("error found in stockham_mont");
}
| 72a79902557fdcf97c956ffad1fe805ffcdb1f68.cu | #include <cassert>
#include <iostream>
#include "../include/stockham_mont.h"
#include "../include/defines.h"
#include "../include/montmulmod.h"
#include "../include/cudautils.h"
/**
* The Stockham FFT can be computed with the following sequence of operations
*
* (DFT_2 @ I_{2^{k-1}}) (D_{2, 2^{k-i-1}}@I_{2^i}) (L_2^{2^{k-i}}@I_{2^i})
*
* for i from k - 1 down to 0.
*/
///////////////////////////////////////////////////////////////////////////////
/**
* @fp, the Fourier prime struct used in this file
*/
__constant__ fprime_t fp_d;
/**
* Initialize of fourier prime structure
*/
static inline void setup_const(const fprime_t * const fpp) {
cudaMemcpyToSymbol(fp_d, fpp, sizeof(fprime_t));
}
///////////////////////////////////////////////////////////////////////////////
// Implementation of stride permutation, device version //
///////////////////////////////////////////////////////////////////////////////
// each thread block consists of N_THD number of threads
#define E_THD (7)
#define N_THD (1 << E_THD)
// each thread block uses N_SHD integers in the shared memory
#define E_SHD (1 + E_THD)
#define N_SHD (1 << E_SHD)
// the number of data each thread moves
#define N_DAT (1 << (E_SHD - E_THD))
/**
* @X, input array of length n = 2^k
* @Y, output array of length n = 2^k
* @e, index from 0 to k - 1
*
* Compute
*
* Y = (L_2^{2^{k-e}}@I_{2^e}) X
*
* with the case that
*
* s = 2^e >= N_SHD
*
 * i.e. moving one stride of s elements takes at least one whole thread block.
*
*/
__global__
void stride_transpose2_kernel_a(sfixn *Y, const sfixn * const X, sfixn k, sfixn e)
{
__shared__ sfixn block[N_SHD];
// block index in the kernel
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// delta = s / N_SHD;
sfixn exp_delta = e - E_SHD;
// iq = quo(bid, delta) and ir = rem(bid, delta)
sfixn iq = bid >> exp_delta;
sfixn ir = bid & ((1 << exp_delta) - 1);
// iqq = quo(iq, 2) and iqr = rem(iq, 2)
sfixn iqq = (iq >> 1);
sfixn iqr = (iq & 1);
// read in data from X
// the input offset for this block is iq * s + ir * N_SHD
sfixn i;
sfixn *shared = block;
const sfixn *din = X + (iq << e) + (ir << E_SHD);
#pragma unroll
for (i = 0; i < N_DAT; ++i) {
shared[threadIdx.x] = din[threadIdx.x];
din += N_THD;
shared += N_THD;
}
__syncthreads();
// write data out to Y
//
// the output offset for this block is
// rem(iq, 2) * n / 2 + quo(iq, 2) * s + ir * N_SHD
sfixn *dout = Y + (iqr << (k - 1)) + (iqq << e) + (ir << E_SHD);
shared = block;
#pragma unroll
for (i = 0; i < N_DAT; ++i) {
dout[threadIdx.x] = shared[threadIdx.x];
dout += N_THD;
shared += N_THD;
}
__syncthreads();
}
/**
* @X, input array of length n = 2^k
* @Y, output array of length n = 2^k
* @e, index from 0 to k - 1
*
* Compute
*
* Y = (L_2^{2^{k-e}}@I_{2^e}) X
*
* with the case that
*
* s = 2^e < N_SHD
*
 * i.e. one thread block moves more than one stride of s elements.
*
*/
__global__ void
stride_transpose2_kernel_b(sfixn *Y, const sfixn * const X, sfixn k, sfixn e)
{
__shared__ sfixn block[N_SHD];
// block index in the kernel
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// read in data from X
// the input offset for this block is bid * N_SHD
sfixn i;
sfixn *shared = block;
const sfixn *din = X + (bid << E_SHD);
#pragma unroll
for (i = 0; i < N_DAT; ++i) {
shared[threadIdx.x] = din[threadIdx.x];
din += N_THD;
shared += N_THD;
}
__syncthreads();
// offset0 = bid * N_SHD / 2
// offset1 = bid * N_SHD / 2 + n / 2
// base = Y + offset0
sfixn *base = Y + (bid << (E_SHD - 1));
sfixn *dout;
sfixn tid, iq, ir, fi;
#pragma unroll
for (i = 0; i < N_DAT; ++i) {
// virtual thread id in each loop
tid = (i << E_THD) + threadIdx.x;
// iq = quo(tid, s) and ir = rem(tid, s);
iq = tid >> e;
ir = tid & ((1 << e) - 1);
// f(i) = (rem(2iq, N_SHD/s) + quo(2iq, N_SHD/s)) * s + ir
fi = (iq << 1) >> (E_SHD - e);
fi += (iq << 1) & ((1 << (E_SHD - e)) - 1);
fi <<= e;
fi += ir;
//if (tid < N_SHD/2)
// dout[tid] = block[fi];
//else
// dout[tid - N_SHD / 2 + (1 << (k-1))] = block[fi];
dout = base + (tid >> (E_SHD-1)) * ((1 << (k-1)) - (1 << (E_SHD-1)));
dout[tid] = block[fi];
}
}
/**
* @X, device array of length n = 2^k
* @Y, device array of length n = 2^k (output)
* @i, the exponent of the stride s = 2^i
*
* Require 0 <= i <= k - 2
*/
void stride_transpose2(sfixn *Y, const sfixn * const X, sfixn k, sfixn i)
{
if (DEBUG) assert((i >= 0) && (i < k - 1));
sfixn nThread = N_THD;
sfixn nb = ((sfixn)1 << (k - E_SHD));
dim3 nBlock(nb, 1, 1);
// the maximal possible dimension is 2^15 = 32768 < 65535
if (nb > (1 << 15)) { nBlock.x = (1 << 15); nBlock.y = (nb >> 15); }
if (i >= E_SHD) {
stride_transpose2_kernel_a<<<nBlock, nThread>>>(Y, X, k, i);
} else {
stride_transpose2_kernel_b<<<nBlock, nThread>>>(Y, X, k, i);
}
cudaThreadSynchronize();
}
#undef E_THD
#undef N_THD
#undef E_SHD
#undef N_SHD
#undef N_DAT
///////////////////////////////////////////////////////////////////////////////
// Implementation of the twiddle matrix multiply //
///////////////////////////////////////////////////////////////////////////////
/**
* @ compute x[i] = frep(w^i) for i from 0 to n - 1
*/
__device__ __host__ inline
void get_mont_root_power(sfixn w, sfixn *X, sfixn n, const fprime_t * const fpp)
{
X[0] = fpp->r;
X[1] = w = frep(w, fpp);
for (sfixn i = 2; i < n; ++i) {
X[i] = fourier_reduction(X[i-1], w, fpp);
}
}
#define E_THD (7)
#define N_THD (1 << E_THD)
#define E_DAT (0)
#define N_DAT (1 << E_DAT)
#define E_ELE (E_THD + E_DAT)
#define N_ELE (1 << E_ELE)
/**
* @X, input/output array of length n = 2^k
* @W, array of primitive roots
* @i, step index
* @fpp, prime structure
*
* Compute X = (D_{2, 2^{k - i - 1}} @ I_{2^i}) X, where
*
* D_{2, 2^{k - i - 1}} is a matrix of size 2^{k-i} X 2^{k-i}
*
* ------------------------------------------------------------
*
* For example, let w^8 = -1 be a 16-th primitive root of unity
*
* i = 0, D_{2, 8} =
*
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ w ]
* [ w^2 ]
* [ w^3 ]
* [ w^4 ]
* [ w^5 ]
* [ w^6 ]
* [ w^7 ]
*
* i = 1, D_{2, 4} =
*
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ w^2 ]
* [ w^4 ]
* [ w^6 ]
*
* and i = 2, D_{2, 2} =
*
* [ 1 ]
* [ 1 ]
* [ 1 ]
* [ w^4 ]
*
* Hence the primitive root of unity will be used as follows:
*
* i = 2, [1, w^4]
* i = 1, [1, w^2, w^4, w^6]
* i = 0, [1, w, w^2, w^3, w^4, w^5, w^6, w^7]
*
* ------------------------------------------------------------
*
* D_{2, 2^{k - i - 1}} @ I_{2^i} is the diagonal matrix with each entry
* repeated 2^i times, resulting a matrix of size 2^k X 2^k
*
* Each time, only half of data will be touched since the first half
* of the diagonal matrix consists of 1 only. The total number of blocks
* is 2^{k-1} / N_ELE.
*
*/
/**
* stride_twiddle_kernel_a, multiple thread blocks (>1) handle a stride.
*
* the number of blocks is s / N_ELE
*
* Require k > i > E_ELE.
*
*/
__global__
void stride_twiddle_kernel_a(sfixn *X, const sfixn * const W, sfixn k, sfixn i)
{
// block index
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// root used for the thread block is wn^(s*e), with e = quo(bid, s/N_ELE)
sfixn w = W[(bid >> (i - E_ELE)) << i];
// starting position for this block
// the first 2^(k - i - 1) * 2^i = 2^(k - 1) elements will be unchanged.
sfixn *base = X + ((sfixn)1 << (k - 1)) + (bid << E_ELE);
// virtual thread id
sfixn tid;
#pragma unroll
for (sfixn j = 0; j < N_DAT; ++j) {
tid = (j << E_THD) + threadIdx.x;
base[tid] = fourier_reduction(w, base[tid], &fp_d);
}
}
/**
* stride_twiddle_kernel_b, one thread blocks handles multiple strides.
*
* the number of strides is N_ELE / s
*
* Require 0 <= i <= E_ELE.
*
*/
__global__ void
stride_twiddle_kernel_b(sfixn *X, const sfixn * const W, sfixn k, sfixn i)
{
// block index
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// starting position for this block
// the first 2^(k - i - 1) * 2^i = 2^(k - 1) elements will be unchanged.
sfixn *base = X + ((sfixn)1 << (k - 1)) + (bid << E_ELE);
// virtual thread id
sfixn tid;
// the starting root for the thread block is wn^(e * s)
// with e = bid * (N_ELE / s). Thus e * s = bid * E_ELE.
sfixn iq, iq_base = (bid << E_ELE);
#pragma unroll
for (sfixn j = 0; j < N_DAT; ++j) {
tid = (j << E_THD) + threadIdx.x;
// the exponent is iq_base + s * quo(tid, s)
iq = iq_base + ((tid >> i) << i);
base[tid] = fourier_reduction(W[iq], base[tid], &fp_d);
}
}
/**
* @X, input/output array of length 2^k
* @W, array of primitive roots
* @i, step index
*
* Require 0 <= i <= k - 2
*/
void stride_twiddle(sfixn *X, const sfixn * const W, sfixn k, sfixn i,
sfixn p, bool mont)
{
if (DEBUG) assert((i >= 0) && (i < k - 1) && (k > E_ELE));
sfixn nThread = (1 << E_THD);
// nblock is 2^{k-1} / N_ELE
sfixn nb = (1 << (k - 1 - E_ELE));
dim3 nBlock(nb, 1, 1);
// the maximal possible dimension is 2^15 = 32768 < 65535
if (nb > (1 << 15)) { nBlock.x = (1 << 15); nBlock.y = (nb >> 15); }
if (i > E_ELE) {
stride_twiddle_kernel_a<<<nBlock, nThread>>>(X, W, k, i);
} else {
stride_twiddle_kernel_b<<<nBlock, nThread>>>(X, W, k, i);
}
cudaThreadSynchronize();
}
#undef E_THD
#undef N_THD
#undef E_DAT
#undef N_DAT
#undef E_ELE
#undef N_ELE
///////////////////////////////////////////////////////////////////////////////
// Implementation of the butterfly operations //
///////////////////////////////////////////////////////////////////////////////
// each thread block consists of N_THD number of threads
#define E_THD (7)
#define N_THD (1 << E_THD)
// the number of butterflyers each thread computes
#define E_BUT (0)
#define N_BUT (1 << E_BUT)
// the number of butterflyers each thread block handles
#define E_ELE (E_THD + E_BUT)
#define N_ELE (1 << E_ELE)
// a butterfly operation is defined as
//
// x0 y0
// \/
// /\
// xs ys
//
// with y0 = x0 + xs and ys = x0 - xs.
// In total, 2 + 2 elements are involved for each butterfly
/**
* @X, device array of length n = 2^k
* @Y, device array of length n = 2^k (output)
*
* DFT2 @ I_{2^{k - 1}}
*
*/
__global__
void butterfly_kernel(sfixn *Y, const sfixn * const X, sfixn k, sfixn p) {
// block id,
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
sfixn tid;
sfixn halfn = ((sfixn )1 << (k - 1));
sfixn *B = Y + (bid << E_ELE);
const sfixn *A = X + (bid << E_ELE);
#pragma unroll
for (sfixn i = 0; i < N_BUT; ++i) {
// virtual thread id
tid = (i << E_THD) + threadIdx.x;
B[tid] = add_mod(A[tid], A[tid + halfn], p);
B[tid + halfn] = sub_mod(A[tid], A[tid + halfn], p);
}
}
void butterfly(sfixn *Y, const sfixn * const X, sfixn k, sfixn p) {
sfixn nThread = ((sfixn)1 << (E_THD));
sfixn nb = ((sfixn)1 << (k - E_ELE - 1));
dim3 nBlock(nb, 1, 1);
if (nb > (1 << 15)) { nBlock.x = (1 << 15); nBlock.y = (nb >> 15); }
if (DEBUG) assert(k >= E_ELE + 1);
butterfly_kernel<<<nBlock, nThread>>>(Y, X, k, p);
cudaThreadSynchronize();
}
#undef E_THD
#undef N_THD
#undef E_BUT
#undef N_BUT
#undef E_ELE
#undef N_ELE
/////////////////////////////
// Stockham FFT //
/////////////////////////////
/**
* @X, input data array of length n = 2^k residing in the device
* @w, n-th primitive root of unity
* @p, fourier prime number
*
* X will be filled by DFT_n(X)
*
* Montgomery's reduction will be used for modular multiplications
*
*/
void stockham_mont(sfixn *X_d, sfixn n, sfixn k, sfixn w, sfixn p)
{
// initialize fourier prime structure
fprime_t fp_h, *fpp = &fp_h;
init_fourier_prime(fpp, p);
setup_const(fpp);
// initialize the primitive roots
sfixn *W_h = new sfixn[n];
get_mont_root_power(w, W_h, n / 2, fpp);
sfixn *W_d;
cudaMalloc((void **)&W_d, sizeof(sfixn) * n / 2);
cudaMemcpy(W_d, W_h, sizeof(sfixn) * n / 2, cudaMemcpyHostToDevice);
// sequence of applications
sfixn *Y_d;
cudaMalloc((void **)&Y_d, sizeof(sfixn) * n);
butterfly(Y_d, X_d, k, p);
for (sfixn i = k - 2; i >= 0; --i) {
stride_transpose2(X_d, Y_d, k, i);
stride_twiddle(X_d, W_d, k, i, p, true);
butterfly(Y_d, X_d, k, p);
}
cudaMemcpy(X_d, Y_d, sizeof(sfixn)*n, cudaMemcpyDeviceToDevice);
delete [] W_h;
cudaFree(W_d);
cudaFree(Y_d);
}
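/**
 * [Editor's note] Illustrative host-side check, not part of the original file:
 * a naive O(n^2) modular DFT, Y[i] = sum_j X[j] * w^(i*j) mod p, handy for
 * validating stockham_mont on small sizes. It assumes sfixn is a 32-bit signed
 * integer and p < 2^31, so every intermediate product fits in a long long.
 */
static void naive_dft_host(sfixn *Y, const sfixn *X, sfixn n, sfixn w, sfixn p)
{
    long long wi = 1;                       // w^i
    for (sfixn i = 0; i < n; ++i) {
        long long acc = 0, wij = 1;         // w^(i*j)
        for (sfixn j = 0; j < n; ++j) {
            acc = (acc + (long long)X[j] * wij) % p;
            wij = (wij * wi) % p;
        }
        Y[i] = (sfixn)acc;
        wi = (wi * w) % p;
    }
}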
/**
* @X, input data array of length n = 2^k residing in the host
* @w, n-th primitive root of unity
* @p, fourier prime number
*
* X will be filled by DFT_n(X)
*
*/
void stockham_mont_host(sfixn *X, sfixn n, sfixn k, sfixn w, sfixn p) {
sfixn *X_d;
cudaMalloc((void **)&X_d, sizeof(sfixn) * n);
cudaMemcpy(X_d, X, sizeof(sfixn) * n, cudaMemcpyHostToDevice);
float elapsedTime;
start_timer(0);
///////////////////////////////////////
stockham_mont(X_d, n, k, w, p);
///////////////////////////////////////
stop_timer(0, elapsedTime);
printf("%2d\tmont_fft_no_transfer\t%8.3f\t", k, elapsedTime);
cudaMemcpy(X, X_d, sizeof(sfixn) * n, cudaMemcpyDeviceToHost);
cudaFree(X_d);
if (DEBUG) checkCudaError("error found in stockham_mont");
}
|
4bd93f0565be9e75a099cf662fd5c72a58b4f813.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
* Extended for use in CS 374 at Calvin College by Joel C. Adams.
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
#include <omp.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C.
* The 3 vectors have the same number of elements numElements.
*/
__global__
void vectorAdd(const float *A, const float *B, float *C, unsigned long numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
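/**
 * [Editor's note] Illustrative variant, not part of the original sample: the
 * same element-wise addition written as a grid-stride loop, which stays
 * correct even if the launch uses fewer total threads than numElements.
 */
__global__
void vectorAddStride(const float *A, const float *B, float *C, unsigned long numElements)
{
    for (unsigned long i = (unsigned long)blockDim.x * blockIdx.x + threadIdx.x;
         i < numElements;
         i += (unsigned long)blockDim.x * gridDim.x)
    {
        C[i] = A[i] + B[i];
    }
}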
void checkErr(hipError_t err, const char* msg)
{
if (err != hipSuccess)
{
fprintf(stderr, "%s (error code %d: '%s')!\n", msg, err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/**
* Host main routine
*/
int main(int argc, char** argv)
{
double runtimeCopyTo, runtimeCompute, runtimeCopyBack, runtimeSeq;
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
unsigned long numElements = 50000;
if (argc == 2) {
numElements = strtoul( argv[1] , 0, 10 );
}
size_t size = numElements * sizeof(float);
printf("[Vector addition of %lu elements]\n", numElements);
// Allocate the host input vectors A & B
float * h_A = (float *)malloc(size);
float * h_B = (float *)malloc(size);
// Allocate the host output vector C
float * h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// 1a. Allocate the device input vectors A & B
float * d_A = NULL;
err = hipMalloc((void **)&d_A, size);
checkErr(err, "Failed to allocate device vector A");
float * d_B = NULL;
err = hipMalloc((void **)&d_B, size);
checkErr(err, "Failed to allocate device vector B");
// 1.b. Allocate the device output vector C
float * d_C = NULL;
err = hipMalloc((void **)&d_C, size);
checkErr(err, "Failed to allocate device vector C");
runtimeCopyTo = -omp_get_wtime();
// 2. Copy the host input vectors A and B in host memory
// to the device input vectors in device memory
// printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
checkErr(err, "Failed to copy device vector A from host to device");
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
checkErr(err, "Failed to copy device vector B from host to device");
runtimeCopyTo += omp_get_wtime();
runtimeCompute = -omp_get_wtime();
// 3. Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
// printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
err = hipGetLastError();
checkErr(err, "Failed to launch vectorAdd kernel");
runtimeCompute += omp_get_wtime();
runtimeCopyBack = -omp_get_wtime();
// 4. Copy the device result vector in device memory
// to the host result vector in host memory.
// printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
checkErr(err, "Failed to copy vector C from device to host");
runtimeCopyBack += omp_get_wtime();
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("CUDA test PASSED\n");
// printf("CUDA time: %lf\n", stopTime-startTime);
// Free device global memory
err = hipFree(d_A);
checkErr(err, "Failed to free device vector A");
err = hipFree(d_B);
checkErr(err, "Failed to free device vector B");
err = hipFree(d_C);
checkErr(err, "Failed to free device vector C");
runtimeSeq = -omp_get_wtime();
// repeat the computation sequentially
for (int i = 0; i < numElements; ++i)
{
h_C[i] = h_A[i] + h_B[i];
}
runtimeSeq += omp_get_wtime();
// verify again
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("\nNormal test PASSED\n");
// printf("Normal time: %lf\n", stopTime-startTime);
// printf("Time spent (Cuda): \n\tcopying the A and B arrays: \t%f\n\tcomputing the sum: \t\t%f\n\tcopying the C array: \t\t%f\n\tTotal: \t\t\t\t%f\n", runtimeCopyTo, runtimeCopyBack, runtimeCompute, (runtimeCopyTo + runtimeCopyBack + runtimeCompute));
// printf("Time spent (Sequential): \t\t%f\n", runtimeSeq);
printf("Add, %ld, %f, %f, %f, %f, %f\n", numElements, runtimeCopyTo, runtimeCopyBack, runtimeCompute, (runtimeCopyTo + runtimeCopyBack + runtimeCompute), runtimeSeq);
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
err = hipDeviceReset();
checkErr(err, "Unable to reset device");
printf("Done\n");
return 0;
}
| 4bd93f0565be9e75a099cf662fd5c72a58b4f813.cu | /**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
* Extended for use in CS 374 at Calvin College by Joel C. Adams.
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
#include <omp.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C.
* The 3 vectors have the same number of elements numElements.
*/
__global__
void vectorAdd(const float *A, const float *B, float *C, unsigned long numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
void checkErr(cudaError_t err, const char* msg)
{
if (err != cudaSuccess)
{
fprintf(stderr, "%s (error code %d: '%s')!\n", msg, err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/**
* Host main routine
*/
int main(int argc, char** argv)
{
double runtimeCopyTo, runtimeCompute, runtimeCopyBack, runtimeSeq;
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
unsigned long numElements = 50000;
if (argc == 2) {
numElements = strtoul( argv[1] , 0, 10 );
}
size_t size = numElements * sizeof(float);
printf("[Vector addition of %lu elements]\n", numElements);
// Allocate the host input vectors A & B
float * h_A = (float *)malloc(size);
float * h_B = (float *)malloc(size);
// Allocate the host output vector C
float * h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// 1a. Allocate the device input vectors A & B
float * d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
checkErr(err, "Failed to allocate device vector A");
float * d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
checkErr(err, "Failed to allocate device vector B");
// 1.b. Allocate the device output vector C
float * d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
checkErr(err, "Failed to allocate device vector C");
runtimeCopyTo = -omp_get_wtime();
// 2. Copy the host input vectors A and B in host memory
// to the device input vectors in device memory
// printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
checkErr(err, "Failed to copy device vector A from host to device");
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
checkErr(err, "Failed to copy device vector B from host to device");
runtimeCopyTo += omp_get_wtime();
runtimeCompute = -omp_get_wtime();
// 3. Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
// printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
err = cudaGetLastError();
checkErr(err, "Failed to launch vectorAdd kernel");
runtimeCompute += omp_get_wtime();
runtimeCopyBack = -omp_get_wtime();
// 4. Copy the device result vector in device memory
// to the host result vector in host memory.
// printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
checkErr(err, "Failed to copy vector C from device to host");
runtimeCopyBack += omp_get_wtime();
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("CUDA test PASSED\n");
// printf("CUDA time: %lf\n", stopTime-startTime);
// Free device global memory
err = cudaFree(d_A);
checkErr(err, "Failed to free device vector A");
err = cudaFree(d_B);
checkErr(err, "Failed to free device vector B");
err = cudaFree(d_C);
checkErr(err, "Failed to free device vector C");
runtimeSeq = -omp_get_wtime();
// repeat the computation sequentially
for (int i = 0; i < numElements; ++i)
{
h_C[i] = h_A[i] + h_B[i];
}
runtimeSeq += omp_get_wtime();
// verify again
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("\nNormal test PASSED\n");
// printf("Normal time: %lf\n", stopTime-startTime);
// printf("Time spent (Cuda): \n\tcopying the A and B arrays: \t%f\n\tcomputing the sum: \t\t%f\n\tcopying the C array: \t\t%f\n\tTotal: \t\t\t\t%f\n", runtimeCopyTo, runtimeCopyBack, runtimeCompute, (runtimeCopyTo + runtimeCopyBack + runtimeCompute));
// printf("Time spent (Sequential): \t\t%f\n", runtimeSeq);
printf("Add, %ld, %f, %f, %f, %f, %f\n", numElements, runtimeCopyTo, runtimeCopyBack, runtimeCompute, (runtimeCopyTo + runtimeCopyBack + runtimeCompute), runtimeSeq);
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
err = cudaDeviceReset();
checkErr(err, "Unable to reset device");
printf("Done\n");
return 0;
}
|
da6e67b9cca643074df5bbebc0718053ec056b5d.hip | // !!! This is a file automatically generated by hipify!!!
// ########################################################################
// Practical Course: GPU Programming in Computer Vision
// Technical University of Munich, Computer Vision Group
// ########################################################################
#include <iostream>
#include <string>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "helper.cuh"
#include "histogram.cuh"
#define HIST_LENGTH 256
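// [Editor's note] histogram.cu is referenced by the TODOs below but is not part
// of this file. The kernel beneath is only a minimal sketch (an assumption, not
// the course's implementation) of what computeHistogramCuda() could wrap: one
// global atomicAdd per channel value, with values in [0,1] mapped to one of
// nbins buckets. The shared-memory variant (13.3) would first accumulate a
// per-block histogram in shared memory and then merge it with atomicAdd.
__global__ void histogramGlobalAtomicsSketch(int *histogram, const float *img,
                                             int nbins, int n)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n)
        return;
    float v = fminf(fmaxf(img[idx], 0.0f), 1.0f);   // clamp to [0,1]
    int bin = (int)(v * (nbins - 1) + 0.5f);
    atomicAdd(&histogram[bin], 1);
}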
int main(int argc, char **argv)
{
// parse command line parameters
const char *params = {
"{i|image| |input image}"
"{b|bw|false|load input image as grayscale/black-white}"
"{r|repeats|1|number of computation repetitions}"
"{m|mode|0|mode}"
};
cv::CommandLineParser cmd(argc, argv, params);
// input image
std::string inputImage = cmd.get<std::string>("image");
// number of computation repetitions to get a better run time measurement
size_t repeats = (size_t)cmd.get<int>("repeats");
// load the input image as grayscale
bool gray = cmd.get<bool>("bw");
int mode = cmd.get<int>("mode");
// init camera
bool useCam = inputImage.empty();
cv::VideoCapture camera;
if (useCam && !openCamera(camera, 0))
{
std::cerr << "ERROR: Could not open camera" << std::endl;
return 1;
}
// read input frame
cv::Mat mIn;
if (useCam)
{
// read in first frame to get the dimensions
camera >> mIn;
}
else
{
// load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
mIn = cv::imread(inputImage.c_str(), (gray ? CV_LOAD_IMAGE_GRAYSCALE : -1));
}
// check
if (mIn.empty())
{
std::cerr << "ERROR: Could not retrieve frame " << inputImage << std::endl;
return 1;
}
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn, CV_32F);
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
int n = nc*h*w;
std::cout << "image: " << w << " x " << h << std::endl;
// initialize CUDA context
hipDeviceSynchronize(); CUDA_CHECK;
// Set the output image format
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
// allocate raw input image array
float *imgIn = new float[n]; // TODO allocate array
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
// allocate arrays on GPU
// input
float *d_imgIn = NULL;
// TODO alloc cuda memory for device arrays
hipMalloc(&d_imgIn, n *sizeof(float)); CUDA_CHECK;
// histogram
int nbins = 256;
int *histogram = new int[nbins]; // TODO allocate array
int *d_histogram = NULL;
// TODO (13.1) alloc cuda memory for d_histogram
hipMalloc(&d_histogram, nbins*sizeof(int)); CUDA_CHECK;
// TODO (13.1) reset values of d_histogram to 0
memset(histogram, 0, nbins*sizeof(int));
hipMemcpy(d_histogram, histogram, nbins*sizeof(int), hipMemcpyHostToDevice); CUDA_CHECK;
    //hipMemset(d_histogram, 0, nbins*sizeof(int)); CUDA_CHECK;
do
{
// convert range of each channel to [0,1]
mIn /= 255.0f;
// init raw input image array (and convert to layered)
convertMatToLayered(imgIn, mIn);
// upload to GPU
// TODO upload input to device
hipMemcpy(d_imgIn, imgIn, n*sizeof(float), hipMemcpyHostToDevice); CUDA_CHECK;
Timer timer; timer.start();
// Execute kernel
for(size_t i = 0; i < repeats; ++i)
{
// TODO (13.1) implement computeHistogramCuda() in histogram.cu
if (mode == 0) computeHistogramCuda(d_histogram, d_imgIn, nbins, w, h, nc);
// TODO (13.3) implement computeHistogramCudaShared() in histogram.cu
else if (mode == 1) computeHistogramCudaShared(d_histogram, d_imgIn, nbins, w, h, nc);
else std::cerr << "mode not supported" << std::endl;
hipDeviceSynchronize();
}
float t = timer.get() / (float)repeats; // elapsed time in seconds
std::cout << "time: " << t*1000 << " ms" << std::endl;
// Copy histogram back from GPU
// TODO copy from d_histogram to histogram
hipMemcpy(histogram, d_histogram, nbins*sizeof(int), hipMemcpyDeviceToHost); CUDA_CHECK;
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// ### Display your own output images here as needed
// TODO (13.2) show histogram using showHistogram256()
showHistogram256("Histogram", histogram, 100 + w + 40, 100);
if (useCam)
{
// wait 30ms for key input
if (cv::waitKey(30) >= 0)
{
mIn.release();
}
else
{
// retrieve next frame from camera
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn, CV_32F);
}
}
}
while (useCam && !mIn.empty());
if (!useCam)
{
cv::waitKey(0);
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
}
// free allocated arrays
// TODO free cuda memory of all device arrays
hipFree(d_imgIn);
hipFree(d_histogram);
// TODO free memory of all host arrays
delete[] imgIn;
delete[] histogram;
// close all opencv windows
cv::destroyAllWindows();
return 0;
}
| da6e67b9cca643074df5bbebc0718053ec056b5d.cu | // ########################################################################
// Practical Course: GPU Programming in Computer Vision
// Technical University of Munich, Computer Vision Group
// ########################################################################
#include <iostream>
#include <string>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "helper.cuh"
#include "histogram.cuh"
#define HIST_LENGTH 256
int main(int argc, char **argv)
{
// parse command line parameters
const char *params = {
"{i|image| |input image}"
"{b|bw|false|load input image as grayscale/black-white}"
"{r|repeats|1|number of computation repetitions}"
"{m|mode|0|mode}"
};
cv::CommandLineParser cmd(argc, argv, params);
// input image
std::string inputImage = cmd.get<std::string>("image");
// number of computation repetitions to get a better run time measurement
size_t repeats = (size_t)cmd.get<int>("repeats");
// load the input image as grayscale
bool gray = cmd.get<bool>("bw");
int mode = cmd.get<int>("mode");
// init camera
bool useCam = inputImage.empty();
cv::VideoCapture camera;
if (useCam && !openCamera(camera, 0))
{
std::cerr << "ERROR: Could not open camera" << std::endl;
return 1;
}
// read input frame
cv::Mat mIn;
if (useCam)
{
// read in first frame to get the dimensions
camera >> mIn;
}
else
{
// load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
mIn = cv::imread(inputImage.c_str(), (gray ? CV_LOAD_IMAGE_GRAYSCALE : -1));
}
// check
if (mIn.empty())
{
std::cerr << "ERROR: Could not retrieve frame " << inputImage << std::endl;
return 1;
}
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn, CV_32F);
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
int n = nc*h*w;
std::cout << "image: " << w << " x " << h << std::endl;
// initialize CUDA context
cudaDeviceSynchronize(); CUDA_CHECK;
// Set the output image format
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
// allocate raw input image array
float *imgIn = new float[n]; // TODO allocate array
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
// allocate arrays on GPU
// input
float *d_imgIn = NULL;
// TODO alloc cuda memory for device arrays
cudaMalloc(&d_imgIn, n *sizeof(float)); CUDA_CHECK;
// histogram
int nbins = 256;
int *histogram = new int[nbins]; // TODO allocate array
int *d_histogram = NULL;
// TODO (13.1) alloc cuda memory for d_histogram
cudaMalloc(&d_histogram, nbins*sizeof(int)); CUDA_CHECK;
// TODO (13.1) reset values of d_histogram to 0
memset(histogram, 0, nbins*sizeof(int));
cudaMemcpy(d_histogram, histogram, nbins*sizeof(int), cudaMemcpyHostToDevice); CUDA_CHECK;
    //cudaMemset(d_histogram, 0, nbins*sizeof(int)); CUDA_CHECK;
do
{
// convert range of each channel to [0,1]
mIn /= 255.0f;
// init raw input image array (and convert to layered)
convertMatToLayered(imgIn, mIn);
// upload to GPU
// TODO upload input to device
cudaMemcpy(d_imgIn, imgIn, n*sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK;
Timer timer; timer.start();
// Execute kernel
for(size_t i = 0; i < repeats; ++i)
{
// TODO (13.1) implement computeHistogramCuda() in histogram.cu
if (mode == 0) computeHistogramCuda(d_histogram, d_imgIn, nbins, w, h, nc);
// TODO (13.3) implement computeHistogramCudaShared() in histogram.cu
else if (mode == 1) computeHistogramCudaShared(d_histogram, d_imgIn, nbins, w, h, nc);
else std::cerr << "mode not supported" << std::endl;
cudaDeviceSynchronize();
}
float t = timer.get() / (float)repeats; // elapsed time in seconds
std::cout << "time: " << t*1000 << " ms" << std::endl;
// Copy histogram back from GPU
// TODO copy from d_histogram to histogram
cudaMemcpy(histogram, d_histogram, nbins*sizeof(int), cudaMemcpyDeviceToHost); CUDA_CHECK;
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// ### Display your own output images here as needed
// TODO (13.2) show histogram using showHistogram256()
showHistogram256("Histogram", histogram, 100 + w + 40, 100);
if (useCam)
{
// wait 30ms for key input
if (cv::waitKey(30) >= 0)
{
mIn.release();
}
else
{
// retrieve next frame from camera
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn, CV_32F);
}
}
}
while (useCam && !mIn.empty());
if (!useCam)
{
cv::waitKey(0);
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
}
// free allocated arrays
// TODO free cuda memory of all device arrays
cudaFree(d_imgIn);
cudaFree(d_histogram);
// TODO free memory of all host arrays
delete[] imgIn;
delete[] histogram;
// close all opencv windows
cv::destroyAllWindows();
return 0;
}
|
2245360249854cef354a193347209144a8ecd885.hip | // !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "sceneStructs.h"
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
extern bool streamcompact_b;
extern bool texturemap_b;
extern bool bumpmap_b;
extern bool DOF_b;
extern bool MB_b;
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
// LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
// Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
// TODO: IMPLEMENT THIS FUNCTION
// Function that does the initial raycast from the camera
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
glm::vec3 A = glm::cross(view,up);
glm::vec3 B = glm::cross(A,view);
glm::vec3 M = eye + view;
float angley = fov.y;
float anglex= fov.x;
glm::vec3 H = glm::normalize(A) * glm::length(view) * tan(glm::radians(anglex));
glm::vec3 V = glm::normalize(B) * glm::length(view) * tan(glm::radians(angley));
float sx = ((float)x)/(resolution.x - 1);
float sy = ((float)y)/(resolution.y - 1);
glm::vec3 P = M + (2.0f*sx-1)* H + (1-2.0f*sy) * V; //The picture begins
glm::vec3 D = glm::normalize(P - eye);
ray r;
r.origin = eye;
r.direction = D;
return r;
}
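// [Editor's note] Illustrative host-side sanity check, not part of the original
// renderer: because raycastFromCameraKernel is __host__ __device__, it can be
// exercised on the CPU. The ray through the centre of the image should point
// (almost) straight along the camera's view vector. The function name is this
// note's own.
static void printCenterRayExample(glm::vec2 resolution, const cameraData& cam)
{
    ray r = raycastFromCameraKernel(resolution, 0.0f,
                                    (int)(resolution.x / 2), (int)(resolution.y / 2),
                                    cam.position, cam.view, cam.up, cam.fov);
    // r.direction is expected to be close to glm::normalize(cam.view)
    printf("center ray direction: %f %f %f\n",
           r.direction.x, r.direction.y, r.direction.z);
}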
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image,float iterations){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = image[index].x*255.0;
color.y = image[index].y*255.0;
color.z = image[index].z*255.0;
color /= iterations;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
// Each thread writes one pixel location in the texture (textel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
// TODO: IMPLEMENT THIS FUNCTION
// Core raytracer kernel
__global__ void PathTraceColor(ray* remainrays,int raysnum,int currdepth,int maxdepth,
staticGeom* geoms, int numberOfGeoms, int* lightIndex,
int lightNum,material* materials,float time,uint3* tcolors,int* tnums,bool textureb,uint3* bcolors,int* bnums,bool bumpb)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index<=raysnum)
{
ray r = remainrays[index];
		if(!r.exist) // a terminated ray contributes no additional color
{
r.raycolor = glm::vec3(0,0,0);
remainrays[index] = r;
return;
}
		// kill all remaining rays once currdepth reaches maxdepth
if(currdepth==maxdepth)
{
r.exist = false;
remainrays[index] = r;
return;
}
bool Intersect = false;
glm::vec3 InterSectP,InterSectN;
int IntersectgeomId = -1;
Intersect = Intersecttest(r,InterSectP,InterSectN,geoms,numberOfGeoms,IntersectgeomId);
if(bumpb)
bumpMap(geoms,IntersectgeomId,InterSectN,InterSectP,bcolors,bnums);
		// if the ray intersects nothing, give it black / the background color
if(Intersect==false)
{
r.raycolor = glm::vec3(0,0,0);
r.exist = false;
remainrays[index] = r;
return;
}
material currMaterial = materials[geoms[IntersectgeomId].materialid];
if(textureb)
textureMap(geoms,IntersectgeomId,currMaterial,InterSectN,InterSectP,tcolors,tnums);
bool IsLight = false;
for(int i=0;i<lightNum;i++)
{
if(IntersectgeomId==lightIndex[i])
IsLight = true;
}
if(IsLight)
{
r.raycolor = r.raycolor * currMaterial.color * currMaterial.emittance;
r.exist = false;
}
else
{
int seed = (index+1) * (time/2 + currdepth);
int BSDF = calculateBSDF(r,InterSectP,InterSectN,currMaterial,seed,currdepth);
r.raycolor = r.raycolor * currMaterial.color;
}
remainrays[index] = r;
}
}
//Changed
__global__ void AddColor(glm::vec3* colors, ray* remainrays,int raysnum)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index<=raysnum){
ray r = remainrays[index];
if(r.exist==false)
colors[r.initindex] += r.raycolor;
}
}
__global__ void InitRays(ray* rays, glm::vec2 resolution, cameraData cam, float time,bool DOFbool)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if((x<=resolution.x && y<=resolution.y))
{
//anti-aliasing
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0, 1);
//ray r = raycastFromCameraKernel(resolution,0.0f, x + float(u01(rng)) -0.5f, y+float(u01(rng))-0.5f,cam.position,cam.view,cam.up,cam.fov);
ray r = raycastFromCameraKernel(resolution,0.0f, x , y,cam.position,cam.view,cam.up,cam.fov);
if(DOFbool)
{
glm::vec3 rand3 = generateRandomNumberFromThread(resolution, time, x, y);
glm::vec2 rand2 = glm::vec2(rand3.x,rand3.y);
glm::vec3 offset = glm::vec3(rand2.x * cos((float)TWO_PI*rand2.y), rand2.x * sin((float)TWO_PI*rand2.y), 0.0f) * cam.blurradius;
glm::vec3 p = r.origin + r.direction * cam.focallength / glm::dot(cam.view, r.direction);
r.origin = r.origin + offset;
r.direction = glm::normalize(p - r.origin);
}
r.exist = true;
r.initindex = index;
r.raycolor = glm::vec3(1,1,1);
r.IOR = 1.0f;
rays[index] = r;
}
}
struct Is_Exist
{
__host__ __device__
bool operator()(const ray x)
{
return x.exist;
}
};
struct Is_Not_Exist
{
__host__ __device__
bool operator()(const ray x)
{
return !x.exist;
}
};
//Stream Compact
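// Compacts the ray pool in place: count_if yields the number of rays still alive
// and remove_if shifts those survivors to the front of the buffer, so subsequent
// launches only need to cover the first N entries.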
void ThrustStreamCompact(thrust::device_ptr<ray> origin,int &N)
{
//Count how many rays still exist
int finallength = thrust::count_if(origin, origin+N,Is_Exist());
thrust::remove_if(origin, origin+N,Is_Not_Exist());
N = finallength;
return;
}
// TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials,
int numberOfMaterials, geom* geoms, int numberOfGeoms,std::vector<uint3> mapcolors,std::vector<int> maplastnums,
std::vector<uint3> bumcolors,std::vector<int> bumlastnums){
int traceDepth = 8; //determines how many bounces the raytracer traces
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
// send image to GPU
glm::vec3* cudaimage = NULL;
hipMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
hipMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyHostToDevice);
// package geometry and materials and sent to GPU
staticGeom* geomList = new staticGeom[numberOfGeoms];
for(int i=0; i<numberOfGeoms; i++){
staticGeom newStaticGeom;
newStaticGeom.type = geoms[i].type;
newStaticGeom.materialid = geoms[i].materialid;
newStaticGeom.translation = geoms[i].translations[frame];
newStaticGeom.rotation = geoms[i].rotations[frame];
newStaticGeom.scale = geoms[i].scales[frame];
newStaticGeom.transform = geoms[i].transforms[frame];
newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
newStaticGeom.transinverseTransform = geoms[i].transinverseTransforms[frame];
if(MB_b)
{
newStaticGeom.MBV = geoms[i].MBV[frame];
int tempit = iterations%50;
glm::mat4 transform;
if(tempit>=0&&tempit<25)
{
newStaticGeom.translation += (float)tempit * newStaticGeom.MBV;
transform = utilityCore::buildTransformationMatrix(newStaticGeom.translation, newStaticGeom.rotation, newStaticGeom.scale);
}
else if(tempit>24)
{
newStaticGeom.translation -= (float)tempit * newStaticGeom.MBV;
transform = utilityCore::buildTransformationMatrix(newStaticGeom.translation, newStaticGeom.rotation, newStaticGeom.scale);
}
newStaticGeom.transform = utilityCore::glmMat4ToCudaMat4(transform);
newStaticGeom.inverseTransform = utilityCore::glmMat4ToCudaMat4(glm::inverse(transform));
}
newStaticGeom.tri = geoms[i].tri;
newStaticGeom.trinum = geoms[i].trinum;
newStaticGeom.texindex = geoms[i].texindex;
newStaticGeom.theight = geoms[i].theight;
newStaticGeom.twidth = geoms[i].twidth;
newStaticGeom.bumpindex = geoms[i].bumpindex;
newStaticGeom.bheight = geoms[i].bheight;
newStaticGeom.bwidth = geoms[i].bwidth;
geomList[i] = newStaticGeom;
}
staticGeom* cudageoms = NULL;
hipMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), hipMemcpyHostToDevice);
//materials
material* materialList = new material[numberOfMaterials];
for(int i=0; i<numberOfMaterials; i++){
material newMaterial;
newMaterial.color = materials[i].color;
//specular is useless as the highlight area color is decided by light
newMaterial.specularExponent = materials[i].specularExponent;
newMaterial.specularColor = materials[i].specularColor;
newMaterial.hasReflective = materials[i].hasReflective;
newMaterial.hasRefractive = materials[i].hasRefractive;
newMaterial.indexOfRefraction = materials[i].indexOfRefraction;
newMaterial.hasScatter = materials[i].hasScatter;
newMaterial.absorptionCoefficient = materials[i].absorptionCoefficient;
newMaterial.reducedScatterCoefficient = materials[i].reducedScatterCoefficient;
newMaterial.emittance = materials[i].emittance;
materialList[i] = newMaterial;
}
material* cudamaterials = NULL;
hipMalloc((void**)&cudamaterials, numberOfMaterials*sizeof(material));
hipMemcpy( cudamaterials, materialList, numberOfMaterials*sizeof(material), hipMemcpyHostToDevice);
//light
int lcount = 0;
for(int i=0; i<numberOfGeoms; i++)
{
if(materials[geomList[i].materialid].emittance>0)
lcount++;
}
int *lightIds = new int[lcount];
lcount = 0;
for(int i=0; i<numberOfGeoms; i++)
{
if(materials[geomList[i].materialid].emittance>0)
{
lightIds[lcount] = i;
lcount++;
}
}
int *cudalightIds=NULL;
hipMalloc((void**)&cudalightIds,lcount * sizeof(int));
hipMemcpy( cudalightIds, lightIds,lcount * sizeof(int), hipMemcpyHostToDevice);
// package camera
cameraData cam;
cam.resolution = renderCam->resolution;
cam.position = renderCam->positions[frame];
cam.view = renderCam->views[frame];
cam.up = renderCam->ups[frame];
cam.fov = renderCam->fov;
cam.focallength = renderCam->focall;
cam.blurradius = renderCam->blurr;
//Transfer Texture Map
uint3* cudacolors = NULL;
int* cudalastnums = NULL;
if(texturemap_b)
{
if(iterations==1 && (maplastnums.size()==0||mapcolors.size()==0))
{
std::cout<<"No Texture Map Set!"<<std::endl;
texturemap_b = false;
}
uint3 *allcolors = new uint3[(int)mapcolors.size()];
int *alllastnum = new int[(int)maplastnums.size()];
for(int i=0;i<(int)mapcolors.size();i++)
allcolors[i] = mapcolors[i];
for(int i=0;i<(int)maplastnums.size();i++)
alllastnum[i] = maplastnums[i];
hipMalloc((void**)&cudacolors, (int)mapcolors.size()*sizeof(uint3));
hipMemcpy( cudacolors, allcolors, (int)mapcolors.size()*sizeof(uint3), hipMemcpyHostToDevice);
hipMalloc((void**)&cudalastnums, (int)maplastnums.size()*sizeof(int));
hipMemcpy( cudalastnums, alllastnum, (int)maplastnums.size()*sizeof(int), hipMemcpyHostToDevice);
delete[] allcolors;
delete[] alllastnum;
}
//Transfer Bump Map
uint3* cudabumcolors = NULL;
int* cudabumlastnums = NULL;
if(bumpmap_b)
{
if(iterations==1 && (bumlastnums.size()==0||bumcolors.size()==0))
{
std::cout<<"No Bump Map Set!"<<std::endl;
bumpmap_b = false;
}
uint3 *allcolors = new uint3[(int)bumcolors.size()];
int *alllastnum = new int[(int)bumlastnums.size()];
for(int i=0;i<(int)bumcolors.size();i++)
allcolors[i] = bumcolors[i];
for(int i=0;i<(int)bumlastnums.size();i++)
alllastnum[i] = bumlastnums[i];
hipMalloc((void**)&cudabumcolors, (int)bumcolors.size()*sizeof(uint3));
hipMemcpy( cudabumcolors, allcolors, (int)bumcolors.size()*sizeof(uint3), hipMemcpyHostToDevice);
hipMalloc((void**)&cudabumlastnums, (int)bumlastnums.size()*sizeof(int));
hipMemcpy( cudabumlastnums, alllastnum, (int)bumlastnums.size()*sizeof(int), hipMemcpyHostToDevice);
delete[] allcolors;
delete[] alllastnum;
}
//set up init rays
int numberOfInitrays = renderCam->resolution.x*renderCam->resolution.y;
ray* cudarays = NULL;
hipMalloc((void**)&cudarays, numberOfInitrays*sizeof(ray));
hipLaunchKernelGGL(( InitRays), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, cudarays,renderCam->resolution,cam,(float)iterations,DOF_b);
//set path trace dim
int raythreadsPerBlock = (int)(tileSize*tileSize);
int rayblocksPerGrid = ceil((float)numberOfInitrays/(float)raythreadsPerBlock);
// kernel launches
for(int i=0;i<=traceDepth;i++)
{
if(numberOfInitrays>0)
{
hipLaunchKernelGGL(( PathTraceColor), dim3(rayblocksPerGrid), dim3(raythreadsPerBlock), 0, 0, cudarays,numberOfInitrays,i,traceDepth,cudageoms,
numberOfGeoms,cudalightIds,lcount,cudamaterials,(float)iterations,cudacolors,cudalastnums,texturemap_b
,cudabumcolors,cudabumlastnums,bumpmap_b);
hipLaunchKernelGGL(( AddColor), dim3(rayblocksPerGrid), dim3(raythreadsPerBlock), 0, 0, cudaimage, cudarays,numberOfInitrays);
if(streamcompact_b)
{
thrust::device_ptr<ray> rayStart(cudarays);
ThrustStreamCompact(rayStart,numberOfInitrays);
rayblocksPerGrid = ceil((float)numberOfInitrays/(float)raythreadsPerBlock);
}
}
}
hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, renderCam->resolution, cudaimage,(float)iterations);
// retrieve image from GPU
hipMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyDeviceToHost);
// free up stuff, or else we'll leak memory like a madman
hipFree( cudaimage );
hipFree( cudageoms );
//Added
hipFree( cudalightIds );
hipFree( cudamaterials );
hipFree( cudarays );
hipFree( cudacolors );
hipFree( cudalastnums );
hipFree( cudabumcolors );
hipFree( cudabumlastnums );
delete[] geomList;
//Added
delete[] materialList;
delete[] lightIds;
// make certain the kernel has completed
hipDeviceSynchronize();
checkCUDAError("Kernel failed!");
} | 2245360249854cef354a193347209144a8ecd885.cu | // CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include "sceneStructs.h"
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
extern bool streamcompact_b;
extern bool texturemap_b;
extern bool bumpmap_b;
extern bool DOF_b;
extern bool MB_b;
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
// LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
// Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
// TODO: IMPLEMENT THIS FUNCTION
// Function that does the initial raycast from the camera
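// A spans the horizontal image axis (view x up), B the vertical axis, and M is the
// center of the image plane one view-length in front of the eye. H and V are the
// plane's half-extent vectors scaled by tan(fov); (sx, sy) are the pixel's normalized
// screen coordinates, giving the world-space point P that the ray passes through.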
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
glm::vec3 A = glm::cross(view,up);
glm::vec3 B = glm::cross(A,view);
glm::vec3 M = eye + view;
float angley = fov.y;
float anglex= fov.x;
glm::vec3 H = glm::normalize(A) * glm::length(view) * tan(glm::radians(anglex));
glm::vec3 V = glm::normalize(B) * glm::length(view) * tan(glm::radians(angley));
float sx = ((float)x)/(resolution.x - 1);
float sy = ((float)y)/(resolution.y - 1);
glm::vec3 P = M + (2.0f*sx-1)* H + (1-2.0f*sy) * V; //The picture begins
glm::vec3 D = glm::normalize(P - eye);
ray r;
r.origin = eye;
r.direction = D;
return r;
}
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<resolution.x && y<resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image,float iterations){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<resolution.x && y<resolution.y){
glm::vec3 color;
color.x = image[index].x*255.0;
color.y = image[index].y*255.0;
color.z = image[index].z*255.0;
color /= iterations;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
// Each thread writes one pixel location in the texture (texel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
// TODO: IMPLEMENT THIS FUNCTION
// Core raytracer kernel
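// One bounce per launch: each live ray is intersected against the scene and is
// terminated on a miss, on hitting a light (scaled by the light's emittance), or
// when the depth limit is reached; otherwise its accumulated throughput (raycolor)
// is attenuated by the surface color and the ray is scattered via calculateBSDF.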
__global__ void PathTraceColor(ray* remainrays,int raysnum,int currdepth,int maxdepth,
staticGeom* geoms, int numberOfGeoms, int* lightIndex,
int lightNum,material* materials,float time,uint3* tcolors,int* tnums,bool textureb,uint3* bcolors,int* bnums,bool bumpb)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index<raysnum)
{
ray r = remainrays[index];
if(!r.exist) //If the ray no longer exists, it contributes no additional color
{
r.raycolor = glm::vec3(0,0,0);
remainrays[index] = r;
return;
}
//terminate all remaining rays once currdepth reaches maxdepth
if(currdepth==maxdepth)
{
r.exist = false;
remainrays[index] = r;
return;
}
bool Intersect = false;
glm::vec3 InterSectP,InterSectN;
int IntersectgeomId = -1;
Intersect = Intersecttest(r,InterSectP,InterSectN,geoms,numberOfGeoms,IntersectgeomId);
if(bumpb)
bumpMap(geoms,IntersectgeomId,InterSectN,InterSectP,bcolors,bnums);
//if the ray intersects nothing, give it the black background color
if(Intersect==false)
{
r.raycolor = glm::vec3(0,0,0);
r.exist = false;
remainrays[index] = r;
return;
}
material currMaterial = materials[geoms[IntersectgeomId].materialid];
if(textureb)
textureMap(geoms,IntersectgeomId,currMaterial,InterSectN,InterSectP,tcolors,tnums);
bool IsLight = false;
for(int i=0;i<lightNum;i++)
{
if(IntersectgeomId==lightIndex[i])
IsLight = true;
}
if(IsLight)
{
r.raycolor = r.raycolor * currMaterial.color * currMaterial.emittance;
r.exist = false;
}
else
{
int seed = (index+1) * (time/2 + currdepth);
int BSDF = calculateBSDF(r,InterSectP,InterSectN,currMaterial,seed,currdepth);
r.raycolor = r.raycolor * currMaterial.color;
}
remainrays[index] = r;
}
}
//Changed
__global__ void AddColor(glm::vec3* colors, ray* remainrays,int raysnum)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index<raysnum){
ray r = remainrays[index];
if(r.exist==false)
colors[r.initindex] += r.raycolor;
}
}
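// Builds one primary camera ray per pixel and tags it with its pixel index; with
// depth of field enabled, the origin is jittered across a lens disc of radius
// blurradius and the direction is re-aimed at the focal plane.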
__global__ void InitRays(ray* rays, glm::vec2 resolution, cameraData cam, float time,bool DOFbool)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if((x<resolution.x && y<resolution.y))
{
//anti-aliasing
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0, 1);
//ray r = raycastFromCameraKernel(resolution,0.0f, x + float(u01(rng)) -0.5f, y+float(u01(rng))-0.5f,cam.position,cam.view,cam.up,cam.fov);
ray r = raycastFromCameraKernel(resolution,0.0f, x , y,cam.position,cam.view,cam.up,cam.fov);
if(DOFbool)
{
glm::vec3 rand3 = generateRandomNumberFromThread(resolution, time, x, y);
glm::vec2 rand2 = glm::vec2(rand3.x,rand3.y);
glm::vec3 offset = glm::vec3(rand2.x * cos((float)TWO_PI*rand2.y), rand2.x * sin((float)TWO_PI*rand2.y), 0.0f) * cam.blurradius;
glm::vec3 p = r.origin + r.direction * cam.focallength / glm::dot(cam.view, r.direction);
r.origin = r.origin + offset;
r.direction = glm::normalize(p - r.origin);
}
r.exist = true;
r.initindex = index;
r.raycolor = glm::vec3(1,1,1);
r.IOR = 1.0f;
rays[index] = r;
}
}
struct Is_Exist
{
__host__ __device__
bool operator()(const ray x)
{
return x.exist;
}
};
struct Is_Not_Exist
{
__host__ __device__
bool operator()(const ray x)
{
return !x.exist;
}
};
//Stream Compact
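// Compacts the ray pool in place: count_if yields the number of rays still alive
// and remove_if shifts those survivors to the front of the buffer, so subsequent
// launches only need to cover the first N entries.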
void ThrustStreamCompact(thrust::device_ptr<ray> origin,int &N)
{
//Count how many rays still exist
int finallength = thrust::count_if(origin, origin+N,Is_Exist());
thrust::remove_if(origin, origin+N,Is_Not_Exist());
N = finallength;
return;
}
// TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials,
int numberOfMaterials, geom* geoms, int numberOfGeoms,std::vector<uint3> mapcolors,std::vector<int> maplastnums,
std::vector<uint3> bumcolors,std::vector<int> bumlastnums){
int traceDepth = 8; //determines how many bounces the raytracer traces
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
// send image to GPU
glm::vec3* cudaimage = NULL;
cudaMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
cudaMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyHostToDevice);
// package geometry and materials and sent to GPU
staticGeom* geomList = new staticGeom[numberOfGeoms];
for(int i=0; i<numberOfGeoms; i++){
staticGeom newStaticGeom;
newStaticGeom.type = geoms[i].type;
newStaticGeom.materialid = geoms[i].materialid;
newStaticGeom.translation = geoms[i].translations[frame];
newStaticGeom.rotation = geoms[i].rotations[frame];
newStaticGeom.scale = geoms[i].scales[frame];
newStaticGeom.transform = geoms[i].transforms[frame];
newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
newStaticGeom.transinverseTransform = geoms[i].transinverseTransforms[frame];
if(MB_b)
{
newStaticGeom.MBV = geoms[i].MBV[frame];
int tempit = iterations%50;
glm::mat4 transform;
if(tempit>=0&&tempit<25)
{
newStaticGeom.translation += (float)tempit * newStaticGeom.MBV;
transform = utilityCore::buildTransformationMatrix(newStaticGeom.translation, newStaticGeom.rotation, newStaticGeom.scale);
}
else if(tempit>24)
{
newStaticGeom.translation -= (float)tempit * newStaticGeom.MBV;
transform = utilityCore::buildTransformationMatrix(newStaticGeom.translation, newStaticGeom.rotation, newStaticGeom.scale);
}
newStaticGeom.transform = utilityCore::glmMat4ToCudaMat4(transform);
newStaticGeom.inverseTransform = utilityCore::glmMat4ToCudaMat4(glm::inverse(transform));
}
newStaticGeom.tri = geoms[i].tri;
newStaticGeom.trinum = geoms[i].trinum;
newStaticGeom.texindex = geoms[i].texindex;
newStaticGeom.theight = geoms[i].theight;
newStaticGeom.twidth = geoms[i].twidth;
newStaticGeom.bumpindex = geoms[i].bumpindex;
newStaticGeom.bheight = geoms[i].bheight;
newStaticGeom.bwidth = geoms[i].bwidth;
geomList[i] = newStaticGeom;
}
staticGeom* cudageoms = NULL;
cudaMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
cudaMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice);
//materials
material* materialList = new material[numberOfMaterials];
for(int i=0; i<numberOfMaterials; i++){
material newMaterial;
newMaterial.color = materials[i].color;
//specular is useless as the highlight area color is decided by light
newMaterial.specularExponent = materials[i].specularExponent;
newMaterial.specularColor = materials[i].specularColor;
newMaterial.hasReflective = materials[i].hasReflective;
newMaterial.hasRefractive = materials[i].hasRefractive;
newMaterial.indexOfRefraction = materials[i].indexOfRefraction;
newMaterial.hasScatter = materials[i].hasScatter;
newMaterial.absorptionCoefficient = materials[i].absorptionCoefficient;
newMaterial.reducedScatterCoefficient = materials[i].reducedScatterCoefficient;
newMaterial.emittance = materials[i].emittance;
materialList[i] = newMaterial;
}
material* cudamaterials = NULL;
cudaMalloc((void**)&cudamaterials, numberOfMaterials*sizeof(material));
cudaMemcpy( cudamaterials, materialList, numberOfMaterials*sizeof(material), cudaMemcpyHostToDevice);
//light
int lcount = 0;
for(int i=0; i<numberOfGeoms; i++)
{
if(materials[geomList[i].materialid].emittance>0)
lcount++;
}
int *lightIds = new int[lcount];
lcount = 0;
for(int i=0; i<numberOfGeoms; i++)
{
if(materials[geomList[i].materialid].emittance>0)
{
lightIds[lcount] = i;
lcount++;
}
}
int *cudalightIds=NULL;
cudaMalloc((void**)&cudalightIds,lcount * sizeof(int));
cudaMemcpy( cudalightIds, lightIds,lcount * sizeof(int), cudaMemcpyHostToDevice);
// package camera
cameraData cam;
cam.resolution = renderCam->resolution;
cam.position = renderCam->positions[frame];
cam.view = renderCam->views[frame];
cam.up = renderCam->ups[frame];
cam.fov = renderCam->fov;
cam.focallength = renderCam->focall;
cam.blurradius = renderCam->blurr;
//Transfer Texture Map
uint3* cudacolors = NULL;
int* cudalastnums = NULL;
if(texturemap_b)
{
if(iterations==1 && (maplastnums.size()==0||mapcolors.size()==0))
{
std::cout<<"No Texture Map Set!"<<std::endl;
texturemap_b = false;
}
uint3 *allcolors = new uint3[(int)mapcolors.size()];
int *alllastnum = new int[(int)maplastnums.size()];
for(int i=0;i<(int)mapcolors.size();i++)
allcolors[i] = mapcolors[i];
for(int i=0;i<(int)maplastnums.size();i++)
alllastnum[i] = maplastnums[i];
cudaMalloc((void**)&cudacolors, (int)mapcolors.size()*sizeof(uint3));
cudaMemcpy( cudacolors, allcolors, (int)mapcolors.size()*sizeof(uint3), cudaMemcpyHostToDevice);
cudaMalloc((void**)&cudalastnums, (int)maplastnums.size()*sizeof(int));
cudaMemcpy( cudalastnums, alllastnum, (int)maplastnums.size()*sizeof(int), cudaMemcpyHostToDevice);
delete[] allcolors;
delete[] alllastnum;
}
//Transfer Bump Map
uint3* cudabumcolors = NULL;
int* cudabumlastnums = NULL;
if(bumpmap_b)
{
if(iterations==1 && (bumlastnums.size()==0||bumcolors.size()==0))
{
std::cout<<"No Bump Map Set!"<<std::endl;
bumpmap_b = false;
}
uint3 *allcolors = new uint3[(int)bumcolors.size()];
int *alllastnum = new int[(int)bumlastnums.size()];
for(int i=0;i<(int)bumcolors.size();i++)
allcolors[i] = bumcolors[i];
for(int i=0;i<(int)bumlastnums.size();i++)
alllastnum[i] = bumlastnums[i];
cudaMalloc((void**)&cudabumcolors, (int)bumcolors.size()*sizeof(uint3));
cudaMemcpy( cudabumcolors, allcolors, (int)bumcolors.size()*sizeof(uint3), cudaMemcpyHostToDevice);
cudaMalloc((void**)&cudabumlastnums, (int)bumlastnums.size()*sizeof(int));
cudaMemcpy( cudabumlastnums, alllastnum, (int)bumlastnums.size()*sizeof(int), cudaMemcpyHostToDevice);
delete[] allcolors;
delete[] alllastnum;
}
//set up init rays
int numberOfInitrays = renderCam->resolution.x*renderCam->resolution.y;
ray* cudarays = NULL;
cudaMalloc((void**)&cudarays, numberOfInitrays*sizeof(ray));
InitRays<<<fullBlocksPerGrid, threadsPerBlock>>>(cudarays,renderCam->resolution,cam,(float)iterations,DOF_b);
//set path trace dim
int raythreadsPerBlock = (int)(tileSize*tileSize);
int rayblocksPerGrid = ceil((float)numberOfInitrays/(float)raythreadsPerBlock);
// kernel launches
for(int i=0;i<=traceDepth;i++)
{
if(numberOfInitrays>0)
{
PathTraceColor<<<rayblocksPerGrid, raythreadsPerBlock>>>(cudarays,numberOfInitrays,i,traceDepth,cudageoms,
numberOfGeoms,cudalightIds,lcount,cudamaterials,(float)iterations,cudacolors,cudalastnums,texturemap_b
,cudabumcolors,cudabumlastnums,bumpmap_b);
AddColor<<<rayblocksPerGrid, raythreadsPerBlock>>>(cudaimage, cudarays,numberOfInitrays);
if(streamcompact_b)
{
thrust::device_ptr<ray> rayStart(cudarays);
ThrustStreamCompact(rayStart,numberOfInitrays);
rayblocksPerGrid = ceil((float)numberOfInitrays/(float)raythreadsPerBlock);
}
}
}
sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, renderCam->resolution, cudaimage,(float)iterations);
// retrieve image from GPU
cudaMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyDeviceToHost);
// free up stuff, or else we'll leak memory like a madman
cudaFree( cudaimage );
cudaFree( cudageoms );
//Added
cudaFree( cudalightIds );
cudaFree( cudamaterials );
cudaFree( cudarays );
cudaFree( cudacolors );
cudaFree( cudalastnums );
cudaFree( cudabumcolors );
cudaFree( cudabumlastnums );
delete[] geomList;
//Added
delete[] materialList;
delete[] lightIds;
// make certain the kernel has completed
cudaDeviceSynchronize();
checkCUDAError("Kernel failed!");
} |
0edc00e8f62baba077d35a1fdb0283c6ad71a715.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "image.hpp"
#include "gaussian_kernel.hpp"
using namespace gpustitch;
#define GAUSS_KERN_RADIUS 3
#define GAUSS_KERN_SIZE ((GAUSS_KERN_RADIUS) * 2 + 1)
#define GAUSS_TILE_W 16
#define GAUSS_TILE_H 16
#define GAUSS_TILE_SIZE ((GAUSS_TILE_W) * (GAUSS_TILE_H))
__constant__ float gauss_kern[GAUSS_KERN_SIZE];
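// Separable Gaussian blur applied in place: a row pass followed by a column pass.
// Each pass stages a (tile + 2*radius) apron of pixels in shared memory, clamping
// reads to the border of the processed region, and convolves with the kernel
// weights held in constant memory.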
__global__
void kern_gauss_blur_row(
unsigned char *src, int src_pitch,
int start_x, int start_y,
int w, int h)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const int shared_w = GAUSS_TILE_W + GAUSS_KERN_RADIUS * 2;
__shared__ uchar4 data[shared_w * GAUSS_TILE_H];
const int src_x = start_x + x;
const int src_y = start_y + y;
if(y >= h)
return;
uchar4 *src_line = (uchar4 *) (src + src_y * src_pitch);
const int src_x_left = min(start_x + w - 1, max(start_x, src_x - GAUSS_KERN_RADIUS));
const int src_x_right = min(start_x + w - 1, src_x + GAUSS_KERN_RADIUS);
data[threadIdx.x + shared_w * threadIdx.y] = src_line[src_x_left];
data[threadIdx.x + GAUSS_KERN_RADIUS*2 + shared_w * threadIdx.y] = src_line[src_x_right];
__syncthreads();
if(x >= w)
return;
float4 val = make_float4(0, 0, 0, 0);
for(int i = 0; i < GAUSS_KERN_SIZE; i++){
val.x += data[threadIdx.x + i + shared_w * threadIdx.y].x * gauss_kern[i];
val.y += data[threadIdx.x + i + shared_w * threadIdx.y].y * gauss_kern[i];
val.z += data[threadIdx.x + i + shared_w * threadIdx.y].z * gauss_kern[i];
}
src_line[src_x] = make_uchar4(val.x, val.y, val.z, 255);
}
__global__
void kern_gauss_blur_col(
unsigned char *src, int src_pitch,
int start_x, int start_y,
int w, int h)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
__shared__ uchar4 data[GAUSS_TILE_W * (GAUSS_TILE_H + GAUSS_KERN_RADIUS * 2)];
const int src_x = start_x + x;
const int src_y = start_y + y;
if(x >= w)
return;
int src_y_top = min(start_y + h - 1, max(start_y, src_y - GAUSS_KERN_RADIUS));
int src_y_bot = min(start_y + h - 1, src_y + GAUSS_KERN_RADIUS);
uchar4 *src_line_top = (uchar4 *) (src + src_y_top * src_pitch);
uchar4 *src_line_bot = (uchar4 *) (src + src_y_bot * src_pitch);
data[threadIdx.x + GAUSS_TILE_W * threadIdx.y] = src_line_top[src_x];
data[threadIdx.x + GAUSS_TILE_W * (threadIdx.y + GAUSS_KERN_RADIUS*2)] = src_line_bot[src_x];
__syncthreads();
if(y >= h)
return;
float4 val = make_float4(0, 0, 0, 0);
for(int i = 0; i < GAUSS_KERN_SIZE; i++){
val.x += data[threadIdx.x + GAUSS_TILE_W * (threadIdx.y + i)].x * gauss_kern[i];
val.y += data[threadIdx.x + GAUSS_TILE_W * (threadIdx.y + i)].y * gauss_kern[i];
val.z += data[threadIdx.x + GAUSS_TILE_W * (threadIdx.y + i)].z * gauss_kern[i];
}
uchar4 *src_line = (uchar4 *) (src + src_y * src_pitch);
src_line[src_x] = make_uchar4(val.x, val.y, val.z, 255);
}
void cuda_gaussian_blur(const Image_cuda *img, int start_x, int start_y,
int w, int h,
ihipStream_t *stream)
{
const float sigma = 1.4f;
static bool kern_initialized = false;
if(!kern_initialized){
Gaussian_kernel<GAUSS_KERN_SIZE> kern(sigma);
hipMemcpyToSymbol(gauss_kern, kern.get(), sizeof(float) * GAUSS_KERN_SIZE);
kern_initialized = true;
}
dim3 blockSize(GAUSS_TILE_W, GAUSS_TILE_H);
dim3 numBlocks((w + blockSize.x - 1) / blockSize.x,
(h + blockSize.y - 1) / blockSize.y);
hipLaunchKernelGGL(( kern_gauss_blur_row), dim3(numBlocks), dim3(blockSize), 0, stream,
(unsigned char *) img->data(), img->get_pitch(),
start_x, start_y,
w, h);
hipLaunchKernelGGL(( kern_gauss_blur_col), dim3(numBlocks), dim3(blockSize), 0, stream,
(unsigned char *) img->data(), img->get_pitch(),
start_x, start_y,
w, h);
}
__global__
void kern_subtract_images(const unsigned char *a, int a_pitch,
const unsigned char *b, int b_pitch,
unsigned char *res, int res_pitch,
int w, int h)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const uchar4 *a_row = (uchar4 *) (a + y * a_pitch);
const uchar4 *b_row = (uchar4 *) (b + y * b_pitch);
uchar4 *res_row = (uchar4 *) (res + y * res_pitch);
if(x >= w || y >= h)
return;
res_row[x] = make_uchar4(
max(0, min(255, 128 + a_row[x].x - b_row[x].x)),
max(0, min(255, 128 + a_row[x].y - b_row[x].y)),
max(0, min(255, 128 + a_row[x].z - b_row[x].z)),
255
);
}
void cuda_subtract_images(const gpustitch::Image_cuda *a,
const gpustitch::Image_cuda *b,
gpustitch::Image_cuda *result,
int w, int h,
ihipStream_t *stream)
{
dim3 blockSize(32, 32);
dim3 numBlocks((w + blockSize.x - 1) / blockSize.x,
(h + blockSize.y - 1) / blockSize.y);
hipLaunchKernelGGL(( kern_subtract_images), dim3(numBlocks), dim3(blockSize), 0, stream,
(const unsigned char *) a->data(), a->get_pitch(),
(const unsigned char *) b->data(), b->get_pitch(),
(unsigned char *) result->data(), result->get_pitch(),
w, h);
}
__global__
void kern_add_images(const unsigned char *low, int low_pitch,
const unsigned char *high, int high_pitch,
unsigned char *res, int res_pitch,
int w, int h)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const uchar4 *low_row = (uchar4 *) (low + y * low_pitch);
const uchar4 *high_row = (uchar4 *) (high + y * high_pitch);
uchar4 *res_row = (uchar4 *) (res + y * res_pitch);
if(x >= w || y >= h)
return;
res_row[x] = make_uchar4(
max(0, min(255, low_row[x].x + (high_row[x].x - 128))),
max(0, min(255, low_row[x].y + (high_row[x].y - 128))),
max(0, min(255, low_row[x].z + (high_row[x].z - 128))),
255
);
}
void cuda_add_images(const gpustitch::Image_cuda *a,
const gpustitch::Image_cuda *b,
gpustitch::Image_cuda *result,
int w, int h,
ihipStream_t *stream)
{
dim3 blockSize(32, 32);
dim3 numBlocks((w + blockSize.x - 1) / blockSize.x,
(h + blockSize.y - 1) / blockSize.y);
hipLaunchKernelGGL(( kern_add_images), dim3(numBlocks), dim3(blockSize), 0, stream,
(const unsigned char *) a->data(), a->get_pitch(),
(const unsigned char *) b->data(), b->get_pitch(),
(unsigned char *) result->data(), result->get_pitch(),
w, h);
}
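// Pyramid resampling is nearest-neighbour: downsampling keeps every second pixel
// in each dimension, upsampling replicates each source pixel into a 2x2 block.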
__global__
void kern_downsample(unsigned char *dst, int dst_pitch,
const unsigned char *src, int src_pitch,
int w, int h)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const uchar4 *src_row = (uchar4 *) (src + y * 2 * src_pitch);
uchar4 *dst_row = (uchar4 *) (dst + y * dst_pitch);
if(x >= w || y >= h)
return;
dst_row[x] = src_row[2*x];
}
void cuda_downsample(gpustitch::Image_cuda *dst,
const gpustitch::Image_cuda *src,
int w, int h,
ihipStream_t *stream)
{
int downsampled_w = w / 2;
int downsampled_h = h / 2;
dim3 blockSize(32, 32);
dim3 numBlocks((downsampled_w + blockSize.x - 1) / blockSize.x,
(downsampled_h + blockSize.y - 1) / blockSize.y);
hipLaunchKernelGGL(( kern_downsample), dim3(numBlocks), dim3(blockSize), 0, stream,
(unsigned char *) dst->data(), dst->get_pitch(),
(const unsigned char *) src->data(), src->get_pitch(),
downsampled_w, downsampled_h);
}
__global__
void kern_upsample(unsigned char *dst, int dst_pitch,
const unsigned char *src, int src_pitch,
int w, int h)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const uchar4 *src_row = (uchar4 *) (src + (y/2) * src_pitch);
uchar4 *dst_row = (uchar4 *) (dst + y * dst_pitch);
if(x >= w || y >= h)
return;
dst_row[x] = src_row[x / 2];
}
void cuda_upsample(gpustitch::Image_cuda *dst,
const gpustitch::Image_cuda *src,
int w, int h,
ihipStream_t *stream)
{
int upsampled_w = 2 * w;
int upsampled_h = 2 * h;
dim3 blockSize(32, 32);
dim3 numBlocks((upsampled_w + blockSize.x - 1) / blockSize.x,
(upsampled_h + blockSize.y - 1) / blockSize.y);
hipLaunchKernelGGL(( kern_upsample), dim3(numBlocks), dim3(blockSize), 0, stream,
(unsigned char *) dst->data(), dst->get_pitch(),
(const unsigned char *) src->data(), src->get_pitch(),
upsampled_w, upsampled_h);
}
| 0edc00e8f62baba077d35a1fdb0283c6ad71a715.cu | #include "image.hpp"
#include "gaussian_kernel.hpp"
using namespace gpustitch;
#define GAUSS_KERN_RADIUS 3
#define GAUSS_KERN_SIZE ((GAUSS_KERN_RADIUS) * 2 + 1)
#define GAUSS_TILE_W 16
#define GAUSS_TILE_H 16
#define GAUSS_TILE_SIZE ((GAUSS_TILE_W) * (GAUSS_TILE_H))
__constant__ float gauss_kern[GAUSS_KERN_SIZE];
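// Separable Gaussian blur applied in place: a row pass followed by a column pass.
// Each pass stages a (tile + 2*radius) apron of pixels in shared memory, clamping
// reads to the border of the processed region, and convolves with the kernel
// weights held in constant memory.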
__global__
void kern_gauss_blur_row(
unsigned char *src, int src_pitch,
int start_x, int start_y,
int w, int h)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const int shared_w = GAUSS_TILE_W + GAUSS_KERN_RADIUS * 2;
__shared__ uchar4 data[shared_w * GAUSS_TILE_H];
const int src_x = start_x + x;
const int src_y = start_y + y;
if(y >= h)
return;
uchar4 *src_line = (uchar4 *) (src + src_y * src_pitch);
const int src_x_left = min(start_x + w - 1, max(start_x, src_x - GAUSS_KERN_RADIUS));
const int src_x_right = min(start_x + w - 1, src_x + GAUSS_KERN_RADIUS);
data[threadIdx.x + shared_w * threadIdx.y] = src_line[src_x_left];
data[threadIdx.x + GAUSS_KERN_RADIUS*2 + shared_w * threadIdx.y] = src_line[src_x_right];
__syncthreads();
if(x >= w)
return;
float4 val = make_float4(0, 0, 0, 0);
for(int i = 0; i < GAUSS_KERN_SIZE; i++){
val.x += data[threadIdx.x + i + shared_w * threadIdx.y].x * gauss_kern[i];
val.y += data[threadIdx.x + i + shared_w * threadIdx.y].y * gauss_kern[i];
val.z += data[threadIdx.x + i + shared_w * threadIdx.y].z * gauss_kern[i];
}
src_line[src_x] = make_uchar4(val.x, val.y, val.z, 255);
}
__global__
void kern_gauss_blur_col(
unsigned char *src, int src_pitch,
int start_x, int start_y,
int w, int h)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
__shared__ uchar4 data[GAUSS_TILE_W * (GAUSS_TILE_H + GAUSS_KERN_RADIUS * 2)];
const int src_x = start_x + x;
const int src_y = start_y + y;
if(x >= w)
return;
int src_y_top = min(start_y + h - 1, max(start_y, src_y - GAUSS_KERN_RADIUS));
int src_y_bot = min(start_y + h - 1, src_y + GAUSS_KERN_RADIUS);
uchar4 *src_line_top = (uchar4 *) (src + src_y_top * src_pitch);
uchar4 *src_line_bot = (uchar4 *) (src + src_y_bot * src_pitch);
data[threadIdx.x + GAUSS_TILE_W * threadIdx.y] = src_line_top[src_x];
data[threadIdx.x + GAUSS_TILE_W * (threadIdx.y + GAUSS_KERN_RADIUS*2)] = src_line_bot[src_x];
__syncthreads();
if(y >= h)
return;
float4 val = make_float4(0, 0, 0, 0);
for(int i = 0; i < GAUSS_KERN_SIZE; i++){
val.x += data[threadIdx.x + GAUSS_TILE_W * (threadIdx.y + i)].x * gauss_kern[i];
val.y += data[threadIdx.x + GAUSS_TILE_W * (threadIdx.y + i)].y * gauss_kern[i];
val.z += data[threadIdx.x + GAUSS_TILE_W * (threadIdx.y + i)].z * gauss_kern[i];
}
uchar4 *src_line = (uchar4 *) (src + src_y * src_pitch);
src_line[src_x] = make_uchar4(val.x, val.y, val.z, 255);
}
void cuda_gaussian_blur(const Image_cuda *img, int start_x, int start_y,
int w, int h,
CUstream_st *stream)
{
const float sigma = 1.4f;
static bool kern_initialized = false;
if(!kern_initialized){
Gaussian_kernel<GAUSS_KERN_SIZE> kern(sigma);
cudaMemcpyToSymbol(gauss_kern, kern.get(), sizeof(float) * GAUSS_KERN_SIZE);
kern_initialized = true;
}
dim3 blockSize(GAUSS_TILE_W, GAUSS_TILE_H);
dim3 numBlocks((w + blockSize.x - 1) / blockSize.x,
(h + blockSize.y - 1) / blockSize.y);
kern_gauss_blur_row<<<numBlocks, blockSize, 0, stream>>>(
(unsigned char *) img->data(), img->get_pitch(),
start_x, start_y,
w, h);
kern_gauss_blur_col<<<numBlocks, blockSize, 0, stream>>>(
(unsigned char *) img->data(), img->get_pitch(),
start_x, start_y,
w, h);
}
__global__
void kern_subtract_images(const unsigned char *a, int a_pitch,
const unsigned char *b, int b_pitch,
unsigned char *res, int res_pitch,
int w, int h)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const uchar4 *a_row = (uchar4 *) (a + y * a_pitch);
const uchar4 *b_row = (uchar4 *) (b + y * b_pitch);
uchar4 *res_row = (uchar4 *) (res + y * res_pitch);
if(x >= w || y >= h)
return;
res_row[x] = make_uchar4(
max(0, min(255, 128 + a_row[x].x - b_row[x].x)),
max(0, min(255, 128 + a_row[x].y - b_row[x].y)),
max(0, min(255, 128 + a_row[x].z - b_row[x].z)),
255
);
}
void cuda_subtract_images(const gpustitch::Image_cuda *a,
const gpustitch::Image_cuda *b,
gpustitch::Image_cuda *result,
int w, int h,
CUstream_st *stream)
{
dim3 blockSize(32, 32);
dim3 numBlocks((w + blockSize.x - 1) / blockSize.x,
(h + blockSize.y - 1) / blockSize.y);
kern_subtract_images<<<numBlocks, blockSize, 0, stream>>>(
(const unsigned char *) a->data(), a->get_pitch(),
(const unsigned char *) b->data(), b->get_pitch(),
(unsigned char *) result->data(), result->get_pitch(),
w, h);
}
__global__
void kern_add_images(const unsigned char *low, int low_pitch,
const unsigned char *high, int high_pitch,
unsigned char *res, int res_pitch,
int w, int h)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const uchar4 *low_row = (uchar4 *) (low + y * low_pitch);
const uchar4 *high_row = (uchar4 *) (high + y * high_pitch);
uchar4 *res_row = (uchar4 *) (res + y * res_pitch);
if(x >= w || y >= h)
return;
res_row[x] = make_uchar4(
max(0, min(255, low_row[x].x + (high_row[x].x - 128))),
max(0, min(255, low_row[x].y + (high_row[x].y - 128))),
max(0, min(255, low_row[x].z + (high_row[x].z - 128))),
255
);
}
void cuda_add_images(const gpustitch::Image_cuda *a,
const gpustitch::Image_cuda *b,
gpustitch::Image_cuda *result,
int w, int h,
CUstream_st *stream)
{
dim3 blockSize(32, 32);
dim3 numBlocks((w + blockSize.x - 1) / blockSize.x,
(h + blockSize.y - 1) / blockSize.y);
kern_add_images<<<numBlocks, blockSize, 0, stream>>>(
(const unsigned char *) a->data(), a->get_pitch(),
(const unsigned char *) b->data(), b->get_pitch(),
(unsigned char *) result->data(), result->get_pitch(),
w, h);
}
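// Pyramid resampling is nearest-neighbour: downsampling keeps every second pixel
// in each dimension, upsampling replicates each source pixel into a 2x2 block.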
__global__
void kern_downsample(unsigned char *dst, int dst_pitch,
const unsigned char *src, int src_pitch,
int w, int h)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const uchar4 *src_row = (uchar4 *) (src + y * 2 * src_pitch);
uchar4 *dst_row = (uchar4 *) (dst + y * dst_pitch);
if(x >= w || y >= h)
return;
dst_row[x] = src_row[2*x];
}
void cuda_downsample(gpustitch::Image_cuda *dst,
const gpustitch::Image_cuda *src,
int w, int h,
CUstream_st *stream)
{
int downsampled_w = w / 2;
int downsampled_h = h / 2;
dim3 blockSize(32, 32);
dim3 numBlocks((downsampled_w + blockSize.x - 1) / blockSize.x,
(downsampled_h + blockSize.y - 1) / blockSize.y);
kern_downsample<<<numBlocks, blockSize, 0, stream>>>(
(unsigned char *) dst->data(), dst->get_pitch(),
(const unsigned char *) src->data(), src->get_pitch(),
downsampled_w, downsampled_h);
}
__global__
void kern_upsample(unsigned char *dst, int dst_pitch,
const unsigned char *src, int src_pitch,
int w, int h)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const uchar4 *src_row = (uchar4 *) (src + (y/2) * src_pitch);
uchar4 *dst_row = (uchar4 *) (dst + y * dst_pitch);
if(x >= w || y >= h)
return;
dst_row[x] = src_row[x / 2];
}
void cuda_upsample(gpustitch::Image_cuda *dst,
const gpustitch::Image_cuda *src,
int w, int h,
CUstream_st *stream)
{
int upsampled_w = 2 * w;
int upsampled_h = 2 * h;
dim3 blockSize(32, 32);
dim3 numBlocks((upsampled_w + blockSize.x - 1) / blockSize.x,
(upsampled_h + blockSize.y - 1) / blockSize.y);
kern_upsample<<<numBlocks, blockSize, 0, stream>>>(
(unsigned char *) dst->data(), dst->get_pitch(),
(const unsigned char *) src->data(), src->get_pitch(),
upsampled_w, upsampled_h);
}
|
4d5a094b2b9671c9b23f6856eb83e811026f43da.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019-2021 by XGBoost Contributors
*
* \file data.cu
* \brief Handles setting metainfo from array interface.
*/
#include "xgboost/data.h"
#include "xgboost/logging.h"
#include "xgboost/json.h"
#include "array_interface.h"
#include "../common/device_helpers.cuh"
#include "../common/linalg_op.cuh"
#include "device_adapter_hip.cuh"
#include "simple_dmatrix.h"
#include "validation.h"
namespace xgboost {
namespace {
auto SetDeviceToPtr(void const* ptr) {
hipPointerAttribute_t attr;
dh::safe_cuda(hipPointerGetAttributes(&attr, ptr));
int32_t ptr_device = attr.device;
dh::safe_cuda(hipSetDevice(ptr_device));
return ptr_device;
}
template <typename T, int32_t D>
void CopyTensorInfoImpl(Json arr_interface, linalg::Tensor<T, D>* p_out) {
ArrayInterface<D> array(arr_interface);
if (array.n == 0) {
p_out->SetDevice(0);
p_out->Reshape(array.shape);
return;
}
CHECK(array.valid.Size() == 0) << "Meta info like label or weight can not have missing value.";
auto ptr_device = SetDeviceToPtr(array.data);
p_out->SetDevice(ptr_device);
if (array.is_contiguous && array.type == ToDType<T>::kType) {
p_out->ModifyInplace([&](HostDeviceVector<T>* data, common::Span<size_t, D> shape) {
// set shape
std::copy(array.shape, array.shape + D, shape.data());
// set data
data->Resize(array.n);
dh::safe_cuda(hipMemcpyAsync(data->DevicePointer(), array.data, array.n * sizeof(T),
hipMemcpyDefault));
});
return;
}
p_out->Reshape(array.shape);
auto t = p_out->View(ptr_device);
linalg::ElementWiseKernelDevice(t, [=] __device__(size_t i, T) {
return linalg::detail::Apply(TypedIndex<T, D>{array}, linalg::UnravelIndex<D>(i, array.shape));
});
}
void CopyGroupInfoImpl(ArrayInterface<1> column, std::vector<bst_group_t>* out) {
CHECK(column.type != ArrayInterfaceHandler::kF4 && column.type != ArrayInterfaceHandler::kF8)
<< "Expected integer for group info.";
auto ptr_device = SetDeviceToPtr(column.data);
CHECK_EQ(ptr_device, dh::CurrentDevice());
dh::TemporaryArray<bst_group_t> temp(column.Shape(0));
auto d_tmp = temp.data().get();
dh::LaunchN(column.Shape(0),
[=] __device__(size_t idx) { d_tmp[idx] = TypedIndex<size_t, 1>{column}(idx); });
auto length = column.Shape(0);
out->resize(length + 1);
out->at(0) = 0;
thrust::copy(temp.data(), temp.data() + length, out->begin() + 1);
std::partial_sum(out->begin(), out->end(), out->begin());
}
void CopyQidImpl(ArrayInterface<1> array_interface, std::vector<bst_group_t>* p_group_ptr) {
auto &group_ptr_ = *p_group_ptr;
auto it = dh::MakeTransformIterator<uint32_t>(
thrust::make_counting_iterator(0ul), [array_interface] __device__(size_t i) {
return TypedIndex<uint32_t, 1>{array_interface}(i);
});
dh::caching_device_vector<bool> flag(1);
auto d_flag = dh::ToSpan(flag);
auto d = SetDeviceToPtr(array_interface.data);
dh::LaunchN(1, [=] __device__(size_t) { d_flag[0] = true; });
dh::LaunchN(array_interface.Shape(0) - 1, [=] __device__(size_t i) {
auto typed = TypedIndex<uint32_t, 1>{array_interface};
if (typed(i) > typed(i + 1)) {
d_flag[0] = false;
}
});
bool non_dec = true;
dh::safe_cuda(hipMemcpy(&non_dec, flag.data().get(), sizeof(bool),
hipMemcpyDeviceToHost));
CHECK(non_dec) << "`qid` must be sorted in increasing order along with data.";
size_t bytes = 0;
dh::caching_device_vector<uint32_t> out(array_interface.Shape(0));
dh::caching_device_vector<uint32_t> cnt(array_interface.Shape(0));
HostDeviceVector<int> d_num_runs_out(1, 0, d);
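// hipcub follows the usual two-phase CUB pattern: the first call with a null
// temp-storage pointer only computes the required scratch size; the second call
// performs the run-length encode of qid into unique values and run counts.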
hipcub::DeviceRunLengthEncode::Encode(
nullptr, bytes, it, out.begin(), cnt.begin(),
d_num_runs_out.DevicePointer(), array_interface.Shape(0));
dh::caching_device_vector<char> tmp(bytes);
hipcub::DeviceRunLengthEncode::Encode(
tmp.data().get(), bytes, it, out.begin(), cnt.begin(),
d_num_runs_out.DevicePointer(), array_interface.Shape(0));
auto h_num_runs_out = d_num_runs_out.HostSpan()[0];
group_ptr_.clear();
group_ptr_.resize(h_num_runs_out + 1, 0);
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::inclusive_scan(thrust::hip::par(alloc), cnt.begin(),
cnt.begin() + h_num_runs_out, cnt.begin());
thrust::copy(cnt.begin(), cnt.begin() + h_num_runs_out,
group_ptr_.begin() + 1);
}
} // namespace
void MetaInfo::SetInfoFromCUDA(StringView key, Json array) {
// multi-dim float info
if (key == "base_margin") {
CopyTensorInfoImpl(array, &base_margin_);
return;
} else if (key == "label") {
CopyTensorInfoImpl(array, &labels);
auto ptr = labels.Data()->ConstDevicePointer();
auto valid = thrust::none_of(thrust::device, ptr, ptr + labels.Size(), data::LabelsCheck{});
CHECK(valid) << "Label contains NaN, infinity or a value too large.";
return;
}
// uint info
if (key == "group") {
auto array_interface{ArrayInterface<1>(array)};
CopyGroupInfoImpl(array_interface, &group_ptr_);
data::ValidateQueryGroup(group_ptr_);
return;
} else if (key == "qid") {
auto array_interface{ArrayInterface<1>(array)};
CopyQidImpl(array_interface, &group_ptr_);
data::ValidateQueryGroup(group_ptr_);
return;
}
// float info
linalg::Tensor<float, 1> t;
CopyTensorInfoImpl(array, &t);
if (key == "weight") {
this->weights_ = std::move(*t.Data());
auto ptr = weights_.ConstDevicePointer();
auto valid = thrust::none_of(thrust::device, ptr, ptr + weights_.Size(), data::WeightsCheck{});
CHECK(valid) << "Weights must be positive values.";
} else if (key == "label_lower_bound") {
this->labels_lower_bound_ = std::move(*t.Data());
} else if (key == "label_upper_bound") {
this->labels_upper_bound_ = std::move(*t.Data());
} else if (key == "feature_weights") {
this->feature_weights = std::move(*t.Data());
auto d_feature_weights = feature_weights.ConstDeviceSpan();
auto valid =
thrust::none_of(thrust::device, d_feature_weights.data(),
d_feature_weights.data() + d_feature_weights.size(), data::WeightsCheck{});
CHECK(valid) << "Feature weight must be greater than 0.";
} else {
LOG(FATAL) << "Unknown key for MetaInfo: " << key;
}
}
template <typename AdapterT>
DMatrix* DMatrix::Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix) {
CHECK_EQ(cache_prefix.size(), 0)
<< "Device memory construction is not currently supported with external "
"memory.";
return new data::SimpleDMatrix(adapter, missing, nthread);
}
template DMatrix* DMatrix::Create<data::CudfAdapter>(
data::CudfAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix);
template DMatrix* DMatrix::Create<data::CupyAdapter>(
data::CupyAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix);
} // namespace xgboost
| 4d5a094b2b9671c9b23f6856eb83e811026f43da.cu | /*!
* Copyright 2019-2021 by XGBoost Contributors
*
* \file data.cu
* \brief Handles setting metainfo from array interface.
*/
#include "xgboost/data.h"
#include "xgboost/logging.h"
#include "xgboost/json.h"
#include "array_interface.h"
#include "../common/device_helpers.cuh"
#include "../common/linalg_op.cuh"
#include "device_adapter.cuh"
#include "simple_dmatrix.h"
#include "validation.h"
namespace xgboost {
namespace {
auto SetDeviceToPtr(void const* ptr) {
cudaPointerAttributes attr;
dh::safe_cuda(cudaPointerGetAttributes(&attr, ptr));
int32_t ptr_device = attr.device;
dh::safe_cuda(cudaSetDevice(ptr_device));
return ptr_device;
}
template <typename T, int32_t D>
void CopyTensorInfoImpl(Json arr_interface, linalg::Tensor<T, D>* p_out) {
ArrayInterface<D> array(arr_interface);
if (array.n == 0) {
p_out->SetDevice(0);
p_out->Reshape(array.shape);
return;
}
CHECK(array.valid.Size() == 0) << "Meta info like label or weight can not have missing value.";
auto ptr_device = SetDeviceToPtr(array.data);
p_out->SetDevice(ptr_device);
if (array.is_contiguous && array.type == ToDType<T>::kType) {
p_out->ModifyInplace([&](HostDeviceVector<T>* data, common::Span<size_t, D> shape) {
// set shape
std::copy(array.shape, array.shape + D, shape.data());
// set data
data->Resize(array.n);
dh::safe_cuda(cudaMemcpyAsync(data->DevicePointer(), array.data, array.n * sizeof(T),
cudaMemcpyDefault));
});
return;
}
p_out->Reshape(array.shape);
auto t = p_out->View(ptr_device);
linalg::ElementWiseKernelDevice(t, [=] __device__(size_t i, T) {
return linalg::detail::Apply(TypedIndex<T, D>{array}, linalg::UnravelIndex<D>(i, array.shape));
});
}
void CopyGroupInfoImpl(ArrayInterface<1> column, std::vector<bst_group_t>* out) {
CHECK(column.type != ArrayInterfaceHandler::kF4 && column.type != ArrayInterfaceHandler::kF8)
<< "Expected integer for group info.";
auto ptr_device = SetDeviceToPtr(column.data);
CHECK_EQ(ptr_device, dh::CurrentDevice());
dh::TemporaryArray<bst_group_t> temp(column.Shape(0));
auto d_tmp = temp.data().get();
dh::LaunchN(column.Shape(0),
[=] __device__(size_t idx) { d_tmp[idx] = TypedIndex<size_t, 1>{column}(idx); });
auto length = column.Shape(0);
out->resize(length + 1);
out->at(0) = 0;
thrust::copy(temp.data(), temp.data() + length, out->begin() + 1);
std::partial_sum(out->begin(), out->end(), out->begin());
}
void CopyQidImpl(ArrayInterface<1> array_interface, std::vector<bst_group_t>* p_group_ptr) {
auto &group_ptr_ = *p_group_ptr;
auto it = dh::MakeTransformIterator<uint32_t>(
thrust::make_counting_iterator(0ul), [array_interface] __device__(size_t i) {
return TypedIndex<uint32_t, 1>{array_interface}(i);
});
dh::caching_device_vector<bool> flag(1);
auto d_flag = dh::ToSpan(flag);
auto d = SetDeviceToPtr(array_interface.data);
dh::LaunchN(1, [=] __device__(size_t) { d_flag[0] = true; });
dh::LaunchN(array_interface.Shape(0) - 1, [=] __device__(size_t i) {
auto typed = TypedIndex<uint32_t, 1>{array_interface};
if (typed(i) > typed(i + 1)) {
d_flag[0] = false;
}
});
bool non_dec = true;
dh::safe_cuda(cudaMemcpy(&non_dec, flag.data().get(), sizeof(bool),
cudaMemcpyDeviceToHost));
CHECK(non_dec) << "`qid` must be sorted in increasing order along with data.";
size_t bytes = 0;
dh::caching_device_vector<uint32_t> out(array_interface.Shape(0));
dh::caching_device_vector<uint32_t> cnt(array_interface.Shape(0));
HostDeviceVector<int> d_num_runs_out(1, 0, d);
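// Standard two-phase CUB pattern: the first call with a null temp-storage pointer
// only computes the required scratch size; the second call performs the
// run-length encode of qid into unique values and run counts.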
cub::DeviceRunLengthEncode::Encode(
nullptr, bytes, it, out.begin(), cnt.begin(),
d_num_runs_out.DevicePointer(), array_interface.Shape(0));
dh::caching_device_vector<char> tmp(bytes);
cub::DeviceRunLengthEncode::Encode(
tmp.data().get(), bytes, it, out.begin(), cnt.begin(),
d_num_runs_out.DevicePointer(), array_interface.Shape(0));
auto h_num_runs_out = d_num_runs_out.HostSpan()[0];
group_ptr_.clear();
group_ptr_.resize(h_num_runs_out + 1, 0);
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::inclusive_scan(thrust::cuda::par(alloc), cnt.begin(),
cnt.begin() + h_num_runs_out, cnt.begin());
thrust::copy(cnt.begin(), cnt.begin() + h_num_runs_out,
group_ptr_.begin() + 1);
}
} // namespace
void MetaInfo::SetInfoFromCUDA(StringView key, Json array) {
// multi-dim float info
if (key == "base_margin") {
CopyTensorInfoImpl(array, &base_margin_);
return;
} else if (key == "label") {
CopyTensorInfoImpl(array, &labels);
auto ptr = labels.Data()->ConstDevicePointer();
auto valid = thrust::none_of(thrust::device, ptr, ptr + labels.Size(), data::LabelsCheck{});
CHECK(valid) << "Label contains NaN, infinity or a value too large.";
return;
}
// uint info
if (key == "group") {
auto array_interface{ArrayInterface<1>(array)};
CopyGroupInfoImpl(array_interface, &group_ptr_);
data::ValidateQueryGroup(group_ptr_);
return;
} else if (key == "qid") {
auto array_interface{ArrayInterface<1>(array)};
CopyQidImpl(array_interface, &group_ptr_);
data::ValidateQueryGroup(group_ptr_);
return;
}
// float info
linalg::Tensor<float, 1> t;
CopyTensorInfoImpl(array, &t);
if (key == "weight") {
this->weights_ = std::move(*t.Data());
auto ptr = weights_.ConstDevicePointer();
auto valid = thrust::none_of(thrust::device, ptr, ptr + weights_.Size(), data::WeightsCheck{});
CHECK(valid) << "Weights must be positive values.";
} else if (key == "label_lower_bound") {
this->labels_lower_bound_ = std::move(*t.Data());
} else if (key == "label_upper_bound") {
this->labels_upper_bound_ = std::move(*t.Data());
} else if (key == "feature_weights") {
this->feature_weights = std::move(*t.Data());
auto d_feature_weights = feature_weights.ConstDeviceSpan();
auto valid =
thrust::none_of(thrust::device, d_feature_weights.data(),
d_feature_weights.data() + d_feature_weights.size(), data::WeightsCheck{});
CHECK(valid) << "Feature weight must be greater than 0.";
} else {
LOG(FATAL) << "Unknown key for MetaInfo: " << key;
}
}
template <typename AdapterT>
DMatrix* DMatrix::Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix) {
CHECK_EQ(cache_prefix.size(), 0)
<< "Device memory construction is not currently supported with external "
"memory.";
return new data::SimpleDMatrix(adapter, missing, nthread);
}
template DMatrix* DMatrix::Create<data::CudfAdapter>(
data::CudfAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix);
template DMatrix* DMatrix::Create<data::CupyAdapter>(
data::CupyAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix);
} // namespace xgboost
|
c50bdcf70907221aaf3ad133c51488efca42754d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_mix24_1_1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uint8_t *ip = NULL;
hipMalloc(&ip, XSIZE*YSIZE);
uint32_t stride = 2;
int32_t *u = NULL;
hipMalloc(&u, XSIZE*YSIZE);
int32_t *v = NULL;
hipMalloc(&v, XSIZE*YSIZE);
int32_t numSamples = 1;
uint16_t *shiftUV = NULL;
hipMalloc(&shiftUV, XSIZE*YSIZE);
int32_t mixres = 1;
uint32_t mask = 1;
int32_t m2 = 1;
int32_t mixbits = 1;
int32_t shift = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(gpu_mix24_1_1, dim3(gridBlock), dim3(threadBlock), 0, 0, ip, stride, u, v, numSamples, shiftUV, mixres, mask, m2, mixbits, shift);
hipDeviceSynchronize();
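// Timing protocol: one synchronized launch, 10 untimed warm-up launches, then 1000
// timed launches; the printed value is the elapsed wall-clock time in microseconds.
// Launches are asynchronous, so work still queued on the device when the timer
// stops is not included in the measurement.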
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(gpu_mix24_1_1, dim3(gridBlock), dim3(threadBlock), 0, 0, ip, stride, u, v, numSamples, shiftUV, mixres, mask, m2, mixbits, shift);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(gpu_mix24_1_1, dim3(gridBlock), dim3(threadBlock), 0, 0, ip, stride, u, v, numSamples, shiftUV, mixres, mask, m2, mixbits, shift);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c50bdcf70907221aaf3ad133c51488efca42754d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_mix24_1_1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uint8_t *ip = NULL;
cudaMalloc(&ip, XSIZE*YSIZE);
uint32_t stride = 2;
int32_t *u = NULL;
cudaMalloc(&u, XSIZE*YSIZE);
int32_t *v = NULL;
cudaMalloc(&v, XSIZE*YSIZE);
int32_t numSamples = 1;
uint16_t *shiftUV = NULL;
cudaMalloc(&shiftUV, XSIZE*YSIZE);
int32_t mixres = 1;
uint32_t mask = 1;
int32_t m2 = 1;
int32_t mixbits = 1;
int32_t shift = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpu_mix24_1_1<<<gridBlock,threadBlock>>>(ip,stride,u,v,numSamples,shiftUV,mixres,mask,m2,mixbits,shift);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpu_mix24_1_1<<<gridBlock,threadBlock>>>(ip,stride,u,v,numSamples,shiftUV,mixres,mask,m2,mixbits,shift);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpu_mix24_1_1<<<gridBlock,threadBlock>>>(ip,stride,u,v,numSamples,shiftUV,mixres,mask,m2,mixbits,shift);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ba2f567ddfb69e882f61c53d8469ee2a13bbb337.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
//__constant__ int datos[1024];
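// Copy kernel: each thread copies one element from d_src to d_dst.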
__global__ void kernel(int *d_dst, int *d_src) {
int tId = threadIdx.x + blockIdx.x * blockDim.x;
d_dst[tId] = d_src[tId];
}
int main(int argc, char **argv) {
int *d_datos, *h_datos, *d_src;
hipMalloc((void**)&d_datos, sizeof(int) * 1024);
hipMalloc((void**)&d_src, sizeof(int) * 1024);
h_datos = (int *)malloc(sizeof(int) * 1024);
int *test = new int[1024];
memset(test, 0, sizeof(int) * 1024);
for (int i = 0; i < 1024; i++) {
test[i] = i;
}
//GPU Time
hipEvent_t start, stop;
float time;
hipMemcpy(d_src, test, sizeof(int)*1024, hipMemcpyHostToDevice); // upload the initialized host array
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
// Kernel call
hipLaunchKernelGGL(( kernel), dim3(1), dim3(1024) , 0, 0, d_datos, d_src);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// Copying From Device to Host
hipMemcpy(h_datos, d_datos, sizeof(int)*1024, hipMemcpyDeviceToHost);
printf("Time : %f ms\n",time);
hipEventDestroy(start);
hipEventDestroy(stop);
free(test);
hipFree(d_datos);
return 0;
}
| ba2f567ddfb69e882f61c53d8469ee2a13bbb337.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
//__constant__ int datos[1024];
__global__ void kernel(int *d_dst, int *d_src) {
int tId = threadIdx.x + blockIdx.x * blockDim.x;
d_dst[tId] = d_src[tId];
}
int main(int argc, char **argv) {
int *d_datos, *h_datos, *d_src;
cudaMalloc((void**)&d_datos, sizeof(int) * 1024);
cudaMalloc((void**)&d_src, sizeof(int) * 1024);
h_datos = (int *)malloc(sizeof(int) * 1024);
int *test = new int[1024];
memset(test, 0, sizeof(int) * 1024);
for (int i = 0; i < 1024; i++) {
test[i] = i;
}
//GPU Time
cudaEvent_t start, stop;
float time;
cudaMemcpy(d_src, test, sizeof(int)*1024, cudaMemcpyHostToDevice); // upload the initialized host array
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
// Kernel call
kernel<<< 1, 1024 >>>(d_datos, d_src);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// Copying From Device to Host
cudaMemcpy(h_datos, d_datos, sizeof(int)*1024, cudaMemcpyDeviceToHost);
printf("Time : %f ms\n",time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(test);
cudaFree(d_datos);
return 0;
}
|
e1d82a78c1bd2b223facdf9b7fa05c971be3e324.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//note: please do not modify this file manually!
// this file has been generated automatically by BOAST version 0.99996
// by: make boast_kernels
/*
!=====================================================================
!
! S p e c f e m 3 D G l o b e V e r s i o n 7 . 0
! --------------------------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA
! and CNRS / University of Marseille, France
! (there are currently many more authors!)
! (c) Princeton University and CNRS / University of Marseille, April 2014
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#ifndef INDEX2
#define INDEX2(isize,i,j) i + isize*j
#endif
#ifndef INDEX3
#define INDEX3(isize,jsize,i,j,k) i + isize*(j + jsize*k)
#endif
#ifndef INDEX4
#define INDEX4(isize,jsize,ksize,i,j,k,x) i + isize*(j + jsize*(k + ksize*x))
#endif
#ifndef INDEX5
#define INDEX5(isize,jsize,ksize,xsize,i,j,k,x,y) i + isize*(j + jsize*(k + ksize*(x + xsize*y)))
#endif
#ifndef NDIM
#define NDIM 3
#endif
#ifndef NGLLX
#define NGLLX 5
#endif
#ifndef NGLL2
#define NGLL2 25
#endif
#ifndef NGLL3
#define NGLL3 125
#endif
#ifndef NGLL3_PADDED
#define NGLL3_PADDED 128
#endif
#ifndef N_SLS
#define N_SLS 3
#endif
#ifndef IREGION_CRUST_MANTLE
#define IREGION_CRUST_MANTLE 1
#endif
#ifndef IREGION_INNER_CORE
#define IREGION_INNER_CORE 3
#endif
#ifndef IFLAG_IN_FICTITIOUS_CUBE
#define IFLAG_IN_FICTITIOUS_CUBE 11
#endif
#ifndef R_EARTH_KM
#define R_EARTH_KM 6371.0f
#endif
#ifndef COLORING_MIN_NSPEC_INNER_CORE
#define COLORING_MIN_NSPEC_INNER_CORE 1000
#endif
#ifndef COLORING_MIN_NSPEC_OUTER_CORE
#define COLORING_MIN_NSPEC_OUTER_CORE 1000
#endif
#ifndef BLOCKSIZE_TRANSFER
#define BLOCKSIZE_TRANSFER 256
#endif
static __device__ void compute_strain_product(float * prod, const float eps_trace_over_3, const float * epsdev, const float b_eps_trace_over_3, const float * b_epsdev){
float eps[(6)];
float b_eps[(6)];
int p;
int i;
int j;
eps[0] = epsdev[0] + eps_trace_over_3;
eps[1] = epsdev[1] + eps_trace_over_3;
eps[2] = -(eps[0] + eps[1]) + (eps_trace_over_3) * (3.0f);
eps[3] = epsdev[4];
eps[4] = epsdev[3];
eps[5] = epsdev[2];
b_eps[0] = b_epsdev[0] + b_eps_trace_over_3;
b_eps[1] = b_epsdev[1] + b_eps_trace_over_3;
b_eps[2] = -(b_eps[0] + b_eps[1]) + (b_eps_trace_over_3) * (3.0f);
b_eps[3] = b_epsdev[4];
b_eps[4] = b_epsdev[3];
b_eps[5] = b_epsdev[2];
p = 0;
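// Accumulate the 21 unique entries of the symmetric product of the two strain tensors (Voigt-style ordering); mixed and shear terms pick up factors of 2 and 4.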
for (i = 0; i <= 5; i += 1) {
for (j = i; j <= 5; j += 1) {
prod[p] = (eps[i]) * (b_eps[j]);
if (j > i) {
prod[p] = prod[p] + (eps[j]) * (b_eps[i]);
if (j > 2 && i < 3) {
prod[p] = (prod[p]) * (2.0f);
}
}
if (i > 2) {
prod[p] = (prod[p]) * (4.0f);
}
p = p + 1;
}
}
}
__global__ void compute_ani_kernel(const float * epsilondev_xx, const float * epsilondev_yy, const float * epsilondev_xy, const float * epsilondev_xz, const float * epsilondev_yz, const float * epsilon_trace_over_3, const float * b_epsilondev_xx, const float * b_epsilondev_yy, const float * b_epsilondev_xy, const float * b_epsilondev_xz, const float * b_epsilondev_yz, const float * b_epsilon_trace_over_3, float * cijkl_kl, const int NSPEC, const float deltat){
int i;
int ispec;
int ijk_ispec;
float eps_trace_over_3;
float b_eps_trace_over_3;
float prod[(21)];
float epsdev[(5)];
float b_epsdev[(5)];
ispec = blockIdx.x + (blockIdx.y) * (gridDim.x);
if (ispec < NSPEC) {
ijk_ispec = threadIdx.x + (NGLL3) * (ispec);
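// Each thread handles one GLL point (ijk) of spectral element ispec.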
epsdev[0] = epsilondev_xx[ijk_ispec];
epsdev[1] = epsilondev_yy[ijk_ispec];
epsdev[2] = epsilondev_xy[ijk_ispec];
epsdev[3] = epsilondev_xz[ijk_ispec];
epsdev[4] = epsilondev_yz[ijk_ispec];
b_epsdev[0] = b_epsilondev_xx[ijk_ispec];
b_epsdev[1] = b_epsilondev_yy[ijk_ispec];
b_epsdev[2] = b_epsilondev_xy[ijk_ispec];
b_epsdev[3] = b_epsilondev_xz[ijk_ispec];
b_epsdev[4] = b_epsilondev_yz[ijk_ispec];
eps_trace_over_3 = epsilon_trace_over_3[ijk_ispec];
b_eps_trace_over_3 = b_epsilon_trace_over_3[ijk_ispec];
compute_strain_product(prod, eps_trace_over_3, epsdev, b_eps_trace_over_3, b_epsdev);
for (i = 0; i <= 20; i += 1) {
cijkl_kl[i + (21) * (ijk_ispec)] = cijkl_kl[i + (21) * (ijk_ispec)] + (deltat) * (prod[i]);
}
}
}
| e1d82a78c1bd2b223facdf9b7fa05c971be3e324.cu | //note: please do not modify this file manually!
// this file has been generated automatically by BOAST version 0.99996
// by: make boast_kernels
/*
!=====================================================================
!
! S p e c f e m 3 D G l o b e V e r s i o n 7 . 0
! --------------------------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA
! and CNRS / University of Marseille, France
! (there are currently many more authors!)
! (c) Princeton University and CNRS / University of Marseille, April 2014
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#ifndef INDEX2
#define INDEX2(isize,i,j) i + isize*j
#endif
#ifndef INDEX3
#define INDEX3(isize,jsize,i,j,k) i + isize*(j + jsize*k)
#endif
#ifndef INDEX4
#define INDEX4(isize,jsize,ksize,i,j,k,x) i + isize*(j + jsize*(k + ksize*x))
#endif
#ifndef INDEX5
#define INDEX5(isize,jsize,ksize,xsize,i,j,k,x,y) i + isize*(j + jsize*(k + ksize*(x + xsize*y)))
#endif
#ifndef NDIM
#define NDIM 3
#endif
#ifndef NGLLX
#define NGLLX 5
#endif
#ifndef NGLL2
#define NGLL2 25
#endif
#ifndef NGLL3
#define NGLL3 125
#endif
#ifndef NGLL3_PADDED
#define NGLL3_PADDED 128
#endif
#ifndef N_SLS
#define N_SLS 3
#endif
#ifndef IREGION_CRUST_MANTLE
#define IREGION_CRUST_MANTLE 1
#endif
#ifndef IREGION_INNER_CORE
#define IREGION_INNER_CORE 3
#endif
#ifndef IFLAG_IN_FICTITIOUS_CUBE
#define IFLAG_IN_FICTITIOUS_CUBE 11
#endif
#ifndef R_EARTH_KM
#define R_EARTH_KM 6371.0f
#endif
#ifndef COLORING_MIN_NSPEC_INNER_CORE
#define COLORING_MIN_NSPEC_INNER_CORE 1000
#endif
#ifndef COLORING_MIN_NSPEC_OUTER_CORE
#define COLORING_MIN_NSPEC_OUTER_CORE 1000
#endif
#ifndef BLOCKSIZE_TRANSFER
#define BLOCKSIZE_TRANSFER 256
#endif
static __device__ void compute_strain_product(float * prod, const float eps_trace_over_3, const float * epsdev, const float b_eps_trace_over_3, const float * b_epsdev){
float eps[(6)];
float b_eps[(6)];
int p;
int i;
int j;
eps[0] = epsdev[0] + eps_trace_over_3;
eps[1] = epsdev[1] + eps_trace_over_3;
eps[2] = -(eps[0] + eps[1]) + (eps_trace_over_3) * (3.0f);
eps[3] = epsdev[4];
eps[4] = epsdev[3];
eps[5] = epsdev[2];
b_eps[0] = b_epsdev[0] + b_eps_trace_over_3;
b_eps[1] = b_epsdev[1] + b_eps_trace_over_3;
b_eps[2] = -(b_eps[0] + b_eps[1]) + (b_eps_trace_over_3) * (3.0f);
b_eps[3] = b_epsdev[4];
b_eps[4] = b_epsdev[3];
b_eps[5] = b_epsdev[2];
p = 0;
for (i = 0; i <= 5; i += 1) {
for (j = i; j <= 5; j += 1) {
prod[p] = (eps[i]) * (b_eps[j]);
if (j > i) {
prod[p] = prod[p] + (eps[j]) * (b_eps[i]);
if (j > 2 && i < 3) {
prod[p] = (prod[p]) * (2.0f);
}
}
if (i > 2) {
prod[p] = (prod[p]) * (4.0f);
}
p = p + 1;
}
}
}
__global__ void compute_ani_kernel(const float * epsilondev_xx, const float * epsilondev_yy, const float * epsilondev_xy, const float * epsilondev_xz, const float * epsilondev_yz, const float * epsilon_trace_over_3, const float * b_epsilondev_xx, const float * b_epsilondev_yy, const float * b_epsilondev_xy, const float * b_epsilondev_xz, const float * b_epsilondev_yz, const float * b_epsilon_trace_over_3, float * cijkl_kl, const int NSPEC, const float deltat){
int i;
int ispec;
int ijk_ispec;
float eps_trace_over_3;
float b_eps_trace_over_3;
float prod[(21)];
float epsdev[(5)];
float b_epsdev[(5)];
ispec = blockIdx.x + (blockIdx.y) * (gridDim.x);
if (ispec < NSPEC) {
ijk_ispec = threadIdx.x + (NGLL3) * (ispec);
epsdev[0] = epsilondev_xx[ijk_ispec];
epsdev[1] = epsilondev_yy[ijk_ispec];
epsdev[2] = epsilondev_xy[ijk_ispec];
epsdev[3] = epsilondev_xz[ijk_ispec];
epsdev[4] = epsilondev_yz[ijk_ispec];
b_epsdev[0] = b_epsilondev_xx[ijk_ispec];
b_epsdev[1] = b_epsilondev_yy[ijk_ispec];
b_epsdev[2] = b_epsilondev_xy[ijk_ispec];
b_epsdev[3] = b_epsilondev_xz[ijk_ispec];
b_epsdev[4] = b_epsilondev_yz[ijk_ispec];
eps_trace_over_3 = epsilon_trace_over_3[ijk_ispec];
b_eps_trace_over_3 = b_epsilon_trace_over_3[ijk_ispec];
compute_strain_product(prod, eps_trace_over_3, epsdev, b_eps_trace_over_3, b_epsdev);
for (i = 0; i <= 20; i += 1) {
cijkl_kl[i + (21) * (ijk_ispec)] = cijkl_kl[i + (21) * (ijk_ispec)] + (deltat) * (prod[i]);
}
}
}
|
df03f100e39c01abc9e0f614abe8fd34b168b885.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
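// Warp-level tail of the reduction: folds the final partial sums without __syncthreads(), relying on volatile shared-memory accesses (classic warp-synchronous pattern).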
template <unsigned int blockSize>
__device__ void warpReduce(volatile int* sdata, int tid) {
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
template <unsigned int blockSize>
__global__ void reduce6(int *g_idata, int *g_odata, int n) {
extern __shared__ int sdata[];
// perform first level of reduction, reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 0;
while (i < n) {
sdata[tid] += g_idata[i] + g_idata[i+blockSize];
i += gridSize;
}
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
}
if (tid < 32) warpReduce<blockSize>(sdata, tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
} | df03f100e39c01abc9e0f614abe8fd34b168b885.cu | #include "includes.h"
template <unsigned int blockSize>
__device__ void warpReduce(volatile int* sdata, int tid) {
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
template <unsigned int blockSize>
__global__ void reduce6(int *g_idata, int *g_odata, int n) {
extern __shared__ int sdata[];
// perform first level of reduction, reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 0;
while (i < n) {
sdata[tid] += g_idata[i] + g_idata[i+blockSize];
i += gridSize;
}
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
}
if (tid < 32) warpReduce<blockSize>(sdata, tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
} |
atax.hip | // !!! This is a file automatically generated by hipify!!!
/**
* atax.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "../../../dimensions.h"
#include "../common/polybenchUtilFuncts.h"
#include "../common/polybench.c"
#include <hip/hip_runtime.h>
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE *x, DATA_TYPE *A, int NX, int NY)
{
int i, j;
for (i = 0; i < NX; i++)
{
x[i] = i * M_PI;
for (j = 0; j < NY; j++)
{
A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX;
}
}
}
void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu, int NY)
{
int i, fail;
fail = 0;
for (i=0; i<NY; i++)
{
if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
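// atax kernel 1: tmp = A * x, one thread per row of A.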
__global__ void atax_kernel1(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp, int NX, int NY, int funcId)
{
int i = getGlobalIdFunc[funcId]();
if (i < NX)
{
int j;
for(j=0; j < NY; j++)
{
tmp[i] += A[i * NY + j] * x[j];
}
}
}
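// atax kernel 2: y = A^T * tmp, one thread per column of A.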
__global__ void atax_kernel2(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp, int NX, int NY, int funcId)
{
int j = getGlobalIdFunc[funcId]();
if (j < NY)
{
int i;
for(i=0; i < NX; i++)
{
y[j] += A[i * NY + j] * tmp[i];
}
}
}
void atax_cpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, int NX, int NY)
{
int i,j;
for (i= 0; i < NY; i++)
{
y[i] = 0;
}
for (i = 0; i < NX; i++)
{
tmp[i] = 0;
for (j = 0; j < NY; j++)
{
tmp[i] = tmp[i] + A[i*NY + j] * x[j];
}
for (j = 0; j < NY; j++)
{
y[j] = y[j] + A[i*NY + j] * tmp[i];
}
}
}
int main(int argc, char** argv)
{
double t_start, t_end;
int NX = 0;
int NY = 0;
int kernel = 0;
int funcId = 0;
int i = 0;
if (argc != 11) {
printf("Uso: %s <kernel> <g.x> <g.y> <g.z> <b.x> <b.y> <b.z> <nx> <ny> <funcId>\n", argv[0]);
/*printf(" funcId:\n");
printf(" 0: 1D_1D, 1: 1D_2D, 2: 1D_3D\n");
printf(" 3: 2D_1D, 4: 2D_2D, 5: 2D_3D\n");
printf(" 6: 3D_1D, 7: 3D_2D, 8: 3D_3D\n");*/
return 0;
}
else{
printf("#argumentos (argc): %d\n", argc);
for (i = 0; i < argc; ++i) {
printf(" argv[%d]: %s\n", i, argv[i]);
}
kernel = atoi(argv[1]);
NX = atoi(argv[8]);
NY = atoi(argv[9]);
//funcId = atoi(argv[10]);
//printf("Executando: %s atax_kernel_%d grid(%d, %d, %d) block(%d, %d, %d) %d\n", argv[0], kernel, atoi(argv[2]), atoi(argv[3]), atoi(argv[4]), atoi(argv[5]), atoi(argv[6]), atoi(argv[7]));
}
/* Recuperar as informações da GPU. */
printf("%s Starting...\n", argv[0]);
DATA_TYPE* A;
DATA_TYPE* x;
DATA_TYPE* y;
DATA_TYPE* y_outputFromGpu;
DATA_TYPE* tmp;
A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
init_array(x, A, NX, NY);
GPU_argv_init();
DATA_TYPE *A_gpu;
DATA_TYPE *x_gpu;
DATA_TYPE *y_gpu;
DATA_TYPE *tmp_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
hipMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * NY);
hipMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * NY);
hipMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * NX);
hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, hipMemcpyHostToDevice);
hipMemcpy(x_gpu, x, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
hipMemcpy(y_gpu, y, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
hipMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * NX, hipMemcpyHostToDevice);
if (kernel == 0){
dim3 block1(atoi(argv[5]), atoi(argv[6]), atoi(argv[7]));
dim3 grid1(atoi(argv[2]), atoi(argv[3]), atoi(argv[4]));
funcId = calculateFunctionId(grid1, block1);
printf("funcId: %d\n", funcId);
t_start = rtclock();
hipLaunchKernelGGL(( atax_kernel1), dim3(grid1), dim3(block1) , 0, 0, A_gpu,x_gpu,tmp_gpu, NX, NY, funcId);
hipDeviceSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
hipMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NX, hipMemcpyDeviceToHost);
hipFree(A_gpu);
hipFree(x_gpu);
hipFree(y_gpu);
hipFree(tmp_gpu);
}else{
dim3 block2(atoi(argv[5]), atoi(argv[6]), atoi(argv[7]));
dim3 grid2(atoi(argv[2]), atoi(argv[3]), atoi(argv[4]));
funcId = calculateFunctionId(grid2, block2);
printf("funcId: %d\n", funcId);
t_start = rtclock();
hipLaunchKernelGGL(( atax_kernel2), dim3(grid2), dim3(block2) , 0, 0, A_gpu,y_gpu,tmp_gpu, NX, NY, funcId);
hipDeviceSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
hipMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NX, hipMemcpyDeviceToHost);
hipFree(A_gpu);
hipFree(x_gpu);
hipFree(y_gpu);
hipFree(tmp_gpu);
}
t_start = rtclock();
atax_cpu(A, x, y, tmp, NX, NY);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(y, y_outputFromGpu, NY);
free(A);
free(x);
free(y);
free(y_outputFromGpu);
free(tmp);
return 0;
}
| atax.cu | /**
* atax.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include "../../../dimensions.h"
#include "../common/polybenchUtilFuncts.h"
#include "../common/polybench.c"
#include <cuda_runtime.h>
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE *x, DATA_TYPE *A, int NX, int NY)
{
int i, j;
for (i = 0; i < NX; i++)
{
x[i] = i * M_PI;
for (j = 0; j < NY; j++)
{
A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX;
}
}
}
void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu, int NY)
{
int i, fail;
fail = 0;
for (i=0; i<NY; i++)
{
if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
__global__ void atax_kernel1(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp, int NX, int NY, int funcId)
{
int i = getGlobalIdFunc[funcId]();
if (i < NX)
{
int j;
for(j=0; j < NY; j++)
{
tmp[i] += A[i * NY + j] * x[j];
}
}
}
__global__ void atax_kernel2(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp, int NX, int NY, int funcId)
{
int j = getGlobalIdFunc[funcId]();
if (j < NY)
{
int i;
for(i=0; i < NX; i++)
{
y[j] += A[i * NY + j] * tmp[i];
}
}
}
void atax_cpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, int NX, int NY)
{
int i,j;
for (i= 0; i < NY; i++)
{
y[i] = 0;
}
for (i = 0; i < NX; i++)
{
tmp[i] = 0;
for (j = 0; j < NY; j++)
{
tmp[i] = tmp[i] + A[i*NY + j] * x[j];
}
for (j = 0; j < NY; j++)
{
y[j] = y[j] + A[i*NY + j] * tmp[i];
}
}
}
int main(int argc, char** argv)
{
double t_start, t_end;
int NX = 0;
int NY = 0;
int kernel = 0;
int funcId = 0;
int i = 0;
if (argc != 11) {
printf("Uso: %s <kernel> <g.x> <g.y> <g.z> <b.x> <b.y> <b.z> <nx> <ny> <funcId>\n", argv[0]);
/*printf(" funcId:\n");
printf(" 0: 1D_1D, 1: 1D_2D, 2: 1D_3D\n");
printf(" 3: 2D_1D, 4: 2D_2D, 5: 2D_3D\n");
printf(" 6: 3D_1D, 7: 3D_2D, 8: 3D_3D\n");*/
return 0;
}
else{
printf("#argumentos (argc): %d\n", argc);
for (i = 0; i < argc; ++i) {
printf(" argv[%d]: %s\n", i, argv[i]);
}
kernel = atoi(argv[1]);
NX = atoi(argv[8]);
NY = atoi(argv[9]);
//funcId = atoi(argv[10]);
//printf("Executando: %s atax_kernel_%d grid(%d, %d, %d) block(%d, %d, %d) %d\n", argv[0], kernel, atoi(argv[2]), atoi(argv[3]), atoi(argv[4]), atoi(argv[5]), atoi(argv[6]), atoi(argv[7]));
}
/* Recuperar as informações da GPU. */
printf("%s Starting...\n", argv[0]);
DATA_TYPE* A;
DATA_TYPE* x;
DATA_TYPE* y;
DATA_TYPE* y_outputFromGpu;
DATA_TYPE* tmp;
A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
init_array(x, A, NX, NY);
GPU_argv_init();
DATA_TYPE *A_gpu;
DATA_TYPE *x_gpu;
DATA_TYPE *y_gpu;
DATA_TYPE *tmp_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
cudaMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * NY);
cudaMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * NY);
cudaMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * NX);
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyHostToDevice);
cudaMemcpy(x_gpu, x, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
cudaMemcpy(y_gpu, y, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
cudaMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * NX, cudaMemcpyHostToDevice);
if (kernel == 0){
dim3 block1(atoi(argv[5]), atoi(argv[6]), atoi(argv[7]));
dim3 grid1(atoi(argv[2]), atoi(argv[3]), atoi(argv[4]));
funcId = calculateFunctionId(grid1, block1);
printf("funcId: %d\n", funcId);
t_start = rtclock();
atax_kernel1<<< grid1, block1 >>>(A_gpu,x_gpu,tmp_gpu, NX, NY, funcId);
cudaThreadSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
cudaMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NX, cudaMemcpyDeviceToHost);
cudaFree(A_gpu);
cudaFree(x_gpu);
cudaFree(y_gpu);
cudaFree(tmp_gpu);
}else{
dim3 block2(atoi(argv[5]), atoi(argv[6]), atoi(argv[7]));
dim3 grid2(atoi(argv[2]), atoi(argv[3]), atoi(argv[4]));
funcId = calculateFunctionId(grid2, block2);
printf("funcId: %d\n", funcId);
t_start = rtclock();
atax_kernel2<<< grid2, block2 >>>(A_gpu,y_gpu,tmp_gpu, NX, NY, funcId);
cudaThreadSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
cudaMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NX, cudaMemcpyDeviceToHost);
cudaFree(A_gpu);
cudaFree(x_gpu);
cudaFree(y_gpu);
cudaFree(tmp_gpu);
}
t_start = rtclock();
atax_cpu(A, x, y, tmp, NX, NY);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(y, y_outputFromGpu, NY);
free(A);
free(x);
free(y);
free(y_outputFromGpu);
free(tmp);
return 0;
}
|
764cb2af95cc4b5479d2c157ed57f73047e6c3cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
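// Each thread owns one feature channel (x) and writes its bias into every row it is assigned; blockIdx.y strides over rows.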
__global__ void TemporalConvolutionTBC_fp_bias( float* output_features, float* bias, int output_stride, int rows) {
int x = blockIdx.x * 32 + threadIdx.x;
float b = bias[x];
for (int row = blockIdx.y; row < rows; row += gridDim.y) {
output_features[row * output_stride + x] = b;
}
} | 764cb2af95cc4b5479d2c157ed57f73047e6c3cb.cu | #include "includes.h"
__global__ void TemporalConvolutionTBC_fp_bias( float* output_features, float* bias, int output_stride, int rows) {
int x = blockIdx.x * 32 + threadIdx.x;
float b = bias[x];
for (int row = blockIdx.y; row < rows; row += gridDim.y) {
output_features[row * output_stride + x] = b;
}
} |
c223e7499cc3acd0f86f8b16ad5444f79686b814.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip/ForeachFunctors.cuh>
namespace at { namespace native {
template <template<class> class Op>
std::vector<Tensor> foreach_unary_op(TensorList tensors) {
std::vector<std::vector<at::Tensor>> tensor_lists;
std::vector<at::Tensor> vec_res;
vec_res.reserve(tensors.size());
for (const auto& t: tensors) {
vec_res.emplace_back(at::native::empty_like(t));
}
tensor_lists.emplace_back(tensors.vec());
tensor_lists.emplace_back(std::move(vec_res));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
using opmath_t = get_opmath_t<scalar_t>::opmath_t;
multi_tensor_apply<2>(tensor_lists,
UnaryOpFunctor<scalar_t,
/* depth */ 2,
/* r_args_depth */ 1,
/* res_arg_index */ 1>(),
Op<opmath_t>());
});
return tensor_lists[1];
}
template <template<class> class Op>
void foreach_unary_op_(TensorList tensors) {
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.emplace_back(tensors.vec());
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() {
using opmath_t = get_opmath_t<scalar_t>::opmath_t;
multi_tensor_apply<1>(tensor_lists,
UnaryOpFunctor<scalar_t,
/* depth */ 1,
/* r_args_depth */ 1,
/* res_arg_index */ 0>(),
Op<opmath_t>());
});
}
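// The macro below defines, for each unary op, the functor plus the CUDA entry points that take the fused multi-tensor fast path when its restrictions hold and otherwise fall back to the per-tensor slow implementation.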
#define FOREACH_UNARY_OP(NAME, NAME1) \
template<typename T> \
struct NAME1 { \
__device__ T operator()(T t) const { return std::NAME(t); } \
}; \
\
std::vector<Tensor> foreach_tensor_##NAME##_cuda(TensorList tensors) { \
check_foreach_api_restrictions(tensors); \
if (!can_use_fast_route(tensors)) { \
return at::native::foreach_tensor_##NAME##_slow(tensors); \
} \
\
return foreach_unary_op<NAME1>(tensors); \
} \
\
void foreach_tensor_##NAME##_cuda_(TensorList tensors) { \
check_foreach_api_restrictions(tensors); \
if (!can_use_fast_route(tensors)) { \
return at::native::foreach_tensor_##NAME##_slow_(tensors); \
} \
\
foreach_unary_op_<NAME1>(tensors); \
}
FOREACH_UNARY_OP(exp, Exp);
FOREACH_UNARY_OP(sqrt, Sqrt);
}} // namespace at::native
| c223e7499cc3acd0f86f8b16ad5444f79686b814.cu | #include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/ForeachFunctors.cuh>
namespace at { namespace native {
template <template<class> class Op>
std::vector<Tensor> foreach_unary_op(TensorList tensors) {
std::vector<std::vector<at::Tensor>> tensor_lists;
std::vector<at::Tensor> vec_res;
vec_res.reserve(tensors.size());
for (const auto& t: tensors) {
vec_res.emplace_back(at::native::empty_like(t));
}
tensor_lists.emplace_back(tensors.vec());
tensor_lists.emplace_back(std::move(vec_res));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
using opmath_t = get_opmath_t<scalar_t>::opmath_t;
multi_tensor_apply<2>(tensor_lists,
UnaryOpFunctor<scalar_t,
/* depth */ 2,
/* r_args_depth */ 1,
/* res_arg_index */ 1>(),
Op<opmath_t>());
});
return tensor_lists[1];
}
template <template<class> class Op>
void foreach_unary_op_(TensorList tensors) {
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.emplace_back(tensors.vec());
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() {
using opmath_t = get_opmath_t<scalar_t>::opmath_t;
multi_tensor_apply<1>(tensor_lists,
UnaryOpFunctor<scalar_t,
/* depth */ 1,
/* r_args_depth */ 1,
/* res_arg_index */ 0>(),
Op<opmath_t>());
});
}
#define FOREACH_UNARY_OP(NAME, NAME1) \
template<typename T> \
struct NAME1 { \
__device__ T operator()(T t) const { return std::NAME(t); } \
}; \
\
std::vector<Tensor> foreach_tensor_##NAME##_cuda(TensorList tensors) { \
check_foreach_api_restrictions(tensors); \
if (!can_use_fast_route(tensors)) { \
return at::native::foreach_tensor_##NAME##_slow(tensors); \
} \
\
return foreach_unary_op<NAME1>(tensors); \
} \
\
void foreach_tensor_##NAME##_cuda_(TensorList tensors) { \
check_foreach_api_restrictions(tensors); \
if (!can_use_fast_route(tensors)) { \
return at::native::foreach_tensor_##NAME##_slow_(tensors); \
} \
\
foreach_unary_op_<NAME1>(tensors); \
}
FOREACH_UNARY_OP(exp, Exp);
FOREACH_UNARY_OP(sqrt, Sqrt);
}} // namespace at::native
|
3bb8971d74af45603e1f2c4a4c0de9d4fbd9ff68.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
// includes, kernels
#include "scan_largearray_kernel.cu"
#define DEFAULT_NUM_ELEMENTS 16000000
#define MAX_RAND 3
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
bool CompareArrays(float *A, float *B, int size);
void WriteFile(float* arr, char* file_name, int num_elements);
int ReadParamsFile(int* params, char* file_name, int num_params);
int ReadFile(float* arr, char* file_name, int num_elements);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
int num_read = 0;
int* size = (int*)malloc(1 * sizeof(int));
unsigned int data2read = 1;
int num_elements = 0; // Must support large, non-power-of-2 arrays
// allocate host memory to store the input data
unsigned int mem_size = sizeof( float) * num_elements;
float* h_data = (float*) malloc( mem_size);
// * No arguments: Randomly generate input data and compare against the
// host's result.
// * One argument: Randomly generate input data and write the result to
// file name specified by first argument
// * Two arguments: Read the first argument which indicate the size of the array,
// randomly generate input data and write the input data
// to the second argument. (for generating random input data)
// * Three arguments: Read the first file which indicate the size of the array,
// then input data from the file name specified by 2nd argument and write the
// SCAN output to file name specified by the 3rd argument.
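// Example invocation (hypothetical executable and file names):
// ./scan size.txt input.txt output.txt
// reads the element count from size.txt, the input values from input.txt,
// and writes the scanned result to output.txt.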
switch(argc-1)
{
case 2:
// Determine size of array
data2read = ReadParamsFile(size, argv[1], data2read);
if(data2read != 1){
printf("Error reading parameter file\n");
exit(1);
}
num_elements = size[0];
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = (int)(rand() % MAX_RAND);
}
WriteFile(h_data, argv[2], num_elements);
break;
case 3: // Three Arguments
data2read = ReadParamsFile(size, argv[1], data2read);
if(data2read != 1){
printf("Error reading parameter file\n");
exit(1);
}
num_elements = size[0];
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
num_read = ReadFile(h_data, argv[2], num_elements);
if(num_read != num_elements)
{
printf("Error reading input file!\n");
exit(1);
}
break;
default: // No Arguments or one argument
// initialize the input data on the host to be integer values
// between 0 and 1000
// Use DEFAULT_NUM_ELEMENTS num_elements
num_elements = DEFAULT_NUM_ELEMENTS;
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
// h_data[i] = 1.0f;
h_data[i] = (int)(rand() % MAX_RAND);
}
break;
}
// compute reference solution
float* reference = (float*) malloc( mem_size);
struct timeval start_time, end_time;
gettimeofday(&start_time,NULL);
computeGold( reference, h_data, num_elements);
gettimeofday(&end_time,NULL);
printf("Processing %d elements...\n", num_elements);
double start_count = (double) start_time.tv_sec
+ 1.e-6 * (double) start_time.tv_usec;
double end_count = (double) end_time.tv_sec +
1.e-6 * (double) end_time.tv_usec;
double host_ms = (double)( (end_count - start_count) * 1000);
printf("CPU Processing time: %lf (ms)\n", host_ms);
// allocate device memory input and output arrays
float* d_idata = NULL;
float* d_odata = NULL;
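// Pad the device buffers up to a multiple of TILE_SIZE so the scan kernels always operate on full tiles.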
int padded_num_elements = TILE_SIZE*((num_elements+TILE_SIZE-1)/TILE_SIZE);
int padded_mem_size = padded_num_elements *sizeof(float);
// Make a padded copy of the input data
float* padded_hdata = (float*) malloc(padded_mem_size);
memcpy(padded_hdata, h_data, mem_size);
memset(padded_hdata+num_elements, 0, padded_mem_size - mem_size);
hipMalloc( (void**) &d_idata, padded_mem_size);
hipMalloc( (void**) &d_odata, padded_mem_size);
// copy host memory to device input array
hipMemcpy( d_idata, padded_hdata, padded_mem_size, hipMemcpyHostToDevice);
// initialize all the other device arrays to be safe
hipMemcpy( d_odata, padded_hdata, padded_mem_size, hipMemcpyHostToDevice);
free(padded_hdata);
padded_hdata = NULL;
// **===--------------- Allocate data structure here --------------===**
// preallocBlockSums(num_elements);
// **===-----------------------------------------------------------===**
// Run just once to remove startup overhead for more accurate performance
// measurement
prescanArray(d_odata, d_idata, TILE_SIZE);
// Run the prescan
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
// **===-------------- Modify the body of this function -----------===**
prescanArray(d_odata, d_idata, padded_num_elements);
// **===-----------------------------------------------------------===**
hipEventRecord(stop);
hipEventSynchronize(stop);
float device_ms = 0;
hipEventElapsedTime(&device_ms, start, stop);
printf("GPU Processing time: %f (ms)\n", device_ms);
printf("Speedup: %fX\n", host_ms/device_ms);
// **===--------------- Deallocate data structure here ------------===**
// deallocBlockSums();
// **===-----------------------------------------------------------===**
// copy result from device to host
hipMemcpy( h_data, d_odata, sizeof(float) * num_elements, hipMemcpyDeviceToHost);
if ((argc - 1) == 3) // Three Arguments, write result to file
{
WriteFile(h_data, argv[3], num_elements);
}
else if ((argc - 1) == 1) // One Argument, write result to file
{
WriteFile(h_data, argv[1], num_elements);
}
// Check if the result is equivalent to the expected solution
unsigned int result_regtest = CompareArrays( reference, h_data, num_elements);
printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
// cleanup memory
free( h_data);
free( reference);
hipFree( d_odata);
hipFree( d_idata);
}
// Read a floating point array in from file
int ReadFile(float* arr, char* file_name, int num_elements)
{
FILE* input = fopen(file_name, "r");
if (input == NULL) {
printf("Error opening file %s\n", file_name);
exit(1);
}
for (unsigned i = 0; i < num_elements; i++)
fscanf(input, "%f", &(arr[i]));
return num_elements;
}
// Read params of input matrices
int ReadParamsFile(int* params, char* file_name, int num_params)
{
FILE* input = fopen(file_name, "r");
if (input == NULL) {
printf("Error opening file %s\n", file_name);
exit(1);
}
for (unsigned i = 0; i < num_params; i++)
fscanf(input, "%d", &(params[i]));
return num_params;
}
// Write a 16x16 floating point matrix to file
void WriteFile(float* arr, char* file_name, int num_elements)
{
FILE* output = fopen(file_name, "w");
if (output == NULL) {
printf("Error opening file %s\n", file_name);
exit(1);
}
for (unsigned i = 0; i < num_elements; i++) {
fprintf(output, "%f ", arr[i]);
}
}
// returns true iff A and B have same elements in same order
bool CompareArrays(float *A, float *B, int size) {
for (unsigned i = 0; i < size; i++)
if (fabs(A[i] - B[i]) > 0.001f)
return false;
return true;
}
| 3bb8971d74af45603e1f2c4a4c0de9d4fbd9ff68.cu | #ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
// includes, kernels
#include "scan_largearray_kernel.cu"
#define DEFAULT_NUM_ELEMENTS 16000000
#define MAX_RAND 3
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
bool CompareArrays(float *A, float *B, int size);
void WriteFile(float* arr, char* file_name, int num_elements);
int ReadParamsFile(int* params, char* file_name, int num_params);
int ReadFile(float* arr, char* file_name, int num_elements);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
int num_read = 0;
int* size = (int*)malloc(1 * sizeof(int));
unsigned int data2read = 1;
int num_elements = 0; // Must support large, non-power-of-2 arrays
// allocate host memory to store the input data
unsigned int mem_size = sizeof( float) * num_elements;
float* h_data = (float*) malloc( mem_size);
// * No arguments: Randomly generate input data and compare against the
// host's result.
// * One argument: Randomly generate input data and write the result to
// file name specified by first argument
// * Two arguments: Read the first argument which indicate the size of the array,
// randomly generate input data and write the input data
// to the second argument. (for generating random input data)
// * Three arguments: Read the first file which indicate the size of the array,
// then input data from the file name specified by 2nd argument and write the
// SCAN output to file name specified by the 3rd argument.
switch(argc-1)
{
case 2:
// Determine size of array
data2read = ReadParamsFile(size, argv[1], data2read);
if(data2read != 1){
printf("Error reading parameter file\n");
exit(1);
}
num_elements = size[0];
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = (int)(rand() % MAX_RAND);
}
WriteFile(h_data, argv[2], num_elements);
break;
case 3: // Three Arguments
data2read = ReadParamsFile(size, argv[1], data2read);
if(data2read != 1){
printf("Error reading parameter file\n");
exit(1);
}
num_elements = size[0];
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
num_read = ReadFile(h_data, argv[2], num_elements);
if(num_read != num_elements)
{
printf("Error reading input file!\n");
exit(1);
}
break;
default: // No Arguments or one argument
// initialize the input data on the host to be integer values
// between 0 and 1000
// Use DEFAULT_NUM_ELEMENTS num_elements
num_elements = DEFAULT_NUM_ELEMENTS;
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
// h_data[i] = 1.0f;
h_data[i] = (int)(rand() % MAX_RAND);
}
break;
}
// compute reference solution
float* reference = (float*) malloc( mem_size);
struct timeval start_time, end_time;
gettimeofday(&start_time,NULL);
computeGold( reference, h_data, num_elements);
gettimeofday(&end_time,NULL);
printf("Processing %d elements...\n", num_elements);
double start_count = (double) start_time.tv_sec
+ 1.e-6 * (double) start_time.tv_usec;
double end_count = (double) end_time.tv_sec +
1.e-6 * (double) end_time.tv_usec;
double host_ms = (double)( (end_count - start_count) * 1000);
printf("CPU Processing time: %lf (ms)\n", host_ms);
// allocate device memory input and output arrays
float* d_idata = NULL;
float* d_odata = NULL;
int padded_num_elements = TILE_SIZE*((num_elements+TILE_SIZE-1)/TILE_SIZE);
int padded_mem_size = padded_num_elements *sizeof(float);
// Make a padded copy of the input data
float* padded_hdata = (float*) malloc(padded_mem_size);
memcpy(padded_hdata, h_data, mem_size);
memset(padded_hdata+num_elements, 0, padded_mem_size - mem_size);
cudaMalloc( (void**) &d_idata, padded_mem_size);
cudaMalloc( (void**) &d_odata, padded_mem_size);
// copy host memory to device input array
cudaMemcpy( d_idata, padded_hdata, padded_mem_size, cudaMemcpyHostToDevice);
// initialize all the other device arrays to be safe
cudaMemcpy( d_odata, padded_hdata, padded_mem_size, cudaMemcpyHostToDevice);
free(padded_hdata);
padded_hdata = NULL;
// **===--------------- Allocate data structure here --------------===**
// preallocBlockSums(num_elements);
// **===-----------------------------------------------------------===**
// Run just once to remove startup overhead for more accurate performance
// measurement
prescanArray(d_odata, d_idata, TILE_SIZE);
// Run the prescan
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// **===-------------- Modify the body of this function -----------===**
prescanArray(d_odata, d_idata, padded_num_elements);
// **===-----------------------------------------------------------===**
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float device_ms = 0;
cudaEventElapsedTime(&device_ms, start, stop);
printf("GPU Processing time: %f (ms)\n", device_ms);
printf("Speedup: %fX\n", host_ms/device_ms);
// **===--------------- Deallocate data structure here ------------===**
// deallocBlockSums();
// **===-----------------------------------------------------------===**
// copy result from device to host
cudaMemcpy( h_data, d_odata, sizeof(float) * num_elements, cudaMemcpyDeviceToHost);
if ((argc - 1) == 3) // Three Arguments, write result to file
{
WriteFile(h_data, argv[3], num_elements);
}
else if ((argc - 1) == 1) // One Argument, write result to file
{
WriteFile(h_data, argv[1], num_elements);
}
// Check if the result is equivalent to the expected solution
unsigned int result_regtest = CompareArrays( reference, h_data, num_elements);
printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
// cleanup memory
free( h_data);
free( reference);
cudaFree( d_odata);
cudaFree( d_idata);
}
// Read a floating point array in from file
int ReadFile(float* arr, char* file_name, int num_elements)
{
FILE* input = fopen(file_name, "r");
if (input == NULL) {
printf("Error opening file %s\n", file_name);
exit(1);
}
for (unsigned i = 0; i < num_elements; i++)
fscanf(input, "%f", &(arr[i]));
return num_elements;
}
// Read params of input matrices
int ReadParamsFile(int* params, char* file_name, int num_params)
{
FILE* input = fopen(file_name, "r");
if (input == NULL) {
printf("Error opening file %s\n", file_name);
exit(1);
}
for (unsigned i = 0; i < num_params; i++)
fscanf(input, "%d", &(params[i]));
return num_params;
}
// Write a 16x16 floating point matrix to file
void WriteFile(float* arr, char* file_name, int num_elements)
{
FILE* output = fopen(file_name, "w");
if (output == NULL) {
printf("Error opening file %s\n", file_name);
exit(1);
}
for (unsigned i = 0; i < num_elements; i++) {
fprintf(output, "%f ", arr[i]);
}
}
// returns true iff A and B have same elements in same order
bool CompareArrays(float *A, float *B, int size) {
for (unsigned i = 0; i < size; i++)
if (fabs(A[i] - B[i]) > 0.001f)
return false;
return true;
}
|
f740bdad79add2b4f54500248cd97e2bb6b9eadb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "elementwise_1D_1D_div.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *in1 = NULL;
hipMalloc(&in1, XSIZE*YSIZE);
float *in2 = NULL;
hipMalloc(&in2, XSIZE*YSIZE);
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((elementwise_1D_1D_div), dim3(gridBlock), dim3(threadBlock), 0, 0, in1,in2,out,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((elementwise_1D_1D_div), dim3(gridBlock), dim3(threadBlock), 0, 0, in1,in2,out,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((elementwise_1D_1D_div), dim3(gridBlock), dim3(threadBlock), 0, 0, in1,in2,out,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f740bdad79add2b4f54500248cd97e2bb6b9eadb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "elementwise_1D_1D_div.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *in1 = NULL;
cudaMalloc(&in1, XSIZE*YSIZE);
float *in2 = NULL;
cudaMalloc(&in2, XSIZE*YSIZE);
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
elementwise_1D_1D_div<<<gridBlock,threadBlock>>>(in1,in2,out,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
elementwise_1D_1D_div<<<gridBlock,threadBlock>>>(in1,in2,out,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
elementwise_1D_1D_div<<<gridBlock,threadBlock>>>(in1,in2,out,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9aa990edc3f0a599376c5d9b4a5eaa4a7864f6af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "ATen/hip/HIPContext.h"
#include "ATen/TensorUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/WrapDimUtils.h"
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include "ATen/AccumulateType.h"
#include "ATen/hip/NumericLimits.cuh"
#include <type_traits>
namespace at {
namespace native {
namespace {
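// The epilogue functors below turn the per-slice max and sum (forward) or the gradient sum (backward) into the final softmax / log-softmax values.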
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue {
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: logsum(max_input + ::log(sum)) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(input - logsum);
}
const AccumT logsum;
};
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue {
__device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - ::exp(static_cast<AccumT>(output)) * sum);
}
const AccumT sum;
};
template<typename T, typename AccumT, typename OutT>
struct SoftMaxForwardEpilogue {
__device__ __forceinline__ SoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: max_input(max_input)
, sum(sum) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(::exp(input - max_input) / sum);
}
const AccumT max_input;
const AccumT sum;
};
template<typename T, typename AccumT, typename OutT>
struct SoftMaxBackwardEpilogue {
__device__ __forceinline__ SoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
// XXX: gradOutput that we get here is really gradOutput * output
// Look for cmul in SoftMax_updateGradInput
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - output * sum);
}
const AccumT sum;
};
////////////////////////////////////////////////////////////////////////////////
// Spatial kernel (fast with large inner_size and small dim_size)
////////////////////////////////////////////////////////////////////////////////
// Let's assume that our input has been flattened to have only three dimensions:
// outer x dim x inner
// The spatial algorithm tries to parallelize along all of them.
// Within a 2d block threadIdx.y parallelizes over dim slices, and threads that
// share it will speed up reductions over dim (along axis x).
// The 2d grid is used to parallelize the inner dimension over the y axis and the outer over x.
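// Illustrative mapping (ignoring the occupancy cap applied to the grid): a block of
// (dim_threads, inner_threads) lets threadIdx.x cooperate on the reduction over dim
// while threadIdx.y covers inner positions, and a grid of (outer_blocks, inner_blocks)
// tiles the outer and inner dimensions.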
inline dim3 SpatialSoftMax_getGridSize(
dim3 block, uint32_t max_active_blocks,
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
// First, tile as many blocks as we can over the y axis
uint32_t inner_blocks = (inner_size + block.y - 1) / block.y;
if (inner_blocks > max_active_blocks)
inner_blocks = max_active_blocks;
// Fill the x axis with as many blocks as we can fit (a little more is ok too)
uint32_t outer_blocks = (max_active_blocks + inner_blocks - 1) / inner_blocks;
if (outer_blocks > outer_size)
outer_blocks = outer_size;
return dim3(outer_blocks, inner_blocks);
}
const int max_threads = 1024;
inline dim3 SpatialSoftMax_getBlockSize(
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
uint32_t inner_threads = inner_size;
inner_threads = ::min(inner_threads, static_cast<uint32_t>(max_threads));
uint32_t dim_threads = 1;
if (inner_threads <= 64 && dim_size >= 64) {
while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size)
dim_threads *= 2;
dim_threads /= 2;
}
return dim3(dim_threads, inner_threads);
}
template<typename accscalar_t, typename Kernel>
void SpatialSoftMax_getLaunchSizes(
Kernel k,
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size,
dim3& grid, dim3& block, uint32_t& smem_size) {
block = SpatialSoftMax_getBlockSize(outer_size, dim_size, inner_size);
uint32_t block_threads = block.x * block.y;
smem_size = block.x == 1 ? 0 : block_threads * sizeof(accscalar_t);
int max_active_blocks;
#ifdef __HIP_PLATFORM_HCC__
max_active_blocks = 16;
#else
hipOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks,
k, block_threads, smem_size);
#endif
max_active_blocks *= at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
grid = SpatialSoftMax_getGridSize(block, max_active_blocks, outer_size, dim_size, inner_size);
}
inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) {
uint64_t block_size = 1;
uint64_t max_block_size = ::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
while (block_size < max_block_size) block_size *= 2;
// Launch at least a single warp - the kernel assumes that.
block_size = ::max(block_size, static_cast<uint64_t>(32));
return dim3(block_size);
}
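// Worked example (illustrative): dim_size = 1000 and ILP = 2 give
// max_block_size = min(500, 1024) = 500, so block_size doubles from 1 up to 512,
// and the final block is max(512, 32) = 512 threads.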
template<typename T>
struct Add {
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
template<typename T>
struct Max {
__device__ __forceinline__ T operator()(T a, T b) const {
return a < b ? b : a;
}
};
// Note that it's not a complete block-wide reduction.
// Only threads that share threadIdx.y reduce values.
template<typename T, template<typename> class ReduceOp>
__forceinline__ __device__
T spatialBlockReduceX(T *shared, T val) {
ReduceOp<T> r;
shared += threadIdx.y * blockDim.x;
__syncthreads();
shared[threadIdx.x] = val;
// NOTE: loop starts with __syncthreads()
int offset = blockDim.x / 2;
while (offset > 0) {
__syncthreads();
if (threadIdx.x < offset)
shared[threadIdx.x] = r(shared[threadIdx.x], shared[threadIdx.x + offset]);
offset /= 2;
}
__syncthreads();
return shared[0];
}
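// Note: the halving loop above assumes blockDim.x is a power of two, which
// SpatialSoftMax_getBlockSize guarantees because dim_threads is only ever doubled.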
template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void cunn_SpatialSoftMaxForward(
outscalar_t *output, scalar_t *input,
uint32_t outer_size, uint32_t dim_size, uint32_t inner_size)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
const uint32_t outer_stride = inner_size * dim_size;
const uint32_t dim_stride = inner_size;
for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) {
const uint32_t outer_offset = outer_index * outer_stride;
for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) {
const uint32_t data_offset = outer_offset + inner_index;
////////////////////////////////////////////////////////////
      // These two blocks are really equivalent, but specializing on
// blockDim.x == 1 makes the kernel faster when it's unused.
// I didn't want to thread an extra template parameter, and nvcc
// seems to be smart enough to hoist the if outside of the loops.
////////////////////////////////////////////////////////////
if (blockDim.x > 1) {
accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest();
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]);
max_input = Max<accscalar_t>()(max_input, value);
}
max_input = spatialBlockReduceX<accscalar_t, Max>(sdata,max_input);
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += ::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride])
- max_input);
sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]);
} else {
accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest();
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]);
max_input = Max<accscalar_t>()(max_input, value);
}
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += ::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride])
- max_input);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]);
}
}
}
}
template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void cunn_SpatialSoftMaxBackward(
scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput,
uint32_t outer_size, uint32_t dim_size, uint32_t inner_size)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
const uint32_t outer_stride = inner_size * dim_size;
const uint32_t dim_stride = inner_size;
for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) {
const uint32_t outer_offset = outer_index * outer_stride;
for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) {
const uint32_t data_offset = outer_offset + inner_index;
// See the comment in forward kernel
if (blockDim.x > 1) {
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += gradOutput[data_offset + d * dim_stride];
sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
gradInput[data_offset + d * dim_stride] =
epilogue(gradOutput[data_offset + d * dim_stride],
output[data_offset + d * dim_stride]);
}
} else {
accscalar_t sum = 0;
for (uint32_t d = 0; d < dim_size; d++)
sum += gradOutput[data_offset + d * dim_stride];
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum);
for (uint32_t d = 0; d < dim_size; d++) {
gradInput[data_offset + d * dim_stride] =
epilogue(gradOutput[data_offset + d * dim_stride],
output[data_offset + d * dim_stride]);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Regular kernel (fast when dim_size is large; requires inner_size == 1)
////////////////////////////////////////////////////////////////////////////////
template <typename T, typename AccumT>
struct MaxFloat
{
__device__ __forceinline__ AccumT operator()(AccumT max, T v) const {
return ::max(max, (AccumT)v);
}
};
template<typename T, typename AccumT>
struct AddFloat
{
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + v;
}
};
template<typename T, typename AccumT>
struct SumExpFloat
{
__device__ __forceinline__ SumExpFloat(AccumT v)
: max_k(v) {}
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + ::exp(v - max_k);
}
const AccumT max_k;
};
template <template<typename> class Reduction, typename AccumT>
__device__ __forceinline__ AccumT
blockReduce(AccumT* smem, AccumT val,
const Reduction<AccumT>& r,
AccumT defaultVal)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
AccumT warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
if (threadIdx.x < 32) {
int lane = threadIdx.x % 32;
if (lane < blockDim.x / 32) {
#pragma unroll
for (int i = 0; i < 32; ++i) {
warpVal = r(warpVal, smem[lane * 32 + i]);
}
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal = defaultVal;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / 32; ++i) {
blockVal = r(blockVal, smem[i]);
}
smem[0] = blockVal;
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
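// Note (illustrative): blockReduce is a two-stage reduction. With blockDim.x = 128
// there are 4 warps, so lanes 0..3 of the first warp each combine one 32-value
// chunk of smem, and thread 0 then folds those 4 per-warp partials into smem[0].
// It assumes blockDim.x is a multiple of 32, which SoftMax_getBlockSize guarantees.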
template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT>
__device__ __forceinline__ AccumT
ilpReduce(T* data,
int size,
const Reduction<T, AccumT>& r,
AccumT defaultVal)
{
AccumT threadVal = defaultVal;
int offset = threadIdx.x;
int last = size % (ILP * blockDim.x);
// Body (unroll by ILP times)
for (; offset < size - last; offset += blockDim.x * ILP) {
T tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
tmp[j] = data[offset + j * blockDim.x];
#pragma unroll
for (int j = 0; j < ILP; ++j)
threadVal = r(threadVal, tmp[j]);
}
// Epilogue
for (; offset < size; offset += blockDim.x)
threadVal = r(threadVal, data[offset]);
return threadVal;
}
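// Worked example (illustrative): size = 3000, ILP = 2, blockDim.x = 512 give
// last = 3000 % 1024 = 952; the unrolled body covers offsets [0, 2048) in full
// 1024-element tiles and the strided epilogue handles the remaining [2048, 3000).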
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxForward(outscalar_t *output, scalar_t *input, int classes)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
// forward pointers to batch[blockIdx.x]
// each block handles a sample in the mini-batch
input += blockIdx.x * classes;
output += blockIdx.x * classes;
// find the max
accscalar_t threadMax = ilpReduce<MaxFloat, ILP, scalar_t, accscalar_t>(
input, classes, MaxFloat<scalar_t, accscalar_t>(), -at::numeric_limits<accscalar_t>::max());
accscalar_t max_k = blockReduce<Max, accscalar_t>(
sdata, threadMax, Max<accscalar_t>(), -at::numeric_limits<accscalar_t>::max());
// reduce all values
accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>(
input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0));
accscalar_t sumAll = blockReduce<Add, accscalar_t>(
sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll);
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
scalar_t tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
tmp[j] = input[offset + j * blockDim.x];
#pragma unroll
for (int j = 0; j < ILP; ++j)
output[offset + j * blockDim.x] = epilogue(tmp[j]);
}
for (; offset < classes; offset += blockDim.x)
output[offset] = epilogue(input[offset]);
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxBackward(scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, int classes)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
gradInput += blockIdx.x * classes;
output += blockIdx.x * classes;
gradOutput += blockIdx.x * classes;
accscalar_t threadSum = ilpReduce<AddFloat, 4, outscalar_t, accscalar_t>(
gradOutput, classes, AddFloat<outscalar_t, accscalar_t>(), accscalar_t(0));
accscalar_t sum_k = blockReduce<Add, accscalar_t>(
sdata, threadSum, Add<accscalar_t>(), accscalar_t(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum_k);
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
outscalar_t tmpGradOutput[ILP];
outscalar_t tmpOutput[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmpGradOutput[j] = gradOutput[offset + j * blockDim.x];
tmpOutput[j] = output[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
gradInput[offset + j * blockDim.x] = epilogue(tmpGradOutput[j], tmpOutput[j]);
}
for (; offset < classes; offset += blockDim.x)
gradInput[offset] = epilogue(gradOutput[offset], output[offset]);
}
template<template<typename, typename, typename> class Epilogue>
Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float){
if (half_to_float) AT_ASSERTM(input_.type().scalarType() == ScalarType::Half,"conversion is supported for Half type only");
auto input = input_.contiguous();
Tensor output = half_to_float ? at::empty_like(input, input.options().dtype(ScalarType::Float)) : at::empty_like(input);
static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float");
if (input.dim() == 0) input = input.view(1);
int64_t dim = maybe_wrap_dim(dim_, input.dim());
AT_CHECK(dim >=0 && dim < input.dim(), "dim must be non-negative and less than input dimensions");
int64_t outer_size = 1;
int64_t dim_size = input.size(dim);
if (input.numel() > 0) {
int64_t inner_size = 1;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
for (int64_t i = 0; i < dim; ++i)
outer_size *= input.size(i);
for (int64_t i = dim + 1; i < input.dim(); ++i)
inner_size *= input.size(i);
// This kernel spawns a block per each element in the batch.
// XXX: it assumes that inner_size == 1
if (inner_size == 1) {
const int ILP = 2;
dim3 grid(outer_size);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "host_softmax", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
hipLaunchKernelGGL(( cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
output.data<scalar_t>(), input.data<scalar_t>(), dim_size
);
} else {
hipLaunchKernelGGL(( cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
output.data<accscalar_t>(), input.data<scalar_t>(), dim_size
);
}
});
// This kernel runs in a 2D grid, where each application along y dimension has a fixed
// outer_size, and runs in parallel over inner_size. Dimension x is parallel over outer_size.
// Reductions over dim are done in a single-threaded manner.
} else {
uint32_t smem_size;
dim3 grid, block;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "host_softmax", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
hipLaunchKernelGGL(( cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>)
, dim3(grid), dim3(block), smem_size, stream,
output.data<scalar_t>(), input.data<scalar_t>(), outer_size, dim_size, inner_size
);
} else {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
hipLaunchKernelGGL(( cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), smem_size, stream,
output.data<accscalar_t>(), input.data<scalar_t>(), outer_size, dim_size, inner_size
);
}
});
}
THCudaCheck(hipGetLastError());
}
return output;
}
template<template<typename, typename, typename> class Epilogue>
Tensor host_softmax_backward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float){
int64_t dim = maybe_wrap_dim(dim_, grad_.dim());
auto grad = grad_.contiguous();
Tensor gI = half_to_float ? at::empty_like(grad, grad.options().dtype(ScalarType::Half)) : at::empty_like(grad);
static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float");
if (grad.dim() == 0) grad = grad.view(1);
AT_CHECK(dim >=0 && dim < grad.dim(), "dim must be non-negative and less than input dimensions");
auto output = output_.contiguous();
if (output.dim() == 0) output = output.view(1);
int64_t outer_size = 1;
int64_t dim_size = output.size(dim);
int64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= output.size(i);
for (int64_t i = dim + 1; i < output.dim(); ++i)
inner_size *= output.size(i);
// See descriptions of kernels above.
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (inner_size == 1) {
const int ILP = 2;
dim3 grid(outer_size);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gI.type(), "host_softmax_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
hipLaunchKernelGGL(( cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
gI.data<scalar_t>(), output.data<scalar_t>(), grad.data<scalar_t>(), dim_size
);
} else {
hipLaunchKernelGGL(( cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
gI.data<scalar_t>(), output.data<accscalar_t>(), grad.data<accscalar_t>(), dim_size
);
}
});
} else {
uint32_t smem_size;
dim3 grid, block;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "host_softmax_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
hipLaunchKernelGGL(( cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>)
, dim3(grid), dim3(block), smem_size, stream,
gI.data<scalar_t>(), output.data<scalar_t>(), grad.data<scalar_t>(),
outer_size, dim_size, inner_size
);
} else {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
hipLaunchKernelGGL(( cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), smem_size, stream,
gI.data<scalar_t>(), output.data<accscalar_t>(), grad.data<accscalar_t>(),
outer_size, dim_size, inner_size
);
}
});
}
THCudaCheck(hipGetLastError());
return gI;
}
}
Tensor log_softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){
return host_softmax<LogSoftMaxForwardEpilogue>(input, dim, half_to_float);
}
Tensor log_softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){
bool half_to_float = grad.type().scalarType() != input.type().scalarType();
if (half_to_float) {
AT_ASSERTM((grad.type().scalarType() == ScalarType::Float && input.type().scalarType() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
return host_softmax_backward<LogSoftMaxBackwardEpilogue>(grad, output, dim, half_to_float);
}
Tensor softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){
return host_softmax<SoftMaxForwardEpilogue>(input, dim, half_to_float);
}
Tensor softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){
bool half_to_float = grad.type().scalarType() != input.type().scalarType();
if (half_to_float) {
AT_ASSERTM((grad.type().scalarType() == ScalarType::Float && input.type().scalarType() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
Tensor tmp = grad * output;
return host_softmax_backward<SoftMaxBackwardEpilogue>(tmp, output, dim, half_to_float);
}
}
}
| 9aa990edc3f0a599376c5d9b4a5eaa4a7864f6af.cu | #include "ATen/ATen.h"
#include "ATen/cuda/CUDAContext.h"
#include "ATen/TensorUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/WrapDimUtils.h"
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include "ATen/AccumulateType.h"
#include "ATen/cuda/NumericLimits.cuh"
#include <type_traits>
namespace at {
namespace native {
namespace {
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue {
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: logsum(max_input + std::log(sum)) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(input - logsum);
}
const AccumT logsum;
};
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue {
__device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - std::exp(static_cast<AccumT>(output)) * sum);
}
const AccumT sum;
};
template<typename T, typename AccumT, typename OutT>
struct SoftMaxForwardEpilogue {
__device__ __forceinline__ SoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: max_input(max_input)
, sum(sum) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(std::exp(input - max_input) / sum);
}
const AccumT max_input;
const AccumT sum;
};
template<typename T, typename AccumT, typename OutT>
struct SoftMaxBackwardEpilogue {
__device__ __forceinline__ SoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
// XXX: gradOutput that we get here is really gradOutput * output
// Look for cmul in SoftMax_updateGradInput
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - output * sum);
}
const AccumT sum;
};
////////////////////////////////////////////////////////////////////////////////
// Spatial kernel (fast with large inner_size and small dim_size)
////////////////////////////////////////////////////////////////////////////////
// Let's assume that our input has been flattened to have only three dimensions:
// outer x dim x inner
// The spatial algorithm tries to parallelize along all of them.
// Within a 2d block threadIdx.y parallelizes over dim slices, and threads that
// share it will speed up reductions over dim (along axis x).
// The 2d grid is used to parallelize the inner dimension over the y axis and outer over x.
inline dim3 SpatialSoftMax_getGridSize(
dim3 block, uint32_t max_active_blocks,
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
// First, tile as many blocks as we can over the y axis
uint32_t inner_blocks = (inner_size + block.y - 1) / block.y;
if (inner_blocks > max_active_blocks)
inner_blocks = max_active_blocks;
// Fill the x axis with as many blocks as we can fit (a little more is ok too)
uint32_t outer_blocks = (max_active_blocks + inner_blocks - 1) / inner_blocks;
if (outer_blocks > outer_size)
outer_blocks = outer_size;
return dim3(outer_blocks, inner_blocks);
}
const int max_threads = 1024;
inline dim3 SpatialSoftMax_getBlockSize(
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
uint32_t inner_threads = inner_size;
inner_threads = std::min(inner_threads, static_cast<uint32_t>(max_threads));
uint32_t dim_threads = 1;
if (inner_threads <= 64 && dim_size >= 64) {
while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size)
dim_threads *= 2;
dim_threads /= 2;
}
return dim3(dim_threads, inner_threads);
}
template<typename accscalar_t, typename Kernel>
void SpatialSoftMax_getLaunchSizes(
Kernel k,
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size,
dim3& grid, dim3& block, uint32_t& smem_size) {
block = SpatialSoftMax_getBlockSize(outer_size, dim_size, inner_size);
uint32_t block_threads = block.x * block.y;
smem_size = block.x == 1 ? 0 : block_threads * sizeof(accscalar_t);
int max_active_blocks;
#ifdef __HIP_PLATFORM_HCC__
max_active_blocks = 16;
#else
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks,
k, block_threads, smem_size);
#endif
max_active_blocks *= at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
grid = SpatialSoftMax_getGridSize(block, max_active_blocks, outer_size, dim_size, inner_size);
}
inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) {
uint64_t block_size = 1;
uint64_t max_block_size = std::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
while (block_size < max_block_size) block_size *= 2;
// Launch at least a single warp - the kernel assumes that.
block_size = std::max(block_size, static_cast<uint64_t>(32));
return dim3(block_size);
}
template<typename T>
struct Add {
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
template<typename T>
struct Max {
__device__ __forceinline__ T operator()(T a, T b) const {
return a < b ? b : a;
}
};
// Note that it's not a complete block-wide reduction.
// Only threads that share threadIdx.y reduce values.
template<typename T, template<typename> class ReduceOp>
__forceinline__ __device__
T spatialBlockReduceX(T *shared, T val) {
ReduceOp<T> r;
shared += threadIdx.y * blockDim.x;
__syncthreads();
shared[threadIdx.x] = val;
// NOTE: loop starts with __syncthreads()
int offset = blockDim.x / 2;
while (offset > 0) {
__syncthreads();
if (threadIdx.x < offset)
shared[threadIdx.x] = r(shared[threadIdx.x], shared[threadIdx.x + offset]);
offset /= 2;
}
__syncthreads();
return shared[0];
}
template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void cunn_SpatialSoftMaxForward(
outscalar_t *output, scalar_t *input,
uint32_t outer_size, uint32_t dim_size, uint32_t inner_size)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
const uint32_t outer_stride = inner_size * dim_size;
const uint32_t dim_stride = inner_size;
for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) {
const uint32_t outer_offset = outer_index * outer_stride;
for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) {
const uint32_t data_offset = outer_offset + inner_index;
////////////////////////////////////////////////////////////
      // These two blocks are really equivalent, but specializing on
// blockDim.x == 1 makes the kernel faster when it's unused.
// I didn't want to thread an extra template parameter, and nvcc
// seems to be smart enough to hoist the if outside of the loops.
////////////////////////////////////////////////////////////
if (blockDim.x > 1) {
accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest();
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]);
max_input = Max<accscalar_t>()(max_input, value);
}
max_input = spatialBlockReduceX<accscalar_t, Max>(sdata,max_input);
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += std::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride])
- max_input);
sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]);
} else {
accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest();
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]);
max_input = Max<accscalar_t>()(max_input, value);
}
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += std::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride])
- max_input);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]);
}
}
}
}
template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void cunn_SpatialSoftMaxBackward(
scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput,
uint32_t outer_size, uint32_t dim_size, uint32_t inner_size)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
const uint32_t outer_stride = inner_size * dim_size;
const uint32_t dim_stride = inner_size;
for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) {
const uint32_t outer_offset = outer_index * outer_stride;
for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) {
const uint32_t data_offset = outer_offset + inner_index;
// See the comment in forward kernel
if (blockDim.x > 1) {
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += gradOutput[data_offset + d * dim_stride];
sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
gradInput[data_offset + d * dim_stride] =
epilogue(gradOutput[data_offset + d * dim_stride],
output[data_offset + d * dim_stride]);
}
} else {
accscalar_t sum = 0;
for (uint32_t d = 0; d < dim_size; d++)
sum += gradOutput[data_offset + d * dim_stride];
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum);
for (uint32_t d = 0; d < dim_size; d++) {
gradInput[data_offset + d * dim_stride] =
epilogue(gradOutput[data_offset + d * dim_stride],
output[data_offset + d * dim_stride]);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Regular kernel (fast when dim_size is large; requires inner_size == 1)
////////////////////////////////////////////////////////////////////////////////
template <typename T, typename AccumT>
struct MaxFloat
{
__device__ __forceinline__ AccumT operator()(AccumT max, T v) const {
return ::max(max, (AccumT)v);
}
};
template<typename T, typename AccumT>
struct AddFloat
{
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + v;
}
};
template<typename T, typename AccumT>
struct SumExpFloat
{
__device__ __forceinline__ SumExpFloat(AccumT v)
: max_k(v) {}
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + std::exp(v - max_k);
}
const AccumT max_k;
};
template <template<typename> class Reduction, typename AccumT>
__device__ __forceinline__ AccumT
blockReduce(AccumT* smem, AccumT val,
const Reduction<AccumT>& r,
AccumT defaultVal)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
AccumT warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
if (threadIdx.x < 32) {
int lane = threadIdx.x % 32;
if (lane < blockDim.x / 32) {
#pragma unroll
for (int i = 0; i < 32; ++i) {
warpVal = r(warpVal, smem[lane * 32 + i]);
}
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal = defaultVal;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / 32; ++i) {
blockVal = r(blockVal, smem[i]);
}
smem[0] = blockVal;
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT>
__device__ __forceinline__ AccumT
ilpReduce(T* data,
int size,
const Reduction<T, AccumT>& r,
AccumT defaultVal)
{
AccumT threadVal = defaultVal;
int offset = threadIdx.x;
int last = size % (ILP * blockDim.x);
// Body (unroll by ILP times)
for (; offset < size - last; offset += blockDim.x * ILP) {
T tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
tmp[j] = data[offset + j * blockDim.x];
#pragma unroll
for (int j = 0; j < ILP; ++j)
threadVal = r(threadVal, tmp[j]);
}
// Epilogue
for (; offset < size; offset += blockDim.x)
threadVal = r(threadVal, data[offset]);
return threadVal;
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxForward(outscalar_t *output, scalar_t *input, int classes)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
// forward pointers to batch[blockIdx.x]
// each block handles a sample in the mini-batch
input += blockIdx.x * classes;
output += blockIdx.x * classes;
// find the max
accscalar_t threadMax = ilpReduce<MaxFloat, ILP, scalar_t, accscalar_t>(
input, classes, MaxFloat<scalar_t, accscalar_t>(), -at::numeric_limits<accscalar_t>::max());
accscalar_t max_k = blockReduce<Max, accscalar_t>(
sdata, threadMax, Max<accscalar_t>(), -at::numeric_limits<accscalar_t>::max());
// reduce all values
accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>(
input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0));
accscalar_t sumAll = blockReduce<Add, accscalar_t>(
sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll);
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
scalar_t tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
tmp[j] = input[offset + j * blockDim.x];
#pragma unroll
for (int j = 0; j < ILP; ++j)
output[offset + j * blockDim.x] = epilogue(tmp[j]);
}
for (; offset < classes; offset += blockDim.x)
output[offset] = epilogue(input[offset]);
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxBackward(scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, int classes)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
gradInput += blockIdx.x * classes;
output += blockIdx.x * classes;
gradOutput += blockIdx.x * classes;
accscalar_t threadSum = ilpReduce<AddFloat, 4, outscalar_t, accscalar_t>(
gradOutput, classes, AddFloat<outscalar_t, accscalar_t>(), accscalar_t(0));
accscalar_t sum_k = blockReduce<Add, accscalar_t>(
sdata, threadSum, Add<accscalar_t>(), accscalar_t(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum_k);
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
outscalar_t tmpGradOutput[ILP];
outscalar_t tmpOutput[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmpGradOutput[j] = gradOutput[offset + j * blockDim.x];
tmpOutput[j] = output[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
gradInput[offset + j * blockDim.x] = epilogue(tmpGradOutput[j], tmpOutput[j]);
}
for (; offset < classes; offset += blockDim.x)
gradInput[offset] = epilogue(gradOutput[offset], output[offset]);
}
template<template<typename, typename, typename> class Epilogue>
Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float){
if (half_to_float) AT_ASSERTM(input_.type().scalarType() == ScalarType::Half,"conversion is supported for Half type only");
auto input = input_.contiguous();
Tensor output = half_to_float ? at::empty_like(input, input.options().dtype(ScalarType::Float)) : at::empty_like(input);
static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float");
if (input.dim() == 0) input = input.view(1);
int64_t dim = maybe_wrap_dim(dim_, input.dim());
AT_CHECK(dim >=0 && dim < input.dim(), "dim must be non-negative and less than input dimensions");
int64_t outer_size = 1;
int64_t dim_size = input.size(dim);
if (input.numel() > 0) {
int64_t inner_size = 1;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
for (int64_t i = 0; i < dim; ++i)
outer_size *= input.size(i);
for (int64_t i = dim + 1; i < input.dim(); ++i)
inner_size *= input.size(i);
// This kernel spawns a block per each element in the batch.
// XXX: it assumes that inner_size == 1
if (inner_size == 1) {
const int ILP = 2;
dim3 grid(outer_size);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "host_softmax", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
output.data<scalar_t>(), input.data<scalar_t>(), dim_size
);
} else {
cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
output.data<accscalar_t>(), input.data<scalar_t>(), dim_size
);
}
});
// This kernel runs in a 2D grid, where each application along y dimension has a fixed
// outer_size, and runs in parallel over inner_size. Dimension x is parallel over outer_size.
// Reductions over dim are done in a single-threaded manner.
} else {
uint32_t smem_size;
dim3 grid, block;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "host_softmax", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>
<<<grid, block, smem_size, stream>>>(
output.data<scalar_t>(), input.data<scalar_t>(), outer_size, dim_size, inner_size
);
} else {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, smem_size, stream>>>(
output.data<accscalar_t>(), input.data<scalar_t>(), outer_size, dim_size, inner_size
);
}
});
}
THCudaCheck(cudaGetLastError());
}
return output;
}
template<template<typename, typename, typename> class Epilogue>
Tensor host_softmax_backward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float){
int64_t dim = maybe_wrap_dim(dim_, grad_.dim());
auto grad = grad_.contiguous();
Tensor gI = half_to_float ? at::empty_like(grad, grad.options().dtype(ScalarType::Half)) : at::empty_like(grad);
static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float");
if (grad.dim() == 0) grad = grad.view(1);
AT_CHECK(dim >=0 && dim < grad.dim(), "dim must be non-negative and less than input dimensions");
auto output = output_.contiguous();
if (output.dim() == 0) output = output.view(1);
int64_t outer_size = 1;
int64_t dim_size = output.size(dim);
int64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= output.size(i);
for (int64_t i = dim + 1; i < output.dim(); ++i)
inner_size *= output.size(i);
// See descriptions of kernels above.
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (inner_size == 1) {
const int ILP = 2;
dim3 grid(outer_size);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gI.type(), "host_softmax_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
gI.data<scalar_t>(), output.data<scalar_t>(), grad.data<scalar_t>(), dim_size
);
} else {
cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
gI.data<scalar_t>(), output.data<accscalar_t>(), grad.data<accscalar_t>(), dim_size
);
}
});
} else {
uint32_t smem_size;
dim3 grid, block;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "host_softmax_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>
<<<grid, block, smem_size, stream>>>(
gI.data<scalar_t>(), output.data<scalar_t>(), grad.data<scalar_t>(),
outer_size, dim_size, inner_size
);
} else {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, smem_size, stream>>>(
gI.data<scalar_t>(), output.data<accscalar_t>(), grad.data<accscalar_t>(),
outer_size, dim_size, inner_size
);
}
});
}
THCudaCheck(cudaGetLastError());
return gI;
}
}
Tensor log_softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){
return host_softmax<LogSoftMaxForwardEpilogue>(input, dim, half_to_float);
}
Tensor log_softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){
bool half_to_float = grad.type().scalarType() != input.type().scalarType();
if (half_to_float) {
AT_ASSERTM((grad.type().scalarType() == ScalarType::Float && input.type().scalarType() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
return host_softmax_backward<LogSoftMaxBackwardEpilogue>(grad, output, dim, half_to_float);
}
Tensor softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){
return host_softmax<SoftMaxForwardEpilogue>(input, dim, half_to_float);
}
Tensor softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){
bool half_to_float = grad.type().scalarType() != input.type().scalarType();
if (half_to_float) {
AT_ASSERTM((grad.type().scalarType() == ScalarType::Float && input.type().scalarType() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
Tensor tmp = grad * output;
return host_softmax_backward<SoftMaxBackwardEpilogue>(tmp, output, dim, half_to_float);
}
}
}
|
17f8adb6f39675f5d3a9c9f4aa313a21ef114929.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Test that we can disable cross-target call checks in Sema with the
// -fcuda-disable-target-call-checks flag. Without this flag we'd get a bunch
// of errors here, since there are invalid cross-target calls present.
// RUN: %clang_cc1 -fsyntax-only -verify %s -fcuda-disable-target-call-checks
// RUN: %clang_cc1 -fsyntax-only -fcuda-is-device -verify %s -fcuda-disable-target-call-checks
// expected-no-diagnostics
#define __device__ __attribute__((device))
#define __global__ __attribute__((global))
#define __host__ __attribute__((host))
__attribute__((host)) void h1();
__attribute__((device)) void d1() {
h1();
}
__attribute__((host)) void h2() {
d1();
}
__attribute__((global)) void g1() {
h2();
}
| 17f8adb6f39675f5d3a9c9f4aa313a21ef114929.cu | // Test that we can disable cross-target call checks in Sema with the
// -fcuda-disable-target-call-checks flag. Without this flag we'd get a bunch
// of errors here, since there are invalid cross-target calls present.
// RUN: %clang_cc1 -fsyntax-only -verify %s -fcuda-disable-target-call-checks
// RUN: %clang_cc1 -fsyntax-only -fcuda-is-device -verify %s -fcuda-disable-target-call-checks
// expected-no-diagnostics
#define __device__ __attribute__((device))
#define __global__ __attribute__((global))
#define __host__ __attribute__((host))
__attribute__((host)) void h1();
__attribute__((device)) void d1() {
h1();
}
__attribute__((host)) void h2() {
d1();
}
__attribute__((global)) void g1() {
h2();
}
|
556bd31342f1c0433c1131833c97169408bc5e26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*=========================================================================
* GPU accelerated motion compensation for MRI
*
* Copyright (c) 2016 Bernhard Kainz, Amir Alansary, Maria Kuklisova-Murgasova,
* Kevin Keraudren, Markus Steinberger
* ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
=========================================================================*/
#include "patchBasedRobustStatistics_gpu.cuh"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/functional.h>
#include <thrust/tuple.h>
#include <thrust/transform_reduce.h>
#include <math.h>
//The globally constant point spread function
extern __constant__ PointSpreadFunction<float> _PSF;
using namespace thrust;
template <typename T>
__global__ void resetScaleAndWeights(PatchBasedVolume<T> inputStack)
{
const unsigned int idx = blockIdx.x* blockDim.x + threadIdx.x;
uint3 vSize = inputStack.getXYZPatchGridSize();
if (idx >= vSize.z)
return;
inputStack.getImagePatch2D(idx).scale = 1.0f;
inputStack.getImagePatch2D(idx).patchWeight = 1.0f;
}
template <typename T>
__global__ void InitializeEMValuesKernel(PatchBasedVolume<T> inputStack)
{
const uint3 pos = make_uint3(blockIdx.x* blockDim.x + threadIdx.x,
blockIdx.y* blockDim.y + threadIdx.y,
blockIdx.z* blockDim.z + threadIdx.z);
uint3 vSize = inputStack.getXYZPatchGridSize();
if (pos.x >= vSize.x || pos.y >= vSize.y || pos.z >= vSize.z)
return;
T s = inputStack.getPatchValue(pos);
if (s != -1 && s != 0)
{
inputStack.setWeightValue(pos, 1);
}
else
{
inputStack.setWeightValue(pos, 0);
}
}
template <typename T>
void patchBasedRobustStatistics_gpu<T>::initializeEMValues()
{
for (int i = 0; i < m_inputStacks.size(); i++)
{
resetScaleAndWeights << < divup(m_inputStacks[i].getXYZPatchGridSize().z, 512), 512 >> >(m_inputStacks[i]);
checkCudaErrors(hipDeviceSynchronize());
dim3 blockSize3 = dim3(8, 8, 8);
dim3 gridSize3 = divup(dim3(m_inputStacks[i].getXYZPatchGridSize().x, m_inputStacks[i].getXYZPatchGridSize().y,
m_inputStacks[i].getXYZPatchGridSize().z), blockSize3);
InitializeEMValuesKernel<T> << <gridSize3, blockSize3 >> >(m_inputStacks[i]);
CHECK_ERROR(InitializeEMValuesKernel);
checkCudaErrors(hipDeviceSynchronize());
}
}
template <typename T>
inline __host__ __device__ T G_(T x, T s)
{
return __step*exp(-x*x / (2.0f*s)) / (sqrt(6.28f*s));
}
template <typename T>
inline __host__ __device__ T M_(T m)
{
return m*__step;
}
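// Note (illustrative): G_ is a Gaussian density scaled by the step size, with
// 6.28f standing in for 2*pi, and M_ is the matching step-scaled uniform
// (outlier) density; the EM weights computed below combine the two likelihoods.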
template <typename T>
__global__ void EStepKernel(PatchBasedVolume<T> inputStack, T _m, T _sigma, T _mix)
{
const uint3 pos = make_uint3(blockIdx.x* blockDim.x + threadIdx.x,
blockIdx.y* blockDim.y + threadIdx.y,
blockIdx.z* blockDim.z + threadIdx.z);
uint3 vSize = inputStack.getXYZPatchGridSize();
if (pos.x >= vSize.x || pos.y >= vSize.y || pos.z >= vSize.z)
return;
T s = inputStack.getPatchValue(pos);
T sw = inputStack.getWeightValue(pos);
if ((s == -1) || sw <= 0)
return;
T ss = inputStack.getSimulatedPatchValue(pos);
ImagePatch2D<T> patch = inputStack.getImagePatch2D(pos.z);
T scale = patch.scale;
T patchVal = s * scale;
patchVal -= ss;
//Gaussian distribution for inliers (likelihood)
T g = G_(patchVal, _sigma);
//Uniform distribution for outliers (likelihood)
T m = M_(_m);
T weight = (g * _mix) / (g *_mix + m * (1.0 - _mix));
if (sw > 0)
{
inputStack.setWeightValue(pos, weight);
}
else
{
inputStack.setWeightValue(pos, 0.0f);
}
}
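// Note (illustrative): the weight written above is the EM posterior probability
// that a voxel is an inlier,
//   w = g * mix / (g * mix + m * (1 - mix)),
// where g is the Gaussian likelihood of the residual (s * scale - ss) and m is
// the uniform outlier likelihood.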
template <typename T>
struct transformPatchPotential
{
__host__ __device__
thrust::tuple<T, T> operator()(const thrust::tuple<T, T>& a)
{
if (thrust::get<1>(a) > 0.99)
{
return thrust::make_tuple(((1.0 - thrust::get<0>(a)) * (1.0 - thrust::get<0>(a))), 1.0);
}
else
{
return thrust::make_tuple(0.0f, 0.0f);
}
}
};
template <typename T>
struct reducePatchPotential
{
__host__ __device__
thrust::tuple<T, T> operator()(const thrust::tuple<T, T>& a, const thrust::tuple<T, T>& b)
{
return thrust::make_tuple(thrust::get<0>(a) +thrust::get<0>(b), thrust::get<1>(a) +thrust::get<1>(b));
}
};
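// Note (illustrative): transform_reduce with the two functors above yields, per
// patch, sum((1 - w)^2) and a voxel count over voxels whose simulated weight
// exceeds 0.99; the host code below turns that into the patch potential
// sqrt(sum / count), or -1 when the patch has no such voxels.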
template <typename T>
__global__ void copyFromWeightsAndScales(PatchBasedVolume<T> inputStack, unsigned int ofs, T* scales, T* weights)
{
const unsigned int idx = blockIdx.x* blockDim.x + threadIdx.x;
uint3 vSize = inputStack.getXYZPatchGridSize();
if (idx >= vSize.z)
return;
ImagePatch2D<T> patch = inputStack.getImagePatch2D(idx);
scales[idx + ofs] = patch.scale;
weights[idx + ofs] = patch.patchWeight;
}
template <typename T>
__global__ void copyToWeightsAndScales(PatchBasedVolume<T> inputStack, unsigned int ofs, T* scales, T* weights)
{
const unsigned int idx = blockIdx.x* blockDim.x + threadIdx.x;
uint3 vSize = inputStack.getXYZPatchGridSize();
if (idx >= vSize.z)
return;
inputStack.getImagePatch2D(idx).scale = scales[idx + ofs];
inputStack.getImagePatch2D(idx).patchWeight = weights[idx + ofs];
}
template <typename T>
__global__ void copyToScales(PatchBasedVolume<T> inputStack, unsigned int ofs, T* scales)
{
const unsigned int idx = blockIdx.x* blockDim.x + threadIdx.x;
uint3 vSize = inputStack.getXYZPatchGridSize();
if (idx >= vSize.z)
return;
inputStack.getImagePatch2D(idx).scale = scales[idx + ofs];
}
template <typename T>
void patchBasedRobustStatistics_gpu<T>::EStep()
{
//TODO remove:
m_debug = true;
printf("EStep_gpu\n");
cerr.rdbuf(file_e.rdbuf());
cout.rdbuf(file.rdbuf());
unsigned int inputIndex;
unsigned int numPatches = 0;
for (int i = 0; i < m_inputStacks.size(); i++)
{
numPatches += m_inputStacks[i].getXYZPatchGridSize().z;
}
std::vector<T> patch_potential(numPatches, 0);
for (int i = 0; i < m_inputStacks.size(); i++)
{
dim3 blockSize3 = dim3(8, 8, 8);
dim3 gridSize3 = divup(dim3(m_inputStacks[i].getXYZPatchGridSize().x, m_inputStacks[i].getXYZPatchGridSize().y,
m_inputStacks[i].getXYZPatchGridSize().z), blockSize3);
EStepKernel<T> << <gridSize3, blockSize3 >> >(m_inputStacks[i], m_m_gpu, m_sigma_gpu, m_mix_gpu);
CHECK_ERROR(EStepKernel);
checkCudaErrors(hipDeviceSynchronize());
}
for (int i = 0; i < m_inputStacks.size(); i++)
{
unsigned int N = m_inputStacks[i].getXYZPatchGridSize().x*m_inputStacks[i].getXYZPatchGridSize().y;
for (unsigned int j = 0; j < m_inputStacks[i].getXYZPatchGridSize().z; j++)
{
thrust::device_ptr<T> d_w(m_inputStacks[i].getWeigthDataPtr() + (j*N));//w->data());
thrust::device_ptr<T> d_sw(m_inputStacks[i].getSimWeightsPtr() + (j*N));//sw->data());
thrust::tuple<T, T> out = thrust::transform_reduce(
thrust::make_zip_iterator(thrust::make_tuple<thrust::device_ptr<T>, thrust::device_ptr<T> >(d_w, d_sw)),
thrust::make_zip_iterator(thrust::make_tuple<thrust::device_ptr<T>, thrust::device_ptr<T> >(d_w + N, d_sw + N)),
transformPatchPotential<T>(), thrust::make_tuple<T, T>(0.0, 0.0), reducePatchPotential<T>());
if (thrust::get<1>(out) > 0)
{
patch_potential[j] = sqrt(thrust::get<0>(out) / thrust::get<1>(out));
}
else
{
patch_potential[j] = -1; // patch has no unpadded voxels
}
}
}
//////////////////////////////////////////////////////////////////////
//CPU part
//can stay on CPU
//Todo force-exclude patches predefined by a user, set their potentials to -1
//for (unsigned int i = 0; i < _force_excluded.size(); i++)
// patch_potential[_force_excluded[i]] = -1;
//TODO
//exclude patches identified as having small overlap with ROI, set their potentials to -1
//for (unsigned int i = 0; i < _small_patches.size(); i++)
// patch_potential_gpu[_small_patches[i]] = -1;
T* d_scale_gpu;
T* d_patch_weight_gpu;
checkCudaErrors(hipMalloc(&d_scale_gpu, sizeof(T)*numPatches));
checkCudaErrors(hipMalloc(&d_patch_weight_gpu, sizeof(T)*numPatches));
std::vector<T> h_scale_gpu(numPatches, 0);
std::vector<T> h_patch_weight_gpu(numPatches, 0);
unsigned int ofs = 0;
for (int i = 0; i < m_inputStacks.size(); i++)
{
copyFromWeightsAndScales << < divup(m_inputStacks[i].getXYZPatchGridSize().z, 512), 512 >> >(m_inputStacks[i], ofs, d_scale_gpu, d_patch_weight_gpu);
checkCudaErrors(hipDeviceSynchronize());
ofs += m_inputStacks[i].getXYZPatchGridSize().z;
}
checkCudaErrors(hipMemcpy(&h_scale_gpu[0], d_scale_gpu, sizeof(T)*numPatches, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&h_patch_weight_gpu[0], d_patch_weight_gpu, sizeof(T)*numPatches, hipMemcpyDeviceToHost));
//these are unrealistic scales pointing at misregistration - exclude the corresponding patches
for (inputIndex = 0; inputIndex < patch_potential.size(); inputIndex++)
if ((h_scale_gpu[inputIndex] < 0.2) || (h_scale_gpu[inputIndex] > 5)) {
patch_potential[inputIndex] = -1;
}
// exclude unrealistic transformations
if (m_debug) {
cout << setprecision(4);
cout << endl << "Patch potentials GPU: ";
for (inputIndex = 0; inputIndex < patch_potential.size(); inputIndex++)
cout << patch_potential[inputIndex] << " ";
cout << endl << "Patch weights GPU: ";
for (inputIndex = 0; inputIndex < h_patch_weight_gpu.size(); inputIndex++)
cout << h_patch_weight_gpu[inputIndex] << " ";
cout << endl << "Patch scales GPU: ";
for (inputIndex = 0; inputIndex < patch_potential.size(); inputIndex++)
cout << h_scale_gpu[inputIndex] << " ";
cout << endl;
}
  //Calculation of patch-wise robust statistics parameters.
  //This is theoretically the M-step,
  //but we want to use the latest estimate of the patch potentials
  //to update the parameters
//Calculate means of the inlier and outlier potentials
double sum = 0, den = 0, sum2 = 0, den2 = 0, maxs = 0, mins = 1;
for (inputIndex = 0; inputIndex < patch_potential.size(); inputIndex++)
if (patch_potential[inputIndex] >= 0) {
//calculate means
sum += patch_potential[inputIndex] * h_patch_weight_gpu[inputIndex];
den += h_patch_weight_gpu[inputIndex];
sum2 += patch_potential[inputIndex] * (1.0 - h_patch_weight_gpu[inputIndex]);
den2 += (1.0 - h_patch_weight_gpu[inputIndex]);
      //calculate min and max of potentials in case means need to be initialized
if (patch_potential[inputIndex] > maxs)
maxs = patch_potential[inputIndex];
if (patch_potential[inputIndex] < mins)
mins = patch_potential[inputIndex];
}
if (den > 0)
m_mean_s_gpu = (T)(sum / den);
else
m_mean_s_gpu = (T)mins;
if (den2 > 0)
m_mean_s2_gpu = (T)(sum2 / den2);
else
m_mean_s2_gpu = (T)((maxs + m_mean_s_gpu) / 2.0);
//Calculate the variances of the potentials
sum = 0;
den = 0;
sum2 = 0;
den2 = 0;
for (inputIndex = 0; inputIndex < patch_potential.size(); inputIndex++)
if (patch_potential[inputIndex] >= 0) {
sum += (patch_potential[inputIndex] - m_mean_s_gpu) * (patch_potential[inputIndex] - m_mean_s_gpu)
* h_patch_weight_gpu[inputIndex];
den += h_patch_weight_gpu[inputIndex];
sum2 += (patch_potential[inputIndex] - m_mean_s2_gpu) * (patch_potential[inputIndex] - m_mean_s2_gpu)
* (1 - h_patch_weight_gpu[inputIndex]);
den2 += (1 - h_patch_weight_gpu[inputIndex]);
}
//_sigma_s
if ((sum > 0) && (den > 0)) {
m_sigma_s_gpu = (T)(sum / den);
//do not allow too small sigma
if (m_sigma_s_gpu < m_step * m_step / 6.28)
m_sigma_s_gpu = (T)(m_step * m_step / 6.28);
}
else {
m_sigma_s_gpu = 0.025f;
if (m_debug) {
if (sum <= 0)
cout << "All patches are equal. ";
if (den < 0) //this should not happen
cout << "All patches are outliers. ";
cout << "Setting sigma to " << sqrt(m_sigma_s_gpu) << endl;
}
}
//sigma_s2
if ((sum2 > 0) && (den2 > 0)) {
m_sigma_s2_gpu = (T)(sum2 / den2);
//do not allow too small sigma
if (m_sigma_s2_gpu < m_step * m_step / 6.28)
m_sigma_s2_gpu = (T)(m_step * m_step / 6.28);
}
else {
m_sigma_s2_gpu = (m_mean_s2_gpu - m_mean_s_gpu) * (m_mean_s2_gpu - m_mean_s_gpu) / 4;
//do not allow too small sigma
if (m_sigma_s2_gpu < m_step * m_step / 6.28)
m_sigma_s2_gpu = (T)(m_step * m_step / 6.28);
if (m_debug) {
if (sum2 <= 0)
cout << "All patches are equal. ";
if (den2 <= 0)
cout << "All patches inliers. ";
cout << "Setting sigma_s2 to " << sqrt(m_sigma_s2_gpu) << endl;
}
}
//Calculate patch weights
double gs1, gs2;
for (inputIndex = 0; inputIndex < patch_potential.size(); inputIndex++) {
//Patch does not have any voxels in volumetric ROI
if (patch_potential[inputIndex] == -1) {
h_patch_weight_gpu[inputIndex] = 0;
continue;
}
//All patches are outliers or the means are not valid
if ((den <= 0) || (m_mean_s2_gpu <= m_mean_s_gpu)) {
h_patch_weight_gpu[inputIndex] = 1;
continue;
}
//likelihood for inliers
if (patch_potential[inputIndex] < m_mean_s2_gpu)
gs1 = G_(patch_potential[inputIndex] - m_mean_s_gpu, m_sigma_s_gpu);
else
gs1 = 0;
//likelihood for outliers
if (patch_potential[inputIndex] > m_mean_s_gpu)
gs2 = G_(patch_potential[inputIndex] - m_mean_s2_gpu, m_sigma_s2_gpu);
else
gs2 = 0;
//calculate patch weight
double likelihood = gs1 * m_mix_s_gpu + gs2 * (1 - m_mix_s_gpu);
if (likelihood > 0)
h_patch_weight_gpu[inputIndex] = (T)(gs1 * m_mix_s_gpu / likelihood);
else {
if (patch_potential[inputIndex] <= m_mean_s_gpu)
h_patch_weight_gpu[inputIndex] = 1;
if (patch_potential[inputIndex] >= m_mean_s2_gpu)
h_patch_weight_gpu[inputIndex] = 0;
if ((patch_potential[inputIndex] < m_mean_s2_gpu) && (patch_potential[inputIndex] > m_mean_s_gpu)) //should not happen
h_patch_weight_gpu[inputIndex] = 1;
}
}
//Update _mix_s this should also be part of MStep
sum = 0;
int num = 0;
for (inputIndex = 0; inputIndex < patch_potential.size(); inputIndex++)
if (patch_potential[inputIndex] >= 0) {
sum += h_patch_weight_gpu[inputIndex];
num++;
}
if (num > 0)
m_mix_s_gpu = (T)(sum / num);
else {
cout << "All patches are outliers. Setting _mix_s to 0.9." << endl;
m_mix_s_gpu = 0.9f;
}
if (m_debug) {
cout << setprecision(3);
cout << "Patch robust statistics parameters GPU: ";
cout << "means: " << m_mean_s_gpu << " " << m_mean_s2_gpu << " ";
cout << "sigmas: " << sqrt(m_sigma_s_gpu) << " " << sqrt(m_sigma_s2_gpu) << " ";
cout << "proportions: " << m_mix_s_gpu << " " << 1 - m_mix_s_gpu << endl;
cout << "Patch weights GPU: ";
for (inputIndex = 0; inputIndex < h_patch_weight_gpu.size(); inputIndex++)
cout << h_patch_weight_gpu[inputIndex] << " ";
cout << endl;
}
//update GPU patches
checkCudaErrors(hipMemcpy(d_scale_gpu, &h_scale_gpu[0], sizeof(T)*numPatches, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_patch_weight_gpu, &h_patch_weight_gpu[0], sizeof(T)*numPatches, hipMemcpyHostToDevice));
ofs = 0;
for (int i = 0; i < m_inputStacks.size(); i++)
{
copyToWeightsAndScales << < divup(m_inputStacks[i].getXYZPatchGridSize().z, 512), 512 >> >(m_inputStacks[i], ofs, d_scale_gpu, d_patch_weight_gpu);
checkCudaErrors(hipDeviceSynchronize());
ofs += m_inputStacks[i].getXYZPatchGridSize().z;
}
checkCudaErrors(hipFree(d_scale_gpu));
checkCudaErrors(hipFree(d_patch_weight_gpu));
cout.rdbuf(strm_buffer);
cerr.rdbuf(strm_buffer_e);
}
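//Per-voxel functor for the voxel-wise M-step: valid voxels (s != -1, simulated weight > 0.99) return
//(e*e*w, w, 1, e, e) with e = s*scale - simulated value, so a single transform_reduce yields the sums for
//sigma and mix, the voxel count, and the min/max error used later to set the uniform density m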
template <typename T>
struct transformMStep3DNoBias
{
__host__ __device__
thrust::tuple<T, T, T, T, T> operator()(const thrust::tuple<T, T, T, T, T>& v)
//thrust::tuple<sigma_, mix_, count, e, e> //this order is very important for the thrust optimization
{
const T s_ = thrust::get<0>(v);
const T w_ = thrust::get<1>(v);
const T ss_ = thrust::get<2>(v);
const T sw_ = thrust::get<3>(v);
const T scale = thrust::get<4>(v);
T sigma_ = 0.0f;
T mix_ = 0.0f;
T count = 0.0f;
T e = 0.0f;
thrust::tuple<T, T, T, T, T> t;
if (s_ != -1.0f && sw_ > 0.99f)
{
e = (s_ * scale) - ss_;
sigma_ = e * e * w_;
mix_ = w_;
count = 1.0;
T e1 = e;
t = thrust::make_tuple(sigma_, mix_, count, e, e1);
}
else
{
t = thrust::make_tuple(0.0, 0.0, 0.0, FLT_MAX, FLT_MIN);
}
return t;
}
};
template <typename T>
struct reduceMStep
{
__host__ __device__
thrust::tuple<T, T, T, T, T> operator()(const thrust::tuple<T, T, T, T, T>& a,
const thrust::tuple<T, T, T, T, T>& b)
{
return thrust::make_tuple(thrust::get<0>(a)+thrust::get<0>(b), thrust::get<1>(a)+thrust::get<1>(b),
thrust::get<2>(a)+thrust::get<2>(b), min(thrust::get<3>(a), thrust::get<3>(b)),
max(thrust::get<4>(a), thrust::get<4>(b)));
}
};
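//Kernel that writes each patch's current scale into the per-voxel buffer so the M-step reduction can read
//the scale alongside the corresponding voxel values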
template <typename T>
__global__ void initBufferWithScales(PatchBasedVolume<T> inputStack)
{
const uint3 pos = make_uint3(blockIdx.x* blockDim.x + threadIdx.x,
blockIdx.y* blockDim.y + threadIdx.y,
blockIdx.z* blockDim.z + threadIdx.z);
uint3 vSize = inputStack.getXYZPatchGridSize();
if (pos.x >= vSize.x || pos.y >= vSize.y || pos.z >= vSize.z)
return;
inputStack.setBufferValue(pos, inputStack.getImagePatch2D(pos.z).scale);
}
template <typename T>
void patchBasedRobustStatistics_gpu<T>::MStep(int iter)
{
printf("MStep_gpu\n");
cerr.rdbuf(file_e.rdbuf());
cout.rdbuf(file.rdbuf());
T num = 0;
T min_ = std::numeric_limits<T>::max();
T max_ = std::numeric_limits<T>::min();
T sigma = 0;
T mix = 0;
thrust::tuple<T, T, T, T, T> results;
for (int i = 0; i < m_inputStacks.size(); i++)
{
thrust::device_ptr<T> d_s(m_inputStacks[i].getPatchesPtr());
thrust::device_ptr<T> d_w(m_inputStacks[i].getWeigthDataPtr());
thrust::device_ptr<T> d_ss(m_inputStacks[i].getSimPatchesPtr());
thrust::device_ptr<T> d_sw(m_inputStacks[i].getSimWeightsPtr());
thrust::device_ptr<T> d_buf(m_inputStacks[i].getBufferPtr());
unsigned int N1 = m_inputStacks[i].getXYZPatchGridSize().x*m_inputStacks[i].getXYZPatchGridSize().y;
//thrust::constant_iterator<int> first(h_scales[j]);
for (unsigned int ii = 0; ii < m_inputStacks.size(); ii++)
{
dim3 blockSize3 = dim3(8, 8, 8);
dim3 gridSize3 = divup(dim3(m_inputStacks[ii].getXYZPatchGridSize().x, m_inputStacks[ii].getXYZPatchGridSize().y,
m_inputStacks[ii].getXYZPatchGridSize().z), blockSize3);
initBufferWithScales<T> << <gridSize3, blockSize3 >> >(m_inputStacks[ii]);
CHECK_ERROR(initBufferWithScales);
checkCudaErrors(hipDeviceSynchronize());
}
unsigned int N3 = m_inputStacks[i].getXYZPatchGridSize().x*m_inputStacks[i].getXYZPatchGridSize().y*m_inputStacks[i].getXYZPatchGridSize().z;
results = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(d_s, d_w, d_ss, d_sw, d_buf)),
thrust::make_zip_iterator(thrust::make_tuple(d_s + N3, d_w + N3, d_ss + N3, d_sw + N3, d_buf + N3)), transformMStep3DNoBias<T>(),
thrust::make_tuple<T, T, T, T, T>(0.0, 0.0, 0.0, 0.0, 0.0), reduceMStep<T>());
sigma += get<0>(results);
mix += get<1>(results);
num += get<2>(results);
min_ = min(min_, get<3>(results));
max_ = max(max_, get<4>(results));
}
if (mix > 0) {
m_sigma_gpu = sigma / mix;
}
else {
printf("Something went wrong: sigma= %f mix= %f\n", sigma, mix);
//exit(1);
}
if (m_sigma_gpu < m_step * m_step / 6.28f)
m_sigma_gpu = m_step * m_step / 6.28f;
if (iter > 1)
m_mix_gpu = mix / num;
//Calculate m
m_m_gpu = 1.0f / (max_ - min_);
std::cout.precision(10);
if (m_debug) {
cout << "Voxel-wise robust statistics parameters GPU: ";
cout << "sigma = " << sqrt(m_sigma_gpu) << " mix = " << m_mix_gpu << " ";
cout << " m = " << m_m_gpu << endl;
}
cout.rdbuf(strm_buffer);
cerr.rdbuf(strm_buffer_e);
}
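//Per-voxel functor for the scale estimate: valid voxels return (w*s*ss, w*s*s); the ratio of the two reduced
//sums gives the weighted least-squares scale between acquired and simulated patch values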
template <typename T>
struct transformScalenoBias
{
transformScalenoBias(){}
__host__ __device__
thrust::tuple<T, T> operator()(const thrust::tuple<T, T, T, T>& v)
{
T s_ = thrust::get<0>(v);
const T w_ = thrust::get<1>(v);
const T ss_ = thrust::get<2>(v);
const T sw_ = thrust::get<3>(v);
if ((s_ == -1.0f) || sw_ <= 0.99f)
{
return thrust::make_tuple(0.0f, 0.0f);
}
else
{
T scalenum = w_ * s_ * ss_;
T scaleden = w_ * s_ * s_;
return thrust::make_tuple(scalenum, scaleden);
}
}
};
template <typename T>
struct reduceScale
{
reduceScale(){}
__host__ __device__
thrust::tuple<T, T> operator()(const thrust::tuple<T, T>& a, const thrust::tuple<T, T>& b)
{
return thrust::make_tuple(thrust::get<0>(a)+thrust::get<0>(b), thrust::get<1>(a)+thrust::get<1>(b));
}
};
template <typename T>
void patchBasedRobustStatistics_gpu<T>::Scale()
{
printf("Scale_gpu\n");
cerr.rdbuf(file_e.rdbuf());
cout.rdbuf(file.rdbuf());
std::vector<T> scale_vec;
//TODO reduce this on GPU
for (int i = 0; i < m_inputStacks.size(); i++)
{
unsigned int N = m_inputStacks[i].getXYZPatchGridSize().x*m_inputStacks[i].getXYZPatchGridSize().y;
for (int j = 0; j < m_inputStacks[i].getXYZPatchGridSize().z; j++)
{
thrust::device_ptr<T> d_s(m_inputStacks[i].getPatchesPtr() + (j*N));
thrust::device_ptr<T> d_w(m_inputStacks[i].getWeigthDataPtr() + (j*N));
thrust::device_ptr<T> d_ss(m_inputStacks[i].getSimPatchesPtr() + (j*N));
thrust::device_ptr<T> d_sw(m_inputStacks[i].getSimWeightsPtr() + (j*N));
thrust::tuple<T, T> out = thrust::make_tuple<T, T>(0.0f, 0.0f);
out = thrust::transform_reduce(
thrust::make_zip_iterator(thrust::make_tuple<thrust::device_ptr<T>, thrust::device_ptr<T>, thrust::device_ptr<T>, thrust::device_ptr<T> >(d_s, d_w, d_ss, d_sw)),
thrust::make_zip_iterator(thrust::make_tuple<thrust::device_ptr<T>, thrust::device_ptr<T>, thrust::device_ptr<T>, thrust::device_ptr<T> >(d_s + N, d_w + N, d_ss + N, d_sw + N)), transformScalenoBias<T>(),
thrust::make_tuple<T, T>(0.0, 0.0), reduceScale<T>());
if (thrust::get<1>(out) != 0.0)
{
scale_vec.push_back(thrust::get<0>(out) / thrust::get<1>(out));
//scale_vec[j] = thrust::get<0>(out) / thrust::get<1>(out);
}
else
{
scale_vec.push_back(1.0);
//scale_vec[j] = 1.0;
}
}
}
unsigned int numPatches = 0;
for (int i = 0; i < m_inputStacks.size(); i++)
{
numPatches += m_inputStacks[i].getXYZPatchGridSize().z;
}
T* d_scale_gpu;
checkCudaErrors(hipMalloc(&d_scale_gpu, sizeof(T)*numPatches));
checkCudaErrors(hipMemcpy(d_scale_gpu, &scale_vec[0], sizeof(T)*numPatches, hipMemcpyHostToDevice));
//copyToPatches
unsigned int ofs = 0;
for (int i = 0; i < m_inputStacks.size(); i++)
{
copyToScales << < divup(m_inputStacks[i].getXYZPatchGridSize().z, 512), 512 >> >(m_inputStacks[i], ofs, d_scale_gpu);
checkCudaErrors(hipDeviceSynchronize());
ofs += m_inputStacks[i].getXYZPatchGridSize().z;
}
if (m_debug ) {
cout << setprecision(3);
cout << "Patch scale GPU = ";
for (unsigned int inputIndex = 0; inputIndex < scale_vec.size(); ++inputIndex)
cout << inputIndex << ":" << scale_vec[inputIndex] << " ";
cout << endl;
}
checkCudaErrors(hipFree(d_scale_gpu));
cout.rdbuf(strm_buffer);
cerr.rdbuf(strm_buffer_e);
}
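//Per-voxel functor for initializing the robust statistics: voxels that are valid, inside the volume (si == 1)
//and confidently simulated (sw > 0.99) return ((s-ss)^2, 1); the reduced sums give the initial sigma = sa/sb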
template <typename T>
struct transformRS
{
__host__ __device__
thrust::tuple<T, unsigned int> operator()(const thrust::tuple<T, char, T, T>& v)
{
T s_ = thrust::get<0>(v);
char si_ = thrust::get<1>(v);
T ss_ = thrust::get<2>(v);
T sw_ = thrust::get<3>(v);
thrust::tuple<T, unsigned int> t = thrust::make_tuple(0.0f, 0);
if (s_ != -1 && si_ == 1 && sw_ > 0.99)
{
T sval = s_ - ss_;
t = thrust::make_tuple<T, unsigned int>(sval*sval, 1);
}
return t; // return t <sigma,num>
}
};
template <typename T>
struct reduceRS
{
__host__ __device__
thrust::tuple<T, unsigned int> operator()(const thrust::tuple<T, unsigned int>& a,
const thrust::tuple<T, unsigned int>& b)
{
return thrust::make_tuple<T, unsigned int>(thrust::get<0>(a)+thrust::get<0>(b), thrust::get<1>(a)+thrust::get<1>(b));
}
};
template <typename T>
struct square
{
__host__ __device__
thrust::tuple<T, unsigned int> operator()(const thrust::tuple<T, unsigned int>& a,
const thrust::tuple<T, unsigned int>& b)
{
return make_tuple<T, unsigned int>(thrust::get<0>(a)+thrust::get<0>(b), thrust::get<1>(a)+thrust::get<1>(b));
}
};
template <typename T>
void patchBasedRobustStatistics_gpu<T>::InitializeRobustStatistics(T _min_intensity, T _max_intensity, int cuda_device)
{
//TODO
//if patch does not have an overlap with ROI, set its weight to zero
//Force exclusion of patches predefined by user
T sa = 0;
T sb = 0;
m_cuda_device = cuda_device;
checkCudaErrors(hipSetDevice(m_cuda_device));
for (int i = 0; i < m_inputStacks.size(); i++)
{
thrust::device_ptr<T> d_s(m_inputStacks[i].getPatchesPtr());
thrust::device_ptr<char> d_si(m_inputStacks[i].getSimInsidePtr());
thrust::device_ptr<T> d_ss(m_inputStacks[i].getSimPatchesPtr());
thrust::device_ptr<T> d_sw(m_inputStacks[i].getSimWeightsPtr());
unsigned int N = m_inputStacks[i].getXYZPatchGridSize().x*m_inputStacks[i].getXYZPatchGridSize().y*
m_inputStacks[i].getXYZPatchGridSize().z;
//thrust::make_zip_iterator(thrust::make_tuple(d_s, d_si, d_ss, d_sw));
//thrust::make_zip_iterator(thrust::make_tuple(d_s + N, d_si + N, d_ss + N, d_sw + N));
//thrust::transform_reduce(InputIterator first, InputIterator last, UnaryFunction unary_op, OutputType init, BinaryFunction binary_op)
thrust::tuple<T, unsigned int> out = thrust::transform_reduce(
thrust::make_zip_iterator(thrust::make_tuple<thrust::device_ptr<T>, thrust::device_ptr<char>, thrust::device_ptr<T>, thrust::device_ptr<T> >(d_s, d_si, d_ss, d_sw)),
thrust::make_zip_iterator(thrust::make_tuple<thrust::device_ptr<T>, thrust::device_ptr<char>, thrust::device_ptr<T>, thrust::device_ptr<T> >(d_s + N, d_si + N, d_ss + N, d_sw + N)),
transformRS<T>(),
thrust::make_tuple<T, unsigned int>(0.0, 0),
reduceRS<T>());
sa += get<0>(out);
sb += (T)get<1>(out);
}
// printf("sa = %f - sb = %f\n",sa,sb);
if (sb == 0)
{
printf("ERROR: sb = 0!! no sigma computed! exiting!\n");
exit(-1);
}
//initialize sigma for inlier voxel errors
m_sigma_gpu = sa / sb;
//initialize sigma for patch-wise robust statistics
m_sigma_s_gpu = 0.025f;
//initialize mixing proportion for inlier class in voxel-wise robust statistics (correctly matched voxels)
m_mix_gpu = 0.9f;
//initialize mixing proportion for outlier class in patch-wise robust statistics
m_mix_s_gpu = 0.9f;
//Initialize value for uniform distribution according to the range of intensities
m_m_gpu = (T)(1.0f / (2.1f * _max_intensity - 1.9f * _min_intensity));
if (m_debug)
{
std::cout << "Initializing robust statistics GPU: " << "sigma=" << sqrt(m_sigma_gpu) << " " << "m=" << m_m_gpu
<< " " << "mix=" << m_mix_gpu << " " << "mix_s=" << m_mix_s_gpu << std::endl;
}
}
template <typename T>
patchBasedRobustStatistics_gpu<T>::patchBasedRobustStatistics_gpu(std::vector<PatchBasedVolume<T> > & _inputStacks) :
m_inputStacks(_inputStacks), m_debug(false)
{
m_step = 0.0001;
strm_buffer = cout.rdbuf();
strm_buffer_e = cerr.rdbuf();
file.open("log-EM.txt");
file_e.open("log-EM-error.txt");
}
template <typename T>
void patchBasedRobustStatistics_gpu<T>::updateInputStacks(std::vector<PatchBasedVolume<T> > & _inputStacks)
{
m_inputStacks.clear();
m_inputStacks = _inputStacks;
}
template <typename T>
patchBasedRobustStatistics_gpu<T>::~patchBasedRobustStatistics_gpu()
{
}
template class patchBasedRobustStatistics_gpu < float > ;
template class patchBasedRobustStatistics_gpu < double > ; | 556bd31342f1c0433c1131833c97169408bc5e26.cu | /*=========================================================================
* GPU accelerated motion compensation for MRI
*
* Copyright (c) 2016 Bernhard Kainz, Amir Alansary, Maria Kuklisova-Murgasova,
* Kevin Keraudren, Markus Steinberger
* ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
=========================================================================*/
#include "patchBasedRobustStatistics_gpu.cuh"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/functional.h>
#include <thrust/tuple.h>
#include <thrust/transform_reduce.h>
#include <math.h>
//The globally constant point spread function
extern __constant__ PointSpreadFunction<float> _PSF;
using namespace thrust;
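//Kernel that resets every 2D patch's scale and weight to 1 when the EM values are (re)initialized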
template <typename T>
__global__ void resetScaleAndWeights(PatchBasedVolume<T> inputStack)
{
const unsigned int idx = blockIdx.x* blockDim.x + threadIdx.x;
uint3 vSize = inputStack.getXYZPatchGridSize();
if (idx >= vSize.z)
return;
inputStack.getImagePatch2D(idx).scale = 1.0f;
inputStack.getImagePatch2D(idx).patchWeight = 1.0f;
}
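//Kernel that initializes the per-voxel EM weights: 1 for valid voxels (value != -1 and != 0), 0 otherwise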
template <typename T>
__global__ void InitializeEMValuesKernel(PatchBasedVolume<T> inputStack)
{
const uint3 pos = make_uint3(blockIdx.x* blockDim.x + threadIdx.x,
blockIdx.y* blockDim.y + threadIdx.y,
blockIdx.z* blockDim.z + threadIdx.z);
uint3 vSize = inputStack.getXYZPatchGridSize();
if (pos.x >= vSize.x || pos.y >= vSize.y || pos.z >= vSize.z)
return;
T s = inputStack.getPatchValue(pos);
if (s != -1 && s != 0)
{
inputStack.setWeightValue(pos, 1);
}
else
{
inputStack.setWeightValue(pos, 0);
}
}
template <typename T>
void patchBasedRobustStatistics_gpu<T>::initializeEMValues()
{
for (int i = 0; i < m_inputStacks.size(); i++)
{
resetScaleAndWeights << < divup(m_inputStacks[i].getXYZPatchGridSize().z, 512), 512 >> >(m_inputStacks[i]);
checkCudaErrors(cudaDeviceSynchronize());
dim3 blockSize3 = dim3(8, 8, 8);
dim3 gridSize3 = divup(dim3(m_inputStacks[i].getXYZPatchGridSize().x, m_inputStacks[i].getXYZPatchGridSize().y,
m_inputStacks[i].getXYZPatchGridSize().z), blockSize3);
InitializeEMValuesKernel<T> << <gridSize3, blockSize3 >> >(m_inputStacks[i]);
CHECK_ERROR(InitializeEMValuesKernel);
checkCudaErrors(cudaDeviceSynchronize());
}
}
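//G_ is the Gaussian density of residual x with variance s, scaled by the integration step __step;
//M_ is the matching scaled uniform density used for the outlier class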
template <typename T>
inline __host__ __device__ T G_(T x, T s)
{
return __step*exp(-x*x / (2.0f*s)) / (sqrt(6.28f*s));
}
template <typename T>
inline __host__ __device__ T M_(T m)
{
return m*__step;
}
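//E-step kernel: for every valid voxel it compares the scaled acquired value with the simulated value and sets
//the voxel weight to the posterior probability of being an inlier under the Gaussian-inlier/uniform-outlier mixture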
template <typename T>
__global__ void EStepKernel(PatchBasedVolume<T> inputStack, T _m, T _sigma, T _mix)
{
const uint3 pos = make_uint3(blockIdx.x* blockDim.x + threadIdx.x,
blockIdx.y* blockDim.y + threadIdx.y,
blockIdx.z* blockDim.z + threadIdx.z);
uint3 vSize = inputStack.getXYZPatchGridSize();
if (pos.x >= vSize.x || pos.y >= vSize.y || pos.z >= vSize.z)
return;
T s = inputStack.getPatchValue(pos);
T sw = inputStack.getWeightValue(pos);
if ((s == -1) || sw <= 0)
return;
T ss = inputStack.getSimulatedPatchValue(pos);
ImagePatch2D<T> patch = inputStack.getImagePatch2D(pos.z);
T scale = patch.scale;
T patchVal = s * scale;
patchVal -= ss;
//Gaussian distribution for inliers (likelihood)
T g = G_(patchVal, _sigma);
//Uniform distribution for outliers (likelihood)
T m = M_(_m);
T weight = (g * _mix) / (g *_mix + m * (1.0 - _mix));
if (sw > 0)
{
inputStack.setWeightValue(pos, weight);
}
else
{
inputStack.setWeightValue(pos, 0.0f);
}
}
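//Functors for the per-patch potential: voxels with simulated weight > 0.99 contribute ((1-w)^2, 1); the
//reduction sums both and sqrt(sum/count) becomes the patch potential used by the patch-wise statistics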
template <typename T>
struct transformPatchPotential
{
__host__ __device__
thrust::tuple<T, T> operator()(const thrust::tuple<T, T>& a)
{
if (thrust::get<1>(a) > 0.99)
{
return thrust::make_tuple(((1.0 - thrust::get<0>(a)) * (1.0 - thrust::get<0>(a))), 1.0);
}
else
{
return thrust::make_tuple(0.0f, 0.0f);
}
}
};
template <typename T>
struct reducePatchPotential
{
__host__ __device__
thrust::tuple<T, T> operator()(const thrust::tuple<T, T>& a, const thrust::tuple<T, T>& b)
{
return thrust::make_tuple(thrust::get<0>(a) +thrust::get<0>(b), thrust::get<1>(a) +thrust::get<1>(b));
}
};
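//Gather/scatter kernels that copy per-patch scale and weight between each PatchBasedVolume and flat device
//arrays, with ofs giving the running patch offset of the current stack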
template <typename T>
__global__ void copyFromWeightsAndScales(PatchBasedVolume<T> inputStack, unsigned int ofs, T* scales, T* weights)
{
const unsigned int idx = blockIdx.x* blockDim.x + threadIdx.x;
uint3 vSize = inputStack.getXYZPatchGridSize();
if (idx >= vSize.z)
return;
ImagePatch2D<T> patch = inputStack.getImagePatch2D(idx);
scales[idx + ofs] = patch.scale;
weights[idx + ofs] = patch.patchWeight;
}
template <typename T>
__global__ void copyToWeightsAndScales(PatchBasedVolume<T> inputStack, unsigned int ofs, T* scales, T* weights)
{
const unsigned int idx = blockIdx.x* blockDim.x + threadIdx.x;
uint3 vSize = inputStack.getXYZPatchGridSize();
if (idx >= vSize.z)
return;
inputStack.getImagePatch2D(idx).scale = scales[idx + ofs];
inputStack.getImagePatch2D(idx).patchWeight = weights[idx + ofs];
}
template <typename T>
__global__ void copyToScales(PatchBasedVolume<T> inputStack, unsigned int ofs, T* scales)
{
const unsigned int idx = blockIdx.x* blockDim.x + threadIdx.x;
uint3 vSize = inputStack.getXYZPatchGridSize();
if (idx >= vSize.z)
return;
inputStack.getImagePatch2D(idx).scale = scales[idx + ofs];
}
template <typename T>
void patchBasedRobustStatistics_gpu<T>::EStep()
{
//TODO remove:
m_debug = true;
printf("EStep_gpu\n");
cerr.rdbuf(file_e.rdbuf());
cout.rdbuf(file.rdbuf());
unsigned int inputIndex;
unsigned int numPatches = 0;
for (int i = 0; i < m_inputStacks.size(); i++)
{
numPatches += m_inputStacks[i].getXYZPatchGridSize().z;
}
std::vector<T> patch_potential(numPatches, 0);
for (int i = 0; i < m_inputStacks.size(); i++)
{
dim3 blockSize3 = dim3(8, 8, 8);
dim3 gridSize3 = divup(dim3(m_inputStacks[i].getXYZPatchGridSize().x, m_inputStacks[i].getXYZPatchGridSize().y,
m_inputStacks[i].getXYZPatchGridSize().z), blockSize3);
EStepKernel<T> << <gridSize3, blockSize3 >> >(m_inputStacks[i], m_m_gpu, m_sigma_gpu, m_mix_gpu);
CHECK_ERROR(EStepKernel);
checkCudaErrors(cudaDeviceSynchronize());
}
for (int i = 0; i < m_inputStacks.size(); i++)
{
unsigned int N = m_inputStacks[i].getXYZPatchGridSize().x*m_inputStacks[i].getXYZPatchGridSize().y;
for (unsigned int j = 0; j < m_inputStacks[i].getXYZPatchGridSize().z; j++)
{
thrust::device_ptr<T> d_w(m_inputStacks[i].getWeigthDataPtr() + (j*N));//w->data());
thrust::device_ptr<T> d_sw(m_inputStacks[i].getSimWeightsPtr() + (j*N));//sw->data());
thrust::tuple<T, T> out = thrust::transform_reduce(
thrust::make_zip_iterator(thrust::make_tuple<thrust::device_ptr<T>, thrust::device_ptr<T> >(d_w, d_sw)),
thrust::make_zip_iterator(thrust::make_tuple<thrust::device_ptr<T>, thrust::device_ptr<T> >(d_w + N, d_sw + N)),
transformPatchPotential<T>(), thrust::make_tuple<T, T>(0.0, 0.0), reducePatchPotential<T>());
if (thrust::get<1>(out) > 0)
{
patch_potential[j] = sqrt(thrust::get<0>(out) / thrust::get<1>(out));
}
else
{
patch_potential[j] = -1; // patch has no unpadded voxels
}
}
}
//////////////////////////////////////////////////////////////////////
//CPU part
//can stay on CPU
//Todo force-exclude patches predefined by a user, set their potentials to -1
//for (unsigned int i = 0; i < _force_excluded.size(); i++)
// patch_potential[_force_excluded[i]] = -1;
//TODO
//exclude patches identified as having small overlap with ROI, set their potentials to -1
//for (unsigned int i = 0; i < _small_patches.size(); i++)
// patch_potential_gpu[_small_patches[i]] = -1;
T* d_scale_gpu;
T* d_patch_weight_gpu;
checkCudaErrors(cudaMalloc(&d_scale_gpu, sizeof(T)*numPatches));
checkCudaErrors(cudaMalloc(&d_patch_weight_gpu, sizeof(T)*numPatches));
std::vector<T> h_scale_gpu(numPatches, 0);
std::vector<T> h_patch_weight_gpu(numPatches, 0);
unsigned int ofs = 0;
for (int i = 0; i < m_inputStacks.size(); i++)
{
copyFromWeightsAndScales << < divup(m_inputStacks[i].getXYZPatchGridSize().z, 512), 512 >> >(m_inputStacks[i], ofs, d_scale_gpu, d_patch_weight_gpu);
checkCudaErrors(cudaDeviceSynchronize());
ofs += m_inputStacks[i].getXYZPatchGridSize().z;
}
checkCudaErrors(cudaMemcpy(&h_scale_gpu[0], d_scale_gpu, sizeof(T)*numPatches, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&h_patch_weight_gpu[0], d_patch_weight_gpu, sizeof(T)*numPatches, cudaMemcpyDeviceToHost));
//these are unrealistic scales pointing at misregistration - exclude the corresponding patches
for (inputIndex = 0; inputIndex < patch_potential.size(); inputIndex++)
if ((h_scale_gpu[inputIndex] < 0.2) || (h_scale_gpu[inputIndex] > 5)) {
patch_potential[inputIndex] = -1;
}
// exclude unrealistic transformations
if (m_debug) {
cout << setprecision(4);
cout << endl << "Patch potentials GPU: ";
for (inputIndex = 0; inputIndex < patch_potential.size(); inputIndex++)
cout << patch_potential[inputIndex] << " ";
cout << endl << "Patch weights GPU: ";
for (inputIndex = 0; inputIndex < h_patch_weight_gpu.size(); inputIndex++)
cout << h_patch_weight_gpu[inputIndex] << " ";
cout << endl << "Patch scales GPU: ";
for (inputIndex = 0; inputIndex < patch_potential.size(); inputIndex++)
cout << h_scale_gpu[inputIndex] << " ";
cout << endl;
}
//Calculation of patch-wise robust statistics parameters.
//This is theoretically M-step,
//but we want to use latest estimate of patch potentials
//to update the parameters
//Calculate means of the inlier and outlier potentials
double sum = 0, den = 0, sum2 = 0, den2 = 0, maxs = 0, mins = 1;
for (inputIndex = 0; inputIndex < patch_potential.size(); inputIndex++)
if (patch_potential[inputIndex] >= 0) {
//calculate means
sum += patch_potential[inputIndex] * h_patch_weight_gpu[inputIndex];
den += h_patch_weight_gpu[inputIndex];
sum2 += patch_potential[inputIndex] * (1.0 - h_patch_weight_gpu[inputIndex]);
den2 += (1.0 - h_patch_weight_gpu[inputIndex]);
//calculate min and max of potentials in case means need to be initialized
if (patch_potential[inputIndex] > maxs)
maxs = patch_potential[inputIndex];
if (patch_potential[inputIndex] < mins)
mins = patch_potential[inputIndex];
}
if (den > 0)
m_mean_s_gpu = (T)(sum / den);
else
m_mean_s_gpu = (T)mins;
if (den2 > 0)
m_mean_s2_gpu = (T)(sum2 / den2);
else
m_mean_s2_gpu = (T)((maxs + m_mean_s_gpu) / 2.0);
//Calculate the variances of the potentials
sum = 0;
den = 0;
sum2 = 0;
den2 = 0;
for (inputIndex = 0; inputIndex < patch_potential.size(); inputIndex++)
if (patch_potential[inputIndex] >= 0) {
sum += (patch_potential[inputIndex] - m_mean_s_gpu) * (patch_potential[inputIndex] - m_mean_s_gpu)
* h_patch_weight_gpu[inputIndex];
den += h_patch_weight_gpu[inputIndex];
sum2 += (patch_potential[inputIndex] - m_mean_s2_gpu) * (patch_potential[inputIndex] - m_mean_s2_gpu)
* (1 - h_patch_weight_gpu[inputIndex]);
den2 += (1 - h_patch_weight_gpu[inputIndex]);
}
//_sigma_s
if ((sum > 0) && (den > 0)) {
m_sigma_s_gpu = (T)(sum / den);
//do not allow too small sigma
if (m_sigma_s_gpu < m_step * m_step / 6.28)
m_sigma_s_gpu = (T)(m_step * m_step / 6.28);
}
else {
m_sigma_s_gpu = 0.025f;
if (m_debug) {
if (sum <= 0)
cout << "All patches are equal. ";
if (den < 0) //this should not happen
cout << "All patches are outliers. ";
cout << "Setting sigma to " << sqrt(m_sigma_s_gpu) << endl;
}
}
//sigma_s2
if ((sum2 > 0) && (den2 > 0)) {
m_sigma_s2_gpu = (T)(sum2 / den2);
//do not allow too small sigma
if (m_sigma_s2_gpu < m_step * m_step / 6.28)
m_sigma_s2_gpu = (T)(m_step * m_step / 6.28);
}
else {
m_sigma_s2_gpu = (m_mean_s2_gpu - m_mean_s_gpu) * (m_mean_s2_gpu - m_mean_s_gpu) / 4;
//do not allow too small sigma
if (m_sigma_s2_gpu < m_step * m_step / 6.28)
m_sigma_s2_gpu = (T)(m_step * m_step / 6.28);
if (m_debug) {
if (sum2 <= 0)
cout << "All patches are equal. ";
if (den2 <= 0)
cout << "All patches inliers. ";
cout << "Setting sigma_s2 to " << sqrt(m_sigma_s2_gpu) << endl;
}
}
//Calculate patch weights
double gs1, gs2;
for (inputIndex = 0; inputIndex < patch_potential.size(); inputIndex++) {
//Patch does not have any voxels in volumetric ROI
if (patch_potential[inputIndex] == -1) {
h_patch_weight_gpu[inputIndex] = 0;
continue;
}
//All patches are outliers or the means are not valid
if ((den <= 0) || (m_mean_s2_gpu <= m_mean_s_gpu)) {
h_patch_weight_gpu[inputIndex] = 1;
continue;
}
//likelihood for inliers
if (patch_potential[inputIndex] < m_mean_s2_gpu)
gs1 = G_(patch_potential[inputIndex] - m_mean_s_gpu, m_sigma_s_gpu);
else
gs1 = 0;
//likelihood for outliers
if (patch_potential[inputIndex] > m_mean_s_gpu)
gs2 = G_(patch_potential[inputIndex] - m_mean_s2_gpu, m_sigma_s2_gpu);
else
gs2 = 0;
//calculate patch weight
double likelihood = gs1 * m_mix_s_gpu + gs2 * (1 - m_mix_s_gpu);
if (likelihood > 0)
h_patch_weight_gpu[inputIndex] = (T)(gs1 * m_mix_s_gpu / likelihood);
else {
if (patch_potential[inputIndex] <= m_mean_s_gpu)
h_patch_weight_gpu[inputIndex] = 1;
if (patch_potential[inputIndex] >= m_mean_s2_gpu)
h_patch_weight_gpu[inputIndex] = 0;
if ((patch_potential[inputIndex] < m_mean_s2_gpu) && (patch_potential[inputIndex] > m_mean_s_gpu)) //should not happen
h_patch_weight_gpu[inputIndex] = 1;
}
}
//Update _mix_s this should also be part of MStep
sum = 0;
int num = 0;
for (inputIndex = 0; inputIndex < patch_potential.size(); inputIndex++)
if (patch_potential[inputIndex] >= 0) {
sum += h_patch_weight_gpu[inputIndex];
num++;
}
if (num > 0)
m_mix_s_gpu = (T)(sum / num);
else {
cout << "All patches are outliers. Setting _mix_s to 0.9." << endl;
m_mix_s_gpu = 0.9f;
}
if (m_debug) {
cout << setprecision(3);
cout << "Patch robust statistics parameters GPU: ";
cout << "means: " << m_mean_s_gpu << " " << m_mean_s2_gpu << " ";
cout << "sigmas: " << sqrt(m_sigma_s_gpu) << " " << sqrt(m_sigma_s2_gpu) << " ";
cout << "proportions: " << m_mix_s_gpu << " " << 1 - m_mix_s_gpu << endl;
cout << "Patch weights GPU: ";
for (inputIndex = 0; inputIndex < h_patch_weight_gpu.size(); inputIndex++)
cout << h_patch_weight_gpu[inputIndex] << " ";
cout << endl;
}
//update GPU patches
checkCudaErrors(cudaMemcpy(d_scale_gpu, &h_scale_gpu[0], sizeof(T)*numPatches, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_patch_weight_gpu, &h_patch_weight_gpu[0], sizeof(T)*numPatches, cudaMemcpyHostToDevice));
ofs = 0;
for (int i = 0; i < m_inputStacks.size(); i++)
{
copyToWeightsAndScales << < divup(m_inputStacks[i].getXYZPatchGridSize().z, 512), 512 >> >(m_inputStacks[i], ofs, d_scale_gpu, d_patch_weight_gpu);
checkCudaErrors(cudaDeviceSynchronize());
ofs += m_inputStacks[i].getXYZPatchGridSize().z;
}
checkCudaErrors(cudaFree(d_scale_gpu));
checkCudaErrors(cudaFree(d_patch_weight_gpu));
cout.rdbuf(strm_buffer);
cerr.rdbuf(strm_buffer_e);
}
template <typename T>
struct transformMStep3DNoBias
{
__host__ __device__
thrust::tuple<T, T, T, T, T> operator()(const thrust::tuple<T, T, T, T, T>& v)
//thrust::tuple<sigma_, mix_, count, e, e> //this order is very important for the thrust optimization
{
const T s_ = thrust::get<0>(v);
const T w_ = thrust::get<1>(v);
const T ss_ = thrust::get<2>(v);
const T sw_ = thrust::get<3>(v);
const T scale = thrust::get<4>(v);
T sigma_ = 0.0f;
T mix_ = 0.0f;
T count = 0.0f;
T e = 0.0f;
thrust::tuple<T, T, T, T, T> t;
if (s_ != -1.0f && sw_ > 0.99f)
{
e = (s_ * scale) - ss_;
sigma_ = e * e * w_;
mix_ = w_;
count = 1.0;
T e1 = e;
t = thrust::make_tuple(sigma_, mix_, count, e, e1);
}
else
{
t = thrust::make_tuple(0.0, 0.0, 0.0, FLT_MAX, FLT_MIN);
}
return t;
}
};
template <typename T>
struct reduceMStep
{
__host__ __device__
thrust::tuple<T, T, T, T, T> operator()(const thrust::tuple<T, T, T, T, T>& a,
const thrust::tuple<T, T, T, T, T>& b)
{
return thrust::make_tuple(thrust::get<0>(a)+thrust::get<0>(b), thrust::get<1>(a)+thrust::get<1>(b),
thrust::get<2>(a)+thrust::get<2>(b), min(thrust::get<3>(a), thrust::get<3>(b)),
max(thrust::get<4>(a), thrust::get<4>(b)));
}
};
template <typename T>
__global__ void initBufferWithScales(PatchBasedVolume<T> inputStack)
{
const uint3 pos = make_uint3(blockIdx.x* blockDim.x + threadIdx.x,
blockIdx.y* blockDim.y + threadIdx.y,
blockIdx.z* blockDim.z + threadIdx.z);
uint3 vSize = inputStack.getXYZPatchGridSize();
if (pos.x >= vSize.x || pos.y >= vSize.y || pos.z >= vSize.z)
return;
inputStack.setBufferValue(pos, inputStack.getImagePatch2D(pos.z).scale);
}
template <typename T>
void patchBasedRobustStatistics_gpu<T>::MStep(int iter)
{
printf("MStep_gpu\n");
cerr.rdbuf(file_e.rdbuf());
cout.rdbuf(file.rdbuf());
T num = 0;
T min_ = std::numeric_limits<T>::max();
T max_ = std::numeric_limits<T>::min();
T sigma = 0;
T mix = 0;
thrust::tuple<T, T, T, T, T> results;
for (int i = 0; i < m_inputStacks.size(); i++)
{
thrust::device_ptr<T> d_s(m_inputStacks[i].getPatchesPtr());
thrust::device_ptr<T> d_w(m_inputStacks[i].getWeigthDataPtr());
thrust::device_ptr<T> d_ss(m_inputStacks[i].getSimPatchesPtr());
thrust::device_ptr<T> d_sw(m_inputStacks[i].getSimWeightsPtr());
thrust::device_ptr<T> d_buf(m_inputStacks[i].getBufferPtr());
unsigned int N1 = m_inputStacks[i].getXYZPatchGridSize().x*m_inputStacks[i].getXYZPatchGridSize().y;
//thrust::constant_iterator<int> first(h_scales[j]);
for (unsigned int ii = 0; ii < m_inputStacks.size(); ii++)
{
dim3 blockSize3 = dim3(8, 8, 8);
dim3 gridSize3 = divup(dim3(m_inputStacks[ii].getXYZPatchGridSize().x, m_inputStacks[ii].getXYZPatchGridSize().y,
m_inputStacks[ii].getXYZPatchGridSize().z), blockSize3);
initBufferWithScales<T> << <gridSize3, blockSize3 >> >(m_inputStacks[ii]);
CHECK_ERROR(initBufferWithScales);
checkCudaErrors(cudaDeviceSynchronize());
}
unsigned int N3 = m_inputStacks[i].getXYZPatchGridSize().x*m_inputStacks[i].getXYZPatchGridSize().y*m_inputStacks[i].getXYZPatchGridSize().z;
results = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(d_s, d_w, d_ss, d_sw, d_buf)),
thrust::make_zip_iterator(thrust::make_tuple(d_s + N3, d_w + N3, d_ss + N3, d_sw + N3, d_buf + N3)), transformMStep3DNoBias<T>(),
thrust::make_tuple<T, T, T, T, T>(0.0, 0.0, 0.0, 0.0, 0.0), reduceMStep<T>());
sigma += get<0>(results);
mix += get<1>(results);
num += get<2>(results);
min_ = min(min_, get<3>(results));
max_ = max(max_, get<4>(results));
}
if (mix > 0) {
m_sigma_gpu = sigma / mix;
}
else {
printf("Something went wrong: sigma= %f mix= %f\n", sigma, mix);
//exit(1);
}
if (m_sigma_gpu < m_step * m_step / 6.28f)
m_sigma_gpu = m_step * m_step / 6.28f;
if (iter > 1)
m_mix_gpu = mix / num;
//Calculate m
m_m_gpu = 1.0f / (max_ - min_);
std::cout.precision(10);
if (m_debug) {
cout << "Voxel-wise robust statistics parameters GPU: ";
cout << "sigma = " << sqrt(m_sigma_gpu) << " mix = " << m_mix_gpu << " ";
cout << " m = " << m_m_gpu << endl;
}
cout.rdbuf(strm_buffer);
cerr.rdbuf(strm_buffer_e);
}
template <typename T>
struct transformScalenoBias
{
transformScalenoBias(){}
__host__ __device__
thrust::tuple<T, T> operator()(const thrust::tuple<T, T, T, T>& v)
{
T s_ = thrust::get<0>(v);
const T w_ = thrust::get<1>(v);
const T ss_ = thrust::get<2>(v);
const T sw_ = thrust::get<3>(v);
if ((s_ == -1.0f) || sw_ <= 0.99f)
{
return thrust::make_tuple(0.0f, 0.0f);
}
else
{
T scalenum = w_ * s_ * ss_;
T scaleden = w_ * s_ * s_;
return thrust::make_tuple(scalenum, scaleden);
}
}
};
template <typename T>
struct reduceScale
{
reduceScale(){}
__host__ __device__
thrust::tuple<T, T> operator()(const thrust::tuple<T, T>& a, const thrust::tuple<T, T>& b)
{
return thrust::make_tuple(thrust::get<0>(a)+thrust::get<0>(b), thrust::get<1>(a)+thrust::get<1>(b));
}
};
template <typename T>
void patchBasedRobustStatistics_gpu<T>::Scale()
{
printf("Scale_gpu\n");
cerr.rdbuf(file_e.rdbuf());
cout.rdbuf(file.rdbuf());
std::vector<T> scale_vec;
//TODO reduce this on GPU
for (int i = 0; i < m_inputStacks.size(); i++)
{
unsigned int N = m_inputStacks[i].getXYZPatchGridSize().x*m_inputStacks[i].getXYZPatchGridSize().y;
for (int j = 0; j < m_inputStacks[i].getXYZPatchGridSize().z; j++)
{
thrust::device_ptr<T> d_s(m_inputStacks[i].getPatchesPtr() + (j*N));
thrust::device_ptr<T> d_w(m_inputStacks[i].getWeigthDataPtr() + (j*N));
thrust::device_ptr<T> d_ss(m_inputStacks[i].getSimPatchesPtr() + (j*N));
thrust::device_ptr<T> d_sw(m_inputStacks[i].getSimWeightsPtr() + (j*N));
thrust::tuple<T, T> out = thrust::make_tuple<T, T>(0.0f, 0.0f);
out = thrust::transform_reduce(
thrust::make_zip_iterator(thrust::make_tuple<thrust::device_ptr<T>, thrust::device_ptr<T>, thrust::device_ptr<T>, thrust::device_ptr<T> >(d_s, d_w, d_ss, d_sw)),
thrust::make_zip_iterator(thrust::make_tuple<thrust::device_ptr<T>, thrust::device_ptr<T>, thrust::device_ptr<T>, thrust::device_ptr<T> >(d_s + N, d_w + N, d_ss + N, d_sw + N)), transformScalenoBias<T>(),
thrust::make_tuple<T, T>(0.0, 0.0), reduceScale<T>());
if (thrust::get<1>(out) != 0.0)
{
scale_vec.push_back(thrust::get<0>(out) / thrust::get<1>(out));
//scale_vec[j] = thrust::get<0>(out) / thrust::get<1>(out);
}
else
{
scale_vec.push_back(1.0);
//scale_vec[j] = 1.0;
}
}
}
unsigned int numPatches = 0;
for (int i = 0; i < m_inputStacks.size(); i++)
{
numPatches += m_inputStacks[i].getXYZPatchGridSize().z;
}
T* d_scale_gpu;
checkCudaErrors(cudaMalloc(&d_scale_gpu, sizeof(T)*numPatches));
checkCudaErrors(cudaMemcpy(d_scale_gpu, &scale_vec[0], sizeof(T)*numPatches, cudaMemcpyHostToDevice));
//copyToPatches
unsigned int ofs = 0;
for (int i = 0; i < m_inputStacks.size(); i++)
{
copyToScales << < divup(m_inputStacks[i].getXYZPatchGridSize().z, 512), 512 >> >(m_inputStacks[i], ofs, d_scale_gpu);
checkCudaErrors(cudaDeviceSynchronize());
ofs += m_inputStacks[i].getXYZPatchGridSize().z;
}
if (m_debug ) {
cout << setprecision(3);
cout << "Patch scale GPU = ";
for (unsigned int inputIndex = 0; inputIndex < scale_vec.size(); ++inputIndex)
cout << inputIndex << ":" << scale_vec[inputIndex] << " ";
cout << endl;
}
checkCudaErrors(cudaFree(d_scale_gpu));
cout.rdbuf(strm_buffer);
cerr.rdbuf(strm_buffer_e);
}
template <typename T>
struct transformRS
{
__host__ __device__
thrust::tuple<T, unsigned int> operator()(const thrust::tuple<T, char, T, T>& v)
{
T s_ = thrust::get<0>(v);
char si_ = thrust::get<1>(v);
T ss_ = thrust::get<2>(v);
T sw_ = thrust::get<3>(v);
thrust::tuple<T, unsigned int> t = thrust::make_tuple(0.0f, 0);
if (s_ != -1 && si_ == 1 && sw_ > 0.99)
{
T sval = s_ - ss_;
t = thrust::make_tuple<T, unsigned int>(sval*sval, 1);
}
return t; // return t <sigma,num>
}
};
template <typename T>
struct reduceRS
{
__host__ __device__
thrust::tuple<T, unsigned int> operator()(const thrust::tuple<T, unsigned int>& a,
const thrust::tuple<T, unsigned int>& b)
{
return thrust::make_tuple<T, unsigned int>(thrust::get<0>(a)+thrust::get<0>(b), thrust::get<1>(a)+thrust::get<1>(b));
}
};
template <typename T>
struct square
{
__host__ __device__
thrust::tuple<T, unsigned int> operator()(const thrust::tuple<T, unsigned int>& a,
const thrust::tuple<T, unsigned int>& b)
{
return make_tuple<T, unsigned int>(thrust::get<0>(a)+thrust::get<0>(b), thrust::get<1>(a)+thrust::get<1>(b));
}
};
template <typename T>
void patchBasedRobustStatistics_gpu<T>::InitializeRobustStatistics(T _min_intensity, T _max_intensity, int cuda_device)
{
//TODO
//if patch does not have an overlap with ROI, set its weight to zero
//Force exclusion of patches predefined by user
T sa = 0;
T sb = 0;
m_cuda_device = cuda_device;
checkCudaErrors(cudaSetDevice(m_cuda_device));
for (int i = 0; i < m_inputStacks.size(); i++)
{
thrust::device_ptr<T> d_s(m_inputStacks[i].getPatchesPtr());
thrust::device_ptr<char> d_si(m_inputStacks[i].getSimInsidePtr());
thrust::device_ptr<T> d_ss(m_inputStacks[i].getSimPatchesPtr());
thrust::device_ptr<T> d_sw(m_inputStacks[i].getSimWeightsPtr());
unsigned int N = m_inputStacks[i].getXYZPatchGridSize().x*m_inputStacks[i].getXYZPatchGridSize().y*
m_inputStacks[i].getXYZPatchGridSize().z;
//thrust::make_zip_iterator(thrust::make_tuple(d_s, d_si, d_ss, d_sw));
//thrust::make_zip_iterator(thrust::make_tuple(d_s + N, d_si + N, d_ss + N, d_sw + N));
//thrust::transform_reduce(InputIterator first, InputIterator last, UnaryFunction unary_op, OutputType init, BinaryFunction binary_op)
thrust::tuple<T, unsigned int> out = thrust::transform_reduce(
thrust::make_zip_iterator(thrust::make_tuple<thrust::device_ptr<T>, thrust::device_ptr<char>, thrust::device_ptr<T>, thrust::device_ptr<T> >(d_s, d_si, d_ss, d_sw)),
thrust::make_zip_iterator(thrust::make_tuple<thrust::device_ptr<T>, thrust::device_ptr<char>, thrust::device_ptr<T>, thrust::device_ptr<T> >(d_s + N, d_si + N, d_ss + N, d_sw + N)),
transformRS<T>(),
thrust::make_tuple<T, unsigned int>(0.0, 0),
reduceRS<T>());
sa += get<0>(out);
sb += (T)get<1>(out);
}
// printf("sa = %f - sb = %f\n",sa,sb);
if (sb == 0)
{
printf("ERROR: sb = 0!! no sigma computed! exiting!\n");
exit(-1);
}
//initialize sigma for inlier voxel errors
m_sigma_gpu = sa / sb;
//initialize sigma for patch-wise robust statistics
m_sigma_s_gpu = 0.025f;
//initialize mixing proportion for inlier class in voxel-wise robust statistics (correctly matched voxels)
m_mix_gpu = 0.9f;
//initialize mixing proportion for outlier class in patch-wise robust statistics
m_mix_s_gpu = 0.9f;
//Initialize value for uniform distribution according to the range of intensities
m_m_gpu = (T)(1.0f / (2.1f * _max_intensity - 1.9f * _min_intensity));
if (m_debug)
{
std::cout << "Initializing robust statistics GPU: " << "sigma=" << sqrt(m_sigma_gpu) << " " << "m=" << m_m_gpu
<< " " << "mix=" << m_mix_gpu << " " << "mix_s=" << m_mix_s_gpu << std::endl;
}
}
template <typename T>
patchBasedRobustStatistics_gpu<T>::patchBasedRobustStatistics_gpu(std::vector<PatchBasedVolume<T> > & _inputStacks) :
m_inputStacks(_inputStacks), m_debug(false)
{
m_step = 0.0001;
strm_buffer = cout.rdbuf();
strm_buffer_e = cerr.rdbuf();
file.open("log-EM.txt");
file_e.open("log-EM-error.txt");
}
template <typename T>
void patchBasedRobustStatistics_gpu<T>::updateInputStacks(std::vector<PatchBasedVolume<T> > & _inputStacks)
{
m_inputStacks.clear();
m_inputStacks = _inputStacks;
}
template <typename T>
patchBasedRobustStatistics_gpu<T>::~patchBasedRobustStatistics_gpu()
{
}
template class patchBasedRobustStatistics_gpu < float > ;
template class patchBasedRobustStatistics_gpu < double > ; |
hybrid_with_omp.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cudpp.h>
#include<limits.h>
#include <sys/time.h>
#include <omp.h>
#define NO_OF_THREADS_PER_BLOCK 1024
#define OMP_NUM_THREADS 16
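/* Host copies of the CSR-style graph (vertices, edges, weights), the matching device buffers used by the
Boruvka contraction, and f, the fraction of array initialisation offloaded to host OpenMP threads */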
float f;
unsigned int noOfEdges;
unsigned int noOfVertices;
unsigned int *vertices;
unsigned int *edges;
unsigned int *weights;
unsigned int *d_size;
unsigned int *d_edgeListSize;
unsigned int *d_vertexListSize;
unsigned int *segmentedMinScanInput;
unsigned int *d_segmentedMinScanInput;
unsigned int *d_segmentedMinScanOutput;
unsigned int *d_previousIDs;
unsigned int *d_successorArray;
unsigned int *d_successorArrayTemp;
unsigned int *d_indices;
unsigned int *d_edgeMap;
unsigned int *d_edgeMapCopy;
unsigned int *d_edgesCopy;
unsigned int *d_edgeIndices;
unsigned int *d_superVertexID;
unsigned int *d_superEdgeId;
unsigned int *d_MSTOutput;
unsigned int *h_MSTOutput;
unsigned int *d_edges;
unsigned int *d_vertices;
unsigned int *d_weights;
unsigned int *d_edgeFlagArray;
unsigned int *d_vertexFlagArray;
unsigned int noOfEdgesOriginal;
unsigned int noOfVerticesOriginal;
int *d_pickArray;
CUDPPHandle theCudpp;
CUDPPHandle segmentedScanPlan_min;
CUDPPConfiguration segmented_min_scan_config;
CUDPPHandle scanPlan;
CUDPPConfiguration scan_config;
CUDPPHandle sortPlan;
CUDPPConfiguration config_sort;
/* Pack each edge's weight and destination vertex into one integer per edge: weight in the upper bits (shifted left by 22), destination vertex in the lower 22 bits, so a segmented min-scan over these values selects the minimum-weight outgoing edge of every vertex */
__global__ void mergeEdgeAndWeight(unsigned int *d_segmentedMinScanInput, unsigned int *d_vertices, unsigned int *d_weight, unsigned int *d_edges, unsigned int noOfEdges)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges) {
unsigned int temp = d_weight[index];
d_segmentedMinScanInput[index] = (temp<<22) | d_edges[index];
}
}
/* initialise all entries of array pointed by d_array of given size to 0*/
__global__ void initArray(unsigned int *d_Array, unsigned int size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < size) {
d_Array[index] = 0;
}
}
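/* hybrid variant of initArray: zeroes entries of d_Array in the range [t, size); the first t entries are zeroed on the host by OpenMP threads and copied over (see boruvka) */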
__global__ void initArray1(unsigned int *d_Array, unsigned int size, int t)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < size && index >= t)
d_Array[index] = 0;
}
__global__ void printArr(unsigned int *d_arr, unsigned int size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if (index < size) {
printf("%d ", d_arr[index]);
}
printf("\n");
}
/* creates a flag array for segmented scan. Sets to 1 the index from where outgoing vertex starts*/
__global__ void markSegment(unsigned int *d_edgeFlagArray, unsigned int *d_vertex, unsigned int *d_edges, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
d_edgeFlagArray[d_vertex[index]] = 1;
}
}
/*prints new edge and vertex size*/
__global__ void print(unsigned int *d_edgeListSize, unsigned int *d_vertexListSize)
{
printf("Edges: %d, Vertices %d \n", *d_edgeListSize, *d_vertexListSize);
}
/*creates successor array: each vertex's successor is the other endpoint of its minimum-weight outgoing edge, taken from the segmented min-scan output*/
__global__ void createSuccArray(unsigned int *d_successorArray, unsigned int *d_vertices, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int noOfVertices, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
unsigned int minEdgeIndex;
if(index < noOfVertices) {
//index is same as vertex ID
if (index == noOfVertices-1)
minEdgeIndex = noOfEdges - 1;
else
minEdgeIndex = d_vertices[index+1] - 1; // min value is stored in loc of last neighbour
unsigned int val = d_segmentedMinScanOutput[minEdgeIndex];
//unsigned int minWeight = val >> 22;
unsigned int minVertex = val & (unsigned int)(pow(2.0,22)-1);
d_successorArray[index] = minVertex;
}
}
/*removes the 2-cycles that mutual minimum edges create in the successor array by making the smaller-indexed vertex of each cycle its own successor*/
__global__ void eliminateCycles(unsigned int *d_successor, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int succIndex = d_successor[d_successor[index]];
if(index == succIndex) {
if(index < d_successor[index]) {
d_successor[index] = index;
} else {
d_successor[d_successor[index]]= d_successor[index];
}
}
}
}
/* hybrid implementation of markSegment function */
__global__ void markSegment1(unsigned int *d_edgeFlagArray, unsigned int *d_vertex, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices && index > 0) {
d_edgeFlagArray[d_vertex[index]] = 1;
}
}
/*For each edge whose source vertex is not its own successor, store the index of the last edge of that vertex's segment (where the segmented scan result lives); edges of representative vertices get -1*/
__global__ void populatePArray(int *d_pickArray, unsigned int *d_vertices, unsigned int *d_successor, unsigned int *d_preIDs, unsigned int noOfVertices, unsigned int noOfEdges)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges) {
if(d_preIDs[index] != d_successor[d_preIDs[index]]) {
if(d_preIDs[index] < (noOfVertices - 1))
d_pickArray[index] = d_vertices[d_preIDs[index]+1] - 1;
else
d_pickArray[index] = noOfEdges - 1;
}
else
d_pickArray[index] = -1;
}
}
/*Marks in d_MSTOutput (via the original edge ids in d_edgeMap) the first edge of each segment whose scanned value equals the segment minimum, i.e. the edge selected for the MST*/
__global__ void AppendOutputEdges(int *d_pickArray, unsigned int * d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_MSTOutput, unsigned int *d_edgeMap, unsigned int noOfEdges)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges && d_pickArray[index] >= 0) {
unsigned int edgeid = d_edgeMap[index];
unsigned int prev = 0;
int temp = -1;
unsigned int segmentedOutput = d_segmentedMinScanOutput[d_pickArray[index]];
unsigned int currIndex = d_segmentedMinScanOutput[index];
if(index > 0) {
temp = d_pickArray[index-1];
prev = d_segmentedMinScanOutput[index-1];
}
if(d_pickArray[index] != temp) {
if(currIndex == segmentedOutput) {
d_MSTOutput[edgeid]=1;
}
} else {
if(currIndex != prev && currIndex == segmentedOutput) {
d_MSTOutput[edgeid]=1;
}
}
}
}
/*This function sets each value of array equal to its index*/
__global__ void setIndices(unsigned int *d_arr,unsigned int size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < size)
d_arr[index] = index;
}
/* Hybrid variant of setIndices: sets entries in the range [l, size) to their own index; entries below l are filled elsewhere (on the host in the hybrid scheme) */
__global__ void setIndices1(unsigned int *d_arr,unsigned int size, int l)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if (index < size && index >= l)
d_arr[index] = index;
}
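/* This function copies the successor array into a temporary buffer so pointer doubling reads a consistent snapshot while updating */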
__global__ void makeTempSuccCopy(unsigned int *d_successorArray, unsigned int* d_vertex, unsigned int *d_successorArrayTemp, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int t = d_successorArray[index];
d_successorArrayTemp[index] = t;
}
}
/* This function copies data from temporary successorArray so that it can be updated with correct value */
__global__ void updateSuccArray(unsigned int *d_successorArray, unsigned int* d_vertex, unsigned int *d_successorArrayTemp, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int t = d_successorArrayTemp[index];
d_successorArray[index] = t;
}
}
/* This function uses pointer doubling to assign representative id to each vertex*/
__global__ void propagateRepVertexID(unsigned int *d_successorArray, bool *d_isSuccUpdated, unsigned int *d_previousIDs, unsigned int *d_successorArrayTemp, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int successor = d_successorArray[index];
if(successor != d_successorArray[successor]) { //Eindex = 2 and end = 6 and u = 2 and succ[u] = 2
*d_isSuccUpdated=true;
d_successorArrayTemp[index] = d_successorArray[successor];
}
}
}
/* This function iteratively applies pointer doubling, s(u) <- s(s(u)), until no successor changes, so every vertex ends up pointing at the representative vertex of its component */
void propagateID(unsigned int noOfBlocks_vertices, unsigned int noOfThreads_vertices)
{
bool succchange;
bool *d_isSuccUpdated;
hipMalloc(&d_successorArrayTemp, sizeof(int)*noOfVertices);
hipMalloc((void**)&d_isSuccUpdated, sizeof(bool));
do
{
succchange=false;
hipMemcpy(d_isSuccUpdated, &succchange, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( makeTempSuccCopy), dim3(noOfBlocks_vertices),dim3(noOfThreads_vertices), 0, 0, d_successorArray, d_vertices, d_successorArrayTemp, noOfVertices);
hipLaunchKernelGGL(( propagateRepVertexID), dim3(noOfBlocks_vertices),dim3(noOfThreads_vertices), 0, 0, d_successorArray, d_isSuccUpdated, d_previousIDs,d_successorArrayTemp, noOfVertices);
hipLaunchKernelGGL(( updateSuccArray), dim3(noOfBlocks_vertices),dim3(noOfThreads_vertices), 0, 0, d_successorArray, d_vertices, d_successorArrayTemp, noOfVertices);
hipMemcpy(&succchange, d_isSuccUpdated, sizeof(bool), hipMemcpyDeviceToHost);
}while(succchange);
hipFree(d_successorArrayTemp);
hipFree(d_isSuccUpdated);
}
/*Creates the scan flag used for supervertex numbering: marks each position where the successor value differs from its predecessor*/
void __global__ createScanFlag(unsigned int *d_vertexFlagArray, unsigned int *d_successorArray, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices && index > 0)
{
unsigned int prev_val = d_successorArray[index-1];
unsigned int curr_val = d_successorArray[index];
if (prev_val != curr_val) {
d_vertexFlagArray[index] = 1;
}
}
}
/*This function assigns supervertex id to each vertex*/
__global__ void assignSuperVertexID(unsigned int *d_superVertex, unsigned int *d_indices, unsigned int *d_vertexFlagArray,unsigned int *d_previousIDs,unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
d_vertexFlagArray[d_indices[index]] = d_superVertex[index];
}
}
/* This function updates supervertexid */
__global__ void updateSuperVertexID(unsigned int *d_superVertex,unsigned int *d_arr,unsigned int *d_vertexFlagArray, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int newId = d_vertexFlagArray[index];
d_superVertex[index] = newId;
}
}
/* This function removes self edges after successor array is created */
__global__ void removeSelfEdges(unsigned int *d_edges, unsigned int *d_prevIds,unsigned int *d_superVertexID, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges) {
unsigned int uid = d_superVertexID[d_prevIds[index]]; //supervertex id of the edge's source vertex
unsigned int vid = d_superVertexID[d_edges[index]];
if(uid == vid) {
d_edges[index]=INT_MAX;
}
}
}
/* This function is to assign new super edge id*/
__global__ void assignSuperEdgeId(unsigned int *d_superEdgeId, unsigned int *d_previousIds, unsigned int *d_superVertexId, unsigned int *d_edge, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges)
{
unsigned int x = d_previousIds[index];
unsigned int id = INT_MAX;
if (x != INT_MAX && d_edge[index] != INT_MAX) {
id = d_superVertexId[x];
}
d_superEdgeId[index] = id;
}
}
/* This function is to compress the edge list*/
__global__ void edgeCompression(unsigned int *d_edges, unsigned int *d_weights, unsigned int *d_vertex, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_superVertexID, unsigned int *d_edgeMap, unsigned int *d_edgeMapCopy, unsigned int *d_edgeFlagArray, unsigned int *d_superEdgeId, unsigned int * d_edgeIndices, int *d_pickArray, unsigned int *d_size, unsigned int *d_edgeListSize, unsigned int *d_vertexListSize)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < *d_size) {
unsigned int id = d_edgeIndices[index];
if(d_superEdgeId[index] != INT_MAX && d_edges[id] != INT_MAX) {
if(index == *d_size-1) {
*d_edgeListSize = index + 1;
*d_vertexListSize = d_superEdgeId[index] + 1;
}
d_segmentedMinScanOutput[index] = d_weights[id];
d_segmentedMinScanInput[index] = d_superVertexID[d_edges[id]];
d_pickArray[index] = d_superEdgeId[index];
d_edgeMapCopy[index] = d_edgeMap[id];
}
}
}
/*This function copies the temporary array to arrays which will be actually used*/
__global__ void copyArrays(unsigned int *d_edges, unsigned int *d_weights, unsigned int *vertex, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_edgeMap, unsigned int *d_edgeMapCopy, unsigned int *d_edgeCopy, unsigned int *d_size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < *d_size) {
unsigned int p = d_segmentedMinScanInput[index];
d_edges[index] = p;
unsigned int wt = d_segmentedMinScanOutput[index];
d_weights[index] = wt;
unsigned int mapVal = d_edgeMapCopy[index];
d_edgeMap[index] = mapVal;
}
}
/*This function determines the new edge list*/
__global__ void makeEdgeList(unsigned int *d_edgeFlagArray, unsigned int *d_edges, unsigned int *d_superEdgeId, unsigned int *d_size, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index == 0) {
d_edgeFlagArray[index] = 1;
} else if(index < noOfEdges && index > 0) {
if(d_superEdgeId[index-1] != INT_MAX && d_superEdgeId[index] == INT_MAX) {
*d_size = index;
}
if(d_superEdgeId[index] > d_superEdgeId[index-1]) {
d_edgeFlagArray[index] = 1;
}
}
}
/*This function helps in creating new vertices list for next iteration*/
__global__ void CreateVertexListFlag(unsigned int *d_edgeFlagArray, unsigned int *d_vertices, int *d_pickArray, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index == 0) {
d_edgeFlagArray[index] = 1;
} else if(index < noOfEdges && index > 0) {
if(d_pickArray[index] > d_pickArray[index-1]) {
d_edgeFlagArray[index] = 1;
}
}
}
/*This function helps to build new vertex list*/
__global__ void BuildVertexList(unsigned int *d_vertices, unsigned int *d_edges, int *d_pickArray, unsigned int *d_edgeFlagArray, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges && d_edgeFlagArray[index] == 1) {
d_vertices[d_pickArray[index]] = index;
}
}
/* Parse the input file to setup our graph
* we set the relevant arrays here
*/
void parseInputFile(char *fileName)
{
unsigned int x,temp;
unsigned int edgeNo, weightOfEdge;
FILE *fp;
fp = fopen(fileName,"r");
printf("\n Parsing Input File: \n");
fscanf(fp,"%d",&noOfVertices);
vertices = (unsigned int *)malloc(sizeof(unsigned int) * noOfVertices);
int i;
for (i=0; i<noOfVertices; i++) {
fscanf(fp,"%d %d",&x, &temp);
vertices[i] = x;
}
fscanf(fp,"%d",&temp);
fscanf(fp,"%d",&noOfEdges);
edges = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges);
weights = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges);
for(i=0; i<noOfEdges; i++) {
fscanf(fp,"%d %d",&edgeNo, &weightOfEdge);
edges[i] = edgeNo;
weights[i] = weightOfEdge;
}
printf("No. of Vertices in Input: %d\n",noOfVertices);
printf("No. of Edges in Input: %d\n", noOfEdges);
fclose(fp);
}
/* Set up the CUDPP plans used below: an inclusive add-scan, a segmented min-scan and a key-value radix sort; f is the host/device split fraction used by the hybrid initialisation */
void setupPlan()
{
cudppCreate(&theCudpp);
scan_config.algorithm = CUDPP_SCAN;
scan_config.op = CUDPP_ADD;
scan_config.datatype = CUDPP_UINT;
scan_config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE;
segmented_min_scan_config.algorithm = CUDPP_SEGMENTED_SCAN;
segmented_min_scan_config.op = CUDPP_MIN;
segmented_min_scan_config.datatype = CUDPP_UINT;
segmented_min_scan_config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE;
config_sort.algorithm = CUDPP_SORT_RADIX;
config_sort.datatype = CUDPP_UINT;
config_sort.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_KEY_VALUE_PAIRS;
f = 0.05;
}
/* Dynamically allocate necessary arrays*/
void mallocArr()
{
hipMalloc(&d_segmentedMinScanInput, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_weights, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_edges, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_vertices, sizeof(unsigned int )*noOfVertices);
hipMalloc(&d_edgeFlagArray, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_segmentedMinScanOutput, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_successorArray, sizeof(unsigned int )*noOfVertices);
hipMalloc(&d_previousIDs, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_pickArray, sizeof(int )*noOfEdges);
hipMalloc(&d_superVertexID, sizeof(unsigned int )*noOfVertices);
hipMalloc(&d_MSTOutput, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_indices, sizeof(unsigned int )*noOfVertices);
hipMalloc(&d_vertexFlagArray, sizeof(unsigned int )*noOfVertices);
hipMalloc(&d_superVertexID, sizeof(unsigned int )*noOfVertices);
hipMalloc(&d_size, sizeof(unsigned int ));
hipMalloc(&d_superEdgeId, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_edgeIndices, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_edgeListSize, sizeof(unsigned int ));
hipMalloc(&d_vertexListSize, sizeof(unsigned int ));
hipMalloc(&d_edgeMapCopy, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_edgeMap, sizeof(unsigned int )*noOfEdges);
h_MSTOutput = (unsigned int *)malloc(sizeof(unsigned int )*noOfEdges);
}
/*Free the dynamically allocated memory. Do other cleanup here*/
void cleanUp()
{
hipFree(d_edgeIndices);
hipFree(d_superEdgeId);
hipFree(d_edgeMap);
hipFree(d_edgeMapCopy);
hipFree(d_superVertexID);
hipFree(d_vertexFlagArray);
hipFree(d_indices);
hipFree(d_MSTOutput);
hipFree(d_previousIDs);
hipFree(d_pickArray);
hipFree(d_successorArray);
hipFree(d_segmentedMinScanOutput);
hipFree(d_edgeFlagArray);
hipFree(d_vertices);
hipFree(d_edges);
hipFree(d_weights);
hipFree(d_segmentedMinScanInput);
hipFree(d_size);
hipFree(d_edgeListSize);
hipFree(d_vertexListSize);
cudppDestroy(theCudpp);
free(h_MSTOutput);
free(edges);
free(vertices);
free(weights);
}
/* Do basic initialization*/
void initialize()
{
unsigned int i;
hipMemcpy(d_vertices, vertices, sizeof(unsigned int)*noOfVertices, hipMemcpyHostToDevice);
hipMemcpy(d_edges, edges, sizeof(unsigned int)*noOfEdges, hipMemcpyHostToDevice);
hipMemcpy(d_weights, weights, sizeof(unsigned int)*noOfEdges, hipMemcpyHostToDevice);
unsigned int *temp = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges);
for(i=0; i<noOfEdges; i++)
temp[i] = 0;
hipMemcpy(d_MSTOutput, temp, sizeof(unsigned int )*noOfEdges, hipMemcpyHostToDevice);
for(i=0; i<noOfEdges; i++)
temp[i]=i;
hipMemcpy(d_edgeMap, temp, sizeof(unsigned int)*noOfEdges, hipMemcpyHostToDevice);
free(temp);
}
/* Helper function to determine no of threads to be used */
unsigned int getNoOfThreads(unsigned int size) {
unsigned int threadsPerBlock;
if (size <= 1024)
threadsPerBlock = size;
else
threadsPerBlock = 1024;
return threadsPerBlock;
}
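/* One round of the hybrid (GPU + OpenMP) Boruvka algorithm:
 * find each vertex's minimum outgoing edge with a segmented min scan, build the successor array and
 * break 2-cycles, mark the chosen edges in d_MSTOutput, merge components into supervertices via
 * pointer doubling, sorting and prefix scans, then drop self edges and compact the edge and vertex
 * lists for the next round. */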
void boruvka()
{
int t;
unsigned int noOfThreads_edge = getNoOfThreads(noOfEdges);
unsigned int noOfBlocks_edge = (noOfEdges+1024)/noOfThreads_edge;
unsigned int noOfThreads_vertices = getNoOfThreads(noOfVertices);
unsigned int noOfBlocks_vertices = (noOfVertices+1024)/noOfThreads_vertices;
hipError_t error;
hipLaunchKernelGGL(( mergeEdgeAndWeight), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_segmentedMinScanInput, d_vertices, d_weights, d_edges, noOfEdges);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("0.1 CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
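	/* Hybrid CPU/GPU initialization used repeatedly below: for large arrays the host zeroes (or
	   index-fills) the first t = f*size entries with OpenMP while initArray1/setIndices1 handles the
	   rest on the GPU, after which the host portion is copied to the device; small arrays fall back
	   to the pure-GPU initArray/setIndices1 path. */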
t = noOfEdges * f;
if (noOfEdges >= 200)
{
unsigned int *temp_h_efa = (unsigned int *)malloc(t*sizeof(unsigned int));
hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, noOfEdges,t);
int i;
#pragma omp parallel for
for(i = 0; i<t;i++)
temp_h_efa[i] = 0;
#pragma omp barrier
hipDeviceSynchronize();
hipMemcpy(d_edgeFlagArray, temp_h_efa, sizeof(unsigned int )*t, hipMemcpyHostToDevice);
free(temp_h_efa);
}
else
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, noOfEdges);
hipDeviceSynchronize();
error = hipGetLastError();
if(error != hipSuccess)
{
printf("At line 613 CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
hipLaunchKernelGGL(( markSegment), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_edgeFlagArray, d_vertices, d_edges, noOfVertices);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("3 CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
cudppPlan(theCudpp, &segmentedScanPlan_min,segmented_min_scan_config, noOfEdges, 1, 0 ); //Make the segmented min scan plan
cudppSegmentedScan(segmentedScanPlan_min, d_segmentedMinScanOutput, d_segmentedMinScanInput, (const unsigned int *)d_edgeFlagArray, noOfEdges);
cudppDestroyPlan(segmentedScanPlan_min);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("CUDA error: %s\n", hipGetErrorString(error));
// exit(-1);
}
hipLaunchKernelGGL(( createSuccArray), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_successorArray, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, noOfVertices, noOfEdges);
hipLaunchKernelGGL(( eliminateCycles), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_successorArray, noOfVertices);
t = noOfEdges * f;
if (noOfEdges >= 200)
{
unsigned int *temp_h_efa = (unsigned int *)malloc(t*sizeof(unsigned int));
hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, noOfEdges,t);
int i;
#pragma omp parallel for
for(i = 0; i<t;i++)
temp_h_efa[i] = 0;
#pragma omp barrier
hipDeviceSynchronize();
hipMemcpy(d_edgeFlagArray, temp_h_efa, sizeof(unsigned int )*t, hipMemcpyHostToDevice);
free(temp_h_efa);
}
else
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, noOfEdges);
hipDeviceSynchronize();
hipLaunchKernelGGL(( markSegment1), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_edgeFlagArray, d_vertices, noOfVertices);
cudppPlan(theCudpp, &scanPlan, scan_config, noOfEdges, 1, 0);
cudppScan(scanPlan, d_previousIDs, d_edgeFlagArray, noOfEdges);
cudppDestroyPlan(scanPlan);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("At line 668 CUDA error: %s\n", hipGetErrorString(error));
}
t = noOfEdges * f;
if(noOfEdges >= 200)
{
unsigned int *temp_h_pa = (unsigned int *)malloc(t*sizeof(unsigned int));
hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, (unsigned int*)d_pickArray, noOfEdges, t);
int i;
#pragma omp parallel for
for(i = 0; i<t;i++)
temp_h_pa[i] = 0;
#pragma omp barrier
hipDeviceSynchronize();
hipMemcpy(d_pickArray, temp_h_pa, sizeof(unsigned int )*t, hipMemcpyHostToDevice);
free(temp_h_pa);
}
else
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, (unsigned int*)d_pickArray, noOfEdges);
hipDeviceSynchronize();
hipLaunchKernelGGL(( populatePArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_pickArray, d_vertices, d_successorArray, d_previousIDs, noOfVertices, noOfEdges);
hipLaunchKernelGGL(( AppendOutputEdges), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_pickArray, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_MSTOutput, d_edgeMap, noOfEdges);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("At line 698 CUDA error: %s\n", hipGetErrorString(error));
}
propagateID(noOfBlocks_vertices, noOfThreads_vertices);
t = noOfVertices*f;
if(noOfVertices >= 20)
{
unsigned int *temp_h_setIndices = (unsigned int *)malloc(t*sizeof(unsigned int));
hipLaunchKernelGGL(( setIndices1), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_indices, noOfVertices, t);
int i;
#pragma omp parallel for
for(i = 0; i<t;i++)
temp_h_setIndices[i] = i;
#pragma omp barrier
hipDeviceSynchronize();
hipMemcpy(d_indices, temp_h_setIndices, sizeof(unsigned int )*t, hipMemcpyHostToDevice);
free(temp_h_setIndices);
}
else
hipLaunchKernelGGL(( setIndices1), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_indices, noOfVertices, 0);
hipDeviceSynchronize();
cudppPlan(theCudpp, &sortPlan, config_sort, noOfVertices, 1, 0);
cudppRadixSort(sortPlan, d_successorArray, d_indices, noOfVertices);
cudppDestroyPlan(sortPlan);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_vertexFlagArray,noOfVertices);
hipLaunchKernelGGL(( createScanFlag), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_vertexFlagArray, d_successorArray,noOfVertices);
cudppPlan(theCudpp, &scanPlan, scan_config, noOfVertices, 1, 0);
cudppScan(scanPlan, d_superVertexID, d_vertexFlagArray, noOfVertices);
cudppDestroyPlan(scanPlan);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("At line 736 CUDA error: %s\n", hipGetErrorString(error));
}
hipLaunchKernelGGL(( assignSuperVertexID), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_superVertexID,d_indices,d_vertexFlagArray,d_previousIDs,noOfVertices);
hipLaunchKernelGGL(( updateSuperVertexID), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_superVertexID,d_indices,d_vertexFlagArray, noOfVertices);
hipLaunchKernelGGL(( removeSelfEdges), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edges,d_previousIDs,d_superVertexID,noOfEdges);
hipLaunchKernelGGL(( assignSuperEdgeId), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_superEdgeId,d_previousIDs, d_superVertexID, d_edges, noOfEdges);
t = noOfEdges*f;
//printf("noOfVertices = %d and point = %d\n",noOfVertices, t);
if (noOfEdges >= 200)
{
unsigned int *temp_h_setIndices = (unsigned int *)malloc(t*sizeof(unsigned int));
hipLaunchKernelGGL(( setIndices1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeIndices, noOfEdges, t);
int i;
#pragma omp parallel for
for(i = 0; i<t;i++)
temp_h_setIndices[i] = i;
#pragma omp barrier
hipDeviceSynchronize();
hipMemcpy(d_edgeIndices, temp_h_setIndices, sizeof(unsigned int )*t, hipMemcpyHostToDevice);
free(temp_h_setIndices);
}
else
hipLaunchKernelGGL(( setIndices1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeIndices,noOfEdges,0);
hipDeviceSynchronize();
cudppPlan(theCudpp, &sortPlan, config_sort, noOfEdges, 1, 0);
cudppRadixSort(sortPlan, d_superEdgeId, d_edgeIndices, noOfEdges);
cudppDestroyPlan(sortPlan);
t = noOfEdges * f;
if (noOfEdges >= 200)
{
unsigned int *temp_h_efa = (unsigned int *)malloc(t*sizeof(unsigned int));
hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, (unsigned int*)d_edgeFlagArray, noOfEdges, t);
int i;
#pragma omp parallel for
for(i = 0; i<t;i++)
temp_h_efa[i] = 0;
#pragma omp barrier
hipDeviceSynchronize();
hipMemcpy(d_edgeFlagArray, temp_h_efa, sizeof(unsigned int )*t, hipMemcpyHostToDevice);
free(temp_h_efa);
}
else
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray,noOfEdges);
hipDeviceSynchronize();
unsigned int h_size = noOfEdges + 1;
hipMemcpy(d_size,&h_size,sizeof(unsigned int ), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( makeEdgeList), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, d_edges, d_superEdgeId, d_size, noOfEdges);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("At line 793 CUDA error: %s\n", hipGetErrorString(error));
}
unsigned int zero = 0;
hipMemcpy(d_edgeListSize, &zero, sizeof(unsigned int ), hipMemcpyHostToDevice);
hipMemcpy(d_vertexListSize, &zero, sizeof(unsigned int ), hipMemcpyHostToDevice);
t = noOfEdges * f;
if (noOfEdges >= 200)
{
unsigned int *temp_arr = (unsigned int *)malloc(t*sizeof(unsigned int));
hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_segmentedMinScanInput, noOfEdges,t);
hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_segmentedMinScanOutput, noOfEdges, t);
hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, (unsigned int*)d_pickArray, noOfEdges, t);
hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeMapCopy, noOfEdges, t);
int i;
#pragma omp parallel for
for(i = 0; i<t;i++)
temp_arr[i] = 0;
#pragma omp barrier
hipDeviceSynchronize();
hipMemcpy(d_segmentedMinScanInput, temp_arr, sizeof(unsigned int )*t, hipMemcpyHostToDevice);
hipMemcpy(d_segmentedMinScanOutput, temp_arr, sizeof(unsigned int )*t, hipMemcpyHostToDevice);
hipMemcpy(d_pickArray, temp_arr, sizeof(unsigned int )*t, hipMemcpyHostToDevice);
hipMemcpy(d_edgeMapCopy, temp_arr, sizeof(unsigned int )*t, hipMemcpyHostToDevice);
free(temp_arr);
}
else
{
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_segmentedMinScanInput, noOfEdges);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_segmentedMinScanOutput, noOfEdges);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, (unsigned int*)d_pickArray, noOfEdges);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeMapCopy, noOfEdges);
}
hipDeviceSynchronize();
hipMemcpy(&h_size,d_size,sizeof(unsigned int ), hipMemcpyDeviceToHost);
unsigned int noOfThreads_new = getNoOfThreads(h_size);
unsigned int noOfBlocks_new = (h_size+1024)/noOfThreads_new;
hipLaunchKernelGGL(( edgeCompression), dim3(noOfBlocks_new), dim3(noOfThreads_new), 0, 0, d_edges, d_weights, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_superVertexID, d_edgeMap, d_edgeMapCopy, d_edgeFlagArray, d_superEdgeId, d_edgeIndices, d_pickArray, d_size, d_edgeListSize, d_vertexListSize);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("At line 841 CUDA error: %s\n", hipGetErrorString(error));
}
hipLaunchKernelGGL(( copyArrays), dim3(noOfBlocks_new), dim3(noOfThreads_new), 0, 0, d_edges, d_weights, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_edgeMap, d_edgeMapCopy, d_edgeFlagArray, d_size);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, noOfEdges);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_vertices, noOfVertices);
hipLaunchKernelGGL(( CreateVertexListFlag), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, d_vertices, d_pickArray, noOfEdges);
hipLaunchKernelGGL(( BuildVertexList), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_vertices, d_edges, d_pickArray, d_edgeFlagArray, noOfEdges);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("after build vertex listlast CUDA error: %s\n", hipGetErrorString(error));
}
hipMemcpy(&noOfEdges, d_edgeListSize, sizeof(unsigned int ), hipMemcpyDeviceToHost);
hipMemcpy(&noOfVertices, d_vertexListSize, sizeof(unsigned int ), hipMemcpyDeviceToHost);
printf("for next round, no of edges = %d and no of vertices = %d\n",noOfEdges, noOfVertices);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("last CUDA error: %s\n", hipGetErrorString(error));
}
}
int main (int argc, char** argv)
{
unsigned int noOfMSTEdges = 0;
unsigned long long int finalMSTWeight = 0;
unsigned int i;
parseInputFile(argv[1]);
noOfVerticesOriginal = noOfVertices;
noOfEdgesOriginal = noOfEdges;
omp_set_dynamic(0);
omp_set_num_threads(omp_get_num_procs());
mallocArr();
initialize();
setupPlan();
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
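	/* Run Boruvka rounds until the graph contracts to a single supervertex. */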
do {
boruvka();
}while(noOfVertices > 1);
hipDeviceSynchronize();
gettimeofday(&tv2, NULL);
printf ("Total Execution time = %f seconds\n", (double)(tv2.tv_usec - tv1.tv_usec) / 1000000 + (double)(tv2.tv_sec - tv1.tv_sec));
hipMemcpy(h_MSTOutput, d_MSTOutput, sizeof(unsigned int )*noOfEdgesOriginal, hipMemcpyDeviceToHost);
for(i=0; i<noOfEdgesOriginal; i++) {
if(h_MSTOutput[i] == 1) {
//printf("%d %d\n", edges[i], weights[i]);
finalMSTWeight += weights[i];
noOfMSTEdges++;
}
}
printf("\nNo. of edges in MST [must be equal to (%d-1)]: %d\n", noOfVerticesOriginal, noOfMSTEdges);
printf("Final Weight of resultant MST: %llu\n", finalMSTWeight);
cleanUp();
return 0;
}
| hybrid_with_omp.cu | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cudpp.h>
#include<limits.h>
#include <sys/time.h>
#include <omp.h>
#define NO_OF_THREADS_PER_BLOCK 1024
#define OMP_NUM_THREADS 16
float f;
unsigned int noOfEdges;
unsigned int noOfVertices;
unsigned int *vertices;
unsigned int *edges;
unsigned int *weights;
unsigned int *d_size;
unsigned int *d_edgeListSize;
unsigned int *d_vertexListSize;
unsigned int *segmentedMinScanInput;
unsigned int *d_segmentedMinScanInput;
unsigned int *d_segmentedMinScanOutput;
unsigned int *d_previousIDs;
unsigned int *d_successorArray;
unsigned int *d_successorArrayTemp;
unsigned int *d_indices;
unsigned int *d_edgeMap;
unsigned int *d_edgeMapCopy;
unsigned int *d_edgesCopy;
unsigned int *d_edgeIndices;
unsigned int *d_superVertexID;
unsigned int *d_superEdgeId;
unsigned int *d_MSTOutput;
unsigned int *h_MSTOutput;
unsigned int *d_edges;
unsigned int *d_vertices;
unsigned int *d_weights;
unsigned int *d_edgeFlagArray;
unsigned int *d_vertexFlagArray;
unsigned int noOfEdgesOriginal;
unsigned int noOfVerticesOriginal;
int *d_pickArray;
CUDPPHandle theCudpp;
CUDPPHandle segmentedScanPlan_min;
CUDPPConfiguration segmented_min_scan_config;
CUDPPHandle scanPlan;
CUDPPConfiguration scan_config;
CUDPPHandle sortPlan;
CUDPPConfiguration config_sort;
/* Append vertexid and edge into a single integer of an array*/
__global__ void mergeEdgeAndWeight(unsigned int *d_segmentedMinScanInput, unsigned int *d_vertices, unsigned int *d_weight, unsigned int *d_edges, unsigned int noOfEdges)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges) {
unsigned int temp = d_weight[index];
d_segmentedMinScanInput[index] = (temp<<22) | d_edges[index];
}
}
/* initialise all entries of array pointed by d_array of given size to 0*/
__global__ void initArray(unsigned int *d_Array, unsigned int size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < size) {
d_Array[index] = 0;
}
}
__global__ void initArray1(unsigned int *d_Array, unsigned int size, int t)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < size && index >= t)
d_Array[index] = 0;
}
__global__ void printArr(unsigned int *d_arr, unsigned int size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if (index < size) {
printf("%d ", d_arr[index]);
}
printf("\n");
}
/* creates a flag array for segmented scan. Sets to 1 the index from where outgoing vertex starts*/
__global__ void markSegment(unsigned int *d_edgeFlagArray, unsigned int *d_vertex, unsigned int *d_edges, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
d_edgeFlagArray[d_vertex[index]] = 1;
}
}
/*prints new edge and vertex size*/
__global__ void print(unsigned int *d_edgeListSize, unsigned int *d_vertexListSize)
{
printf("Edges: %d, Vertices %d \n", *d_edgeListSize, *d_vertexListSize);
}
/*creates successor array*/
__global__ void createSuccArray(unsigned int *d_successorArray, unsigned int *d_vertices, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int noOfVertices, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
unsigned int minEdgeIndex;
if(index < noOfVertices) {
//index is same as vertex ID
if (index == noOfVertices-1)
minEdgeIndex = noOfEdges - 1;
else
minEdgeIndex = d_vertices[index+1] - 1; // min value is stored in loc of last neighbour
unsigned int val = d_segmentedMinScanOutput[minEdgeIndex];
//unsigned int minWeight = val >> 22;
unsigned int minVertex = val & (unsigned int)(pow(2.0,22)-1);
d_successorArray[index] = minVertex;
}
}
/*removes cycles from successor array*/
__global__ void eliminateCycles(unsigned int *d_successor, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int succIndex = d_successor[d_successor[index]];
if(index == succIndex) {
if(index < d_successor[index]) {
d_successor[index] = index;
} else {
d_successor[d_successor[index]]= d_successor[index];
}
}
}
}
/* hybrid implementation of markSegment function */
__global__ void markSegment1(unsigned int *d_edgeFlagArray, unsigned int *d_vertex, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices && index > 0) {
d_edgeFlagArray[d_vertex[index]] = 1;
}
}
/*This function is to determine which edges are actually needed*/
__global__ void populatePArray(int *d_pickArray, unsigned int *d_vertices, unsigned int *d_successor, unsigned int *d_preIDs, unsigned int noOfVertices, unsigned int noOfEdges)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges) {
if(d_preIDs[index] != d_successor[d_preIDs[index]]) {
if(d_preIDs[index] < (noOfVertices - 1))
d_pickArray[index] = d_vertices[d_preIDs[index]+1] - 1;
else
d_pickArray[index] = noOfEdges - 1;
}
else
d_pickArray[index] = -1;
}
}
/*This function determines which edges will be part of output*/
__global__ void AppendOutputEdges(int *d_pickArray, unsigned int * d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_MSTOutput, unsigned int *d_edgeMap, unsigned int noOfEdges)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges && d_pickArray[index] >= 0) {
unsigned int edgeid = d_edgeMap[index];
unsigned int prev = 0;
int temp = -1;
unsigned int segmentedOutput = d_segmentedMinScanOutput[d_pickArray[index]];
unsigned int currIndex = d_segmentedMinScanOutput[index];
if(index > 0) {
temp = d_pickArray[index-1];
prev = d_segmentedMinScanOutput[index-1];
}
if(d_pickArray[index] != temp) {
if(currIndex == segmentedOutput) {
d_MSTOutput[edgeid]=1;
}
} else {
if(currIndex != prev && currIndex == segmentedOutput) {
d_MSTOutput[edgeid]=1;
}
}
}
}
/*This function sets each value of array equal to its index*/
__global__ void setIndices(unsigned int *d_arr,unsigned int size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < size)
d_arr[index] = index;
}
/* This function copies data from original successorArray so that it can be used for new computation*/
__global__ void setIndices1(unsigned int *d_arr,unsigned int size, int l)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if (index < size && index >= l)
d_arr[index] = index;
}
__global__ void makeTempSuccCopy(unsigned int *d_successorArray, unsigned int* d_vertex, unsigned int *d_successorArrayTemp, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int t = d_successorArray[index];
d_successorArrayTemp[index] = t;
}
}
/* This function copies data from temporary successorArray so that it can be updated with correct value */
__global__ void updateSuccArray(unsigned int *d_successorArray, unsigned int* d_vertex, unsigned int *d_successorArrayTemp, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int t = d_successorArrayTemp[index];
d_successorArray[index] = t;
}
}
/* This function uses pointer doubling to assign representative id to each vertex*/
__global__ void propagateRepVertexID(unsigned int *d_successorArray, bool *d_isSuccUpdated, unsigned int *d_previousIDs, unsigned int *d_successorArrayTemp, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int successor = d_successorArray[index];
if(successor != d_successorArray[successor]) { //Eindex = 2 and end = 6 and u = 2 and succ[u] = 2
*d_isSuccUpdated=true;
d_successorArrayTemp[index] = d_successorArray[successor];
}
}
}
/* This function iteratively sets s(u) = s(s(u)) and propagates the representative vertex id */
void propagateID(unsigned int noOfBlocks_vertices, unsigned int noOfThreads_vertices)
{
bool succchange;
bool *d_isSuccUpdated;
cudaMalloc(&d_successorArrayTemp, sizeof(int)*noOfVertices);
cudaMalloc((void**)&d_isSuccUpdated, sizeof(bool));
do
{
succchange=false;
cudaMemcpy(d_isSuccUpdated, &succchange, sizeof(bool), cudaMemcpyHostToDevice);
makeTempSuccCopy<<<noOfBlocks_vertices,noOfThreads_vertices>>>(d_successorArray, d_vertices, d_successorArrayTemp, noOfVertices);
propagateRepVertexID<<<noOfBlocks_vertices,noOfThreads_vertices>>>(d_successorArray, d_isSuccUpdated, d_previousIDs,d_successorArrayTemp, noOfVertices);
updateSuccArray<<<noOfBlocks_vertices,noOfThreads_vertices>>>(d_successorArray, d_vertices, d_successorArrayTemp, noOfVertices);
cudaMemcpy(&succchange, d_isSuccUpdated, sizeof(bool), cudaMemcpyDeviceToHost);
}while(succchange);
cudaFree(d_successorArrayTemp);
cudaFree(d_isSuccUpdated);
}
/*This function creates scan flag*/
void __global__ createScanFlag(unsigned int *d_vertexFlagArray, unsigned int *d_successorArray, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices && index > 0)
{
unsigned int prev_val = d_successorArray[index-1];
unsigned int curr_val = d_successorArray[index];
if (prev_val != curr_val) {
d_vertexFlagArray[index] = 1;
}
}
}
/*This function assigns supervertex id to each vertex*/
__global__ void assignSuperVertexID(unsigned int *d_superVertex, unsigned int *d_indices, unsigned int *d_vertexFlagArray,unsigned int *d_previousIDs,unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
d_vertexFlagArray[d_indices[index]] = d_superVertex[index];
}
}
/* This function updates supervertexid */
__global__ void updateSuperVertexID(unsigned int *d_superVertex,unsigned int *d_arr,unsigned int *d_vertexFlagArray, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int newId = d_vertexFlagArray[index];
d_superVertex[index] = newId;
}
}
/* This function removes self edges after successor array is created */
__global__ void removeSelfEdges(unsigned int *d_edges, unsigned int *d_prevIds,unsigned int *d_superVertexID, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges) {
		unsigned int uid = d_superVertexID[d_prevIds[index]]; //because d_prevIds[index] is 1 to 6 but we need 0 to 5
unsigned int vid = d_superVertexID[d_edges[index]];
if(uid == vid) {
d_edges[index]=INT_MAX;
}
}
}
/* This function is to assign new super edge id*/
__global__ void assignSuperEdgeId(unsigned int *d_superEdgeId, unsigned int *d_previousIds, unsigned int *d_superVertexId, unsigned int *d_edge, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges)
{
unsigned int x = d_previousIds[index];
unsigned int id = INT_MAX;
if (x != INT_MAX && d_edge[index] != INT_MAX) {
id = d_superVertexId[x];
}
d_superEdgeId[index] = id;
}
}
/* This function is to compress the edge list*/
__global__ void edgeCompression(unsigned int *d_edges, unsigned int *d_weights, unsigned int *d_vertex, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_superVertexID, unsigned int *d_edgeMap, unsigned int *d_edgeMapCopy, unsigned int *d_edgeFlagArray, unsigned int *d_superEdgeId, unsigned int * d_edgeIndices, int *d_pickArray, unsigned int *d_size, unsigned int *d_edgeListSize, unsigned int *d_vertexListSize)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < *d_size) {
unsigned int id = d_edgeIndices[index];
if(d_superEdgeId[index] != INT_MAX && d_edges[id] != INT_MAX) {
if(index == *d_size-1) {
*d_edgeListSize = index + 1;
*d_vertexListSize = d_superEdgeId[index] + 1;
}
d_segmentedMinScanOutput[index] = d_weights[id];
d_segmentedMinScanInput[index] = d_superVertexID[d_edges[id]];
d_pickArray[index] = d_superEdgeId[index];
d_edgeMapCopy[index] = d_edgeMap[id];
}
}
}
/*This function copies the temporary array to arrays which will be actually used*/
__global__ void copyArrays(unsigned int *d_edges, unsigned int *d_weights, unsigned int *vertex, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_edgeMap, unsigned int *d_edgeMapCopy, unsigned int *d_edgeCopy, unsigned int *d_size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < *d_size) {
unsigned int p = d_segmentedMinScanInput[index];
d_edges[index] = p;
unsigned int wt = d_segmentedMinScanOutput[index];
d_weights[index] = wt;
unsigned int mapVal = d_edgeMapCopy[index];
d_edgeMap[index] = mapVal;
}
}
/*This function determines the new edge list*/
__global__ void makeEdgeList(unsigned int *d_edgeFlagArray, unsigned int *d_edges, unsigned int *d_superEdgeId, unsigned int *d_size, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index == 0) {
d_edgeFlagArray[index] = 1;
} else if(index < noOfEdges && index > 0) {
if(d_superEdgeId[index-1] != INT_MAX && d_superEdgeId[index] == INT_MAX) {
*d_size = index;
}
if(d_superEdgeId[index] > d_superEdgeId[index-1]) {
d_edgeFlagArray[index] = 1;
}
}
}
/*This function helps in creating new vertices list for next iteration*/
__global__ void CreateVertexListFlag(unsigned int *d_edgeFlagArray, unsigned int *d_vertices, int *d_pickArray, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index == 0) {
d_edgeFlagArray[index] = 1;
} else if(index < noOfEdges && index > 0) {
if(d_pickArray[index] > d_pickArray[index-1]) {
d_edgeFlagArray[index] = 1;
}
}
}
/*This function helps to build new vertex list*/
__global__ void BuildVertexList(unsigned int *d_vertices, unsigned int *d_edges, int *d_pickArray, unsigned int *d_edgeFlagArray, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges && d_edgeFlagArray[index] == 1) {
d_vertices[d_pickArray[index]] = index;
}
}
/* Parse the input file to setup our graph
* we set the relevant arrays here
*/
void parseInputFile(char *fileName)
{
unsigned int x,temp;
unsigned int edgeNo, weightOfEdge;
FILE *fp;
fp = fopen(fileName,"r");
printf("\n Parsing Input File: \n");
fscanf(fp,"%d",&noOfVertices);
vertices = (unsigned int *)malloc(sizeof(unsigned int) * noOfVertices);
int i;
for (i=0; i<noOfVertices; i++) {
fscanf(fp,"%d %d",&x, &temp);
vertices[i] = x;
}
fscanf(fp,"%d",&temp);
fscanf(fp,"%d",&noOfEdges);
edges = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges);
weights = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges);
for(i=0; i<noOfEdges; i++) {
fscanf(fp,"%d %d",&edgeNo, &weightOfEdge);
edges[i] = edgeNo;
weights[i] = weightOfEdge;
}
printf("No. of Vertices in Input: %d\n",noOfVertices);
printf("No. of Edges in Input: %d\n", noOfEdges);
fclose(fp);
}
/* this is to setup configuration parameters for various primitives*/
void setupPlan()
{
cudppCreate(&theCudpp);
scan_config.algorithm = CUDPP_SCAN;
scan_config.op = CUDPP_ADD;
scan_config.datatype = CUDPP_UINT;
scan_config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE;
segmented_min_scan_config.algorithm = CUDPP_SEGMENTED_SCAN;
segmented_min_scan_config.op = CUDPP_MIN;
segmented_min_scan_config.datatype = CUDPP_UINT;
segmented_min_scan_config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE;
config_sort.algorithm = CUDPP_SORT_RADIX;
config_sort.datatype = CUDPP_UINT;
config_sort.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_KEY_VALUE_PAIRS;
f = 0.05;
}
/* Dynamically allocate necessary arrays*/
void mallocArr()
{
cudaMalloc(&d_segmentedMinScanInput, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_weights, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_edges, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_vertices, sizeof(unsigned int )*noOfVertices);
cudaMalloc(&d_edgeFlagArray, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_segmentedMinScanOutput, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_successorArray, sizeof(unsigned int )*noOfVertices);
cudaMalloc(&d_previousIDs, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_pickArray, sizeof(int )*noOfEdges);
cudaMalloc(&d_superVertexID, sizeof(unsigned int )*noOfVertices);
cudaMalloc(&d_MSTOutput, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_indices, sizeof(unsigned int )*noOfVertices);
cudaMalloc(&d_vertexFlagArray, sizeof(unsigned int )*noOfVertices);
cudaMalloc(&d_size, sizeof(unsigned int ));
cudaMalloc(&d_superEdgeId, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_edgeIndices, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_edgeListSize, sizeof(unsigned int ));
cudaMalloc(&d_vertexListSize, sizeof(unsigned int ));
cudaMalloc(&d_edgeMapCopy, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_edgeMap, sizeof(unsigned int )*noOfEdges);
h_MSTOutput = (unsigned int *)malloc(sizeof(unsigned int )*noOfEdges);
}
/*Free the dynamically allocated memory. Do other cleanup here*/
void cleanUp()
{
cudaFree(d_edgeIndices);
cudaFree(d_superEdgeId);
cudaFree(d_edgeMap);
cudaFree(d_edgeMapCopy);
cudaFree(d_superVertexID);
cudaFree(d_vertexFlagArray);
cudaFree(d_indices);
cudaFree(d_MSTOutput);
cudaFree(d_previousIDs);
cudaFree(d_pickArray);
cudaFree(d_successorArray);
cudaFree(d_segmentedMinScanOutput);
cudaFree(d_edgeFlagArray);
cudaFree(d_vertices);
cudaFree(d_edges);
cudaFree(d_weights);
cudaFree(d_segmentedMinScanInput);
cudaFree(d_size);
cudaFree(d_edgeListSize);
cudaFree(d_vertexListSize);
cudppDestroy(theCudpp);
free(h_MSTOutput);
free(edges);
free(vertices);
free(weights);
}
/* Do basic initialization*/
void initialize()
{
unsigned int i;
cudaMemcpy(d_vertices, vertices, sizeof(unsigned int)*noOfVertices, cudaMemcpyHostToDevice);
cudaMemcpy(d_edges, edges, sizeof(unsigned int)*noOfEdges, cudaMemcpyHostToDevice);
cudaMemcpy(d_weights, weights, sizeof(unsigned int)*noOfEdges, cudaMemcpyHostToDevice);
unsigned int *temp = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges);
for(i=0; i<noOfEdges; i++)
temp[i] = 0;
cudaMemcpy(d_MSTOutput, temp, sizeof(unsigned int )*noOfEdges, cudaMemcpyHostToDevice);
for(i=0; i<noOfEdges; i++)
temp[i]=i;
cudaMemcpy(d_edgeMap, temp, sizeof(unsigned int)*noOfEdges, cudaMemcpyHostToDevice);
free(temp);
}
/* Helper function to determine no of threads to be used */
unsigned int getNoOfThreads(unsigned int size) {
unsigned int threadsPerBlock;
if (size <= 1024)
threadsPerBlock = size;
else
threadsPerBlock = 1024;
return threadsPerBlock;
}
void boruvka()
{
int t;
unsigned int noOfThreads_edge = getNoOfThreads(noOfEdges);
unsigned int noOfBlocks_edge = (noOfEdges+1024)/noOfThreads_edge;
unsigned int noOfThreads_vertices = getNoOfThreads(noOfVertices);
unsigned int noOfBlocks_vertices = (noOfVertices+1024)/noOfThreads_vertices;
cudaError_t error;
mergeEdgeAndWeight<<<noOfBlocks_edge, noOfThreads_edge>>>(d_segmentedMinScanInput, d_vertices, d_weights, d_edges, noOfEdges);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("0.1 CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
t = noOfEdges * f;
if (noOfEdges >= 200)
{
unsigned int *temp_h_efa = (unsigned int *)malloc(t*sizeof(unsigned int));
initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, noOfEdges,t);
int i;
#pragma omp parallel for
for(i = 0; i<t;i++)
temp_h_efa[i] = 0;
#pragma omp barrier
cudaThreadSynchronize();
cudaMemcpy(d_edgeFlagArray, temp_h_efa, sizeof(unsigned int )*t, cudaMemcpyHostToDevice);
free(temp_h_efa);
}
else
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, noOfEdges);
cudaThreadSynchronize();
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("At line 613 CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
markSegment<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_edgeFlagArray, d_vertices, d_edges, noOfVertices);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("3 CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
cudppPlan(theCudpp, &segmentedScanPlan_min,segmented_min_scan_config, noOfEdges, 1, 0 ); //Make the segmented min scan plan
cudppSegmentedScan(segmentedScanPlan_min, d_segmentedMinScanOutput, d_segmentedMinScanInput, (const unsigned int *)d_edgeFlagArray, noOfEdges);
cudppDestroyPlan(segmentedScanPlan_min);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("CUDA error: %s\n", cudaGetErrorString(error));
// exit(-1);
}
createSuccArray<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_successorArray, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, noOfVertices, noOfEdges);
eliminateCycles<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_successorArray, noOfVertices);
t = noOfEdges * f;
if (noOfEdges >= 200)
{
unsigned int *temp_h_efa = (unsigned int *)malloc(t*sizeof(unsigned int));
initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, noOfEdges,t);
int i;
#pragma omp parallel for
for(i = 0; i<t;i++)
temp_h_efa[i] = 0;
#pragma omp barrier
cudaThreadSynchronize();
cudaMemcpy(d_edgeFlagArray, temp_h_efa, sizeof(unsigned int )*t, cudaMemcpyHostToDevice);
free(temp_h_efa);
}
else
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, noOfEdges);
cudaThreadSynchronize();
markSegment1<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_edgeFlagArray, d_vertices, noOfVertices);
cudppPlan(theCudpp, &scanPlan, scan_config, noOfEdges, 1, 0);
cudppScan(scanPlan, d_previousIDs, d_edgeFlagArray, noOfEdges);
cudppDestroyPlan(scanPlan);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("At line 668 CUDA error: %s\n", cudaGetErrorString(error));
}
t = noOfEdges * f;
if(noOfEdges >= 200)
{
unsigned int *temp_h_pa = (unsigned int *)malloc(t*sizeof(unsigned int));
initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>((unsigned int*)d_pickArray, noOfEdges, t);
int i;
#pragma omp parallel for
for(i = 0; i<t;i++)
temp_h_pa[i] = 0;
#pragma omp barrier
cudaThreadSynchronize();
cudaMemcpy(d_pickArray, temp_h_pa, sizeof(unsigned int )*t, cudaMemcpyHostToDevice);
free(temp_h_pa);
}
else
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>((unsigned int*)d_pickArray, noOfEdges);
cudaThreadSynchronize();
populatePArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_pickArray, d_vertices, d_successorArray, d_previousIDs, noOfVertices, noOfEdges);
AppendOutputEdges<<<noOfBlocks_edge, noOfThreads_edge>>>(d_pickArray, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_MSTOutput, d_edgeMap, noOfEdges);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("At line 698 CUDA error: %s\n", cudaGetErrorString(error));
}
propagateID(noOfBlocks_vertices, noOfThreads_vertices);
t = noOfVertices*f;
if(noOfVertices >= 20)
{
unsigned int *temp_h_setIndices = (unsigned int *)malloc(t*sizeof(unsigned int));
setIndices1<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_indices, noOfVertices, t);
int i;
#pragma omp parallel for
for(i = 0; i<t;i++)
temp_h_setIndices[i] = i;
#pragma omp barrier
cudaThreadSynchronize();
cudaMemcpy(d_indices, temp_h_setIndices, sizeof(unsigned int )*t, cudaMemcpyHostToDevice);
free(temp_h_setIndices);
}
else
setIndices1<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_indices, noOfVertices, 0);
cudaThreadSynchronize();
cudppPlan(theCudpp, &sortPlan, config_sort, noOfVertices, 1, 0);
cudppRadixSort(sortPlan, d_successorArray, d_indices, noOfVertices);
cudppDestroyPlan(sortPlan);
initArray<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_vertexFlagArray,noOfVertices);
createScanFlag<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_vertexFlagArray, d_successorArray,noOfVertices);
cudppPlan(theCudpp, &scanPlan, scan_config, noOfVertices, 1, 0);
cudppScan(scanPlan, d_superVertexID, d_vertexFlagArray, noOfVertices);
cudppDestroyPlan(scanPlan);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("At line 736 CUDA error: %s\n", cudaGetErrorString(error));
}
assignSuperVertexID<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_superVertexID,d_indices,d_vertexFlagArray,d_previousIDs,noOfVertices);
updateSuperVertexID<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_superVertexID,d_indices,d_vertexFlagArray, noOfVertices);
removeSelfEdges<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edges,d_previousIDs,d_superVertexID,noOfEdges);
assignSuperEdgeId<<<noOfBlocks_edge, noOfThreads_edge>>>(d_superEdgeId,d_previousIDs, d_superVertexID, d_edges, noOfEdges);
t = noOfEdges*f;
//printf("noOfVertices = %d and point = %d\n",noOfVertices, t);
if (noOfEdges >= 200)
{
unsigned int *temp_h_setIndices = (unsigned int *)malloc(t*sizeof(unsigned int));
setIndices1<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeIndices, noOfEdges, t);
int i;
#pragma omp parallel for
for(i = 0; i<t;i++)
temp_h_setIndices[i] = i;
#pragma omp barrier
cudaThreadSynchronize();
cudaMemcpy(d_edgeIndices, temp_h_setIndices, sizeof(unsigned int )*t, cudaMemcpyHostToDevice);
free(temp_h_setIndices);
}
else
setIndices1<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeIndices,noOfEdges,0);
cudaThreadSynchronize();
cudppPlan(theCudpp, &sortPlan, config_sort, noOfEdges, 1, 0);
cudppRadixSort(sortPlan, d_superEdgeId, d_edgeIndices, noOfEdges);
cudppDestroyPlan(sortPlan);
t = noOfEdges * f;
if (noOfEdges >= 200)
{
unsigned int *temp_h_efa = (unsigned int *)malloc(t*sizeof(unsigned int));
initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>((unsigned int*)d_edgeFlagArray, noOfEdges, t);
int i;
#pragma omp parallel for
for(i = 0; i<t;i++)
temp_h_efa[i] = 0;
#pragma omp barrier
cudaThreadSynchronize();
cudaMemcpy(d_edgeFlagArray, temp_h_efa, sizeof(unsigned int )*t, cudaMemcpyHostToDevice);
free(temp_h_efa);
}
else
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray,noOfEdges);
cudaThreadSynchronize();
unsigned int h_size = noOfEdges + 1;
cudaMemcpy(d_size,&h_size,sizeof(unsigned int ), cudaMemcpyHostToDevice);
makeEdgeList<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, d_edges, d_superEdgeId, d_size, noOfEdges);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("At line 793 CUDA error: %s\n", cudaGetErrorString(error));
}
unsigned int zero = 0;
cudaMemcpy(d_edgeListSize, &zero, sizeof(unsigned int ), cudaMemcpyHostToDevice);
cudaMemcpy(d_vertexListSize, &zero, sizeof(unsigned int ), cudaMemcpyHostToDevice);
t = noOfEdges * f;
if (noOfEdges >= 200)
{
unsigned int *temp_arr = (unsigned int *)malloc(t*sizeof(unsigned int));
initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>(d_segmentedMinScanInput, noOfEdges,t);
initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>(d_segmentedMinScanOutput, noOfEdges, t);
initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>((unsigned int*)d_pickArray, noOfEdges, t);
initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeMapCopy, noOfEdges, t);
int i;
#pragma omp parallel for
for(i = 0; i<t;i++)
temp_arr[i] = 0;
#pragma omp barrier
cudaThreadSynchronize();
cudaMemcpy(d_segmentedMinScanInput, temp_arr, sizeof(unsigned int )*t, cudaMemcpyHostToDevice);
cudaMemcpy(d_segmentedMinScanOutput, temp_arr, sizeof(unsigned int )*t, cudaMemcpyHostToDevice);
cudaMemcpy(d_pickArray, temp_arr, sizeof(unsigned int )*t, cudaMemcpyHostToDevice);
cudaMemcpy(d_edgeMapCopy, temp_arr, sizeof(unsigned int )*t, cudaMemcpyHostToDevice);
free(temp_arr);
}
else
{
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_segmentedMinScanInput, noOfEdges);
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_segmentedMinScanOutput, noOfEdges);
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>((unsigned int*)d_pickArray, noOfEdges);
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeMapCopy, noOfEdges);
}
cudaThreadSynchronize();
cudaMemcpy(&h_size,d_size,sizeof(unsigned int ), cudaMemcpyDeviceToHost);
unsigned int noOfThreads_new = getNoOfThreads(h_size);
unsigned int noOfBlocks_new = (h_size+1024)/noOfThreads_new;
edgeCompression<<<noOfBlocks_new, noOfThreads_new>>>(d_edges, d_weights, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_superVertexID, d_edgeMap, d_edgeMapCopy, d_edgeFlagArray, d_superEdgeId, d_edgeIndices, d_pickArray, d_size, d_edgeListSize, d_vertexListSize);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("At line 841 CUDA error: %s\n", cudaGetErrorString(error));
}
copyArrays<<<noOfBlocks_new, noOfThreads_new>>>(d_edges, d_weights, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_edgeMap, d_edgeMapCopy, d_edgeFlagArray, d_size);
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, noOfEdges);
initArray<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_vertices, noOfVertices);
CreateVertexListFlag<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, d_vertices, d_pickArray, noOfEdges);
BuildVertexList<<<noOfBlocks_edge, noOfThreads_edge>>>(d_vertices, d_edges, d_pickArray, d_edgeFlagArray, noOfEdges);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("after build vertex listlast CUDA error: %s\n", cudaGetErrorString(error));
}
cudaMemcpy(&noOfEdges, d_edgeListSize, sizeof(unsigned int ), cudaMemcpyDeviceToHost);
cudaMemcpy(&noOfVertices, d_vertexListSize, sizeof(unsigned int ), cudaMemcpyDeviceToHost);
printf("for next round, no of edges = %d and no of vertices = %d\n",noOfEdges, noOfVertices);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("last CUDA error: %s\n", cudaGetErrorString(error));
}
}
int main (int argc, char** argv)
{
unsigned int noOfMSTEdges = 0;
unsigned long long int finalMSTWeight = 0;
unsigned int i;
parseInputFile(argv[1]);
noOfVerticesOriginal = noOfVertices;
noOfEdgesOriginal = noOfEdges;
omp_set_dynamic(0);
omp_set_num_threads(omp_get_num_procs());
mallocArr();
initialize();
setupPlan();
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
do {
boruvka();
}while(noOfVertices > 1);
cudaThreadSynchronize();
gettimeofday(&tv2, NULL);
printf ("Total Execution time = %f seconds\n", (double)(tv2.tv_usec - tv1.tv_usec) / 1000000 + (double)(tv2.tv_sec - tv1.tv_sec));
cudaMemcpy(h_MSTOutput, d_MSTOutput, sizeof(unsigned int )*noOfEdgesOriginal, cudaMemcpyDeviceToHost);
for(i=0; i<noOfEdgesOriginal; i++) {
if(h_MSTOutput[i] == 1) {
//printf("%d %d\n", edges[i], weights[i]);
finalMSTWeight += weights[i];
noOfMSTEdges++;
}
}
printf("\nNo. of edges in MST [must be equal to (%d-1)]: %d\n", noOfVerticesOriginal, noOfMSTEdges);
printf("Final Weight of resultant MST: %llu\n", finalMSTWeight);
cleanUp();
return 0;
}
|
c8fed9f0fe2681108bfddfb44c0b847fda2b4184.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
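/* Compute the constant ("fixed") term for a Poisson-style image blend: for every masked target
 * pixel this is the target value minus the weighted 16-tap neighbourhood average of the target,
 * plus the weighted background values at taps that fall outside the target or outside the mask
 * (clamped to the background bounds); num = 24 is the sum of the stencil weights. */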
__global__ void CalculateFixed( const float *background, const float *target, const float *mask, float *fixed, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ){
const int dir[16][2] = {{-2, -2}, {0, -2}, {2, -2},
{-1, -1}, {0, -1}, {1, -1},
{-2, 0}, {-1, 0}, {1, 0}, {2, 0},
{-1, 1}, {0, 1}, {1, 1},
{-2, 2}, {0, 2}, {2, 2}};
const int coef[16] = { 1, 1, 1,
2, 2, 2,
1, 2, 2, 1,
2, 2, 2,
1, 1, 1};
const int num = 24;
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt * yt + xt;
if (yt < ht && xt < wt && mask[curt] > 127.0f){
float sum[3] = {0}, bsum[3] = {0};
for (int i=0; i<16; i++){
int dxt = xt + dir[i][0];
int dyt = yt + dir[i][1];
int dcurt = wt * dyt + dxt;
int dxb = ox + dxt;
int dyb = oy + dyt;
if (dxt >= 0 && dxt < wt && dyt >= 0 && dyt < ht){
sum[0] += target[dcurt*3 + 0] * coef[i];
sum[1] += target[dcurt*3 + 1] * coef[i];
sum[2] += target[dcurt*3 + 2] * coef[i];
}
else {
sum[0] += target[curt*3 + 0] * coef[i];
sum[1] += target[curt*3 + 1] * coef[i];
sum[2] += target[curt*3 + 2] * coef[i];
}
if (dxt < 0 || dxt >= wt || dyt < 0 || dyt >= ht || mask[dcurt] < 127.0f){
dxb = dxb < 0 ? 0:
dxb >= wb? wb-1: dxb;
dyb = dyb < 0 ? 0:
dyb >= hb? hb-1: dyb;
int dcurb = wb * dyb + dxb;
bsum[0] += background[dcurb*3 + 0] * coef[i];
bsum[1] += background[dcurb*3 + 1] * coef[i];
bsum[2] += background[dcurb*3 + 2] * coef[i];
}
}
fixed[curt*3+0] = target[curt*3+0] - sum[0] / num + bsum[0] / num;
fixed[curt*3+1] = target[curt*3+1] - sum[1] / num + bsum[1] / num;
fixed[curt*3+2] = target[curt*3+2] - sum[2] / num + bsum[2] / num;
}
} | c8fed9f0fe2681108bfddfb44c0b847fda2b4184.cu | #include "includes.h"
__global__ void CalculateFixed( const float *background, const float *target, const float *mask, float *fixed, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ){
const int dir[16][2] = {{-2, -2}, {0, -2}, {2, -2},
{-1, -1}, {0, -1}, {1, -1},
{-2, 0}, {-1, 0}, {1, 0}, {2, 0},
{-1, 1}, {0, 1}, {1, 1},
{-2, 2}, {0, 2}, {2, 2}};
const int coef[16] = { 1, 1, 1,
2, 2, 2,
1, 2, 2, 1,
2, 2, 2,
1, 1, 1};
const int num = 24;
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt * yt + xt;
if (yt < ht && xt < wt && mask[curt] > 127.0f){
float sum[3] = {0}, bsum[3] = {0};
for (int i=0; i<16; i++){
int dxt = xt + dir[i][0];
int dyt = yt + dir[i][1];
int dcurt = wt * dyt + dxt;
int dxb = ox + dxt;
int dyb = oy + dyt;
if (dxt >= 0 && dxt < wt && dyt >= 0 && dyt < ht){
sum[0] += target[dcurt*3 + 0] * coef[i];
sum[1] += target[dcurt*3 + 1] * coef[i];
sum[2] += target[dcurt*3 + 2] * coef[i];
}
else {
sum[0] += target[curt*3 + 0] * coef[i];
sum[1] += target[curt*3 + 1] * coef[i];
sum[2] += target[curt*3 + 2] * coef[i];
}
if (dxt < 0 || dxt >= wt || dyt < 0 || dyt >= ht || mask[dcurt] < 127.0f){
dxb = dxb < 0 ? 0:
dxb >= wb? wb-1: dxb;
dyb = dyb < 0 ? 0:
dyb >= hb? hb-1: dyb;
int dcurb = wb * dyb + dxb;
bsum[0] += background[dcurb*3 + 0] * coef[i];
bsum[1] += background[dcurb*3 + 1] * coef[i];
bsum[2] += background[dcurb*3 + 2] * coef[i];
}
}
fixed[curt*3+0] = target[curt*3+0] - sum[0] / num + bsum[0] / num;
fixed[curt*3+1] = target[curt*3+1] - sum[1] / num + bsum[1] / num;
fixed[curt*3+2] = target[curt*3+2] - sum[2] / num + bsum[2] / num;
}
} |
affd5aba3c825a43e9b6547be6c7e76ae091f1b9.hip | // !!! This is a file automatically generated by hipify!!!
//#include <hip/hip_runtime.h>
//#include <stdio.h>
//#include <stdlib.h>
//#include <errno.h>
//#include <math.h>
#include "rte.h"
//#include <pthread.h>
#include "complex_arith.cu"
extern Geometry *geom;
extern Phantom *phan;
extern Source *beam_src;
extern complex_double *diag_terms_host;
extern complex_double *sph_harm;
extern Info_Stat *info_stat_host;
extern SHORT nL;
extern int nTerms;
__constant__ Info_Stat info_stat;
__constant__ SHORT nL_dev;
__constant__ complex_double diag_terms_dev[MAX_TISS_NUM*MAX_NL*MAX_NL*MAX_NL*MAX_NL];
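/* Per-run data kept in device constant memory: the static geometry/optics info, the maximum
 * spherical-harmonic order, and the precomputed diagonal operator terms indexed as
 * [cnt][cntp][tissue type], mirroring the host-side copies declared above. */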
/* Just think of the vox indices as (blk_dep*BLK_SIZE + dep + info_stat.bounZ, blk_row*BLK_SIZE + row + info_stat.bounY, blk_col * BLK_SIZE + col + info_stat.nX) */
__device__ int get_voxind_spind(int blk_dep, int blk_row,int blk_col, int dep, int row, int col, int cnt){
return VOX_TO_SPIND( ((blk_dep + dep + info_stat.bounZ)*(2*info_stat.bounX + info_stat.nX) * (2*info_stat.bounY + info_stat.nY) + (blk_row*BLK_SIZE + row + info_stat.bounY)* (2*info_stat.bounX + info_stat.nX) + (blk_col * BLK_SIZE + col + info_stat.bounX)), cnt, (nL_dev+1)*(nL_dev+1));
}
__device__ int get_vind_phanind(int dep, int row, int col){
return ((info_stat.bounZ + dep) * (info_stat.nX + 2 * info_stat.bounX ) * (info_stat.nY + 2 * info_stat.bounY ) /* reached the correct layer */ + ( info_stat.bounY + row)* (info_stat.nX + 2 * info_stat.bounX ) + (info_stat.bounX + col));
}
__device__ int get_voxind_phanind(int blk_dep, int blk_row,int blk_col,int blk_offset_i, int blk_offset_j, int blk_offset_k, int dep, int row, int col){
return (info_stat.bounZ + blk_dep + blk_offset_i * BLK_SIZE+ dep) * (info_stat.nX + 2 * info_stat.bounX ) * (info_stat.nY + 2 * info_stat.bounY ) /* reached the correct layer */ + (blk_row * BLK_SIZE + info_stat.bounY + blk_offset_j * BLK_SIZE + row)* (info_stat.nX + 2 * info_stat.bounX ) + (info_stat.bounX + blk_col * BLK_SIZE + blk_offset_k * BLK_SIZE + col);
}
__device__ int get_ind_phanind(int dep, int row, int col){
return dep* (2*info_stat.boun_blk_sizeX + BLK_SIZE)* (BLK_SIZE + 2*info_stat.boun_blk_sizeY) + row* (BLK_SIZE + 2*info_stat.boun_blk_sizeX) + col;
}
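/* Accumulate the total attenuation along the straight line from cor_src to cor_dest (scaled by the
 * physical distance r): the ray is walked one voxel-boundary crossing at a time, the alpha_* values
 * track the parametric distance already covered along each axis, and every traversed voxel
 * contributes its mu_tot weighted by the length of the path segment lying inside it. */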
__device__ complex_double
mu_int (COR *cor_src, COR *cor_dest, float r, Info_Dyn info_dyn)
{
	complex_double mu_tot_path; /* accumulated attenuation along the ray; complex to match the return type */
mu_tot_path = 0.0 + 0.0*I;
float alpha_x_curr, alpha_y_curr, alpha_z_curr;
float alpha_xinc, alpha_yinc, alpha_zinc;
SHORT i_curr, j_curr, k_curr;
float alpha_prev;
byte flag = 1;
alpha_x_curr = 0;
alpha_y_curr = 0;
alpha_z_curr = 0;
alpha_prev = 0;
SHORT x_inc_end,y_inc_end,z_inc_end;
i_curr = cor_src->i;
j_curr = cor_src->j;
k_curr = cor_src->k;
if (cor_dest->i != cor_src->i)
alpha_zinc = fabs (0.5 / (cor_dest->i - cor_src->i)); // The initial increment along the z axis.
else
alpha_zinc = INF;
if (cor_dest->j != cor_src->j)
alpha_yinc = fabs (0.5 / (cor_dest->j - cor_src->j));
else
alpha_yinc = INF;
if (cor_dest->k != cor_src->k)
alpha_xinc = fabs (0.5 / (cor_dest->k - cor_src->k));
else
alpha_xinc = INF;
#if 1
while (flag == 1 && alpha_prev < 1) // Hack for now to avoid infinite loops
{
if (alpha_z_curr + alpha_zinc <= alpha_x_curr + alpha_xinc && alpha_z_curr + alpha_zinc <= alpha_y_curr + alpha_yinc)
{
alpha_z_curr += alpha_zinc;
if ( i_curr == cor_src->i)
alpha_zinc *=2; // We have taken the first step along the z axis, which was half a voxel. Now every step will be one voxel.
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * (alpha_z_curr - alpha_prev);
i_curr = (cor_src->i < cor_dest->i) ? i_curr + 1 : i_curr - 1;
alpha_prev = alpha_z_curr;
if (i_curr == cor_dest->i && j_curr == cor_dest->j && k_curr == cor_dest->k)
{
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * alpha_zinc/2.0;
flag = 0;
return mu_tot_path;
}
}
else if (alpha_y_curr + alpha_yinc < alpha_z_curr + alpha_zinc && alpha_y_curr + alpha_yinc <= alpha_x_curr + alpha_xinc )
{
alpha_y_curr += alpha_yinc;
if ( j_curr == cor_src->j)
alpha_yinc *=2;
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * (alpha_y_curr - alpha_prev);
j_curr = (cor_src->j < cor_dest->j) ? j_curr + 1 : j_curr - 1;
alpha_prev = alpha_y_curr;
if (i_curr == cor_dest->i && j_curr == cor_dest->j && k_curr == cor_dest->k)
{
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * alpha_yinc/2.0;
flag = 0;
return mu_tot_path;
}
}
else if (alpha_x_curr + alpha_xinc < alpha_y_curr + alpha_yinc && alpha_x_curr + alpha_xinc < alpha_z_curr + alpha_zinc )
{
alpha_x_curr += alpha_xinc;
if ( k_curr == cor_src->k)
alpha_xinc *=2;
mu_tot_path = mu_tot_path + (info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]]) * r * (alpha_x_curr - alpha_prev);
k_curr = (cor_src->k < cor_dest->k) ? k_curr + 1 : k_curr - 1;
alpha_prev = alpha_x_curr;
if (i_curr == cor_dest->i && j_curr == cor_dest->j && k_curr == cor_dest->k)
{
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * alpha_xinc/2.0;
flag = 0;
return mu_tot_path;
}
}
}
#endif
return mu_tot_path;
}
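/* Apply the precomputed diagonal (self-voxel) operator: for each voxel in the current block of
 * layers, every output spherical-harmonic coefficient is the sum over source coefficients weighted
 * by diag_terms_dev for that voxel's tissue type; with flag == 0 only the l = 0 term is used. */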
__global__ void compute_diagonal_abs (doublecomplex *src_dist,doublecomplex *out_dist,Info_Dyn info_dyn, int cnt,SHORT block_dep, SHORT layer_start, SHORT flag)
{
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
int sp_ind, sp_ind_src, r_ind;
SHORT cntp;
SHORT nL_tmp;
flt_doub cm_tmp = 1.0;
nL_tmp = (flag == 0) ? 0 : nL_dev;
// cm_tmp = (flag == 0) ? 1 : info_stat.cm
r_ind = get_vind_phanind(block_dep + layer_start + i, block_row * BLK_SIZE + j, block_col * BLK_SIZE+ k);
for(cnt=0; cnt < (nL_tmp+1)*(nL_tmp+1); cnt++){
sp_ind = get_voxind_spind(block_dep - info_stat.bounZ, block_row, block_col ,i,j,k,cnt);
out_dist[sp_ind] = 0 + 0*I;
for(cntp=0; cntp<(nL_tmp+1)*(nL_tmp+1) ; cntp++){
sp_ind_src = get_voxind_spind(block_dep + layer_start, block_row, block_col,i,j,k,cntp);
out_dist[sp_ind] = out_dist[sp_ind] + (1.0/cm_tmp)*diag_terms_dev[cnt * (MAX_NL) * MAX_NL * MAX_TISS_NUM + cntp * MAX_TISS_NUM + info_dyn.tiss_type[r_ind]] * src_dist[sp_ind_src];
}
}
}
#if 0
//extern __shared__ char array_tisstype[];
__global__ void compute_subvox_2(complex_double* src_dist_dev,complex_double* out_dist_dev, Info_Dyn info_dyn, complex_double* sph_harm_dev, SHORT cnt, COR subvox_src,COR subvox_dest, flt_doub dz_sub, flt_doub dy_sub, flt_doub dx_sub, SHORT blk_dep, SHORT start_layer){
SHORT block_row = blockIdx.y;
SHORT block_col = blockIdx.x;
SHORT i = threadIdx.z;
SHORT j = threadIdx.y;
SHORT k = threadIdx.x;
SHORT cntp;
complex_double out_tmp, tmp;
COR cor_src, cor_dest;
SHORT ip, jp, kp;
int sp_ind;
flt_doub theta,phi,dist,dx,dy,dz;
int sp_ind_src;
#if 0
cor_src.i = i + info_stat.boun_blk_sizeZ;
cor_src.j = j + info_stat.boun_blk_sizeY;
cor_src.k = k + info_stat.boun_blk_sizeX;
#else
cor_src.i = i + blk_dep + start_layer;
cor_src.j = j + block_row*BLK_SIZE;
cor_src.k = k + block_col*BLK_SIZE;
#endif
// __shared__ complex_double src_tmp[BLK_SRC_SIZE];
#if 0
int blk_offset_i, blk_offset_j,blk_offset_k;
byte *tisstype_tmp = (byte *) array_tisstype;
for(blk_offset_i=0; blk_offset_i< 1 + 2*info_stat.boun_blkZ; blk_offset_i++){
for(blk_offset_j=0; blk_offset_j< 1 + 2*info_stat.boun_blkY; blk_offset_j++){
for(blk_offset_k=0; blk_offset_k< 1 + 2*info_stat.boun_blkX; blk_offset_k++){
tisstype_tmp[get_ind_phanind(blk_offset_i*BLK_SIZE + i, blk_offset_j*BLK_SIZE + j, blk_offset_k*BLK_SIZE + k)] = info_dyn.tiss_type[get_voxind_phanind(blk_dep, block_row, block_col,blk_offset_i - info_stat.boun_blkZ,blk_offset_j - info_stat.boun_blkY,blk_offset_k - info_stat.boun_blkX,i,j,k)];
// src_tmp[get_ind_phanind(blk_offset_i*BLK_SIZE + i, blk_offset_j*BLK_SIZE + j, blk_offset_k*BLK_SIZE + k)] = src_dist_dev[VOX_TO_SPIND(get_voxind_phanind(blk_dep, block_row, block_col,(blk_offset_i - info_stat.bounZ/BLK_SIZE),(blk_offset_j - info_stat.bounY/BLK_SIZE),(blk_offset_k - info_stat.bounX/BLK_SIZE),i,j,k),cntp,info_stat.no_vox)];
}
}
}
__syncthreads();
#endif
out_tmp = 0 + 0*I;
flt_doub sub_dist;
for(ip= i - info_stat.subbounZ; ip <= i + info_stat.subbounZ; ip++){
dz = -(ip-i)*info_stat.delZ + dz_sub;
for(jp= j - info_stat.subbounY; jp <= j + info_stat.subbounY; jp++){
dy = -(jp-j)*info_stat.delY + dy_sub;
for(kp= k - info_stat.subbounX; kp <= k + info_stat.subbounX; kp++){
dx = -(kp-k)*info_stat.delX + dx_sub;
dist = sqrt((i-ip)*(i-ip)*info_stat.delZ*info_stat.delZ + (j-jp)*(j-jp)*info_stat.delY*info_stat.delY + (k-kp)*(k-kp)*info_stat.delX*info_stat.delX);
if( dist <= info_stat.sub_thresh && ( i != ip || j != jp || k != kp)){
sub_dist = sqrt(dx*dx + dy*dy + dz*dz);
#if 0
cor_dest.i = ip + info_stat.boun_blk_sizeZ;
cor_dest.j = jp + info_stat.boun_blk_sizeY;
cor_dest.k = kp + info_stat.boun_blk_sizeX;
#else
cor_dest.i = ip +blk_dep + start_layer;
cor_dest.j = jp + block_row*BLK_SIZE;
cor_dest.k = kp + block_col*BLK_SIZE;
#endif
theta = atan(sqrt(dx*dx + dy*dy)/dz );
phi = atan2(dy,dx);
for(cnt=0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(blk_dep - info_stat.bounZ, block_row, block_col,i,j,k,cnt);
tmp = 0 + 0*I;
for(cntp=0; cntp< (nL_dev+1)*(nL_dev+1); cntp++){
sp_ind_src = get_voxind_spind(blk_dep + start_layer, block_row, block_col,ip,jp,kp,cntp);
tmp = tmp + src_dist_dev[sp_ind_src] * sph_harm_dev[SPH_HARM_IND(cntp,theta,phi)];
}
tmp = tmp * ~sph_harm_dev[SPH_HARM_IND(cnt,theta,phi)];
tmp = tmp* cexp_dev(-mu_int(cor_src, cor_dest, subvox_src, subvox_dest,sub_dist, info_dyn )); ;//cexp_dev(-(1.01+0.0*I)*sub_dist);// cexp_dev(-mu_int(cor_src, cor_dest, subvox_src, subvox_dest,sub_dist, tisstype_tmp,info_dyn )); //cexp_dev(-(1.01+0.0*I)*dist)
tmp = tmp * (1.0/( info_stat.cm * sub_dist*sub_dist * __powf(__int2float_ru(info_stat.sub_vox),6.0)));
out_dist_dev[sp_ind] = out_dist_dev[sp_ind] + tmp;
//out_tmp = out_tmp + tmp;
}
}
}
}
}
__syncthreads();
}
#endif
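/*
 * Kernel: compute_propabs
 * Propagation/absorption step: for each destination voxel it sums the
 * contribution of every other voxel closer than prop_thresh.  The source
 * coefficients are expanded in spherical harmonics along the connecting
 * direction (theta, phi), attenuated by exp(-mu_int(...)) along the line
 * from source to destination voxel, and weighted by 1/(cm_tmp * dist^2).
 * With flag == 0 only the l = 0 term is evaluated.
 */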
__global__ void compute_propabs(complex_double *src_dist_dev, complex_double *out_dist_dev, Info_Dyn info_dyn,complex_double *sph_harm_dev,SHORT cnt, SHORT blk_dep, SHORT start_layer, SHORT flag){ // If flag =1, only then evaluate all the other spherical harmonics, else nl = 0.
SHORT block_row = blockIdx.y;
SHORT block_col = blockIdx.x;
SHORT i = threadIdx.z;
SHORT j = threadIdx.y;
SHORT k = threadIdx.x;
int sp_ind;
COR *cor_src, *cor_dest;
SHORT cntp;
flt_doub theta,phi,dist,dx,dy,dz;
SHORT ip,jp,kp;
int sp_ind_src;
int nL_tmp;
flt_doub cm_tmp = 1.0;
cor_src = (COR*)malloc(sizeof(COR));
cor_dest = (COR*)malloc(sizeof(COR));
complex_double tmp;
nL_tmp = (flag == 0) ? 0 : nL_dev;
cor_src->i = i + blk_dep + start_layer;
cor_src->j = j + block_row*BLK_SIZE;
cor_src->k = k + block_col*BLK_SIZE;
//int sp_ind2;
for (ip = 0; ip < info_stat.nZ ; ip++){
dz = -(ip-cor_src->i)*info_stat.delZ;
for (jp = 0; jp < info_stat.nY ; jp++){
dy = -(jp-cor_src->j)*info_stat.delY;
for (kp = 0; kp < info_stat.nX; kp++){
dx = -(kp-cor_src->k)*info_stat.delX;
dist = sqrt(dx*dx + dy*dy + dz*dz);
if((ip != cor_src->i || jp != cor_src->j || kp != cor_src->k) && dist < info_stat.prop_thresh){
theta = acos(dz/dist );
if(theta < 0)
theta = theta + M_PI;
phi = atan2(dy,dx);
// if(phi < 0)
// phi = phi + 2*M_PI;
#if 1
cor_dest->i = ip;
cor_dest->j = jp;
cor_dest->k = kp;
#endif
for(cnt = 0; cnt < (nL_tmp+1)*(nL_tmp+1); cnt++){
sp_ind = get_voxind_spind(blk_dep - info_stat.bounZ, block_row, block_col,i,j,k,cnt);
tmp = 0 + 0*I;
for(cntp=0; cntp< (nL_tmp+1)*(nL_tmp+1); cntp++){
sp_ind_src = get_voxind_spind(0, 0, 0,ip,jp,kp,cntp);
tmp = tmp + src_dist_dev[sp_ind_src]*sph_harm_dev[SPH_HARM_IND(cntp,theta,phi)];
}
tmp = tmp * ~sph_harm_dev[SPH_HARM_IND(cnt,theta,phi)];
tmp = tmp * cexp_dev(-mu_int(cor_src, cor_dest, dist, info_dyn )); //cexp_dev(-1*(1.01 + 0*I)*dist); // have commented this line
// tmp = tmp * cexp_dev(-1*(1.01 + 0*I)*dist);
tmp = tmp * (1.0/(cm_tmp*dist*dist));
out_dist_dev[sp_ind] = out_dist_dev[sp_ind] + tmp;
}
}
}
}
}
free(cor_src);
free(cor_dest);
__syncthreads();
}
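/* Kernel: scale_dist_dev -- multiplies every spherical-harmonic coefficient
 * of W in the current layer block by scale_fac (the voxel volume
 * delX*delY*delZ when called from prop_abs). */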
__global__ void scale_dist_dev (doublecomplex *W,double scale_fac,SHORT cnt,SHORT block_dep )
{
int sp_ind;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(cnt=0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep , block_row , block_col ,i,j,k,cnt);
W[sp_ind] = W[sp_ind]*scale_fac;
}
}
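/* Kernel: prop_scat_dev -- scattering step: each coefficient of order l is
 * multiplied by g^l and by the scattering coefficient mu_sc of the local
 * tissue type. */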
__global__ void prop_scat_dev (doublecomplex *out_dist, Info_Dyn info_dyn, short layer_start, SHORT block_dep)
{
int sp_ind,cnt,l,r_ind;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
r_ind = get_vind_phanind(block_dep + layer_start + i, block_row * BLK_SIZE + j, block_col * BLK_SIZE+ k);
for (cnt = 0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep, block_row, block_col,i,j,k,cnt);
l = (int) sqrtf(cnt);
out_dist[sp_ind] = pow (__int2double_rn(info_stat.g),__int2double_rn(l)) * out_dist[sp_ind] * info_stat.mu_sc[info_dyn.tiss_type[r_ind]] ;
}
__syncthreads();
}
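/* Kernel: write_dist_dev -- overwrites every coefficient of W in the block
 * with the constant value val (used by the compiled-out test block in
 * prop_abs to initialise a constant distribution). */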
__global__ void write_dist_dev (doublecomplex *W,doublecomplex val,SHORT cnt,SHORT block_dep, SHORT layer_start)
{
int sp_ind;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(cnt=0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep - info_stat.bounZ + layer_start, block_row, block_col,i,j,k,cnt);
W[sp_ind] = val;
}
}
__global__ void copy_dist_dev (doublecomplex *W1,doublecomplex *W2)
{
int sp_ind;
int cnt=0;
int block_dep=0;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(block_dep = 0; block_dep < info_stat.nZ; block_dep = block_dep + BLK_SIZE){
for (cnt = 0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep - info_stat.bounZ, block_row, block_col,i,j,k,cnt);
W2[sp_ind] = W1[sp_ind];
__syncthreads();
}
}
}
__global__ void add_dist_dev (doublecomplex *W1,doublecomplex *W2, doublecomplex *out )
{
int sp_ind,cnt,block_dep;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(block_dep = 0; block_dep < info_stat.nZ; block_dep = block_dep + BLK_SIZE){
for (cnt = 0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep -info_stat.bounZ, block_row, block_col,i,j,k,cnt);
out[sp_ind] = W1[sp_ind] + W2[sp_ind];
}
}
}
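/* Kernel: compute_sph_coord -- tabulates the angular grid used for the
 * self-voxel integration: ANG_RES samples of theta in [0, pi) and of phi in
 * [0, 2*pi), plus the corresponding Cartesian unit direction vectors. */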
__global__ void compute_sph_coord(flt_doub* theta_self, flt_doub* phi_self, flt_doub* sph_x, flt_doub* sph_y, flt_doub* sph_z, int theta_blk, int phi_blk)
{
int theta_count = threadIdx.x + theta_blk;
int phi_count = threadIdx.y + phi_blk;
int omega_count;
omega_count = theta_count*ANG_RES + phi_count;
theta_self[omega_count] = theta_count * M_PI / ANG_RES ;
phi_self[omega_count] = phi_count * 2.0*M_PI / ANG_RES ;
sph_x[omega_count] = cos(phi_count * 2.0*M_PI / ANG_RES) * sin(theta_count * M_PI / ANG_RES);
sph_y[omega_count] = sin(phi_count * 2.0*M_PI / ANG_RES) * sin(theta_count * M_PI / ANG_RES) ;
sph_z[omega_count] = cos(theta_count * M_PI / ANG_RES) ;
}
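/* Kernel: compute_diag_selfsub -- for one direction omega_count, each thread
 * handles one sub-voxel: it finds where the ray from the sub-voxel centre
 * along that direction exits the voxel cube (testing the x, y and z face
 * pairs) and stores (1 - exp(-mu_tot * dist_self)) * sin(theta) in
 * fact_self_vox. */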
__global__ void compute_diag_selfsub(complex_double *fact_self_vox, flt_doub *sph_x, flt_doub *sph_y, flt_doub *sph_z, flt_doub *theta_self, int omega_count, SHORT tiss_num, int blk_dep)
{
int blk_row = blockIdx.y;
int blk_col = blockIdx.x;
int z_ind = threadIdx.z + blk_dep;
int y_ind = threadIdx.y +blk_row* BLK_SELF_SUB_VOX;
int x_ind = threadIdx.x + blk_col* BLK_SELF_SUB_VOX;
int face_calc;
int face = 1;
flt_doub face_x, face_y, face_z, cube_x, cube_y, cube_z, dist_self ;
//int r_ind_self = (threadIdx.z + blk_dep) * (info_stat.self_sub_vox)*(info_stat.self_sub_vox) + (threadIdx.y +blk_row* BLK_SELF_SUB_VOX) * info_stat.self_sub_vox + (threadIdx.x + blk_col* BLK_SELF_SUB_VOX);
int r_ind_self = (z_ind) * (info_stat.self_sub_vox)*(info_stat.self_sub_vox) + (y_ind) * info_stat.self_sub_vox + (x_ind);
flt_doub ii_self = -info_stat.self_sub_vox/2.0 +0.5 + z_ind;
flt_doub jj_self = -info_stat.self_sub_vox/2.0 +0.5 + y_ind;
flt_doub kk_self = -info_stat.self_sub_vox/2.0 +0.5 + x_ind;
face_x = 0;
face_y = 0;
face_z = 0;
cube_x = 0;
cube_y = 0;
cube_z = 0;
if (sph_x[omega_count] != 0.0){
for ( face_calc = 0; face_calc <2; face_calc++){
face_x = face_calc ==0 ? face:-face;
face_y = (face_x - ii_self*2.0/info_stat.self_sub_vox) * sph_y[omega_count]/ sph_x[omega_count] + jj_self*2.0/info_stat.self_sub_vox;
face_z = (face_x - ii_self*2.0/info_stat.self_sub_vox) * sph_z[omega_count]/ sph_x[omega_count] + kk_self*2.0/info_stat.self_sub_vox;
if (face_x <= face && face_x >= -face && face_y <=face && face_y >= -face && face_z <= face && face_z >= -face && sph_x[omega_count] * face_x >=0){
cube_x = face_x;
cube_y = face_y;
cube_z = face_z;
}
}
}
#if 1
if(sph_y[omega_count] != 0.0){
for ( face_calc = 0; face_calc <2; face_calc++){
face_y = face_calc ==0 ? face:-face;
face_z = (face_y - jj_self*2.0/info_stat.self_sub_vox) * sph_z[omega_count]/ sph_y[omega_count] + kk_self*2.0/info_stat.self_sub_vox;
face_x = (face_y - jj_self*2.0/info_stat.self_sub_vox) * sph_x[omega_count]/ sph_y[omega_count] + ii_self*2.0/info_stat.self_sub_vox;
if (face_x <= face && face_x >= -face && face_y <= face && face_y >= -face && face_z <= face && face_z >= -face && sph_y[omega_count] * face_y >= 0){
cube_x = face_x;
cube_y = face_y;
cube_z = face_z;
}
}
}
if(sph_z[omega_count] != 0.0){
for ( face_calc = 0; face_calc <2; face_calc++){
face_z = face_calc ==0 ? face:-face;
face_x = (face_z - kk_self*2.0/info_stat.self_sub_vox) * sph_x[omega_count]/ sph_z[omega_count] + ii_self*2.0/info_stat.self_sub_vox;
face_y = (face_z - kk_self*2.0/info_stat.self_sub_vox) * sph_y[omega_count]/ sph_z[omega_count] + jj_self*2.0/info_stat.self_sub_vox;
if (face_x <= face && face_x >= -face && face_y <= face && face_y >= -face && face_z <= face && face_z >= -face && sph_z[omega_count] * face_z >=0){
cube_x = face_x;
cube_y = face_y;
cube_z = face_z;
}
}
}
#endif
dist_self = sqrt( (ii_self*2.0/info_stat.self_sub_vox - cube_x)*(ii_self*2.0/info_stat.self_sub_vox - cube_x) + (jj_self*2.0/info_stat.self_sub_vox- cube_y)*(jj_self*2.0/info_stat.self_sub_vox- cube_y) + (kk_self*2.0/info_stat.self_sub_vox - cube_z)*(kk_self*2.0/info_stat.self_sub_vox - cube_z)) * info_stat.delX/2.0; //square voxel approx.
fact_self_vox[omega_count * info_stat.self_sub_vox * info_stat.self_sub_vox * info_stat.self_sub_vox + r_ind_self ] = ( 1 - cexp( -(info_stat.mu_tot[tiss_num]) * dist_self)) * sin(theta_self[omega_count]);
}
#if 1
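/*
 * Host routine: generate_diag_terms_dev
 * Builds diag_terms_host for every tissue type by integrating the sub-voxel
 * factors over the angular grid and projecting them onto each pair of
 * spherical harmonics (l,m) x (lp,mp), normalised by cm, the number of
 * sub-voxels and the voxel volume.  Note that the routine currently ends
 * with exit(0), so the program stops after printing the diagonal terms;
 * this looks like leftover debugging, and not all of the host/device
 * scratch buffers are freed before the exit.
 */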
void generate_diag_terms_dev() {
int ang_res = ANG_RES;
int omega_count,ang_ind;
int r_ind_self;
complex_double *rt_self, *rtp_self;
int l,m,lp,mp,cnt,cntp;
flt_doub *theta_self, *phi_self, *sph_x_self, *sph_y_self, *sph_z_self;
flt_doub cm, del_theta, del_phi;
complex_double sub_v_sum_self;
int i;
cm = C / phan->n;
diag_terms_host = (complex_double *)malloc(sizeof(complex_double )* MAX_TISS_NUM*MAX_NL*MAX_NL*MAX_NL*MAX_NL);
theta_self = (flt_doub *)malloc(sizeof(flt_doub) * pow( ang_res,2));
phi_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
sph_x_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
sph_y_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
sph_z_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
rt_self = (complex_double *)malloc(sizeof(complex_double) * pow(ang_res,2));
rtp_self = (complex_double *)malloc(sizeof(complex_double) * pow(ang_res,2));
flt_doub *theta_self_dev, *phi_self_dev,*sph_x_dev,*sph_y_dev,*sph_z_dev;
complex_double *fact_self_vox_dev, *fact_self_vox_host;
MY_SAFE_CALL(hipMalloc(&theta_self_dev, sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(hipMalloc(&phi_self_dev, sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(hipMalloc(&sph_x_dev,sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(hipMalloc(&sph_y_dev,sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(hipMalloc(&sph_z_dev,sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(hipMalloc(&fact_self_vox_dev, sizeof(complex_double ) * pow(geom->self_sub_vox,3) * pow( ang_res,2)));
fact_self_vox_host = (complex_double *) malloc (sizeof(complex_double ) * pow(geom->self_sub_vox,3) * pow( ang_res,2));
if(fact_self_vox_host == NULL){
printf("error in memory allocation \n");
exit(0);
}
dim3 dim_block_1(BLK_ANG_SIZE,BLK_ANG_SIZE,1);
dim3 dim_grid_1(1,1);
dim3 dim_block_2(BLK_SELF_SUB_VOX,BLK_SELF_SUB_VOX,BLK_SELF_SUB_VOX);
dim3 dim_grid_2(geom->self_sub_vox/BLK_SELF_SUB_VOX,geom->self_sub_vox/BLK_SELF_SUB_VOX);
int theta_count, phi_count;
for(theta_count = 0; theta_count < ANG_RES; theta_count = theta_count + BLK_ANG_SIZE){
for( phi_count=0; phi_count < ANG_RES; phi_count = phi_count + BLK_ANG_SIZE){
hipLaunchKernelGGL(( compute_sph_coord), dim3(dim_grid_1), dim3(dim_block_1), 0, 0, theta_self_dev, phi_self_dev, sph_x_dev, sph_y_dev, sph_z_dev, theta_count, phi_count);
checkCUDAError("Kernel invocation in compute_sph_coord\n");
}
}
hipMemcpy(theta_self, theta_self_dev, sizeof(flt_doub)*pow( ang_res,2), hipMemcpyDeviceToHost);
hipMemcpy(phi_self, phi_self_dev, sizeof(flt_doub)*pow( ang_res,2), hipMemcpyDeviceToHost);
omega_count = 0;
/*
for(theta_count = 0; theta_count < ANG_RES; theta_count = theta_count + BLK_ANG_SIZE){
for( phi_count=0; phi_count < ANG_RES; phi_count = phi_count + BLK_ANG_SIZE){
omega_count = theta_count * ANG_RES + phi_count;
// printf("%f %f %f \n", sph_x_self[omega_count], sph_y_self[omega_count],sph_z_self[omega_count], omega_count);
}
}
*/
del_theta = M_PI / ANG_RES;
del_phi = 2*M_PI / ANG_RES;
int tiss_num;
int blk_dep;
omega_count = 0;
for (tiss_num = 1; tiss_num < phan->no_tiss; tiss_num++){
for ( omega_count = 0; omega_count < pow(ang_res,2); omega_count++){
for(blk_dep=0; blk_dep < geom->self_sub_vox; blk_dep = blk_dep + BLK_SELF_SUB_VOX){
hipLaunchKernelGGL(( compute_diag_selfsub), dim3(dim_grid_2), dim3(dim_block_2), 0, 0, fact_self_vox_dev, sph_x_dev, sph_y_dev,sph_z_dev, theta_self_dev, omega_count, tiss_num,blk_dep);
checkCUDAError("Kernel invocation in compute_diag_selfsub\n");
}
}
hipMemcpy(fact_self_vox_host, fact_self_vox_dev, sizeof(complex_double) * pow(geom->self_sub_vox,3) * pow( ang_res,2), hipMemcpyDeviceToHost);
cnt = 0;
for (l = 0; l <= nL; l++) {
for (m = -l; m <= l; m++) {
cntp = 0;
SpherHarmonicArray(l, m, powf(ang_res,2), theta_self, phi_self, rt_self);
for (lp = 0; lp <= nL; lp++) {
for (mp = -lp; mp <= lp; mp++) {
sub_v_sum_self = 0.0 + 0.0*I;
SpherHarmonicArray(lp, mp, pow(ang_res,2), theta_self, phi_self, rtp_self);
for ( omega_count = 0; omega_count < ang_res * ang_res; omega_count++){
for ( r_ind_self = 0; r_ind_self < pow(geom->self_sub_vox,3); r_ind_self++){
sub_v_sum_self = sub_v_sum_self + ~(rt_self[omega_count]) * rtp_self[omega_count] * fact_self_vox_host[omega_count * (int)pow(geom->self_sub_vox,3) + r_ind_self];
}
}
diag_terms_host[cnt*MAX_TISS_NUM*(MAX_NL)*(MAX_NL) + cntp * MAX_TISS_NUM + tiss_num] = sub_v_sum_self * del_theta * del_phi / (cm * pow((double)geom->self_sub_vox,3) * (phan->mu_abs[tiss_num] + phan->mu_sc[tiss_num]) * geom->delX * geom->delY * geom->delZ) ;
if(cnt == cntp){
printf("The diagonal term is %e +%e i for tiss = %d, cnt = %d and cntp = %d \n", diag_terms_host[cnt*MAX_TISS_NUM*(MAX_NL)*(MAX_NL) + cntp * MAX_TISS_NUM + tiss_num].real(), diag_terms_host[cnt*MAX_TISS_NUM*(MAX_NL)*(MAX_NL) + cntp * MAX_TISS_NUM + tiss_num].imag(), tiss_num, cnt, cntp);
}
cntp++;
}
}
cnt++;
}
}
}
hipFree(sph_x_dev);
hipFree(sph_y_dev);
hipFree(sph_z_dev);
hipFree(theta_self_dev);
hipFree(phi_self_dev);
exit(0);
}
#endif
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
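/*
 * Host worker: prop_abs
 * Runs on one GPU (parameters.device_index) inside a pthread: it uploads the
 * source distribution, tissue map, constants and tabulated spherical
 * harmonics, launches compute_diagonal_abs, compute_propabs, scale_dist_dev
 * and (when flag_scat is set) prop_scat_dev one layer block at a time, and
 * copies the result back into parameters.out_host.
 *
 * Usage sketch (an assumption -- the code that launches this worker is not
 * part of this file):
 *
 *   THREAD_PARAMETERS params;            // filled in by the caller
 *   pthread_t tid;
 *   pthread_create(&tid, NULL, prop_abs, &params);
 *   pthread_join(tid, NULL);
 */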
void * prop_abs(void *arg){
#if 1
SHORT cnt = 0; /* initialised: cnt is passed by value to the kernels below, which reuse it as a loop index */
SHORT block_dep;
//COR subvox_src, subvox_dest;
//SIGNED_SHORT i,j,k,ip,jp,kp;
//flt_doub dx_sub, dy_sub, dz_sub;
dim3 dim_block(BLK_SIZE, BLK_SIZE, BLK_SIZE_Z);
dim3 dim_grid(geom->nX/dim_block.x,geom->nY/dim_block.y);
// printf("% d and %d are no of blocks per grid \n", dim_grid.y, dim_grid.x);
// printf("% d %d and %d are no of threads per block \n", dim_block.x, dim_block.y, dim_block.z);
const THREAD_PARAMETERS parameters = *((THREAD_PARAMETERS *) arg);
int size_layer = ( geom->nX + 2*geom->bounX ) * ( geom->nY + 2*geom->bounY ) * ( nL+1) * (nL+1) ;
int size = ( geom->nX + 2*geom->bounX ) * ( geom->nY + 2*geom->bounY ) * (geom->nZ + 2*geom->bounZ) * ( nL+1) * (nL+1);
complex_double *src_dev, *out_dev;
if(hipSetDevice(parameters.device_index) != hipSuccess){
printf("Error in setting up device %d \n", parameters.device_index);
exit(0);
}
MY_SAFE_CALL(hipMalloc(&src_dev, sizeof(complex_double)*size));
MY_SAFE_CALL(hipMalloc(&out_dev, sizeof(complex_double)*size_layer*(parameters.num_layers)));
MY_SAFE_CALL(hipMemset(out_dev, 0, sizeof(complex_double)*size_layer*(parameters.num_layers)));
MY_SAFE_CALL(hipMemcpy(src_dev, parameters.src_host,sizeof(complex_double)*size, hipMemcpyHostToDevice));
Info_Dyn info_dyn_dev;
MY_SAFE_CALL(hipMalloc(&(info_dyn_dev.tiss_type), sizeof(byte)*geom->no_vox));
MY_SAFE_CALL(hipMemcpy(info_dyn_dev.tiss_type, phan->tiss_type,sizeof(byte)*geom->no_vox, hipMemcpyHostToDevice));
MY_SAFE_CALL(hipMemcpyToSymbol(info_stat,info_stat_host,sizeof(Info_Stat) ));
MY_SAFE_CALL(hipMemcpyToSymbol(nL_dev,&nL,sizeof(SHORT) ));
MY_SAFE_CALL(hipMemcpyToSymbol(diag_terms_dev,diag_terms_host, sizeof(complex_double)*MAX_TISS_NUM*MAX_NL*MAX_NL*MAX_NL*MAX_NL));
complex_double *sph_harm_dev;
MY_SAFE_CALL(hipMalloc(&sph_harm_dev, sizeof(complex_double)*(nL+1)*(nL+1) * THETA_ANG_RES * PHI_ANG_RES));
MY_SAFE_CALL(hipMemcpy(sph_harm_dev,sph_harm, sizeof(complex_double)*(nL+1)*(nL+1) * THETA_ANG_RES * PHI_ANG_RES,hipMemcpyHostToDevice));
#if 0
for(cnt=0; cnt < (nL+1)*(nL+1); cnt++){
printf("Invoking compute_diagonal_abs with cnt = %d \n", cnt);
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
hipLaunchKernelGGL(( write_dist_dev), dim3(dim_grid), dim3(dim_block), 0, 0, src_dev,1+0.0*I,cnt,block_dep, parameters.layer_start);
}
}
#endif
#if 1
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
hipLaunchKernelGGL(( compute_diagonal_abs), dim3(dim_grid), dim3(dim_block), 0, 0, src_dev,out_dev,info_dyn_dev,cnt,block_dep, parameters.layer_start, parameters.flag);
checkCUDAError("Kernel invocation in compute_diagonal_abs\n");
}
#endif
/* The prop_thresh condition. Again run thread for all the voxels */
#if 1
// printf("Invoking compute_propabs with cnt = %d \n", cnt);
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
hipLaunchKernelGGL(( compute_propabs), dim3(dim_grid), dim3(dim_block), 0, 0, src_dev, out_dev, info_dyn_dev, sph_harm_dev, cnt, block_dep,parameters.layer_start,parameters.flag);
//printf("%d operation complete \n", block_dep/parameters.num_layers);
checkCUDAError("Kernel invocation in compute_propabs\n");
}
#endif
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
hipLaunchKernelGGL(( scale_dist_dev), dim3(dim_grid), dim3(dim_block), 0, 0, out_dev, geom->delX * geom->delY * geom->delZ,cnt,block_dep);
checkCUDAError("Kernel invocation in scale_dist_dev\n");
}
#if 1
if(parameters.flag_scat){
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
hipLaunchKernelGGL(( prop_scat_dev), dim3(dim_grid), dim3(dim_block), 0, 0, out_dev, info_dyn_dev,parameters.layer_start,block_dep);
checkCUDAError("Kernel invocation in prop_dscat_dev\n");
}
}
#endif
hipDeviceSynchronize();
MY_SAFE_CALL(hipMemcpy(parameters.out_host, out_dev, sizeof(complex_double)*size_layer*(parameters.num_layers), hipMemcpyDeviceToHost));
MY_SAFE_CALL(hipMemset(out_dev, 0, sizeof(complex_double)*size_layer*(parameters.num_layers)));
MY_SAFE_CALL(hipMemset(src_dev, 0, sizeof(complex_double)*size));
MY_SAFE_CALL(hipMemset(sph_harm_dev, 0, sizeof(complex_double)*(nL+1)*(nL+1) * THETA_ANG_RES * PHI_ANG_RES));
hipFree(src_dev);
hipFree(out_dev);
hipFree(sph_harm_dev);
hipDeviceReset();
pthread_exit(NULL);
#endif
}
| affd5aba3c825a43e9b6547be6c7e76ae091f1b9.cu | //#include <cuda.h>
//#include <stdio.h>
//#include <stdlib.h>
//#include <errno.h>
//#include <math.h>
#include "rte.h"
//#include <pthread.h>
#include "complex_arith.cu"
extern Geometry *geom;
extern Phantom *phan;
extern Source *beam_src;
extern complex_double *diag_terms_host;
extern complex_double *sph_harm;
extern Info_Stat *info_stat_host;
extern SHORT nL;
extern int nTerms;
__constant__ Info_Stat info_stat;
__constant__ SHORT nL_dev;
__constant__ complex_double diag_terms_dev[MAX_TISS_NUM*MAX_NL*MAX_NL*MAX_NL*MAX_NL];
/* Just think of the vox indices as (blk_dep*BLK_SIZE + dep + info_stat.bounZ, blk_row*BLK_SIZE + row + info_stat.bounY, blk_col * BLK_SIZE + col + info_stat.nX) */
__device__ int get_voxind_spind(int blk_dep, int blk_row,int blk_col, int dep, int row, int col, int cnt){
return VOX_TO_SPIND( ((blk_dep + dep + info_stat.bounZ)*(2*info_stat.bounX + info_stat.nX) * (2*info_stat.bounY + info_stat.nY) + (blk_row*BLK_SIZE + row + info_stat.bounY)* (2*info_stat.bounX + info_stat.nX) + (blk_col * BLK_SIZE + col + info_stat.bounX)), cnt, (nL_dev+1)*(nL_dev+1));
}
__device__ int get_vind_phanind(int dep, int row, int col){
return ((info_stat.bounZ + dep) * (info_stat.nX + 2 * info_stat.bounX ) * (info_stat.nY + 2 * info_stat.bounY ) /* reached the correct layer */ + ( info_stat.bounY + row)* (info_stat.nX + 2 * info_stat.bounX ) + (info_stat.bounX + col));
}
__device__ int get_voxind_phanind(int blk_dep, int blk_row,int blk_col,int blk_offset_i, int blk_offset_j, int blk_offset_k, int dep, int row, int col){
return (info_stat.bounZ + blk_dep + blk_offset_i * BLK_SIZE+ dep) * (info_stat.nX + 2 * info_stat.bounX ) * (info_stat.nY + 2 * info_stat.bounY ) /* reached the correct layer */ + (blk_row * BLK_SIZE + info_stat.bounY + blk_offset_j * BLK_SIZE + row)* (info_stat.nX + 2 * info_stat.bounX ) + (info_stat.bounX + blk_col * BLK_SIZE + blk_offset_k * BLK_SIZE + col);
}
__device__ int get_ind_phanind(int dep, int row, int col){
return dep* (2*info_stat.boun_blk_sizeX + BLK_SIZE)* (BLK_SIZE + 2*info_stat.boun_blk_sizeY) + row* (BLK_SIZE + 2*info_stat.boun_blk_sizeX) + col;
}
__device__ complex_double
mu_int (COR *cor_src, COR *cor_dest, float r, Info_Dyn info_dyn)
{
flt_doub mu_tot_path;
mu_tot_path = 0.0 + 0.0*I;
float alpha_x_curr, alpha_y_curr, alpha_z_curr;
float alpha_xinc, alpha_yinc, alpha_zinc;
SHORT i_curr, j_curr, k_curr;
float alpha_prev;
byte flag = 1;
alpha_x_curr = 0;
alpha_y_curr = 0;
alpha_z_curr = 0;
alpha_prev = 0;
SHORT x_inc_end,y_inc_end,z_inc_end;
i_curr = cor_src->i;
j_curr = cor_src->j;
k_curr = cor_src->k;
if (cor_dest->i != cor_src->i)
alpha_zinc = fabs (0.5 / (cor_dest->i - cor_src->i)); // The initial increment along the z axis.
else
alpha_zinc = INF;
if (cor_dest->j != cor_src->j)
alpha_yinc = fabs (0.5 / (cor_dest->j - cor_src->j));
else
alpha_yinc = INF;
if (cor_dest->k != cor_src->k)
alpha_xinc = fabs (0.5 / (cor_dest->k - cor_src->k));
else
alpha_xinc = INF;
#if 1
while (flag == 1 && alpha_prev < 1) // Hack for now to avoid infinite loops
{
if (alpha_z_curr + alpha_zinc <= alpha_x_curr + alpha_xinc && alpha_z_curr + alpha_zinc <= alpha_y_curr + alpha_yinc)
{
alpha_z_curr += alpha_zinc;
if ( i_curr == cor_src->i)
alpha_zinc *=2; // We have taken the first step along the z axis, which was half a voxel. Now every step will be one voxel.
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * (alpha_z_curr - alpha_prev);
i_curr = (cor_src->i < cor_dest->i) ? i_curr + 1 : i_curr - 1;
alpha_prev = alpha_z_curr;
if (i_curr == cor_dest->i && j_curr == cor_dest->j && k_curr == cor_dest->k)
{
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * alpha_zinc/2.0;
flag = 0;
return mu_tot_path;
}
}
else if (alpha_y_curr + alpha_yinc < alpha_z_curr + alpha_zinc && alpha_y_curr + alpha_yinc <= alpha_x_curr + alpha_xinc )
{
alpha_y_curr += alpha_yinc;
if ( j_curr == cor_src->j)
alpha_yinc *=2;
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * (alpha_y_curr - alpha_prev);
j_curr = (cor_src->j < cor_dest->j) ? j_curr + 1 : j_curr - 1;
alpha_prev = alpha_y_curr;
if (i_curr == cor_dest->i && j_curr == cor_dest->j && k_curr == cor_dest->k)
{
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * alpha_yinc/2.0;
flag = 0;
return mu_tot_path;
}
}
else if (alpha_x_curr + alpha_xinc < alpha_y_curr + alpha_yinc && alpha_x_curr + alpha_xinc < alpha_z_curr + alpha_zinc )
{
alpha_x_curr += alpha_xinc;
if ( k_curr == cor_src->k)
alpha_xinc *=2;
mu_tot_path = mu_tot_path + (info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]]) * r * (alpha_x_curr - alpha_prev);
k_curr = (cor_src->k < cor_dest->k) ? k_curr + 1 : k_curr - 1;
alpha_prev = alpha_x_curr;
if (i_curr == cor_dest->i && j_curr == cor_dest->j && k_curr == cor_dest->k)
{
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * alpha_xinc/2.0;
flag = 0;
return mu_tot_path;
}
}
}
#endif
return mu_tot_path;
}
__global__ void compute_diagonal_abs (doublecomplex *src_dist,doublecomplex *out_dist,Info_Dyn info_dyn, int cnt,SHORT block_dep, SHORT layer_start, SHORT flag)
{
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
int sp_ind, sp_ind_src, r_ind;
SHORT cntp;
SHORT nL_tmp;
flt_doub cm_tmp = 1.0;
nL_tmp = (flag == 0) ? 0 : nL_dev;
// cm_tmp = (flag == 0) ? 1 : info_stat.cm
r_ind = get_vind_phanind(block_dep + layer_start + i, block_row * BLK_SIZE + j, block_col * BLK_SIZE+ k);
for(cnt=0; cnt < (nL_tmp+1)*(nL_tmp+1); cnt++){
sp_ind = get_voxind_spind(block_dep - info_stat.bounZ, block_row, block_col ,i,j,k,cnt);
out_dist[sp_ind] = 0 + 0*I;
for(cntp=0; cntp<(nL_tmp+1)*(nL_tmp+1) ; cntp++){
sp_ind_src = get_voxind_spind(block_dep + layer_start, block_row, block_col,i,j,k,cntp);
out_dist[sp_ind] = out_dist[sp_ind] + (1.0/cm_tmp)*diag_terms_dev[cnt * (MAX_NL) * MAX_NL * MAX_TISS_NUM + cntp * MAX_TISS_NUM + info_dyn.tiss_type[r_ind]] * src_dist[sp_ind_src];
}
}
}
#if 0
//extern __shared__ char array_tisstype[];
__global__ void compute_subvox_2(complex_double* src_dist_dev,complex_double* out_dist_dev, Info_Dyn info_dyn, complex_double* sph_harm_dev, SHORT cnt, COR subvox_src,COR subvox_dest, flt_doub dz_sub, flt_doub dy_sub, flt_doub dx_sub, SHORT blk_dep, SHORT start_layer){
SHORT block_row = blockIdx.y;
SHORT block_col = blockIdx.x;
SHORT i = threadIdx.z;
SHORT j = threadIdx.y;
SHORT k = threadIdx.x;
SHORT cntp;
complex_double out_tmp, tmp;
COR cor_src, cor_dest;
SHORT ip, jp, kp;
int sp_ind;
flt_doub theta,phi,dist,dx,dy,dz;
int sp_ind_src;
#if 0
cor_src.i = i + info_stat.boun_blk_sizeZ;
cor_src.j = j + info_stat.boun_blk_sizeY;
cor_src.k = k + info_stat.boun_blk_sizeX;
#else
cor_src.i = i + blk_dep + start_layer;
cor_src.j = j + block_row*BLK_SIZE;
cor_src.k = k + block_col*BLK_SIZE;
#endif
// __shared__ complex_double src_tmp[BLK_SRC_SIZE];
#if 0
int blk_offset_i, blk_offset_j,blk_offset_k;
byte *tisstype_tmp = (byte *) array_tisstype;
for(blk_offset_i=0; blk_offset_i< 1 + 2*info_stat.boun_blkZ; blk_offset_i++){
for(blk_offset_j=0; blk_offset_j< 1 + 2*info_stat.boun_blkY; blk_offset_j++){
for(blk_offset_k=0; blk_offset_k< 1 + 2*info_stat.boun_blkX; blk_offset_k++){
tisstype_tmp[get_ind_phanind(blk_offset_i*BLK_SIZE + i, blk_offset_j*BLK_SIZE + j, blk_offset_k*BLK_SIZE + k)] = info_dyn.tiss_type[get_voxind_phanind(blk_dep, block_row, block_col,blk_offset_i - info_stat.boun_blkZ,blk_offset_j - info_stat.boun_blkY,blk_offset_k - info_stat.boun_blkX,i,j,k)];
// src_tmp[get_ind_phanind(blk_offset_i*BLK_SIZE + i, blk_offset_j*BLK_SIZE + j, blk_offset_k*BLK_SIZE + k)] = src_dist_dev[VOX_TO_SPIND(get_voxind_phanind(blk_dep, block_row, block_col,(blk_offset_i - info_stat.bounZ/BLK_SIZE),(blk_offset_j - info_stat.bounY/BLK_SIZE),(blk_offset_k - info_stat.bounX/BLK_SIZE),i,j,k),cntp,info_stat.no_vox)];
}
}
}
__syncthreads();
#endif
out_tmp = 0 + 0*I;
flt_doub sub_dist;
for(ip= i - info_stat.subbounZ; ip <= i + info_stat.subbounZ; ip++){
dz = -(ip-i)*info_stat.delZ + dz_sub;
for(jp= j - info_stat.subbounY; jp <= j + info_stat.subbounY; jp++){
dy = -(jp-j)*info_stat.delY + dy_sub;
for(kp= k - info_stat.subbounX; kp <= k + info_stat.subbounX; kp++){
dx = -(kp-k)*info_stat.delX + dx_sub;
dist = sqrt((i-ip)*(i-ip)*info_stat.delZ*info_stat.delZ + (j-jp)*(j-jp)*info_stat.delY*info_stat.delY + (k-kp)*(k-kp)*info_stat.delX*info_stat.delX);
if( dist <= info_stat.sub_thresh && ( i != ip || j != jp || k != kp)){
sub_dist = sqrt(dx*dx + dy*dy + dz*dz);
#if 0
cor_dest.i = ip + info_stat.boun_blk_sizeZ;
cor_dest.j = jp + info_stat.boun_blk_sizeY;
cor_dest.k = kp + info_stat.boun_blk_sizeX;
#else
cor_dest.i = ip +blk_dep + start_layer;
cor_dest.j = jp + block_row*BLK_SIZE;
cor_dest.k = kp + block_col*BLK_SIZE;
#endif
theta = atan(sqrt(dx*dx + dy*dy)/dz );
phi = atan2(dy,dx);
for(cnt=0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(blk_dep - info_stat.bounZ, block_row, block_col,i,j,k,cnt);
tmp = 0 + 0*I;
for(cntp=0; cntp< (nL_dev+1)*(nL_dev+1); cntp++){
sp_ind_src = get_voxind_spind(blk_dep + start_layer, block_row, block_col,ip,jp,kp,cntp);
tmp = tmp + src_dist_dev[sp_ind_src] * sph_harm_dev[SPH_HARM_IND(cntp,theta,phi)];
}
tmp = tmp * ~sph_harm_dev[SPH_HARM_IND(cnt,theta,phi)];
tmp = tmp* cexp_dev(-mu_int(cor_src, cor_dest, subvox_src, subvox_dest,sub_dist, info_dyn )); ;//cexp_dev(-(1.01+0.0*I)*sub_dist);// cexp_dev(-mu_int(cor_src, cor_dest, subvox_src, subvox_dest,sub_dist, tisstype_tmp,info_dyn )); //cexp_dev(-(1.01+0.0*I)*dist)
tmp = tmp * (1.0/( info_stat.cm * sub_dist*sub_dist * __powf(__int2float_ru(info_stat.sub_vox),6.0)));
out_dist_dev[sp_ind] = out_dist_dev[sp_ind] + tmp;
//out_tmp = out_tmp + tmp;
}
}
}
}
}
__syncthreads();
}
#endif
__global__ void compute_propabs(complex_double *src_dist_dev, complex_double *out_dist_dev, Info_Dyn info_dyn,complex_double *sph_harm_dev,SHORT cnt, SHORT blk_dep, SHORT start_layer, SHORT flag){ // If flag =1, only then evaluate all the other spherical harmonics, else nl = 0.
SHORT block_row = blockIdx.y;
SHORT block_col = blockIdx.x;
SHORT i = threadIdx.z;
SHORT j = threadIdx.y;
SHORT k = threadIdx.x;
int sp_ind;
COR *cor_src, *cor_dest;
SHORT cntp;
flt_doub theta,phi,dist,dx,dy,dz;
SHORT ip,jp,kp;
int sp_ind_src;
int nL_tmp;
flt_doub cm_tmp = 1.0;
cor_src = (COR*)malloc(sizeof(COR));
cor_dest = (COR*)malloc(sizeof(COR));
complex_double tmp;
nL_tmp = (flag == 0) ? 0 : nL_dev;
cor_src->i = i + blk_dep + start_layer;
cor_src->j = j + block_row*BLK_SIZE;
cor_src->k = k + block_col*BLK_SIZE;
//int sp_ind2;
for (ip = 0; ip < info_stat.nZ ; ip++){
dz = -(ip-cor_src->i)*info_stat.delZ;
for (jp = 0; jp < info_stat.nY ; jp++){
dy = -(jp-cor_src->j)*info_stat.delY;
for (kp = 0; kp < info_stat.nX; kp++){
dx = -(kp-cor_src->k)*info_stat.delX;
dist = sqrt(dx*dx + dy*dy + dz*dz);
if((ip != cor_src->i || jp != cor_src->j || kp != cor_src->k) && dist < info_stat.prop_thresh){
theta = acos(dz/dist );
if(theta < 0)
theta = theta + M_PI;
phi = atan2(dy,dx);
// if(phi < 0)
// phi = phi + 2*M_PI;
#if 1
cor_dest->i = ip;
cor_dest->j = jp;
cor_dest->k = kp;
#endif
for(cnt = 0; cnt < (nL_tmp+1)*(nL_tmp+1); cnt++){
sp_ind = get_voxind_spind(blk_dep - info_stat.bounZ, block_row, block_col,i,j,k,cnt);
tmp = 0 + 0*I;
for(cntp=0; cntp< (nL_tmp+1)*(nL_tmp+1); cntp++){
sp_ind_src = get_voxind_spind(0, 0, 0,ip,jp,kp,cntp);
tmp = tmp + src_dist_dev[sp_ind_src]*sph_harm_dev[SPH_HARM_IND(cntp,theta,phi)];
}
tmp = tmp * ~sph_harm_dev[SPH_HARM_IND(cnt,theta,phi)];
tmp = tmp * cexp_dev(-mu_int(cor_src, cor_dest, dist, info_dyn )); //cexp_dev(-1*(1.01 + 0*I)*dist); // have commented this line
// tmp = tmp * cexp_dev(-1*(1.01 + 0*I)*dist);
tmp = tmp * (1.0/(cm_tmp*dist*dist));
out_dist_dev[sp_ind] = out_dist_dev[sp_ind] + tmp;
}
}
}
}
}
free(cor_src);
free(cor_dest);
__syncthreads();
}
__global__ void scale_dist_dev (doublecomplex *W,double scale_fac,SHORT cnt,SHORT block_dep )
{
int sp_ind;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(cnt=0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep , block_row , block_col ,i,j,k,cnt);
W[sp_ind] = W[sp_ind]*scale_fac;
}
}
__global__ void prop_scat_dev (doublecomplex *out_dist, Info_Dyn info_dyn, short layer_start, SHORT block_dep)
{
int sp_ind,cnt,l,r_ind;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
r_ind = get_vind_phanind(block_dep + layer_start + i, block_row * BLK_SIZE + j, block_col * BLK_SIZE+ k);
for (cnt = 0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep, block_row, block_col,i,j,k,cnt);
l = (int) sqrtf(cnt);
out_dist[sp_ind] = pow (__int2double_rn(info_stat.g),__int2double_rn(l)) * out_dist[sp_ind] * info_stat.mu_sc[info_dyn.tiss_type[r_ind]] ;
}
__syncthreads();
}
__global__ void write_dist_dev (doublecomplex *W,doublecomplex val,SHORT cnt,SHORT block_dep, SHORT layer_start)
{
int sp_ind;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(cnt=0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep - info_stat.bounZ + layer_start, block_row, block_col,i,j,k,cnt);
W[sp_ind] = val;
}
}
__global__ void copy_dist_dev (doublecomplex *W1,doublecomplex *W2)
{
int sp_ind;
int cnt=0;
int block_dep=0;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(block_dep = 0; block_dep < info_stat.nZ; block_dep = block_dep + BLK_SIZE){
for (cnt = 0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep - info_stat.bounZ, block_row, block_col,i,j,k,cnt);
W2[sp_ind] = W1[sp_ind];
__syncthreads();
}
}
}
__global__ void add_dist_dev (doublecomplex *W1,doublecomplex *W2, doublecomplex *out )
{
int sp_ind,cnt,block_dep;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(block_dep = 0; block_dep < info_stat.nZ; block_dep = block_dep + BLK_SIZE){
for (cnt = 0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep -info_stat.bounZ, block_row, block_col,i,j,k,cnt);
out[sp_ind] = W1[sp_ind] + W2[sp_ind];
}
}
}
__global__ void compute_sph_coord(flt_doub* theta_self, flt_doub* phi_self, flt_doub* sph_x, flt_doub* sph_y, flt_doub* sph_z, int theta_blk, int phi_blk)
{
int theta_count = threadIdx.x + theta_blk;
int phi_count = threadIdx.y + phi_blk;
int omega_count;
omega_count = theta_count*ANG_RES + phi_count;
theta_self[omega_count] = theta_count * M_PI / ANG_RES ;
phi_self[omega_count] = phi_count * 2.0*M_PI / ANG_RES ;
sph_x[omega_count] = cos(phi_count * 2.0*M_PI / ANG_RES) * sin(theta_count * M_PI / ANG_RES);
sph_y[omega_count] = sin(phi_count * 2.0*M_PI / ANG_RES) * sin(theta_count * M_PI / ANG_RES) ;
sph_z[omega_count] = cos(theta_count * M_PI / ANG_RES) ;
}
__global__ void compute_diag_selfsub(complex_double *fact_self_vox, flt_doub *sph_x, flt_doub *sph_y, flt_doub *sph_z, flt_doub *theta_self, int omega_count, SHORT tiss_num, int blk_dep)
{
int blk_row = blockIdx.y;
int blk_col = blockIdx.x;
int z_ind = threadIdx.z + blk_dep;
int y_ind = threadIdx.y +blk_row* BLK_SELF_SUB_VOX;
int x_ind = threadIdx.x + blk_col* BLK_SELF_SUB_VOX;
int face_calc;
int face = 1;
flt_doub face_x, face_y, face_z, cube_x, cube_y, cube_z, dist_self ;
//int r_ind_self = (threadIdx.z + blk_dep) * (info_stat.self_sub_vox)*(info_stat.self_sub_vox) + (threadIdx.y +blk_row* BLK_SELF_SUB_VOX) * info_stat.self_sub_vox + (threadIdx.x + blk_col* BLK_SELF_SUB_VOX);
int r_ind_self = (z_ind) * (info_stat.self_sub_vox)*(info_stat.self_sub_vox) + (y_ind) * info_stat.self_sub_vox + (x_ind);
flt_doub ii_self = -info_stat.self_sub_vox/2.0 +0.5 + z_ind;
flt_doub jj_self = -info_stat.self_sub_vox/2.0 +0.5 + y_ind;
flt_doub kk_self = -info_stat.self_sub_vox/2.0 +0.5 + x_ind;
face_x = 0;
face_y = 0;
face_z = 0;
cube_x = 0;
cube_y = 0;
cube_z = 0;
if (sph_x[omega_count] != 0.0){
for ( face_calc = 0; face_calc <2; face_calc++){
face_x = face_calc ==0 ? face:-face;
face_y = (face_x - ii_self*2.0/info_stat.self_sub_vox) * sph_y[omega_count]/ sph_x[omega_count] + jj_self*2.0/info_stat.self_sub_vox;
face_z = (face_x - ii_self*2.0/info_stat.self_sub_vox) * sph_z[omega_count]/ sph_x[omega_count] + kk_self*2.0/info_stat.self_sub_vox;
if (face_x <= face && face_x >= -face && face_y <=face && face_y >= -face && face_z <= face && face_z >= -face && sph_x[omega_count] * face_x >=0){
cube_x = face_x;
cube_y = face_y;
cube_z = face_z;
}
}
}
#if 1
if(sph_y[omega_count] != 0.0){
for ( face_calc = 0; face_calc <2; face_calc++){
face_y = face_calc ==0 ? face:-face;
face_z = (face_y - jj_self*2.0/info_stat.self_sub_vox) * sph_z[omega_count]/ sph_y[omega_count] + kk_self*2.0/info_stat.self_sub_vox;
face_x = (face_y - jj_self*2.0/info_stat.self_sub_vox) * sph_x[omega_count]/ sph_y[omega_count] + ii_self*2.0/info_stat.self_sub_vox;
if (face_x <= face && face_x >= -face && face_y <= face && face_y >= -face && face_z <= face && face_z >= -face && sph_y[omega_count] * face_y >= 0){
cube_x = face_x;
cube_y = face_y;
cube_z = face_z;
}
}
}
if(sph_z[omega_count] != 0.0){
for ( face_calc = 0; face_calc <2; face_calc++){
face_z = face_calc ==0 ? face:-face;
face_x = (face_z - kk_self*2.0/info_stat.self_sub_vox) * sph_x[omega_count]/ sph_z[omega_count] + ii_self*2.0/info_stat.self_sub_vox;
face_y = (face_z - kk_self*2.0/info_stat.self_sub_vox) * sph_y[omega_count]/ sph_z[omega_count] + jj_self*2.0/info_stat.self_sub_vox;
if (face_x <= face && face_x >= -face && face_y <= face && face_y >= -face && face_z <= face && face_z >= -face && sph_z[omega_count] * face_z >=0){
cube_x = face_x;
cube_y = face_y;
cube_z = face_z;
}
}
}
#endif
dist_self = sqrt( (ii_self*2.0/info_stat.self_sub_vox - cube_x)*(ii_self*2.0/info_stat.self_sub_vox - cube_x) + (jj_self*2.0/info_stat.self_sub_vox- cube_y)*(jj_self*2.0/info_stat.self_sub_vox- cube_y) + (kk_self*2.0/info_stat.self_sub_vox - cube_z)*(kk_self*2.0/info_stat.self_sub_vox - cube_z)) * info_stat.delX/2.0; //square voxel approx.
fact_self_vox[omega_count * info_stat.self_sub_vox * info_stat.self_sub_vox * info_stat.self_sub_vox + r_ind_self ] = ( 1 - cexp( -(info_stat.mu_tot[tiss_num]) * dist_self)) * sin(theta_self[omega_count]);
}
#if 1
void generate_diag_terms_dev() {
int ang_res = ANG_RES;
int omega_count,ang_ind;
int r_ind_self;
complex_double *rt_self, *rtp_self;
int l,m,lp,mp,cnt,cntp;
flt_doub *theta_self, *phi_self, *sph_x_self, *sph_y_self, *sph_z_self;
flt_doub cm, del_theta, del_phi;
complex_double sub_v_sum_self;
int i;
cm = C / phan->n;
diag_terms_host = (complex_double *)malloc(sizeof(complex_double )* MAX_TISS_NUM*MAX_NL*MAX_NL*MAX_NL*MAX_NL);
theta_self = (flt_doub *)malloc(sizeof(flt_doub) * pow( ang_res,2));
phi_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
sph_x_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
sph_y_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
sph_z_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
rt_self = (complex_double *)malloc(sizeof(complex_double) * pow(ang_res,2));
rtp_self = (complex_double *)malloc(sizeof(complex_double) * pow(ang_res,2));
flt_doub *theta_self_dev, *phi_self_dev,*sph_x_dev,*sph_y_dev,*sph_z_dev;
complex_double *fact_self_vox_dev, *fact_self_vox_host;
MY_SAFE_CALL(cudaMalloc(&theta_self_dev, sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(cudaMalloc(&phi_self_dev, sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(cudaMalloc(&sph_x_dev,sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(cudaMalloc(&sph_y_dev,sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(cudaMalloc(&sph_z_dev,sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(cudaMalloc(&fact_self_vox_dev, sizeof(complex_double ) * pow(geom->self_sub_vox,3) * pow( ang_res,2)));
fact_self_vox_host = (complex_double *) malloc (sizeof(complex_double ) * pow(geom->self_sub_vox,3) * pow( ang_res,2));
if(fact_self_vox_host == NULL){
printf("error in memory allocation \n");
exit(0);
}
dim3 dim_block_1(BLK_ANG_SIZE,BLK_ANG_SIZE,1);
dim3 dim_grid_1(1,1);
dim3 dim_block_2(BLK_SELF_SUB_VOX,BLK_SELF_SUB_VOX,BLK_SELF_SUB_VOX);
dim3 dim_grid_2(geom->self_sub_vox/BLK_SELF_SUB_VOX,geom->self_sub_vox/BLK_SELF_SUB_VOX);
int theta_count, phi_count;
for(theta_count = 0; theta_count < ANG_RES; theta_count = theta_count + BLK_ANG_SIZE){
for( phi_count=0; phi_count < ANG_RES; phi_count = phi_count + BLK_ANG_SIZE){
compute_sph_coord<<<dim_grid_1, dim_block_1>>>(theta_self_dev, phi_self_dev, sph_x_dev, sph_y_dev, sph_z_dev, theta_count, phi_count);
checkCUDAError("Kernel invocation in compute_sph_coord\n");
}
}
cudaMemcpy(theta_self, theta_self_dev, sizeof(flt_doub)*pow( ang_res,2), cudaMemcpyDeviceToHost);
cudaMemcpy(phi_self, phi_self_dev, sizeof(flt_doub)*pow( ang_res,2), cudaMemcpyDeviceToHost);
omega_count = 0;
/*
for(theta_count = 0; theta_count < ANG_RES; theta_count = theta_count + BLK_ANG_SIZE){
for( phi_count=0; phi_count < ANG_RES; phi_count = phi_count + BLK_ANG_SIZE){
omega_count = theta_count * ANG_RES + phi_count;
// printf("%f %f %f \n", sph_x_self[omega_count], sph_y_self[omega_count],sph_z_self[omega_count], omega_count);
}
}
*/
del_theta = M_PI / ANG_RES;
del_phi = 2*M_PI / ANG_RES;
int tiss_num;
int blk_dep;
omega_count = 0;
for (tiss_num = 1; tiss_num < phan->no_tiss; tiss_num++){
for ( omega_count = 0; omega_count < pow(ang_res,2); omega_count++){
for(blk_dep=0; blk_dep < geom->self_sub_vox; blk_dep = blk_dep + BLK_SELF_SUB_VOX){
compute_diag_selfsub<<<dim_grid_2, dim_block_2>>>(fact_self_vox_dev, sph_x_dev, sph_y_dev,sph_z_dev, theta_self_dev, omega_count, tiss_num,blk_dep);
checkCUDAError("Kernel invocation in compute_diag_selfsub\n");
}
}
cudaMemcpy(fact_self_vox_host, fact_self_vox_dev, sizeof(complex_double) * pow(geom->self_sub_vox,3) * pow( ang_res,2), cudaMemcpyDeviceToHost);
cnt = 0;
for (l = 0; l <= nL; l++) {
for (m = -l; m <= l; m++) {
cntp = 0;
SpherHarmonicArray(l, m, powf(ang_res,2), theta_self, phi_self, rt_self);
for (lp = 0; lp <= nL; lp++) {
for (mp = -lp; mp <= lp; mp++) {
sub_v_sum_self = 0.0 + 0.0*I;
SpherHarmonicArray(lp, mp, pow(ang_res,2), theta_self, phi_self, rtp_self);
for ( omega_count = 0; omega_count < ang_res * ang_res; omega_count++){
for ( r_ind_self = 0; r_ind_self < pow(geom->self_sub_vox,3); r_ind_self++){
sub_v_sum_self = sub_v_sum_self + ~(rt_self[omega_count]) * rtp_self[omega_count] * fact_self_vox_host[omega_count * (int)pow(geom->self_sub_vox,3) + r_ind_self];
}
}
diag_terms_host[cnt*MAX_TISS_NUM*(MAX_NL)*(MAX_NL) + cntp * MAX_TISS_NUM + tiss_num] = sub_v_sum_self * del_theta * del_phi / (cm * pow((double)geom->self_sub_vox,3) * (phan->mu_abs[tiss_num] + phan->mu_sc[tiss_num]) * geom->delX * geom->delY * geom->delZ) ;
if(cnt == cntp){
printf("The diagonal term is %e +%e i for tiss = %d, cnt = %d and cntp = %d \n", diag_terms_host[cnt*MAX_TISS_NUM*(MAX_NL)*(MAX_NL) + cntp * MAX_TISS_NUM + tiss_num].real(), diag_terms_host[cnt*MAX_TISS_NUM*(MAX_NL)*(MAX_NL) + cntp * MAX_TISS_NUM + tiss_num].imag(), tiss_num, cnt, cntp);
}
cntp++;
}
}
cnt++;
}
}
}
cudaFree(sph_x_dev);
cudaFree(sph_y_dev);
cudaFree(sph_z_dev);
cudaFree(theta_self_dev);
cudaFree(phi_self_dev);
exit(0);
}
#endif
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
void * prop_abs(void *arg){
#if 1
SHORT cnt = 0; /* initialised: cnt is passed by value to the kernels below, which reuse it as a loop index */
SHORT block_dep;
//COR subvox_src, subvox_dest;
//SIGNED_SHORT i,j,k,ip,jp,kp;
//flt_doub dx_sub, dy_sub, dz_sub;
dim3 dim_block(BLK_SIZE, BLK_SIZE, BLK_SIZE_Z);
dim3 dim_grid(geom->nX/dim_block.x,geom->nY/dim_block.y);
// printf("% d and %d are no of blocks per grid \n", dim_grid.y, dim_grid.x);
// printf("% d %d and %d are no of threads per block \n", dim_block.x, dim_block.y, dim_block.z);
const THREAD_PARAMETERS parameters = *((THREAD_PARAMETERS *) arg);
int size_layer = ( geom->nX + 2*geom->bounX ) * ( geom->nY + 2*geom->bounY ) * ( nL+1) * (nL+1) ;
int size = ( geom->nX + 2*geom->bounX ) * ( geom->nY + 2*geom->bounY ) * (geom->nZ + 2*geom->bounZ) * ( nL+1) * (nL+1);
complex_double *src_dev, *out_dev;
if(cudaSetDevice(parameters.device_index) != cudaSuccess){
printf("Error in setting up device %d \n", parameters.device_index);
exit(0);
}
MY_SAFE_CALL(cudaMalloc(&src_dev, sizeof(complex_double)*size));
MY_SAFE_CALL(cudaMalloc(&out_dev, sizeof(complex_double)*size_layer*(parameters.num_layers)));
MY_SAFE_CALL(cudaMemset(out_dev, 0, sizeof(complex_double)*size_layer*(parameters.num_layers)));
MY_SAFE_CALL(cudaMemcpy(src_dev, parameters.src_host,sizeof(complex_double)*size, cudaMemcpyHostToDevice));
Info_Dyn info_dyn_dev;
MY_SAFE_CALL(cudaMalloc(&(info_dyn_dev.tiss_type), sizeof(byte)*geom->no_vox));
MY_SAFE_CALL(cudaMemcpy(info_dyn_dev.tiss_type, phan->tiss_type,sizeof(byte)*geom->no_vox, cudaMemcpyHostToDevice));
MY_SAFE_CALL(cudaMemcpyToSymbol(info_stat,info_stat_host,sizeof(Info_Stat) ));
MY_SAFE_CALL(cudaMemcpyToSymbol(nL_dev,&nL,sizeof(SHORT) ));
MY_SAFE_CALL(cudaMemcpyToSymbol(diag_terms_dev,diag_terms_host, sizeof(complex_double)*MAX_TISS_NUM*MAX_NL*MAX_NL*MAX_NL*MAX_NL));
complex_double *sph_harm_dev;
MY_SAFE_CALL(cudaMalloc(&sph_harm_dev, sizeof(complex_double)*(nL+1)*(nL+1) * THETA_ANG_RES * PHI_ANG_RES));
MY_SAFE_CALL(cudaMemcpy(sph_harm_dev,sph_harm, sizeof(complex_double)*(nL+1)*(nL+1) * THETA_ANG_RES * PHI_ANG_RES,cudaMemcpyHostToDevice));
#if 0
for(cnt=0; cnt < (nL+1)*(nL+1); cnt++){
printf("Invoking compute_diagonal_abs with cnt = %d \n", cnt);
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
write_dist_dev<<<dim_grid, dim_block>>>(src_dev,1+0.0*I,cnt,block_dep, parameters.layer_start);
}
}
#endif
#if 1
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
compute_diagonal_abs<<<dim_grid, dim_block>>>(src_dev,out_dev,info_dyn_dev,cnt,block_dep, parameters.layer_start, parameters.flag);
checkCUDAError("Kernel invocation in compute_diagonal_abs\n");
}
#endif
/* The prop_thresh condition. Again run thread for all the voxels */
#if 1
// printf("Invoking compute_propabs with cnt = %d \n", cnt);
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
compute_propabs<<<dim_grid, dim_block>>>(src_dev, out_dev, info_dyn_dev, sph_harm_dev, cnt, block_dep,parameters.layer_start,parameters.flag);
//printf("%d operation complete \n", block_dep/parameters.num_layers);
checkCUDAError("Kernel invocation in compute_propabs\n");
}
#endif
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
scale_dist_dev<<<dim_grid, dim_block>>>(out_dev, geom->delX * geom->delY * geom->delZ,cnt,block_dep);
checkCUDAError("Kernel invocation in scale_dist_dev\n");
}
#if 1
if(parameters.flag_scat){
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
prop_scat_dev<<<dim_grid, dim_block>>>(out_dev, info_dyn_dev,parameters.layer_start,block_dep);
checkCUDAError("Kernel invocation in prop_dscat_dev\n");
}
}
#endif
cudaThreadSynchronize();
MY_SAFE_CALL(cudaMemcpy(parameters.out_host, out_dev, sizeof(complex_double)*size_layer*(parameters.num_layers), cudaMemcpyDeviceToHost));
MY_SAFE_CALL(cudaMemset(out_dev, 0, sizeof(complex_double)*size_layer*(parameters.num_layers)));
MY_SAFE_CALL(cudaMemset(src_dev, 0, sizeof(complex_double)*size));
MY_SAFE_CALL(cudaMemset(sph_harm_dev, 0, sizeof(complex_double)*(nL+1)*(nL+1) * THETA_ANG_RES * PHI_ANG_RES));
cudaFree(src_dev);
cudaFree(out_dev);
cudaFree(sph_harm_dev);
cudaThreadExit();
pthread_exit(NULL);
#endif
}
|
e5feab61b5bb98312eb040e8d2840da0cee7b0ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 256
__global__ void add(int *a, int *b, int *c);
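/* Minimal vector-addition example: N integers are initialised on the host,
 * copied to the device, summed element-wise with one block per element
 * (N blocks of 1 thread), and copied back for printing.  No error checking
 * is performed. */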
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int i;
// allocate space for device copies
hipMalloc(&d_a, N*sizeof(int));
hipMalloc(&d_b, N*sizeof(int));
hipMalloc(&d_c, N*sizeof(int));
// allocate variables
a = (int *)malloc(N*sizeof(int));
b = (int *)malloc(N*sizeof(int));
c = (int *)malloc(N*sizeof(int));
// assign values to the arrays
for(i = 0; i < N; i++)
{
a[i] = i;
b[i] = i;
}
// copy inputs to device
hipMemcpy(d_a, a, N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, N*sizeof(int), hipMemcpyHostToDevice);
// Launch add() kernel on GPU
hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, d_a, d_b, d_c);
// copy result back to Host
hipMemcpy(c, d_c, N*sizeof(int), hipMemcpyDeviceToHost);
for(i = 0; i < N; i++)
printf("c[%d] = %d\n", i + 1, c[i]);
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
__global__ void add(int *a, int *b, int *c)
{
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
| e5feab61b5bb98312eb040e8d2840da0cee7b0ae.cu | #include <stdio.h>
#define N 256
__global__ void add(int *a, int *b, int *c);
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int i;
// allocate space for device copies
cudaMalloc(&d_a, N*sizeof(int));
cudaMalloc(&d_b, N*sizeof(int));
cudaMalloc(&d_c, N*sizeof(int));
// allocate variables
a = (int *)malloc(N*sizeof(int));
b = (int *)malloc(N*sizeof(int));
c = (int *)malloc(N*sizeof(int));
// assign values to the arrays
for(i = 0; i < N; i++)
{
a[i] = i;
b[i] = i;
}
// copy inputs to device
cudaMemcpy(d_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
add<<<N,1>>>(d_a, d_b, d_c);
// copy result back to Host
cudaMemcpy(c, d_c, N*sizeof(int), cudaMemcpyDeviceToHost);
for(i = 0; i < N; i++)
printf("c[%d] = %d\n", i + 1, c[i]);
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
__global__ void add(int *a, int *b, int *c)
{
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
|
93ddbe23f31666a2f0b76905594fa8b29c571494.hip | // !!! This is a file automatically generated by hipify!!!
#include <cv.h>
#include <highgui.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define RED 2
#define GREEN 1
#define BLUE 0
#define TILE_SIZE 32
#define MASK_WIDTH 3
__constant__ char M[MASK_WIDTH*MASK_WIDTH];
using namespace cv;
__device__ unsigned char clamp(int value){
if(value < 0)
value = 0;
else
if(value > 255)
value = 255;
return (unsigned char)value;
}
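/*
 * Kernel: sobelFilterShareMemTest
 * Tiled convolution of the grayscale image with the 3x3 mask M held in
 * __constant__ memory (h_M in main is a 3x3 Sobel x-derivative kernel).
 * Each block stages a (TILE_SIZE + MASK_WIDTH - 1)^2 input tile, including
 * the halo, into shared memory in two loading batches, then every thread
 * convolves its pixel and clamps the result to [0, 255].
 */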
__global__ void sobelFilterShareMemTest(unsigned char *imageInput, int width, int height, \
unsigned int maskWidth,unsigned char *imageOutput){
__shared__ float N_ds[TILE_SIZE + MASK_WIDTH - 1][TILE_SIZE+ MASK_WIDTH - 1];
int n = maskWidth/2;
int dest = threadIdx.y*TILE_SIZE+threadIdx.x, destY = dest / (TILE_SIZE+MASK_WIDTH-1), destX = dest % (TILE_SIZE+MASK_WIDTH-1),
srcY = blockIdx.y * TILE_SIZE + destY - n, srcX = blockIdx.x * TILE_SIZE + destX - n,
src = (srcY * width + srcX);
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = imageInput[src];
else
N_ds[destY][destX] = 0;
// Second batch loading
dest = threadIdx.y * TILE_SIZE + threadIdx.x + TILE_SIZE * TILE_SIZE;
destY = dest /(TILE_SIZE + MASK_WIDTH - 1), destX = dest % (TILE_SIZE + MASK_WIDTH - 1);
srcY = blockIdx.y * TILE_SIZE + destY - n;
srcX = blockIdx.x * TILE_SIZE + destX - n;
src = (srcY * width + srcX);
if (destY < TILE_SIZE + MASK_WIDTH - 1) {
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = imageInput[src];
else
N_ds[destY][destX] = 0;
}
__syncthreads();
int accum = 0;
int y, x;
for (y = 0; y < maskWidth; y++)
for (x = 0; x < maskWidth; x++)
accum += N_ds[threadIdx.y + y][threadIdx.x + x] * M[y * maskWidth + x];
y = blockIdx.y * TILE_SIZE + threadIdx.y;
x = blockIdx.x * TILE_SIZE + threadIdx.x;
if (y < height && x < width)
imageOutput[(y * width + x)] = clamp(accum);
__syncthreads();
}
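/* Kernel: img2gray -- converts the interleaved BGR input image to grayscale
 * using the standard luminance weights 0.299 R + 0.587 G + 0.114 B. */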
__global__ void img2gray(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < height) && (col < width)){
imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587 \
+ imageInput[(row*width+col)*3+BLUE]*0.114;
}
}
int main(int argc, char **argv){
hipSetDevice(0);//GTX980
hipError_t error = hipSuccess;
clock_t start, end, startGPU, endGPU;
double cpu_time_used, gpu_time_used;
char h_M[] = {1,0,-1,2,0,-2,1,0,-1};
char* imageName = argv[1];
unsigned char *dataRawImage, *d_dataRawImage, *d_imageOutput, *h_imageOutput, *d_sobelOutput;
Mat image;
image = imread(imageName, 1);
if(argc !=2 || !image.data){
printf("No image Data \n");
return -1;
}
Size s = image.size();
int width = s.width;
int height = s.height;
int size = sizeof(unsigned char)*width*height*image.channels();
int sizeGray = sizeof(unsigned char)*width*height;
dataRawImage = (unsigned char*)malloc(size);
error = hipMalloc((void**)&d_dataRawImage,size);
if(error != hipSuccess){
printf("Error reservando memoria para d_dataRawImage\n");
exit(-1);
}
h_imageOutput = (unsigned char *)malloc(sizeGray);
error = hipMalloc((void**)&d_imageOutput,sizeGray);
if(error != hipSuccess){
printf("Error reservando memoria para d_imageOutput\n");
exit(-1);
}
error = hipMalloc((void**)&d_sobelOutput,sizeGray);
if(error != hipSuccess){
printf("Error reservando memoria para d_sobelOutput\n");
exit(-1);
}
dataRawImage = image.data;
startGPU = clock();
error = hipMemcpy(d_dataRawImage,dataRawImage,size, hipMemcpyHostToDevice);
if(error != hipSuccess){
printf("Error copiando los datos de dataRawImage a d_dataRawImage \n");
exit(-1);
}
error = hipMemcpyToSymbol(M,h_M,sizeof(char)*MASK_WIDTH*MASK_WIDTH);
if(error != hipSuccess){
printf("Error copiando los datos de h_M a d_M \n");
exit(-1);
}
int blockSize = TILE_SIZE;
dim3 dimBlock(blockSize,blockSize,1);
dim3 dimGrid(ceil(width/float(blockSize)),ceil(height/float(blockSize)),1);
hipLaunchKernelGGL(( img2gray), dim3(dimGrid),dim3(dimBlock), 0, 0, d_dataRawImage,width,height,d_imageOutput);
hipDeviceSynchronize();
hipLaunchKernelGGL(( sobelFilterShareMemTest), dim3(dimGrid),dim3(dimBlock), 0, 0, d_imageOutput,width,height,MASK_WIDTH,d_sobelOutput);
hipMemcpy(h_imageOutput,d_sobelOutput,sizeGray,hipMemcpyDeviceToHost);
endGPU = clock();
Mat gray_image;
gray_image.create(height,width,CV_8UC1);
gray_image.data = h_imageOutput;
start = clock();
Mat gray_image_opencv, grad_x, abs_grad_x;
cvtColor(image, gray_image_opencv, CV_BGR2GRAY);
Sobel(gray_image_opencv,grad_x,CV_8UC1,1,0,3,1,0,BORDER_DEFAULT);
convertScaleAbs(grad_x, abs_grad_x);
end = clock();
imwrite("./Sobel_Image.jpg",gray_image);
/* namedWindow(imageName, WINDOW_NORMAL);
namedWindow("Gray Image CUDA", WINDOW_NORMAL);
namedWindow("Sobel Image OpenCV", WINDOW_NORMAL);
imshow(imageName,image);
imshow("Gray Image CUDA", gray_image);
imshow("Sobel Image OpenCV",abs_grad_x);
waitKey(0);*/
//free(dataRawImage);
gpu_time_used = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
//printf("Tiempo Algoritmo Paralelo: %.10f\n",gpu_time_used);
cpu_time_used = ((double) (end - start)) /CLOCKS_PER_SEC;
//printf("Tiempo Algoritmo OpenCV: %.10f\n",cpu_time_used);
//printf("La aceleracin obtenida es de %.10fX\n",cpu_time_used/gpu_time_used);
printf("%.10f,%.10f\n",cpu_time_used,gpu_time_used);
hipFree(d_dataRawImage);
hipFree(d_imageOutput);
// hipFree(M); // M is a __constant__ symbol, not a hipMalloc allocation, so it must not be passed to hipFree
hipFree(d_sobelOutput);
return 0;
}
| 93ddbe23f31666a2f0b76905594fa8b29c571494.cu | #include <cv.h>
#include <highgui.h>
#include <time.h>
#include <cuda.h>
#define RED 2
#define GREEN 1
#define BLUE 0
#define TILE_SIZE 32
#define MASK_WIDTH 3
__constant__ char M[MASK_WIDTH*MASK_WIDTH];
using namespace cv;
__device__ unsigned char clamp(int value){
if(value < 0)
value = 0;
else
if(value > 255)
value = 255;
return (unsigned char)value;
}
__global__ void sobelFilterShareMemTest(unsigned char *imageInput, int width, int height, \
unsigned int maskWidth,unsigned char *imageOutput){
__shared__ float N_ds[TILE_SIZE + MASK_WIDTH - 1][TILE_SIZE+ MASK_WIDTH - 1];
int n = maskWidth/2;
int dest = threadIdx.y*TILE_SIZE+threadIdx.x, destY = dest / (TILE_SIZE+MASK_WIDTH-1), destX = dest % (TILE_SIZE+MASK_WIDTH-1),
srcY = blockIdx.y * TILE_SIZE + destY - n, srcX = blockIdx.x * TILE_SIZE + destX - n,
src = (srcY * width + srcX);
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = imageInput[src];
else
N_ds[destY][destX] = 0;
// Second batch loading
dest = threadIdx.y * TILE_SIZE + threadIdx.x + TILE_SIZE * TILE_SIZE;
destY = dest /(TILE_SIZE + MASK_WIDTH - 1), destX = dest % (TILE_SIZE + MASK_WIDTH - 1);
srcY = blockIdx.y * TILE_SIZE + destY - n;
srcX = blockIdx.x * TILE_SIZE + destX - n;
src = (srcY * width + srcX);
if (destY < TILE_SIZE + MASK_WIDTH - 1) {
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = imageInput[src];
else
N_ds[destY][destX] = 0;
}
__syncthreads();
int accum = 0;
int y, x;
for (y = 0; y < maskWidth; y++)
for (x = 0; x < maskWidth; x++)
accum += N_ds[threadIdx.y + y][threadIdx.x + x] * M[y * maskWidth + x];
y = blockIdx.y * TILE_SIZE + threadIdx.y;
x = blockIdx.x * TILE_SIZE + threadIdx.x;
if (y < height && x < width)
imageOutput[(y * width + x)] = clamp(accum);
__syncthreads();
}
__global__ void img2gray(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
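    // Convert interleaved BGR to grayscale using the BT.601 luma weights (0.299 R + 0.587 G + 0.114 B).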
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < height) && (col < width)){
imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587 \
+ imageInput[(row*width+col)*3+BLUE]*0.114;
}
}
int main(int argc, char **argv){
cudaSetDevice(0);//GTX980
cudaError_t error = cudaSuccess;
clock_t start, end, startGPU, endGPU;
double cpu_time_used, gpu_time_used;
char h_M[] = {1,0,-1,2,0,-2,1,0,-1};
char* imageName = argv[1];
unsigned char *dataRawImage, *d_dataRawImage, *d_imageOutput, *h_imageOutput, *d_sobelOutput;
Mat image;
image = imread(imageName, 1);
if(argc !=2 || !image.data){
printf("No image Data \n");
return -1;
}
Size s = image.size();
int width = s.width;
int height = s.height;
int size = sizeof(unsigned char)*width*height*image.channels();
int sizeGray = sizeof(unsigned char)*width*height;
dataRawImage = (unsigned char*)malloc(size);
error = cudaMalloc((void**)&d_dataRawImage,size);
if(error != cudaSuccess){
printf("Error reservando memoria para d_dataRawImage\n");
exit(-1);
}
h_imageOutput = (unsigned char *)malloc(sizeGray);
error = cudaMalloc((void**)&d_imageOutput,sizeGray);
if(error != cudaSuccess){
printf("Error reservando memoria para d_imageOutput\n");
exit(-1);
}
error = cudaMalloc((void**)&d_sobelOutput,sizeGray);
if(error != cudaSuccess){
printf("Error reservando memoria para d_sobelOutput\n");
exit(-1);
}
dataRawImage = image.data;
startGPU = clock();
error = cudaMemcpy(d_dataRawImage,dataRawImage,size, cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando los datos de dataRawImage a d_dataRawImage \n");
exit(-1);
}
error = cudaMemcpyToSymbol(M,h_M,sizeof(char)*MASK_WIDTH*MASK_WIDTH);
if(error != cudaSuccess){
printf("Error copiando los datos de h_M a d_M \n");
exit(-1);
}
int blockSize = TILE_SIZE;
dim3 dimBlock(blockSize,blockSize,1);
dim3 dimGrid(ceil(width/float(blockSize)),ceil(height/float(blockSize)),1);
img2gray<<<dimGrid,dimBlock>>>(d_dataRawImage,width,height,d_imageOutput);
cudaDeviceSynchronize();
sobelFilterShareMemTest<<<dimGrid,dimBlock>>>(d_imageOutput,width,height,MASK_WIDTH,d_sobelOutput);
cudaMemcpy(h_imageOutput,d_sobelOutput,sizeGray,cudaMemcpyDeviceToHost);
endGPU = clock();
Mat gray_image;
gray_image.create(height,width,CV_8UC1);
gray_image.data = h_imageOutput;
start = clock();
Mat gray_image_opencv, grad_x, abs_grad_x;
cvtColor(image, gray_image_opencv, CV_BGR2GRAY);
Sobel(gray_image_opencv,grad_x,CV_8UC1,1,0,3,1,0,BORDER_DEFAULT);
convertScaleAbs(grad_x, abs_grad_x);
end = clock();
imwrite("./Sobel_Image.jpg",gray_image);
/* namedWindow(imageName, WINDOW_NORMAL);
namedWindow("Gray Image CUDA", WINDOW_NORMAL);
namedWindow("Sobel Image OpenCV", WINDOW_NORMAL);
imshow(imageName,image);
imshow("Gray Image CUDA", gray_image);
imshow("Sobel Image OpenCV",abs_grad_x);
waitKey(0);*/
//free(dataRawImage);
gpu_time_used = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
//printf("Tiempo Algoritmo Paralelo: %.10f\n",gpu_time_used);
cpu_time_used = ((double) (end - start)) /CLOCKS_PER_SEC;
//printf("Tiempo Algoritmo OpenCV: %.10f\n",cpu_time_used);
//printf("La aceleración obtenida es de %.10fX\n",cpu_time_used/gpu_time_used);
printf("%.10f,%.10f\n",cpu_time_used,gpu_time_used);
cudaFree(d_dataRawImage);
cudaFree(d_imageOutput);
cudaFree(M);
cudaFree(d_sobelOutput);
return 0;
}
|
9b2a2c7f3b3a280a893425aa03c94e7305736db2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @author Samuele Germiniani e Giulio Mazzi
 * University of Verona, Dept. of Computer Science <br>
* [email protected] <br>
* [email protected]
* @date October, 2017
* @version v1
*
 * @copyright Copyright © 2017 Hornet. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*/
#include "Static/PageRank/PageRank.cuh"
#include "Core/Auxilary/DuplicateRemoving.cuh"
#include <GraphIO/GraphStd.hpp>
#include <GraphIO/BFS.hpp>
#include <queue>
#include <iostream>
#include "Device/Primitives/WarpReduce.cuh"
namespace hornets_nest {
//------------------------------------------------------------------------------
///////////////
// OPERATORS //
///////////////
struct InitOperator {
residual_t* actual_residual;
residual_t* new_residual;
degree_t* out_degrees;
rank_t* page_rank;
rank_t initial_page_rank;
OPERATOR(Vertex& vertex) {
vid_t src = vertex.id();
actual_residual[src] = 0.0f;
new_residual[src] = 0.0f;
out_degrees[src] = vertex.degree();
page_rank[src] = initial_page_rank;
}
};
struct ResidualReset {
residual_t* residual;
OPERATOR(Vertex& vertex) {
vid_t src = vertex.id();
residual[src] = 0.0f;
}
};
struct MoveResidual {
residual_t* actual_residual;
residual_t* new_residual;
residual_t* reduce;
OPERATOR(Vertex& vertex) {
vid_t src = vertex.id();
actual_residual[src] += new_residual[src];
new_residual[src] = 0.0f;
reduce[src] = 0.0f;
}
};
struct ResidualOperation {
residual_t* residual;
degree_t* out_degrees;
OPERATOR(Vertex& vertex, Edge& edge) {
auto dst = edge.dst_id();
atomicAdd(&residual[vertex.id()], (1.0f / out_degrees[dst] ));
}
};
struct ResidualNormalization {
residual_t* residual;
float teleport_parameter;
OPERATOR(Vertex& vertex) {
residual[vertex.id()] = (1.0f - teleport_parameter ) *
teleport_parameter * residual[vertex.id()];
}
};
struct PageRankUpdate {
rank_t* page_rank;
residual_t* residual;
OPERATOR(Vertex& vertex) {
vid_t src = vertex.id();
page_rank[src] += residual[src];
}
};
struct Normalize {
rank_t* page_rank;
float* norm;
OPERATOR(Vertex& vertex) {
vid_t src = vertex.id();
page_rank[src] /= norm[0];
}
};
struct PageRankPropagation {
residual_t* actual_residual;
residual_t* new_residual;
degree_t* out_degrees;
float teleport_parameter;
OPERATOR(Vertex& vertex, Edge& edge) {
auto dst = edge.dst_id();
auto src = edge.src_id();
atomicAdd(&new_residual[dst], (teleport_parameter *
(actual_residual[src] / out_degrees[src])));
}
};
//------------------------------------------------------------------------------
/////////////
// SUPPORT //
/////////////
// from
// https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
__inline__ __device__
float blockReduceSum(float val) {
static __shared__ float shared[32]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
xlib::WarpReduce<>::add(val); // Each warp performs partial reduction
if (lane==0) shared[wid]=val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid==0) xlib::WarpReduce<>::add(val); //Final reduce within first warp
return val;
}
__global__ void deviceReduceKernel(float *in, float* out, int N) {
float sum = 0.0f;
//reduce multiple elements per thread
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x) {
sum += in[i];
}
sum = blockReduceSum(sum);
if (threadIdx.x==0)
out[blockIdx.x]=sum;
}
void deviceReduce(float *in, float* out, int N) {
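    // Two-pass sum: every block writes its partial sum to out[blockIdx.x],
    // then a single block reduces those partials into out[0].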
int threads = 512;
int blocks = min((N + threads - 1) / threads, 1024);
hipLaunchKernelGGL(( deviceReduceKernel), dim3(blocks), dim3(threads), 0, 0, in, out, N);
hipLaunchKernelGGL(( deviceReduceKernel), dim3(1), dim3(1024), 0, 0, out, out, blocks);
}
//------------------------------------------------------------------------------
//////////////
// PAGERANK //
//////////////
PageRank::PageRank(HornetGraph& hornet, HornetGraph& inverse) :
StaticAlgorithm(hornet),
load_balacing(hornet),
hornet_inverse(inverse),
load_balacing_inverse(inverse) {
gpu::allocate(actual_residual, hornet.nV());
gpu::allocate(new_residual, hornet.nV());
gpu::allocate(page_rank, hornet.nV());
gpu::allocate(out_degrees, hornet.nV());
gpu::allocate(reduce, hornet.nV());
reset();
}
PageRank::~PageRank() {
gpu::free(actual_residual);
gpu::free(new_residual);
gpu::free(page_rank);
gpu::free(out_degrees);
host::free(page_rank_host);
}
void PageRank::reset() {
}
void PageRank::set_parameters(float teleport, float tresh) {
teleport_parameter = teleport;
threshold = tresh;
}
void PageRank::run() {
forAllVertices(
hornet,
InitOperator{
actual_residual, new_residual,out_degrees,
page_rank,(1-teleport_parameter)} );
forAllEdges(
hornet_inverse,
ResidualOperation{ actual_residual,out_degrees},
load_balacing_inverse );
forAllVertices(
hornet,
ResidualNormalization { actual_residual,teleport_parameter} );
int iteration = 0;
bool over_residual = true;
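    // Push-style iteration: fold the residuals into the ranks, propagate them along the
    // edges, and stop once the mean residual per vertex drops below the threshold.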
while ( iteration < 100 && over_residual) {
++iteration;
forAllVertices( hornet, PageRankUpdate { page_rank, actual_residual});
forAllEdges(
hornet,
PageRankPropagation {
actual_residual,new_residual,out_degrees,teleport_parameter},
load_balacing);
forAllVertices(
hornet,
ResidualReset { actual_residual });
forAllVertices(
hornet,
MoveResidual {actual_residual, new_residual,reduce});
deviceReduce(actual_residual, reduce, hornet.nV());
float tot_residual[1];
gpu::copyToHost(reduce, 1,tot_residual);
//std::cout << (tot_residual[0]/hornet.nV()) << std::endl;
if ( tot_residual[0]/hornet.nV() < threshold ) break;
}
std::cout << "Number of iteration: " << iteration << std::endl;
float *tmp = actual_residual;
deviceReduce(page_rank,tmp,hornet.nV());
forAllVertices( hornet, Normalize { page_rank, tmp });
}
void PageRank::release() {
gpu::free(actual_residual);
gpu::free(new_residual);
gpu::free(page_rank);
gpu::free(out_degrees);
host::free(page_rank_host);
actual_residual = nullptr;
new_residual = nullptr;
page_rank = nullptr;
out_degrees = nullptr;
page_rank_host = nullptr;
}
void PageRank::evaluate_sequential_algorithm()
{
host::allocate(page_rank_host,hornet.nV());
residual_t *residual_host;
host::allocate(residual_host,hornet.nV());
int *out_degrees_host;
host::allocate(out_degrees_host,hornet.nV());
using namespace graph;
GraphStd<vid_t, eoff_t> graph(hornet.csr_offsets(), hornet.nV(),
hornet.csr_edges(), hornet.nE());
GraphStd<vid_t, eoff_t> graph_inverse(hornet_inverse.csr_offsets(),
hornet_inverse.nV(), hornet_inverse.csr_edges(), hornet_inverse.nE());
for (size_t i = 0; i < graph.nV(); ++i)
{
page_rank_host[i] = 1.0f - teleport_parameter;
residual_host[i] = 0.0f;
}
for (auto v : graph.V)
out_degrees_host[v.id()] = v.out_degree();
for (auto v : graph_inverse.V)
{
for( auto e : v )
{
residual_host[v.id()] += 1.0f / out_degrees_host[e.dst_id()];
}
residual_host[v.id()] = (1.0f-teleport_parameter) *
teleport_parameter * residual_host[v.id()];
}
std::queue<graph::GraphStd<vid_t, eoff_t>::Vertex> queue_host;
for (auto v : graph.V)
queue_host.push(v);
while ( !queue_host.empty() )
{
auto v = queue_host.front();
queue_host.pop();
page_rank_host[v.id()] += residual_host[v.id()];
for ( auto e : v )
{
residual_t old_residual_host = residual_host[e.dst_id()];
residual_host[e.dst_id()] +=
( (residual_host[v.id()] * teleport_parameter) /
out_degrees_host[v.id()]);
if ( (residual_host[e.dst_id()] >= threshold) &&
(old_residual_host < threshold) )
{
queue_host.push(e.dst());
}
}
residual_host[v.id()] = 0.0f;
}
float norm = 0.0f;
for (size_t i = 0; i < graph.nV(); ++i)
norm += page_rank_host[i];
for (size_t i = 0; i < graph.nV(); ++i)
page_rank_host[i] /= norm;
}
bool PageRank::validate() {
if (page_rank_host == nullptr)
evaluate_sequential_algorithm();
rank_t * gpu_pr;
host::allocate(gpu_pr,hornet.nV());
gpu::copyToHost(page_rank, hornet.nV(),gpu_pr);
std::cout << "host values (first 20): ";
host::printArray(page_rank_host,min(20,hornet.nV()));
std::cout << std::endl << "device values (first 20): ";
gpu::printArray(page_rank,min(20,hornet.nV()));
std::cout << std::endl;
bool is_equal = true;
int number_of_error = 0;
int errori_host_maggiore = 0;
int errori_device_maggiore = 0;
float tot_host = 0.0f;
for (int i = 0; i < hornet.nV(); ++i)
tot_host += page_rank_host[i];
float tot_device = 0.0f;
for (int i = 0; i < hornet.nV(); ++i)
tot_device += gpu_pr[i];
std::cout << "totale host: " << tot_host << " totale device: "
<< tot_device << std::endl;
for (int i = 0; i < hornet.nV(); ++i)
{
if ( abs(page_rank_host[i] - gpu_pr[i])/page_rank_host[i] > 0.3 )
{
++number_of_error;
if (gpu_pr[i] > page_rank_host[i])
++errori_device_maggiore;
else
++errori_host_maggiore;
is_equal = false;
}
}
if (number_of_error > 0)
std::cout << "errors percentage: " << (number_of_error * 100.0) /
hornet.nV()<<"%" << std::endl;
host::free(gpu_pr);
return is_equal;
}
} // namespace hornets_nest
| 9b2a2c7f3b3a280a893425aa03c94e7305736db2.cu | /**
* @author Samuele Germiniani e Giulio Mazzi
 * University of Verona, Dept. of Computer Science <br>
* [email protected] <br>
* [email protected]
* @date October, 2017
* @version v1
*
* @copyright Copyright © 2017 Hornet. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*/
#include "Static/PageRank/PageRank.cuh"
#include "Core/Auxilary/DuplicateRemoving.cuh"
#include <GraphIO/GraphStd.hpp>
#include <GraphIO/BFS.hpp>
#include <queue>
#include <iostream>
#include "Device/Primitives/WarpReduce.cuh"
namespace hornets_nest {
//------------------------------------------------------------------------------
///////////////
// OPERATORS //
///////////////
struct InitOperator {
residual_t* actual_residual;
residual_t* new_residual;
degree_t* out_degrees;
rank_t* page_rank;
rank_t initial_page_rank;
OPERATOR(Vertex& vertex) {
vid_t src = vertex.id();
actual_residual[src] = 0.0f;
new_residual[src] = 0.0f;
out_degrees[src] = vertex.degree();
page_rank[src] = initial_page_rank;
}
};
struct ResidualReset {
residual_t* residual;
OPERATOR(Vertex& vertex) {
vid_t src = vertex.id();
residual[src] = 0.0f;
}
};
struct MoveResidual {
residual_t* actual_residual;
residual_t* new_residual;
residual_t* reduce;
OPERATOR(Vertex& vertex) {
vid_t src = vertex.id();
actual_residual[src] += new_residual[src];
new_residual[src] = 0.0f;
reduce[src] = 0.0f;
}
};
struct ResidualOperation {
residual_t* residual;
degree_t* out_degrees;
OPERATOR(Vertex& vertex, Edge& edge) {
auto dst = edge.dst_id();
atomicAdd(&residual[vertex.id()], (1.0f / out_degrees[dst] ));
}
};
struct ResidualNormalization {
residual_t* residual;
float teleport_parameter;
OPERATOR(Vertex& vertex) {
residual[vertex.id()] = (1.0f - teleport_parameter ) *
teleport_parameter * residual[vertex.id()];
}
};
struct PageRankUpdate {
rank_t* page_rank;
residual_t* residual;
OPERATOR(Vertex& vertex) {
vid_t src = vertex.id();
page_rank[src] += residual[src];
}
};
struct Normalize {
rank_t* page_rank;
float* norm;
OPERATOR(Vertex& vertex) {
vid_t src = vertex.id();
page_rank[src] /= norm[0];
}
};
struct PageRankPropagation {
residual_t* actual_residual;
residual_t* new_residual;
degree_t* out_degrees;
float teleport_parameter;
OPERATOR(Vertex& vertex, Edge& edge) {
auto dst = edge.dst_id();
auto src = edge.src_id();
atomicAdd(&new_residual[dst], (teleport_parameter *
(actual_residual[src] / out_degrees[src])));
}
};
//------------------------------------------------------------------------------
/////////////
// SUPPORT //
/////////////
// from
// https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
__inline__ __device__
float blockReduceSum(float val) {
static __shared__ float shared[32]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
xlib::WarpReduce<>::add(val); // Each warp performs partial reduction
if (lane==0) shared[wid]=val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid==0) xlib::WarpReduce<>::add(val); //Final reduce within first warp
return val;
}
__global__ void deviceReduceKernel(float *in, float* out, int N) {
float sum = 0.0f;
//reduce multiple elements per thread
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x) {
sum += in[i];
}
sum = blockReduceSum(sum);
if (threadIdx.x==0)
out[blockIdx.x]=sum;
}
void deviceReduce(float *in, float* out, int N) {
int threads = 512;
int blocks = min((N + threads - 1) / threads, 1024);
deviceReduceKernel<<<blocks, threads>>>(in, out, N);
deviceReduceKernel<<<1, 1024>>>(out, out, blocks);
}
//------------------------------------------------------------------------------
//////////////
// PAGERANK //
//////////////
PageRank::PageRank(HornetGraph& hornet, HornetGraph& inverse) :
StaticAlgorithm(hornet),
load_balacing(hornet),
hornet_inverse(inverse),
load_balacing_inverse(inverse) {
gpu::allocate(actual_residual, hornet.nV());
gpu::allocate(new_residual, hornet.nV());
gpu::allocate(page_rank, hornet.nV());
gpu::allocate(out_degrees, hornet.nV());
gpu::allocate(reduce, hornet.nV());
reset();
}
PageRank::~PageRank() {
gpu::free(actual_residual);
gpu::free(new_residual);
gpu::free(page_rank);
gpu::free(out_degrees);
host::free(page_rank_host);
}
void PageRank::reset() {
}
void PageRank::set_parameters(float teleport, float tresh) {
teleport_parameter = teleport;
threshold = tresh;
}
void PageRank::run() {
forAllVertices(
hornet,
InitOperator{
actual_residual, new_residual,out_degrees,
page_rank,(1-teleport_parameter)} );
forAllEdges(
hornet_inverse,
ResidualOperation{ actual_residual,out_degrees},
load_balacing_inverse );
forAllVertices(
hornet,
ResidualNormalization { actual_residual,teleport_parameter} );
int iteration = 0;
bool over_residual = true;
while ( iteration < 100 && over_residual) {
++iteration;
forAllVertices( hornet, PageRankUpdate { page_rank, actual_residual});
forAllEdges(
hornet,
PageRankPropagation {
actual_residual,new_residual,out_degrees,teleport_parameter},
load_balacing);
forAllVertices(
hornet,
ResidualReset { actual_residual });
forAllVertices(
hornet,
MoveResidual {actual_residual, new_residual,reduce});
deviceReduce(actual_residual, reduce, hornet.nV());
float tot_residual[1];
gpu::copyToHost(reduce, 1,tot_residual);
//std::cout << (tot_residual[0]/hornet.nV()) << std::endl;
if ( tot_residual[0]/hornet.nV() < threshold ) break;
}
std::cout << "Number of iteration: " << iteration << std::endl;
float *tmp = actual_residual;
deviceReduce(page_rank,tmp,hornet.nV());
forAllVertices( hornet, Normalize { page_rank, tmp });
}
void PageRank::release() {
gpu::free(actual_residual);
gpu::free(new_residual);
gpu::free(page_rank);
gpu::free(out_degrees);
host::free(page_rank_host);
actual_residual = nullptr;
new_residual = nullptr;
page_rank = nullptr;
out_degrees = nullptr;
page_rank_host = nullptr;
}
void PageRank::evaluate_sequential_algorithm()
{
host::allocate(page_rank_host,hornet.nV());
residual_t *residual_host;
host::allocate(residual_host,hornet.nV());
int *out_degrees_host;
host::allocate(out_degrees_host,hornet.nV());
using namespace graph;
GraphStd<vid_t, eoff_t> graph(hornet.csr_offsets(), hornet.nV(),
hornet.csr_edges(), hornet.nE());
GraphStd<vid_t, eoff_t> graph_inverse(hornet_inverse.csr_offsets(),
hornet_inverse.nV(), hornet_inverse.csr_edges(), hornet_inverse.nE());
for (size_t i = 0; i < graph.nV(); ++i)
{
page_rank_host[i] = 1.0f - teleport_parameter;
residual_host[i] = 0.0f;
}
for (auto v : graph.V)
out_degrees_host[v.id()] = v.out_degree();
for (auto v : graph_inverse.V)
{
for( auto e : v )
{
residual_host[v.id()] += 1.0f / out_degrees_host[e.dst_id()];
}
residual_host[v.id()] = (1.0f-teleport_parameter) *
teleport_parameter * residual_host[v.id()];
}
std::queue<graph::GraphStd<vid_t, eoff_t>::Vertex> queue_host;
for (auto v : graph.V)
queue_host.push(v);
while ( !queue_host.empty() )
{
auto v = queue_host.front();
queue_host.pop();
page_rank_host[v.id()] += residual_host[v.id()];
for ( auto e : v )
{
residual_t old_residual_host = residual_host[e.dst_id()];
residual_host[e.dst_id()] +=
( (residual_host[v.id()] * teleport_parameter) /
out_degrees_host[v.id()]);
if ( (residual_host[e.dst_id()] >= threshold) &&
(old_residual_host < threshold) )
{
queue_host.push(e.dst());
}
}
residual_host[v.id()] = 0.0f;
}
float norm = 0.0f;
for (size_t i = 0; i < graph.nV(); ++i)
norm += page_rank_host[i];
for (size_t i = 0; i < graph.nV(); ++i)
page_rank_host[i] /= norm;
}
bool PageRank::validate() {
if (page_rank_host == nullptr)
evaluate_sequential_algorithm();
rank_t * gpu_pr;
host::allocate(gpu_pr,hornet.nV());
gpu::copyToHost(page_rank, hornet.nV(),gpu_pr);
std::cout << "host values (first 20): ";
host::printArray(page_rank_host,min(20,hornet.nV()));
std::cout << std::endl << "device values (first 20): ";
gpu::printArray(page_rank,min(20,hornet.nV()));
std::cout << std::endl;
bool is_equal = true;
int number_of_error = 0;
int errori_host_maggiore = 0;
int errori_device_maggiore = 0;
float tot_host = 0.0f;
for (int i = 0; i < hornet.nV(); ++i)
tot_host += page_rank_host[i];
float tot_device = 0.0f;
for (int i = 0; i < hornet.nV(); ++i)
tot_device += gpu_pr[i];
std::cout << "totale host: " << tot_host << " totale device: "
<< tot_device << std::endl;
for (int i = 0; i < hornet.nV(); ++i)
{
if ( abs(page_rank_host[i] - gpu_pr[i])/page_rank_host[i] > 0.3 )
{
++number_of_error;
if (gpu_pr[i] > page_rank_host[i])
++errori_device_maggiore;
else
++errori_host_maggiore;
is_equal = false;
}
}
if (number_of_error > 0)
std::cout << "errors percentage: " << (number_of_error * 100.0) /
hornet.nV()<<"%" << std::endl;
host::free(gpu_pr);
return is_equal;
}
} // namespace hornets_nest
|
b1453dd5d817092ceb64981e01e57c63b0c3f136.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* tex1dfetch_htod.cu
*
* Microbenchmark to measure performance of texturing from host memory.
*
* Build with: nvcc -I ../chLib <options> tex1dfetch_htod.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <assert.h>
#include <chTimer.h>
#include <chError.h>
texture<float, 1> tex;
extern "C" __global__ void
TexReadout( float *out, size_t N )
{
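    // Grid-stride loop: each thread streams elements through the 1D texture bound to the input buffer.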
for ( size_t i = blockIdx.x*blockDim.x + threadIdx.x;
i < N;
i += gridDim.x*blockDim.x )
{
out[i] = tex1Dfetch( tex, i );
}
}
template<class T>
float
MeasureBandwidth( void *out, size_t N, int blocks, int threads )
{
hipError_t status;
chTimerTimestamp start, stop;
double Bandwidth = 0.0f;
chTimerGetTime( &start );
hipLaunchKernelGGL(( TexReadout), dim3(2),dim3(384), 0, 0, (float *) out, N );
cuda(DeviceSynchronize());
chTimerGetTime( &stop );
Bandwidth = ((double) N*sizeof(T) / chTimerElapsedTime( &start, &stop ))/1048576.0;
Error:
return (float) Bandwidth;
}
template<class T>
float
ComputeMaximumBandwidth( size_t N )
{
T *inHost = 0;
T *inDevice = 0;
T *outDevice = 0;
T *outHost = 0;
hipError_t status;
bool ret = false;
float fMaxBandwidth = 0.0f;
int cMaxBlocks = 0;
int cMaxThreads = 0;
cuda(HostAlloc( (void **) &inHost, N*sizeof(T), hipHostMallocMapped));
cuda(HostGetDevicePointer( (void **) &inDevice, inHost, 0 ));
cuda(HostAlloc( (void **) &outHost, N*sizeof(T), 0 ) );
cuda(Malloc( (void **) &outDevice, N*sizeof(T)));
for ( int i = 0; i < N; i++ ) {
inHost[i] = (T) i;
}
cuda(BindTexture(NULL, tex, inDevice, hipCreateChannelDesc<T>(), N*sizeof(T)));
{
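        // Sweep candidate block/thread counts, verify each output against the input,
        // and keep track of the configuration that reached the highest bandwidth.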
for ( int cBlocks = 8; cBlocks <= 512; cBlocks += 8 ) {
for ( int cThreads = 16; cThreads <= 512; cThreads += 16 ) {
memset( outHost, 0, N*sizeof(float) );
float bw = MeasureBandwidth<T>( outDevice, N, cBlocks, cThreads );
if ( bw > fMaxBandwidth ) {
fMaxBandwidth = bw;
cMaxBlocks = cBlocks;
cMaxThreads = cThreads;
printf( "New maximum of %.2f M/s reached at %d blocks of %d threads\n",
fMaxBandwidth, cMaxBlocks, cMaxThreads );
}
cuda(Memcpy( outHost, outDevice, N*sizeof(T), hipMemcpyDeviceToHost ) );
for ( int i = 0; i < N; i++ ) {
assert( outHost[i] == inHost[i] );
if ( outHost[i] != inHost[i] ) {
goto Error;
}
}
}
}
}
ret = true;
Error:
hipHostFree( inHost );
hipFree( outDevice );
hipHostFree( outHost );
return ret;
}
int
main( int argc, char *argv[] )
{
int ret = 1;
hipError_t status;
float fMaxBW = 0.0f;
cuda(SetDeviceFlags(hipDeviceMapHost));
cuda(Free(0));
fMaxBW = ComputeMaximumBandwidth<float>(64*1048576);
printf( "Maximum bandwidth achieved: %.2f\n", fMaxBW );
ret = 0;
Error:
return ret;
}
| b1453dd5d817092ceb64981e01e57c63b0c3f136.cu | /*
*
* tex1dfetch_htod.cu
*
* Microbenchmark to measure performance of texturing from host memory.
*
* Build with: nvcc -I ../chLib <options> tex1dfetch_htod.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <assert.h>
#include <chTimer.h>
#include <chError.h>
texture<float, 1> tex;
extern "C" __global__ void
TexReadout( float *out, size_t N )
{
for ( size_t i = blockIdx.x*blockDim.x + threadIdx.x;
i < N;
i += gridDim.x*blockDim.x )
{
out[i] = tex1Dfetch( tex, i );
}
}
template<class T>
float
MeasureBandwidth( void *out, size_t N, int blocks, int threads )
{
cudaError_t status;
chTimerTimestamp start, stop;
double Bandwidth = 0.0f;
chTimerGetTime( &start );
TexReadout<<<2,384>>>( (float *) out, N );
cuda(DeviceSynchronize());
chTimerGetTime( &stop );
Bandwidth = ((double) N*sizeof(T) / chTimerElapsedTime( &start, &stop ))/1048576.0;
Error:
return (float) Bandwidth;
}
template<class T>
float
ComputeMaximumBandwidth( size_t N )
{
T *inHost = 0;
T *inDevice = 0;
T *outDevice = 0;
T *outHost = 0;
cudaError_t status;
bool ret = false;
float fMaxBandwidth = 0.0f;
int cMaxBlocks = 0;
int cMaxThreads = 0;
cuda(HostAlloc( (void **) &inHost, N*sizeof(T), cudaHostAllocMapped));
cuda(HostGetDevicePointer( (void **) &inDevice, inHost, 0 ));
cuda(HostAlloc( (void **) &outHost, N*sizeof(T), 0 ) );
cuda(Malloc( (void **) &outDevice, N*sizeof(T)));
for ( int i = 0; i < N; i++ ) {
inHost[i] = (T) i;
}
cuda(BindTexture(NULL, tex, inDevice, cudaCreateChannelDesc<T>(), N*sizeof(T)));
{
for ( int cBlocks = 8; cBlocks <= 512; cBlocks += 8 ) {
for ( int cThreads = 16; cThreads <= 512; cThreads += 16 ) {
memset( outHost, 0, N*sizeof(float) );
float bw = MeasureBandwidth<T>( outDevice, N, cBlocks, cThreads );
if ( bw > fMaxBandwidth ) {
fMaxBandwidth = bw;
cMaxBlocks = cBlocks;
cMaxThreads = cThreads;
printf( "New maximum of %.2f M/s reached at %d blocks of %d threads\n",
fMaxBandwidth, cMaxBlocks, cMaxThreads );
}
cuda(Memcpy( outHost, outDevice, N*sizeof(T), cudaMemcpyDeviceToHost ) );
for ( int i = 0; i < N; i++ ) {
assert( outHost[i] == inHost[i] );
if ( outHost[i] != inHost[i] ) {
goto Error;
}
}
}
}
}
ret = true;
Error:
cudaFreeHost( inHost );
cudaFree( outDevice );
cudaFreeHost( outHost );
return ret;
}
int
main( int argc, char *argv[] )
{
int ret = 1;
cudaError_t status;
float fMaxBW = 0.0f;
cuda(SetDeviceFlags(cudaDeviceMapHost));
cuda(Free(0));
fMaxBW = ComputeMaximumBandwidth<float>(64*1048576);
printf( "Maximum bandwidth achieved: %.2f\n", fMaxBW );
ret = 0;
Error:
return ret;
}
|
2883440266f7c1e93c36865e234a37ed502f8005.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include "flamegpu/gpu/CUDAScanCompaction.h"
#include "flamegpu/gpu/CUDAErrorChecking.h"
#include "flamegpu/gpu/CUDAAgentModel.h"
/**
* CUDAScanCompaction methods
*/
void CUDAScanCompaction::purge() {
memset(configs, 0, sizeof(configs));
}
void CUDAScanCompaction::resize(const unsigned int& newCount, const Type& type, const unsigned int& streamId) {
assert(streamId < MAX_STREAMS);
assert(type < MAX_TYPES);
configs[type][streamId].resize_scan_flag(newCount);
}
void CUDAScanCompaction::zero(const Type& type, const unsigned int& streamId) {
assert(streamId < MAX_STREAMS);
assert(type < MAX_TYPES);
configs[type][streamId].zero_scan_flag();
}
const CUDAScanCompactionConfig &CUDAScanCompaction::getConfig(const Type& type, const unsigned int& streamId) {
return configs[type][streamId];
}
CUDAScanCompactionConfig &CUDAScanCompaction::Config(const Type& type, const unsigned int& streamId) {
return configs[type][streamId];
}
/**
*
*/
CUDAScanCompactionConfig::~CUDAScanCompactionConfig() {
free_scan_flag();
}
void CUDAScanCompactionConfig::free_scan_flag() {
if (d_ptrs.scan_flag) {
gpuErrchk(hipFree(d_ptrs.scan_flag));
d_ptrs.scan_flag = nullptr;
}
if (d_ptrs.position) {
gpuErrchk(hipFree(d_ptrs.position));
d_ptrs.position = nullptr;
}
}
void CUDAScanCompactionConfig::zero_scan_flag() {
if (d_ptrs.position) {
gpuErrchk(hipMemset(d_ptrs.position, 0, scan_flag_len * sizeof(unsigned int)));
}
if (d_ptrs.scan_flag) {
gpuErrchk(hipMemset(d_ptrs.scan_flag, 0, scan_flag_len * sizeof(unsigned int)));
}
}
void CUDAScanCompactionConfig::resize_scan_flag(const unsigned int& count) {
if (count + 1 > scan_flag_len) {
free_scan_flag();
gpuErrchk(hipMalloc(&d_ptrs.scan_flag, (count + 1) * sizeof(unsigned int))); // +1 so we can get the total from the scan
gpuErrchk(hipMalloc(&d_ptrs.position, (count + 1) * sizeof(unsigned int))); // +1 so we can get the total from the scan
scan_flag_len = count + 1;
}
}
| 2883440266f7c1e93c36865e234a37ed502f8005.cu | #include <cassert>
#include "flamegpu/gpu/CUDAScanCompaction.h"
#include "flamegpu/gpu/CUDAErrorChecking.h"
#include "flamegpu/gpu/CUDAAgentModel.h"
/**
* CUDAScanCompaction methods
*/
void CUDAScanCompaction::purge() {
memset(configs, 0, sizeof(configs));
}
void CUDAScanCompaction::resize(const unsigned int& newCount, const Type& type, const unsigned int& streamId) {
assert(streamId < MAX_STREAMS);
assert(type < MAX_TYPES);
configs[type][streamId].resize_scan_flag(newCount);
}
void CUDAScanCompaction::zero(const Type& type, const unsigned int& streamId) {
assert(streamId < MAX_STREAMS);
assert(type < MAX_TYPES);
configs[type][streamId].zero_scan_flag();
}
const CUDAScanCompactionConfig &CUDAScanCompaction::getConfig(const Type& type, const unsigned int& streamId) {
return configs[type][streamId];
}
CUDAScanCompactionConfig &CUDAScanCompaction::Config(const Type& type, const unsigned int& streamId) {
return configs[type][streamId];
}
/**
*
*/
CUDAScanCompactionConfig::~CUDAScanCompactionConfig() {
free_scan_flag();
}
void CUDAScanCompactionConfig::free_scan_flag() {
if (d_ptrs.scan_flag) {
gpuErrchk(cudaFree(d_ptrs.scan_flag));
d_ptrs.scan_flag = nullptr;
}
if (d_ptrs.position) {
gpuErrchk(cudaFree(d_ptrs.position));
d_ptrs.position = nullptr;
}
}
void CUDAScanCompactionConfig::zero_scan_flag() {
if (d_ptrs.position) {
gpuErrchk(cudaMemset(d_ptrs.position, 0, scan_flag_len * sizeof(unsigned int)));
}
if (d_ptrs.scan_flag) {
gpuErrchk(cudaMemset(d_ptrs.scan_flag, 0, scan_flag_len * sizeof(unsigned int)));
}
}
void CUDAScanCompactionConfig::resize_scan_flag(const unsigned int& count) {
if (count + 1 > scan_flag_len) {
free_scan_flag();
gpuErrchk(cudaMalloc(&d_ptrs.scan_flag, (count + 1) * sizeof(unsigned int))); // +1 so we can get the total from the scan
gpuErrchk(cudaMalloc(&d_ptrs.position, (count + 1) * sizeof(unsigned int))); // +1 so we can get the total from the scan
scan_flag_len = count + 1;
}
}
|
e467ecaaca6852493d9297c3528e8a4312018557.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This program explores the order in which
   the threads operate in CUDA. The print pattern
suggests that there is no order.
*/
#include "stdio.h"
#include "cuPrintf.hip"
#include "cuPrintf_hip.cuh"
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
__global__ void helloCUDA()
{
cuPrintf("Hello World!! This is thread %d.\n", blockIdx.x);
}
int main()
{
cudaPrintfInit();
    hipLaunchKernelGGL(helloCUDA, dim3(NUM_BLOCKS), dim3(BLOCK_WIDTH), 0, 0);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
hipDeviceSynchronize();
printf("Thats all!!!\n");
return 0;
}
| e467ecaaca6852493d9297c3528e8a4312018557.cu | /* This program explores the order in which
   the threads operate in CUDA. The print pattern
suggests that there is no order.
*/
#include "stdio.h"
#include "cuPrintf.cu"
#include "cuPrintf.cuh"
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
__global__ void helloCUDA()
{
cuPrintf("Hello World!! This is thread %d.\n", blockIdx.x);
}
int main()
{
cudaPrintfInit();
helloCUDA<<<NUM_BLOCKS, BLOCK_WIDTH>>>();
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
cudaDeviceSynchronize();
printf("Thats all!!!\n");
return 0;
}
|
7b1374bbd5ddf5adf74fd4fa6e67e54a80d434a3.hip | // !!! This is a file automatically generated by hipify!!!
/* Using cuSPARSE for matrix vector multplication of completed affinity */
#include <hip/hip_runtime.h>
#include <hipsparse.h>
#include "utils.h"
#include <time.h>
int main(int argc, char *argv[]) {
/***********************************************
* initialize program's input parameters *
***********************************************/
double alpha = 1;
double beta = 1;
double norm = 0;
int bin_width = 10;
hipsparseHandle_t handle = 0;
hipsparseMatDescr_t descr = 0;
hipsparseCreate(&handle);
hipsparseCreateMatDescr(&descr);
h_vec_t<int> distance_1;
int num_feat_1 = atoi(argv[2]);
ReadMatrix(distance_1, argv[1], num_feat_1);
//#ifdef ACCELERATE
// std::cout << "CUDA" << std::endl;
// d_vec_t<unsigned> d_distance_1 = distance_1;
//#endif
h_vec_t<double> distance_2;
int num_feat_2 = atoi(argv[4]);
ReadMatrix(distance_2, argv[3], num_feat_2);
//#ifdef ACCELERATE
// d_vec_t<double> d_distance_2 = distance_2;
//#endif
int num_iters = 20;
if (8 == argc)
num_iters = atoi(argv[7]);
/**************************************************
* find unique values of distance1 and their indices
***************************************************/
//#ifdef ACCELERATE
// d_vec_t<unsigned> d_uniq_keys = FindUniques(d_distance_1);
// d_uniq_keys.erase(
// remove_if(d_uniq_keys.begin(), d_uniq_keys.end(),
// IsLessThan(bin_width)),
// d_uniq_keys.end());
//#else
//std::cout << "HOST" << std::endl;
h_vec_t<unsigned> uniq_keys = FindUniques(distance_1);
uniq_keys.erase(
remove_if(uniq_keys.begin(), uniq_keys.end(), IsLessThan(bin_width)),
uniq_keys.end());
//#endif
//
//#ifdef ACCELERATE
// d_vec_t<int> *d_keys_idcs = new d_vec_t<int>[d_uniq_keys.size()];
// for (unsigned i = 0; i < d_uniq_keys.size(); ++i) {
// d_keys_idcs[i].resize(d_distance_1.size());
// }
//#else
h_vec_t<int> *keys_idcs = new h_vec_t<int>[uniq_keys.size()];
for (unsigned i = 0; i < uniq_keys.size(); ++i) {
keys_idcs[i].resize(distance_1.size());
}
//#endif
counting_iterator<unsigned> first_idx(0);
counting_iterator<unsigned> last_idx1 = first_idx + distance_1.size();
//#ifdef ACCELERATE
// for (unsigned i = 0; i < d_uniq_keys.size(); ++i) {
// transform(ZIP2(d_distance_1.begin(), first_idx),
// ZIP2(d_distance_1.end(), last_idx), d_keys_idcs[i].begin(),
// IsEqual(d_uniq_keys[i]));
//
// d_keys_idcs[i].erase(
// remove(d_keys_idcs[i].begin(), d_keys_idcs[i].end(), -1),
// d_keys_idcs[i].end());
// }
//#else
for (unsigned i = 0; i < uniq_keys.size(); ++i) {
transform(ZIP2(distance_1.begin(), first_idx),
ZIP2(distance_1.end(), last_idx1), keys_idcs[i].begin(),
IsEqual(uniq_keys[i]));
keys_idcs[i].erase(remove(keys_idcs[i].begin(), keys_idcs[i].end(), -1),
keys_idcs[i].end());
}
//#endif
/***************************************************
   * construct COO sparse representation of affinity *
***************************************************/
double *distance2_ptr = raw_pointer_cast(distance_2.data());
unsigned len_affinity_block = num_feat_2 * num_feat_2;
h_vec_t<double> *h_coo_val = new h_vec_t<double>[uniq_keys.size()];
h_vec_t<int> *h_coo_row = new h_vec_t<int>[uniq_keys.size()];
h_vec_t<int> *h_coo_col = new h_vec_t<int>[uniq_keys.size()];
d_vec_t<double> *d_coo_val = new d_vec_t<double>[uniq_keys.size()];
d_vec_t<int> *d_coo_row = new d_vec_t<int>[uniq_keys.size()];
d_vec_t<int> *d_coo_col = new d_vec_t<int>[uniq_keys.size()];
d_vec_t<int> *d_csr_row = new d_vec_t<int>[uniq_keys.size()];
const clock_t begin_time = clock();
for (int i = 0; i < uniq_keys.size(); ++i) {
unsigned key = uniq_keys[i];
stdvec_tuple_t affanity_coo =
AffinityBlocksCoo(distance2_ptr, key, num_feat_2);
h_coo_val[i] = get<0>(affanity_coo);
h_coo_col[i] = (get<1>(affanity_coo));
h_coo_row[i] = (get<2>(affanity_coo));
d_coo_val[i] = h_coo_val[i];
d_coo_row[i] = h_coo_row[i];
d_coo_col[i] = h_coo_col[i];
}
std::cout << "affinity runtime: "
<< (clock() - begin_time) / double(CLOCKS_PER_SEC) * 1000 << std::endl;
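  // Convert each block's COO row indices into CSR row offsets so the CSR SpMV routine can consume them.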
for (int i = 0; i < uniq_keys.size(); ++i) {
d_csr_row[i].resize(num_feat_2 + 1);
hipsparseXcoo2csr(handle, raw_pointer_cast(d_coo_row[i].data()),
d_coo_row[i].size(), num_feat_2,
raw_pointer_cast(d_csr_row[i].data()),
HIPSPARSE_INDEX_BASE_ZERO);
}
// make_tuple(h_coo_val, h_coo_row, h_coo_col);
//std::cout << "affinity" << std::endl;
// std::cout << " unq keys: " << uniq_keys[i] << std::endl;
// std::cout << " values "
// << " "
// << "columns"
// << " "
// << "rows" << std::endl;
// std::cout << uniq_keys.size() << std::endl;
// for (int i = 0; i < uniq_keys.size(); ++i) {
// for (int j = 0; j < d_coo_val[i].size(); ++j) {
// std::cout << d_coo_val[i][j] << " " << d_coo_col[i][j] << " "
// << d_coo_row[i][j] << " " << std::endl;
// }
// std::cout << std::endl;
// }
//std::cout << std::endl;
//for (int i = 0; i < uniq_keys.size(); ++i) {
// std::cout << " csr rows " << std::endl;
// std::cout << " unq keys: " << uniq_keys[i] << std::endl;
// for (int j = 0; j < d_csr_row[i].size(); ++j) {
// std::cout << d_csr_row[i][j] << std::endl;
// }
//}
//std::cout << std::endl;
/******************************************************
* initialize eigen vectors *
******************************************************/
// hipsparseCreate(&handle);
int len_eigen_vec = num_feat_1 * num_feat_2;
d_vec_t<double> d_eigen_vec_new(len_eigen_vec);
d_vec_t<double> d_eigen_vec_old(len_eigen_vec);
norm = 1.0 / sqrt(len_eigen_vec);
fill(d_eigen_vec_old.begin(), d_eigen_vec_old.end(), norm);
/******************************************************
* compute eigen vectors *
******************************************************/
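  // Power iteration: for every unique distance key, multiply its affinity block (CSR)
  // with the matching slice of the previous eigenvector on a dedicated stream, then
  // L2-normalize the accumulated result to obtain the next estimate.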
const clock_t begin_time2 = clock();
for (int iter = 0; iter < num_iters; ++iter) {
// Create a stream for each operation
hipStream_t *streams =
(hipStream_t *)malloc(uniq_keys.size() * sizeof(hipStream_t));
for (int i = 0; i < uniq_keys.size(); i++)
hipStreamCreate(&streams[i]);
for (int i = 0; i < uniq_keys.size(); ++i) {
hipsparseSetStream(handle, streams[i]);
for (int j = 0; j < keys_idcs[i].size(); ++j) {
int row = keys_idcs[i][j] / num_feat_1;
int col = keys_idcs[i][j] % num_feat_1;
hipsparseDcsrmv(
handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, num_feat_2, num_feat_2,
d_coo_val[i].size(), &alpha, descr,
raw_pointer_cast(d_coo_val[i].data()),
raw_pointer_cast(d_csr_row[i].data()),
raw_pointer_cast(d_coo_col[i].data()),
raw_pointer_cast(d_eigen_vec_old.data()) + col * num_feat_2, &beta,
raw_pointer_cast(d_eigen_vec_new.data()) + row * num_feat_2);
}
}
double init = 0;
norm = std::sqrt(transform_reduce(d_eigen_vec_new.begin(),
d_eigen_vec_new.end(), square(), init,
thrust::plus<double>()));
transform(d_eigen_vec_new.begin(), d_eigen_vec_new.end(),
d_eigen_vec_old.begin(), division(norm));
fill(d_eigen_vec_new.begin(), d_eigen_vec_new.end(), 0);
}
std::cout << "Eigen runtime: "
<< (clock() - begin_time2) / double(CLOCKS_PER_SEC) * 1000 << std::endl;
// for (int i = 0; i < d_eigen_vec_old.size(); i++) {
// std::cout << "d_eigen new value = " << d_eigen_vec_new[i] << " ";
// std::cout << "d_eigen old value = " << d_eigen_vec_old[i] << std::endl;
// }
hipsparseDestroy(handle);
return 0;
}
| 7b1374bbd5ddf5adf74fd4fa6e67e54a80d434a3.cu | /* Using cuSPARSE for matrix vector multplication of completed affinity */
#include <cuda_runtime.h>
#include <cusparse.h>
#include "utils.h"
#include <time.h>
int main(int argc, char *argv[]) {
/***********************************************
* initialize program's input parameters *
***********************************************/
double alpha = 1;
double beta = 1;
double norm = 0;
int bin_width = 10;
cusparseHandle_t handle = 0;
cusparseMatDescr_t descr = 0;
cusparseCreate(&handle);
cusparseCreateMatDescr(&descr);
h_vec_t<int> distance_1;
int num_feat_1 = atoi(argv[2]);
ReadMatrix(distance_1, argv[1], num_feat_1);
//#ifdef ACCELERATE
// std::cout << "CUDA" << std::endl;
// d_vec_t<unsigned> d_distance_1 = distance_1;
//#endif
h_vec_t<double> distance_2;
int num_feat_2 = atoi(argv[4]);
ReadMatrix(distance_2, argv[3], num_feat_2);
//#ifdef ACCELERATE
// d_vec_t<double> d_distance_2 = distance_2;
//#endif
int num_iters = 20;
if (8 == argc)
num_iters = atoi(argv[7]);
/**************************************************
* find unique values of distance1 and their indices
***************************************************/
//#ifdef ACCELERATE
// d_vec_t<unsigned> d_uniq_keys = FindUniques(d_distance_1);
// d_uniq_keys.erase(
// remove_if(d_uniq_keys.begin(), d_uniq_keys.end(),
// IsLessThan(bin_width)),
// d_uniq_keys.end());
//#else
//std::cout << "HOST" << std::endl;
h_vec_t<unsigned> uniq_keys = FindUniques(distance_1);
uniq_keys.erase(
remove_if(uniq_keys.begin(), uniq_keys.end(), IsLessThan(bin_width)),
uniq_keys.end());
//#endif
//
//#ifdef ACCELERATE
// d_vec_t<int> *d_keys_idcs = new d_vec_t<int>[d_uniq_keys.size()];
// for (unsigned i = 0; i < d_uniq_keys.size(); ++i) {
// d_keys_idcs[i].resize(d_distance_1.size());
// }
//#else
h_vec_t<int> *keys_idcs = new h_vec_t<int>[uniq_keys.size()];
for (unsigned i = 0; i < uniq_keys.size(); ++i) {
keys_idcs[i].resize(distance_1.size());
}
//#endif
counting_iterator<unsigned> first_idx(0);
counting_iterator<unsigned> last_idx1 = first_idx + distance_1.size();
//#ifdef ACCELERATE
// for (unsigned i = 0; i < d_uniq_keys.size(); ++i) {
// transform(ZIP2(d_distance_1.begin(), first_idx),
// ZIP2(d_distance_1.end(), last_idx), d_keys_idcs[i].begin(),
// IsEqual(d_uniq_keys[i]));
//
// d_keys_idcs[i].erase(
// remove(d_keys_idcs[i].begin(), d_keys_idcs[i].end(), -1),
// d_keys_idcs[i].end());
// }
//#else
for (unsigned i = 0; i < uniq_keys.size(); ++i) {
transform(ZIP2(distance_1.begin(), first_idx),
ZIP2(distance_1.end(), last_idx1), keys_idcs[i].begin(),
IsEqual(uniq_keys[i]));
keys_idcs[i].erase(remove(keys_idcs[i].begin(), keys_idcs[i].end(), -1),
keys_idcs[i].end());
}
//#endif
/***************************************************
   * construct COO sparse representation of affinity *
***************************************************/
double *distance2_ptr = raw_pointer_cast(distance_2.data());
unsigned len_affinity_block = num_feat_2 * num_feat_2;
h_vec_t<double> *h_coo_val = new h_vec_t<double>[uniq_keys.size()];
h_vec_t<int> *h_coo_row = new h_vec_t<int>[uniq_keys.size()];
h_vec_t<int> *h_coo_col = new h_vec_t<int>[uniq_keys.size()];
d_vec_t<double> *d_coo_val = new d_vec_t<double>[uniq_keys.size()];
d_vec_t<int> *d_coo_row = new d_vec_t<int>[uniq_keys.size()];
d_vec_t<int> *d_coo_col = new d_vec_t<int>[uniq_keys.size()];
d_vec_t<int> *d_csr_row = new d_vec_t<int>[uniq_keys.size()];
const clock_t begin_time = clock();
for (int i = 0; i < uniq_keys.size(); ++i) {
unsigned key = uniq_keys[i];
stdvec_tuple_t affanity_coo =
AffinityBlocksCoo(distance2_ptr, key, num_feat_2);
h_coo_val[i] = get<0>(affanity_coo);
h_coo_col[i] = (get<1>(affanity_coo));
h_coo_row[i] = (get<2>(affanity_coo));
d_coo_val[i] = h_coo_val[i];
d_coo_row[i] = h_coo_row[i];
d_coo_col[i] = h_coo_col[i];
}
std::cout << "affinity runtime: "
<< (clock() - begin_time) / double(CLOCKS_PER_SEC) * 1000 << std::endl;
for (int i = 0; i < uniq_keys.size(); ++i) {
d_csr_row[i].resize(num_feat_2 + 1);
cusparseXcoo2csr(handle, raw_pointer_cast(d_coo_row[i].data()),
d_coo_row[i].size(), num_feat_2,
raw_pointer_cast(d_csr_row[i].data()),
CUSPARSE_INDEX_BASE_ZERO);
}
// make_tuple(h_coo_val, h_coo_row, h_coo_col);
//std::cout << "affinity" << std::endl;
// std::cout << " unq keys: " << uniq_keys[i] << std::endl;
// std::cout << " values "
// << " "
// << "columns"
// << " "
// << "rows" << std::endl;
// std::cout << uniq_keys.size() << std::endl;
// for (int i = 0; i < uniq_keys.size(); ++i) {
// for (int j = 0; j < d_coo_val[i].size(); ++j) {
// std::cout << d_coo_val[i][j] << " " << d_coo_col[i][j] << " "
// << d_coo_row[i][j] << " " << std::endl;
// }
// std::cout << std::endl;
// }
//std::cout << std::endl;
//for (int i = 0; i < uniq_keys.size(); ++i) {
// std::cout << " csr rows " << std::endl;
// std::cout << " unq keys: " << uniq_keys[i] << std::endl;
// for (int j = 0; j < d_csr_row[i].size(); ++j) {
// std::cout << d_csr_row[i][j] << std::endl;
// }
//}
//std::cout << std::endl;
/******************************************************
* initialize eigen vectors *
******************************************************/
// cusparseCreate(&handle);
int len_eigen_vec = num_feat_1 * num_feat_2;
d_vec_t<double> d_eigen_vec_new(len_eigen_vec);
d_vec_t<double> d_eigen_vec_old(len_eigen_vec);
norm = 1.0 / sqrt(len_eigen_vec);
fill(d_eigen_vec_old.begin(), d_eigen_vec_old.end(), norm);
/******************************************************
* compute eigen vectors *
******************************************************/
const clock_t begin_time2 = clock();
for (int iter = 0; iter < num_iters; ++iter) {
// Create a stream for each operation
cudaStream_t *streams =
(cudaStream_t *)malloc(uniq_keys.size() * sizeof(cudaStream_t));
for (int i = 0; i < uniq_keys.size(); i++)
cudaStreamCreate(&streams[i]);
for (int i = 0; i < uniq_keys.size(); ++i) {
cusparseSetStream(handle, streams[i]);
for (int j = 0; j < keys_idcs[i].size(); ++j) {
int row = keys_idcs[i][j] / num_feat_1;
int col = keys_idcs[i][j] % num_feat_1;
cusparseDcsrmv(
handle, CUSPARSE_OPERATION_NON_TRANSPOSE, num_feat_2, num_feat_2,
d_coo_val[i].size(), &alpha, descr,
raw_pointer_cast(d_coo_val[i].data()),
raw_pointer_cast(d_csr_row[i].data()),
raw_pointer_cast(d_coo_col[i].data()),
raw_pointer_cast(d_eigen_vec_old.data()) + col * num_feat_2, &beta,
raw_pointer_cast(d_eigen_vec_new.data()) + row * num_feat_2);
}
}
double init = 0;
norm = std::sqrt(transform_reduce(d_eigen_vec_new.begin(),
d_eigen_vec_new.end(), square(), init,
thrust::plus<double>()));
transform(d_eigen_vec_new.begin(), d_eigen_vec_new.end(),
d_eigen_vec_old.begin(), division(norm));
fill(d_eigen_vec_new.begin(), d_eigen_vec_new.end(), 0);
}
std::cout << "Eigen runtime: "
<< (clock() - begin_time2) / double(CLOCKS_PER_SEC) * 1000 << std::endl;
// for (int i = 0; i < d_eigen_vec_old.size(); i++) {
// std::cout << "d_eigen new value = " << d_eigen_vec_new[i] << " ";
// std::cout << "d_eigen old value = " << d_eigen_vec_old[i] << std::endl;
// }
cusparseDestroy(handle);
return 0;
}
|
4859d609932a723063b5db2769932204601d4034.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cuke_gradient.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *surface = NULL;
hipMalloc(&surface, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
size_t pitch = 2;
float t = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
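    // Warm-up: one initial launch plus ten untimed runs, then time 1000 launches of cuke_gradient.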
    hipFree(0);
    hipLaunchKernelGGL(cuke_gradient, dim3(gridBlock), dim3(threadBlock), 0, 0, surface, width, height, pitch, t);
    hipDeviceSynchronize();
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL(cuke_gradient, dim3(gridBlock), dim3(threadBlock), 0, 0, surface, width, height, pitch, t);
    }
    auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(cuke_gradient, dim3(gridBlock), dim3(threadBlock), 0, 0, surface, width, height, pitch, t);
    }
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4859d609932a723063b5db2769932204601d4034.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cuke_gradient.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *surface = NULL;
cudaMalloc(&surface, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
size_t pitch = 2;
float t = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cuke_gradient<<<gridBlock,threadBlock>>>(surface,width,height,pitch,t);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cuke_gradient<<<gridBlock,threadBlock>>>(surface,width,height,pitch,t);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cuke_gradient<<<gridBlock,threadBlock>>>(surface,width,height,pitch,t);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ba2dc2be09576d8869561e7bd797ed454733fdf3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Prerequisites.cuh"
#include "FFT.cuh"
namespace gtom
{
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
template <class T> __global__ void FFTCropKernel(T* d_input, T* d_output, int3 olddims, int3 newdims);
template <class T> __global__ void FFTFullCropKernel(T* d_input, T* d_output, int3 olddims, int3 newdims);
template <class T> __global__ void FFTPadEvenKernel(T* d_input, T* d_output, int3 olddims, int3 newdims);
template <class T> __global__ void FFTFullPadEvenKernel(T* d_input, T* d_output, int3 olddims, int3 newdims);
////////////////
//Host methods//
////////////////
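// d_FFTCrop: crops r2c half-spectrum data (pitch x/2+1) from olddims down to newdims,
// keeping the lowest frequencies. Supports in-place operation (d_input == d_output) by
// staging the result in a temporary buffer; the other host wrappers below follow the same pattern.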
template <class T> void d_FFTCrop(T* d_input, T* d_output, int3 olddims, int3 newdims, int batch)
{
size_t elementsnew = ElementsFFT(newdims);
size_t elementsold = ElementsFFT(olddims);
T* d_intermediate;
if (d_input == d_output)
hipMalloc((void**)&d_intermediate, ElementsFFT(newdims) * batch * sizeof(T));
else
d_intermediate = d_output;
int TpB = min(256, NextMultipleOf(newdims.x / 2 + 1, 32));
dim3 grid = dim3(newdims.y, newdims.z, batch);
FFTCropKernel << <grid, TpB >> > (d_input, d_intermediate, olddims, newdims);
if (d_input == d_output)
{
hipMemcpy(d_output, d_intermediate, ElementsFFT(newdims) * batch * sizeof(T), hipMemcpyDeviceToDevice);
hipFree(d_intermediate);
}
}
template void d_FFTCrop<tcomplex>(tcomplex* d_input, tcomplex* d_output, int3 olddims, int3 newdims, int batch);
template void d_FFTCrop<tfloat>(tfloat* d_input, tfloat* d_output, int3 olddims, int3 newdims, int batch);
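// d_FFTFullCrop: Fourier cropping for full (c2c) spectra of size x*y*z rather than the
// non-redundant half-spectrum.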
template <class T> void d_FFTFullCrop(T* d_input, T* d_output, int3 olddims, int3 newdims, int batch)
{
size_t elementsnew = Elements(newdims);
size_t elementsold = Elements(olddims);
T* d_intermediate;
if (d_input == d_output)
hipMalloc((void**)&d_intermediate, Elements(newdims) * batch * sizeof(T));
else
d_intermediate = d_output;
int TpB = min(256, NextMultipleOf(newdims.x, 32));
dim3 grid = dim3(newdims.y, newdims.z, batch);
FFTFullCropKernel << <grid, TpB >> > (d_input, d_intermediate, olddims, newdims);
if (d_input == d_output)
{
hipMemcpy(d_output, d_intermediate, Elements(newdims) * batch * sizeof(T), hipMemcpyDeviceToDevice);
hipFree(d_intermediate);
}
}
template void d_FFTFullCrop<tcomplex>(tcomplex* d_input, tcomplex* d_output, int3 olddims, int3 newdims, int batch);
template void d_FFTFullCrop<tfloat>(tfloat* d_input, tfloat* d_output, int3 olddims, int3 newdims, int batch);
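// d_FFTPad: zero-pads half-spectrum data from olddims up to newdims, keeping the old
// low-frequency content in place and writing zeros at the newly added frequencies.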
template <class T> void d_FFTPad(T* d_input, T* d_output, int3 olddims, int3 newdims, int batch)
{
size_t elementsnew = ElementsFFT(newdims);
size_t elementsold = ElementsFFT(olddims);
T* d_intermediate;
if (d_input == d_output)
hipMalloc((void**)&d_intermediate, ElementsFFT(newdims) * batch * sizeof(T));
else
d_intermediate = d_output;
int TpB = min(256, NextMultipleOf(newdims.x / 2 + 1, 32));
dim3 grid = dim3(newdims.y, newdims.z, batch);
FFTPadEvenKernel << <grid, TpB >> > (d_input, d_intermediate, olddims, newdims);
if (d_input == d_output)
{
hipMemcpy(d_output, d_intermediate, ElementsFFT(newdims) * batch * sizeof(T), hipMemcpyDeviceToDevice);
hipFree(d_intermediate);
}
}
template void d_FFTPad<tfloat>(tfloat* d_input, tfloat* d_output, int3 olddims, int3 newdims, int batch);
template void d_FFTPad<tcomplex>(tcomplex* d_input, tcomplex* d_output, int3 olddims, int3 newdims, int batch);
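// d_FFTFullPad: zero-padding for full (c2c) spectra.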
template <class T> void d_FFTFullPad(T* d_input, T* d_output, int3 olddims, int3 newdims, int batch)
{
size_t elementsnew = Elements(newdims);
size_t elementsold = Elements(olddims);
T* d_intermediate;
if (d_input == d_output)
hipMalloc((void**)&d_intermediate, Elements(newdims) * batch * sizeof(T));
else
d_intermediate = d_output;
int TpB = min(256, NextMultipleOf(newdims.x, 32));
dim3 grid = dim3(newdims.y, newdims.z, batch);
FFTFullPadEvenKernel << <grid, TpB >> > (d_input, d_intermediate, olddims, newdims);
if (d_input == d_output)
{
hipMemcpy(d_output, d_intermediate, Elements(newdims) * batch * sizeof(T), hipMemcpyDeviceToDevice);
hipFree(d_intermediate);
}
}
template void d_FFTFullPad<tfloat>(tfloat* d_input, tfloat* d_output, int3 olddims, int3 newdims, int batch);
template void d_FFTFullPad<tcomplex>(tcomplex* d_input, tcomplex* d_output, int3 olddims, int3 newdims, int batch);
////////////////
//CUDA kernels//
////////////////
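// FFTCropKernel: one block per (y, z, batch) line, threads striding over x. Row indices in
// the upper half of the new volume wrap to the end of the old volume so that negative
// frequencies are preserved; FFTFullCropKernel is the analogue for full spectra.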
template <class T> __global__ void FFTCropKernel(T* d_input, T* d_output, int3 olddims, int3 newdims)
{
d_input += ElementsFFT(olddims) * blockIdx.z;
d_output += ElementsFFT(newdims) * blockIdx.z;
for (int x = threadIdx.x; x < newdims.x / 2 + 1; x += blockDim.x)
{
int y = blockIdx.x;
int yy = y < newdims.y / 2 + 1 ? y : y - newdims.y + olddims.y;
int z = blockIdx.y;
int zz = z < newdims.z / 2 + 1 ? z : z - newdims.z + olddims.z;
/*yy = tmax(0, tmin(yy, olddims.y - 1));
zz = tmax(0, tmin(zz, olddims.z - 1));*/
d_output[(z * newdims.y + y) * (newdims.x / 2 + 1) + x] = d_input[(zz * olddims.y + yy) * (olddims.x / 2 + 1) + x];
}
}
template <class T> __global__ void FFTFullCropKernel(T* d_input, T* d_output, int3 olddims, int3 newdims)
{
int oldy = blockIdx.x;
if (oldy >= newdims.y / 2)
oldy += olddims.y - newdims.y;
int oldz = blockIdx.y;
if (oldz >= newdims.z / 2)
oldz += olddims.z - newdims.z;
d_input += Elements(olddims) * blockIdx.z + (oldz * olddims.y + oldy) * olddims.x;
d_output += Elements(newdims) * blockIdx.z + (blockIdx.y * newdims.y + blockIdx.x) * newdims.x;
for (int x = threadIdx.x; x < newdims.x; x += blockDim.x)
{
int oldx = x;
if (oldx >= newdims.x / 2)
oldx += olddims.x - newdims.x;
d_output[x] = d_input[oldx];
}
}
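// FFTPadEvenKernel: maps each output coordinate through an fftshift-style index transform
// to its source coordinate in the old volume and writes zero where no source exists.
// The tcomplex specialization below does the same but zero-fills with complex zeros.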
template <class T> __global__ void FFTPadEvenKernel(T* d_input, T* d_output, int3 olddims, int3 newdims)
{
d_input += ElementsFFT(olddims) * blockIdx.z;
d_output += ElementsFFT(newdims) * blockIdx.z;
for (int x = threadIdx.x; x < newdims.x / 2 + 1; x += blockDim.x)
{
int newry = ((blockIdx.x + newdims.y / 2) % newdims.y);
int newrz = ((blockIdx.y + newdims.z / 2) % newdims.z);
int oldry = newry + (olddims.y - newdims.y) / 2;
int oldrz = newrz + (olddims.z - newdims.z) / 2;
if (x < (olddims.x + 1) / 2 && oldry >= 0 && oldry < olddims.y && oldrz >= 0 && oldrz < olddims.z)
{
int oldy = ((oldry + (olddims.y + 1) / 2) % olddims.y);
int oldz = ((oldrz + (olddims.z + 1) / 2) % olddims.z);
d_output[(blockIdx.y * newdims.y + blockIdx.x) * (newdims.x / 2 + 1) + x] = d_input[(oldz * olddims.y + oldy) * (olddims.x / 2 + 1) + x];
}
else
d_output[(blockIdx.y * newdims.y + blockIdx.x) * (newdims.x / 2 + 1) + x] = (T)0;
}
}
template<> __global__ void FFTPadEvenKernel<tcomplex>(tcomplex* d_input, tcomplex* d_output, int3 olddims, int3 newdims)
{
d_input += ElementsFFT(olddims) * blockIdx.z;
d_output += ElementsFFT(newdims) * blockIdx.z;
for (int x = threadIdx.x; x < newdims.x / 2 + 1; x += blockDim.x)
{
int newry = ((blockIdx.x + newdims.y / 2) % newdims.y);
int newrz = ((blockIdx.y + newdims.z / 2) % newdims.z);
int oldry = newry + (olddims.y - newdims.y) / 2;
int oldrz = newrz + (olddims.z - newdims.z) / 2;
if (x < (olddims.x + 1) / 2 && oldry >= 0 && oldry < olddims.y && oldrz >= 0 && oldrz < olddims.z)
{
int oldy = ((oldry + (olddims.y + 1) / 2) % olddims.y);
int oldz = ((oldrz + (olddims.z + 1) / 2) % olddims.z);
d_output[(blockIdx.y * newdims.y + blockIdx.x) * (newdims.x / 2 + 1) + x] = d_input[(oldz * olddims.y + oldy) * (olddims.x / 2 + 1) + x];
}
else
d_output[(blockIdx.y * newdims.y + blockIdx.x) * (newdims.x / 2 + 1) + x] = make_cuComplex(0.0f, 0.0f);
}
}
template <class T> __global__ void FFTFullPadEvenKernel(T* d_input, T* d_output, int3 olddims, int3 newdims)
{
d_input += Elements(olddims) * blockIdx.z;
d_output += Elements(newdims) * blockIdx.z;
for (int x = threadIdx.x; x < newdims.x; x += blockDim.x)
{
int newrx = ((x + (newdims.x) / 2) % newdims.x);
int newry = ((blockIdx.x + (newdims.y) / 2) % newdims.y);
int newrz = ((blockIdx.y + (newdims.z) / 2) % newdims.z);
int oldrx = newrx + (olddims.x - newdims.x - ((olddims.x & 1 - (newdims.x & 1)) % 2)) / 2;
int oldry = newry + (olddims.y - newdims.y - ((olddims.y & 1 - (newdims.y & 1)) % 2)) / 2;
int oldrz = newrz + (olddims.z - newdims.z - ((olddims.z & 1 - (newdims.z & 1)) % 2)) / 2;
if (oldrx >= 0 && oldrx < olddims.x && oldry >= 0 && oldry < olddims.y && oldrz >= 0 && oldrz < olddims.z)
{
int oldx = ((oldrx + (olddims.x + 1) / 2) % olddims.x);
int oldy = ((oldry + (olddims.y + 1) / 2) % olddims.y);
int oldz = ((oldrz + (olddims.z + 1) / 2) % olddims.z);
d_output[(blockIdx.y * newdims.y + blockIdx.x) * newdims.x + x] = d_input[(oldz * olddims.y + oldy) * olddims.x + oldx];
}
else
d_output[(blockIdx.y * newdims.y + blockIdx.x) * newdims.x + x] = (T)0;
}
}
template<> __global__ void FFTFullPadEvenKernel<tcomplex>(tcomplex* d_input, tcomplex* d_output, int3 olddims, int3 newdims)
{
d_input += Elements(olddims) * blockIdx.z;
d_output += Elements(newdims) * blockIdx.z;
for (int x = threadIdx.x; x < newdims.x; x += blockDim.x)
{
int newrx = ((x + (newdims.x) / 2) % newdims.x);
int newry = ((blockIdx.x + (newdims.y) / 2) % newdims.y);
int newrz = ((blockIdx.y + (newdims.z) / 2) % newdims.z);
int oldrx = newrx + (olddims.x - newdims.x - ((olddims.x & 1 - (newdims.x & 1)) % 2)) / 2;
int oldry = newry + (olddims.y - newdims.y - ((olddims.y & 1 - (newdims.y & 1)) % 2)) / 2;
int oldrz = newrz + (olddims.z - newdims.z - ((olddims.z & 1 - (newdims.z & 1)) % 2)) / 2;
if (oldrx >= 0 && oldrx < olddims.x && oldry >= 0 && oldry < olddims.y && oldrz >= 0 && oldrz < olddims.z)
{
int oldx = ((oldrx + (olddims.x + 1) / 2) % olddims.x);
int oldy = ((oldry + (olddims.y + 1) / 2) % olddims.y);
int oldz = ((oldrz + (olddims.z + 1) / 2) % olddims.z);
d_output[(blockIdx.y * newdims.y + blockIdx.x) * newdims.x + x] = d_input[(oldz * olddims.y + oldy) * olddims.x + oldx];
}
else
d_output[(blockIdx.y * newdims.y + blockIdx.x) * newdims.x + x] = make_cuComplex(0.0f, 0.0f);
}
}
} | ba2dc2be09576d8869561e7bd797ed454733fdf3.cu | #include "Prerequisites.cuh"
#include "FFT.cuh"
namespace gtom
{
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
template <class T> __global__ void FFTCropKernel(T* d_input, T* d_output, int3 olddims, int3 newdims);
template <class T> __global__ void FFTFullCropKernel(T* d_input, T* d_output, int3 olddims, int3 newdims);
template <class T> __global__ void FFTPadEvenKernel(T* d_input, T* d_output, int3 olddims, int3 newdims);
template <class T> __global__ void FFTFullPadEvenKernel(T* d_input, T* d_output, int3 olddims, int3 newdims);
////////////////
//Host methods//
////////////////
template <class T> void d_FFTCrop(T* d_input, T* d_output, int3 olddims, int3 newdims, int batch)
{
size_t elementsnew = ElementsFFT(newdims);
size_t elementsold = ElementsFFT(olddims);
T* d_intermediate;
if (d_input == d_output)
cudaMalloc((void**)&d_intermediate, ElementsFFT(newdims) * batch * sizeof(T));
else
d_intermediate = d_output;
int TpB = min(256, NextMultipleOf(newdims.x / 2 + 1, 32));
dim3 grid = dim3(newdims.y, newdims.z, batch);
FFTCropKernel << <grid, TpB >> > (d_input, d_intermediate, olddims, newdims);
if (d_input == d_output)
{
cudaMemcpy(d_output, d_intermediate, ElementsFFT(newdims) * batch * sizeof(T), cudaMemcpyDeviceToDevice);
cudaFree(d_intermediate);
}
}
template void d_FFTCrop<tcomplex>(tcomplex* d_input, tcomplex* d_output, int3 olddims, int3 newdims, int batch);
template void d_FFTCrop<tfloat>(tfloat* d_input, tfloat* d_output, int3 olddims, int3 newdims, int batch);
template <class T> void d_FFTFullCrop(T* d_input, T* d_output, int3 olddims, int3 newdims, int batch)
{
size_t elementsnew = Elements(newdims);
size_t elementsold = Elements(olddims);
T* d_intermediate;
if (d_input == d_output)
cudaMalloc((void**)&d_intermediate, Elements(newdims) * batch * sizeof(T));
else
d_intermediate = d_output;
int TpB = min(256, NextMultipleOf(newdims.x, 32));
dim3 grid = dim3(newdims.y, newdims.z, batch);
FFTFullCropKernel << <grid, TpB >> > (d_input, d_intermediate, olddims, newdims);
if (d_input == d_output)
{
cudaMemcpy(d_output, d_intermediate, Elements(newdims) * batch * sizeof(T), cudaMemcpyDeviceToDevice);
cudaFree(d_intermediate);
}
}
template void d_FFTFullCrop<tcomplex>(tcomplex* d_input, tcomplex* d_output, int3 olddims, int3 newdims, int batch);
template void d_FFTFullCrop<tfloat>(tfloat* d_input, tfloat* d_output, int3 olddims, int3 newdims, int batch);
template <class T> void d_FFTPad(T* d_input, T* d_output, int3 olddims, int3 newdims, int batch)
{
size_t elementsnew = ElementsFFT(newdims);
size_t elementsold = ElementsFFT(olddims);
T* d_intermediate;
if (d_input == d_output)
cudaMalloc((void**)&d_intermediate, ElementsFFT(newdims) * batch * sizeof(T));
else
d_intermediate = d_output;
int TpB = min(256, NextMultipleOf(newdims.x / 2 + 1, 32));
dim3 grid = dim3(newdims.y, newdims.z, batch);
FFTPadEvenKernel << <grid, TpB >> > (d_input, d_intermediate, olddims, newdims);
if (d_input == d_output)
{
cudaMemcpy(d_output, d_intermediate, ElementsFFT(newdims) * batch * sizeof(T), cudaMemcpyDeviceToDevice);
cudaFree(d_intermediate);
}
}
template void d_FFTPad<tfloat>(tfloat* d_input, tfloat* d_output, int3 olddims, int3 newdims, int batch);
template void d_FFTPad<tcomplex>(tcomplex* d_input, tcomplex* d_output, int3 olddims, int3 newdims, int batch);
template <class T> void d_FFTFullPad(T* d_input, T* d_output, int3 olddims, int3 newdims, int batch)
{
size_t elementsnew = Elements(newdims);
size_t elementsold = Elements(olddims);
T* d_intermediate;
if (d_input == d_output)
cudaMalloc((void**)&d_intermediate, Elements(newdims) * batch * sizeof(T));
else
d_intermediate = d_output;
int TpB = min(256, NextMultipleOf(newdims.x, 32));
dim3 grid = dim3(newdims.y, newdims.z, batch);
FFTFullPadEvenKernel << <grid, TpB >> > (d_input, d_intermediate, olddims, newdims);
if (d_input == d_output)
{
cudaMemcpy(d_output, d_intermediate, Elements(newdims) * batch * sizeof(T), cudaMemcpyDeviceToDevice);
cudaFree(d_intermediate);
}
}
template void d_FFTFullPad<tfloat>(tfloat* d_input, tfloat* d_output, int3 olddims, int3 newdims, int batch);
template void d_FFTFullPad<tcomplex>(tcomplex* d_input, tcomplex* d_output, int3 olddims, int3 newdims, int batch);
////////////////
//CUDA kernels//
////////////////
template <class T> __global__ void FFTCropKernel(T* d_input, T* d_output, int3 olddims, int3 newdims)
{
d_input += ElementsFFT(olddims) * blockIdx.z;
d_output += ElementsFFT(newdims) * blockIdx.z;
for (int x = threadIdx.x; x < newdims.x / 2 + 1; x += blockDim.x)
{
int y = blockIdx.x;
int yy = y < newdims.y / 2 + 1 ? y : y - newdims.y + olddims.y;
int z = blockIdx.y;
int zz = z < newdims.z / 2 + 1 ? z : z - newdims.z + olddims.z;
/*yy = tmax(0, tmin(yy, olddims.y - 1));
zz = tmax(0, tmin(zz, olddims.z - 1));*/
d_output[(z * newdims.y + y) * (newdims.x / 2 + 1) + x] = d_input[(zz * olddims.y + yy) * (olddims.x / 2 + 1) + x];
}
}
template <class T> __global__ void FFTFullCropKernel(T* d_input, T* d_output, int3 olddims, int3 newdims)
{
int oldy = blockIdx.x;
if (oldy >= newdims.y / 2)
oldy += olddims.y - newdims.y;
int oldz = blockIdx.y;
if (oldz >= newdims.z / 2)
oldz += olddims.z - newdims.z;
d_input += Elements(olddims) * blockIdx.z + (oldz * olddims.y + oldy) * olddims.x;
d_output += Elements(newdims) * blockIdx.z + (blockIdx.y * newdims.y + blockIdx.x) * newdims.x;
for (int x = threadIdx.x; x < newdims.x; x += blockDim.x)
{
int oldx = x;
if (oldx >= newdims.x / 2)
oldx += olddims.x - newdims.x;
d_output[x] = d_input[oldx];
}
}
template <class T> __global__ void FFTPadEvenKernel(T* d_input, T* d_output, int3 olddims, int3 newdims)
{
d_input += ElementsFFT(olddims) * blockIdx.z;
d_output += ElementsFFT(newdims) * blockIdx.z;
for (int x = threadIdx.x; x < newdims.x / 2 + 1; x += blockDim.x)
{
int newry = ((blockIdx.x + newdims.y / 2) % newdims.y);
int newrz = ((blockIdx.y + newdims.z / 2) % newdims.z);
int oldry = newry + (olddims.y - newdims.y) / 2;
int oldrz = newrz + (olddims.z - newdims.z) / 2;
if (x < (olddims.x + 1) / 2 && oldry >= 0 && oldry < olddims.y && oldrz >= 0 && oldrz < olddims.z)
{
int oldy = ((oldry + (olddims.y + 1) / 2) % olddims.y);
int oldz = ((oldrz + (olddims.z + 1) / 2) % olddims.z);
d_output[(blockIdx.y * newdims.y + blockIdx.x) * (newdims.x / 2 + 1) + x] = d_input[(oldz * olddims.y + oldy) * (olddims.x / 2 + 1) + x];
}
else
d_output[(blockIdx.y * newdims.y + blockIdx.x) * (newdims.x / 2 + 1) + x] = (T)0;
}
}
template<> __global__ void FFTPadEvenKernel<tcomplex>(tcomplex* d_input, tcomplex* d_output, int3 olddims, int3 newdims)
{
d_input += ElementsFFT(olddims) * blockIdx.z;
d_output += ElementsFFT(newdims) * blockIdx.z;
for (int x = threadIdx.x; x < newdims.x / 2 + 1; x += blockDim.x)
{
int newry = ((blockIdx.x + newdims.y / 2) % newdims.y);
int newrz = ((blockIdx.y + newdims.z / 2) % newdims.z);
int oldry = newry + (olddims.y - newdims.y) / 2;
int oldrz = newrz + (olddims.z - newdims.z) / 2;
if (x < (olddims.x + 1) / 2 && oldry >= 0 && oldry < olddims.y && oldrz >= 0 && oldrz < olddims.z)
{
int oldy = ((oldry + (olddims.y + 1) / 2) % olddims.y);
int oldz = ((oldrz + (olddims.z + 1) / 2) % olddims.z);
d_output[(blockIdx.y * newdims.y + blockIdx.x) * (newdims.x / 2 + 1) + x] = d_input[(oldz * olddims.y + oldy) * (olddims.x / 2 + 1) + x];
}
else
d_output[(blockIdx.y * newdims.y + blockIdx.x) * (newdims.x / 2 + 1) + x] = make_cuComplex(0.0f, 0.0f);
}
}
template <class T> __global__ void FFTFullPadEvenKernel(T* d_input, T* d_output, int3 olddims, int3 newdims)
{
d_input += Elements(olddims) * blockIdx.z;
d_output += Elements(newdims) * blockIdx.z;
for (int x = threadIdx.x; x < newdims.x; x += blockDim.x)
{
int newrx = ((x + (newdims.x) / 2) % newdims.x);
int newry = ((blockIdx.x + (newdims.y) / 2) % newdims.y);
int newrz = ((blockIdx.y + (newdims.z) / 2) % newdims.z);
int oldrx = newrx + (olddims.x - newdims.x - ((olddims.x & 1 - (newdims.x & 1)) % 2)) / 2;
int oldry = newry + (olddims.y - newdims.y - ((olddims.y & 1 - (newdims.y & 1)) % 2)) / 2;
int oldrz = newrz + (olddims.z - newdims.z - ((olddims.z & 1 - (newdims.z & 1)) % 2)) / 2;
if (oldrx >= 0 && oldrx < olddims.x && oldry >= 0 && oldry < olddims.y && oldrz >= 0 && oldrz < olddims.z)
{
int oldx = ((oldrx + (olddims.x + 1) / 2) % olddims.x);
int oldy = ((oldry + (olddims.y + 1) / 2) % olddims.y);
int oldz = ((oldrz + (olddims.z + 1) / 2) % olddims.z);
d_output[(blockIdx.y * newdims.y + blockIdx.x) * newdims.x + x] = d_input[(oldz * olddims.y + oldy) * olddims.x + oldx];
}
else
d_output[(blockIdx.y * newdims.y + blockIdx.x) * newdims.x + x] = (T)0;
}
}
template<> __global__ void FFTFullPadEvenKernel<tcomplex>(tcomplex* d_input, tcomplex* d_output, int3 olddims, int3 newdims)
{
d_input += Elements(olddims) * blockIdx.z;
d_output += Elements(newdims) * blockIdx.z;
for (int x = threadIdx.x; x < newdims.x; x += blockDim.x)
{
int newrx = ((x + (newdims.x) / 2) % newdims.x);
int newry = ((blockIdx.x + (newdims.y) / 2) % newdims.y);
int newrz = ((blockIdx.y + (newdims.z) / 2) % newdims.z);
int oldrx = newrx + (olddims.x - newdims.x - ((olddims.x & 1 - (newdims.x & 1)) % 2)) / 2;
int oldry = newry + (olddims.y - newdims.y - ((olddims.y & 1 - (newdims.y & 1)) % 2)) / 2;
int oldrz = newrz + (olddims.z - newdims.z - ((olddims.z & 1 - (newdims.z & 1)) % 2)) / 2;
if (oldrx >= 0 && oldrx < olddims.x && oldry >= 0 && oldry < olddims.y && oldrz >= 0 && oldrz < olddims.z)
{
int oldx = ((oldrx + (olddims.x + 1) / 2) % olddims.x);
int oldy = ((oldry + (olddims.y + 1) / 2) % olddims.y);
int oldz = ((oldrz + (olddims.z + 1) / 2) % olddims.z);
d_output[(blockIdx.y * newdims.y + blockIdx.x) * newdims.x + x] = d_input[(oldz * olddims.y + oldy) * olddims.x + oldx];
}
else
d_output[(blockIdx.y * newdims.y + blockIdx.x) * newdims.x + x] = make_cuComplex(0.0f, 0.0f);
}
}
} |
63cf1e54cf6551f5662f93b9d7e3f2edb1672026.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/fill.h>
#include <thrust/device_ptr.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "device_helpers_hip.cuh"
namespace xgboost {
// the handler to call instead of hipSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
cudaSetDeviceHandler = handler;
}
template <typename T>
class HostDeviceVectorImpl {
public:
HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_.resize(size, v);
} else {
data_h_.resize(size, v);
}
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
LazyResizeDevice(init.size());
Copy(init);
} else {
data_h_ = init;
}
}
~HostDeviceVectorImpl() {
if (device_ >= 0) {
SetDevice();
}
}
size_t Size() const { return HostCanRead() ? data_h_.size() : data_d_.size(); }
int DeviceIdx() const { return device_; }
T* DevicePointer() {
LazySyncDevice(GPUAccess::kWrite);
return data_d_.data().get();
}
const T* ConstDevicePointer() {
LazySyncDevice(GPUAccess::kRead);
return data_d_.data().get();
}
common::Span<T> DeviceSpan() {
LazySyncDevice(GPUAccess::kWrite);
return {data_d_.data().get(), static_cast<typename common::Span<T>::index_type>(Size())};
}
common::Span<const T> ConstDeviceSpan() {
LazySyncDevice(GPUAccess::kRead);
using SpanInd = typename common::Span<const T>::index_type;
return {data_d_.data().get(), static_cast<SpanInd>(Size())};
}
void Fill(T v) { // NOLINT
if (HostCanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
thrust::fill(data_d_.begin(), data_d_.end(), v);
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
// Data is on host.
if (HostCanWrite() && other->HostCanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
CopyToDevice(other);
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.data());
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.begin());
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kNone);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void SetDevice(int device) {
if (device_ == device) { return; }
if (device_ >= 0) {
LazySyncHost(GPUAccess::kNone);
}
device_ = device;
if (device_ >= 0) {
LazyResizeDevice(data_h_.size());
}
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if (Size() == 0 && device_ >= 0) {
// fast on-device resize
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_.resize(new_size, v);
} else {
// resize on host
LazySyncHost(GPUAccess::kNone);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (HostCanAccess(access)) { return; }
if (HostCanRead()) {
// data is present, just need to deny access to the device
gpu_access_ = access;
return;
}
gpu_access_ = access;
if (data_h_.size() != data_d_.size()) { data_h_.resize(data_d_.size()); }
SetDevice();
dh::safe_cuda(hipMemcpy(data_h_.data(),
data_d_.data().get(),
data_d_.size() * sizeof(T),
hipMemcpyDeviceToHost));
}
void LazySyncDevice(GPUAccess access) {
if (DeviceCanAccess(access)) { return; }
if (DeviceCanRead()) {
// deny read to the host
gpu_access_ = access;
return;
}
// data is on the host
LazyResizeDevice(data_h_.size());
SetDevice();
dh::safe_cuda(hipMemcpy(data_d_.data().get(),
data_h_.data(),
data_d_.size() * sizeof(T),
hipMemcpyHostToDevice));
gpu_access_ = access;
}
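// gpu_access_ records the access level the device currently holds (kNone, kRead, kWrite,
// in increasing order): the host may touch the data at a given level only while the device
// holds no more than that level, and the device only while it holds at least that level.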
bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; }
bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); }
bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); }
bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; }
bool DeviceCanRead() const { return DeviceCanAccess(GPUAccess::kRead); }
bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); }
private:
int device_{-1};
std::vector<T> data_h_{};
dh::device_vector<T> data_d_{};
GPUAccess gpu_access_{GPUAccess::kNone};
void CopyToDevice(HostDeviceVectorImpl* other) {
if (other->HostCanWrite()) {
CopyToDevice(other->data_h_.data());
} else {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_d_.data().get(), other->data_d_.data().get(),
data_d_.size() * sizeof(T), hipMemcpyDefault));
}
}
void CopyToDevice(const T* begin) {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_d_.data().get(), begin,
data_d_.size() * sizeof(T), hipMemcpyDefault));
}
void LazyResizeDevice(size_t new_size) {
if (new_size == data_d_.size()) { return; }
SetDevice();
data_d_.resize(new_size);
}
void SetDevice() {
CHECK_GE(device_, 0);
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(hipSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
}
};
template<typename T>
HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device)
: impl_(new HostDeviceVectorImpl<T>(size, v, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other)
: impl_(new HostDeviceVectorImpl<T>(*other.impl_)) {}
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=(const HostDeviceVector<T>& other) {
if (this == &other) { return *this; }
std::unique_ptr<HostDeviceVectorImpl<T>> newImpl(new HostDeviceVectorImpl<T>(*other.impl_));
delete impl_;
impl_ = newImpl.release();
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); }
template <typename T>
T* HostDeviceVector<T>::DevicePointer() {
return impl_->DevicePointer();
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer() const {
return impl_->ConstDevicePointer();
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan() {
return impl_->DeviceSpan();
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const {
return impl_->ConstDeviceSpan();
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanRead() const {
return impl_->HostCanRead();
}
template <typename T>
bool HostDeviceVector<T>::HostCanWrite() const {
return impl_->HostCanWrite();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanRead() const {
return impl_->DeviceCanRead();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanWrite() const {
return impl_->DeviceCanWrite();
}
template <typename T>
void HostDeviceVector<T>::SetDevice(int device) const {
impl_->SetDevice(device);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<size_t>;
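// Illustrative usage sketch (not part of the library), showing the lazy host/device syncing:
// HostDeviceVector<bst_float> vec(16, 0.0f, /*device=*/0); // data lives on device 0
// vec.HostVector()[0] = 1.0f; // copies to host; device copy becomes stale
// auto d_span = vec.DeviceSpan(); // copies back to device for writing
// const auto& h_view = vec.ConstHostVector(); // host read; device keeps read access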
} // namespace xgboost
| 63cf1e54cf6551f5662f93b9d7e3f2edb1672026.cu | /*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/fill.h>
#include <thrust/device_ptr.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "device_helpers.cuh"
namespace xgboost {
// the handler to call instead of cudaSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
cudaSetDeviceHandler = handler;
}
template <typename T>
class HostDeviceVectorImpl {
public:
HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_.resize(size, v);
} else {
data_h_.resize(size, v);
}
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
LazyResizeDevice(init.size());
Copy(init);
} else {
data_h_ = init;
}
}
~HostDeviceVectorImpl() {
if (device_ >= 0) {
SetDevice();
}
}
size_t Size() const { return HostCanRead() ? data_h_.size() : data_d_.size(); }
int DeviceIdx() const { return device_; }
T* DevicePointer() {
LazySyncDevice(GPUAccess::kWrite);
return data_d_.data().get();
}
const T* ConstDevicePointer() {
LazySyncDevice(GPUAccess::kRead);
return data_d_.data().get();
}
common::Span<T> DeviceSpan() {
LazySyncDevice(GPUAccess::kWrite);
return {data_d_.data().get(), static_cast<typename common::Span<T>::index_type>(Size())};
}
common::Span<const T> ConstDeviceSpan() {
LazySyncDevice(GPUAccess::kRead);
using SpanInd = typename common::Span<const T>::index_type;
return {data_d_.data().get(), static_cast<SpanInd>(Size())};
}
void Fill(T v) { // NOLINT
if (HostCanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
thrust::fill(data_d_.begin(), data_d_.end(), v);
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
// Data is on host.
if (HostCanWrite() && other->HostCanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
CopyToDevice(other);
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.data());
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.begin());
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kNone);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void SetDevice(int device) {
if (device_ == device) { return; }
if (device_ >= 0) {
LazySyncHost(GPUAccess::kNone);
}
device_ = device;
if (device_ >= 0) {
LazyResizeDevice(data_h_.size());
}
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if (Size() == 0 && device_ >= 0) {
// fast on-device resize
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_.resize(new_size, v);
} else {
// resize on host
LazySyncHost(GPUAccess::kNone);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (HostCanAccess(access)) { return; }
if (HostCanRead()) {
// data is present, just need to deny access to the device
gpu_access_ = access;
return;
}
gpu_access_ = access;
if (data_h_.size() != data_d_.size()) { data_h_.resize(data_d_.size()); }
SetDevice();
dh::safe_cuda(cudaMemcpy(data_h_.data(),
data_d_.data().get(),
data_d_.size() * sizeof(T),
cudaMemcpyDeviceToHost));
}
void LazySyncDevice(GPUAccess access) {
if (DeviceCanAccess(access)) { return; }
if (DeviceCanRead()) {
// deny read to the host
gpu_access_ = access;
return;
}
// data is on the host
LazyResizeDevice(data_h_.size());
SetDevice();
dh::safe_cuda(cudaMemcpy(data_d_.data().get(),
data_h_.data(),
data_d_.size() * sizeof(T),
cudaMemcpyHostToDevice));
gpu_access_ = access;
}
bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; }
bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); }
bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); }
bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; }
bool DeviceCanRead() const { return DeviceCanAccess(GPUAccess::kRead); }
bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); }
private:
int device_{-1};
std::vector<T> data_h_{};
dh::device_vector<T> data_d_{};
GPUAccess gpu_access_{GPUAccess::kNone};
void CopyToDevice(HostDeviceVectorImpl* other) {
if (other->HostCanWrite()) {
CopyToDevice(other->data_h_.data());
} else {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_d_.data().get(), other->data_d_.data().get(),
data_d_.size() * sizeof(T), cudaMemcpyDefault));
}
}
void CopyToDevice(const T* begin) {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_d_.data().get(), begin,
data_d_.size() * sizeof(T), cudaMemcpyDefault));
}
void LazyResizeDevice(size_t new_size) {
if (new_size == data_d_.size()) { return; }
SetDevice();
data_d_.resize(new_size);
}
void SetDevice() {
CHECK_GE(device_, 0);
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(cudaSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
}
};
template<typename T>
HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device)
: impl_(new HostDeviceVectorImpl<T>(size, v, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other)
: impl_(new HostDeviceVectorImpl<T>(*other.impl_)) {}
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=(const HostDeviceVector<T>& other) {
if (this == &other) { return *this; }
std::unique_ptr<HostDeviceVectorImpl<T>> newImpl(new HostDeviceVectorImpl<T>(*other.impl_));
delete impl_;
impl_ = newImpl.release();
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); }
template <typename T>
T* HostDeviceVector<T>::DevicePointer() {
return impl_->DevicePointer();
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer() const {
return impl_->ConstDevicePointer();
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan() {
return impl_->DeviceSpan();
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const {
return impl_->ConstDeviceSpan();
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanRead() const {
return impl_->HostCanRead();
}
template <typename T>
bool HostDeviceVector<T>::HostCanWrite() const {
return impl_->HostCanWrite();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanRead() const {
return impl_->DeviceCanRead();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanWrite() const {
return impl_->DeviceCanWrite();
}
template <typename T>
void HostDeviceVector<T>::SetDevice(int device) const {
impl_->SetDevice(device);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<size_t>;
} // namespace xgboost
|
14fab2879213d2fff6907a8f7399a4ad6527eed4.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2015-2019 by Contributors
* \file elementwise_metric.cc
* \brief evaluation metrics for elementwise binary or regression.
* \author Kailong Chen, Tianqi Chen
*
 * Expressions like wsum == 0 ? esum : esum / wsum are used to handle an empty dataset.
*/
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <dmlc/registry.h>
#include <cmath>
#include "metric_common.h"
#include "../common/math.h"
#include "../common/common.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::hip::par
#include <thrust/functional.h> // thrust::plus<>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
namespace xgboost {
namespace metric {
// tag this file, used to force static linking later.
DMLC_REGISTRY_FILE_TAG(elementwise_metric);
template <typename EvalRow>
class ElementWiseMetricsReduction {
public:
explicit ElementWiseMetricsReduction(EvalRow policy) : policy_(std::move(policy)) {}
PackedReduceResult CpuReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) const {
size_t ndata = labels.Size();
const auto& h_labels = labels.HostVector();
const auto& h_weights = weights.HostVector();
const auto& h_preds = preds.HostVector();
bst_float residue_sum = 0;
bst_float weights_sum = 0;
dmlc::OMPException exc;
#pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static)
for (omp_ulong i = 0; i < ndata; ++i) {
exc.Run([&]() {
const bst_float wt = h_weights.size() > 0 ? h_weights[i] : 1.0f;
residue_sum += policy_.EvalRow(h_labels[i], h_preds[i]) * wt;
weights_sum += wt;
});
}
exc.Rethrow();
PackedReduceResult res { residue_sum, weights_sum };
return res;
}
#if defined(XGBOOST_USE_CUDA)
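// Device-side reduction: transform_reduce maps every row to a (weighted residual, weight)
// pair and sums the pairs into a single PackedReduceResult.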
PackedReduceResult DeviceReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
size_t n_data = preds.Size();
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + n_data;
auto s_label = labels.DeviceSpan();
auto s_preds = preds.DeviceSpan();
auto s_weights = weights.DeviceSpan();
bool const is_null_weight = weights.Size() == 0;
auto d_policy = policy_;
dh::XGBCachingDeviceAllocator<char> alloc;
PackedReduceResult result = thrust::transform_reduce(
thrust::hip::par(alloc),
begin, end,
[=] XGBOOST_DEVICE(size_t idx) {
bst_float weight = is_null_weight ? 1.0f : s_weights[idx];
bst_float residue = d_policy.EvalRow(s_label[idx], s_preds[idx]);
residue *= weight;
return PackedReduceResult{ residue, weight };
},
PackedReduceResult(),
thrust::plus<PackedReduceResult>());
return result;
}
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
const GenericParameter &tparam,
int device,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (device < 0) {
result = CpuReduceMetrics(weights, labels, preds);
}
#if defined(XGBOOST_USE_CUDA)
else { // NOLINT
device_ = device;
preds.SetDevice(device_);
labels.SetDevice(device_);
weights.SetDevice(device_);
dh::safe_cuda(hipSetDevice(device_));
result = DeviceReduceMetrics(weights, labels, preds);
}
#endif // defined(XGBOOST_USE_CUDA)
return result;
}
private:
EvalRow policy_;
#if defined(XGBOOST_USE_CUDA)
int device_{-1};
#endif // defined(XGBOOST_USE_CUDA)
};
struct EvalRowRMSE {
char const *Name() const {
return "rmse";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return diff * diff;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowRMSLE {
char const* Name() const {
return "rmsle";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = std::log1p(label) - std::log1p(pred);
return diff * diff;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowMAE {
const char *Name() const {
return "mae";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs(label - pred);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowMAPE {
const char *Name() const {
return "mape";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs((label - pred) / label);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowLogLoss {
const char *Name() const {
return "logloss";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
const bst_float pneg = 1.0f - py;
if (py < eps) {
return -y * ::log(eps) - (1.0f - y) * ::log(1.0f - eps);
} else if (pneg < eps) {
return -y * ::log(1.0f - eps) - (1.0f - y) * ::log(eps);
} else {
return -y * ::log(py) - (1.0f - y) * ::log(pneg);
}
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowMPHE {
char const *Name() const {
return "mphe";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return std::sqrt( 1 + diff * diff) - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalError {
explicit EvalError(const char* param) {
if (param != nullptr) {
CHECK_EQ(sscanf(param, "%f", &threshold_), 1)
<< "unable to parse the threshold value for the error metric";
has_param_ = true;
} else {
threshold_ = 0.5f;
has_param_ = false;
}
}
const char *Name() const {
static std::string name;
if (has_param_) {
std::ostringstream os;
os << "error";
if (threshold_ != 0.5f) os << '@' << threshold_;
name = os.str();
return name.c_str();
} else {
return "error";
}
}
XGBOOST_DEVICE bst_float EvalRow(
bst_float label, bst_float pred) const {
// assume label is in [0,1]
return pred > threshold_ ? 1.0f - label : label;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
private:
bst_float threshold_;
bool has_param_;
};
struct EvalPoissonNegLogLik {
const char *Name() const {
return "poisson-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
if (py < eps) py = eps;
return common::LogGamma(y + 1.0f) + py - ::log(py) * y;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
/**
* Gamma deviance
*
* Expected input:
* label >= 0
* predt >= 0
*/
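// Per-row term: log(predt / label) + label / predt - 1 (kRtEps added to both inputs for
// numerical stability); GetFinal scales the weighted mean by 2.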
struct EvalGammaDeviance {
const char *Name() const { return "gamma-deviance"; }
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float predt) const {
predt += kRtEps;
label += kRtEps;
return ::log(predt / label) + label / predt - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
if (wsum <= 0) {
wsum = kRtEps;
}
return 2 * esum / wsum;
}
};
struct EvalGammaNLogLik {
static const char *Name() {
return "gamma-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
py = ::max(py, 1e-6f);
// hardcoded dispersion.
float constexpr kPsi = 1.0;
bst_float theta = -1. / py;
bst_float a = kPsi;
// b = -::log(-theta);
float b = 1.0f;
// c = 1. / kPsi * ::log(y/kPsi) - ::log(y) - common::LogGamma(1. / kPsi);
// = 1.0f * ::log(y) - ::log(y) - 0 = 0
float c = 0;
// general form for exponential family.
return -((y * theta - b) / a + c);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalTweedieNLogLik {
explicit EvalTweedieNLogLik(const char* param) {
CHECK(param != nullptr)
<< "tweedie-nloglik must be in format tweedie-nloglik@rho";
rho_ = atof(param);
CHECK(rho_ < 2 && rho_ >= 1)
<< "tweedie variance power must be in interval [1, 2)";
}
const char *Name() const {
static std::string name;
std::ostringstream os;
os << "tweedie-nloglik@" << rho_;
name = os.str();
return name.c_str();
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const {
bst_float a = y * ::exp((1 - rho_) * ::log(p)) / (1 - rho_);
bst_float b = ::exp((2 - rho_) * ::log(p)) / (2 - rho_);
return -a + b;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
protected:
bst_float rho_;
};
/*!
* \brief base class of element-wise evaluation
* \tparam Derived the name of subclass
*/
template<typename Policy>
struct EvalEWiseBase : public Metric {
EvalEWiseBase() = default;
explicit EvalEWiseBase(char const* policy_param) :
policy_{policy_param}, reducer_{policy_} {}
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
CHECK_EQ(preds.Size(), info.labels_.Size())
<< "label and prediction size not match, "
<< "hint: use merror or mlogloss for multi-class classification";
int device = tparam_->gpu_id;
auto result =
reducer_.Reduce(*tparam_, device, info.weights_, info.labels_, preds);
double dat[2] { result.Residue(), result.Weights() };
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return Policy::GetFinal(dat[0], dat[1]);
}
const char* Name() const override {
return policy_.Name();
}
private:
Policy policy_;
ElementWiseMetricsReduction<Policy> reducer_{policy_};
};
XGBOOST_REGISTER_METRIC(RMSE, "rmse")
.describe("Rooted mean square error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSE>(); });
XGBOOST_REGISTER_METRIC(RMSLE, "rmsle")
.describe("Rooted mean square log error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSLE>(); });
XGBOOST_REGISTER_METRIC(MAE, "mae")
.describe("Mean absolute error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAE>(); });
XGBOOST_REGISTER_METRIC(MAPE, "mape")
.describe("Mean absolute percentage error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAPE>(); });
XGBOOST_REGISTER_METRIC(MPHE, "mphe")
.describe("Mean Pseudo Huber error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMPHE>(); });
XGBOOST_REGISTER_METRIC(LogLoss, "logloss")
.describe("Negative loglikelihood for logistic regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowLogLoss>(); });
XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik")
.describe("Negative loglikelihood for poisson regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalPoissonNegLogLik>(); });
XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance")
.describe("Residual deviance for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaDeviance>(); });
XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik")
.describe("Negative log-likelihood for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaNLogLik>(); });
XGBOOST_REGISTER_METRIC(Error, "error")
.describe("Binary classification error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalError>(param); });
XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik")
.describe("tweedie-nloglik@rho for tweedie regression.")
.set_body([](const char* param) {
return new EvalEWiseBase<EvalTweedieNLogLik>(param);
});
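// Illustrative sketch (not part of the library) of how a further element-wise metric would
// be wired in through EvalEWiseBase; EvalRowFoo and the "foo" name are hypothetical.
//
// struct EvalRowFoo {
// const char *Name() const { return "foo"; }
// XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
// bst_float diff = label - pred;
// return diff * diff; // per-row contribution
// }
// static bst_float GetFinal(bst_float esum, bst_float wsum) {
// return wsum == 0 ? esum : esum / wsum;
// }
// };
// XGBOOST_REGISTER_METRIC(Foo, "foo")
// .describe("Hypothetical example metric.")
// .set_body([](const char* param) { return new EvalEWiseBase<EvalRowFoo>(); });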
} // namespace metric
} // namespace xgboost
| 14fab2879213d2fff6907a8f7399a4ad6527eed4.cu | /*!
* Copyright 2015-2019 by Contributors
* \file elementwise_metric.cc
* \brief evaluation metrics for elementwise binary or regression.
* \author Kailong Chen, Tianqi Chen
*
 * Expressions like wsum == 0 ? esum : esum / wsum are used to handle an empty dataset.
*/
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <dmlc/registry.h>
#include <cmath>
#include "metric_common.h"
#include "../common/math.h"
#include "../common/common.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::cuda::par
#include <thrust/functional.h> // thrust::plus<>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
namespace xgboost {
namespace metric {
// tag this file, used to force static linking later.
DMLC_REGISTRY_FILE_TAG(elementwise_metric);
template <typename EvalRow>
class ElementWiseMetricsReduction {
public:
explicit ElementWiseMetricsReduction(EvalRow policy) : policy_(std::move(policy)) {}
PackedReduceResult CpuReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) const {
size_t ndata = labels.Size();
const auto& h_labels = labels.HostVector();
const auto& h_weights = weights.HostVector();
const auto& h_preds = preds.HostVector();
bst_float residue_sum = 0;
bst_float weights_sum = 0;
dmlc::OMPException exc;
#pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static)
for (omp_ulong i = 0; i < ndata; ++i) {
exc.Run([&]() {
const bst_float wt = h_weights.size() > 0 ? h_weights[i] : 1.0f;
residue_sum += policy_.EvalRow(h_labels[i], h_preds[i]) * wt;
weights_sum += wt;
});
}
exc.Rethrow();
PackedReduceResult res { residue_sum, weights_sum };
return res;
}
#if defined(XGBOOST_USE_CUDA)
PackedReduceResult DeviceReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
size_t n_data = preds.Size();
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + n_data;
auto s_label = labels.DeviceSpan();
auto s_preds = preds.DeviceSpan();
auto s_weights = weights.DeviceSpan();
bool const is_null_weight = weights.Size() == 0;
auto d_policy = policy_;
dh::XGBCachingDeviceAllocator<char> alloc;
PackedReduceResult result = thrust::transform_reduce(
thrust::cuda::par(alloc),
begin, end,
[=] XGBOOST_DEVICE(size_t idx) {
bst_float weight = is_null_weight ? 1.0f : s_weights[idx];
bst_float residue = d_policy.EvalRow(s_label[idx], s_preds[idx]);
residue *= weight;
return PackedReduceResult{ residue, weight };
},
PackedReduceResult(),
thrust::plus<PackedReduceResult>());
return result;
}
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
const GenericParameter &tparam,
int device,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (device < 0) {
result = CpuReduceMetrics(weights, labels, preds);
}
#if defined(XGBOOST_USE_CUDA)
else { // NOLINT
device_ = device;
preds.SetDevice(device_);
labels.SetDevice(device_);
weights.SetDevice(device_);
dh::safe_cuda(cudaSetDevice(device_));
result = DeviceReduceMetrics(weights, labels, preds);
}
#endif // defined(XGBOOST_USE_CUDA)
return result;
}
private:
EvalRow policy_;
#if defined(XGBOOST_USE_CUDA)
int device_{-1};
#endif // defined(XGBOOST_USE_CUDA)
};
struct EvalRowRMSE {
char const *Name() const {
return "rmse";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return diff * diff;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowRMSLE {
char const* Name() const {
return "rmsle";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = std::log1p(label) - std::log1p(pred);
return diff * diff;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowMAE {
const char *Name() const {
return "mae";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs(label - pred);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowMAPE {
const char *Name() const {
return "mape";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs((label - pred) / label);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowLogLoss {
const char *Name() const {
return "logloss";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
const bst_float pneg = 1.0f - py;
if (py < eps) {
return -y * std::log(eps) - (1.0f - y) * std::log(1.0f - eps);
} else if (pneg < eps) {
return -y * std::log(1.0f - eps) - (1.0f - y) * std::log(eps);
} else {
return -y * std::log(py) - (1.0f - y) * std::log(pneg);
}
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowMPHE {
char const *Name() const {
return "mphe";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return std::sqrt( 1 + diff * diff) - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalError {
explicit EvalError(const char* param) {
if (param != nullptr) {
CHECK_EQ(sscanf(param, "%f", &threshold_), 1)
<< "unable to parse the threshold value for the error metric";
has_param_ = true;
} else {
threshold_ = 0.5f;
has_param_ = false;
}
}
const char *Name() const {
static std::string name;
if (has_param_) {
std::ostringstream os;
os << "error";
if (threshold_ != 0.5f) os << '@' << threshold_;
name = os.str();
return name.c_str();
} else {
return "error";
}
}
XGBOOST_DEVICE bst_float EvalRow(
bst_float label, bst_float pred) const {
// assume label is in [0,1]
return pred > threshold_ ? 1.0f - label : label;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
private:
bst_float threshold_;
bool has_param_;
};
struct EvalPoissonNegLogLik {
const char *Name() const {
return "poisson-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
if (py < eps) py = eps;
return common::LogGamma(y + 1.0f) + py - std::log(py) * y;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
/**
* Gamma deviance
*
* Expected input:
* label >= 0
* predt >= 0
*/
struct EvalGammaDeviance {
const char *Name() const { return "gamma-deviance"; }
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float predt) const {
predt += kRtEps;
label += kRtEps;
return std::log(predt / label) + label / predt - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
if (wsum <= 0) {
wsum = kRtEps;
}
return 2 * esum / wsum;
}
};
struct EvalGammaNLogLik {
static const char *Name() {
return "gamma-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
py = std::max(py, 1e-6f);
// hardcoded dispersion.
float constexpr kPsi = 1.0;
bst_float theta = -1. / py;
bst_float a = kPsi;
// b = -std::log(-theta);
float b = 1.0f;
// c = 1. / kPsi * std::log(y/kPsi) - std::log(y) - common::LogGamma(1. / kPsi);
// = 1.0f * std::log(y) - std::log(y) - 0 = 0
float c = 0;
// general form for exponential family.
return -((y * theta - b) / a + c);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalTweedieNLogLik {
explicit EvalTweedieNLogLik(const char* param) {
CHECK(param != nullptr)
<< "tweedie-nloglik must be in format tweedie-nloglik@rho";
rho_ = atof(param);
CHECK(rho_ < 2 && rho_ >= 1)
<< "tweedie variance power must be in interval [1, 2)";
}
const char *Name() const {
static std::string name;
std::ostringstream os;
os << "tweedie-nloglik@" << rho_;
name = os.str();
return name.c_str();
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const {
bst_float a = y * std::exp((1 - rho_) * std::log(p)) / (1 - rho_);
bst_float b = std::exp((2 - rho_) * std::log(p)) / (2 - rho_);
return -a + b;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
protected:
bst_float rho_;
};
/*!
* \brief base class of element-wise evaluation
* \tparam Policy the policy type that supplies EvalRow and GetFinal
*/
template<typename Policy>
struct EvalEWiseBase : public Metric {
EvalEWiseBase() = default;
explicit EvalEWiseBase(char const* policy_param) :
policy_{policy_param}, reducer_{policy_} {}
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
CHECK_EQ(preds.Size(), info.labels_.Size())
<< "label and prediction size not match, "
<< "hint: use merror or mlogloss for multi-class classification";
int device = tparam_->gpu_id;
auto result =
reducer_.Reduce(*tparam_, device, info.weights_, info.labels_, preds);
double dat[2] { result.Residue(), result.Weights() };
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return Policy::GetFinal(dat[0], dat[1]);
}
const char* Name() const override {
return policy_.Name();
}
private:
Policy policy_;
ElementWiseMetricsReduction<Policy> reducer_{policy_};
};
XGBOOST_REGISTER_METRIC(RMSE, "rmse")
.describe("Rooted mean square error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSE>(); });
XGBOOST_REGISTER_METRIC(RMSLE, "rmsle")
.describe("Rooted mean square log error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSLE>(); });
XGBOOST_REGISTER_METRIC(MAE, "mae")
.describe("Mean absolute error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAE>(); });
XGBOOST_REGISTER_METRIC(MAPE, "mape")
.describe("Mean absolute percentage error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAPE>(); });
XGBOOST_REGISTER_METRIC(MPHE, "mphe")
.describe("Mean Pseudo Huber error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMPHE>(); });
XGBOOST_REGISTER_METRIC(LogLoss, "logloss")
.describe("Negative loglikelihood for logistic regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowLogLoss>(); });
XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik")
.describe("Negative loglikelihood for poisson regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalPoissonNegLogLik>(); });
XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance")
.describe("Residual deviance for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaDeviance>(); });
XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik")
.describe("Negative log-likelihood for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaNLogLik>(); });
XGBOOST_REGISTER_METRIC(Error, "error")
.describe("Binary classification error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalError>(param); });
XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik")
.describe("tweedie-nloglik@rho for tweedie regression.")
.set_body([](const char* param) {
return new EvalEWiseBase<EvalTweedieNLogLik>(param);
});
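// Note on the `param` argument above: the text after '@' in a metric name
// (for example "error@0.7" or "tweedie-nloglik@1.5") is forwarded to these
// constructors as `param`, which is why EvalError parses it as a threshold and
// EvalTweedieNLogLik requires the "tweedie-nloglik@rho" form.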
} // namespace metric
} // namespace xgboost
|
614720cee1cc9df1d498bd4e60775339a776d686.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include <float.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include "utils.h"
__global__ void histogram_kernel(unsigned int* d_bins, const float* d_in, const int bin_count, const float lum_min,
const float lum_max, const int size) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if (mid >= size) return;
float lum_range = lum_max - lum_min;
int bin = ((d_in[mid] - lum_min) / lum_range) * bin_count;
if (bin >= bin_count) bin = bin_count - 1; // clamp: d_in[mid] == lum_max would otherwise index one past the last bin
atomicAdd(&d_bins[bin], 1);
}
__global__ void scan_kernel(unsigned int* d_bins, int size) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if (mid >= size) return;
for (int s = 1; s <= size; s *= 2) {
int spot = mid - s;
unsigned int val = 0;
if (spot >= 0) val = d_bins[spot];
__syncthreads();
if (spot >= 0) d_bins[mid] += val;
__syncthreads();
}
}
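// Note: scan_kernel above is not launched anywhere in this file (the CDF is
// computed on the host in your_histogram_and_prefixsum below). As written it
// is a Hillis-Steele style inclusive scan that is only valid when the whole
// histogram fits in a single thread block, since __syncthreads() does not
// synchronize across blocks.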
// calculate reduce max or min and stick the value in d_answer.
__global__ void reduce_minmax_kernel(const float* const d_in, float* d_out, const size_t size, int minmax) {
extern __shared__ float shared[];
int mid = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// each thread copies one element of this block's chunk into shared memory
if (mid < size) {
shared[tid] = d_in[mid];
} else {
if (minmax == 0)
shared[tid] = FLT_MAX;
else
shared[tid] = -FLT_MAX;
}
// wait for all threads to copy the memory
__syncthreads();
// don't touch memory if this thread is past the end of the input; the early
// return is placed after the __syncthreads() above so every thread reaches that barrier
if (mid >= size) {
if (tid == 0) {
if (minmax == 0)
d_out[blockIdx.x] = FLT_MAX;
else
d_out[blockIdx.x] = -FLT_MAX;
}
return;
}
for (unsigned int s = blockDim.x / 2; s > 0; s /= 2) {
if (tid < s) {
if (minmax == 0) {
shared[tid] = min(shared[tid], shared[tid + s]);
} else {
shared[tid] = max(shared[tid], shared[tid + s]);
}
}
__syncthreads();
}
if (tid == 0) {
d_out[blockIdx.x] = shared[0];
}
}
int get_max_size(int n, int d) { return (int)ceil((float)n / (float)d) + 1; }
float reduce_minmax(const float* const d_in, const size_t size, int minmax) {
int BLOCK_SIZE = 32;
// we need to keep reducing until we get to the amount that we consider
// having the entire thing fit into one block size
size_t curr_size = size;
float* d_curr_in;
checkCudaErrors(hipMalloc(&d_curr_in, sizeof(float) * size));
checkCudaErrors(hipMemcpy(d_curr_in, d_in, sizeof(float) * size, hipMemcpyDeviceToDevice));
float* d_curr_out;
dim3 thread_dim(BLOCK_SIZE);
const int shared_mem_size = sizeof(float) * BLOCK_SIZE;
while (1) {
checkCudaErrors(hipMalloc(&d_curr_out, sizeof(float) * get_max_size(curr_size, BLOCK_SIZE)));
dim3 block_dim(get_max_size(curr_size, BLOCK_SIZE)); // curr_size (not size), so the grid matches the allocated output
hipLaunchKernelGGL(( reduce_minmax_kernel), dim3(block_dim), dim3(thread_dim), shared_mem_size, 0, d_curr_in, d_curr_out, curr_size, minmax);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// free the previous input and reuse this pass's output as the next pass's input
checkCudaErrors(hipFree(d_curr_in));
d_curr_in = d_curr_out;
if (curr_size < BLOCK_SIZE) break;
curr_size = get_max_size(curr_size, BLOCK_SIZE);
}
// at this point the reduction has collapsed into a single block, so the result is in d_curr_out[0]
float h_out;
hipMemcpy(&h_out, d_curr_out, sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_curr_out);
return h_out;
}
void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float& min_logLum,
float& max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) {
const size_t size = numRows * numCols;
min_logLum = reduce_minmax(d_logLuminance, size, 0);
max_logLum = reduce_minmax(d_logLuminance, size, 1);
printf("got min of %f\n", min_logLum);
printf("got max of %f\n", max_logLum);
printf("numBins %d\n", numBins);
unsigned int* d_bins;
size_t histo_size = sizeof(unsigned int) * numBins;
checkCudaErrors(hipMalloc(&d_bins, histo_size));
checkCudaErrors(hipMemset(d_bins, 0, histo_size));
dim3 thread_dim(1024);
dim3 hist_block_dim(get_max_size(size, thread_dim.x));
hipLaunchKernelGGL(( histogram_kernel), dim3(hist_block_dim), dim3(thread_dim), 0, 0, d_bins, d_logLuminance, numBins, min_logLum, max_logLum, size);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
unsigned int* h_out = new unsigned int[numBins];
hipMemcpy(h_out, d_bins, sizeof(unsigned int) * numBins, hipMemcpyDeviceToHost);
unsigned int* h_cdf = new unsigned int[numBins];
h_cdf[0] = 0;
for (int i = 1; i < numBins; ++i) {
h_cdf[i] = h_out[i - 1] + h_cdf[i - 1];
}
checkCudaErrors(hipMemcpy(d_cdf, h_cdf, sizeof(unsigned int) * numBins, hipMemcpyHostToDevice));
// TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
} | 614720cee1cc9df1d498bd4e60775339a776d686.cu |
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include <float.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include "utils.h"
__global__ void histogram_kernel(unsigned int* d_bins, const float* d_in, const int bin_count, const float lum_min,
const float lum_max, const int size) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if (mid >= size) return;
float lum_range = lum_max - lum_min;
int bin = ((d_in[mid] - lum_min) / lum_range) * bin_count;
if (bin >= bin_count) bin = bin_count - 1; // clamp: d_in[mid] == lum_max would otherwise index one past the last bin
atomicAdd(&d_bins[bin], 1);
}
__global__ void scan_kernel(unsigned int* d_bins, int size) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if (mid >= size) return;
for (int s = 1; s <= size; s *= 2) {
int spot = mid - s;
unsigned int val = 0;
if (spot >= 0) val = d_bins[spot];
__syncthreads();
if (spot >= 0) d_bins[mid] += val;
__syncthreads();
}
}
// calculate reduce max or min and stick the value in d_answer.
__global__ void reduce_minmax_kernel(const float* const d_in, float* d_out, const size_t size, int minmax) {
extern __shared__ float shared[];
int mid = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// each thread copies one element of this block's chunk into shared memory
if (mid < size) {
shared[tid] = d_in[mid];
} else {
if (minmax == 0)
shared[tid] = FLT_MAX;
else
shared[tid] = -FLT_MAX;
}
// wait for all threads to copy the memory
__syncthreads();
// don't touch memory if this thread is past the end of the input; the early
// return is placed after the __syncthreads() above so every thread reaches that barrier
if (mid >= size) {
if (tid == 0) {
if (minmax == 0)
d_out[blockIdx.x] = FLT_MAX;
else
d_out[blockIdx.x] = -FLT_MAX;
}
return;
}
for (unsigned int s = blockDim.x / 2; s > 0; s /= 2) {
if (tid < s) {
if (minmax == 0) {
shared[tid] = min(shared[tid], shared[tid + s]);
} else {
shared[tid] = max(shared[tid], shared[tid + s]);
}
}
__syncthreads();
}
if (tid == 0) {
d_out[blockIdx.x] = shared[0];
}
}
int get_max_size(int n, int d) { return (int)ceil((float)n / (float)d) + 1; }
float reduce_minmax(const float* const d_in, const size_t size, int minmax) {
int BLOCK_SIZE = 32;
// we need to keep reducing until we get to the amount that we consider
// having the entire thing fit into one block size
size_t curr_size = size;
float* d_curr_in;
checkCudaErrors(cudaMalloc(&d_curr_in, sizeof(float) * size));
checkCudaErrors(cudaMemcpy(d_curr_in, d_in, sizeof(float) * size, cudaMemcpyDeviceToDevice));
float* d_curr_out;
dim3 thread_dim(BLOCK_SIZE);
const int shared_mem_size = sizeof(float) * BLOCK_SIZE;
while (1) {
checkCudaErrors(cudaMalloc(&d_curr_out, sizeof(float) * get_max_size(curr_size, BLOCK_SIZE)));
dim3 block_dim(get_max_size(curr_size, BLOCK_SIZE)); // curr_size (not size), so the grid matches the allocated output
reduce_minmax_kernel<<<block_dim, thread_dim, shared_mem_size>>>(d_curr_in, d_curr_out, curr_size, minmax);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
// free the previous input and reuse this pass's output as the next pass's input
checkCudaErrors(cudaFree(d_curr_in));
d_curr_in = d_curr_out;
if (curr_size < BLOCK_SIZE) break;
curr_size = get_max_size(curr_size, BLOCK_SIZE);
}
// at this point the reduction has collapsed into a single block, so the result is in d_curr_out[0]
float h_out;
cudaMemcpy(&h_out, d_curr_out, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_curr_out);
return h_out;
}
void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float& min_logLum,
float& max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) {
const size_t size = numRows * numCols;
min_logLum = reduce_minmax(d_logLuminance, size, 0);
max_logLum = reduce_minmax(d_logLuminance, size, 1);
printf("got min of %f\n", min_logLum);
printf("got max of %f\n", max_logLum);
printf("numBins %d\n", numBins);
unsigned int* d_bins;
size_t histo_size = sizeof(unsigned int) * numBins;
checkCudaErrors(cudaMalloc(&d_bins, histo_size));
checkCudaErrors(cudaMemset(d_bins, 0, histo_size));
dim3 thread_dim(1024);
dim3 hist_block_dim(get_max_size(size, thread_dim.x));
histogram_kernel<<<hist_block_dim, thread_dim>>>(d_bins, d_logLuminance, numBins, min_logLum, max_logLum, size);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
unsigned int* h_out = new unsigned int[numBins];
cudaMemcpy(h_out, d_bins, sizeof(unsigned int) * numBins, cudaMemcpyDeviceToHost);
unsigned int* h_cdf = new unsigned int[numBins];
h_cdf[0] = 0;
for (int i = 1; i < numBins; ++i) {
h_cdf[i] = h_out[i - 1] + h_cdf[i - 1];
}
checkCudaErrors(cudaMemcpy(d_cdf, h_cdf, sizeof(unsigned int) * numBins, cudaMemcpyHostToDevice));
// TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
} |
e18dc1a7cae621e1f410fa0e6ed97579df3b3258.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THCS_GENERIC_FILE
#define THCS_GENERIC_FILE "generic/THCSTensor.cu"
#else
#include "THHThrustAllocator.cuh"
#include "THHTensor.hpp"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#if TORCH_HIP_VERSION >= 7000
#include <thrust/system/hip/execution_policy.h>
#endif
#define I_INFO(tensor) getTensorInfo<int64_t, THCIndexTensor, uint64_t>(state, tensor)
#define V_INFO(tensor) getTensorInfo<real, THCTensor, uint64_t>(state, tensor)
THCTensor *THCSTensor_(toDense)(THCState *state, THCSTensor *self) {
THLongStorage *size;
THCTensor *dst;
// set up the new tensor
size = THCSTensor_(newSizeOf)(state, self);
dst = THCTensor_(newWithSize)(state, size, NULL);
THLongStorage_free(size);
THCTensor_(zero)(state, dst);
real one = ScalarConvert<int, real>::to(1);
THCSTensor_(spcadd)(state, dst, dst, one, self);
THCudaCheck(hipGetLastError());
return dst;
}
THCSTensor *THCSTensor_(newCoalesce)(THCState *state, THCSTensor *self) {
ptrdiff_t nnz = self->nnz;
if (nnz < 2) {
self->coalesced = 1;
}
if (self->coalesced) {
THCSTensor_(retain)(state, self);
return self;
}
#if TORCH_HIP_VERSION >= 7000
THCThrustAllocator thrustAlloc(state);
#define THRUST_EXEC(fn, ...) fn(thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), ##__VA_ARGS__)
#else
#define THRUST_EXEC(fn, ...) fn(##__VA_ARGS__)
#endif
// For indices, a simple sort + unique suffices
// For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection).
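// Illustrative example of what coalescing does: a 2-d sparse tensor with
// indices [[0, 0, 1], [0, 0, 2]] and values [1, 2, 3] lists the coordinate
// (0, 0) twice; after sort + unique the duplicates are merged and their values
// summed, giving indices [[0, 1], [0, 2]] and values [3, 3].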
THCTensor *values_ = THCSTensor_(newValues)(state, self);
THCTensor *values = THCTensor_(newContiguous)(state, values_);
THCTensor_(free)(state, values_);
int nDimI = self->nDimensionI;
int64_t stride = values->stride[0];
hipStream_t stream = THCState_getCurrentStream(state);
// indices will be modified by Thrust, so we have to clone or use new storage
// here.
THCIndexTensor *indices1D = THCSTensor_(newFlattenedIndices)(state, self, 1);
THCIndexTensor *origIndices = THCIndexTensor_(newWithSize1d)(state, nnz);
THCIndexTensor *uniqueOffsets = THCIndexTensor_(newWithSize1d)(state, nnz);
typedef thrust::device_ptr<int64_t> thrust_ptr;
thrust_ptr indicesIter(THCIndexTensor_(data)(state, indices1D));
thrust_ptr origIndicesIter(THCIndexTensor_(data)(state, origIndices));
thrust_ptr uniqueOffsetsIter(THCIndexTensor_(data)(state, uniqueOffsets));
// Fill sortedOrigIndices with sequential indices
thrust::counting_iterator<int64_t> countIterI(TH_INDEX_BASE);
thrust::counting_iterator<int64_t> countIterO(TH_INDEX_BASE);
THRUST_EXEC(thrust::copy, countIterI, countIterI + nnz, origIndicesIter);
THRUST_EXEC(thrust::copy, countIterO, countIterO + nnz, uniqueOffsetsIter);
THRUST_EXEC(thrust::sort_by_key,
indicesIter, indicesIter + nnz,
origIndicesIter, ThrustLTOp<int64_t>()
);
// this forces device-host synchronization!
thrust::pair<thrust_ptr, thrust_ptr> newEnd = THRUST_EXEC(
thrust::unique_by_key,
indicesIter, indicesIter + nnz,
uniqueOffsetsIter
);
int64_t newNnz = newEnd.first - indicesIter;
THCIndexTensor_(resize2d)(state, indices1D, 1, newNnz);
THCTensor *newValues = THCTensor_(new)(state);
THCTensor_(resizeNd)(state, newValues, values->dim(), values->size, NULL);
newValues->size[0] = newNnz;
dim3 grid(THCCeilDiv(newNnz, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
hipLaunchKernelGGL(( THCSTensor_coalesceValuesKernel<real, accreal>), dim3(grid), dim3(block), 0, stream,
THCIndexTensor_(data)(state, uniqueOffsets),
THCIndexTensor_(data)(state, origIndices),
THCTensor_(data)(state, values),
THCTensor_(data)(state, newValues),
nnz,
newNnz,
stride
);
// this grid-strided version is slower but probably more flexible
// to different sizes
// int64_t blockX = min(stride, (int64_t) 512);
// dim3 block(blockX, 512 / blockX);
// int64_t grid = min((int64_t) 1024, THCCeilDiv((int64_t) newNnz * stride, (int64_t) block.x * block.y));
// THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream>>>(
// THCIndexTensor_(data)(state, uniqueOffsets),
// THCIndexTensor_(data)(state, origIndices),
// THCTensor_(data)(state, values),
// THCTensor_(data)(state, newValues),
// nnz,
// newNnz,
// stride
// );
THCIndexTensor_(free)(state, origIndices);
THCIndexTensor_(free)(state, uniqueOffsets);
////////////////////////////////////////////////////////////
// unflatten indices if necessary
THCIndexTensor *newIndices;
if (nDimI == 1) {
newIndices = indices1D;
} else {
newIndices = THCIndexTensor_(newWithSize2d)(state, nDimI, newNnz);
THCIndexTensor *indicesSlice = THCIndexTensor_(new)(state);
if (TH_INDEX_BASE != 0) {
THCIndexTensor_(add)(state, indices1D, indices1D, -1);
}
for (int64_t d = nDimI - 1; d >= 0; d--) {
THCIndexTensor_(select)(state, indicesSlice, newIndices, 0, d);
THCIndexTensor_(copy)(state, indicesSlice, indices1D);
THCIndexTensor_(div)(state, indices1D, indices1D, self->size[d]);
THCIndexTensor_(cadd)(state, indicesSlice, indicesSlice, -self->size[d], indices1D);
}
if (TH_INDEX_BASE != 0) {
THCIndexTensor_(add)(state, newIndices, newIndices, 1);
}
THCIndexTensor_(free)(state, indices1D);
THCIndexTensor_(free)(state, indicesSlice);
}
////////////////////////////////////////////////////////////
THLongStorage *size = THCSTensor_(newSizeOf)(state, self);
THCSTensor *dst = THCSTensor_(newWithTensorAndSize)(state, newIndices, newValues, size);
THLongStorage_free(size);
THCTensor_(free)(state, values);
THCIndexTensor_(free)(state, newIndices);
THCTensor_(free)(state, newValues);
dst->coalesced = 1;
THCudaCheck(hipGetLastError());
return dst;
#undef THRUST_EXEC
}
// forceClone is intended to be used as a boolean; if set, the result is forced
// to be a clone of self.
THCIndexTensor* THCSTensor_(newFlattenedIndices)(THCState *state, THCSTensor *self, int forceClone) {
THCIndexTensor *indices = THCSTensor_(newIndices)(state, self);
int nDimI = self->nDimensionI;
if (nDimI == 1) {
if (forceClone) {
THCIndexTensor *indices_clone = THCIndexTensor_(newClone)(state, indices);
THCIndexTensor_(free)(state, indices);
return indices_clone;
} else {
return indices;
}
} else {
// FIXME TH_INDEX_BASE
int64_t factor = 1;
THCIndexTensor *indices1D = THCIndexTensor_(newWithSize2d)(state, 1, self->nnz);
THCIndexTensor_(fill)(state, indices1D, TH_INDEX_BASE);
THCIndexTensor *indicesSlice = THCIndexTensor_(new)(state);
for (int64_t d = nDimI - 1; d >= 0; d--) {
THCIndexTensor_(select)(state, indicesSlice, indices, 0, d);
THCIndexTensor_(cadd)(state, indices1D, indices1D, factor, indicesSlice);
if (TH_INDEX_BASE != 0) {
THCIndexTensor_(add)(state, indices1D, indices1D, -TH_INDEX_BASE);
}
factor *= self->size[d];
}
THCIndexTensor_(free)(state, indices);
THCIndexTensor_(free)(state, indicesSlice);
return indices1D;
}
}
// In place transpose
void THCSTensor_(transpose)(THCState *state, THCSTensor *self, int d1, int d2) {
int64_t nDimI = THCSTensor_(nDimensionI)(state, self);
int64_t nDimV = THCSTensor_(nDimensionV)(state, self);
THArgCheck(d1 < nDimI && d2 < nDimI, 1, "Transposed dimensions should be sparse. Got nDimI: %ld, d1: %ld, d2: %ld", nDimI, d1, d2);
THCIndexTensor *indices = THCSTensor_(newIndices)(state, self);
int64_t nnz = THCSTensor_(nnz)(state, self);
THCIndexTensor *buffer = THCIndexTensor_(newWithSize1d)(state, nnz);
THCIndexTensor *slice1 = THCIndexTensor_(newSelect)(state, indices, 0, d1);
THCIndexTensor *slice2 = THCIndexTensor_(newSelect)(state, indices, 0, d2);
THCIndexTensor_(copy)(state, buffer, slice1);
THCIndexTensor_(copy)(state, slice1, slice2);
THCIndexTensor_(copy)(state, slice2, buffer);
int64_t i = self->size[d1];
self->size[d1] = self->size[d2];
self->size[d2] = i;
THCIndexTensor_(free)(state, indices);
THCIndexTensor_(free)(state, buffer);
THCIndexTensor_(free)(state, slice1);
THCIndexTensor_(free)(state, slice2);
}
int THCSTensor_(getDevice)(THCState* state, const THCSTensor* tensor) {
if (!tensor->values || !tensor->values->storage) return -1;
return THCStorage_(getDevice)(state, tensor->values->storage);
}
#endif
| e18dc1a7cae621e1f410fa0e6ed97579df3b3258.cu | #ifndef THCS_GENERIC_FILE
#define THCS_GENERIC_FILE "generic/THCSTensor.cu"
#else
#include "THCThrustAllocator.cuh"
#include "THCTensor.hpp"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#if CUDA_VERSION >= 7000
#include <thrust/system/cuda/execution_policy.h>
#endif
#define I_INFO(tensor) getTensorInfo<int64_t, THCIndexTensor, uint64_t>(state, tensor)
#define V_INFO(tensor) getTensorInfo<real, THCTensor, uint64_t>(state, tensor)
THCTensor *THCSTensor_(toDense)(THCState *state, THCSTensor *self) {
THLongStorage *size;
THCTensor *dst;
// set up the new tensor
size = THCSTensor_(newSizeOf)(state, self);
dst = THCTensor_(newWithSize)(state, size, NULL);
THLongStorage_free(size);
THCTensor_(zero)(state, dst);
real one = ScalarConvert<int, real>::to(1);
THCSTensor_(spcadd)(state, dst, dst, one, self);
THCudaCheck(cudaGetLastError());
return dst;
}
THCSTensor *THCSTensor_(newCoalesce)(THCState *state, THCSTensor *self) {
ptrdiff_t nnz = self->nnz;
if (nnz < 2) {
self->coalesced = 1;
}
if (self->coalesced) {
THCSTensor_(retain)(state, self);
return self;
}
#if CUDA_VERSION >= 7000
THCThrustAllocator thrustAlloc(state);
#define THRUST_EXEC(fn, ...) fn(thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), ##__VA_ARGS__)
#else
#define THRUST_EXEC(fn, ...) fn(##__VA_ARGS__)
#endif
// For indices, a simple sort + unique suffices
// For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection).
THCTensor *values_ = THCSTensor_(newValues)(state, self);
THCTensor *values = THCTensor_(newContiguous)(state, values_);
THCTensor_(free)(state, values_);
int nDimI = self->nDimensionI;
int64_t stride = values->stride[0];
cudaStream_t stream = THCState_getCurrentStream(state);
// indices will be modified by Thrust, so we have to clone or use new storage
// here.
THCIndexTensor *indices1D = THCSTensor_(newFlattenedIndices)(state, self, 1);
THCIndexTensor *origIndices = THCIndexTensor_(newWithSize1d)(state, nnz);
THCIndexTensor *uniqueOffsets = THCIndexTensor_(newWithSize1d)(state, nnz);
typedef thrust::device_ptr<int64_t> thrust_ptr;
thrust_ptr indicesIter(THCIndexTensor_(data)(state, indices1D));
thrust_ptr origIndicesIter(THCIndexTensor_(data)(state, origIndices));
thrust_ptr uniqueOffsetsIter(THCIndexTensor_(data)(state, uniqueOffsets));
// Fill sortedOrigIndices with sequential indices
thrust::counting_iterator<int64_t> countIterI(TH_INDEX_BASE);
thrust::counting_iterator<int64_t> countIterO(TH_INDEX_BASE);
THRUST_EXEC(thrust::copy, countIterI, countIterI + nnz, origIndicesIter);
THRUST_EXEC(thrust::copy, countIterO, countIterO + nnz, uniqueOffsetsIter);
THRUST_EXEC(thrust::sort_by_key,
indicesIter, indicesIter + nnz,
origIndicesIter, ThrustLTOp<int64_t>()
);
// this forces device-host synchronization!
thrust::pair<thrust_ptr, thrust_ptr> newEnd = THRUST_EXEC(
thrust::unique_by_key,
indicesIter, indicesIter + nnz,
uniqueOffsetsIter
);
int64_t newNnz = newEnd.first - indicesIter;
THCIndexTensor_(resize2d)(state, indices1D, 1, newNnz);
THCTensor *newValues = THCTensor_(new)(state);
THCTensor_(resizeNd)(state, newValues, values->dim(), values->size, NULL);
newValues->size[0] = newNnz;
dim3 grid(THCCeilDiv(newNnz, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
THCSTensor_coalesceValuesKernel<real, accreal><<<grid, block, 0, stream>>>(
THCIndexTensor_(data)(state, uniqueOffsets),
THCIndexTensor_(data)(state, origIndices),
THCTensor_(data)(state, values),
THCTensor_(data)(state, newValues),
nnz,
newNnz,
stride
);
// this grid-strided version is slower but probably more flexible
// to different sizes
// int64_t blockX = min(stride, (int64_t) 512);
// dim3 block(blockX, 512 / blockX);
// int64_t grid = min((int64_t) 1024, THCCeilDiv((int64_t) newNnz * stride, (int64_t) block.x * block.y));
// THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream>>>(
// THCIndexTensor_(data)(state, uniqueOffsets),
// THCIndexTensor_(data)(state, origIndices),
// THCTensor_(data)(state, values),
// THCTensor_(data)(state, newValues),
// nnz,
// newNnz,
// stride
// );
THCIndexTensor_(free)(state, origIndices);
THCIndexTensor_(free)(state, uniqueOffsets);
////////////////////////////////////////////////////////////
// unflatten indices if necessary
THCIndexTensor *newIndices;
if (nDimI == 1) {
newIndices = indices1D;
} else {
newIndices = THCIndexTensor_(newWithSize2d)(state, nDimI, newNnz);
THCIndexTensor *indicesSlice = THCIndexTensor_(new)(state);
if (TH_INDEX_BASE != 0) {
THCIndexTensor_(add)(state, indices1D, indices1D, -1);
}
for (int64_t d = nDimI - 1; d >= 0; d--) {
THCIndexTensor_(select)(state, indicesSlice, newIndices, 0, d);
THCIndexTensor_(copy)(state, indicesSlice, indices1D);
THCIndexTensor_(div)(state, indices1D, indices1D, self->size[d]);
THCIndexTensor_(cadd)(state, indicesSlice, indicesSlice, -self->size[d], indices1D);
}
if (TH_INDEX_BASE != 0) {
THCIndexTensor_(add)(state, newIndices, newIndices, 1);
}
THCIndexTensor_(free)(state, indices1D);
THCIndexTensor_(free)(state, indicesSlice);
}
////////////////////////////////////////////////////////////
THLongStorage *size = THCSTensor_(newSizeOf)(state, self);
THCSTensor *dst = THCSTensor_(newWithTensorAndSize)(state, newIndices, newValues, size);
THLongStorage_free(size);
THCTensor_(free)(state, values);
THCIndexTensor_(free)(state, newIndices);
THCTensor_(free)(state, newValues);
dst->coalesced = 1;
THCudaCheck(cudaGetLastError());
return dst;
#undef THRUST_EXEC
}
// forceClone is intended to be used as a boolean; if set, the result is forced
// to be a clone of self.
THCIndexTensor* THCSTensor_(newFlattenedIndices)(THCState *state, THCSTensor *self, int forceClone) {
THCIndexTensor *indices = THCSTensor_(newIndices)(state, self);
int nDimI = self->nDimensionI;
if (nDimI == 1) {
if (forceClone) {
THCIndexTensor *indices_clone = THCIndexTensor_(newClone)(state, indices);
THCIndexTensor_(free)(state, indices);
return indices_clone;
} else {
return indices;
}
} else {
// FIXME TH_INDEX_BASE
int64_t factor = 1;
THCIndexTensor *indices1D = THCIndexTensor_(newWithSize2d)(state, 1, self->nnz);
THCIndexTensor_(fill)(state, indices1D, TH_INDEX_BASE);
THCIndexTensor *indicesSlice = THCIndexTensor_(new)(state);
for (int64_t d = nDimI - 1; d >= 0; d--) {
THCIndexTensor_(select)(state, indicesSlice, indices, 0, d);
THCIndexTensor_(cadd)(state, indices1D, indices1D, factor, indicesSlice);
if (TH_INDEX_BASE != 0) {
THCIndexTensor_(add)(state, indices1D, indices1D, -TH_INDEX_BASE);
}
factor *= self->size[d];
}
THCIndexTensor_(free)(state, indices);
THCIndexTensor_(free)(state, indicesSlice);
return indices1D;
}
}
// In place transpose
void THCSTensor_(transpose)(THCState *state, THCSTensor *self, int d1, int d2) {
int64_t nDimI = THCSTensor_(nDimensionI)(state, self);
int64_t nDimV = THCSTensor_(nDimensionV)(state, self);
THArgCheck(d1 < nDimI && d2 < nDimI, 1, "Transposed dimensions should be sparse. Got nDimI: %ld, d1: %ld, d2: %ld", nDimI, d1, d2);
THCIndexTensor *indices = THCSTensor_(newIndices)(state, self);
int64_t nnz = THCSTensor_(nnz)(state, self);
THCIndexTensor *buffer = THCIndexTensor_(newWithSize1d)(state, nnz);
THCIndexTensor *slice1 = THCIndexTensor_(newSelect)(state, indices, 0, d1);
THCIndexTensor *slice2 = THCIndexTensor_(newSelect)(state, indices, 0, d2);
THCIndexTensor_(copy)(state, buffer, slice1);
THCIndexTensor_(copy)(state, slice1, slice2);
THCIndexTensor_(copy)(state, slice2, buffer);
int64_t i = self->size[d1];
self->size[d1] = self->size[d2];
self->size[d2] = i;
THCIndexTensor_(free)(state, indices);
THCIndexTensor_(free)(state, buffer);
THCIndexTensor_(free)(state, slice1);
THCIndexTensor_(free)(state, slice2);
}
int THCSTensor_(getDevice)(THCState* state, const THCSTensor* tensor) {
if (!tensor->values || !tensor->values->storage) return -1;
return THCStorage_(getDevice)(state, tensor->values->storage);
}
#endif
|
c8adc7d5c802f1d5af4ef1a12cbf2eab5c14f5e0.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 256, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| c8adc7d5c802f1d5af4ef1a12cbf2eab5c14f5e0.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 256, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
c75f0d59ec4963d97c7ae413dca53d70fee420b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 3
* of the programming guide with some additions like error checking.
*
*/
#ifndef _VectorAdd_KERNEL_H_
#define _VectorAdd_KERNEL_H_
#include "CUDA_Kernels.h"
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
// Host code: launches the vector-add kernel on the device
extern "C" void VecAdd_kernelGPU(
float *d_Src1,
float *d_Src2,
float *d_Dst,
int numElements
)
{
// Launch the Vector Add CUDA Kernel
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
//hipSetDevice(GPU0);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_Src1, d_Src2, d_Dst, numElements);
//err = hipGetLastError();
}
#endif | c75f0d59ec4963d97c7ae413dca53d70fee420b9.cu | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 3
* of the programming guide with some additions like error checking.
*
*/
#ifndef _VectorAdd_KERNEL_H_
#define _VectorAdd_KERNEL_H_
#include "CUDA_Kernels.h"
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
// Host code: launches the vector-add kernel on the device
extern "C" void VecAdd_kernelGPU(
float *d_Src1,
float *d_Src2,
float *d_Dst,
int numElements
)
{
// Launch the Vector Add CUDA Kernel
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
//cudaSetDevice(GPU0);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_Src1, d_Src2, d_Dst, numElements);
//err = cudaGetLastError();
}
#endif |
1cb75bd5fbc799df094a6158004acb1d54a67716.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mmio.h"
#define BlockDim 1024
#define MAX_NUM_THREADS_PER_BLOCK 1024
#define ITER 3
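// Overview of the scheme used below (descriptive summary): rows are assigned
// dynamically -- lane 0 of each warp grabs a batch of 32 / THREADS_PER_VECTOR
// rows by atomically incrementing cudaRowCounter and broadcasts the row index
// with a shuffle -- and each row is then processed by a "vector" of
// THREADS_PER_VECTOR threads that strides over the row's nonzeros and reduces
// the partial sums with warp shuffles. The host wrapper picks the vector width
// (2, 4, 8 or 32) from the mean number of nonzeros per row.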
template <typename T, int THREADS_PER_VECTOR, int MAX_NUM_VECTORS_PER_BLOCK>
__global__ void spmv_light_kernel(int* cudaRowCounter, int* d_ptr, int* d_cols,T* d_val, T* d_vector, T* d_out,int N) {
int i;
T sum;
int row;
int rowStart, rowEnd;
int laneId = threadIdx.x % THREADS_PER_VECTOR; //lane index in the vector
int vectorId = threadIdx.x / THREADS_PER_VECTOR; //vector index in the thread block
int warpLaneId = threadIdx.x & 31; //lane index in the warp
int warpVectorId = warpLaneId / THREADS_PER_VECTOR; //vector index in the warp
__shared__ volatile int space[MAX_NUM_VECTORS_PER_BLOCK][2];
// Get the row index
if (warpLaneId == 0) {
row = atomicAdd(cudaRowCounter, 32 / THREADS_PER_VECTOR);
}
// Broadcast the value to other threads in the same warp and compute the row index of each vector
row = __shfl_sync(0xffffffff,row, 0) + warpVectorId;
while (row < N) {
// Use two threads to fetch the row offset
if (laneId < 2) {
space[vectorId][laneId] = d_ptr[row + laneId];
}
rowStart = space[vectorId][0];
rowEnd = space[vectorId][1];
sum = 0;
// Compute dot product
if (THREADS_PER_VECTOR == 32) {
// Ensure aligned memory access
i = rowStart - (rowStart & (THREADS_PER_VECTOR - 1)) + laneId;
// Process the unaligned part
if (i >= rowStart && i < rowEnd) {
sum += d_val[i] * d_vector[d_cols[i]];
}
// Process the aligned part
for (i += THREADS_PER_VECTOR; i < rowEnd; i += THREADS_PER_VECTOR) {
sum += d_val[i] * d_vector[d_cols[i]];
}
} else {
for (i = rowStart + laneId; i < rowEnd; i +=
THREADS_PER_VECTOR) {
sum += d_val[i] * d_vector[d_cols[i]];
}
}
// Intra-vector reduction
for (i = THREADS_PER_VECTOR >> 1; i > 0; i >>= 1) {
sum += __shfl_down_sync(0xffffffff,sum, i);
}
// Save the results
if (laneId == 0) {
d_out[row] = sum;
}
// Get a new row index
if(warpLaneId == 0){
row = atomicAdd(cudaRowCounter, 32 / THREADS_PER_VECTOR);
}
// Broadcast the row index to the other threads in the same warp and compute the row index of each vector
row = __shfl_sync(0xffffffff,row, 0) + warpVectorId;
}
}
template <typename T>
void spmv_light(MatrixInfo<T> * mat,T *vector,T *out)
{
T *d_vector,*d_val, *d_out;
int *d_cols, *d_ptr;
float time_taken;
double gflop = 2 * (double) mat->nz / 1e9;
float milliseconds = 0;
int meanElementsPerRow = mat->nz/mat->M;
int *cudaRowCounter;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Allocate memory on device
hipMalloc(&d_vector,mat->N*sizeof(T));
hipMalloc(&d_val,mat->nz*sizeof(T));
hipMalloc(&d_out,mat->M*sizeof(T));
hipMalloc(&d_cols,mat->nz*sizeof(int));
hipMalloc(&d_ptr,(mat->M+1)*sizeof(int));
hipMalloc(&cudaRowCounter, sizeof(int));
// Copy from host memory to device memory
hipMemcpy(d_vector,vector,mat->N*sizeof(T),hipMemcpyHostToDevice);
hipMemcpy(d_val,mat->val,mat->nz*sizeof(T),hipMemcpyHostToDevice);
hipMemcpy(d_cols,mat->cIndex,mat->nz*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_ptr,mat->rIndex,(mat->M+1)*sizeof(int),hipMemcpyHostToDevice);
hipMemset(d_out, 0, mat->M*sizeof(T));
hipMemset(cudaRowCounter, 0, sizeof(int));
// Choose the vector size depending on the NNZ/Row, run the kernel and time it
hipEventRecord(start);
if (meanElementsPerRow <= 2) {
for (int i = 0; i < ITER; i++) {
hipLaunchKernelGGL(( spmv_light_kernel<T, 2, MAX_NUM_THREADS_PER_BLOCK / 2>), dim3(ceil(mat->M/(float)BlockDim)), dim3(BlockDim), 0, 0,
cudaRowCounter, d_ptr, d_cols,d_val,d_vector,d_out,mat->M);
hipMemset(cudaRowCounter, 0, sizeof(int));
}
} else if (meanElementsPerRow <= 4) {
for (int i = 0; i < ITER; i++) {
hipLaunchKernelGGL(( spmv_light_kernel<T, 4, MAX_NUM_THREADS_PER_BLOCK / 4>), dim3(ceil(mat->M/(float)BlockDim)), dim3(BlockDim), 0, 0,
cudaRowCounter, d_ptr, d_cols,d_val, d_vector, d_out,mat->M);
hipMemset(cudaRowCounter, 0, sizeof(int));
}
} else if(meanElementsPerRow <= 64) {
for (int i = 0; i < ITER; i++) {
hipLaunchKernelGGL(( spmv_light_kernel<T, 8, MAX_NUM_THREADS_PER_BLOCK / 8>), dim3(ceil(mat->M/(float)BlockDim)), dim3(BlockDim), 0, 0,
cudaRowCounter,d_ptr,d_cols,d_val, d_vector, d_out,mat->M);
hipMemset(cudaRowCounter, 0, sizeof(int));
}
} else {
for (int i = 0; i < ITER; i++){
hipLaunchKernelGGL(( spmv_light_kernel<T, 32, MAX_NUM_THREADS_PER_BLOCK / 32>), dim3(ceil(mat->M/(float)BlockDim)), dim3(BlockDim), 0, 0,
cudaRowCounter, d_ptr, d_cols,d_val, d_vector, d_out,mat->M);
hipMemset(cudaRowCounter, 0, sizeof(int));
}
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
// Copy from device memory to host memory
hipMemcpy(out, d_out, mat->M*sizeof(T), hipMemcpyDeviceToHost);
// Free device memory
hipFree(d_vector);
hipFree(d_val);
hipFree(d_cols);
hipFree(d_ptr);
hipFree(d_out);
// Calculate and print out GFLOPs and GB/s
double gbs = ((mat->N * sizeof(T)) + (mat->nz*sizeof(T)) + (mat->M*sizeof(int)) + (mat->nz*sizeof(int)) + (mat->M*sizeof(T))) / (milliseconds/ITER) / 1e6;
time_taken = (milliseconds/ITER)/1000.0;
printf("Average time taken for %s is %f\n", "SpMV by GPU CSR LightSpMV Algorithm",time_taken);
printf("Average GFLOP/s is %lf\n",gflop/time_taken);
printf("Average GB/s is %lf\n\n",gbs);
}
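// Usage note (descriptive; the call site below is hypothetical): spmv_light
// expects mat in CSR form -- rIndex holds the M + 1 row offsets, cIndex and val
// hold the nz column indices and values -- plus a dense input vector of length
// N and an output buffer of length M, e.g.
// spmv_light<float>(&mat, x, y); // mat: MatrixInfo<float>, x: input, y: output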
| 1cb75bd5fbc799df094a6158004acb1d54a67716.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mmio.h"
#define BlockDim 1024
#define MAX_NUM_THREADS_PER_BLOCK 1024
#define ITER 3
template <typename T, int THREADS_PER_VECTOR, int MAX_NUM_VECTORS_PER_BLOCK>
__global__ void spmv_light_kernel(int* cudaRowCounter, int* d_ptr, int* d_cols,T* d_val, T* d_vector, T* d_out,int N) {
int i;
T sum;
int row;
int rowStart, rowEnd;
int laneId = threadIdx.x % THREADS_PER_VECTOR; //lane index in the vector
int vectorId = threadIdx.x / THREADS_PER_VECTOR; //vector index in the thread block
int warpLaneId = threadIdx.x & 31; //lane index in the warp
int warpVectorId = warpLaneId / THREADS_PER_VECTOR; //vector index in the warp
__shared__ volatile int space[MAX_NUM_VECTORS_PER_BLOCK][2];
// Get the row index
if (warpLaneId == 0) {
row = atomicAdd(cudaRowCounter, 32 / THREADS_PER_VECTOR);
}
// Broadcast the value to other threads in the same warp and compute the row index of each vector
row = __shfl_sync(0xffffffff,row, 0) + warpVectorId;
while (row < N) {
// Use two threads to fetch the row offset
if (laneId < 2) {
space[vectorId][laneId] = d_ptr[row + laneId];
}
rowStart = space[vectorId][0];
rowEnd = space[vectorId][1];
sum = 0;
// Compute dot product
if (THREADS_PER_VECTOR == 32) {
// Ensure aligned memory access
i = rowStart - (rowStart & (THREADS_PER_VECTOR - 1)) + laneId;
// Process the unaligned part
if (i >= rowStart && i < rowEnd) {
sum += d_val[i] * d_vector[d_cols[i]];
}
// Process the aligned part
for (i += THREADS_PER_VECTOR; i < rowEnd; i += THREADS_PER_VECTOR) {
sum += d_val[i] * d_vector[d_cols[i]];
}
} else {
for (i = rowStart + laneId; i < rowEnd; i +=
THREADS_PER_VECTOR) {
sum += d_val[i] * d_vector[d_cols[i]];
}
}
// Intra-vector reduction
for (i = THREADS_PER_VECTOR >> 1; i > 0; i >>= 1) {
sum += __shfl_down_sync(0xffffffff,sum, i);
}
// Save the results
if (laneId == 0) {
d_out[row] = sum;
}
// Get a new row index
if(warpLaneId == 0){
row = atomicAdd(cudaRowCounter, 32 / THREADS_PER_VECTOR);
}
// Broadcast the row index to the other threads in the same warp and compute the row index of each vector
row = __shfl_sync(0xffffffff,row, 0) + warpVectorId;
}
}
template <typename T>
void spmv_light(MatrixInfo<T> * mat,T *vector,T *out)
{
T *d_vector,*d_val, *d_out;
int *d_cols, *d_ptr;
float time_taken;
double gflop = 2 * (double) mat->nz / 1e9;
float milliseconds = 0;
int meanElementsPerRow = mat->nz/mat->M;
int *cudaRowCounter;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Allocate memory on device
cudaMalloc(&d_vector,mat->N*sizeof(T));
cudaMalloc(&d_val,mat->nz*sizeof(T));
cudaMalloc(&d_out,mat->M*sizeof(T));
cudaMalloc(&d_cols,mat->nz*sizeof(int));
cudaMalloc(&d_ptr,(mat->M+1)*sizeof(int));
cudaMalloc(&cudaRowCounter, sizeof(int));
// Copy from host memory to device memory
cudaMemcpy(d_vector,vector,mat->N*sizeof(T),cudaMemcpyHostToDevice);
cudaMemcpy(d_val,mat->val,mat->nz*sizeof(T),cudaMemcpyHostToDevice);
cudaMemcpy(d_cols,mat->cIndex,mat->nz*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_ptr,mat->rIndex,(mat->M+1)*sizeof(int),cudaMemcpyHostToDevice);
cudaMemset(d_out, 0, mat->M*sizeof(T));
cudaMemset(cudaRowCounter, 0, sizeof(int));
// Choose the vector size depending on the NNZ/Row, run the kernel and time it
cudaEventRecord(start);
if (meanElementsPerRow <= 2) {
for (int i = 0; i < ITER; i++) {
spmv_light_kernel<T, 2, MAX_NUM_THREADS_PER_BLOCK / 2><<<ceil(mat->M/(float)BlockDim), BlockDim>>>(
cudaRowCounter, d_ptr, d_cols,d_val,d_vector,d_out,mat->M);
cudaMemset(cudaRowCounter, 0, sizeof(int));
}
} else if (meanElementsPerRow <= 4) {
for (int i = 0; i < ITER; i++) {
spmv_light_kernel<T, 4, MAX_NUM_THREADS_PER_BLOCK / 4><<<ceil(mat->M/(float)BlockDim), BlockDim>>>(
cudaRowCounter, d_ptr, d_cols,d_val, d_vector, d_out,mat->M);
cudaMemset(cudaRowCounter, 0, sizeof(int));
}
} else if(meanElementsPerRow <= 64) {
for (int i = 0; i < ITER; i++) {
spmv_light_kernel<T, 8, MAX_NUM_THREADS_PER_BLOCK / 8><<<ceil(mat->M/(float)BlockDim), BlockDim>>>(
cudaRowCounter,d_ptr,d_cols,d_val, d_vector, d_out,mat->M);
cudaMemset(cudaRowCounter, 0, sizeof(int));
}
} else {
for (int i = 0; i < ITER; i++){
spmv_light_kernel<T, 32, MAX_NUM_THREADS_PER_BLOCK / 32><<<ceil(mat->M/(float)BlockDim), BlockDim>>>(
cudaRowCounter, d_ptr, d_cols,d_val, d_vector, d_out,mat->M);
cudaMemset(cudaRowCounter, 0, sizeof(int));
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
// Copy from device memory to host memory
cudaMemcpy(out, d_out, mat->M*sizeof(T), cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_vector);
cudaFree(d_val);
cudaFree(d_cols);
cudaFree(d_ptr);
cudaFree(d_out);
// Calculate and print out GFLOPs and GB/s
double gbs = ((mat->N * sizeof(T)) + (mat->nz*sizeof(T)) + (mat->M*sizeof(int)) + (mat->nz*sizeof(int)) + (mat->M*sizeof(T))) / (milliseconds/ITER) / 1e6;
time_taken = (milliseconds/ITER)/1000.0;
printf("Average time taken for %s is %f\n", "SpMV by GPU CSR LightSpMV Algorithm",time_taken);
printf("Average GFLOP/s is %lf\n",gflop/time_taken);
printf("Average GB/s is %lf\n\n",gbs);
}
|
94acffc1247105ee2ff29170af562f24ab9f09a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2012 by Jörn Dinkla, www.dinkla.com, All rights reserved.
*/
#include "smooth3.h"
#include "CAccum.h"
/*
__global__
void smooth3d_swap_kernel(
const uchar4* d_input, const CExtent extentInput,
uchar4* d_output, const CExtent extentOutput,
const int windowSize, const int3 offset, const CExtent realExtent) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = ..., const int z = ...
if (extentOutput.inBounds(x, y, z)) {
CAccum acc;
for (int dz = -windowSize; dz <= windowSize; dz ++) {
... dy, dx ...
int nx=x+dx; int ny=y+dy; int nz=z+dz;
if (realExtent.inBoundsStrict(nx, ny, nz)) {
acc.add(d_input[extentInput.index(nx, ny, nz)]);
}
}
d_output[extentOutput.index(x, y, z)] = acc.avg();
}
}
*/
__global__
void smooth3d_swap_kernel(const uchar4* d_input,
const CExtent extentInput,
uchar4* d_output,
const CExtent extentOutput,
const int windowSize,
const int3 offset,
const CExtent realExtent) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int z = blockIdx.z * blockDim.z + threadIdx.z;
if (extentOutput.inBounds(x, y, z)) {
CAccum acc;
for (int dz = -windowSize; dz <= windowSize; dz ++) {
for (int dy = -windowSize; dy <= windowSize; dy ++) {
for (int dx = -windowSize; dx <= windowSize; dx ++) {
const int nx = x + dx + offset.x;
const int ny = y + dy + offset.y;
const int nz = z + dz + offset.z;
if (realExtent.inBoundsStrict(nx, ny, nz)) {
acc.add(d_input[extentInput.index(nx, ny, nz)]);
}
}
}
}
d_output[extentOutput.index(x, y, z)] = acc.avg();
//d_output[extentOutput.index(x, y, z)] = make_uchar4(acc.count, offset.z, 0, 99);
}
}
//d_output[extentOutput.index(x, y, z)] = make_uchar4(acc.count, offset.z, 0, 99);
//d_output[extentOutput.index(x, y, z)] = acc.avg();
//d_output[extentOutput.index(x, y, z)] = make_uchar4(241, 0, 0, 0);
void smooth3d_swap(const CExecConfig& config,
const CDeviceBuffer<uchar4>& input,
const CDeviceBuffer<uchar4>& output,
const int windowSize,
const int3& offset,
const CExtent& realExtent) {
hipLaunchKernelGGL(( smooth3d_swap_kernel), dim3(config.grid),dim3(config.threads),0,config.stream, input.getPtr(), input, output.getPtr(), output, windowSize, offset, realExtent);
}
| 94acffc1247105ee2ff29170af562f24ab9f09a3.cu | /*
* Copyright (c) 2012 by Jörn Dinkla, www.dinkla.com, All rights reserved.
*/
#include "smooth3.h"
#include "CAccum.h"
/*
__global__
void smooth3d_swap_kernel(
const uchar4* d_input, const CExtent extentInput,
uchar4* d_output, const CExtent extentOutput,
const int windowSize, const int3 offset, const CExtent realExtent) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = ..., const int z = ...
if (extentOutput.inBounds(x, y, z)) {
CAccum acc;
for (int dz = -windowSize; dz <= windowSize; dz ++) {
... dy, dx ...
int nx=x+dx; int ny=y+dy; int nz=z+dz;
if (realExtent.inBoundsStrict(nx, ny, nz)) {
acc.add(d_input[extentInput.index(nx, ny, nz)]);
}
}
d_output[extentOutput.index(x, y, z)] = acc.avg();
}
}
*/
__global__
void smooth3d_swap_kernel(const uchar4* d_input,
const CExtent extentInput,
uchar4* d_output,
const CExtent extentOutput,
const int windowSize,
const int3 offset,
const CExtent realExtent) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int z = blockIdx.z * blockDim.z + threadIdx.z;
if (extentOutput.inBounds(x, y, z)) {
CAccum acc;
for (int dz = -windowSize; dz <= windowSize; dz ++) {
for (int dy = -windowSize; dy <= windowSize; dy ++) {
for (int dx = -windowSize; dx <= windowSize; dx ++) {
const int nx = x + dx + offset.x;
const int ny = y + dy + offset.y;
const int nz = z + dz + offset.z;
if (realExtent.inBoundsStrict(nx, ny, nz)) {
acc.add(d_input[extentInput.index(nx, ny, nz)]);
}
}
}
}
d_output[extentOutput.index(x, y, z)] = acc.avg();
//d_output[extentOutput.index(x, y, z)] = make_uchar4(acc.count, offset.z, 0, 99);
}
}
//d_output[extentOutput.index(x, y, z)] = make_uchar4(acc.count, offset.z, 0, 99);
//d_output[extentOutput.index(x, y, z)] = acc.avg();
//d_output[extentOutput.index(x, y, z)] = make_uchar4(241, 0, 0, 0);
void smooth3d_swap(const CExecConfig& config,
const CDeviceBuffer<uchar4>& input,
const CDeviceBuffer<uchar4>& output,
const int windowSize,
const int3& offset,
const CExtent& realExtent) {
smooth3d_swap_kernel<<<config.grid,config.threads,0,config.stream>>>(input.getPtr(), input, output.getPtr(), output, windowSize, offset, realExtent);
}
|
3914bcb2ca6009ff95a63623843b492855f7eeba.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include "cuda_utils.cuh"
__host__ void random_init(data_t* data,
size_t size_in_bytes,
unsigned long long seed) {
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(gen, seed);
hiprandGenerate(gen, (unsigned int*)data,
size_in_bytes / sizeof(unsigned int));
hiprandDestroyGenerator(gen);
}
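// Usage sketch (hypothetical; data_t comes from cuda_utils.cuh and is assumed here to
// be a plain 32-bit element type). hiprandGenerate fills the buffer with uniform
// 32-bit words, so size_in_bytes should be a multiple of sizeof(unsigned int):
//
//   data_t* d_buf = nullptr;
//   hipMalloc(&d_buf, n * sizeof(data_t));
//   random_init(d_buf, n * sizeof(data_t), 1234ULL);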
| 3914bcb2ca6009ff95a63623843b492855f7eeba.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include "cuda_utils.cuh"
__host__ void random_init(data_t* data,
size_t size_in_bytes,
unsigned long long seed) {
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen, seed);
curandGenerate(gen, (unsigned int*)data,
size_in_bytes / sizeof(unsigned int));
curandDestroyGenerator(gen);
}
|
f653e604d03b6254f0acc72c163b6f62b6b17f4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdio.h>
#include <assert.h>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Part3: implement the kernel
__global__ void max_parallel(double *cmax, double *temp,int ndimp, double maxac)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
extern __shared__ double partialResult[];
int i;
partialResult[tid]=0.0;
if(iindex<ndimp)
partialResult[tid]=temp[iindex];
__syncthreads();
// if(temp[iindex]==maxac)
// printf("max here %d %d \n",tid,blockIdx.x);
//if(tid==0)
// printf("sero %d\n",blockIdx.x);
for(unsigned int s=1; s < blockDim.x; s *= 2) {
if ((tid % (2*s)) == 0) {
if(partialResult[tid+s]>partialResult[tid])
partialResult[tid]=partialResult[tid + s];
}
__syncthreads();
}
__syncthreads();
if(tid==0)
{
cmax[blockIdx.x]=partialResult[0];
//temp[blockIdx.x]=partialResult[0];
}
__syncthreads();
}
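// Note on the reduction above: the (tid % (2*s)) == 0 test is the classic
// interleaved-addressing tree reduction; it works, but leaves most threads of a warp
// idle and divergent. A commonly used alternative (sketch only, not what this file
// does) is sequential addressing, which keeps the active threads contiguous:
//
//   for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
//       if (tid < s && partialResult[tid + s] > partialResult[tid])
//           partialResult[tid] = partialResult[tid + s];
//       __syncthreads();
//   }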
/////////////////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// pointer for host memory and size
int *h_a;
double *h_c, *h_temp;
double maxc=-1.0;
double tmax=-1.0;
int dimA = 256 * 1024; // 256K elements (1MB total)
dimA=256*2048;
dimA=2097152;
// pointer for device memory
int *d_b, *d_a;
double *d_c, *d_temp;
// define grid and block size
int numThreadsPerBlock = 128;
// Part 1: compute number of blocks needed based on
// array size and desired block size
int numBlocks = dimA / numThreadsPerBlock;
srand (time(NULL));
// allocate host and device memory
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
h_a = (int *) malloc(memSize);
hipMalloc( (void **) &d_a, memSize );
hipMalloc( (void **) &d_b, memSize );
int smemSize = numThreadsPerBlock * sizeof(double);
size_t dmemSize = numBlocks * numThreadsPerBlock * sizeof(double);
h_c = (double *) malloc(dmemSize);
h_temp = (double *) malloc(dmemSize);
hipMalloc( (void **) &d_c, dmemSize );
hipMalloc( (void **) &d_temp, dmemSize );
int imax;
int ccount=0;
int j=0;
// for( j=0; j<1000; j++)
// {
// tmax=-1;
// Initialize input array on host
for (int i = 0; i < dimA; ++i)
{
h_a[i] = i;
h_c[i]=(rand()%100000000);
if(h_c[i]>tmax)
{
tmax=h_c[i];
imax=i;
}
//printf(" %g ",h_c[i]);
}
printf("\n\n\n %d %f %d\n", dimA, tmax, imax);
// Copy host array to device array
hipMemcpy( d_a, h_a, memSize, hipMemcpyHostToDevice );
hipMemcpy( d_c, h_c, dmemSize, hipMemcpyHostToDevice );
hipMemcpy( d_temp, h_c, dmemSize, hipMemcpyHostToDevice );
// device to host copy
hipMemcpy( h_a, d_b, memSize, hipMemcpyDeviceToHost );
// Check for any CUDA errors
checkCUDAError("memcpy");
for(int i=0;i<numBlocks;i++)
h_temp[i]=0;
hipMemcpy(d_temp, h_temp, numBlocks*sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( max_parallel), dim3(numBlocks),dim3(numThreadsPerBlock),smemSize, 0, d_temp,d_c,dimA,tmax);
hipDeviceSynchronize();
hipMemcpy(h_temp, d_temp, numBlocks*sizeof(double), hipMemcpyDeviceToHost);
for(int i=0;i<numBlocks;i++)
{
if(h_temp[i]>maxc) maxc=h_temp[i];
//printf(" %f ",h_temp[i]);
}
if(maxc==tmax) ccount++;
printf("\n\n\nnumblocks %d %d max=%f %f %d\n",j, numBlocks, maxc, tmax, ccount);
// }
// check if kernel execution generated an error
// Check for any CUDA errors
checkCUDAError("kernel invocation");
// free device memory
hipFree(d_a);
hipFree(d_b);
// free host memory
free(h_a);
// If the program makes it this far, then the results are
// correct and there are no run-time errors. Good work!
printf("Correct!\n");
return 0;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
| f653e604d03b6254f0acc72c163b6f62b6b17f4c.cu | // includes, system
#include <stdio.h>
#include <assert.h>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Part3: implement the kernel
__global__ void max_parallel(double *cmax, double *temp,int ndimp, double maxac)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
extern __shared__ double partialResult[];
int i;
partialResult[tid]=0.0;
if(iindex<ndimp)
partialResult[tid]=temp[iindex];
__syncthreads();
// if(temp[iindex]==maxac)
// printf("max here %d %d \n",tid,blockIdx.x);
//if(tid==0)
// printf("sero %d\n",blockIdx.x);
for(unsigned int s=1; s < blockDim.x; s *= 2) {
if ((tid % (2*s)) == 0) {
if(partialResult[tid+s]>partialResult[tid])
partialResult[tid]=partialResult[tid + s];
}
__syncthreads();
}
__syncthreads();
if(tid==0)
{
cmax[blockIdx.x]=partialResult[0];
//temp[blockIdx.x]=partialResult[0];
}
__syncthreads();
}
/////////////////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// pointer for host memory and size
int *h_a;
double *h_c, *h_temp;
double maxc=-1.0;
double tmax=-1.0;
int dimA = 256 * 1024; // 256K elements (1MB total)
dimA=256*2048;
dimA=2097152;
// pointer for device memory
int *d_b, *d_a;
double *d_c, *d_temp;
// define grid and block size
int numThreadsPerBlock = 128;
// Part 1: compute number of blocks needed based on
// array size and desired block size
int numBlocks = dimA / numThreadsPerBlock;
srand (time(NULL));
// allocate host and device memory
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
h_a = (int *) malloc(memSize);
cudaMalloc( (void **) &d_a, memSize );
cudaMalloc( (void **) &d_b, memSize );
int smemSize = numThreadsPerBlock * sizeof(double);
size_t dmemSize = numBlocks * numThreadsPerBlock * sizeof(double);
h_c = (double *) malloc(dmemSize);
h_temp = (double *) malloc(dmemSize);
cudaMalloc( (void **) &d_c, dmemSize );
cudaMalloc( (void **) &d_temp, dmemSize );
int imax;
int ccount=0;
int j=0;
// for( j=0; j<1000; j++)
// {
// tmax=-1;
// Initialize input array on host
for (int i = 0; i < dimA; ++i)
{
h_a[i] = i;
h_c[i]=(rand()%100000000);
if(h_c[i]>tmax)
{
tmax=h_c[i];
imax=i;
}
//printf(" %g ",h_c[i]);
}
printf("\n\n\n %d %f %d\n", dimA, tmax, imax);
// Copy host array to device array
cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice );
cudaMemcpy( d_c, h_c, dmemSize, cudaMemcpyHostToDevice );
cudaMemcpy( d_temp, h_c, dmemSize, cudaMemcpyHostToDevice );
// device to host copy
cudaMemcpy( h_a, d_b, memSize, cudaMemcpyDeviceToHost );
// Check for any CUDA errors
checkCUDAError("memcpy");
for(int i=0;i<numBlocks;i++)
h_temp[i]=0;
cudaMemcpy(d_temp, h_temp, numBlocks*sizeof(double), cudaMemcpyHostToDevice);
max_parallel<<<numBlocks,numThreadsPerBlock,smemSize>>>(d_temp,d_c,dimA,tmax);
cudaThreadSynchronize();
cudaMemcpy(h_temp, d_temp, numBlocks*sizeof(double), cudaMemcpyDeviceToHost);
for(int i=0;i<numBlocks;i++)
{
if(h_temp[i]>maxc) maxc=h_temp[i];
//printf(" %f ",h_temp[i]);
}
if(maxc==tmax) ccount++;
printf("\n\n\nnumblocks %d %d max=%f %f %d\n",j, numBlocks, maxc, tmax, ccount);
// }
// check if kernel execution generated an error
// Check for any CUDA errors
checkCUDAError("kernel invocation");
// free device memory
cudaFree(d_a);
cudaFree(d_b);
// free host memory
free(h_a);
// If the program makes it this far, then the results are
// correct and there are no run-time errors. Good work!
printf("Correct!\n");
return 0;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
|
4b13049b1ed1cf3529749e1ee991d418211360b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace bbp_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_gSKv3_1bar __attribute__((unused)) = params_.parameters[0];\
auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
_pp_var_m[tid_] = 1.0/( 1.0+exp(( 18.699999999999999-v)* 0.10309278350515465));
}
}
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type a_0_, mRho, ba_0_, ll0_, mInf, ll1_;
ll1_ = 0.;
ll0_ = 0.;
mInf = 1.0/( 1.0+exp(( 18.699999999999999-v)* 0.10309278350515465));
mRho = 0.25*( 1.0+exp((v+ 46.560000000000002)* -0.022655188038060714));
a_0_ = -1.0*mRho;
ba_0_ = mInf*mRho/a_0_;
ll0_ = a_0_*dt;
ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_);
_pp_var_m[tid_] = -ba_0_+(_pp_var_m[tid_]+ba_0_)*ll1_;
}
}
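// Note on advance_state above: with a_0_ = -mRho and ba_0_ = -mInf, the update solves
// dm/dt = mRho*(mInf - m) over one step dt, replacing exp(a_0_*dt) by its (1,1) Pade
// approximant ll1_ = (1 + 0.5*a_0_*dt)/(1 - 0.5*a_0_*dt), i.e. the Crank-Nicolson
// style update m <- mInf + (m - mInf)*ll1_.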
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto ion_k_indexi_ = _pp_var_ion_k_index[tid_];
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type current_ = 0;
arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type ik = 0;
ik = _pp_var_gSKv3_1bar[tid_]*_pp_var_m[tid_]*(v-ek);
current_ = ik;
conductivity_ = _pp_var_gSKv3_1bar[tid_]*_pp_var_m[tid_];
_pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]);
_pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]);
_pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]);
}
}
} // namespace
void mechanism_SKv3_1_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
if (!p->multiplicity) return;
hipLaunchKernelGGL(( multiply), dim3(grid_dim, 1), dim3(block_dim), 0, 0, *p);
}
void mechanism_SKv3_1_gpu_compute_currents_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( compute_currents), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
void mechanism_SKv3_1_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( advance_state), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
void mechanism_SKv3_1_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_SKv3_1_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_SKv3_1_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace bbp_catalogue
} // namespace arb
| 4b13049b1ed1cf3529749e1ee991d418211360b8.cu | #include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace bbp_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_gSKv3_1bar __attribute__((unused)) = params_.parameters[0];\
auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
_pp_var_m[tid_] = 1.0/( 1.0+exp(( 18.699999999999999-v)* 0.10309278350515465));
}
}
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type a_0_, mRho, ba_0_, ll0_, mInf, ll1_;
ll1_ = 0.;
ll0_ = 0.;
mInf = 1.0/( 1.0+exp(( 18.699999999999999-v)* 0.10309278350515465));
mRho = 0.25*( 1.0+exp((v+ 46.560000000000002)* -0.022655188038060714));
a_0_ = -1.0*mRho;
ba_0_ = mInf*mRho/a_0_;
ll0_ = a_0_*dt;
ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_);
_pp_var_m[tid_] = -ba_0_+(_pp_var_m[tid_]+ba_0_)*ll1_;
}
}
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto ion_k_indexi_ = _pp_var_ion_k_index[tid_];
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type current_ = 0;
arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type ik = 0;
ik = _pp_var_gSKv3_1bar[tid_]*_pp_var_m[tid_]*(v-ek);
current_ = ik;
conductivity_ = _pp_var_gSKv3_1bar[tid_]*_pp_var_m[tid_];
_pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]);
_pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]);
_pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]);
}
}
} // namespace
void mechanism_SKv3_1_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
init<<<grid_dim, block_dim>>>(*p);
if (!p->multiplicity) return;
multiply<<<dim3{grid_dim, 1}, block_dim>>>(*p);
}
void mechanism_SKv3_1_gpu_compute_currents_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
compute_currents<<<grid_dim, block_dim>>>(*p);
}
void mechanism_SKv3_1_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
advance_state<<<grid_dim, block_dim>>>(*p);
}
void mechanism_SKv3_1_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_SKv3_1_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_SKv3_1_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace bbp_catalogue
} // namespace arb
|
6ea78a88333f9acc62b2361f7e97da08ae6d772f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This file is subject to the terms and conditions defined in
* file 'LICENSE.txt', which is part of this source code package.
*/
#include <cutil_inline.h>
const int THREADS_PER_BLOCK = 256; // Number of threads per block
const int FILTER_BUFF_LENGTH = 64; // Length of constant memory buffer used for filters coefficients
const int FIXED_ROW = 0;
const int FIXED_COLUMN = 1;
const int FIXED_FRAME = 2;
__constant__ float lowPassFilter[FILTER_BUFF_LENGTH]; // Constant memory buffer used for high pass filter coefficients
__constant__ float highPassFilter[FILTER_BUFF_LENGTH]; // Constant memory buffer used for low pass filter coefficients
__device__ inline int getMappedIndex(int index,
int inputLength,
bool ignoreOddIndex,
bool ignoreEvenIndex) {
int newIndex = -1;
// check if index is in range [0, length)
if(index >= 0 && index < inputLength) {
newIndex = index;
}
else {
if(index < 0) {
while(index < -inputLength) {
index += inputLength;
}
newIndex = inputLength + index;
}
else if(index >= inputLength) {
newIndex = index;
while(newIndex >= inputLength) {
newIndex = newIndex - inputLength;
}
}
}
if(ignoreOddIndex) {
if(abs(newIndex) % 2 == 1) {
return -1;
}
return newIndex;
}
if(ignoreEvenIndex) {
if(abs(newIndex) % 2 == 0) {
return -1;
}
return newIndex;
}
return newIndex;
}
__device__ inline float getInputValue1D(const float *input,
int index,
int inputLength,
int offset = 0,
bool ignoreOddIndex = false,
bool ignoreEvenIndex = false) {
int newIndex = getMappedIndex(index, inputLength, ignoreOddIndex, ignoreEvenIndex);
if(newIndex < 0) {
return 0.0f;
}
if(ignoreOddIndex || ignoreEvenIndex) {
return input[newIndex/2 + offset];
}
return input[newIndex];
}
__device__ inline float getInputValue2D(const float *input,
int x,
int y,
bool fixedRow,
int width,
int currWidth,
int currHeight,
int offset = 0,
bool ignoreOddIndex = false,
bool ignoreEvenIndex = false) {
int newIndex = getMappedIndex(fixedRow ? x : y, fixedRow ? currWidth : currHeight,
ignoreOddIndex, ignoreEvenIndex);
if(newIndex < 0) {
return 0.0f;
}
if(ignoreOddIndex || ignoreEvenIndex) {
if(fixedRow) {
return input[y * width + newIndex/2 + offset];
}
return input[(newIndex/2 + offset) * width + x];
}
return fixedRow ? input[y * width + newIndex] : input[newIndex * width + x];
}
__device__ inline float getInputValue3D(const float *input,
int x,
int y,
int z,
int fixedDim,
int width,
int height,
int currWidth,
int currHeight,
int currFrames,
int offset = 0,
bool ignoreOddIndex = false,
bool ignoreEvenIndex = false) {
int newIndex = -1;
if(fixedDim == FIXED_ROW) {
newIndex = getMappedIndex(x, currWidth, ignoreOddIndex, ignoreEvenIndex);
}
else if(fixedDim == FIXED_COLUMN) {
newIndex = getMappedIndex(y, currHeight, ignoreOddIndex, ignoreEvenIndex);
}
else {
newIndex = getMappedIndex(z, currFrames, ignoreOddIndex, ignoreEvenIndex);
}
if(newIndex < 0) {
return 0.0f;
}
if(ignoreOddIndex || ignoreEvenIndex) {
if(fixedDim == FIXED_ROW) {
return input[z * width * height + y * width + newIndex/2 + offset];
} else if(fixedDim == FIXED_COLUMN) {
return input[z * width * height + (newIndex/2 + offset) * width + x];
} else {
return input[(newIndex/2 + offset) * width * height + y * width + x];
}
}
if(fixedDim == FIXED_ROW) {
return input[z * width * height + y * width + newIndex];
} else if(fixedDim == FIXED_COLUMN) {
return input[z * width * height + newIndex * width + x];
}
return input[newIndex * width * height + y * width + x];
}
__device__ inline float forwardStepLow(float *input,
int inputOffset,
int i,
int analysisLowLength,
int analysisLowFirstIndex) {
float value = 0.0f;
// Convolve with low pass analysis filter - approximation coefficient
for(int j = 0; j < analysisLowLength; ++j) {
int k = (2 * i) + j - analysisLowFirstIndex;
float inputValue = input[k - inputOffset];
if(inputValue == 0) {
continue;
}
value += inputValue * lowPassFilter[j];
}
return value;
}
__device__ inline float forwardStepHigh(float *input,
int inputOffset,
int i,
int analysisHighLength,
int analysisHighFirstIndex) {
float value = 0.0f;
// Convolve with high pass analysis filter - detail coefficient
for(int j = 0; j < analysisHighLength; ++j) {
int k = (2 * i) + j - analysisHighFirstIndex;
float inputValue = input[k - inputOffset];
if(inputValue == 0) {
continue;
}
value += inputValue * highPassFilter[j];
}
return value;
}
__device__ inline float reverseStep(float *lowInput,
float *highInput,
int inputOffset,
int i,
int synthesisLowLength,
int synthesisHighLength,
int synthesisLowFirstIndex,
int synthesisHighFirstIndex) {
float value = 0.0f;
// Convolve with low pass synthesis filter
for(int j = 0; j < synthesisLowLength; ++j) {
int k = i - j + synthesisLowFirstIndex;
float inputValue = lowInput[k - inputOffset];
if(inputValue == 0) {
continue;
}
value += inputValue * lowPassFilter[j];
}
// Convolve with high pass synthesis filter
for(int j = 0; j < synthesisHighLength; ++j) {
int k = i - j + synthesisHighFirstIndex;
float inputValue = highInput[k - inputOffset];
if(inputValue == 0) {
continue;
}
value += inputValue * highPassFilter[j];
}
return value;
}
__device__ __host__ inline int getFilterPadding(int filterLowLength,
int filterHighLength,
int filterLowFirstIndex,
int filterHighFirstIndex) {
int padding = filterLowLength > filterHighLength ? filterLowLength : filterHighLength;
padding -= filterLowFirstIndex < filterHighFirstIndex ? filterLowFirstIndex : filterHighFirstIndex;
return padding;
}
__device__ __host__ inline int getBlockNum(int dataLenght, int blockSize) {
if(dataLenght < blockSize) {
return 1;
}
int blockNum = dataLenght / blockSize;
blockNum += dataLenght % blockSize == 0 ? 0 : 1;
return blockNum;
}
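// Worked example for the two helpers above (hypothetical CDF 9/7 analysis geometry:
// filterLowLength=9, filterHighLength=7, filterLowFirstIndex=-4, filterHighFirstIndex=-3):
//   padding = max(9, 7) - min(-4, -3) = 9 - (-4) = 13
// so each 256-thread block keeps 256 - 13 = 243 compute threads in the forward
// kernels, and getBlockNum(length, 2 * 243) gives the number of blocks per row.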
__global__ void forwardTransform1D(float *input,
float *output,
int length,
int analysisLowLength,
int analysisHighLength,
int analysisLowFirstIndex,
int analysisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(analysisLowLength, analysisHighLength, analysisLowFirstIndex, analysisHighFirstIndex);
int computeThreadGroupSize = THREADS_PER_BLOCK - padding; // number of threads used to calculate dwt
int computeThreadGroupOffset = 2 * blockIdx.x * computeThreadGroupSize; // index of first compute thread
int threadLoadGroupOffset = computeThreadGroupOffset - padding; // index of first load thread
int i = computeThreadGroupOffset / 2 + threadIdx.x - padding; // current thread compute element index
// Load input data to shared memory; Each thread loads two elements
__shared__ float sharedInput[2 * THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
sharedInput[2 * threadIdx.x] = getInputValue1D(input, threadLoadGroupOffset + 2 * threadIdx.x, length);
sharedInput[2 * threadIdx.x + 1] = getInputValue1D(input, threadLoadGroupOffset + 2 * threadIdx.x + 1, length);
__syncthreads();
// Threads with id lower than padding are used only for loading data into shared memory
if(threadIdx.x < padding) {
return;
}
int lowLength = (length + 1) / 2; // Low subband length
int inputOffset = computeThreadGroupOffset - padding;
// Check if outcome index is lower than low subband length
if(i >= lowLength) {
return;
}
output[i] = forwardStepLow(sharedInput, inputOffset, i, analysisLowLength, analysisLowFirstIndex);
// Check if outcome index is lower than low subband length
if(i + lowLength >= length) {
return;
}
output[i + lowLength] = forwardStepHigh(sharedInput, inputOffset, i, analysisHighLength, analysisHighFirstIndex);
}
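// Host-side launch sketch for forwardTransform1D (hypothetical; assumes the analysis
// taps were already copied into lowPassFilter/highPassFilter with hipMemcpyToSymbol
// and that d_in/d_out hold `length` floats in device memory):
//
//   int padding = getFilterPadding(lowLen, highLen, lowFirst, highFirst);
//   int blocks  = getBlockNum(length, 2 * (THREADS_PER_BLOCK - padding));
//   hipLaunchKernelGGL(forwardTransform1D, dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0,
//                      d_in, d_out, length, lowLen, highLen, lowFirst, highFirst);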
__global__ void reverseTransform1D(float *input,
float *output,
int length,
int synthesisLowLength,
int synthesisHighLength,
int synthesisLowFirstIndex,
int synthesisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(synthesisLowLength, synthesisHighLength, synthesisLowFirstIndex, synthesisHighFirstIndex);
int threadComputeGroupOffset = blockIdx.x * (THREADS_PER_BLOCK - 2 * padding); // index of first compute thread
int threadLoadGroupOffset = threadComputeGroupOffset - padding; // index of first load thread
int loadThreadId = threadLoadGroupOffset + threadIdx.x; // current thread load element index
int i = threadComputeGroupOffset + threadIdx.x - padding; // current thread compute element index
bool analysisIgnoreEven = synthesisHighLength % 2 == 0; // Check whether to ignore even or odd input values
int lowLength = (length + 1) / 2; // Low subband length
int highLength = length - lowLength; // High subband length
int inputOffset = threadComputeGroupOffset - padding;
// Load input data to shared memory; Each thread one element for low pass and one element for high pass
__shared__ float lowInput [THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
__shared__ float highInput[THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
lowInput[threadIdx.x] = getInputValue1D(input, loadThreadId, 2 * lowLength, 0, true);
highInput[threadIdx.x] = getInputValue1D(input, loadThreadId, 2 * highLength, lowLength,
analysisIgnoreEven, !analysisIgnoreEven);
__syncthreads();
// Check if outcome index is lower than low subband length and if thread is compute thread
// (not used only to load data into shared memory)
if(i >= length || threadIdx.x < padding || threadIdx.x >= THREADS_PER_BLOCK - padding) {
return;
}
output[i] = reverseStep(lowInput, highInput, inputOffset, i, synthesisLowLength, synthesisHighLength,
synthesisLowFirstIndex, synthesisHighFirstIndex);
}
__global__ void forwardTransform2DRow(float *input,
float *output,
int width,
int currWidth,
int currHeight,
int analysisLowLength,
int analysisHighLength,
int analysisLowFirstIndex,
int analysisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(analysisLowLength, analysisHighLength, analysisLowFirstIndex, analysisHighFirstIndex);
int computeThreadGroupSize = THREADS_PER_BLOCK - padding; // number of threads used to calculate dwt
int computeThreadGroupOffset = 2 * blockIdx.x * computeThreadGroupSize; // index of first compute thread
int threadLoadGroupOffset = computeThreadGroupOffset - padding; // index of first load thread
int x = computeThreadGroupOffset / 2 + threadIdx.x - padding; // current thread compute element x index
int y = blockIdx.y;
int threadLoadIndex = threadLoadGroupOffset + 2 * threadIdx.x;
// Load input data to shared memory; Each thread loads two elements
__shared__ float sharedInput[2 * THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
sharedInput[2 * threadIdx.x] = getInputValue2D(input, threadLoadIndex, y, true, width, currWidth, currHeight);
sharedInput[2 * threadIdx.x + 1] = getInputValue2D(input, threadLoadIndex + 1, y, true, width, currWidth, currHeight);
__syncthreads();
// Threads with id lower than padding are used only for loading data into shared memory
if(threadIdx.x < padding) {
return;
}
int lowLength = (currWidth + 1) / 2; // Low subband length
int inputOffset = computeThreadGroupOffset - padding;
// Check if outcome index is lower than low subband length
if(x >= lowLength) {
return;
}
output[y * width + x] = forwardStepLow(sharedInput, inputOffset, x, analysisLowLength, analysisLowFirstIndex);
// Check if outcome index is lower than low subband length
if(x + lowLength >= currWidth) {
return;
}
output[y * width + x + lowLength] = forwardStepHigh(sharedInput, inputOffset, x, analysisHighLength, analysisHighFirstIndex);
}
__global__ void forwardTransform2DColumn(float *input,
float *output,
int width,
int currWidth,
int currHeight,
int analysisLowLength,
int analysisHighLength,
int analysisLowFirstIndex,
int analysisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(analysisLowLength, analysisHighLength, analysisLowFirstIndex, analysisHighFirstIndex);
int computeThreadGroupSize = THREADS_PER_BLOCK - padding; // number of threads used to calculate dwt
int computeThreadGroupOffset = 2 * blockIdx.y * computeThreadGroupSize; // index of first compute thread
int threadLoadGroupOffset = computeThreadGroupOffset - padding; // index of first load thread
int y = computeThreadGroupOffset / 2 + threadIdx.y - padding; // current thread compute element y index
int x = blockIdx.x;
int threadLoadIndex = threadLoadGroupOffset + 2 * threadIdx.y;
// Load input data to shared memory; Each thread loads two elements
__shared__ float sharedInput[2 * THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
sharedInput[2 * threadIdx.y] = getInputValue2D(input, x, threadLoadIndex, false, width, currWidth, currHeight);
sharedInput[2 * threadIdx.y + 1] = getInputValue2D(input, x, threadLoadIndex + 1, false, width, currWidth, currHeight);
__syncthreads();
// Threads with id lower than padding are used only for loading data into shared memory
if(threadIdx.y < padding) {
return;
}
int lowLength = (currHeight + 1) / 2; // Low subband length
int inputOffset = computeThreadGroupOffset - padding;
// Check if outcome index is lower than low subband length
if(y >= lowLength) {
return;
}
output[y * width + x] = forwardStepLow(sharedInput, inputOffset, y, analysisLowLength, analysisLowFirstIndex);
// Check if outcome index is lower than low subband length
if(y + lowLength >= currHeight) {
return;
}
output[(y + lowLength) * width + x] = forwardStepHigh(sharedInput, inputOffset, y, analysisHighLength, analysisHighFirstIndex);
}
__global__ void reverseTransform2DRow(float *input,
float *output,
int width,
int currWidth,
int currHeight,
int synthesisLowLength,
int synthesisHighLength,
int synthesisLowFirstIndex,
int synthesisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(synthesisLowLength, synthesisHighLength, synthesisLowFirstIndex, synthesisHighFirstIndex);
int threadComputeGroupOffset = blockIdx.x * (THREADS_PER_BLOCK - 2 * padding); // index of first compute thread
int threadLoadGroupOffset = threadComputeGroupOffset - padding; // index of first load thread
int loadThreadId = threadLoadGroupOffset + threadIdx.x; // current thread load element index
int i = threadComputeGroupOffset + threadIdx.x - padding; // current thread compute element index
int y = blockIdx.y;
bool analysisIgnoreEven = synthesisHighLength % 2 == 0; // Check whether to ignore even or odd input values
int lowLength = (currWidth + 1) / 2; // Low subband length
int highLength = currWidth - lowLength; // High subband length
int inputOffset = threadComputeGroupOffset - padding;
// Load input data to shared memory; Each thread one element for low pass and one element for high pass
__shared__ float lowInput [THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
__shared__ float highInput[THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
lowInput[threadIdx.x] = getInputValue2D(input, loadThreadId, y, true, width, 2 * lowLength, currHeight, 0, true);
highInput[threadIdx.x] = getInputValue2D(input, loadThreadId, y, true, width, 2 * highLength, currHeight, lowLength,
analysisIgnoreEven, !analysisIgnoreEven);
__syncthreads();
// Check if outcome index is lower than low subband length and if thread is compute thread
// (not used only to load data into shared memory)
if(i >= currWidth || threadIdx.x < padding || threadIdx.x >= THREADS_PER_BLOCK - padding) {
return;
}
output[y * width + i] = reverseStep(lowInput, highInput, inputOffset, i, synthesisLowLength, synthesisHighLength,
synthesisLowFirstIndex, synthesisHighFirstIndex);
}
__global__ void reverseTransform2DColumn(float *input,
float *output,
int width,
int currWidth,
int currHeight,
int synthesisLowLength,
int synthesisHighLength,
int synthesisLowFirstIndex,
int synthesisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(synthesisLowLength, synthesisHighLength, synthesisLowFirstIndex, synthesisHighFirstIndex);
int threadComputeGroupOffset = blockIdx.y* (THREADS_PER_BLOCK - 2 * padding); // index of first compute thread
int threadLoadGroupOffset = threadComputeGroupOffset - padding; // index of first load thread
int loadThreadId = threadLoadGroupOffset + threadIdx.y; // current thread load element index
int y = threadComputeGroupOffset + threadIdx.y - padding; // current thread compute element index
int x = blockIdx.x;
bool analysisIgnoreEven = synthesisHighLength % 2 == 0; // Check whether to ignore even or odd input values
int lowLength = (currHeight + 1) / 2; // Low subband length
int highLength = currHeight - lowLength; // High subband length
int inputOffset = threadComputeGroupOffset - padding;
// Load input data to shared memory; Each thread one element for low pass and one element for high pass
__shared__ float lowInput [THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
__shared__ float highInput[THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
lowInput[threadIdx.y] = getInputValue2D(input, x, loadThreadId, false, width, currWidth, 2 * lowLength, 0, true);
highInput[threadIdx.y] = getInputValue2D(input, x, loadThreadId, false, width, currWidth, 2 * highLength, lowLength,
analysisIgnoreEven, !analysisIgnoreEven);
__syncthreads();
// Check if outcome index is lower than low subband length and if thread is compute thread
// (not used only to load data into shared memory)
if(y >= currHeight || threadIdx.y < padding || threadIdx.y >= THREADS_PER_BLOCK - padding) {
return;
}
output[y * width + x] = reverseStep(lowInput, highInput, inputOffset, y, synthesisLowLength, synthesisHighLength,
synthesisLowFirstIndex, synthesisHighFirstIndex);
}
__global__ void forwardTransform3DRow(float *input,
float *output,
int width,
int height,
int currWidth,
int currHeight,
int currFrames,
int analysisLowLength,
int analysisHighLength,
int analysisLowFirstIndex,
int analysisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(analysisLowLength, analysisHighLength, analysisLowFirstIndex, analysisHighFirstIndex);
int computeThreadCount = 2 * (THREADS_PER_BLOCK - padding);
int blockNumX = getBlockNum(currWidth, computeThreadCount);
int computeThreadGroupSize = THREADS_PER_BLOCK - padding; // number of threads used to calculate dwt
int computeThreadGroupOffset = 2 * (blockIdx.x % blockNumX) * computeThreadGroupSize; // index of first compute thread
int threadLoadGroupOffset = computeThreadGroupOffset - padding; // index of first load thread
int x = computeThreadGroupOffset / 2 + threadIdx.x - padding; // current thread compute element x index
int y = blockIdx.x / blockNumX;
int z = blockIdx.y;
int threadLoadIndex = threadLoadGroupOffset + 2 * threadIdx.x;
// Load input data to shared memory; Each thread loads two elements
__shared__ float sharedInput[2 * THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
sharedInput[2 * threadIdx.x] = getInputValue3D(input, threadLoadIndex, y, z, FIXED_ROW, width, height,
currWidth, currHeight, currFrames);
sharedInput[2 * threadIdx.x + 1] = getInputValue3D(input, threadLoadIndex + 1, y, z, FIXED_ROW, width, height,
currWidth, currHeight, currFrames);
__syncthreads();
// Threads with id lower than padding are used only for loading data into shared memory
if(threadIdx.x < padding) {
return;
}
int lowLength = (currWidth + 1) / 2; // Low subband length
int inputOffset = computeThreadGroupOffset - padding;
// Check if outcome index is lower than low subband length
if(x >= lowLength) {
return;
}
output[z * width * height + y * width + x] = forwardStepLow(sharedInput, inputOffset, x, analysisLowLength,
analysisLowFirstIndex);
// Check if outcome index is lower than low subband length
if(x + lowLength >= currWidth) {
return;
}
output[z * width * height + y * width + x + lowLength] = forwardStepHigh(sharedInput, inputOffset, x,
analysisHighLength, analysisHighFirstIndex);
}
__global__ void forwardTransform3DColumn(float *input,
float *output,
int width,
int height,
int currWidth,
int currHeight,
int currFrames,
int analysisLowLength,
int analysisHighLength,
int analysisLowFirstIndex,
int analysisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(analysisLowLength, analysisHighLength, analysisLowFirstIndex, analysisHighFirstIndex);
int computeThreadCount = 2 * (THREADS_PER_BLOCK - padding);
int blockNumX = getBlockNum(currHeight, computeThreadCount);
int computeThreadGroupSize = THREADS_PER_BLOCK - padding; // number of threads used to calculate dwt
int computeThreadGroupOffset = 2 * (blockIdx.x % blockNumX) * computeThreadGroupSize; // index of first compute thread
int threadLoadGroupOffset = computeThreadGroupOffset - padding; // index of first load thread
int y = computeThreadGroupOffset / 2 + threadIdx.x - padding; // current thread compute element x index
int x = blockIdx.x / blockNumX;
int z = blockIdx.y;
int threadLoadIndex = threadLoadGroupOffset + 2 * threadIdx.x;
// Load input data to shared memory; Each thread loads two elements
__shared__ float sharedInput[2 * THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
sharedInput[2 * threadIdx.x] = getInputValue3D(input, x, threadLoadIndex, z, FIXED_COLUMN, width, height,
currWidth, currHeight, currFrames);
sharedInput[2 * threadIdx.x + 1] = getInputValue3D(input, x, threadLoadIndex + 1, z, FIXED_COLUMN, width, height,
currWidth, currHeight, currFrames);
__syncthreads();
// Threads with id lower than padding are used only for loading data into shared memory
if(threadIdx.x < padding) {
return;
}
int lowLength = (currHeight + 1) / 2; // Low subband length
int inputOffset = computeThreadGroupOffset - padding;
// Check if outcome index is lower than low subband length
if(y >= lowLength) {
return;
}
output[z * width * height + y * width + x] = forwardStepLow(sharedInput, inputOffset, y, analysisLowLength,
analysisLowFirstIndex);
// Check if outcome index is lower than low subband length
if(y + lowLength >= currHeight) {
return;
}
output[z * width * height + (y + lowLength) * width + x] = forwardStepHigh(sharedInput, inputOffset, y,
analysisHighLength, analysisHighFirstIndex);
}
__global__ void forwardTransform3DFrame(float *input,
float *output,
int width,
int height,
int currWidth,
int currHeight,
int currFrames,
int analysisLowLength,
int analysisHighLength,
int analysisLowFirstIndex,
int analysisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(analysisLowLength, analysisHighLength, analysisLowFirstIndex, analysisHighFirstIndex);
int computeThreadCount = 2 * (THREADS_PER_BLOCK - padding);
int blockNumX = getBlockNum(currFrames, computeThreadCount);
int computeThreadGroupSize = THREADS_PER_BLOCK - padding; // number of threads used to calculate dwt
int computeThreadGroupOffset = 2 * (blockIdx.x % blockNumX) * computeThreadGroupSize; // index of first compute thread
int threadLoadGroupOffset = computeThreadGroupOffset - padding; // index of first load thread
int z = computeThreadGroupOffset / 2 + threadIdx.x - padding; // current thread compute element x index
int x = blockIdx.x / blockNumX;
int y = blockIdx.y;
int threadLoadIndex = threadLoadGroupOffset + 2 * threadIdx.x;
// Load input data to shared memory; Each thread loads two elements
__shared__ float sharedInput[2 * THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
sharedInput[2 * threadIdx.x] = getInputValue3D(input, x, y, threadLoadIndex, FIXED_FRAME, width, height,
currWidth, currHeight, currFrames);
sharedInput[2 * threadIdx.x + 1] = getInputValue3D(input, x, y, threadLoadIndex + 1, FIXED_FRAME, width, height,
currWidth, currHeight, currFrames);
__syncthreads();
// Threads with id lower than padding are used only for loading data into shared memory
if(threadIdx.x < padding) {
return;
}
int lowLength = (currFrames + 1) / 2; // Low subband length
int inputOffset = computeThreadGroupOffset - padding;
// Check if outcome index is lower than low subband length
if(z >= lowLength) {
return;
}
output[z * width * height + y * width + x] = forwardStepLow(sharedInput, inputOffset, z, analysisLowLength,
analysisLowFirstIndex);
// Check if outcome index is lower than low subband length
if(z + lowLength >= currFrames) {
return;
}
output[(z + lowLength) * width * height + y * width + x] = forwardStepHigh(sharedInput, inputOffset, z,
analysisHighLength, analysisHighFirstIndex);
}
__global__ void reverseTransform3DRow(float *input,
float *output,
int width,
int height,
int currWidth,
int currHeight,
int currFrames,
int synthesisLowLength,
int synthesisHighLength,
int synthesisLowFirstIndex,
int synthesisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(synthesisLowLength, synthesisHighLength, synthesisLowFirstIndex, synthesisHighFirstIndex);
int computeThreadCount = THREADS_PER_BLOCK - 2 * padding;
int blockNumX = getBlockNum(currWidth, computeThreadCount);
int threadComputeGroupOffset = (blockIdx.x % blockNumX) * (THREADS_PER_BLOCK - 2 * padding); // index of first compute thread
int threadLoadGroupOffset = threadComputeGroupOffset - padding; // index of first load thread
int loadThreadId = threadLoadGroupOffset + threadIdx.x; // current thread load element index
int x = threadComputeGroupOffset + threadIdx.x - padding; // current thread compute element index
int y = blockIdx.x / blockNumX;
int z = blockIdx.y;
bool analysisIgnoreEven = synthesisHighLength % 2 == 0; // Check whether to ignore even or odd input values
int lowLength = (currWidth + 1) / 2; // Low subband length
int highLength = currWidth - lowLength; // High subband length
int inputOffset = threadComputeGroupOffset - padding;
// Load input data to shared memory; Each thread one element for low pass and one element for high pass
__shared__ float lowInput [THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
__shared__ float highInput[THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
lowInput[threadIdx.x] = getInputValue3D(input, loadThreadId, y, z, FIXED_ROW, width, height, 2 * lowLength,
currHeight, currFrames, 0, true);
highInput[threadIdx.x] = getInputValue3D(input, loadThreadId, y, z, FIXED_ROW, width, height, 2 * highLength,
currHeight, currFrames, lowLength, analysisIgnoreEven, !analysisIgnoreEven);
__syncthreads();
// Check if outcome index is lower than low subband length and if thread is compute thread
// (not used only to load data into shared memory)
if(x >= currWidth || threadIdx.x < padding || threadIdx.x >= THREADS_PER_BLOCK - padding) {
return;
}
output[z * width * height + y * width + x] = reverseStep(lowInput, highInput, inputOffset, x, synthesisLowLength, synthesisHighLength,
synthesisLowFirstIndex, synthesisHighFirstIndex);
}
__global__ void reverseTransform3DColumn(float *input,
float *output,
int width,
int height,
int currWidth,
int currHeight,
int currFrames,
int synthesisLowLength,
int synthesisHighLength,
int synthesisLowFirstIndex,
int synthesisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(synthesisLowLength, synthesisHighLength, synthesisLowFirstIndex, synthesisHighFirstIndex);
int computeThreadCount = THREADS_PER_BLOCK - 2 * padding;
int blockNumX = getBlockNum(currHeight, computeThreadCount);
int threadComputeGroupOffset = (blockIdx.x % blockNumX) * (THREADS_PER_BLOCK - 2 * padding); // index of first compute thread
int threadLoadGroupOffset = threadComputeGroupOffset - padding; // index of first load thread
int loadThreadId = threadLoadGroupOffset + threadIdx.x; // current thread load element index
int y = threadComputeGroupOffset + threadIdx.x - padding; // current thread compute element index
int x = blockIdx.x / blockNumX;
int z = blockIdx.y;
bool analysisIgnoreEven = synthesisHighLength % 2 == 0; // Check whether to ignore even or odd input values
int lowLength = (currHeight + 1) / 2; // Low subband length
int highLength = currHeight - lowLength; // High subband length
int inputOffset = threadComputeGroupOffset - padding;
// Load input data to shared memory; Each thread one element for low pass and one element for high pass
__shared__ float lowInput [THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
__shared__ float highInput[THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
lowInput[threadIdx.x] = getInputValue3D(input, x, loadThreadId, z, FIXED_COLUMN, width, height, currWidth,
2 * lowLength, currFrames, 0, true);
highInput[threadIdx.x] = getInputValue3D(input, x, loadThreadId, z, FIXED_COLUMN, width, height, currWidth,
2 * highLength, currFrames, lowLength, analysisIgnoreEven, !analysisIgnoreEven);
__syncthreads();
// Check if outcome index is lower than low subband length and if thread is compute thread
// (not used only to load data into shared memory)
if(y >= currHeight || threadIdx.x < padding || threadIdx.x >= THREADS_PER_BLOCK - padding) {
return;
}
output[z * width * height + y * width + x] = reverseStep(lowInput, highInput, inputOffset, y, synthesisLowLength, synthesisHighLength,
synthesisLowFirstIndex, synthesisHighFirstIndex);
}
__global__ void reverseTransform3DFrame(float *input,
float *output,
int width,
int height,
int currWidth,
int currHeight,
int currFrames,
int synthesisLowLength,
int synthesisHighLength,
int synthesisLowFirstIndex,
int synthesisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(synthesisLowLength, synthesisHighLength, synthesisLowFirstIndex, synthesisHighFirstIndex);
int computeThreadCount = THREADS_PER_BLOCK - 2 * padding;
int blockNumX = getBlockNum(currFrames, computeThreadCount);
int threadComputeGroupOffset = (blockIdx.x % blockNumX) * (THREADS_PER_BLOCK - 2 * padding); // index of first compute thread
int threadLoadGroupOffset = threadComputeGroupOffset - padding; // index of first load thread
int loadThreadId = threadLoadGroupOffset + threadIdx.x; // current thread load element index
int z = threadComputeGroupOffset + threadIdx.x - padding; // current thread compute element index
int x = blockIdx.x / blockNumX;
int y = blockIdx.y;
bool analysisIgnoreEven = synthesisHighLength % 2 == 0; // Check whether to ignore even or odd input values
int lowLength = (currFrames + 1) / 2; // Low subband length
int highLength = currFrames - lowLength; // High subband length
int inputOffset = threadComputeGroupOffset - padding;
// Load input data to shared memory; Each thread one element for low pass and one element for high pass
__shared__ float lowInput [THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
__shared__ float highInput[THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
lowInput[threadIdx.x] = getInputValue3D(input, x, y, loadThreadId, FIXED_FRAME, width, height, currWidth,
currHeight, 2 * lowLength, 0, true);
highInput[threadIdx.x] = getInputValue3D(input, x, y, loadThreadId, FIXED_FRAME, width, height, currWidth,
currHeight, 2 * highLength, lowLength, analysisIgnoreEven, !analysisIgnoreEven);
__syncthreads();
// Check if outcome index is lower than low subband length and if thread is compute thread
// (not used only to load data into shared memory)
if(z >= currFrames || threadIdx.x < padding || threadIdx.x >= THREADS_PER_BLOCK - padding) {
return;
}
output[z * width * height + y * width + x] = reverseStep(lowInput, highInput, inputOffset, z, synthesisLowLength, synthesisHighLength,
synthesisLowFirstIndex, synthesisHighFirstIndex);
}
| 6ea78a88333f9acc62b2361f7e97da08ae6d772f.cu | /*
* This file is subject to the terms and conditions defined in
* file 'LICENSE.txt', which is part of this source code package.
*/
#include <cutil_inline.h>
const int THREADS_PER_BLOCK = 256; // Number of threads per block
const int FILTER_BUFF_LENGTH = 64; // Length of constant memory buffer used for filters coefficients
const int FIXED_ROW = 0;
const int FIXED_COLUMN = 1;
const int FIXED_FRAME = 2;
__constant__ float lowPassFilter[FILTER_BUFF_LENGTH]; // Constant memory buffer used for high pass filter coefficients
__constant__ float highPassFilter[FILTER_BUFF_LENGTH]; // Constant memory buffer used for low pass filter coefficients
__device__ inline int getMappedIndex(int index,
int inputLength,
bool ignoreOddIndex,
bool ignoreEvenIndex) {
int newIndex = -1;
// check if index is in range [0, length)
if(index >= 0 && index < inputLength) {
newIndex = index;
}
else {
if(index < 0) {
while(index < -inputLength) {
index += inputLength;
}
newIndex = inputLength + index;
}
else if(index >= inputLength) {
newIndex = index;
while(newIndex >= inputLength) {
newIndex = newIndex - inputLength;
}
}
}
if(ignoreOddIndex) {
if(abs(newIndex) % 2 == 1) {
return -1;
}
return newIndex;
}
if(ignoreEvenIndex) {
if(abs(newIndex) % 2 == 0) {
return -1;
}
return newIndex;
}
return newIndex;
}
__device__ inline float getInputValue1D(const float *input,
int index,
int inputLength,
int offset = 0,
bool ignoreOddIndex = false,
bool ignoreEvenIndex = false) {
int newIndex = getMappedIndex(index, inputLength, ignoreOddIndex, ignoreEvenIndex);
if(newIndex < 0) {
return 0.0f;
}
if(ignoreOddIndex || ignoreEvenIndex) {
return input[newIndex/2 + offset];
}
return input[newIndex];
}
__device__ inline float getInputValue2D(const float *input,
int x,
int y,
bool fixedRow,
int width,
int currWidth,
int currHeight,
int offset = 0,
bool ignoreOddIndex = false,
bool ignoreEvenIndex = false) {
int newIndex = getMappedIndex(fixedRow ? x : y, fixedRow ? currWidth : currHeight,
ignoreOddIndex, ignoreEvenIndex);
if(newIndex < 0) {
return 0.0f;
}
if(ignoreOddIndex || ignoreEvenIndex) {
if(fixedRow) {
return input[y * width + newIndex/2 + offset];
}
return input[(newIndex/2 + offset) * width + x];
}
return fixedRow ? input[y * width + newIndex] : input[newIndex * width + x];
}
__device__ inline float getInputValue3D(const float *input,
int x,
int y,
int z,
int fixedDim,
int width,
int height,
int currWidth,
int currHeight,
int currFrames,
int offset = 0,
bool ignoreOddIndex = false,
bool ignoreEvenIndex = false) {
int newIndex = -1;
if(fixedDim == FIXED_ROW) {
newIndex = getMappedIndex(x, currWidth, ignoreOddIndex, ignoreEvenIndex);
}
else if(fixedDim == FIXED_COLUMN) {
newIndex = getMappedIndex(y, currHeight, ignoreOddIndex, ignoreEvenIndex);
}
else {
newIndex = getMappedIndex(z, currFrames, ignoreOddIndex, ignoreEvenIndex);
}
if(newIndex < 0) {
return 0.0f;
}
if(ignoreOddIndex || ignoreEvenIndex) {
if(fixedDim == FIXED_ROW) {
return input[z * width * height + y * width + newIndex/2 + offset];
} else if(fixedDim == FIXED_COLUMN) {
return input[z * width * height + (newIndex/2 + offset) * width + x];
} else {
return input[(newIndex/2 + offset) * width * height + y * width + x];
}
}
if(fixedDim == FIXED_ROW) {
return input[z * width * height + y * width + newIndex];
} else if(fixedDim == FIXED_COLUMN) {
return input[z * width * height + newIndex * width + x];
}
return input[newIndex * width * height + y * width + x];
}
__device__ inline float forwardStepLow(float *input,
int inputOffset,
int i,
int analysisLowLength,
int analysisLowFirstIndex) {
float value = 0.0f;
// Convolve with low pass analysis filter - aproximation coefficient
for(int j = 0; j < analysisLowLength; ++j) {
int k = (2 * i) + j - analysisLowFirstIndex;
float inputValue = input[k - inputOffset];
if(inputValue == 0) {
continue;
}
value += inputValue * lowPassFilter[j];
}
return value;
}
__device__ inline float forwardStepHigh(float *input,
int inputOffset,
int i,
int analysisHighLength,
int analysisHighFirstIndex) {
float value = 0.0f;
// Convolve with high pass analysis filter - detail coefficient
for(int j = 0; j < analysisHighLength; ++j) {
int k = (2 * i) + j - analysisHighFirstIndex;
float inputValue = input[k - inputOffset];
if(inputValue == 0) {
continue;
}
value += inputValue * highPassFilter[j];
}
return value;
}
__device__ inline float reverseStep(float *lowInput,
float *highInput,
int inputOffset,
int i,
int synthesisLowLength,
int synthesisHighLength,
int synthesisLowFirstIndex,
int synthesisHighFirstIndex) {
float value = 0.0f;
// Convolve with low pass synthesis filter
for(int j = 0; j < synthesisLowLength; ++j) {
int k = i - j + synthesisLowFirstIndex;
float inputValue = lowInput[k - inputOffset];
if(inputValue == 0) {
continue;
}
value += inputValue * lowPassFilter[j];
}
// Convolve with high pass synthesis filter
for(int j = 0; j < synthesisHighLength; ++j) {
int k = i - j + synthesisHighFirstIndex;
float inputValue = highInput[k - inputOffset];
if(inputValue == 0) {
continue;
}
value += inputValue * highPassFilter[j];
}
return value;
}
__device__ __host__ inline int getFilterPadding(int filterLowLength,
int filterHighLength,
int filterLowFirstIndex,
int filterHighFirstIndex) {
int padding = filterLowLength > filterHighLength ? filterLowLength : filterHighLength;
padding -= filterLowFirstIndex < filterHighFirstIndex ? filterLowFirstIndex : filterHighFirstIndex;
return padding;
}
__device__ __host__ inline int getBlockNum(int dataLength, int blockSize) {
if(dataLength < blockSize) {
return 1;
}
int blockNum = dataLength / blockSize;
blockNum += dataLength % blockSize == 0 ? 0 : 1;
return blockNum;
}
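// forwardTransform1D: each block handles one contiguous segment of the signal. Every thread first helps
// load two input samples (including a left halo of `padding` samples) into shared memory; threads with
// threadIdx.x >= padding then each emit one approximation coefficient output[i] and one detail
// coefficient output[i + lowLength].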
__global__ void forwardTransform1D(float *input,
float *output,
int length,
int analysisLowLength,
int analysisHighLength,
int analysisLowFirstIndex,
int analysisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(analysisLowLength, analysisHighLength, analysisLowFirstIndex, analysisHighFirstIndex);
int computeThreadGroupSize = THREADS_PER_BLOCK - padding; // number of threads used to calculate dwt
int computeThreadGroupOffset = 2 * blockIdx.x * computeThreadGroupSize; // index of first compute thread
int threadLoadGroupOffset = computeThreadGroupOffset - padding; // index of first load thread
int i = computeThreadGroupOffset / 2 + threadIdx.x - padding; // current thread compute element index
// Load input data to shared memory; Each thread loads two elements
__shared__ float sharedInput[2 * THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
sharedInput[2 * threadIdx.x] = getInputValue1D(input, threadLoadGroupOffset + 2 * threadIdx.x, length);
sharedInput[2 * threadIdx.x + 1] = getInputValue1D(input, threadLoadGroupOffset + 2 * threadIdx.x + 1, length);
__syncthreads();
// Threads with id lower than padding are used only for loading data into shared memory
if(threadIdx.x < padding) {
return;
}
int lowLength = (length + 1) / 2; // Low subband length
int inputOffset = computeThreadGroupOffset - padding;
// Check if outcome index is lower than low subband length
if(i >= lowLength) {
return;
}
output[i] = forwardStepLow(sharedInput, inputOffset, i, analysisLowLength, analysisLowFirstIndex);
// Check if outcome index is lower than high subband length
if(i + lowLength >= length) {
return;
}
output[i + lowLength] = forwardStepHigh(sharedInput, inputOffset, i, analysisHighLength, analysisHighFirstIndex);
}
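// reverseTransform1D: each thread loads one low-subband and one high-subband coefficient into shared
// memory; the threads in the middle of the block (padding <= threadIdx.x < THREADS_PER_BLOCK - padding)
// then each reconstruct one output sample by convolving with the synthesis filter pair.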
__global__ void reverseTransform1D(float *input,
float *output,
int length,
int synthesisLowLength,
int synthesisHighLength,
int synthesisLowFirstIndex,
int synthesisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(synthesisLowLength, synthesisHighLength, synthesisLowFirstIndex, synthesisHighFirstIndex);
int threadComputeGroupOffset = blockIdx.x * (THREADS_PER_BLOCK - 2 * padding); // index of first compute thread
int threadLoadGroupOffset = threadComputeGroupOffset - padding; // index of first load thread
int loadThreadId = threadLoadGroupOffset + threadIdx.x; // current thread load element index
int i = threadComputeGroupOffset + threadIdx.x - padding; // current thread compute element index
bool analysisIgnoreEven = synthesisHighLength % 2 == 0; // Check whether to ignore even or odd input values
int lowLength = (length + 1) / 2; // Low subband length
int highLength = length - lowLength; // High subband length
int inputOffset = threadComputeGroupOffset - padding;
// Load input data to shared memory; Each thread loads one element for low pass and one element for high pass
__shared__ float lowInput [THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
__shared__ float highInput[THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
lowInput[threadIdx.x] = getInputValue1D(input, loadThreadId, 2 * lowLength, 0, true);
highInput[threadIdx.x] = getInputValue1D(input, loadThreadId, 2 * highLength, lowLength,
analysisIgnoreEven, !analysisIgnoreEven);
__syncthreads();
// Check if outcome index is lower than the signal length and if thread is a compute thread
// (not used only to load data into shared memory)
if(i >= length || threadIdx.x < padding || threadIdx.x >= THREADS_PER_BLOCK - padding) {
return;
}
output[i] = reverseStep(lowInput, highInput, inputOffset, i, synthesisLowLength, synthesisHighLength,
synthesisLowFirstIndex, synthesisHighFirstIndex);
}
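// The 2D transform is separable: forwardTransform2DRow filters along x (blockIdx.y selects the row)
// and forwardTransform2DColumn filters along y (blockIdx.x selects the column), both reusing the
// shared-memory layout of the 1D kernel.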
__global__ void forwardTransform2DRow(float *input,
float *output,
int width,
int currWidth,
int currHeight,
int analysisLowLength,
int analysisHighLength,
int analysisLowFirstIndex,
int analysisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(analysisLowLength, analysisHighLength, analysisLowFirstIndex, analysisHighFirstIndex);
int computeThreadGroupSize = THREADS_PER_BLOCK - padding; // number of threads used to calculate dwt
int computeThreadGroupOffset = 2 * blockIdx.x * computeThreadGroupSize; // index of first compute thread
int threadLoadGroupOffset = computeThreadGroupOffset - padding; // index of first load thread
int x = computeThreadGroupOffset / 2 + threadIdx.x - padding; // current thread compute element x index
int y = blockIdx.y;
int threadLoadIndex = threadLoadGroupOffset + 2 * threadIdx.x;
// Load input data to shared memory; Each thread loads two elements
__shared__ float sharedInput[2 * THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
sharedInput[2 * threadIdx.x] = getInputValue2D(input, threadLoadIndex, y, true, width, currWidth, currHeight);
sharedInput[2 * threadIdx.x + 1] = getInputValue2D(input, threadLoadIndex + 1, y, true, width, currWidth, currHeight);
__syncthreads();
// Threads with id lower than padding are used only for loading data into shared memory
if(threadIdx.x < padding) {
return;
}
int lowLength = (currWidth + 1) / 2; // Low subband length
int inputOffset = computeThreadGroupOffset - padding;
// Check if outcome index is lower than low subband length
if(x >= lowLength) {
return;
}
output[y * width + x] = forwardStepLow(sharedInput, inputOffset, x, analysisLowLength, analysisLowFirstIndex);
// Check if outcome index is lower than high subband length
if(x + lowLength >= currWidth) {
return;
}
output[y * width + x + lowLength] = forwardStepHigh(sharedInput, inputOffset, x, analysisHighLength, analysisHighFirstIndex);
}
__global__ void forwardTransform2DColumn(float *input,
float *output,
int width,
int currWidth,
int currHeight,
int analysisLowLength,
int analysisHighLength,
int analysisLowFirstIndex,
int analysisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(analysisLowLength, analysisHighLength, analysisLowFirstIndex, analysisHighFirstIndex);
int computeThreadGroupSize = THREADS_PER_BLOCK - padding; // number of threads used to calculate dwt
int computeThreadGroupOffset = 2 * blockIdx.y * computeThreadGroupSize; // index of first compute thread
int threadLoadGroupOffset = computeThreadGroupOffset - padding; // index of first load thread
int y = computeThreadGroupOffset / 2 + threadIdx.y - padding; // current thread compute element y index
int x = blockIdx.x;
int threadLoadIndex = threadLoadGroupOffset + 2 * threadIdx.y;
// Load input data to shared memory; Each thread loads two elements
__shared__ float sharedInput[2 * THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
sharedInput[2 * threadIdx.y] = getInputValue2D(input, x, threadLoadIndex, false, width, currWidth, currHeight);
sharedInput[2 * threadIdx.y + 1] = getInputValue2D(input, x, threadLoadIndex + 1, false, width, currWidth, currHeight);
__syncthreads();
// Threads with id lower than padding are used only for loading data into shared memory
if(threadIdx.y < padding) {
return;
}
int lowLength = (currHeight + 1) / 2; // Low subband length
int inputOffset = computeThreadGroupOffset - padding;
// Check if outcome index is lower than low subband length
if(y >= lowLength) {
return;
}
output[y * width + x] = forwardStepLow(sharedInput, inputOffset, y, analysisLowLength, analysisLowFirstIndex);
// Check if outcome index is lower than high subband length
if(y + lowLength >= currHeight) {
return;
}
output[(y + lowLength) * width + x] = forwardStepHigh(sharedInput, inputOffset, y, analysisHighLength, analysisHighFirstIndex);
}
__global__ void reverseTransform2DRow(float *input,
float *output,
int width,
int currWidth,
int currHeight,
int synthesisLowLength,
int synthesisHighLength,
int synthesisLowFirstIndex,
int synthesisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(synthesisLowLength, synthesisHighLength, synthesisLowFirstIndex, synthesisHighFirstIndex);
int threadComputeGroupOffset = blockIdx.x * (THREADS_PER_BLOCK - 2 * padding); // index of first compute thread
int threadLoadGroupOffset = threadComputeGroupOffset - padding; // index of first load thread
int loadThreadId = threadLoadGroupOffset + threadIdx.x; // current thread load element index
int i = threadComputeGroupOffset + threadIdx.x - padding; // current thread compute element index
int y = blockIdx.y;
bool analysisIgnoreEven = synthesisHighLength % 2 == 0; // Check whether to ignore even or odd input values
int lowLength = (currWidth + 1) / 2; // Low subband length
int highLength = currWidth - lowLength; // High subband length
int inputOffset = threadComputeGroupOffset - padding;
// Load input data to shared memory; Each thread loads one element for low pass and one element for high pass
__shared__ float lowInput [THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
__shared__ float highInput[THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
lowInput[threadIdx.x] = getInputValue2D(input, loadThreadId, y, true, width, 2 * lowLength, currHeight, 0, true);
highInput[threadIdx.x] = getInputValue2D(input, loadThreadId, y, true, width, 2 * highLength, currHeight, lowLength,
analysisIgnoreEven, !analysisIgnoreEven);
__syncthreads();
// Check if outcome index is lower than the row length and if thread is a compute thread
// (not used only to load data into shared memory)
if(i >= currWidth || threadIdx.x < padding || threadIdx.x >= THREADS_PER_BLOCK - padding) {
return;
}
output[y * width + i] = reverseStep(lowInput, highInput, inputOffset, i, synthesisLowLength, synthesisHighLength,
synthesisLowFirstIndex, synthesisHighFirstIndex);
}
__global__ void reverseTransform2DColumn(float *input,
float *output,
int width,
int currWidth,
int currHeight,
int synthesisLowLength,
int synthesisHighLength,
int synthesisLowFirstIndex,
int synthesisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(synthesisLowLength, synthesisHighLength, synthesisLowFirstIndex, synthesisHighFirstIndex);
int threadComputeGroupOffset = blockIdx.y * (THREADS_PER_BLOCK - 2 * padding); // index of first compute thread
int threadLoadGroupOffset = threadComputeGroupOffset - padding; // index of first load thread
int loadThreadId = threadLoadGroupOffset + threadIdx.y; // current thread load element index
int y = threadComputeGroupOffset + threadIdx.y - padding; // current thread compute element index
int x = blockIdx.x;
bool analysisIgnoreEven = synthesisHighLength % 2 == 0; // Check whether to ignore even or odd input values
int lowLength = (currHeight + 1) / 2; // Low subband length
int highLength = currHeight - lowLength; // High subband length
int inputOffset = threadComputeGroupOffset - padding;
// Load input data to shared memory; Each thread loads one element for low pass and one element for high pass
__shared__ float lowInput [THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
__shared__ float highInput[THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
lowInput[threadIdx.y] = getInputValue2D(input, x, loadThreadId, false, width, currWidth, 2 * lowLength, 0, true);
highInput[threadIdx.y] = getInputValue2D(input, x, loadThreadId, false, width, currWidth, 2 * highLength, lowLength,
analysisIgnoreEven, !analysisIgnoreEven);
__syncthreads();
// Check if outcome index is lower than the column length and if thread is a compute thread
// (not used only to load data into shared memory)
if(y >= currHeight || threadIdx.y < padding || threadIdx.y >= THREADS_PER_BLOCK - padding) {
return;
}
output[y * width + x] = reverseStep(lowInput, highInput, inputOffset, y, synthesisLowLength, synthesisHighLength,
synthesisLowFirstIndex, synthesisHighFirstIndex);
}
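// In the 3D kernels blockIdx.x packs two pieces of information: the segment index along the transformed
// dimension (blockIdx.x % blockNumX) and one of the fixed coordinates (blockIdx.x / blockNumX), with
// blockNumX obtained from getBlockNum; blockIdx.y carries the remaining fixed coordinate.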
__global__ void forwardTransform3DRow(float *input,
float *output,
int width,
int height,
int currWidth,
int currHeight,
int currFrames,
int analysisLowLength,
int analysisHighLength,
int analysisLowFirstIndex,
int analysisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(analysisLowLength, analysisHighLength, analysisLowFirstIndex, analysisHighFirstIndex);
int computeThreadCount = 2 * (THREADS_PER_BLOCK - padding);
int blockNumX = getBlockNum(currWidth, computeThreadCount);
int computeThreadGroupSize = THREADS_PER_BLOCK - padding; // number of threads used to calculate dwt
int computeThreadGroupOffset = 2 * (blockIdx.x % blockNumX) * computeThreadGroupSize; // index of first compute thread
int threadLoadGroupOffset = computeThreadGroupOffset - padding; // index of first load thread
int x = computeThreadGroupOffset / 2 + threadIdx.x - padding; // current thread compute element x index
int y = blockIdx.x / blockNumX;
int z = blockIdx.y;
int threadLoadIndex = threadLoadGroupOffset + 2 * threadIdx.x;
// Load input data to shared memory; Each thread loads two elements
__shared__ float sharedInput[2 * THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
sharedInput[2 * threadIdx.x] = getInputValue3D(input, threadLoadIndex, y, z, FIXED_ROW, width, height,
currWidth, currHeight, currFrames);
sharedInput[2 * threadIdx.x + 1] = getInputValue3D(input, threadLoadIndex + 1, y, z, FIXED_ROW, width, height,
currWidth, currHeight, currFrames);
__syncthreads();
// Threads with id lower than padding are used only for loading data into shared memory
if(threadIdx.x < padding) {
return;
}
int lowLength = (currWidth + 1) / 2; // Low subband length
int inputOffset = computeThreadGroupOffset - padding;
// Check if outcome index is lower than low subband length
if(x >= lowLength) {
return;
}
output[z * width * height + y * width + x] = forwardStepLow(sharedInput, inputOffset, x, analysisLowLength,
analysisLowFirstIndex);
// Check if outcome index is lower than high subband length
if(x + lowLength >= currWidth) {
return;
}
output[z * width * height + y * width + x + lowLength] = forwardStepHigh(sharedInput, inputOffset, x,
analysisHighLength, analysisHighFirstIndex);
}
__global__ void forwardTransform3DColumn(float *input,
float *output,
int width,
int height,
int currWidth,
int currHeight,
int currFrames,
int analysisLowLength,
int analysisHighLength,
int analysisLowFirstIndex,
int analysisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(analysisLowLength, analysisHighLength, analysisLowFirstIndex, analysisHighFirstIndex);
int computeThreadCount = 2 * (THREADS_PER_BLOCK - padding);
int blockNumX = getBlockNum(currHeight, computeThreadCount);
int computeThreadGroupSize = THREADS_PER_BLOCK - padding; // number of threads used to calculate dwt
int computeThreadGroupOffset = 2 * (blockIdx.x % blockNumX) * computeThreadGroupSize; // index of first compute thread
int threadLoadGroupOffset = computeThreadGroupOffset - padding; // index of first load thread
int y = computeThreadGroupOffset / 2 + threadIdx.x - padding; // current thread compute element y index
int x = blockIdx.x / blockNumX;
int z = blockIdx.y;
int threadLoadIndex = threadLoadGroupOffset + 2 * threadIdx.x;
// Load input data to shared memory; Each thread loads two elements
__shared__ float sharedInput[2 * THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
sharedInput[2 * threadIdx.x] = getInputValue3D(input, x, threadLoadIndex, z, FIXED_COLUMN, width, height,
currWidth, currHeight, currFrames);
sharedInput[2 * threadIdx.x + 1] = getInputValue3D(input, x, threadLoadIndex + 1, z, FIXED_COLUMN, width, height,
currWidth, currHeight, currFrames);
__syncthreads();
// Threads with id lower than padding are used only for loading data into shared memory
if(threadIdx.x < padding) {
return;
}
int lowLength = (currHeight + 1) / 2; // Low subband length
int inputOffset = computeThreadGroupOffset - padding;
// Check if outcome index is lower than low subband length
if(y >= lowLength) {
return;
}
output[z * width * height + y * width + x] = forwardStepLow(sharedInput, inputOffset, y, analysisLowLength,
analysisLowFirstIndex);
// Check if outcome index is lower than high subband length
if(y + lowLength >= currHeight) {
return;
}
output[z * width * height + (y + lowLength) * width + x] = forwardStepHigh(sharedInput, inputOffset, y,
analysisHighLength, analysisHighFirstIndex);
}
__global__ void forwardTransform3DFrame(float *input,
float *output,
int width,
int height,
int currWidth,
int currHeight,
int currFrames,
int analysisLowLength,
int analysisHighLength,
int analysisLowFirstIndex,
int analysisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(analysisLowLength, analysisHighLength, analysisLowFirstIndex, analysisHighFirstIndex);
int computeThreadCount = 2 * (THREADS_PER_BLOCK - padding);
int blockNumX = getBlockNum(currFrames, computeThreadCount);
int computeThreadGroupSize = THREADS_PER_BLOCK - padding; // number of threads used to calculate dwt
int computeThreadGroupOffset = 2 * (blockIdx.x % blockNumX) * computeThreadGroupSize; // index of first compute thread
int threadLoadGroupOffset = computeThreadGroupOffset - padding; // index of first load thread
int z = computeThreadGroupOffset / 2 + threadIdx.x - padding; // current thread compute element z index
int x = blockIdx.x / blockNumX;
int y = blockIdx.y;
int threadLoadIndex = threadLoadGroupOffset + 2 * threadIdx.x;
// Load input data to shared memory; Each thread loads two elements
__shared__ float sharedInput[2 * THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
sharedInput[2 * threadIdx.x] = getInputValue3D(input, x, y, threadLoadIndex, FIXED_FRAME, width, height,
currWidth, currHeight, currFrames);
sharedInput[2 * threadIdx.x + 1] = getInputValue3D(input, x, y, threadLoadIndex + 1, FIXED_FRAME, width, height,
currWidth, currHeight, currFrames);
__syncthreads();
// Threads with id lower than padding are used only for loading data into shared memory
if(threadIdx.x < padding) {
return;
}
int lowLength = (currFrames + 1) / 2; // Low subband length
int inputOffset = computeThreadGroupOffset - padding;
// Check if outcome index is lower than low subband length
if(z >= lowLength) {
return;
}
output[z * width * height + y * width + x] = forwardStepLow(sharedInput, inputOffset, z, analysisLowLength,
analysisLowFirstIndex);
// Check if outcome index is lower than high subband length
if(z + lowLength >= currFrames) {
return;
}
output[(z + lowLength) * width * height + y * width + x] = forwardStepHigh(sharedInput, inputOffset, z,
analysisHighLength, analysisHighFirstIndex);
}
__global__ void reverseTransform3DRow(float *input,
float *output,
int width,
int height,
int currWidth,
int currHeight,
int currFrames,
int synthesisLowLength,
int synthesisHighLength,
int synthesisLowFirstIndex,
int synthesisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(synthesisLowLength, synthesisHighLength, synthesisLowFirstIndex, synthesisHighFirstIndex);
int computeThreadCount = THREADS_PER_BLOCK - 2 * padding;
int blockNumX = getBlockNum(currWidth, computeThreadCount);
int threadComputeGroupOffset = (blockIdx.x % blockNumX) * (THREADS_PER_BLOCK - 2 * padding); // index of first compute thread
int threadLoadGroupOffset = threadComputeGroupOffset - padding; // index of first load thread
int loadThreadId = threadLoadGroupOffset + threadIdx.x; // current thread load element index
int x = threadComputeGroupOffset + threadIdx.x - padding; // current thread compute element index
int y = blockIdx.x / blockNumX;
int z = blockIdx.y;
bool analysisIgnoreEven = synthesisHighLength % 2 == 0; // Check whether to ignore even or odd input values
int lowLength = (currWidth + 1) / 2; // Low subband length
int highLength = currWidth - lowLength; // High subband length
int inputOffset = threadComputeGroupOffset - padding;
// Load input data to shared memory; Each thread loads one element for low pass and one element for high pass
__shared__ float lowInput [THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
__shared__ float highInput[THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
lowInput[threadIdx.x] = getInputValue3D(input, loadThreadId, y, z, FIXED_ROW, width, height, 2 * lowLength,
currHeight, currFrames, 0, true);
highInput[threadIdx.x] = getInputValue3D(input, loadThreadId, y, z, FIXED_ROW, width, height, 2 * highLength,
currHeight, currFrames, lowLength, analysisIgnoreEven, !analysisIgnoreEven);
__syncthreads();
// Check if outcome index is lower than the row length and if thread is a compute thread
// (not used only to load data into shared memory)
if(x >= currWidth || threadIdx.x < padding || threadIdx.x >= THREADS_PER_BLOCK - padding) {
return;
}
output[z * width * height + y * width + x] = reverseStep(lowInput, highInput, inputOffset, x, synthesisLowLength, synthesisHighLength,
synthesisLowFirstIndex, synthesisHighFirstIndex);
}
__global__ void reverseTransform3DColumn(float *input,
float *output,
int width,
int height,
int currWidth,
int currHeight,
int currFrames,
int synthesisLowLength,
int synthesisHighLength,
int synthesisLowFirstIndex,
int synthesisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(synthesisLowLength, synthesisHighLength, synthesisLowFirstIndex, synthesisHighFirstIndex);
int computeThreadCount = THREADS_PER_BLOCK - 2 * padding;
int blockNumX = getBlockNum(currHeight, computeThreadCount);
int threadComputeGroupOffset = (blockIdx.x % blockNumX) * (THREADS_PER_BLOCK - 2 * padding); // index of first compute thread
int threadLoadGroupOffset = threadComputeGroupOffset - padding; // index of first load thread
int loadThreadId = threadLoadGroupOffset + threadIdx.x; // current thread load element index
int y = threadComputeGroupOffset + threadIdx.x - padding; // current thread compute element index
int x = blockIdx.x / blockNumX;
int z = blockIdx.y;
bool analysisIgnoreEven = synthesisHighLength % 2 == 0; // Check whether to ignore even or odd input values
int lowLength = (currHeight + 1) / 2; // Low subband length
int highLength = currHeight - lowLength; // High subband length
int inputOffset = threadComputeGroupOffset - padding;
// Load input data to shared memory; Each thread loads one element for low pass and one element for high pass
__shared__ float lowInput [THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
__shared__ float highInput[THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
lowInput[threadIdx.x] = getInputValue3D(input, x, loadThreadId, z, FIXED_COLUMN, width, height, currWidth,
2 * lowLength, currFrames, 0, true);
highInput[threadIdx.x] = getInputValue3D(input, x, loadThreadId, z, FIXED_COLUMN, width, height, currWidth,
2 * highLength, currFrames, lowLength, analysisIgnoreEven, !analysisIgnoreEven);
__syncthreads();
// Check if outcome index is lower than the column length and if thread is a compute thread
// (not used only to load data into shared memory)
if(y >= currHeight || threadIdx.x < padding || threadIdx.x >= THREADS_PER_BLOCK - padding) {
return;
}
output[z * width * height + y * width + x] = reverseStep(lowInput, highInput, inputOffset, y, synthesisLowLength, synthesisHighLength,
synthesisLowFirstIndex, synthesisHighFirstIndex);
}
__global__ void reverseTransform3DFrame(float *input,
float *output,
int width,
int height,
int currWidth,
int currHeight,
int currFrames,
int synthesisLowLength,
int synthesisHighLength,
int synthesisLowFirstIndex,
int synthesisHighFirstIndex) {
// Calculate filter padding
int padding = getFilterPadding(synthesisLowLength, synthesisHighLength, synthesisLowFirstIndex, synthesisHighFirstIndex);
int computeThreadCount = THREADS_PER_BLOCK - 2 * padding;
int blockNumX = getBlockNum(currFrames, computeThreadCount);
int threadComputeGroupOffset = (blockIdx.x % blockNumX) * (THREADS_PER_BLOCK - 2 * padding); // index of first compute thread
int threadLoadGroupOffset = threadComputeGroupOffset - padding; // index of first load thread
int loadThreadId = threadLoadGroupOffset + threadIdx.x; // current thread load element index
int z = threadComputeGroupOffset + threadIdx.x - padding; // current thread compute element index
int x = blockIdx.x / blockNumX;
int y = blockIdx.y;
bool analysisIgnoreEven = synthesisHighLength % 2 == 0; // Check whether to ignore even or odd input values
int lowLength = (currFrames + 1) / 2; // Low subband length
int highLength = currFrames - lowLength; // High subband length
int inputOffset = threadComputeGroupOffset - padding;
// Load input data to shared memory; Each thread loads one element for low pass and one element for high pass
__shared__ float lowInput [THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
__shared__ float highInput[THREADS_PER_BLOCK + 2 * FILTER_BUFF_LENGTH];
lowInput[threadIdx.x] = getInputValue3D(input, x, y, loadThreadId, FIXED_FRAME, width, height, currWidth,
currHeight, 2 * lowLength, 0, true);
highInput[threadIdx.x] = getInputValue3D(input, x, y, loadThreadId, FIXED_FRAME, width, height, currWidth,
currHeight, 2 * highLength, lowLength, analysisIgnoreEven, !analysisIgnoreEven);
__syncthreads();
// Check if outcome index is lower than the number of frames and if thread is a compute thread
// (not used only to load data into shared memory)
if(z >= currFrames || threadIdx.x < padding || threadIdx.x >= THREADS_PER_BLOCK - padding) {
return;
}
output[z * width * height + y * width + x] = reverseStep(lowInput, highInput, inputOffset, z, synthesisLowLength, synthesisHighLength,
synthesisLowFirstIndex, synthesisHighFirstIndex);
}
|
18d31abacd109b53a9c236b24bf412bd995f0de4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* parallel_renderer.cpp
*
* Created on: Dec 22, 2018
* Author: matt
*/
#include "cuda_error_check.h"
#include "renderer.h"
#include "scene.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hiprand/hiprand_kernel.h>
#include <iostream>
using std::cout;
#define BLOCK_WIDTH 16u
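// Small per-scene values read by every thread (material parameters, light count and surface area,
// image width) are kept in constant memory; triangles, the BVH and image textures are accessed through
// global memory and texture objects instead.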
__constant__ float3 c_materialFloats[MAX_MATERIALS * MATERIALS_FLOAT_COMPONENTS];
__constant__ int2 c_materialIndices[MAX_MATERIALS];
__constant__ pixels_t c_width;
__constant__ float c_lightsSurfaceArea;
__constant__ uint c_numLights;
// Kernels
__global__ void initializeCurandKernel(hiprandState_t* p_curandState);
__global__ void renderKernel(float3* p_imgBuffer,
uchar4* p_outImg,
Camera camera,
SceneData* p_sceneData,
uint* p_lightsIndices,
hiprandState_t *p_curandState,
int sampleNumber);
__host__ ParallelRenderer::ParallelRenderer(Scene* _scenePtr, pixels_t _width, pixels_t _height, uint _samples) :
Renderer(_scenePtr, _width, _height, _samples) {
// CUDA settings
useCuda = true;
threadsPerBlock = BLOCK_WIDTH * BLOCK_WIDTH;
gridBlocks = width / BLOCK_WIDTH * height / BLOCK_WIDTH;
pixels_t pixels = width * height;
uint numTris = p_scene->getNumTriangles();
uint numMaterials = p_scene->getNumMaterials();
uint numBvhNodes = p_scene->getNumBvhNodes();
uint numLights = p_scene->getNumLights();
uint numTextures = p_scene->getNumTextures();
pixels_t totalTexturePixels = p_scene->getTotalTexturePixels();
size_t trianglesBytes = sizeof(Triangle) * numTris;
size_t curandBytes = sizeof(hiprandState_t) * threadsPerBlock;
size_t textureObjectBytes = sizeof(hipTextureObject_t) * (numTextures + TEXTURES_OFFSET);
d_imgVectorPtr = NULL;
d_imgBytesPtr = NULL;
d_camPtr = NULL;
d_triPtr = NULL;
d_cudaTexObjects = NULL;
d_lightsIndices = NULL;
d_sceneData = NULL;
d_curandStatePtr = NULL;
CUDA_CHECK_RETURN(hipMalloc((void**)&d_imgVectorPtr, sizeof(float3) * pixels));
CUDA_CHECK_RETURN(hipMalloc((void**)&d_imgBytesPtr, sizeof(uchar4) * pixels));
CUDA_CHECK_RETURN(hipMalloc((void**)&d_camPtr, sizeof(Camera)));
CUDA_CHECK_RETURN(hipMalloc((void**)&d_triPtr, trianglesBytes));
CUDA_CHECK_RETURN(hipMalloc((void**)&d_lightsIndices, sizeof(uint) * numLights));
CUDA_CHECK_RETURN(hipMalloc((void**)&d_sceneData, sizeof(SceneData)));
CUDA_CHECK_RETURN(hipMalloc((void**)&d_curandStatePtr, curandBytes));
CUDA_CHECK_RETURN(hipMalloc((void**)&d_cudaTexObjects, textureObjectBytes));
copyMemoryToCuda();
initializeCurand();
}
__host__ ParallelRenderer::~ParallelRenderer() {
hipFree(d_imgVectorPtr);
hipFree(d_imgBytesPtr);
hipFree(d_camPtr);
hipFree(d_triPtr);
hipFree(d_cudaTexObjects);
hipFree(d_lightsIndices);
hipFree(d_sceneData);
hipFree(d_curandStatePtr);
}
__host__ void ParallelRenderer::copyMemoryToCuda() {
uint numTris = p_scene->getNumTriangles();
uint numLights = p_scene->getNumLights();
uint numBvhNodes = p_scene->getNumBvhNodes();
uint numMaterials = p_scene->getNumMaterials();
uint numTextures = p_scene->getNumTextures();
pixels_t numTotalTexturePixels = p_scene->getTotalTexturePixels();
float lightsSurfaceArea = p_scene->getLightsSurfaceArea();
size_t trianglesBytes = sizeof(Triangle) * numTris;
size_t lightsIndicesBytes = sizeof(uint) * numLights;
size_t textureObjectBytes = sizeof(hipTextureObject_t) * (numTextures + TEXTURES_OFFSET);
Camera* h_camPtr = p_scene->getCameraPtr();
Triangle* h_triPtr = p_scene->getTriPtr();
Material* h_materialsPtr = p_scene->getMaterialsPtr();
SceneData* h_sceneData = (SceneData*)malloc(sizeof(SceneData));
uint* h_lightsIndices = p_scene->getLightsIndicesPtr();
CUDA_CHECK_RETURN(hipMemcpy(d_camPtr, h_camPtr, sizeof(Camera), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(d_triPtr, h_triPtr, trianglesBytes, hipMemcpyHostToDevice));
hipTextureObject_t* h_textureObjects = createTextureObjects();
CUDA_CHECK_RETURN(hipMemcpy(d_cudaTexObjects, h_textureObjects, textureObjectBytes, hipMemcpyHostToDevice));
h_sceneData->p_triangles = d_triPtr;
h_sceneData->p_cudaTexObjects = d_cudaTexObjects;
CUDA_CHECK_RETURN(hipMemcpy(d_sceneData, h_sceneData, sizeof(SceneData), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(d_lightsIndices, h_lightsIndices, lightsIndicesBytes, hipMemcpyHostToDevice));
createMaterialsData();
hipMemcpyToSymbol(c_numLights, &numLights, sizeof(uint));
hipMemcpyToSymbol(c_lightsSurfaceArea, &lightsSurfaceArea, sizeof(float));
hipMemcpyToSymbol(c_width, &width, sizeof(pixels_t));
free(h_sceneData);
}
__host__ hipTextureObject_t* ParallelRenderer::createTextureObjects() {
uint numTextures = p_scene->getNumTextures();
hipTextureObject_t* p_cudaTexObjects = new hipTextureObject_t[numTextures + TEXTURES_OFFSET];
//
// BVH
//
LinearBVHNode* h_bvh = p_scene->getBvhPtr();
size_t numBvhNodes = p_scene->getNumBvhNodes();
// Copy min and max
{
size_t size = numBvhNodes * 2 * sizeof(float4);
float4* h_buffer = new float4[numBvhNodes * 2];
for (uint i = 0; i < numBvhNodes; i++) {
h_buffer[2*i] = make_float4(h_bvh[i].min);
h_buffer[2*i + 1] = make_float4(h_bvh[i].max);
}
float4* d_buffer = NULL;
CUDA_CHECK_RETURN(hipMalloc((void**)&d_buffer, size));
CUDA_CHECK_RETURN(hipMemcpy(d_buffer, h_buffer, size, hipMemcpyHostToDevice));
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(hipResourceDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = d_buffer;
resDesc.res.linear.desc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindFloat);
resDesc.res.linear.sizeInBytes = size;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(hipTextureDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModePoint;
hipTextureObject_t currentTexObject = 0;
hipCreateTextureObject(¤tTexObject,
&resDesc,
&texDesc,
NULL);
p_cudaTexObjects[BVH_BOUNDS_OFFSET] = currentTexObject;
delete[] h_buffer;
}
// Copy indexes, numTriangles, and axis
{
size_t size = numBvhNodes * sizeof(int2);
int2* h_buffer = new int2[numBvhNodes];
for (uint i = 0; i < numBvhNodes; i++) {
h_buffer[i].x = h_bvh->secondChildOffset;
//
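// Pack numTriangles into the upper 16 bits and the split axis into the lower 16 bits of the y component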
int32_t yValue = ((int32_t)(h_bvh->numTriangles) << 16) | ((int32_t)(h_bvh->axis));
h_buffer[i].y = yValue;
h_bvh++;
}
int2* d_buffer = NULL;
CUDA_CHECK_RETURN(hipMalloc((void**)&d_buffer, size));
CUDA_CHECK_RETURN(hipMemcpy(d_buffer, h_buffer, size, hipMemcpyHostToDevice));
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(hipResourceDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = d_buffer;
resDesc.res.linear.desc = hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindSigned);
resDesc.res.linear.sizeInBytes = size;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(hipTextureDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModePoint;
hipTextureObject_t currentTexObject = 0;
hipCreateTextureObject(¤tTexObject,
&resDesc,
&texDesc,
NULL);
p_cudaTexObjects[BVH_INDEX_OFFSET] = currentTexObject;
delete[] h_buffer;
}
//
// Actual Textures
//
pixels_t* h_textureDimensions = p_scene->getTextureDimensionsPtr();
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindFloat);
for (uint i = 0; i < numTextures; i++) {
float3* p_currentTextureData = p_scene->getTexturePtr(i);
pixels_t width = h_textureDimensions[2*i];
pixels_t height = h_textureDimensions[2*i + 1];
pixels_t numPixels = width * height;
size_t size = numPixels * sizeof(float4);
float4* p_currentTextureFormattedData = new float4[numPixels];
for (pixels_t j = 0; j < numPixels; j++) {
p_currentTextureFormattedData[j] = make_float4(p_currentTextureData[j]);
}
hipArray* cuArray = NULL;
CUDA_CHECK_RETURN(hipMallocArray(&cuArray, &channelDesc, width, height));
CUDA_CHECK_RETURN(hipMemcpyToArray(cuArray,
0,
0,
p_currentTextureFormattedData,
size,
hipMemcpyHostToDevice));
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(hipResourceDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = cuArray;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(hipTextureDesc));
texDesc.addressMode[0] = hipAddressModeWrap;
texDesc.addressMode[1] = hipAddressModeWrap;
texDesc.filterMode = hipFilterModeLinear;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = 1;
hipTextureObject_t currentTexObject = 0;
hipCreateTextureObject(¤tTexObject,
&resDesc,
&texDesc,
NULL);
p_cudaTexObjects[i + TEXTURES_OFFSET] = currentTexObject;
delete[] p_currentTextureFormattedData;
}
return p_cudaTexObjects;
}
__host__ void ParallelRenderer::createMaterialsData() {
Material* p_materials = p_scene->getMaterialsPtr();
uint numMaterials = p_scene->getNumMaterials();
float3* p_floatBuffer = new float3[MAX_MATERIALS * MATERIALS_FLOAT_COMPONENTS];
int2* p_intBuffer = new int2[MAX_MATERIALS];
float3* p_currentFloat = p_floatBuffer;
int2* p_currentIndex = p_intBuffer;
for (uint i = 0; i < numMaterials; i++) {
*p_currentFloat++ = p_materials[i].kd;
*p_currentFloat++ = p_materials[i].ka;
*p_currentFloat++ = p_materials[i].ks;
*p_currentFloat++ = make_float3(p_materials[i].ns,
p_materials[i].ni,
p_materials[i].diffuseCoefficient);
*p_currentIndex++ = make_int2((int32_t)p_materials[i].bsdf,
(int32_t)p_materials[i].texKdIdx);
}
hipMemcpyToSymbol(c_materialFloats,
p_floatBuffer,
numMaterials * MATERIALS_FLOAT_COMPONENTS * sizeof(float3));
hipMemcpyToSymbol(c_materialIndices,
p_intBuffer,
numMaterials * sizeof(int2));
delete[] p_floatBuffer;
delete[] p_intBuffer;
}
__host__ void ParallelRenderer::initializeCurand() {
dim3 block = dim3(BLOCK_WIDTH, BLOCK_WIDTH, 1);
dim3 grid = dim3(width/BLOCK_WIDTH, height/BLOCK_WIDTH, 1);
hipLaunchKernelGGL(( initializeCurandKernel), dim3(1), dim3(block), 0, 0, d_curandStatePtr);
}
__host__ void ParallelRenderer::renderOneSamplePerPixel(uchar4* p_img) {
dim3 block = dim3(BLOCK_WIDTH, BLOCK_WIDTH, 1);
dim3 grid = dim3(width/BLOCK_WIDTH, height/BLOCK_WIDTH, 1);
Camera camera = *p_scene->getCameraPtr();
size_t sharedBytes = sizeof(Sampler) * BLOCK_WIDTH * BLOCK_WIDTH;
hipLaunchKernelGGL(( renderKernel), dim3(grid), dim3(block), sharedBytes, 0, d_imgVectorPtr,
p_img,
camera,
d_sceneData,
d_lightsIndices,
d_curandStatePtr,
samplesRendered);
samplesRendered++;
}
__host__ void ParallelRenderer::copyImageBytes(uchar4* p_img) {
pixels_t pixels = width * height;
size_t imgBytes = sizeof(uchar4) * pixels;
CUDA_CHECK_RETURN(hipMemcpy(h_imgPtr, p_img, imgBytes, hipMemcpyDeviceToHost));
for (uint i = 0; i < pixels; i++) {
gammaCorrectPixel(h_imgPtr[i]);
}
}
__global__ void initializeCurandKernel(hiprandState_t* p_curandState) {
uint idx = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y)
+ (threadIdx.y * blockDim.x) + threadIdx.x;
hiprand_init(1234, idx, 0, &p_curandState[idx]);
}
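// renderKernel accumulates one new sample per pixel into the float3 buffer and writes the running
// average (converted to uchar4) into the output image.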
__global__ void renderKernel(float3* p_imgBuffer,
uchar4* p_outImg,
Camera camera,
SceneData* p_sceneData,
uint* p_lightsIndices,
hiprandState_t *p_curandState,
int sampleNumber) {
extern __shared__ Sampler p_samplers[];
uint x = (blockIdx.x * blockDim.x + threadIdx.x);
uint y = (blockIdx.y * blockDim.y + threadIdx.y);
uint blockOnlyIdx = threadIdx.x * blockDim.x + threadIdx.y;
uint idx = y * c_width + x;
p_samplers[blockOnlyIdx] = Sampler(&p_curandState[blockOnlyIdx]);
float3 color = samplePixel(x, y,
camera,
p_sceneData,
p_lightsIndices,
c_numLights,
c_lightsSurfaceArea,
&p_samplers[blockOnlyIdx],
c_materialFloats,
c_materialIndices);
p_imgBuffer[idx] = p_imgBuffer[idx] + color;
p_outImg[idx] = float3ToUchar4(p_imgBuffer[idx]/(float)(sampleNumber + 1));
}
| 18d31abacd109b53a9c236b24bf412bd995f0de4.cu | /*
* parallel_renderer.cpp
*
* Created on: Dec 22, 2018
* Author: matt
*/
#include "cuda_error_check.h"
#include "renderer.h"
#include "scene.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <curand_kernel.h>
#include <iostream>
using std::cout;
#define BLOCK_WIDTH 16u
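// Small per-scene values read by every thread (material parameters, light count and surface area,
// image width) are kept in constant memory; triangles, the BVH and image textures are accessed through
// global memory and texture objects instead.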
__constant__ float3 c_materialFloats[MAX_MATERIALS * MATERIALS_FLOAT_COMPONENTS];
__constant__ int2 c_materialIndices[MAX_MATERIALS];
__constant__ pixels_t c_width;
__constant__ float c_lightsSurfaceArea;
__constant__ uint c_numLights;
// Kernels
__global__ void initializeCurandKernel(curandState* p_curandState);
__global__ void renderKernel(float3* p_imgBuffer,
uchar4* p_outImg,
Camera camera,
SceneData* p_sceneData,
uint* p_lightsIndices,
curandState *p_curandState,
int sampleNumber);
__host__ ParallelRenderer::ParallelRenderer(Scene* _scenePtr, pixels_t _width, pixels_t _height, uint _samples) :
Renderer(_scenePtr, _width, _height, _samples) {
// CUDA settings
useCuda = true;
threadsPerBlock = BLOCK_WIDTH * BLOCK_WIDTH;
gridBlocks = width / BLOCK_WIDTH * height / BLOCK_WIDTH;
pixels_t pixels = width * height;
uint numTris = p_scene->getNumTriangles();
uint numMaterials = p_scene->getNumMaterials();
uint numBvhNodes = p_scene->getNumBvhNodes();
uint numLights = p_scene->getNumLights();
uint numTextures = p_scene->getNumTextures();
pixels_t totalTexturePixels = p_scene->getTotalTexturePixels();
size_t trianglesBytes = sizeof(Triangle) * numTris;
size_t curandBytes = sizeof(curandState) * threadsPerBlock;
size_t textureObjectBytes = sizeof(cudaTextureObject_t) * (numTextures + TEXTURES_OFFSET);
d_imgVectorPtr = NULL;
d_imgBytesPtr = NULL;
d_camPtr = NULL;
d_triPtr = NULL;
d_cudaTexObjects = NULL;
d_lightsIndices = NULL;
d_sceneData = NULL;
d_curandStatePtr = NULL;
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_imgVectorPtr, sizeof(float3) * pixels));
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_imgBytesPtr, sizeof(uchar4) * pixels));
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_camPtr, sizeof(Camera)));
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_triPtr, trianglesBytes));
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_lightsIndices, sizeof(uint) * numLights));
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_sceneData, sizeof(SceneData)));
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_curandStatePtr, curandBytes));
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_cudaTexObjects, textureObjectBytes));
copyMemoryToCuda();
initializeCurand();
}
__host__ ParallelRenderer::~ParallelRenderer() {
cudaFree(d_imgVectorPtr);
cudaFree(d_imgBytesPtr);
cudaFree(d_camPtr);
cudaFree(d_triPtr);
cudaFree(d_cudaTexObjects);
cudaFree(d_lightsIndices);
cudaFree(d_sceneData);
cudaFree(d_curandStatePtr);
}
__host__ void ParallelRenderer::copyMemoryToCuda() {
uint numTris = p_scene->getNumTriangles();
uint numLights = p_scene->getNumLights();
uint numBvhNodes = p_scene->getNumBvhNodes();
uint numMaterials = p_scene->getNumMaterials();
uint numTextures = p_scene->getNumTextures();
pixels_t numTotalTexturePixels = p_scene->getTotalTexturePixels();
float lightsSurfaceArea = p_scene->getLightsSurfaceArea();
size_t trianglesBytes = sizeof(Triangle) * numTris;
size_t lightsIndicesBytes = sizeof(uint) * numLights;
size_t textureObjectBytes = sizeof(cudaTextureObject_t) * (numTextures + TEXTURES_OFFSET);
Camera* h_camPtr = p_scene->getCameraPtr();
Triangle* h_triPtr = p_scene->getTriPtr();
Material* h_materialsPtr = p_scene->getMaterialsPtr();
SceneData* h_sceneData = (SceneData*)malloc(sizeof(SceneData));
uint* h_lightsIndices = p_scene->getLightsIndicesPtr();
CUDA_CHECK_RETURN(cudaMemcpy(d_camPtr, h_camPtr, sizeof(Camera), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(d_triPtr, h_triPtr, trianglesBytes, cudaMemcpyHostToDevice));
cudaTextureObject_t* h_textureObjects = createTextureObjects();
CUDA_CHECK_RETURN(cudaMemcpy(d_cudaTexObjects, h_textureObjects, textureObjectBytes, cudaMemcpyHostToDevice));
h_sceneData->p_triangles = d_triPtr;
h_sceneData->p_cudaTexObjects = d_cudaTexObjects;
CUDA_CHECK_RETURN(cudaMemcpy(d_sceneData, h_sceneData, sizeof(SceneData), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(d_lightsIndices, h_lightsIndices, lightsIndicesBytes, cudaMemcpyHostToDevice));
createMaterialsData();
cudaMemcpyToSymbol(c_numLights, &numLights, sizeof(uint));
cudaMemcpyToSymbol(c_lightsSurfaceArea, &lightsSurfaceArea, sizeof(float));
cudaMemcpyToSymbol(c_width, &width, sizeof(pixels_t));
free(h_sceneData);
}
__host__ cudaTextureObject_t* ParallelRenderer::createTextureObjects() {
uint numTextures = p_scene->getNumTextures();
cudaTextureObject_t* p_cudaTexObjects = new cudaTextureObject_t[numTextures + TEXTURES_OFFSET];
//
// BVH
//
LinearBVHNode* h_bvh = p_scene->getBvhPtr();
size_t numBvhNodes = p_scene->getNumBvhNodes();
// Copy min and max
{
size_t size = numBvhNodes * 2 * sizeof(float4);
float4* h_buffer = new float4[numBvhNodes * 2];
for (uint i = 0; i < numBvhNodes; i++) {
h_buffer[2*i] = make_float4(h_bvh[i].min);
h_buffer[2*i + 1] = make_float4(h_bvh[i].max);
}
float4* d_buffer = NULL;
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_buffer, size));
CUDA_CHECK_RETURN(cudaMemcpy(d_buffer, h_buffer, size, cudaMemcpyHostToDevice));
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(cudaResourceDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = d_buffer;
resDesc.res.linear.desc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);
resDesc.res.linear.sizeInBytes = size;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(cudaTextureDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
cudaTextureObject_t currentTexObject = 0;
cudaCreateTextureObject(¤tTexObject,
&resDesc,
&texDesc,
NULL);
p_cudaTexObjects[BVH_BOUNDS_OFFSET] = currentTexObject;
delete[] h_buffer;
}
// Copy indexes, numTriangles, and axis
{
size_t size = numBvhNodes * sizeof(int2);
int2* h_buffer = new int2[numBvhNodes];
for (uint i = 0; i < numBvhNodes; i++) {
h_buffer[i].x = h_bvh->secondChildOffset;
//
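// Pack numTriangles into the upper 16 bits and the split axis into the lower 16 bits of the y component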
int32_t yValue = ((int32_t)(h_bvh->numTriangles) << 16) | ((int32_t)(h_bvh->axis));
h_buffer[i].y = yValue;
h_bvh++;
}
int2* d_buffer = NULL;
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_buffer, size));
CUDA_CHECK_RETURN(cudaMemcpy(d_buffer, h_buffer, size, cudaMemcpyHostToDevice));
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(cudaResourceDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = d_buffer;
resDesc.res.linear.desc = cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindSigned);
resDesc.res.linear.sizeInBytes = size;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(cudaTextureDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
cudaTextureObject_t currentTexObject = 0;
cudaCreateTextureObject(¤tTexObject,
&resDesc,
&texDesc,
NULL);
p_cudaTexObjects[BVH_INDEX_OFFSET] = currentTexObject;
delete[] h_buffer;
}
//
// Actual Textures
//
pixels_t* h_textureDimensions = p_scene->getTextureDimensionsPtr();
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);
for (uint i = 0; i < numTextures; i++) {
float3* p_currentTextureData = p_scene->getTexturePtr(i);
pixels_t width = h_textureDimensions[2*i];
pixels_t height = h_textureDimensions[2*i + 1];
pixels_t numPixels = width * height;
size_t size = numPixels * sizeof(float4);
float4* p_currentTextureFormattedData = new float4[numPixels];
for (pixels_t j = 0; j < numPixels; j++) {
p_currentTextureFormattedData[j] = make_float4(p_currentTextureData[j]);
}
cudaArray* cuArray = NULL;
CUDA_CHECK_RETURN(cudaMallocArray(&cuArray, &channelDesc, width, height));
CUDA_CHECK_RETURN(cudaMemcpyToArray(cuArray,
0,
0,
p_currentTextureFormattedData,
size,
cudaMemcpyHostToDevice));
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(cudaResourceDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = cuArray;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(cudaTextureDesc));
texDesc.addressMode[0] = cudaAddressModeWrap;
texDesc.addressMode[1] = cudaAddressModeWrap;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = 1;
cudaTextureObject_t currentTexObject = 0;
cudaCreateTextureObject(¤tTexObject,
&resDesc,
&texDesc,
NULL);
p_cudaTexObjects[i + TEXTURES_OFFSET] = currentTexObject;
delete[] p_currentTextureFormattedData;
}
return p_cudaTexObjects;
}
__host__ void ParallelRenderer::createMaterialsData() {
Material* p_materials = p_scene->getMaterialsPtr();
uint numMaterials = p_scene->getNumMaterials();
float3* p_floatBuffer = new float3[MAX_MATERIALS * MATERIALS_FLOAT_COMPONENTS];
int2* p_intBuffer = new int2[MAX_MATERIALS];
float3* p_currentFloat = p_floatBuffer;
int2* p_currentIndex = p_intBuffer;
for (uint i = 0; i < numMaterials; i++) {
*p_currentFloat++ = p_materials[i].kd;
*p_currentFloat++ = p_materials[i].ka;
*p_currentFloat++ = p_materials[i].ks;
*p_currentFloat++ = make_float3(p_materials[i].ns,
p_materials[i].ni,
p_materials[i].diffuseCoefficient);
*p_currentIndex++ = make_int2((int32_t)p_materials[i].bsdf,
(int32_t)p_materials[i].texKdIdx);
}
cudaMemcpyToSymbol(c_materialFloats,
p_floatBuffer,
numMaterials * MATERIALS_FLOAT_COMPONENTS * sizeof(float3));
cudaMemcpyToSymbol(c_materialIndices,
p_intBuffer,
numMaterials * sizeof(int2));
delete[] p_floatBuffer;
delete[] p_intBuffer;
}
__host__ void ParallelRenderer::initializeCurand() {
dim3 block = dim3(BLOCK_WIDTH, BLOCK_WIDTH, 1);
dim3 grid = dim3(width/BLOCK_WIDTH, height/BLOCK_WIDTH, 1);
initializeCurandKernel<<<1, block, 0>>>(d_curandStatePtr);
}
__host__ void ParallelRenderer::renderOneSamplePerPixel(uchar4* p_img) {
dim3 block = dim3(BLOCK_WIDTH, BLOCK_WIDTH, 1);
dim3 grid = dim3(width/BLOCK_WIDTH, height/BLOCK_WIDTH, 1);
Camera camera = *p_scene->getCameraPtr();
size_t sharedBytes = sizeof(Sampler) * BLOCK_WIDTH * BLOCK_WIDTH;
renderKernel<<<grid, block, sharedBytes>>>(d_imgVectorPtr,
p_img,
camera,
d_sceneData,
d_lightsIndices,
d_curandStatePtr,
samplesRendered);
samplesRendered++;
}
__host__ void ParallelRenderer::copyImageBytes(uchar4* p_img) {
pixels_t pixels = width * height;
size_t imgBytes = sizeof(uchar4) * pixels;
CUDA_CHECK_RETURN(cudaMemcpy(h_imgPtr, p_img, imgBytes, cudaMemcpyDeviceToHost));
for (uint i = 0; i < pixels; i++) {
gammaCorrectPixel(h_imgPtr[i]);
}
}
__global__ void initializeCurandKernel(curandState* p_curandState) {
uint idx = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y)
+ (threadIdx.y * blockDim.x) + threadIdx.x;
curand_init(1234, idx, 0, &p_curandState[idx]);
}
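// renderKernel accumulates one new sample per pixel into the float3 buffer and writes the running
// average (converted to uchar4) into the output image.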
__global__ void renderKernel(float3* p_imgBuffer,
uchar4* p_outImg,
Camera camera,
SceneData* p_sceneData,
uint* p_lightsIndices,
curandState *p_curandState,
int sampleNumber) {
extern __shared__ Sampler p_samplers[];
uint x = (blockIdx.x * blockDim.x + threadIdx.x);
uint y = (blockIdx.y * blockDim.y + threadIdx.y);
uint blockOnlyIdx = threadIdx.x * blockDim.x + threadIdx.y;
uint idx = y * c_width + x;
p_samplers[blockOnlyIdx] = Sampler(&p_curandState[blockOnlyIdx]);
float3 color = samplePixel(x, y,
camera,
p_sceneData,
p_lightsIndices,
c_numLights,
c_lightsSurfaceArea,
&p_samplers[blockOnlyIdx],
c_materialFloats,
c_materialIndices);
p_imgBuffer[idx] = p_imgBuffer[idx] + color;
p_outImg[idx] = float3ToUchar4(p_imgBuffer[idx]/(float)(sampleNumber + 1));
}
|
5802eb1a801897d8f1acc0788c4f7e68d88ad812.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// Details on this algorithm can be found in:
// Green, O., 2017. "Efficient scalable median filtering using histogram-based operations",
// IEEE Transactions on Image Processing, 27(5), pp.2217-2228.
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
namespace cv { namespace cuda { namespace device
{
__device__ void histogramAddAndSub8(int* H, const int * hist_colAdd,const int * hist_colSub){
int tx = threadIdx.x;
if (tx<8){
H[tx]+=hist_colAdd[tx]-hist_colSub[tx];
}
}
__device__ void histogramMultipleAdd8(int* H, const int * hist_col,int histCount){
int tx = threadIdx.x;
if (tx<8){
int temp=H[tx];
for(int i=0; i<histCount; i++)
temp+=hist_col[(i<<3)+tx];
H[tx]=temp;
}
}
__device__ void histogramClear8(int* H){
int tx = threadIdx.x;
if (tx<8){
H[tx]=0;
}
}
__device__ void histogramAdd8(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<8){
H[tx]+=hist_col[tx];
}
}
__device__ void histogramSub8(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<8){
H[tx]-=hist_col[tx];
}
}
__device__ void histogramAdd32(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<32){
H[tx]+=hist_col[tx];
}
}
__device__ void histogramAddAndSub32(int* H, const int * hist_colAdd,const int * hist_colSub){
int tx = threadIdx.x;
if (tx<32){
H[tx]+=hist_colAdd[tx]-hist_colSub[tx];
}
}
__device__ void histogramClear32(int* H){
int tx = threadIdx.x;
if (tx<32){
H[tx]=0;
}
}
__device__ void lucClear8(int* luc){
int tx = threadIdx.x;
if (tx<8)
luc[tx]=0;
}
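// scanNeighbor performs one step of an inclusive prefix sum over the histogram bins; findMedian then
// reports the first bin whose running count exceeds the requested median position.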
#define scanNeighbor(array, range, index, threadIndex) \
{ \
int v = 0; \
if (index <= threadIndex && threadIndex < range) \
v = array[threadIndex] + array[threadIndex-index]; \
__syncthreads(); \
if (index <= threadIndex && threadIndex < range) \
array[threadIndex] = v; \
}
#define findMedian(array, range, threadIndex, result, count, position) \
if (threadIndex < range) \
{ \
if (array[threadIndex+1] > position && array[threadIndex] <= position) \
{ \
*result = threadIndex+1; \
*count = array[threadIndex]; \
} \
}
__device__ void histogramMedianPar8LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){
int tx=threadIdx.x;
*retval=*countAtMed=0;
if(tx<8){
Hscan[tx]=H[tx];
}
__syncthreads();
scanNeighbor(Hscan, 8, 1, tx);
__syncthreads();
scanNeighbor(Hscan, 8, 2, tx);
__syncthreads();
scanNeighbor(Hscan, 8, 4, tx);
__syncthreads();
findMedian(Hscan, 7, tx, retval, countAtMed, medPos);
}
__device__ void histogramMedianPar32LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){
int tx=threadIdx.x;
*retval=*countAtMed=0;
if(tx<32){
Hscan[tx]=H[tx];
}
__syncthreads();
scanNeighbor(Hscan, 32, 1, tx);
__syncthreads();
scanNeighbor(Hscan, 32, 2, tx);
__syncthreads();
scanNeighbor(Hscan, 32, 4, tx);
__syncthreads();
scanNeighbor(Hscan, 32, 8, tx);
__syncthreads();
scanNeighbor(Hscan, 32, 16, tx);
__syncthreads();
findMedian(Hscan, 31, tx, retval, countAtMed, medPos);
}
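// The median lookup is two-level: the 8-bin coarse histogram (one bin per group of 32 intensity values)
// selects the coarse bin that contains the median, then the matching 32-bin fine histogram is scanned
// to locate the exact value, giving dest = (firstBin << 5) + retval.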
__global__ void cuMedianFilterMultiBlock(PtrStepSzb src, PtrStepSzb dest, PtrStepSzi histPar, PtrStepSzi coarseHistGrid,int r, int medPos_)
{
__shared__ int HCoarse[8];
__shared__ int HCoarseScan[32];
__shared__ int HFine[8][32];
__shared__ int luc[8];
__shared__ int firstBin,countAtMed, retval;
int rows = src.rows, cols=src.cols;
int extraRowThread=rows%gridDim.x;
int doExtraRow=blockIdx.x<extraRowThread;
int startRow=0, stopRow=0;
int rowsPerBlock= rows/gridDim.x+doExtraRow;
// The following code partitions the work across the blocks. Some blocks will do one more row
// than other blocks; this code is responsible for that balancing.
if(doExtraRow){
startRow=rowsPerBlock*blockIdx.x;
stopRow=::min(rows, startRow+rowsPerBlock);
}
else{
startRow=(rowsPerBlock+1)*extraRowThread+(rowsPerBlock)*(blockIdx.x-extraRowThread);
stopRow=::min(rows, startRow+rowsPerBlock);
}
int* hist= histPar.data+cols*256*blockIdx.x;
int* histCoarse=coarseHistGrid.data +cols*8*blockIdx.x;
if (blockIdx.x==(gridDim.x-1))
stopRow=rows;
__syncthreads();
int initNeeded=0, initVal, initStartRow, initStopRow;
if(blockIdx.x==0){
initNeeded=1; initVal=r+2; initStartRow=1; initStopRow=r;
}
else if (startRow<(r+2)){
initNeeded=1; initVal=r+2-startRow; initStartRow=1; initStopRow=r+startRow;
}
else{
initNeeded=0; initVal=0; initStartRow=startRow-(r+1); initStopRow=r+startRow;
}
__syncthreads();
// In the original algorithm an initialization phase was required because part of the window was outside the
// image. In this parallel version, the initialization is required for all thread blocks for which part
// of the median filter window lies outside the image.
// For all threads in the block the same code will be executed.
if (initNeeded){
for (int j=threadIdx.x; j<(cols); j+=blockDim.x){
hist[j*256+src.ptr(0)[j]]=initVal;
histCoarse[j*8+(src.ptr(0)[j]>>5)]=initVal;
}
}
__syncthreads();
// For all remaining rows in the median filter, add the values to the histogram
for (int j=threadIdx.x; j<cols; j+=blockDim.x){
for(int i=initStartRow; i<initStopRow; i++){
int pos=::min(i,rows-1);
hist[j*256+src.ptr(pos)[j]]++;
histCoarse[j*8+(src.ptr(pos)[j]>>5)]++;
}
}
__syncthreads();
// Going through all the rows that the block is responsible for.
int inc=blockDim.x*256;
int incCoarse=blockDim.x*8;
for(int i=startRow; i< stopRow; i++){
// For every new row that is started the global histogram for the entire window is restarted.
histogramClear8(HCoarse);
lucClear8(luc);
// Computing some necessary indices
int possub=::max(0,i-r-1),posadd=::min(rows-1,i+r);
int histPos=threadIdx.x*256;
int histCoarsePos=threadIdx.x*8;
// Going through all the elements of a specific row. For each histogram, one value is taken out and
// one value is added.
for (int j=threadIdx.x; j<cols; j+=blockDim.x){
hist[histPos+ src.ptr(possub)[j] ]--;
hist[histPos+ src.ptr(posadd)[j] ]++;
histCoarse[histCoarsePos+ (src.ptr(possub)[j]>>5) ]--;
histCoarse[histCoarsePos+ (src.ptr(posadd)[j]>>5) ]++;
histPos+=inc;
histCoarsePos+=incCoarse;
}
__syncthreads();
histogramMultipleAdd8(HCoarse,histCoarse, 2*r+1);
int cols_m_1=cols-1;
for(int j=r;j<cols-r;j++){
int possub=::max(j-r,0);
int posadd=::min(j+1+r,cols_m_1);
int medPos=medPos_;
__syncthreads();
histogramMedianPar8LookupOnly(HCoarse,HCoarseScan,medPos, &firstBin,&countAtMed);
__syncthreads();
int loopIndex = luc[firstBin];
if (loopIndex <= (j-r))
{
histogramClear32(HFine[firstBin]);
for ( loopIndex = j-r; loopIndex < ::min(j+r+1,cols); loopIndex++ ){
histogramAdd32(HFine[firstBin], hist+(loopIndex*256+(firstBin<<5) ) );
}
}
else{
for ( ; loopIndex < (j+r+1);loopIndex++ ) {
histogramAddAndSub32(HFine[firstBin],
hist+(::min(loopIndex,cols_m_1)*256+(firstBin<<5) ),
hist+(::max(loopIndex-2*r-1,0)*256+(firstBin<<5) ) );
__syncthreads();
}
}
__syncthreads();
luc[firstBin] = loopIndex;
int leftOver=medPos-countAtMed;
if(leftOver>=0){
histogramMedianPar32LookupOnly(HFine[firstBin],HCoarseScan,leftOver,&retval,&countAtMed);
}
else retval=0;
__syncthreads();
if (threadIdx.x==0){
dest.ptr(i)[j]=(firstBin<<5) + retval;
}
histogramAddAndSub8(HCoarse, histCoarse+(int)(posadd<<3),histCoarse+(int)(possub<<3));
__syncthreads();
}
__syncthreads();
}
}
void medianFiltering_gpu(const PtrStepSzb src, PtrStepSzb dst, PtrStepSzi devHist, PtrStepSzi devCoarseHist,int kernel, int partitions,hipStream_t stream){
int medPos=2*kernel*kernel+2*kernel;
dim3 gridDim; gridDim.x=partitions;
dim3 blockDim; blockDim.x=32;
hipLaunchKernelGGL(( cuMedianFilterMultiBlock), dim3(gridDim),dim3(blockDim),0, stream, src, dst, devHist,devCoarseHist, kernel, medPos);
if (!stream)
cudaSafeCall( hipDeviceSynchronize() );
}
}}}
#endif
| 5802eb1a801897d8f1acc0788c4f7e68d88ad812.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// Details on this algorithm can be found in:
// Green, O., 2017. "Efficient scalable median filtering using histogram-based operations",
// IEEE Transactions on Image Processing, 27(5), pp.2217-2228.
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
namespace cv { namespace cuda { namespace device
{
__device__ void histogramAddAndSub8(int* H, const int * hist_colAdd,const int * hist_colSub){
int tx = threadIdx.x;
if (tx<8){
H[tx]+=hist_colAdd[tx]-hist_colSub[tx];
}
}
__device__ void histogramMultipleAdd8(int* H, const int * hist_col,int histCount){
int tx = threadIdx.x;
if (tx<8){
int temp=H[tx];
for(int i=0; i<histCount; i++)
temp+=hist_col[(i<<3)+tx];
H[tx]=temp;
}
}
__device__ void histogramClear8(int* H){
int tx = threadIdx.x;
if (tx<8){
H[tx]=0;
}
}
__device__ void histogramAdd8(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<8){
H[tx]+=hist_col[tx];
}
}
__device__ void histogramSub8(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<8){
H[tx]-=hist_col[tx];
}
}
__device__ void histogramAdd32(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<32){
H[tx]+=hist_col[tx];
}
}
__device__ void histogramAddAndSub32(int* H, const int * hist_colAdd,const int * hist_colSub){
int tx = threadIdx.x;
if (tx<32){
H[tx]+=hist_colAdd[tx]-hist_colSub[tx];
}
}
__device__ void histogramClear32(int* H){
int tx = threadIdx.x;
if (tx<32){
H[tx]=0;
}
}
__device__ void lucClear8(int* luc){
int tx = threadIdx.x;
if (tx<8)
luc[tx]=0;
}
#define scanNeighbor(array, range, index, threadIndex) \
{ \
int v = 0; \
if (index <= threadIndex && threadIndex < range) \
v = array[threadIndex] + array[threadIndex-index]; \
__syncthreads(); \
if (index <= threadIndex && threadIndex < range) \
array[threadIndex] = v; \
}
#define findMedian(array, range, threadIndex, result, count, position) \
if (threadIndex < range) \
{ \
if (array[threadIndex+1] > position && array[threadIndex] <= position) \
{ \
*result = threadIndex+1; \
*count = array[threadIndex]; \
} \
}
__device__ void histogramMedianPar8LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){
int tx=threadIdx.x;
*retval=*countAtMed=0;
if(tx<8){
Hscan[tx]=H[tx];
}
__syncthreads();
scanNeighbor(Hscan, 8, 1, tx);
__syncthreads();
scanNeighbor(Hscan, 8, 2, tx);
__syncthreads();
scanNeighbor(Hscan, 8, 4, tx);
__syncthreads();
findMedian(Hscan, 7, tx, retval, countAtMed, medPos);
}
__device__ void histogramMedianPar32LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){
int tx=threadIdx.x;
*retval=*countAtMed=0;
if(tx<32){
Hscan[tx]=H[tx];
}
__syncthreads();
scanNeighbor(Hscan, 32, 1, tx);
__syncthreads();
scanNeighbor(Hscan, 32, 2, tx);
__syncthreads();
scanNeighbor(Hscan, 32, 4, tx);
__syncthreads();
scanNeighbor(Hscan, 32, 8, tx);
__syncthreads();
scanNeighbor(Hscan, 32, 16, tx);
__syncthreads();
findMedian(Hscan, 31, tx, retval, countAtMed, medPos);
}
__global__ void cuMedianFilterMultiBlock(PtrStepSzb src, PtrStepSzb dest, PtrStepSzi histPar, PtrStepSzi coarseHistGrid,int r, int medPos_)
{
__shared__ int HCoarse[8];
__shared__ int HCoarseScan[32];
__shared__ int HFine[8][32];
__shared__ int luc[8];
__shared__ int firstBin,countAtMed, retval;
int rows = src.rows, cols=src.cols;
int extraRowThread=rows%gridDim.x;
int doExtraRow=blockIdx.x<extraRowThread;
int startRow=0, stopRow=0;
int rowsPerBlock= rows/gridDim.x+doExtraRow;
// The following code partitions the work among the blocks. Some blocks will do one row more
// than other blocks; this code is responsible for that balancing.
if(doExtraRow){
startRow=rowsPerBlock*blockIdx.x;
stopRow=::min(rows, startRow+rowsPerBlock);
}
else{
startRow=(rowsPerBlock+1)*extraRowThread+(rowsPerBlock)*(blockIdx.x-extraRowThread);
stopRow=::min(rows, startRow+rowsPerBlock);
}
int* hist= histPar.data+cols*256*blockIdx.x;
int* histCoarse=coarseHistGrid.data +cols*8*blockIdx.x;
if (blockIdx.x==(gridDim.x-1))
stopRow=rows;
__syncthreads();
int initNeeded=0, initVal, initStartRow, initStopRow;
if(blockIdx.x==0){
initNeeded=1; initVal=r+2; initStartRow=1; initStopRow=r;
}
else if (startRow<(r+2)){
initNeeded=1; initVal=r+2-startRow; initStartRow=1; initStopRow=r+startRow;
}
else{
initNeeded=0; initVal=0; initStartRow=startRow-(r+1); initStopRow=r+startRow;
}
__syncthreads();
// In the original algorithm an initialization phase was required because part of the window was outside the
// image. In this parallel version, the initialization is required for all thread blocks for which part
// of the median filter window is outside the image.
// For all threads in the block the same code will be executed.
if (initNeeded){
for (int j=threadIdx.x; j<(cols); j+=blockDim.x){
hist[j*256+src.ptr(0)[j]]=initVal;
histCoarse[j*8+(src.ptr(0)[j]>>5)]=initVal;
}
}
__syncthreads();
// For all remaining rows in the median filter, add the values to the histogram
for (int j=threadIdx.x; j<cols; j+=blockDim.x){
for(int i=initStartRow; i<initStopRow; i++){
int pos=::min(i,rows-1);
hist[j*256+src.ptr(pos)[j]]++;
histCoarse[j*8+(src.ptr(pos)[j]>>5)]++;
}
}
__syncthreads();
// Going through all the rows that the block is responsible for.
int inc=blockDim.x*256;
int incCoarse=blockDim.x*8;
for(int i=startRow; i< stopRow; i++){
// For every new row that is started the global histogram for the entire window is restarted.
histogramClear8(HCoarse);
lucClear8(luc);
// Computing some necessary indices
int possub=::max(0,i-r-1),posadd=::min(rows-1,i+r);
int histPos=threadIdx.x*256;
int histCoarsePos=threadIdx.x*8;
// Going through all the elements of a specific row. For each histogram, a value is taken out and
// one value is added.
for (int j=threadIdx.x; j<cols; j+=blockDim.x){
hist[histPos+ src.ptr(possub)[j] ]--;
hist[histPos+ src.ptr(posadd)[j] ]++;
histCoarse[histCoarsePos+ (src.ptr(possub)[j]>>5) ]--;
histCoarse[histCoarsePos+ (src.ptr(posadd)[j]>>5) ]++;
histPos+=inc;
histCoarsePos+=incCoarse;
}
__syncthreads();
histogramMultipleAdd8(HCoarse,histCoarse, 2*r+1);
int cols_m_1=cols-1;
for(int j=r;j<cols-r;j++){
int possub=::max(j-r,0);
int posadd=::min(j+1+r,cols_m_1);
int medPos=medPos_;
__syncthreads();
histogramMedianPar8LookupOnly(HCoarse,HCoarseScan,medPos, &firstBin,&countAtMed);
__syncthreads();
int loopIndex = luc[firstBin];
if (loopIndex <= (j-r))
{
histogramClear32(HFine[firstBin]);
for ( loopIndex = j-r; loopIndex < ::min(j+r+1,cols); loopIndex++ ){
histogramAdd32(HFine[firstBin], hist+(loopIndex*256+(firstBin<<5) ) );
}
}
else{
for ( ; loopIndex < (j+r+1);loopIndex++ ) {
histogramAddAndSub32(HFine[firstBin],
hist+(::min(loopIndex,cols_m_1)*256+(firstBin<<5) ),
hist+(::max(loopIndex-2*r-1,0)*256+(firstBin<<5) ) );
__syncthreads();
}
}
__syncthreads();
luc[firstBin] = loopIndex;
int leftOver=medPos-countAtMed;
if(leftOver>=0){
histogramMedianPar32LookupOnly(HFine[firstBin],HCoarseScan,leftOver,&retval,&countAtMed);
}
else retval=0;
__syncthreads();
if (threadIdx.x==0){
dest.ptr(i)[j]=(firstBin<<5) + retval;
}
histogramAddAndSub8(HCoarse, histCoarse+(int)(posadd<<3),histCoarse+(int)(possub<<3));
__syncthreads();
}
__syncthreads();
}
}
void medianFiltering_gpu(const PtrStepSzb src, PtrStepSzb dst, PtrStepSzi devHist, PtrStepSzi devCoarseHist,int kernel, int partitions,cudaStream_t stream){
int medPos=2*kernel*kernel+2*kernel;
dim3 gridDim; gridDim.x=partitions;
dim3 blockDim; blockDim.x=32;
cuMedianFilterMultiBlock<<<gridDim,blockDim,0, stream>>>(src, dst, devHist,devCoarseHist, kernel, medPos);
if (!stream)
cudaSafeCall( cudaDeviceSynchronize() );
}
}}}
#endif
|
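The pair above ends at the kernel launch, which is where hipify rewrites the most: CUDA's <<<grid, block, shmem, stream>>> syntax becomes the hipLaunchKernelGGL macro with the same launch parameters moved into the argument list. A minimal sketch of that mapping on a toy kernel (scale_kernel, launch_scale, and the block size are illustrative, not taken from the dataset):
#include <hip/hip_runtime.h>
__global__ void scale_kernel(float *x, int n, float k) {
    // One thread per element, same indexing idiom as the kernels above.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= k;
}
void launch_scale(float *d_x, int n, float k, hipStream_t stream) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA form: scale_kernel<<<grid, block, 0, stream>>>(d_x, n, k);
    // HIP form emitted by hipify:
    hipLaunchKernelGGL(scale_kernel, grid, block, 0, stream, d_x, n, k);
}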
f66a6112bb88d99615b4e5e47ed824fd40084b16.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2017 the arraydiff authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common_hip.cuh"
#include <hip/hip_runtime_api.h>
#include <math_constants.h>
#include <stdint.h>
#include <stdlib.h>
#define OFFSET_BANK(idx) ({ __typeof__ (idx) _idx = idx; ((_idx) + ((_idx) / 32)); })
__global__ void blockreduce_argmax_kernel(
uint32_t block_dim,
uint32_t num_blocks,
const float *xs,
float *x_max,
uint32_t *x_argmax)
{
__shared__ float cache[1024 + 32];
__shared__ uint32_t cache_idx[1024 + 32];
uint32_t tid = threadIdx.x;
uint32_t block = blockIdx.x;
uint32_t i = tid + block * block_dim;
if (tid < block_dim && block < num_blocks) {
cache[OFFSET_BANK(tid)] = xs[i];
} else {
cache[OFFSET_BANK(tid)] = -CUDART_INF_F;
}
cache_idx[OFFSET_BANK(tid)] = tid;
__syncthreads();
for (uint32_t s = 1; s < blockDim.x; s *= 2) {
if (tid < block_dim && block < num_blocks) {
if (tid % (2*s) == 0 && (tid + s) < block_dim && cache[OFFSET_BANK(tid)] < cache[OFFSET_BANK(tid + s)]) {
cache[OFFSET_BANK(tid)] = cache[OFFSET_BANK(tid + s)];
cache_idx[OFFSET_BANK(tid)] = cache_idx[OFFSET_BANK(tid + s)];
}
}
__syncthreads();
}
if (tid < block_dim && block < num_blocks) {
if (tid == 0) {
x_max[block] = cache[0];
if (NULL != x_argmax) {
x_argmax[block] = cache_idx[0];
}
}
}
}
extern "C" void arraydiff_cuda_kernel_blockreduce_max_argmax_f32(
size_t block_dim,
size_t num_blocks,
const float *xs,
float *xs_max,
uint32_t *xs_argmax,
hipStream_t stream)
{
// XXX: assert(block_dim <= 1024);
// FIXME(20151022): could make more efficient use of blocks but w/e.
hipLaunchKernelGGL(( blockreduce_argmax_kernel), dim3(num_blocks), dim3(1024), 0, stream,
block_dim, num_blocks, xs, xs_max, xs_argmax);
}
/*__global__ void blockreduce_sum_kernel(
uint32_t block_dim,
uint32_t num_blocks,
const float *xs,
float *xs_sum)
{
__shared__ float cache[1024 + 32];
uint32_t tid = threadIdx.x;
uint32_t block = blockIdx.x;
uint32_t i = tid + block * block_dim;
if (tid < block_dim && block < num_blocks) {
cache[OFFSET_BANK(tid)] = xs[i];
} else {
cache[OFFSET_BANK(tid)] = 0.0f;
}
__syncthreads();
for (uint32_t s = 1; s < blockDim.x; s *= 2) {
if (tid < block_dim && block < num_blocks) {
if (tid % (2*s) == 0 && (tid + s) < block_dim) {
cache[OFFSET_BANK(tid)] += cache[OFFSET_BANK(tid + s)];
}
}
__syncthreads();
}
if (tid < block_dim && block < num_blocks) {
if (tid == 0) {
xs_sum[block] = cache[0];
}
}
}*/
__global__ void blockreduce_sum_kernel(
uint32_t block_dim,
uint32_t num_blocks,
const float *xs,
float *xs_sum)
{
__shared__ float cache[1024];
uint32_t tid = threadIdx.x;
uint32_t block = blockIdx.x;
uint32_t idx = tid + block_dim * block;
if (tid < block_dim && block < num_blocks) {
cache[tid] = xs[idx];
} else {
cache[tid] = 0.0f;
}
__syncthreads();
threadblock1024_reduce_sum_f32(cache);
if (tid < block_dim && block < num_blocks) {
if (tid == 0) {
xs_sum[block] = cache[0];
}
}
}
extern "C" void arraydiff_cuda_kernel_blockreduce_sum_f32(
size_t block_dim,
size_t num_blocks,
const float *xs,
float *xs_sum,
hipStream_t stream)
{
// XXX: assert(block_dim <= 1024);
// FIXME(20151022): could make more efficient use of blocks but w/e.
hipLaunchKernelGGL(( blockreduce_sum_kernel), dim3(num_blocks), dim3(1024), 0, stream,
block_dim, num_blocks, xs, xs_sum);
}
__global__ void reduce_index_fwd_f32_kernel(
uint32_t dim,
uint32_t batch_sz,
const float *x,
const uint32_t *index,
float *y)
{
uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < batch_sz) {
y[idx] = x[index[idx] + dim * idx];
}
}
extern "C" void arraydiff_cuda_kernel_reduce_index_fwd_f32(
size_t dim,
size_t batch_sz,
const float *x,
const uint32_t *index,
float *y,
hipStream_t stream)
{
size_t n = batch_sz;
hipLaunchKernelGGL(( reduce_index_fwd_f32_kernel), dim3((n+1024-1)/1024), dim3(1024), 0, stream,
dim, batch_sz, x, index, y);
}
__global__ void reduce_index_bwd_f32_kernel(
uint32_t dim,
uint32_t batch_sz,
const float *dy,
const uint32_t *index,
float *dx)
{
uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < batch_sz) {
dx[index[idx] + dim * idx] += dy[idx];
}
}
extern "C" void arraydiff_cuda_kernel_reduce_index_bwd_f32(
size_t dim,
size_t batch_sz,
const float *dy,
const uint32_t *index,
float *dx,
hipStream_t stream)
{
size_t n = batch_sz;
hipLaunchKernelGGL(( reduce_index_bwd_f32_kernel), dim3((n+1024-1)/1024), dim3(1024), 0, stream,
dim, batch_sz, dy, index, dx);
}
| f66a6112bb88d99615b4e5e47ed824fd40084b16.cu | /*
Copyright 2017 the arraydiff authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common.cuh"
#include <cuda_runtime_api.h>
#include <math_constants.h>
#include <stdint.h>
#include <stdlib.h>
#define OFFSET_BANK(idx) ({ __typeof__ (idx) _idx = idx; ((_idx) + ((_idx) / 32)); })
__global__ void blockreduce_argmax_kernel(
uint32_t block_dim,
uint32_t num_blocks,
const float *xs,
float *x_max,
uint32_t *x_argmax)
{
__shared__ float cache[1024 + 32];
__shared__ uint32_t cache_idx[1024 + 32];
uint32_t tid = threadIdx.x;
uint32_t block = blockIdx.x;
uint32_t i = tid + block * block_dim;
if (tid < block_dim && block < num_blocks) {
cache[OFFSET_BANK(tid)] = xs[i];
} else {
cache[OFFSET_BANK(tid)] = -CUDART_INF_F;
}
cache_idx[OFFSET_BANK(tid)] = tid;
__syncthreads();
for (uint32_t s = 1; s < blockDim.x; s *= 2) {
if (tid < block_dim && block < num_blocks) {
if (tid % (2*s) == 0 && (tid + s) < block_dim && cache[OFFSET_BANK(tid)] < cache[OFFSET_BANK(tid + s)]) {
cache[OFFSET_BANK(tid)] = cache[OFFSET_BANK(tid + s)];
cache_idx[OFFSET_BANK(tid)] = cache_idx[OFFSET_BANK(tid + s)];
}
}
__syncthreads();
}
if (tid < block_dim && block < num_blocks) {
if (tid == 0) {
x_max[block] = cache[0];
if (NULL != x_argmax) {
x_argmax[block] = cache_idx[0];
}
}
}
}
extern "C" void arraydiff_cuda_kernel_blockreduce_max_argmax_f32(
size_t block_dim,
size_t num_blocks,
const float *xs,
float *xs_max,
uint32_t *xs_argmax,
cudaStream_t stream)
{
// XXX: assert(block_dim <= 1024);
// FIXME(20151022): could make more efficient use of blocks but w/e.
blockreduce_argmax_kernel<<<num_blocks, 1024, 0, stream>>>(
block_dim, num_blocks, xs, xs_max, xs_argmax);
}
/*__global__ void blockreduce_sum_kernel(
uint32_t block_dim,
uint32_t num_blocks,
const float *xs,
float *xs_sum)
{
__shared__ float cache[1024 + 32];
uint32_t tid = threadIdx.x;
uint32_t block = blockIdx.x;
uint32_t i = tid + block * block_dim;
if (tid < block_dim && block < num_blocks) {
cache[OFFSET_BANK(tid)] = xs[i];
} else {
cache[OFFSET_BANK(tid)] = 0.0f;
}
__syncthreads();
for (uint32_t s = 1; s < blockDim.x; s *= 2) {
if (tid < block_dim && block < num_blocks) {
if (tid % (2*s) == 0 && (tid + s) < block_dim) {
cache[OFFSET_BANK(tid)] += cache[OFFSET_BANK(tid + s)];
}
}
__syncthreads();
}
if (tid < block_dim && block < num_blocks) {
if (tid == 0) {
xs_sum[block] = cache[0];
}
}
}*/
__global__ void blockreduce_sum_kernel(
uint32_t block_dim,
uint32_t num_blocks,
const float *xs,
float *xs_sum)
{
__shared__ float cache[1024];
uint32_t tid = threadIdx.x;
uint32_t block = blockIdx.x;
uint32_t idx = tid + block_dim * block;
if (tid < block_dim && block < num_blocks) {
cache[tid] = xs[idx];
} else {
cache[tid] = 0.0f;
}
__syncthreads();
threadblock1024_reduce_sum_f32(cache);
if (tid < block_dim && block < num_blocks) {
if (tid == 0) {
xs_sum[block] = cache[0];
}
}
}
extern "C" void arraydiff_cuda_kernel_blockreduce_sum_f32(
size_t block_dim,
size_t num_blocks,
const float *xs,
float *xs_sum,
cudaStream_t stream)
{
// XXX: assert(block_dim <= 1024);
// FIXME(20151022): could make more efficient use of blocks but w/e.
blockreduce_sum_kernel<<<num_blocks, 1024, 0, stream>>>(
block_dim, num_blocks, xs, xs_sum);
}
__global__ void reduce_index_fwd_f32_kernel(
uint32_t dim,
uint32_t batch_sz,
const float *x,
const uint32_t *index,
float *y)
{
uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < batch_sz) {
y[idx] = x[index[idx] + dim * idx];
}
}
extern "C" void arraydiff_cuda_kernel_reduce_index_fwd_f32(
size_t dim,
size_t batch_sz,
const float *x,
const uint32_t *index,
float *y,
cudaStream_t stream)
{
size_t n = batch_sz;
reduce_index_fwd_f32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
dim, batch_sz, x, index, y);
}
__global__ void reduce_index_bwd_f32_kernel(
uint32_t dim,
uint32_t batch_sz,
const float *dy,
const uint32_t *index,
float *dx)
{
uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < batch_sz) {
dx[index[idx] + dim * idx] += dy[idx];
}
}
extern "C" void arraydiff_cuda_kernel_reduce_index_bwd_f32(
size_t dim,
size_t batch_sz,
const float *dy,
const uint32_t *index,
float *dx,
cudaStream_t stream)
{
size_t n = batch_sz;
reduce_index_bwd_f32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
dim, batch_sz, dy, index, dx);
}
|
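Outside of launches, the arraydiff pair above is essentially a one-for-one rename of runtime identifiers: the header <cuda_runtime_api.h> becomes <hip/hip_runtime_api.h>, and cudaStream_t becomes hipStream_t while keeping the same position in every signature. A hedged sketch of that rename on a small wrapper (copy_rows_f32 is a hypothetical name, not from the dataset):
#include <hip/hip_runtime_api.h>   // CUDA side would include <cuda_runtime_api.h>
#include <stddef.h>
extern "C" void copy_rows_f32(const float *src, float *dst, size_t n,
                              hipStream_t stream) {  // cudaStream_t on the CUDA side
    // Device-to-device copy queued on the caller's stream; the call and enum names
    // are renamed by hipify but keep an identical argument order.
    hipMemcpyAsync(dst, src, n * sizeof(float), hipMemcpyDeviceToDevice, stream);
}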
aa2c4a1d6941bba9683acff3aaa662d8ff8fbf32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
__global__ void ele_mult_kernel(const float *matA, const float *matB, float *output, size_t size) {
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size) return;
output[ind] = matA[ind] * matB[ind];
}
int DLGpuMatrixElementwiseMultiply(const DLArrayHandle matA,
const DLArrayHandle matB,
DLArrayHandle output, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
/* TODO: Your code here */
size_t size = 1;
for (index_t i = 0; i < matA->ndim; i++) {
size *= matA->shape[i];
}
dim3 blocks;
dim3 threads;
float *output_data = (float *)output->data;
const float *matA_data = (const float *)matA->data;
const float *matB_data = (const float *)matB->data;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle)
hipLaunchKernelGGL(( ele_mult_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, matA_data, matB_data, output_data, size);
else
hipLaunchKernelGGL(( ele_mult_kernel), dim3(blocks), dim3(threads), 0, 0, matA_data, matB_data, output_data, size);
if(p != NULL){
int size_a = 1, size_b = 1, size_c = 1;
for(int i = 0; i < matA -> ndim; i++)
size_a *= matA -> shape[i];
for(int i = 0; i < matB -> ndim; i++)
size_b *= matB -> shape[i];
for(int i = 0; i < output -> ndim; i++)
size_c *= output -> shape[i];
p -> input_memory = 1.0 * (size_a + size_b) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_c * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 0;
}
return 0;
} | aa2c4a1d6941bba9683acff3aaa662d8ff8fbf32.cu | #include "gpu_runtime.h"
__global__ void ele_mult_kernel(const float *matA, const float *matB, float *output, size_t size) {
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size) return;
output[ind] = matA[ind] * matB[ind];
}
int DLGpuMatrixElementwiseMultiply(const DLArrayHandle matA,
const DLArrayHandle matB,
DLArrayHandle output, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
/* TODO: Your code here */
size_t size = 1;
for (index_t i = 0; i < matA->ndim; i++) {
size *= matA->shape[i];
}
dim3 blocks;
dim3 threads;
float *output_data = (float *)output->data;
const float *matA_data = (const float *)matA->data;
const float *matB_data = (const float *)matB->data;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle)
ele_mult_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(matA_data, matB_data, output_data, size);
else
ele_mult_kernel<<<blocks, threads>>>(matA_data, matB_data, output_data, size);
if(p != NULL){
int size_a = 1, size_b = 1, size_c = 1;
for(int i = 0; i < matA -> ndim; i++)
size_a *= matA -> shape[i];
for(int i = 0; i < matB -> ndim; i++)
size_b *= matB -> shape[i];
for(int i = 0; i < output -> ndim; i++)
size_c *= output -> shape[i];
p -> input_memory = 1.0 * (size_a + size_b) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_c * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 0;
}
return 0;
} |
1560439a9ac56612ffa9ffd04641603286e5d15a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* raic - RichieSam's Adventures in Cuda
*
* raic is the legal property of Adrian Astley
* Copyright Adrian Astley 2015
*/
#include "common/typedefs.h"
#include <stdio.h>
__global__ void cuda_kernel_texture_2d(unsigned char *surface, int width, int height, size_t pitch, float t) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) {
return;
}
// get a pointer to the pixel at (x,y)
float *pixel = (float *)(surface + y * pitch) + 4 * x;
// populate it
float value_x = 0.5f + 0.5f * cos(t + 10.0f * ((2.0f * x) / width - 1.0f));
float value_y = 0.5f + 0.5f * cos(t + 10.0f * ((2.0f * y) / height - 1.0f));
pixel[0] = 0.5 * pixel[0] + 0.5 * pow(value_x, 3.0f); // red
pixel[1] = 0.5 * pixel[1] + 0.5 * pow(value_y, 3.0f); // green
pixel[2] = 0.5f + 0.5f * cos(t); // blue
pixel[3] = 1.0f; // alpha
}
void RenderFrame(void *buffer, uint width, uint height, size_t pitch, float t) {
hipError_t error = hipSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((width + Db.x - 1) / Db.x, (height + Db.y - 1) / Db.y);
hipLaunchKernelGGL(( cuda_kernel_texture_2d), dim3(Dg), dim3(Db), 0, 0, (unsigned char *)buffer, width, height, pitch, t);
error = hipGetLastError();
if (error != hipSuccess) {
printf("cuda_kernel_texture_2d() failed to launch error = %d\n", error);
}
} | 1560439a9ac56612ffa9ffd04641603286e5d15a.cu | /* raic - RichieSam's Adventures in Cuda
*
* raic is the legal property of Adrian Astley
* Copyright Adrian Astley 2015
*/
#include "common/typedefs.h"
#include <stdio.h>
__global__ void cuda_kernel_texture_2d(unsigned char *surface, int width, int height, size_t pitch, float t) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) {
return;
}
// get a pointer to the pixel at (x,y)
float *pixel = (float *)(surface + y * pitch) + 4 * x;
// populate it
float value_x = 0.5f + 0.5f * cos(t + 10.0f * ((2.0f * x) / width - 1.0f));
float value_y = 0.5f + 0.5f * cos(t + 10.0f * ((2.0f * y) / height - 1.0f));
pixel[0] = 0.5 * pixel[0] + 0.5 * pow(value_x, 3.0f); // red
pixel[1] = 0.5 * pixel[1] + 0.5 * pow(value_y, 3.0f); // green
pixel[2] = 0.5f + 0.5f * cos(t); // blue
pixel[3] = 1.0f; // alpha
}
void RenderFrame(void *buffer, uint width, uint height, size_t pitch, float t) {
cudaError_t error = cudaSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((width + Db.x - 1) / Db.x, (height + Db.y - 1) / Db.y);
cuda_kernel_texture_2d<<<Dg, Db>>>((unsigned char *)buffer, width, height, pitch, t);
error = cudaGetLastError();
if (error != cudaSuccess) {
printf("cuda_kernel_texture_2d() failed to launch error = %d\n", error);
}
} |
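The raic pair closes with the launch error check, which translates the same way: cudaError_t, cudaSuccess, and cudaGetLastError() map to hipError_t, hipSuccess, and hipGetLastError(). A minimal sketch of that check factored into a helper (check_launch is a hypothetical name, not from the dataset):
#include <hip/hip_runtime.h>
#include <stdio.h>
static void check_launch(const char *what) {
    // CUDA form: cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) ...
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        printf("%s failed to launch, error = %d\n", what, (int)err);
    }
}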