hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M)
---|---|---|---|
e2dc6b7d8327ee73d95c6ab71cc38276161dfcf7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/boolean_unmask_ops.h"
namespace caffe2 {
namespace {
__global__ void ComputeIndicesKernel(
const int numMasks,
const int maskSize,
int* indices,
bool* const masks[]) {
CUDA_1D_KERNEL_LOOP(i, maskSize) {
for (int j = 0; j < numMasks; ++j) {
if (masks[j][i]) {
indices[i] = j;
return;
}
}
CUDA_KERNEL_ASSERT(false);
}
}
__global__ void FillValuesKernel(
const int numMasks,
const int maskSize,
const size_t itemSize,
const int* indices,
char* const values[],
int* valueSizes,
char* dest) {
CUDA_1D_KERNEL_LOOP(j, numMasks) {
int k = 0;
for (int i = 0; i < maskSize; ++i) {
if (indices[i] == j) {
for (int h = 0; h < itemSize; ++h) {
dest[i * itemSize + h] = values[j][k * itemSize + h];
}
++k;
}
}
CUDA_KERNEL_ASSERT(valueSizes[j] == k);
}
}
} // namespace
template <>
class BooleanUnmaskOp<CUDAContext> final : public Operator<CUDAContext> {
public:
BooleanUnmaskOp(const OperatorDef& def, Workspace* ws)
: Operator<CUDAContext>(def, ws) {}
bool RunOnDevice() override {
int maskSize = Input(0).size();
int numMasks = InputSize() / 2;
const auto& meta = Input(1).meta();
auto* out = Output(0);
out->Resize(maskSize);
auto* dest = (char*)out->raw_mutable_data(meta);
ReinitializeTensor(&hostMasks_, {numMasks}, at::dtype<bool*>().device(CPU));
auto* hostMasksData = hostMasks_.mutable_data<bool*>();
ReinitializeTensor(
&hostValues_, {numMasks}, at::dtype<char*>().device(CPU));
auto* hostValuesData = hostValues_.mutable_data<char*>();
ReinitializeTensor(
&hostValueSizes_, {numMasks}, at::dtype<int>().device(CPU));
auto* hostValueSizesData = hostValueSizes_.mutable_data<int>();
for (int i = 0; i < numMasks; ++i) {
auto& mask = Input(i * 2);
CAFFE_ENFORCE_EQ(mask.ndim(), 1);
CAFFE_ENFORCE_EQ(mask.size(), maskSize);
hostMasksData[i] = const_cast<bool*>(mask.data<bool>());
const auto& value = Input(i * 2 + 1);
CAFFE_ENFORCE_EQ(value.ndim(), 1);
hostValuesData[i] = (char*)value.raw_data();
hostValueSizesData[i] = value.size();
}
masks_.CopyFrom(hostMasks_);
values_.CopyFrom(hostValues_);
valueSizes_.CopyFrom(hostValueSizes_);
ReinitializeTensor(&indices_, {maskSize}, at::dtype<int>().device(CUDA));
auto* indicesData = indices_.mutable_data<int>();
hipLaunchKernelGGL(( ComputeIndicesKernel),
dim3(min(maskSize, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
numMasks, maskSize, indicesData, masks_.data<bool*>());
auto* valueSizesData = valueSizes_.mutable_data<int>();
hipLaunchKernelGGL(( FillValuesKernel),
dim3(min(numMasks, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
numMasks,
maskSize,
meta.itemsize(),
indicesData,
values_.data<char*>(),
valueSizesData,
dest);
return true;
}
private:
Tensor indices_;
Tensor masks_{CUDA};
Tensor values_{CUDA};
Tensor valueSizes_{CUDA};
Tensor hostMasks_;
Tensor hostValues_;
Tensor hostValueSizes_;
};
REGISTER_CUDA_OPERATOR(BooleanUnmask, BooleanUnmaskOp<CUDAContext>);
} // caffe2
| e2dc6b7d8327ee73d95c6ab71cc38276161dfcf7.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/boolean_unmask_ops.h"
namespace caffe2 {
namespace {
__global__ void ComputeIndicesKernel(
const int numMasks,
const int maskSize,
int* indices,
bool* const masks[]) {
CUDA_1D_KERNEL_LOOP(i, maskSize) {
for (int j = 0; j < numMasks; ++j) {
if (masks[j][i]) {
indices[i] = j;
return;
}
}
CUDA_KERNEL_ASSERT(false);
}
}
__global__ void FillValuesKernel(
const int numMasks,
const int maskSize,
const size_t itemSize,
const int* indices,
char* const values[],
int* valueSizes,
char* dest) {
CUDA_1D_KERNEL_LOOP(j, numMasks) {
int k = 0;
for (int i = 0; i < maskSize; ++i) {
if (indices[i] == j) {
for (int h = 0; h < itemSize; ++h) {
dest[i * itemSize + h] = values[j][k * itemSize + h];
}
++k;
}
}
CUDA_KERNEL_ASSERT(valueSizes[j] == k);
}
}
} // namespace
template <>
class BooleanUnmaskOp<CUDAContext> final : public Operator<CUDAContext> {
public:
BooleanUnmaskOp(const OperatorDef& def, Workspace* ws)
: Operator<CUDAContext>(def, ws) {}
bool RunOnDevice() override {
int maskSize = Input(0).size();
int numMasks = InputSize() / 2;
const auto& meta = Input(1).meta();
auto* out = Output(0);
out->Resize(maskSize);
auto* dest = (char*)out->raw_mutable_data(meta);
ReinitializeTensor(&hostMasks_, {numMasks}, at::dtype<bool*>().device(CPU));
auto* hostMasksData = hostMasks_.mutable_data<bool*>();
ReinitializeTensor(
&hostValues_, {numMasks}, at::dtype<char*>().device(CPU));
auto* hostValuesData = hostValues_.mutable_data<char*>();
ReinitializeTensor(
&hostValueSizes_, {numMasks}, at::dtype<int>().device(CPU));
auto* hostValueSizesData = hostValueSizes_.mutable_data<int>();
for (int i = 0; i < numMasks; ++i) {
auto& mask = Input(i * 2);
CAFFE_ENFORCE_EQ(mask.ndim(), 1);
CAFFE_ENFORCE_EQ(mask.size(), maskSize);
hostMasksData[i] = const_cast<bool*>(mask.data<bool>());
const auto& value = Input(i * 2 + 1);
CAFFE_ENFORCE_EQ(value.ndim(), 1);
hostValuesData[i] = (char*)value.raw_data();
hostValueSizesData[i] = value.size();
}
masks_.CopyFrom(hostMasks_);
values_.CopyFrom(hostValues_);
valueSizes_.CopyFrom(hostValueSizes_);
ReinitializeTensor(&indices_, {maskSize}, at::dtype<int>().device(CUDA));
auto* indicesData = indices_.mutable_data<int>();
ComputeIndicesKernel<<<
min(maskSize, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
numMasks, maskSize, indicesData, masks_.data<bool*>());
auto* valueSizesData = valueSizes_.mutable_data<int>();
FillValuesKernel<<<
min(numMasks, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
numMasks,
maskSize,
meta.itemsize(),
indicesData,
values_.data<char*>(),
valueSizesData,
dest);
return true;
}
private:
Tensor indices_;
Tensor masks_{CUDA};
Tensor values_{CUDA};
Tensor valueSizes_{CUDA};
Tensor hostMasks_;
Tensor hostValues_;
Tensor hostValueSizes_;
};
REGISTER_CUDA_OPERATOR(BooleanUnmask, BooleanUnmaskOp<CUDAContext>);
} // caffe2
|
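The pair above shows the core rewrite hipify performs beyond renaming cuda* APIs to hip*: the triple-chevron kernel launch becomes a hipLaunchKernelGGL call, with the grid and block sizes wrapped in dim3 and the shared-memory size and stream passed as ordinary arguments. A minimal sketch of that mapping on a toy kernel (the kernel and variable names here are illustrative, not taken from the files above):
__global__ void scaleKernel(float* data, float alpha, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= alpha;  // simple elementwise scale
}
// CUDA launch syntax:
//   scaleKernel<<<numBlocks, threadsPerBlock, 0, stream>>>(d_data, 2.0f, n);
// Equivalent HIP launch produced by hipify:
//   hipLaunchKernelGGL((scaleKernel),
//                      dim3(numBlocks), dim3(threadsPerBlock),
//                      0, stream,
//                      d_data, 2.0f, n);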
7635d1cf66d7cb2eab5bcae38b4aeff362f4e2ec.hip | // !!! This is a file automatically generated by hipify!!!
#include <ctime>
#include <stdio.h>
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define M 12
#define N 6
#define CUDA_CALL(x) {if((x) != hipSuccess){ \
printf("CUDA error at %s:%d\n", __FILE__, __LINE__); \
printf(" %s\n", hipGetErrorString(hipGetLastError())); \
exit(EXIT_FAILURE); }}
void matrixSumGPU(float* h_z, const float* h_x, const float* h_y);
void matrixSumCPU(float h_z[M][N], float h_x[M][N], float h_y[M][N]);
__global__ void matrixSumKernel(float d_z[][N], float d_x[][N], float d_y[][N])
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
d_z[idx][idy] = d_x[idx][idy] + d_y[idx][idy];
}
void matrixSumCPU(float h_z[M][N], float h_x[M][N], float h_y[M][N])
{
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
{
h_z[i][j] = h_x[i][j] + h_y[i][j];
}
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("%0.2f ms in CPU.\n", elapsedTime);
}
void matrixSumGPU(float* h_z, const float* h_x, const float* h_y)
{
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
float(*d_x)[N], (*d_y)[N], (*d_z)[N];
CUDA_CALL(hipMalloc((void**)&d_x, (M*N) * sizeof(int)));
CUDA_CALL(hipMalloc((void**)&d_y, (M*N) * sizeof(int)));
CUDA_CALL(hipMalloc((void**)&d_z, (M*N) * sizeof(int)));
CUDA_CALL(hipMemcpy(d_x, h_x, (M*N) * sizeof(int), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_y, h_y, (M*N) * sizeof(int), hipMemcpyHostToDevice));
int numBlocks = 1;
dim3 threadsPerBlock(M, N);
matrixSumKernel << <numBlocks, threadsPerBlock >> >(d_z, d_x, d_y);
CUDA_CALL(hipMemcpy(h_z, d_z, (M*N) * sizeof(int), hipMemcpyDeviceToHost));
CUDA_CALL(hipFree(d_x));
CUDA_CALL(hipFree(d_y));
CUDA_CALL(hipFree(d_z));
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("%0.2f ms in GPU.\n", elapsedTime);
}
int main() {
float h_x[M][N], h_y[M][N], h_z[M][N];
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
{
h_z[i][j] = 0;
h_x[i][j] = 1;
h_y[i][j] = 2;
}
}
matrixSumGPU(*h_z, *h_x, *h_y);
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
{
printf("%.2f\t", h_z[i][j]);
}
printf("\n");
}
printf("\n");
matrixSumCPU(h_z, h_x, h_y);
printf("Press enter key to return...");
getchar();
return 0;
}
| 7635d1cf66d7cb2eab5bcae38b4aeff362f4e2ec.cu | #include <ctime>
#include <stdio.h>
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define M 12
#define N 6
#define CUDA_CALL(x) {if((x) != cudaSuccess){ \
printf("CUDA error at %s:%d\n", __FILE__, __LINE__); \
printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
exit(EXIT_FAILURE); }}
void matrixSumGPU(float* h_z, const float* h_x, const float* h_y);
void matrixSumCPU(float h_z[M][N], float h_x[M][N], float h_y[M][N]);
__global__ void matrixSumKernel(float d_z[][N], float d_x[][N], float d_y[][N])
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
d_z[idx][idy] = d_x[idx][idy] + d_y[idx][idy];
}
void matrixSumCPU(float h_z[M][N], float h_x[M][N], float h_y[M][N])
{
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
{
h_z[i][j] = h_x[i][j] + h_y[i][j];
}
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("%0.2f ms in CPU.\n", elapsedTime);
}
void matrixSumGPU(float* h_z, const float* h_x, const float* h_y)
{
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float(*d_x)[N], (*d_y)[N], (*d_z)[N];
CUDA_CALL(cudaMalloc((void**)&d_x, (M*N) * sizeof(int)));
CUDA_CALL(cudaMalloc((void**)&d_y, (M*N) * sizeof(int)));
CUDA_CALL(cudaMalloc((void**)&d_z, (M*N) * sizeof(int)));
CUDA_CALL(cudaMemcpy(d_x, h_x, (M*N) * sizeof(int), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_y, h_y, (M*N) * sizeof(int), cudaMemcpyHostToDevice));
int numBlocks = 1;
dim3 threadsPerBlock(M, N);
matrixSumKernel << <numBlocks, threadsPerBlock >> >(d_z, d_x, d_y);
CUDA_CALL(cudaMemcpy(h_z, d_z, (M*N) * sizeof(int), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaFree(d_x));
CUDA_CALL(cudaFree(d_y));
CUDA_CALL(cudaFree(d_z));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("%0.2f ms in GPU.\n", elapsedTime);
}
int main() {
float h_x[M][N], h_y[M][N], h_z[M][N];
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
{
h_z[i][j] = 0;
h_x[i][j] = 1;
h_y[i][j] = 2;
}
}
matrixSumGPU(*h_z, *h_x, *h_y);
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
{
printf("%.2f\t", h_z[i][j]);
}
printf("\n");
}
printf("\n");
matrixSumCPU(h_z, h_x, h_y);
printf("Press enter key to return...");
getchar();
return 0;
}
|
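A note on the pair above: the buffers are float arrays, yet every allocation and copy is sized with sizeof(int), which only works because int and float both happen to be 4 bytes on these platforms, and the kernel launch itself is never checked for errors. A minimal sketch of the safer pattern, sized from the element type and reusing the M, N, CUDA_CALL and matrixSumKernel definitions above (the helper name is illustrative):
static void matrixSumGPUChecked(float* h_z, const float* h_x, const float* h_y) {
    float (*d_x)[N], (*d_y)[N], (*d_z)[N];
    size_t bytes = (size_t)M * N * sizeof(float);   // sized from the element type, not sizeof(int)
    CUDA_CALL(cudaMalloc((void**)&d_x, bytes));
    CUDA_CALL(cudaMalloc((void**)&d_y, bytes));
    CUDA_CALL(cudaMalloc((void**)&d_z, bytes));
    CUDA_CALL(cudaMemcpy(d_x, h_x, bytes, cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpy(d_y, h_y, bytes, cudaMemcpyHostToDevice));
    matrixSumKernel<<<1, dim3(M, N)>>>(d_z, d_x, d_y);
    CUDA_CALL(cudaGetLastError());                  // catches launch-configuration errors
    CUDA_CALL(cudaMemcpy(h_z, d_z, bytes, cudaMemcpyDeviceToHost));
    CUDA_CALL(cudaFree(d_x));
    CUDA_CALL(cudaFree(d_y));
    CUDA_CALL(cudaFree(d_z));
}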
46ab255d41a365fbcda8516d74e23be576a6f75a.hip | // !!! This is a file automatically generated by hipify!!!
#include "GpuColorer.h"
#include "GraphAux.h"
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
extern "C" {
#include "CpuColorer.h"
#include "common.h"
}
#include "hip/device_functions.h"
#define THREADxBLOCK 256
Colorer* GpuColor(Graph* graph, int type) {
Colorer* colorer;
CHECK(hipMallocManaged(&colorer, sizeof(Colorer)));
uint n = graph->nodeSize;
colorer->uncoloredNodes = true;
colorer->misNotFound = true;
// hipMalloc for arrays of struct Coloring;
CHECK(hipMallocManaged(&(colorer->coloring), n * sizeof(uint)));
memset(colorer->coloring, 0, n * sizeof(uint));
// allocate space on the GPU for the random states
hiprandState_t* states;
uint* weigths;
uint* permutation;
dim3 threads(THREADxBLOCK);
dim3 blocks((graph->nodeSize + threads.x - 1) / threads.x, 1, 1);
uint seed = 0;
// start coloring (dyn. parall.)
switch (type) {
case 0: // LUBY
LubyColorer(colorer, graph);
break;
case 1: // JP
//CHECK(hipMalloc((void**)&states, n * sizeof(hiprandState_t)));
weigths = cpuInit(n);
//init << < blocks, threads >> > (seed, states, weigths, n);
//hipDeviceSynchronize();
hipLaunchKernelGGL(( JPcolorer) , dim3(1), dim3(1) , 0, 0, colorer, graph, weigths);
hipDeviceSynchronize();
colorer->numOfColors = findMax(colorer, n);
//hipFree(states);
hipFree(weigths);
break;
case 2: // LDF
//CHECK(hipMalloc((void**)&states, n * sizeof(hiprandState_t)));
weigths = cpuInit(n);
//init << < blocks, threads >> > (seed, states, weigths, n);
//hipDeviceSynchronize();
hipLaunchKernelGGL(( LDFcolorer) , dim3(1), dim3(1) , 0, 0, colorer, graph, weigths);
hipDeviceSynchronize();
colorer->numOfColors = findMax(colorer, n);
//hipFree(states);
hipFree(weigths);
break;
}
return colorer;
}
void LubyColorer(Colorer* colorer, Graph* graph) {
dim3 threads(THREADxBLOCK);
dim3 blocks((graph->nodeSize + threads.x - 1) / threads.x, 1, 1);
uint n = graph->nodeSize;
uint* permutation;
CHECK(hipMallocManaged(&permutation, n * sizeof(uint)));
colorer->numOfColors = 0;
// loop on ISs covering the graph
while (colorer->uncoloredNodes) {
managedRandomPermutation(n, permutation);
colorer->uncoloredNodes = false;
colorer->numOfColors++;
while (colorer->misNotFound) {
colorer->misNotFound = false;
LubyfindMIS << < blocks, threads >> > (colorer, graph, permutation);
hipDeviceSynchronize();
RemoveNeighs << < blocks, threads >> > (colorer, graph, permutation);
hipDeviceSynchronize();
}
colorMIS << < blocks, threads >> > (colorer, graph, permutation);
hipDeviceSynchronize();
}
CHECK(hipFree(permutation));
}
__global__ void LubyfindMIS(Colorer* colorer, Graph* graph, uint* permutation) {
uint idx = threadIdx.x + blockDim.x * blockIdx.x;
uint numColors = colorer->numOfColors;
if (idx >= graph->nodeSize || colorer->coloring[idx] != 0) {
return;
}
uint offset = graph->cumDegs[idx];
uint deg = graph->cumDegs[idx + 1] - graph->cumDegs[idx];
for (uint j = 0; j < deg; j++) {
uint neighID = graph->neighs[offset + j];
if ((colorer->coloring[neighID] == 0 || colorer->coloring[neighID] == -1) && (permutation[idx] < permutation[neighID])) {
colorer->uncoloredNodes = true;
colorer->misNotFound = true;
return;
}
}
colorer->coloring[idx] = -1;
return;
}
__global__ void RemoveNeighs(Colorer* colorer, Graph* graph, uint* weights) {
uint idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= graph->nodeSize || colorer->coloring[idx] != 0) {
return;
}
uint offset = graph->cumDegs[idx];
uint deg = graph->cumDegs[idx + 1] - graph->cumDegs[idx];
for (uint j = 0; j < deg; j++) {
uint neighID = graph->neighs[offset + j];
if (colorer->coloring[neighID] == -1) {
colorer->coloring[idx] = -2;
return;
}
}
}
__global__ void colorMIS(Colorer* colorer, Graph* graph, uint* weights) {
uint idx = threadIdx.x + blockDim.x * blockIdx.x;
colorer->misNotFound = true;
if (colorer->coloring[idx] == -1 && idx < graph->nodeSize) {
colorer->coloring[idx] = colorer->numOfColors;
}
else if (colorer->coloring[idx] == -2 && idx < graph->nodeSize){
colorer->coloring[idx] = 0;
}
else {
return;
}
}
void managedRandomPermutation(uint n, uint* permutation) {
// initial range of numbers
for (int i = 0;i < n;++i) {
permutation[i] = i + 1;
}
// shuffle
for (int i = n - 1; i >= 0; --i) {
//generate a random number [0, n-1]
int j = rand() % (i + 1);
//swap the last element with element at random index
int temp = permutation[i];
permutation[i] = permutation[j];
permutation[j] = temp;
}
}
/**
* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each
*/
__global__ void init(uint seed, hiprandState_t* states, uint* numbers, uint n) {
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > n)
return;
hiprand_init(seed, idx, 0, &states[idx]);
numbers[idx] = hiprand(&states[idx]);
}
uint* cpuInit(uint n) {
uint* numbers;
srand(time(NULL));
CHECK(hipMallocManaged(&numbers, n * sizeof(uint)));
for (int i = 0; i < n; i++) {
numbers[i] = rand();
}
return numbers;
}
/**
* Luby IS & Jones-Plassmann colorer
*/
__global__ void JPcolorer(Colorer* colorer, Graph* graph, uint* weights) {
dim3 threads(THREADxBLOCK);
dim3 blocks((graph->nodeSize + threads.x - 1) / threads.x, 1, 1);
// loop on ISs covering the graph
colorer->numOfColors = 0;
while (colorer->uncoloredNodes) {
colorer->uncoloredNodes = false;
colorer->numOfColors++;
JPfindIS << < blocks, threads >> > (colorer, graph, weights);
hipDeviceSynchronize();
colorIsWithMin << < blocks, threads >> > (colorer, graph, weights);
hipDeviceSynchronize();
}
}
__global__ void JPfindIS(Colorer* colorer, Graph* graph, uint* weights) {
uint idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= graph->nodeSize)
return;
if (colorer->coloring[idx])
return;
uint offset = graph->cumDegs[idx];
uint deg = graph->cumDegs[idx + 1] - graph->cumDegs[idx];
for (uint j = 0; j < deg; j++) {
uint neighID = graph->neighs[offset + j];
uint degNeigh = weights[neighID];
if (colorer->coloring[neighID] <= 0 && ((weights[idx] < weights[neighID]) || ((weights[idx] == weights[neighID]) && idx < neighID))) {
colorer->uncoloredNodes = true;
return;
}
}
colorer->coloring[idx] = -1;
}
/**
*LDF colorer
*/
__global__ void LDFcolorer(Colorer* colorer, Graph* graph, uint* weights) {
dim3 threads(THREADxBLOCK);
dim3 blocks((graph->nodeSize + threads.x - 1) / threads.x, 1, 1);
// loop on ISs covering the graph
colorer->numOfColors = 0;
while (colorer->uncoloredNodes) {
colorer->uncoloredNodes = false;
colorer->numOfColors++;
hipLaunchKernelGGL(( LDFfindIS) , dim3(blocks), dim3(threads) , 0, 0, colorer, graph, weights);
hipDeviceSynchronize();
hipLaunchKernelGGL(( colorIsWithMin) , dim3(blocks), dim3(threads) , 0, 0, colorer, graph, weights);
hipDeviceSynchronize();
}
}
/**
* find an IS
*/
__global__ void LDFfindIS(Colorer* colorer, Graph* graph, uint* weights) {
uint idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= graph->nodeSize)
return;
if (colorer->coloring[idx])
return;
uint offset = graph->cumDegs[idx];
uint deg = graph->cumDegs[idx + 1] - graph->cumDegs[idx];
for (uint j = 0; j < deg; j++) {
uint neighID = graph->neighs[offset + j];
uint degNeigh = graph->cumDegs[neighID + 1] - graph->cumDegs[neighID];
if (colorer->coloring[neighID] <= 0 && ((deg < degNeigh) || ((deg == degNeigh) && weights[idx] < weights[neighID]))) {
colorer->uncoloredNodes = true;
return;
}
}
colorer->coloring[idx] = -1;
}
/**
* color an IS
*/
__global__ void colorIsWithMin(Colorer* colorer, Graph* graph, uint* weights) {
uint idx = threadIdx.x + blockDim.x * blockIdx.x;
if (colorer->coloring[idx] == -1 && idx < graph->nodeSize) {
uint offset = graph->cumDegs[idx];
uint deg = graph->cumDegs[idx + 1] - graph->cumDegs[idx];
//find lowest color available
int lowest = 0;
for (uint k = 1; k <= deg + 1; k++) { // <= because at most deg + 1 colors are needed; k starts at 1 because color 0 marks non-colored nodes
bool candidate = true;
lowest = k;
for (uint j = 0; j < deg; j++) {
uint neighID = graph->neighs[offset + j];
if (colorer->coloring[neighID] == k) {
candidate = false;
break;
}
}
if (candidate) {
break;
}
}
colorer->coloring[idx] = lowest;
}
else {
return;
}
}
int findMax(Colorer* colorer, int n) {
int max = 0;
int index = 0;
for (int i = 0; i < n; i++) {
if (colorer->coloring[i] > max) {
max = colorer->coloring[i];
index = i;
}
}
//printf("max %d at index %d\n", max, index);
return max;
}
| 46ab255d41a365fbcda8516d74e23be576a6f75a.cu |
#include "GpuColorer.h"
#include "GraphAux.h"
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <cuda.h>
extern "C" {
#include "CpuColorer.h"
#include "common.h"
}
#include "device_functions.h"
#define THREADxBLOCK 256
Colorer* GpuColor(Graph* graph, int type) {
Colorer* colorer;
CHECK(cudaMallocManaged(&colorer, sizeof(Colorer)));
uint n = graph->nodeSize;
colorer->uncoloredNodes = true;
colorer->misNotFound = true;
// cudaMalloc for arrays of struct Coloring;
CHECK(cudaMallocManaged(&(colorer->coloring), n * sizeof(uint)));
memset(colorer->coloring, 0, n * sizeof(uint));
// allocate space on the GPU for the random states
curandState_t* states;
uint* weigths;
uint* permutation;
dim3 threads(THREADxBLOCK);
dim3 blocks((graph->nodeSize + threads.x - 1) / threads.x, 1, 1);
uint seed = 0;
// start coloring (dyn. parall.)
switch (type) {
case 0: // LUBY
LubyColorer(colorer, graph);
break;
case 1: // JP
//CHECK(cudaMalloc((void**)&states, n * sizeof(curandState_t)));
weigths = cpuInit(n);
//init << < blocks, threads >> > (seed, states, weigths, n);
//cudaDeviceSynchronize();
JPcolorer <<< 1, 1 >>> (colorer, graph, weigths);
cudaDeviceSynchronize();
colorer->numOfColors = findMax(colorer, n);
//cudaFree(states);
cudaFree(weigths);
break;
case 2: // LDF
//CHECK(cudaMalloc((void**)&states, n * sizeof(curandState_t)));
weigths = cpuInit(n);
//init << < blocks, threads >> > (seed, states, weigths, n);
//cudaDeviceSynchronize();
LDFcolorer <<< 1, 1 >>> (colorer, graph, weigths);
cudaDeviceSynchronize();
colorer->numOfColors = findMax(colorer, n);
//cudaFree(states);
cudaFree(weigths);
break;
}
return colorer;
}
void LubyColorer(Colorer* colorer, Graph* graph) {
dim3 threads(THREADxBLOCK);
dim3 blocks((graph->nodeSize + threads.x - 1) / threads.x, 1, 1);
uint n = graph->nodeSize;
uint* permutation;
CHECK(cudaMallocManaged(&permutation, n * sizeof(uint)));
colorer->numOfColors = 0;
// loop on ISs covering the graph
while (colorer->uncoloredNodes) {
managedRandomPermutation(n, permutation);
colorer->uncoloredNodes = false;
colorer->numOfColors++;
while (colorer->misNotFound) {
colorer->misNotFound = false;
LubyfindMIS << < blocks, threads >> > (colorer, graph, permutation);
cudaDeviceSynchronize();
RemoveNeighs << < blocks, threads >> > (colorer, graph, permutation);
cudaDeviceSynchronize();
}
colorMIS << < blocks, threads >> > (colorer, graph, permutation);
cudaDeviceSynchronize();
}
CHECK(cudaFree(permutation));
}
__global__ void LubyfindMIS(Colorer* colorer, Graph* graph, uint* permutation) {
uint idx = threadIdx.x + blockDim.x * blockIdx.x;
uint numColors = colorer->numOfColors;
if (idx >= graph->nodeSize || colorer->coloring[idx] != 0) {
return;
}
uint offset = graph->cumDegs[idx];
uint deg = graph->cumDegs[idx + 1] - graph->cumDegs[idx];
for (uint j = 0; j < deg; j++) {
uint neighID = graph->neighs[offset + j];
if ((colorer->coloring[neighID] == 0 || colorer->coloring[neighID] == -1) && (permutation[idx] < permutation[neighID])) {
colorer->uncoloredNodes = true;
colorer->misNotFound = true;
return;
}
}
colorer->coloring[idx] = -1;
return;
}
__global__ void RemoveNeighs(Colorer* colorer, Graph* graph, uint* weights) {
uint idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= graph->nodeSize || colorer->coloring[idx] != 0) {
return;
}
uint offset = graph->cumDegs[idx];
uint deg = graph->cumDegs[idx + 1] - graph->cumDegs[idx];
for (uint j = 0; j < deg; j++) {
uint neighID = graph->neighs[offset + j];
if (colorer->coloring[neighID] == -1) {
colorer->coloring[idx] = -2;
return;
}
}
}
__global__ void colorMIS(Colorer* colorer, Graph* graph, uint* weights) {
uint idx = threadIdx.x + blockDim.x * blockIdx.x;
colorer->misNotFound = true;
if (colorer->coloring[idx] == -1 && idx < graph->nodeSize) {
colorer->coloring[idx] = colorer->numOfColors;
}
else if (colorer->coloring[idx] == -2 && idx < graph->nodeSize){
colorer->coloring[idx] = 0;
}
else {
return;
}
}
void managedRandomPermutation(uint n, uint* permutation) {
// initial range of numbers
for (int i = 0;i < n;++i) {
permutation[i] = i + 1;
}
// shuffle
for (int i = n - 1; i >= 0; --i) {
//generate a random number [0, n-1]
int j = rand() % (i + 1);
//swap the last element with element at random index
int temp = permutation[i];
permutation[i] = permutation[j];
permutation[j] = temp;
}
}
/**
* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each
*/
__global__ void init(uint seed, curandState_t* states, uint* numbers, uint n) {
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > n)
return;
curand_init(seed, idx, 0, &states[idx]);
numbers[idx] = curand(&states[idx]);
}
uint* cpuInit(uint n) {
uint* numbers;
srand(time(NULL));
CHECK(cudaMallocManaged(&numbers, n * sizeof(uint)));
for (int i = 0; i < n; i++) {
numbers[i] = rand();
}
return numbers;
}
/**
* Luby IS & Jones-Plassmann colorer
*/
__global__ void JPcolorer(Colorer* colorer, Graph* graph, uint* weights) {
dim3 threads(THREADxBLOCK);
dim3 blocks((graph->nodeSize + threads.x - 1) / threads.x, 1, 1);
// loop on ISs covering the graph
colorer->numOfColors = 0;
while (colorer->uncoloredNodes) {
colorer->uncoloredNodes = false;
colorer->numOfColors++;
JPfindIS << < blocks, threads >> > (colorer, graph, weights);
cudaDeviceSynchronize();
colorIsWithMin << < blocks, threads >> > (colorer, graph, weights);
cudaDeviceSynchronize();
}
}
__global__ void JPfindIS(Colorer* colorer, Graph* graph, uint* weights) {
uint idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= graph->nodeSize)
return;
if (colorer->coloring[idx])
return;
uint offset = graph->cumDegs[idx];
uint deg = graph->cumDegs[idx + 1] - graph->cumDegs[idx];
for (uint j = 0; j < deg; j++) {
uint neighID = graph->neighs[offset + j];
uint degNeigh = weights[neighID];
if (colorer->coloring[neighID] <= 0 && ((weights[idx] < weights[neighID]) || ((weights[idx] == weights[neighID]) && idx < neighID))) {
colorer->uncoloredNodes = true;
return;
}
}
colorer->coloring[idx] = -1;
}
/**
*LDF colorer
*/
__global__ void LDFcolorer(Colorer* colorer, Graph* graph, uint* weights) {
dim3 threads(THREADxBLOCK);
dim3 blocks((graph->nodeSize + threads.x - 1) / threads.x, 1, 1);
// loop on ISs covering the graph
colorer->numOfColors = 0;
while (colorer->uncoloredNodes) {
colorer->uncoloredNodes = false;
colorer->numOfColors++;
LDFfindIS <<< blocks, threads >>> (colorer, graph, weights);
cudaDeviceSynchronize();
colorIsWithMin <<< blocks, threads >>> (colorer, graph, weights);
cudaDeviceSynchronize();
}
}
/**
* find an IS
*/
__global__ void LDFfindIS(Colorer* colorer, Graph* graph, uint* weights) {
uint idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= graph->nodeSize)
return;
if (colorer->coloring[idx])
return;
uint offset = graph->cumDegs[idx];
uint deg = graph->cumDegs[idx + 1] - graph->cumDegs[idx];
for (uint j = 0; j < deg; j++) {
uint neighID = graph->neighs[offset + j];
uint degNeigh = graph->cumDegs[neighID + 1] - graph->cumDegs[neighID];
if (colorer->coloring[neighID] <= 0 && ((deg < degNeigh) || ((deg == degNeigh) && weights[idx] < weights[neighID]))) {
colorer->uncoloredNodes = true;
return;
}
}
colorer->coloring[idx] = -1;
}
/**
* color an IS
*/
__global__ void colorIsWithMin(Colorer* colorer, Graph* graph, uint* weights) {
uint idx = threadIdx.x + blockDim.x * blockIdx.x;
if (colorer->coloring[idx] == -1 && idx < graph->nodeSize) {
uint offset = graph->cumDegs[idx];
uint deg = graph->cumDegs[idx + 1] - graph->cumDegs[idx];
//find lowest color available
int lowest = 0;
for (uint k = 1; k <= deg + 1; k++) { // <= because at most deg + 1 colors are needed; k starts at 1 because color 0 marks non-colored nodes
bool candidate = true;
lowest = k;
for (uint j = 0; j < deg; j++) {
uint neighID = graph->neighs[offset + j];
if (colorer->coloring[neighID] == k) {
candidate = false;
break;
}
}
if (candidate) {
break;
}
}
colorer->coloring[idx] = lowest;
}
else {
return;
}
}
int findMax(Colorer* colorer, int n) {
int max = 0;
int index = 0;
for (int i = 0; i < n; i++) {
if (colorer->coloring[i] > max) {
max = colorer->coloring[i];
index = i;
}
}
//printf("max %d at index %d\n", max, index);
return max;
}
|
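Two CUDA features the colorer above leans on are worth noting: unified (managed) memory, which lets the kernels write the Colorer flags that the host loops read back after cudaDeviceSynchronize(), and dynamic parallelism, since JPcolorer and LDFcolorer launch child kernels from device code (that requires relocatable device code, e.g. nvcc -rdc=true, and compute capability 3.5 or newer). A minimal, self-contained sketch of the managed-flag loop pattern, with illustrative names:
#include <cuda_runtime.h>

struct Flags { bool again; };

__global__ void step(Flags* f) {
    // toy kernel: clears the flag; real code, like the colorers above,
    // would set it whenever uncolored work remains
    if (threadIdx.x == 0 && blockIdx.x == 0) f->again = false;
}

int main() {
    Flags* f = nullptr;
    cudaMallocManaged(&f, sizeof(Flags));  // one allocation visible to host and device
    f->again = true;
    while (f->again) {                     // host reads the flag the kernel wrote
        step<<<1, 32>>>(f);
        cudaDeviceSynchronize();           // required before the host touches f->again
    }
    cudaFree(f);
    return 0;
}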
c7cb234eb37f4a37fb281c34c15d824d9576eb88.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_functions.h"
/* ---------------------------------- __device__ functions (helper funcs, wave funcs, potential etc.) ------------------------------------------- */
/* returns the closest discrete wavefunction value for given continuous arguments*/
__device__ double psi_1s_QW(wf_parameter_struct* gpu_X_wf_params, double* wf, double rho, double z_e, double z_h) {
double dZ = gpu_X_wf_params->dZ;
int sizeRho = gpu_X_wf_params->sizeRho;
int sizeZe = gpu_X_wf_params->sizeZe;
int sizeZh = gpu_X_wf_params->sizeZh;
int iRho, iZe, iZh;
double psi;
iRho = floor(rho / dZ + 0.5) - 1;
iZe = floor((z_e + dZ * (sizeZe + 1) / 2) / dZ + 0.5);
iZh = floor((z_h + dZ * (sizeZe + 1) / 2) / dZ + 0.5);
int index = iRho + iZe * sizeRho + iZh * sizeZe * sizeRho;
if (iRho < 0 || iZe < 0 || iZh < 0) {// || index > sizeRho * sizeZe * sizeZh) {
// printf("\n Illegal memory access: index = %d, iRho = %d, iZe = %d, iZh = %d, wf[index] set to 0 \n ", index, iRho, iZe, iZh);
psi = 0;
}
else if (iRho >= sizeRho || iZe >= sizeZe || iZh >= sizeZh)
psi = 0;
else
psi = wf[index];
return psi;
}
__device__ double psi_1s_QW_analytical(double a0, double L, double S, double rho, double z_e, double z_h) {
const double pi = 3.14159265359;
double psi;
if (abs(z_e) <= L / 2 && abs(z_h) <= L / 2)
psi = 4 / (a0 * L) / sqrt(2 * pi) / sqrt(S) * exp(-rho / (a0)) * cos(pi / L * z_e) * cos(pi / L * z_h);
else
psi = 0;
return psi;
}
/* calculates the additional interaction potential between particles in the X-e system (excluding e1-h1)
fix is the 'nonzeroness': V = const / (r + fix) */
__device__ double V_I_pot(double r_e1h2, double r_e2h1, double r_e1e2, double r_h1h2) {
const double pi = 3.14159265359;
const double e2eps = 1.8412430591e-29; // in SI (J*m); e2eps = e^2/(4pi * eps * eps0), so V(r) = e2eps/r
// 1.8412430591e-29 ~ eps = 12.53, 1.78843221e-29 ~ eps = 12.9
const double a0_hh = 1.152714e-08;// m
double V_I;
// we introduce 'fix' to never worry about NaNs
/*if (r_e1h2 == 0 || r_e2h1 == 0 || r_e1e2 == 0 || r_h1h2 == 0)
V_I = e2eps * (1 / (r_e1e2 + fix) + 1 / (r_h1h2 + fix) - 1 / (r_e1h2 + fix) - 1 / (r_e1h2 + fix)); // sum of coulomb potentials
else*/
V_I = e2eps * (1 / r_e1e2 + 1 / r_h1h2 - 1 / r_e1h2 - 1 / r_e2h1);
return V_I;
}
/* e-h attraction potential
fix is the 'nonzeroness': V = const / (r + fix) */
__device__ double V_eh_pot(double r_eh, double fix) {
const double pi = 3.14159265359;
const double e2eps = 1.8412430591e-29; // in SI (J*m); e2eps = e^2/(4pi * eps * eps0), so V(r) = e2eps/r
// 1.8412430591e-29 ~ eps = 12.53, 1.78843221e-29 ~ eps = 12.9
const double a0_hh = 1.152714e-08;// m
double V_eh = e2eps * 1 / (r_eh + fix);
return V_eh;
}
/* ------------------------------------------------ __global__ kernel functions ------------------------------------------------------------------ */
/* used to initialize the random states */
__global__ void initRand(unsigned int seed, int runCounter, hiprandState_t* states) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
// nvidia recommends using the same seed but monotonically increasing sequence numbers when dealing with multiple kernel launches
// but that is much much slower, so best to leave runCounter=0
for (int i = index; i < N; i += stride)
hiprand_init(seed, N * runCounter + i, 0, &states[i]);
}
/* calculates J_exch^{e-e}(q) using MC method
stores numPoints function values in gpu_f and as much squares in gpu_f2 */
__global__ void intMC_J_xx_exch(hiprandState_t* states, double* gpu_f, double* gpu_f2, double* gpu_wf, wf_parameter_struct* gpu_X_wf_params, double L, int dim, double q) {
const double pi = 3.14159265359;
const double m0 = 9.109383561e-31; // free electron mass, kg
const double m_e = 0.067 * m0; // eff e mass in GaAs
const double m_hh = 0.35 * m0; // eff mass of heavy holes in GaAs along z
const double mu_hh = 0.0417 * m0; // 1.0 / (1.0 / m_e + 1.0 / m_hh); // reduced mass of e-hh with in-plane m_hh
const double M = m_e + m_hh;
const double a0_hh = 1.152714e-08; // Xhh Bohr radius, m
const double e = 1.602176634e-19; // elementary charge, coulombs
const double e2eps = 1.8412430591e-29; // in SI (J*m); e2eps = e^2/(4pi * eps * eps0), so V(r) = e2eps/r
// 1.8412430591e-29 ~ eps = 12.53, 1.78843221e-29 ~ eps = 12.9
double dZ = gpu_X_wf_params->dZ;
int sizeRho = gpu_X_wf_params->sizeRho;
int sizeZe = gpu_X_wf_params->sizeZe;
int sizeZh = gpu_X_wf_params->sizeZh;
double S_real = gpu_X_wf_params->S_real;
// 2d polar relative coordinates give detTheta = 1; 2d centre-mass coords are integrated and give an S multiplier
// we are left with (rho_eh, phi_eh)x2 + (xi, phi_xi) + (z_e1, z_e2, z_h1, z_h2) -- 10 coords in total
double rho_e1h1, phi_e1h1, z_e1, z_h1;
double rho_e2h2, phi_e2h2, z_e2, z_h2;
double xi, phi_xi;
double rho_e1h2, rho_e2h1; // 2d distances for psi_e1h2, psi_e2h1
double r_e1h2 = 0, r_e2h1 = 0, r_e1e2 = 0, r_h1h2 = 0; // 3d distances for potential V_I
double psi_e1h1, psi_e2h2, psi_e1h2, psi_e2h1; // exciton wavefunctions
double V_I; // value of potential V_I
double q_factor_real, q_factor_im, q_factor_arg; // contain q-dependency assuming Q = Q' = 0
double detTheta; // Jacobian
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
double rands[10];
for (int i = index; i < numPoints; i += stride) {
rands[0] = hiprand_uniform_double(&states[dim * i + 0]);
rands[1] = hiprand_uniform_double(&states[dim * i + 1]);
rands[2] = hiprand_uniform_double(&states[dim * i + 2]);
rands[3] = hiprand_uniform_double(&states[dim * i + 3]);
rands[4] = hiprand_uniform_double(&states[dim * i + 4]);
rands[5] = hiprand_uniform_double(&states[dim * i + 5]);
rands[6] = hiprand_uniform_double(&states[dim * i + 6]);
rands[7] = hiprand_uniform_double(&states[dim * i + 7]);
rands[8] = hiprand_uniform_double(&states[dim * i + 8]);
rands[9] = hiprand_uniform_double(&states[dim * i + 9]);
rho_e1h1 = dZ * (1.0 + rands[0] * (sizeRho + 1));
phi_e1h1 = 2 * pi * rands[1];
z_e1 = dZ * (rands[2] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
z_h1 = dZ * (rands[3] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
rho_e2h2 = dZ * (1.0 + rands[4] * (sizeRho + 1));
phi_e2h2 = 2 * pi * rands[5];
z_e2 = dZ * (rands[6] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
z_h2 = dZ * (rands[7] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
// theoretically '2 * (sizeRho + 1)' is the largest upper bound for xi, larger ones lead to gpu_f[i] = 0
// investigate!
xi = dZ + dZ * rands[8] * (2 * (sizeRho + 1));
phi_xi = 2 * pi * rands[9];
// now let's calculate other necessary distances: rho/r_e1h2, rho/r_e2h1, r_e1e2, r_h1h2 -------------------------------------------------
double rho_1_sq = pow(rho_e1h1, 2);
double rho_2_sq = pow(rho_e2h2, 2);
double xi_sq = pow(xi, 2);
double rho_e1h2_sq, rho_e2h1_sq, rho_e1e2_sq, rho_h1h2_sq;
// doubled scalar products:
double t_xi_rho_1 = 2 * xi * rho_e1h1 * cos(phi_xi - phi_e1h1);
double t_xi_rho_2 = 2 * xi * rho_e2h2 * cos(phi_xi - phi_e2h2);
double t_rho_1_rho_2 = 2 * rho_e1h1 * rho_e2h2 * cos(phi_e1h1 - phi_e2h2);
// assemble necessary 2d vector squares:
rho_e1h2_sq = (xi_sq + pow(m_hh / M, 2) * rho_1_sq + pow(m_e / M, 2) * rho_2_sq // for w.f.and potential
+ m_hh / M * t_xi_rho_1
+ m_e / M * t_xi_rho_2
+ m_e * m_hh / pow(M, 2) * t_rho_1_rho_2);
rho_e2h1_sq = (xi_sq + pow(m_e / M, 2) * rho_1_sq + pow(m_hh / M, 2) * rho_2_sq // for w.f.and potential
- m_e / M * t_xi_rho_1
- m_hh / M * t_xi_rho_2
+ m_e * m_hh / pow(M, 2) * t_rho_1_rho_2);
rho_e1e2_sq = (xi_sq + pow(m_hh / M, 2) * (rho_1_sq + rho_2_sq) // only for the potential
+ m_hh / M * t_xi_rho_1
- m_hh / M * t_xi_rho_2
- pow(m_hh / M, 2) * t_rho_1_rho_2);
rho_h1h2_sq = (xi_sq + pow(m_e / M, 2) * (rho_1_sq + rho_2_sq) // only for the potential
- m_e / M * t_xi_rho_1
+ m_e / M * t_xi_rho_2
- pow(m_e / M, 2) * t_rho_1_rho_2);
// assemble 3d distances for V_I potential:
r_e1h2 = sqrt(rho_e1h2_sq + pow(z_e1 - z_h2, 2));
r_e2h1 = sqrt(rho_e2h1_sq + pow(z_e2 - z_h1, 2));
r_e1e2 = sqrt(rho_e1e2_sq + pow(z_e1 - z_e2, 2));
r_h1h2 = sqrt(rho_h1h2_sq + pow(z_h1 - z_h2, 2));
// get 2d vector lengths for psi_e1h2, psi_e2h1:
rho_e1h2 = sqrt(rho_e1h2_sq);
rho_e2h1 = sqrt(rho_e2h1_sq);
// now, calculate wavefunctions:
psi_e1h1 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e1h1, z_e1, z_h1);
psi_e2h2 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e2h2, z_e2, z_h2);
psi_e1h2 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e1h2, z_e1, z_h2);
psi_e2h1 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e2h1, z_e2, z_h1);
// calculate potential:
V_I = V_I_pot(r_e1h2, r_e2h1, r_e1e2, r_h1h2);
// evaluate q-factor
double q_factor_arg = q * (m_hh - m_e) / M * xi * cos(phi_xi)
+ q * (2 * mu_hh / M * (rho_e2h2 * cos(phi_e2h2) - rho_e1h1 * cos(phi_e1h1)));
q_factor_real = cos(q_factor_arg);
q_factor_im = sin(q_factor_arg);
// finally, calculate the Jacobian:
detTheta = rho_e1h1 * rho_e2h2 * xi;
// now we simply evaluate the complete integrand
gpu_f[i] = S_real * detTheta * q_factor_real * psi_e1h2 * psi_e2h1 * V_I * psi_e1h1 * psi_e2h2 / e * 1e6 * (S_real * 1e12); // in micro eV * micro m
//gpu_f[i] = S_real * detTheta * psi_e1h1 * psi_e1h1 * psi_e2h2 * psi_e2h2; // double norm
//gpu_f[i] = S_real * detTheta * psi_e1h1 * psi_e2h2 * psi_e1h2 * psi_e2h1; // overlap integral
//printf("\ngpu_f[%d] = S_real * detTheta * psi_e1h1 * psi_e2 * (V_I / e * 1e6) * psi_e2h1 * psi_e1 = %e * %e * %e * %e * %e * %e * %e", i, S_real, detTheta, psi_e1h1, psi_e2, (V_I / e * 1e6), psi_e2h1, psi_e1);
gpu_f2[i] = gpu_f[i] * gpu_f[i]; // here we store their squares to get <f^2> -> int error
}
}
/* calculates J_exch^{e-e}(q) using MC method
stores numPoints function values in gpu_f and as much squares in gpu_f2 */
__global__ void intMC_J_xx_exch_hh(hiprandState_t* states, double* gpu_f, double* gpu_f2, double* gpu_wf, wf_parameter_struct* gpu_X_wf_params, double L, int dim, double q) {
const double pi = 3.14159265359;
const double m0 = 9.109383561e-31; // free electron mass, kg
const double m_e = 0.067 * m0; // eff e mass in GaAs
const double m_hh = 0.35 * m0; // eff mass of heavy holes in GaAs along z
const double mu_hh = 0.0417 * m0; // 1.0 / (1.0 / m_e + 1.0 / m_hh); // reduced mass of e-hh with in-plane m_hh
const double M = m_e + m_hh;
const double a0_hh = 1.152714e-08; // Xhh Bohr radius, m
const double e = 1.602176634e-19; // elementary charge, coulombs
const double e2eps = 1.8412430591e-29; // in SI (J*m); e2eps = e^2/(4pi * eps * eps0), so V(r) = e2eps/r
// 1.8412430591e-29 ~ eps = 12.53, 1.78843221e-29 ~ eps = 12.9
double dZ = gpu_X_wf_params->dZ;
int sizeRho = gpu_X_wf_params->sizeRho;
int sizeZe = gpu_X_wf_params->sizeZe;
int sizeZh = gpu_X_wf_params->sizeZh;
double S_real = gpu_X_wf_params->S_real;
// 2d polar relative coordinates give detTheta = 1; 2d centre-mass coords are integrated and give an S multiplier
// we are left with (rho_eh, phi_eh)x2 + (xi, phi_xi) + (z_e1, z_e2, z_h1, z_h2) -- 10 coords in total
double rho_e1h1, phi_e1h1, z_e1, z_h1;
double rho_e2h2, phi_e2h2, z_e2, z_h2;
double xi, phi_xi;
double rho_e1h2, rho_e2h1; // 2d distances for psi_e1h2, psi_e2h1
double r_e1h2 = 0, r_e2h1 = 0, r_e1e2 = 0, r_h1h2 = 0; // 3d distances for potential V_I
double psi_e1h1, psi_e2h2, psi_e1h2, psi_e2h1; // exciton wavefunctions
double V_I; // value of potential V_I
double q_factor_real, q_factor_im, q_factor_arg; // contain q-dependency assuming Q = Q' = 0
double detTheta; // Jacobian
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
double rands[10];
for (int i = index; i < numPoints; i += stride) {
rands[0] = hiprand_uniform_double(&states[dim * i + 0]);
rands[1] = hiprand_uniform_double(&states[dim * i + 1]);
rands[2] = hiprand_uniform_double(&states[dim * i + 2]);
rands[3] = hiprand_uniform_double(&states[dim * i + 3]);
rands[4] = hiprand_uniform_double(&states[dim * i + 4]);
rands[5] = hiprand_uniform_double(&states[dim * i + 5]);
rands[6] = hiprand_uniform_double(&states[dim * i + 6]);
rands[7] = hiprand_uniform_double(&states[dim * i + 7]);
rands[8] = hiprand_uniform_double(&states[dim * i + 8]);
rands[9] = hiprand_uniform_double(&states[dim * i + 9]);
rho_e1h1 = dZ * (1.0 + rands[0] * (sizeRho + 1));
phi_e1h1 = 2 * pi * rands[1];
z_e1 = dZ * (rands[2] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
z_h1 = dZ * (rands[3] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
rho_e2h2 = dZ * (1.0 + rands[4] * (sizeRho + 1));
phi_e2h2 = 2 * pi * rands[5];
z_e2 = dZ * (rands[6] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
z_h2 = dZ * (rands[7] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
// theoretically '2 * (sizeRho + 1)' is the largest upper bound for xi, larger ones lead to gpu_f[i] = 0
// investigate!
xi = dZ + dZ * rands[8] * (2 * (sizeRho + 1));
phi_xi = 2 * pi * rands[9];
// now let's calculate other necessary distances: rho/r_e1h2, rho/r_e2h1, r_e1e2, r_h1h2 -------------------------------------------------
double rho_1_sq = pow(rho_e1h1, 2);
double rho_2_sq = pow(rho_e2h2, 2);
double xi_sq = pow(xi, 2);
double rho_e1h2_sq, rho_e2h1_sq, rho_e1e2_sq, rho_h1h2_sq;
// doubled scalar products:
double t_xi_rho_1 = 2 * xi * rho_e1h1 * cos(phi_xi - phi_e1h1);
double t_xi_rho_2 = 2 * xi * rho_e2h2 * cos(phi_xi - phi_e2h2);
double t_rho_1_rho_2 = 2 * rho_e1h1 * rho_e2h2 * cos(phi_e1h1 - phi_e2h2);
// assemble necessary 2d vector squares:
rho_e1h2_sq = (xi_sq + pow(m_hh / M, 2) * rho_1_sq + pow(m_e / M, 2) * rho_2_sq // for w.f.and potential
+ m_hh / M * t_xi_rho_1
+ m_e / M * t_xi_rho_2
+ m_e * m_hh / pow(M, 2) * t_rho_1_rho_2);
rho_e2h1_sq = (xi_sq + pow(m_e / M, 2) * rho_1_sq + pow(m_hh / M, 2) * rho_2_sq // for w.f.and potential
- m_e / M * t_xi_rho_1
- m_hh / M * t_xi_rho_2
+ m_e * m_hh / pow(M, 2) * t_rho_1_rho_2);
rho_e1e2_sq = (xi_sq + pow(m_hh / M, 2) * (rho_1_sq + rho_2_sq) // only for the potential
+ m_hh / M * t_xi_rho_1
- m_hh / M * t_xi_rho_2
- pow(m_hh / M, 2) * t_rho_1_rho_2);
rho_h1h2_sq = (xi_sq + pow(m_e / M, 2) * (rho_1_sq + rho_2_sq) // only for the potential
- m_e / M * t_xi_rho_1
+ m_e / M * t_xi_rho_2
- pow(m_e / M, 2) * t_rho_1_rho_2);
// assemble 3d distances for V_I potential:
r_e1h2 = sqrt(rho_e1h2_sq + pow(z_e1 - z_h2, 2));
r_e2h1 = sqrt(rho_e2h1_sq + pow(z_e2 - z_h1, 2));
r_e1e2 = sqrt(rho_e1e2_sq + pow(z_e1 - z_e2, 2));
r_h1h2 = sqrt(rho_h1h2_sq + pow(z_h1 - z_h2, 2));
// get 2d vector lengths for psi_e1h2, psi_e2h1:
rho_e1h2 = sqrt(rho_e1h2_sq);
rho_e2h1 = sqrt(rho_e2h1_sq);
// now, calculate wavefunctions:
psi_e1h1 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e1h1, z_e1, z_h1);
psi_e2h2 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e2h2, z_e2, z_h2);
psi_e1h2 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e1h2, z_e1, z_h2);
psi_e2h1 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e2h1, z_e2, z_h1);
// calculate potential:
V_I = V_I_pot(r_e1h2, r_e2h1, r_e1e2, r_h1h2);
// evaluate q-factor
double q_factor_arg = q * (m_hh - m_e) / M * xi * cos(phi_xi)
+ q * (2 * mu_hh / M * (rho_e2h2 * cos(phi_e2h2) - rho_e1h1 * cos(phi_e1h1)));
q_factor_real = cos(q_factor_arg);
q_factor_im = sin(q_factor_arg);
// finally, calculate the Jacobian:
detTheta = rho_e1h1 * rho_e2h2 * xi;
// now we simply evaluate the complete integrand
gpu_f[i] = S_real * detTheta * q_factor_real * psi_e1h2 * psi_e2h1 * V_I * psi_e1h1 * psi_e2h2 / e * 1e6 * (S_real * 1e12); // in micro eV * micro m
//gpu_f[i] = S_real * detTheta * psi_e1h1 * psi_e1h1 * psi_e2h2 * psi_e2h2; // double norm
//gpu_f[i] = S_real * detTheta * psi_e1h1 * psi_e2h2 * psi_e1h2 * psi_e2h1; // overlap integral
//printf("\ngpu_f[%d] = S_real * detTheta * psi_e1h1 * psi_e2 * (V_I / e * 1e6) * psi_e2h1 * psi_e1 = %e * %e * %e * %e * %e * %e * %e", i, S_real, detTheta, psi_e1h1, psi_e2, (V_I / e * 1e6), psi_e2h1, psi_e1);
gpu_f2[i] = gpu_f[i] * gpu_f[i]; // here we store their squares to get <f^2> -> int error
}
}
/* calculates J_dir(q) using MC method
stores numPoints function values in gpu_f and as much squares in gpu_f2 */
__global__ void intMC_J_xx_dir(hiprandState_t* states, double* gpu_f, double* gpu_f2, double* gpu_wf, wf_parameter_struct* gpu_X_wf_params, double L, int dim, double q) {
const double pi = 3.14159265359;
const double m0 = 9.109383561e-31; // free electron mass, kg
const double m_e = 0.067 * m0; // eff e mass in GaAs
const double m_hh = 0.35 * m0; // eff mass of heavy holes in GaAs along z
const double mu_hh = 0.0417 * m0; // 1.0 / (1.0 / m_e + 1.0 / m_hh); // reduced mass of e-hh with in-plane m_hh
const double M = m_e + m_hh;
const double a0_hh = 1.152714e-08; // Xhh Bohr radius, m
const double e = 1.602176634e-19; // elementary charge, coulombs
const double e2eps = 1.8412430591e-29; // in SI (J*m); e2eps = e^2/(4pi * eps * eps0), so V(r) = e2eps/r
// 1.8412430591e-29 ~ eps = 12.53, 1.78843221e-29 ~ eps = 12.9
double dZ = gpu_X_wf_params->dZ;
int sizeRho = gpu_X_wf_params->sizeRho;
int sizeZe = gpu_X_wf_params->sizeZe;
int sizeZh = gpu_X_wf_params->sizeZh;
double S_real = gpu_X_wf_params->S_real;
// 2d polar relative coordinates give detTheta = 1; 2d centre-mass coords are integrated and give an S multiplier
// we are left with (rho_eh, phi_eh)x2 + (xi, phi_xi) + (z_e1, z_e2, z_h1, z_h2) -- 10 coords in total
double rho_e1h1, phi_e1h1, z_e1, z_h1;
double rho_e2h2, phi_e2h2, z_e2, z_h2;
double xi, phi_xi;
double r_e1h2 = 0, r_e2h1 = 0, r_e1e2 = 0, r_h1h2 = 0; // 3d distances for potential V_I
double psi_e1h1, psi_e2h2; // exciton wavefunctions
double V_I; // value of potential V_I
double q_factor_real, q_factor_im, q_factor_arg; // contains q-dependency assuming Q = Q' = 0
double detTheta; // Jacobian
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
double rands[10];
for (int i = index; i < numPoints; i += stride) {
rands[0] = hiprand_uniform_double(&states[dim * i + 0]);
rands[1] = hiprand_uniform_double(&states[dim * i + 1]);
rands[2] = hiprand_uniform_double(&states[dim * i + 2]);
rands[3] = hiprand_uniform_double(&states[dim * i + 3]);
rands[4] = hiprand_uniform_double(&states[dim * i + 4]);
rands[5] = hiprand_uniform_double(&states[dim * i + 5]);
rands[6] = hiprand_uniform_double(&states[dim * i + 6]);
rands[7] = hiprand_uniform_double(&states[dim * i + 7]);
rands[8] = hiprand_uniform_double(&states[dim * i + 8]);
rands[9] = hiprand_uniform_double(&states[dim * i + 9]);
rho_e1h1 = dZ * (1.0 + rands[0] * (sizeRho + 1));
phi_e1h1 = 2 * pi * rands[1];
z_e1 = dZ * (rands[2] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
z_h1 = dZ * (rands[3] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
rho_e2h2 = dZ * (1.0 + rands[4] * (sizeRho + 1));
phi_e2h2 = 2 * pi * rands[5];
z_e2 = dZ * (rands[6] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
z_h2 = dZ * (rands[7] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
// theoretically '2 * (sizeRho + 1)' is the largest upper bound for xi, larger ones lead to gpu_f[i] = 0
// investigate!
xi = dZ + dZ * rands[8] * (2 * (sizeRho + 1));
phi_xi = 2 * pi * rands[9];
// now let's calculate other necessary distances: rho/r_e1h2, rho/r_e2h1, r_e1e2, r_h1h2 -------------------------------------------------
double rho_1_sq = pow(rho_e1h1, 2);
double rho_2_sq = pow(rho_e2h2, 2);
double xi_sq = pow(xi, 2);
double rho_e1h2_sq, rho_e2h1_sq, rho_e1e2_sq, rho_h1h2_sq;
// doubled scalar products:
double t_xi_rho_1 = 2 * xi * rho_e1h1 * cos(phi_xi - phi_e1h1);
double t_xi_rho_2 = 2 * xi * rho_e2h2 * cos(phi_xi - phi_e2h2);
double t_rho_1_rho_2 = 2 * rho_e1h1 * rho_e2h2 * cos(phi_e1h1 - phi_e2h2);
// assemble necessary 2d vector squares:
rho_e1h2_sq = (xi_sq + pow(m_hh / M, 2) * rho_1_sq + pow(m_e / M, 2) * rho_2_sq // for w.f.and potential
+ m_hh / M * t_xi_rho_1
+ m_e / M * t_xi_rho_2
+ m_e * m_hh / pow(M, 2) * t_rho_1_rho_2);
rho_e2h1_sq = (xi_sq + pow(m_e / M, 2) * rho_1_sq + pow(m_hh / M, 2) * rho_2_sq // for w.f.and potential
- m_e / M * t_xi_rho_1
- m_hh / M * t_xi_rho_2
+ m_e * m_hh / pow(M, 2) * t_rho_1_rho_2);
rho_e1e2_sq = (xi_sq + pow(m_hh / M, 2) * (rho_1_sq + rho_2_sq) // only for the potential
+ m_hh / M * t_xi_rho_1
- m_hh / M * t_xi_rho_2
- pow(m_hh / M, 2) * t_rho_1_rho_2);
rho_h1h2_sq = (xi_sq + pow(m_e / M, 2) * (rho_1_sq + rho_2_sq) // only for the potential
- m_e / M * t_xi_rho_1
+ m_e / M * t_xi_rho_2
- pow(m_e / M, 2) * t_rho_1_rho_2);
// assemble 3d distances for V_I potential:
r_e1h2 = sqrt(rho_e1h2_sq + pow(z_e1 - z_h2, 2));
r_e2h1 = sqrt(rho_e2h1_sq + pow(z_e2 - z_h1, 2));
r_e1e2 = sqrt(rho_e1e2_sq + pow(z_e1 - z_e2, 2));
r_h1h2 = sqrt(rho_h1h2_sq + pow(z_h1 - z_h2, 2));
// now, calculate wavefunctions:
psi_e1h1 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e1h1, z_e1, z_h1);
psi_e2h2 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e2h2, z_e2, z_h2);
// calculate potential:
V_I = V_I_pot(r_e1h2, r_e2h1, r_e1e2, r_h1h2);
// evaluate q-factor
double q_factor_arg = q * xi * cos(phi_xi);
q_factor_real = cos(q_factor_arg);
q_factor_im = sin(q_factor_arg);
// finally, calculate the Jacobian:
detTheta = rho_e1h1 * rho_e2h2 * xi;
// now we simply evaluate the complete integrand
gpu_f[i] = S_real * detTheta * q_factor_real * psi_e1h1 * psi_e2h2 * V_I * psi_e1h1 * psi_e2h2 / e * 1e6 * (S_real * 1e12); // in micro eV * micro m
//gpu_f[i] = S_real * detTheta * psi_e1h1 * psi_e1h1 * psi_e2h2 * psi_e2h2; // double norm
//gpu_f[i] = S_real * detTheta * psi_e1h1 * psi_e2h2 * psi_e1h2 * psi_e2h1; // overlap integral
//printf("\ngpu_f[%d] = S_real * detTheta * psi_e1h1 * psi_e2 * (V_I / e * 1e6) * psi_e2h1 * psi_e1 = %e * %e * %e * %e * %e * %e * %e", i, S_real, detTheta, psi_e1h1, psi_e2, (V_I / e * 1e6), psi_e2h1, psi_e1);
gpu_f2[i] = gpu_f[i] * gpu_f[i]; // here we store their squares to get <f^2> -> int error
}
}
/* same, but calculates Ex: <psi | V_eh | psi>*/
__global__ void intMC_Ex(hiprandState_t* states, double* gpu_f, double* gpu_f2, double* gpu_wf, wf_parameter_struct* gpu_X_wf_params, double L) {
const double pi = 3.14159265359;
const double m0 = 9.109383561e-31; // free electron mass, kg
const double m_e = 0.067 * m0; // eff e mass in GaAs
const double m_hh = 0.51 * m0; // eff mass of heavy holes in GaAs
const double mu_hh = 1.0 / (1.0 / m_e + 1.0 / m_hh); // reduced mass of e-hh
const double M_h = m_e + m_hh;
const double a0_hh = 1.152714e-08; // Xhh Bohr radius, m
const double e = 1.602176634e-19; // elementary charge, coulombs
const double e2eps = 1.7884322117e-29; // in SI (J*m); e2eps = e^2/(4pi * eps * eps0), so V(r) = e2eps/r
double dZ = gpu_X_wf_params->dZ;
int sizeRho = gpu_X_wf_params->sizeRho;
int sizeZe = gpu_X_wf_params->sizeZe;
int sizeZh = gpu_X_wf_params->sizeZh;
double S_real = gpu_X_wf_params->S_real;
double fix = gpu_X_wf_params->fix;
// 2d polar relative coordinates give detTheta = 1; 2d centre-mass coords are integrated and give an S multiplier
// we are left with (rho_eh, phi_eh) + (z_e, z_h) -- 4 coords in total
double rho_eh, phi_eh, z_e, z_h;
double r_eh = 0;// distances for potential V_I
double psi_eh; // exc. wavefunc of exciton
double V_eh; // value of potential V_eh_pot
double detTheta;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numPoints; i += stride) {
rho_eh = hiprand_uniform(&states[dim * i + 0]) * dZ * (sizeRho - 1);
phi_eh = hiprand_uniform(&states[dim * i + 2]) * 2 * pi;
z_e = hiprand_uniform(&states[dim * i + 4]) * dZ * (sizeZe - 1);
z_h = hiprand_uniform(&states[dim * i + 6]) * dZ * (sizeZh - 1);
// calc distances for potential (simple to check expressions)
r_eh = sqrt(pow(rho_eh, 2) + pow(z_e - z_h, 2));
// evaluate the V_I potential
V_eh = V_eh_pot(r_eh, fix);
// evaluate wave functions
psi_eh = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_eh, z_e, z_h);
// don't forget about the jacobian
detTheta = rho_eh;// jacobi determinant of new symmetrical double cylindrical relative coordinates
// now we simply evaluate the complete integrand
gpu_f[i] = S_real * detTheta * psi_eh * V_eh * psi_eh / e * 1e6; // in micro eV
//printf("\ngpu_f[%d] = S_real * detTheta * psi_e1h1 * psi_e2 * (V_I / e * 1e6) * psi_e2h1 * psi_e1 = %e * %e * %e * %e * %e * %e * %e", i, S_real, detTheta, psi_e1h1, psi_e2, (V_I / e * 1e6), psi_e2h1, psi_e1);
gpu_f2[i] = gpu_f[i] * gpu_f[i]; // here we store their squares to get <f^2> -> int error
}
}
| c7cb234eb37f4a37fb281c34c15d824d9576eb88.cu | #include "gpu_functions.h"
/* ---------------------------------- __device__ functions (helper funcs, wave funcs, potential etc.) ------------------------------------------- */
/* returns the closest discrete wavefunction value for given continuous arguments*/
__device__ double psi_1s_QW(wf_parameter_struct* gpu_X_wf_params, double* wf, double rho, double z_e, double z_h) {
double dZ = gpu_X_wf_params->dZ;
int sizeRho = gpu_X_wf_params->sizeRho;
int sizeZe = gpu_X_wf_params->sizeZe;
int sizeZh = gpu_X_wf_params->sizeZh;
int iRho, iZe, iZh;
double psi;
iRho = floor(rho / dZ + 0.5) - 1;
iZe = floor((z_e + dZ * (sizeZe + 1) / 2) / dZ + 0.5);
iZh = floor((z_h + dZ * (sizeZe + 1) / 2) / dZ + 0.5);
int index = iRho + iZe * sizeRho + iZh * sizeZe * sizeRho;
if (iRho < 0 || iZe < 0 || iZh < 0) {// || index > sizeRho * sizeZe * sizeZh) {
// printf("\n Illegal memory access: index = %d, iRho = %d, iZe = %d, iZh = %d, wf[index] set to 0 \n ", index, iRho, iZe, iZh);
psi = 0;
}
else if (iRho >= sizeRho || iZe >= sizeZe || iZh >= sizeZh)
psi = 0;
else
psi = wf[index];
return psi;
}
__device__ double psi_1s_QW_analytical(double a0, double L, double S, double rho, double z_e, double z_h) {
const double pi = 3.14159265359;
double psi;
if (abs(z_e) <= L / 2 && abs(z_h) <= L / 2)
psi = 4 / (a0 * L) / sqrt(2 * pi) / sqrt(S) * exp(-rho / (a0)) * cos(pi / L * z_e) * cos(pi / L * z_h);
else
psi = 0;
return psi;
}
/* calculates the additional interaction potential between particles in the X-e system (excluding e1-h1)
fix is the 'nonzeroness': V = const / (r + fix) */
__device__ double V_I_pot(double r_e1h2, double r_e2h1, double r_e1e2, double r_h1h2) {
const double pi = 3.14159265359;
const double e2eps = 1.8412430591e-29; // in SI (J*m); e2eps = e^2/(4pi * eps * eps0), so V(r) = e2eps/r
// 1.8412430591e-29 ~ eps = 12.53, 1.78843221e-29 ~ eps = 12.9
const double a0_hh = 1.152714e-08;// m
double V_I;
// we introduce 'fix' to never worry about NaNs
/*if (r_e1h2 == 0 || r_e2h1 == 0 || r_e1e2 == 0 || r_h1h2 == 0)
V_I = e2eps * (1 / (r_e1e2 + fix) + 1 / (r_h1h2 + fix) - 1 / (r_e1h2 + fix) - 1 / (r_e1h2 + fix)); // sum of coulomb potentials
else*/
V_I = e2eps * (1 / r_e1e2 + 1 / r_h1h2 - 1 / r_e1h2 - 1 / r_e2h1);
return V_I;
}
/* e-h attraction potential
fix is the 'nonzeroness': V = const / (r + fix) */
__device__ double V_eh_pot(double r_eh, double fix) {
const double pi = 3.14159265359;
const double e2eps = 1.8412430591e-29; // in SI (J*m); e2eps = e^2/(4pi * eps * eps0), so V(r) = e2eps/r
// 1.8412430591e-29 ~ eps = 12.53, 1.78843221e-29 ~ eps = 12.9
const double a0_hh = 1.152714e-08;// m
double V_eh = e2eps * 1 / (r_eh + fix);
return V_eh;
}
/* ------------------------------------------------ __global__ kernel functions ------------------------------------------------------------------ */
/* used to initialize the random states */
__global__ void initRand(unsigned int seed, int runCounter, curandState_t* states) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
// nvidia recommends using the same seed but monotonically increasing sequence numbers when dealing with multiple kernel launches
// but that is much much slower, so best to leave runCounter=0
for (int i = index; i < N; i += stride)
curand_init(seed, N * runCounter + i, 0, &states[i]);
}
/* calculates J_exch^{e-e}(q) using MC method
stores numPoints function values in gpu_f and as much squares in gpu_f2 */
__global__ void intMC_J_xx_exch(curandState_t* states, double* gpu_f, double* gpu_f2, double* gpu_wf, wf_parameter_struct* gpu_X_wf_params, double L, int dim, double q) {
const double pi = 3.14159265359;
const double m0 = 9.109383561e-31; // free electron mass, kg
const double m_e = 0.067 * m0; // eff e mass in GaAs
const double m_hh = 0.35 * m0; // eff mass of heavy holes in GaAs along z
const double mu_hh = 0.0417 * m0; // 1.0 / (1.0 / m_e + 1.0 / m_hh); // reduced mass of e-hh with in-plane m_hh
const double M = m_e + m_hh;
const double a0_hh = 1.152714e-08; // Xhh Bohr radius, m
const double e = 1.602176634e-19; // elementary charge, coulombs
const double e2eps = 1.8412430591e-29; // in SI (J*m); e2eps = e^2/(4pi * eps * eps0), so V(r) = e2eps/r
// 1.8412430591e-29 ~ eps = 12.53, 1.78843221e-29 ~ eps = 12.9
double dZ = gpu_X_wf_params->dZ;
int sizeRho = gpu_X_wf_params->sizeRho;
int sizeZe = gpu_X_wf_params->sizeZe;
int sizeZh = gpu_X_wf_params->sizeZh;
double S_real = gpu_X_wf_params->S_real;
// 2d polar relative coordinates give detTheta = 1; 2d centre-mass coords are integrated and give an S multiplier
// we are left with (rho_eh, phi_eh)x2 + (xi, phi_xi) + (z_e1, z_e2, z_h1, z_h2) -- 10 coords in total
double rho_e1h1, phi_e1h1, z_e1, z_h1;
double rho_e2h2, phi_e2h2, z_e2, z_h2;
double xi, phi_xi;
double rho_e1h2, rho_e2h1; // 2d distances for psi_e1h2, psi_e2h1
double r_e1h2 = 0, r_e2h1 = 0, r_e1e2 = 0, r_h1h2 = 0; // 3d distances for potential V_I
double psi_e1h1, psi_e2h2, psi_e1h2, psi_e2h1; // exciton wavefunctions
double V_I; // value of potential V_I
double q_factor_real, q_factor_im, q_factor_arg; // contain q-dependency assuming Q = Q' = 0
double detTheta; // Jacobian
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
double rands[10];
for (int i = index; i < numPoints; i += stride) {
rands[0] = curand_uniform_double(&states[dim * i + 0]);
rands[1] = curand_uniform_double(&states[dim * i + 1]);
rands[2] = curand_uniform_double(&states[dim * i + 2]);
rands[3] = curand_uniform_double(&states[dim * i + 3]);
rands[4] = curand_uniform_double(&states[dim * i + 4]);
rands[5] = curand_uniform_double(&states[dim * i + 5]);
rands[6] = curand_uniform_double(&states[dim * i + 6]);
rands[7] = curand_uniform_double(&states[dim * i + 7]);
rands[8] = curand_uniform_double(&states[dim * i + 8]);
rands[9] = curand_uniform_double(&states[dim * i + 9]);
rho_e1h1 = dZ * (1.0 + rands[0] * (sizeRho + 1));
phi_e1h1 = 2 * pi * rands[1];
z_e1 = dZ * (rands[2] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
z_h1 = dZ * (rands[3] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
rho_e2h2 = dZ * (1.0 + rands[4] * (sizeRho + 1));
phi_e2h2 = 2 * pi * rands[5];
z_e2 = dZ * (rands[6] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
z_h2 = dZ * (rands[7] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
// theoretically '2 * (sizeRho + 1)' is the largest upper bound for xi, larger ones lead to gpu_f[i] = 0
// investigate!
xi = dZ + dZ * rands[8] * (2 * (sizeRho + 1));
phi_xi = 2 * pi * rands[9];
// now let's calculate other necessary distances: rho/r_e1h2, rho/r_e2h1, r_e1e2, r_h1h2 -------------------------------------------------
double rho_1_sq = pow(rho_e1h1, 2);
double rho_2_sq = pow(rho_e2h2, 2);
double xi_sq = pow(xi, 2);
double rho_e1h2_sq, rho_e2h1_sq, rho_e1e2_sq, rho_h1h2_sq;
// doubled scalar products:
double t_xi_rho_1 = 2 * xi * rho_e1h1 * cos(phi_xi - phi_e1h1);
double t_xi_rho_2 = 2 * xi * rho_e2h2 * cos(phi_xi - phi_e2h2);
double t_rho_1_rho_2 = 2 * rho_e1h1 * rho_e2h2 * cos(phi_e1h1 - phi_e2h2);
// assemble necessary 2d vector squares:
rho_e1h2_sq = (xi_sq + pow(m_hh / M, 2) * rho_1_sq + pow(m_e / M, 2) * rho_2_sq // for w.f.and potential
+ m_hh / M * t_xi_rho_1
+ m_e / M * t_xi_rho_2
+ m_e * m_hh / pow(M, 2) * t_rho_1_rho_2);
rho_e2h1_sq = (xi_sq + pow(m_e / M, 2) * rho_1_sq + pow(m_hh / M, 2) * rho_2_sq // for w.f.and potential
- m_e / M * t_xi_rho_1
- m_hh / M * t_xi_rho_2
+ m_e * m_hh / pow(M, 2) * t_rho_1_rho_2);
rho_e1e2_sq = (xi_sq + pow(m_hh / M, 2) * (rho_1_sq + rho_2_sq) // only for the potential
+ m_hh / M * t_xi_rho_1
- m_hh / M * t_xi_rho_2
- pow(m_hh / M, 2) * t_rho_1_rho_2);
rho_h1h2_sq = (xi_sq + pow(m_e / M, 2) * (rho_1_sq + rho_2_sq) // only for the potential
- m_e / M * t_xi_rho_1
+ m_e / M * t_xi_rho_2
- pow(m_e / M, 2) * t_rho_1_rho_2);
// assemble 3d distances for V_I potential:
r_e1h2 = sqrt(rho_e1h2_sq + pow(z_e1 - z_h2, 2));
r_e2h1 = sqrt(rho_e2h1_sq + pow(z_e2 - z_h1, 2));
r_e1e2 = sqrt(rho_e1e2_sq + pow(z_e1 - z_e2, 2));
r_h1h2 = sqrt(rho_h1h2_sq + pow(z_h1 - z_h2, 2));
// get 2d vector lengths for psi_e1h2, psi_e2h1:
rho_e1h2 = sqrt(rho_e1h2_sq);
rho_e2h1 = sqrt(rho_e2h1_sq);
// now, calculate wavefunctions:
psi_e1h1 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e1h1, z_e1, z_h1);
psi_e2h2 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e2h2, z_e2, z_h2);
psi_e1h2 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e1h2, z_e1, z_h2);
psi_e2h1 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e2h1, z_e2, z_h1);
// calculate potential:
V_I = V_I_pot(r_e1h2, r_e2h1, r_e1e2, r_h1h2);
		// evaluate q-factor
double q_factor_arg = q * (m_hh - m_e) / M * xi * cos(phi_xi)
+ q * (2 * mu_hh / M * (rho_e2h2 * cos(phi_e2h2) - rho_e1h1 * cos(phi_e1h1)));
q_factor_real = cos(q_factor_arg);
q_factor_im = sin(q_factor_arg);
// finally, calculate the Jacobian:
detTheta = rho_e1h1 * rho_e2h2 * xi;
// now we simply evaluate the complete integrand
gpu_f[i] = S_real * detTheta * q_factor_real * psi_e1h2 * psi_e2h1 * V_I * psi_e1h1 * psi_e2h2 / e * 1e6 * (S_real * 1e12); // in micro eV * micro m
//gpu_f[i] = S_real * detTheta * psi_e1h1 * psi_e1h1 * psi_e2h2 * psi_e2h2; // double norm
//gpu_f[i] = S_real * detTheta * psi_e1h1 * psi_e2h2 * psi_e1h2 * psi_e2h1; // overlap integral
//printf("\ngpu_f[%d] = S_real * detTheta * psi_e1h1 * psi_e2 * (V_I / e * 1e6) * psi_e2h1 * psi_e1 = %e * %e * %e * %e * %e * %e * %e", i, S_real, detTheta, psi_e1h1, psi_e2, (V_I / e * 1e6), psi_e2h1, psi_e1);
gpu_f2[i] = gpu_f[i] * gpu_f[i]; // here we store their squares to get <f^2> -> int error
}
}
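/* A host-side sketch (not part of the original source) showing how the per-point values written by the
   kernel above are typically combined into a Monte Carlo estimate with a statistical error:
   integral ~ volume * <f> and error ~ volume * sqrt((<f^2> - <f>^2) / numPoints), which is why the
   kernels store both gpu_f and gpu_f2.  'volume' stands for the product of the sampled coordinate ranges
   and is left as a caller-supplied placeholder; numPoints is assumed to be visible on the host as well. */
double mcEstimate(const double* gpu_f, const double* gpu_f2, double volume, double* error_out) {
	double* f = (double*)malloc(numPoints * sizeof(double));
	double* f2 = (double*)malloc(numPoints * sizeof(double));
	cudaMemcpy(f, gpu_f, numPoints * sizeof(double), cudaMemcpyDeviceToHost);
	cudaMemcpy(f2, gpu_f2, numPoints * sizeof(double), cudaMemcpyDeviceToHost);
	double mean = 0, mean2 = 0;
	for (int i = 0; i < numPoints; ++i) { mean += f[i]; mean2 += f2[i]; }
	mean /= numPoints; mean2 /= numPoints;
	*error_out = volume * sqrt((mean2 - mean * mean) / numPoints);
	free(f); free(f2);
	return volume * mean;
}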
/* calculates the hole-exchange counterpart of J_exch(q) using the MC method (kernel body currently identical to the e-e variant above);
stores numPoints integrand values in gpu_f and as many squares in gpu_f2 */
__global__ void intMC_J_xx_exch_hh(curandState_t* states, double* gpu_f, double* gpu_f2, double* gpu_wf, wf_parameter_struct* gpu_X_wf_params, double L, int dim, double q) {
const double pi = 3.14159265359;
const double m0 = 9.109383561e-31; // free electron mass, kg
const double m_e = 0.067 * m0; // eff e mass in GaAs
const double m_hh = 0.35 * m0; // eff mass of heavy holes in GaAs along z
const double mu_hh = 0.0417 * m0; // 1.0 / (1.0 / m_e + 1.0 / m_hh); // reduced mass of e-hh with in-plane m_hh
const double M = m_e + m_hh;
const double a0_hh = 1.152714e-08; // Xhh Bohr radius, m
const double e = 1.602176634e-19; // elementary charge, coulombs
const double e2eps = 1.8412430591e-29; // in SI (J*m); e2eps = e^2/(4pi * eps * eps0), so V(r) = e2eps/r
// 1.8412430591e-29 ~ eps = 12.53, 1.78843221e-29 ~ eps = 12.9
double dZ = gpu_X_wf_params->dZ;
int sizeRho = gpu_X_wf_params->sizeRho;
int sizeZe = gpu_X_wf_params->sizeZe;
int sizeZh = gpu_X_wf_params->sizeZh;
double S_real = gpu_X_wf_params->S_real;
// 2d polar relative coordinates give detTheta = 1; 2d centre-mass coords are integrated and give an S multiplier
// we are left with (rho_eh, phi_eh)x2 + (xi, phi_xi) + (z_e1, z_e2, z_h1, z_h2) -- 10 coords in total
double rho_e1h1, phi_e1h1, z_e1, z_h1;
double rho_e2h2, phi_e2h2, z_e2, z_h2;
double xi, phi_xi;
double rho_e1h2, rho_e2h1; // 2d distances for psi_e1h2, psi_e2h1
double r_e1h2 = 0, r_e2h1 = 0, r_e1e2 = 0, r_h1h2 = 0; // 3d distances for potential V_I
double psi_e1h1, psi_e2h2, psi_e1h2, psi_e2h1; // exciton wavefunctions
double V_I; // value of potential V_I
double q_factor_real, q_factor_im, q_factor_arg; // contain q-dependency assuming Q = Q' = 0
double detTheta; // Jacobian
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
double rands[10];
for (int i = index; i < numPoints; i += stride) {
rands[0] = curand_uniform_double(&states[dim * i + 0]);
rands[1] = curand_uniform_double(&states[dim * i + 1]);
rands[2] = curand_uniform_double(&states[dim * i + 2]);
rands[3] = curand_uniform_double(&states[dim * i + 3]);
rands[4] = curand_uniform_double(&states[dim * i + 4]);
rands[5] = curand_uniform_double(&states[dim * i + 5]);
rands[6] = curand_uniform_double(&states[dim * i + 6]);
rands[7] = curand_uniform_double(&states[dim * i + 7]);
rands[8] = curand_uniform_double(&states[dim * i + 8]);
rands[9] = curand_uniform_double(&states[dim * i + 9]);
rho_e1h1 = dZ * (1.0 + rands[0] * (sizeRho + 1));
phi_e1h1 = 2 * pi * rands[1];
z_e1 = dZ * (rands[2] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
z_h1 = dZ * (rands[3] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
rho_e2h2 = dZ * (1.0 + rands[4] * (sizeRho + 1));
phi_e2h2 = 2 * pi * rands[5];
z_e2 = dZ * (rands[6] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
z_h2 = dZ * (rands[7] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
// theoretically '2 * (sizeRho + 1)' is the largest upper bound for xi, larger ones lead to gpu_f[i] = 0
// investigate!
xi = dZ + dZ * rands[8] * (2 * (sizeRho + 1));
phi_xi = 2 * pi * rands[9];
// now let's calculate other necessary distances: rho/r_e1h2, rho/r_e2h1, r_e1e2, r_h1h2 -------------------------------------------------
double rho_1_sq = pow(rho_e1h1, 2);
double rho_2_sq = pow(rho_e2h2, 2);
double xi_sq = pow(xi, 2);
double rho_e1h2_sq, rho_e2h1_sq, rho_e1e2_sq, rho_h1h2_sq;
// doubled scalar products:
double t_xi_rho_1 = 2 * xi * rho_e1h1 * cos(phi_xi - phi_e1h1);
double t_xi_rho_2 = 2 * xi * rho_e2h2 * cos(phi_xi - phi_e2h2);
double t_rho_1_rho_2 = 2 * rho_e1h1 * rho_e2h2 * cos(phi_e1h1 - phi_e2h2);
// assemble necessary 2d vector squares:
rho_e1h2_sq = (xi_sq + pow(m_hh / M, 2) * rho_1_sq + pow(m_e / M, 2) * rho_2_sq // for w.f.and potential
+ m_hh / M * t_xi_rho_1
+ m_e / M * t_xi_rho_2
+ m_e * m_hh / pow(M, 2) * t_rho_1_rho_2);
rho_e2h1_sq = (xi_sq + pow(m_e / M, 2) * rho_1_sq + pow(m_hh / M, 2) * rho_2_sq // for w.f.and potential
- m_e / M * t_xi_rho_1
- m_hh / M * t_xi_rho_2
+ m_e * m_hh / pow(M, 2) * t_rho_1_rho_2);
rho_e1e2_sq = (xi_sq + pow(m_hh / M, 2) * (rho_1_sq + rho_2_sq) // only for the potential
+ m_hh / M * t_xi_rho_1
- m_hh / M * t_xi_rho_2
- pow(m_hh / M, 2) * t_rho_1_rho_2);
rho_h1h2_sq = (xi_sq + pow(m_e / M, 2) * (rho_1_sq + rho_2_sq) // only for the potential
- m_e / M * t_xi_rho_1
+ m_e / M * t_xi_rho_2
- pow(m_e / M, 2) * t_rho_1_rho_2);
// assemble 3d distances for V_I potential:
r_e1h2 = sqrt(rho_e1h2_sq + pow(z_e1 - z_h2, 2));
r_e2h1 = sqrt(rho_e2h1_sq + pow(z_e2 - z_h1, 2));
r_e1e2 = sqrt(rho_e1e2_sq + pow(z_e1 - z_e2, 2));
r_h1h2 = sqrt(rho_h1h2_sq + pow(z_h1 - z_h2, 2));
// get 2d vector lengths for psi_e1h2, psi_e2h1:
rho_e1h2 = sqrt(rho_e1h2_sq);
rho_e2h1 = sqrt(rho_e2h1_sq);
// now, calculate wavefunctions:
psi_e1h1 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e1h1, z_e1, z_h1);
psi_e2h2 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e2h2, z_e2, z_h2);
psi_e1h2 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e1h2, z_e1, z_h2);
psi_e2h1 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e2h1, z_e2, z_h1);
// calculate potential:
V_I = V_I_pot(r_e1h2, r_e2h1, r_e1e2, r_h1h2);
		// evaluate q-factor
double q_factor_arg = q * (m_hh - m_e) / M * xi * cos(phi_xi)
+ q * (2 * mu_hh / M * (rho_e2h2 * cos(phi_e2h2) - rho_e1h1 * cos(phi_e1h1)));
q_factor_real = cos(q_factor_arg);
q_factor_im = sin(q_factor_arg);
// finally, calculate the Jacobian:
detTheta = rho_e1h1 * rho_e2h2 * xi;
// now we simply evaluate the complete integrand
gpu_f[i] = S_real * detTheta * q_factor_real * psi_e1h2 * psi_e2h1 * V_I * psi_e1h1 * psi_e2h2 / e * 1e6 * (S_real * 1e12); // in micro eV * micro m
//gpu_f[i] = S_real * detTheta * psi_e1h1 * psi_e1h1 * psi_e2h2 * psi_e2h2; // double norm
//gpu_f[i] = S_real * detTheta * psi_e1h1 * psi_e2h2 * psi_e1h2 * psi_e2h1; // overlap integral
//printf("\ngpu_f[%d] = S_real * detTheta * psi_e1h1 * psi_e2 * (V_I / e * 1e6) * psi_e2h1 * psi_e1 = %e * %e * %e * %e * %e * %e * %e", i, S_real, detTheta, psi_e1h1, psi_e2, (V_I / e * 1e6), psi_e2h1, psi_e1);
gpu_f2[i] = gpu_f[i] * gpu_f[i]; // here we store their squares to get <f^2> -> int error
}
}
/* calculates J_dir(q) using the MC method;
stores numPoints integrand values in gpu_f and as many squares in gpu_f2 */
__global__ void intMC_J_xx_dir(curandState_t* states, double* gpu_f, double* gpu_f2, double* gpu_wf, wf_parameter_struct* gpu_X_wf_params, double L, int dim, double q) {
const double pi = 3.14159265359;
const double m0 = 9.109383561e-31; // free electron mass, kg
const double m_e = 0.067 * m0; // eff e mass in GaAs
const double m_hh = 0.35 * m0; // eff mass of heavy holes in GaAs along z
const double mu_hh = 0.0417 * m0; // 1.0 / (1.0 / m_e + 1.0 / m_hh); // reduced mass of e-hh with in-plane m_hh
const double M = m_e + m_hh;
const double a0_hh = 1.152714e-08; // Xhh Bohr radius, m
const double e = 1.602176634e-19; // elementary charge, coulombs
const double e2eps = 1.8412430591e-29; // in SI (J*m); e2eps = e^2/(4pi * eps * eps0), so V(r) = e2eps/r
// 1.8412430591e-29 ~ eps = 12.53, 1.78843221e-29 ~ eps = 12.9
double dZ = gpu_X_wf_params->dZ;
int sizeRho = gpu_X_wf_params->sizeRho;
int sizeZe = gpu_X_wf_params->sizeZe;
int sizeZh = gpu_X_wf_params->sizeZh;
double S_real = gpu_X_wf_params->S_real;
// 2d polar relative coordinates give detTheta = 1; 2d centre-mass coords are integrated and give an S multiplier
// we are left with (rho_eh, phi_eh)x2 + (xi, phi_xi) + (z_e1, z_e2, z_h1, z_h2) -- 10 coords in total
double rho_e1h1, phi_e1h1, z_e1, z_h1;
double rho_e2h2, phi_e2h2, z_e2, z_h2;
double xi, phi_xi;
double r_e1h2 = 0, r_e2h1 = 0, r_e1e2 = 0, r_h1h2 = 0; // 3d distances for potential V_I
double psi_e1h1, psi_e2h2; // exciton wavefunctions
double V_I; // value of potential V_I
double q_factor_real, q_factor_im, q_factor_arg; // contains q-dependency assuming Q = Q' = 0
double detTheta; // Jacobian
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
double rands[10];
for (int i = index; i < numPoints; i += stride) {
rands[0] = curand_uniform_double(&states[dim * i + 0]);
rands[1] = curand_uniform_double(&states[dim * i + 1]);
rands[2] = curand_uniform_double(&states[dim * i + 2]);
rands[3] = curand_uniform_double(&states[dim * i + 3]);
rands[4] = curand_uniform_double(&states[dim * i + 4]);
rands[5] = curand_uniform_double(&states[dim * i + 5]);
rands[6] = curand_uniform_double(&states[dim * i + 6]);
rands[7] = curand_uniform_double(&states[dim * i + 7]);
rands[8] = curand_uniform_double(&states[dim * i + 8]);
rands[9] = curand_uniform_double(&states[dim * i + 9]);
rho_e1h1 = dZ * (1.0 + rands[0] * (sizeRho + 1));
phi_e1h1 = 2 * pi * rands[1];
z_e1 = dZ * (rands[2] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
z_h1 = dZ * (rands[3] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
rho_e2h2 = dZ * (1.0 + rands[4] * (sizeRho + 1));
phi_e2h2 = 2 * pi * rands[5];
z_e2 = dZ * (rands[6] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
z_h2 = dZ * (rands[7] * (sizeZe + 1) - (double)(sizeZe + 1) / 2);
// theoretically '2 * (sizeRho + 1)' is the largest upper bound for xi, larger ones lead to gpu_f[i] = 0
// investigate!
xi = dZ + dZ * rands[8] * (2 * (sizeRho + 1));
phi_xi = 2 * pi * rands[9];
// now let's calculate other necessary distances: rho/r_e1h2, rho/r_e2h1, r_e1e2, r_h1h2 -------------------------------------------------
double rho_1_sq = pow(rho_e1h1, 2);
double rho_2_sq = pow(rho_e2h2, 2);
double xi_sq = pow(xi, 2);
double rho_e1h2_sq, rho_e2h1_sq, rho_e1e2_sq, rho_h1h2_sq;
// doubled scalar products:
double t_xi_rho_1 = 2 * xi * rho_e1h1 * cos(phi_xi - phi_e1h1);
double t_xi_rho_2 = 2 * xi * rho_e2h2 * cos(phi_xi - phi_e2h2);
double t_rho_1_rho_2 = 2 * rho_e1h1 * rho_e2h2 * cos(phi_e1h1 - phi_e2h2);
// assemble necessary 2d vector squares:
rho_e1h2_sq = (xi_sq + pow(m_hh / M, 2) * rho_1_sq + pow(m_e / M, 2) * rho_2_sq // for w.f.and potential
+ m_hh / M * t_xi_rho_1
+ m_e / M * t_xi_rho_2
+ m_e * m_hh / pow(M, 2) * t_rho_1_rho_2);
rho_e2h1_sq = (xi_sq + pow(m_e / M, 2) * rho_1_sq + pow(m_hh / M, 2) * rho_2_sq // for w.f.and potential
- m_e / M * t_xi_rho_1
- m_hh / M * t_xi_rho_2
+ m_e * m_hh / pow(M, 2) * t_rho_1_rho_2);
rho_e1e2_sq = (xi_sq + pow(m_hh / M, 2) * (rho_1_sq + rho_2_sq) // only for the potential
+ m_hh / M * t_xi_rho_1
- m_hh / M * t_xi_rho_2
- pow(m_hh / M, 2) * t_rho_1_rho_2);
rho_h1h2_sq = (xi_sq + pow(m_e / M, 2) * (rho_1_sq + rho_2_sq) // only for the potential
- m_e / M * t_xi_rho_1
+ m_e / M * t_xi_rho_2
- pow(m_e / M, 2) * t_rho_1_rho_2);
// assemble 3d distances for V_I potential:
r_e1h2 = sqrt(rho_e1h2_sq + pow(z_e1 - z_h2, 2));
r_e2h1 = sqrt(rho_e2h1_sq + pow(z_e2 - z_h1, 2));
r_e1e2 = sqrt(rho_e1e2_sq + pow(z_e1 - z_e2, 2));
r_h1h2 = sqrt(rho_h1h2_sq + pow(z_h1 - z_h2, 2));
// now, calculate wavefunctions:
psi_e1h1 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e1h1, z_e1, z_h1);
psi_e2h2 = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_e2h2, z_e2, z_h2);
// calculate potential:
V_I = V_I_pot(r_e1h2, r_e2h1, r_e1e2, r_h1h2);
		// evaluate q-factor
double q_factor_arg = q * xi * cos(phi_xi);
q_factor_real = cos(q_factor_arg);
q_factor_im = sin(q_factor_arg);
// finally, calculate the Jacobian:
detTheta = rho_e1h1 * rho_e2h2 * xi;
// now we simply evaluate the complete integrand
gpu_f[i] = S_real * detTheta * q_factor_real * psi_e1h1 * psi_e2h2 * V_I * psi_e1h1 * psi_e2h2 / e * 1e6 * (S_real * 1e12); // in micro eV * micro m
//gpu_f[i] = S_real * detTheta * psi_e1h1 * psi_e1h1 * psi_e2h2 * psi_e2h2; // double norm
//gpu_f[i] = S_real * detTheta * psi_e1h1 * psi_e2h2 * psi_e1h2 * psi_e2h1; // overlap integral
//printf("\ngpu_f[%d] = S_real * detTheta * psi_e1h1 * psi_e2 * (V_I / e * 1e6) * psi_e2h1 * psi_e1 = %e * %e * %e * %e * %e * %e * %e", i, S_real, detTheta, psi_e1h1, psi_e2, (V_I / e * 1e6), psi_e2h1, psi_e1);
gpu_f2[i] = gpu_f[i] * gpu_f[i]; // here we store their squares to get <f^2> -> int error
}
}
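/* Note on the kernel above: in the direct term the bra and ket carry the same pairing psi_e1h1 * psi_e2h2
   (no carrier swap), so only the exciton-exciton separation enters the phase and the q-dependence reduces
   to cos(q * xi_x); the exchange kernels further up instead use the swapped pair psi_e1h2 * psi_e2h1 in the bra. */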
/* same, but calculates Ex: <psi | V_eh | psi> */
__global__ void intMC_Ex(curandState_t* states, double* gpu_f, double* gpu_f2, double* gpu_wf, wf_parameter_struct* gpu_X_wf_params, double L) {
const double pi = 3.14159265359;
const double m0 = 9.109383561e-31; // free electron mass, kg
const double m_e = 0.067 * m0; // eff e mass in GaAs
const double m_hh = 0.51 * m0; // eff mass of heavy holes in GaAs
const double mu_hh = 1.0 / (1.0 / m_e + 1.0 / m_hh); // reduced mass of e-hh
const double M_h = m_e + m_hh;
const double a0_hh = 1.152714e-08; // Xhh Bohr radius, m
const double e = 1.602176634e-19; // elementary charge, coulombs
const double e2eps = 1.7884322117e-29; // in SI (J*m); e2eps = e^2/(4pi * eps * eps0), so V(r) = e2eps/r
double dZ = gpu_X_wf_params->dZ;
int sizeRho = gpu_X_wf_params->sizeRho;
int sizeZe = gpu_X_wf_params->sizeZe;
int sizeZh = gpu_X_wf_params->sizeZh;
double S_real = gpu_X_wf_params->S_real;
double fix = gpu_X_wf_params->fix;
// 2d polar relative coordinates give detTheta = 1; 2d centre-mass coords are integrated and give an S multiplier
// we are left with (rho_eh, phi_eh) + (z_e, z_h) -- 4 coords in total
double rho_eh, phi_eh, z_e, z_h;
	double r_eh = 0; // e-h distance for the potential V_eh_pot
double psi_eh; // exc. wavefunc of exciton
double V_eh; // value of potential V_eh_pot
double detTheta;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numPoints; i += stride) {
rho_eh = curand_uniform(&states[dim * i + 0]) * dZ * (sizeRho - 1);
phi_eh = curand_uniform(&states[dim * i + 2]) * 2 * pi;
z_e = curand_uniform(&states[dim * i + 4]) * dZ * (sizeZe - 1);
z_h = curand_uniform(&states[dim * i + 6]) * dZ * (sizeZh - 1);
// calc distances for potential (simple to check expressions)
r_eh = sqrt(pow(rho_eh, 2) + pow(z_e - z_h, 2));
// evaluate the V_I potential
V_eh = V_eh_pot(r_eh, fix);
// evaluate wave functions
psi_eh = psi_1s_QW(gpu_X_wf_params, gpu_wf, rho_eh, z_e, z_h);
// don't forget about the jacobian
		detTheta = rho_eh; // Jacobian determinant of the cylindrical relative coordinates
// now we simply evaluate the complete integrand
gpu_f[i] = S_real * detTheta * psi_eh * V_eh * psi_eh / e * 1e6; // in micro eV
//printf("\ngpu_f[%d] = S_real * detTheta * psi_e1h1 * psi_e2 * (V_I / e * 1e6) * psi_e2h1 * psi_e1 = %e * %e * %e * %e * %e * %e * %e", i, S_real, detTheta, psi_e1h1, psi_e2, (V_I / e * 1e6), psi_e2h1, psi_e1);
gpu_f2[i] = gpu_f[i] * gpu_f[i]; // here we store their squares to get <f^2> -> int error
}
}
|
d0b82ae23efbf573b2887028be64cd76bb627e76.hip | // !!! This is a file automatically generated by hipify!!!
//
// This CUDA kernel was generated by CUDA Visual Studio 2012 Template
//
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| d0b82ae23efbf573b2887028be64cd76bb627e76.cu | //
// This CUDA kernel was generated by CUDA Visual Studio 2012 Template
//
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
84b474d768cc0fbd43c41d0ed5141890fc928c93.hip | // !!! This is a file automatically generated by hipify!!!
#include "PSOCuda.cuh"
#include <stdexcept>
#include <algorithm>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/functional.h>
#include <thrust/extrema.h>
extern "C" __device__ __device_builtin__ void __syncthreads();
extern "C" __device__ __device_builtin__ float fminf(float x, float y);
extern "C" __device__ __device_builtin__ float fmaxf(float x, float y);
extern "C" __device__ __device_builtin__ unsigned int __uAtomicInc(unsigned int *address, unsigned int val);
extern "C" __device__ __device_builtin__ void __threadfence_system(void);
#define MAX_DIMENSIONS 20
__constant__ float _c_minPosition[MAX_DIMENSIONS];
__constant__ float _c_maxPosition[MAX_DIMENSIONS];
__forceinline__ __device__ float EvalBanana(float *position)
{
float x = position[0];
float y = position[1];
float a = y - x * x;
float b = 1 - x;
return 100 * a*a + b*b;
}
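/* EvalBanana above is the classic Rosenbrock ("banana") test function
   f(x, y) = 100 (y - x^2)^2 + (1 - x)^2, whose global minimum f = 0 sits at (x, y) = (1, 1);
   the PSO kernels below minimize it as the particle fitness. */
/* k_InitPSO seeds the hiprand state for its thread index, draws uniform initial positions and velocities
   inside [_c_minPosition, _c_maxPosition], evaluates every particle's fitness, and finishes with a
   shared-memory argmin reduction that records the best particle of each block. */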
__global__ void k_InitPSO(
int numParticles,
int numDimensions,
float *_positions,
float *_velocities,
float *_bestPositions,
float *_bestFitness,
float *_bestGlobalPosition,
float *_bestGlobalFitness,
hiprandState_t *_s)
{
__shared__ float bestFitness[1024];
__shared__ int ptrs[1024];
int idx = threadIdx.x;
int ptr_g0 = blockDim.x * blockIdx.x;
int gidx = ptr_g0 + idx;
if (gidx >= numParticles)
bestFitness[idx] = FLT_MAX;
__syncthreads();
	int ptr_g = gidx * numDimensions; // position in memory
if (gidx < numParticles)
{
hiprand_init(threadIdx.x, 0, 0, &_s[idx]);
		// Calculate random pos & vel
for (int d = 0; d < numDimensions; ++d)
{
float min = _c_minPosition[d];
float max = _c_maxPosition[d];
_positions[ptr_g + d] = hiprand_uniform(&_s[idx])*(max - min) + min;
_velocities[ptr_g + d] = hiprand_uniform(&_s[idx])*(max - min) + min;
}
		// Initializes local bests
bestFitness[idx] = EvalBanana(&_positions[ptr_g]);
}
__syncthreads();
	// Find the best (block-wide argmin)
ptrs[idx] = idx;
for (int s = 1024 / 2; s > 0; s /= 2)
{
if (idx < s)
{
if (bestFitness[ptrs[idx]] > bestFitness[ptrs[idx + s]])
{
int tmp = ptrs[idx + s];
ptrs[idx + s] = ptrs[idx];
ptrs[idx] = tmp;
}
}
__syncthreads();
}
if (gidx < numParticles)
{
for (int d = 0; d < numDimensions; ++d)
_bestPositions[ptr_g + d] = _positions[ptr_g + d];
_bestFitness[gidx] = bestFitness[idx];
if (idx < numDimensions)
_bestGlobalPosition[blockIdx.x * numDimensions + idx] = _positions[(ptr_g0 + ptrs[0]) * numDimensions + idx];
if (idx == 0)
_bestGlobalFitness[blockIdx.x] = bestFitness[ptrs[0]];
}
}
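/* k_IterateMultiBlock below performs one standard PSO step per launch: each particle's velocity is
   updated as v <- W*v + C1*r1*(pbest - x) + C2*r2*(gbest - x) with r1, r2 ~ U(0,1), both the velocity
   and the new position are clamped to [_c_minPosition, _c_maxPosition], personal bests are refreshed,
   and a shared-memory argmin reduction updates this block's copy of the global best. */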
__global__ void k_IterateMultiBlock(
int numParticles,
int numDimensions,
float *_positions,
float *_velocities,
float *_bestPositions,
float *_bestFitness,
float *_bestGlobalPosition,
float *_bestGlobalFitness,
hiprandState_t *_s)
{
__shared__ int ptrs[1024];
__shared__ float bestFitness[1024];
float bestGlobalFitness;
int p = threadIdx.x;
int block = blockIdx.x;
int ptr_g0 = blockDim.x * block;
int gp = ptr_g0 + p;
if (gp < numParticles)
{
bestFitness[p] = _bestFitness[gp];
}
if (p == 0)
bestGlobalFitness = _bestGlobalFitness[0];
else if (gp >= numParticles)
bestFitness[p] = FLT_MAX;
__syncthreads();
int ptr_g = gp * numDimensions;
if (gp < numParticles)
{
for (int d = 0; d < numDimensions; ++d)
{
float r1 = hiprand_uniform(&_s[p]);
float r2 = hiprand_uniform(&_s[p]);
int ptr = ptr_g + d;
float position = _positions[ptr];
float newVelocity = (W * _velocities[ptr]) +
(C1 * r1 * (_bestPositions[ptr] - position)) +
(C2 * r2 * (_bestGlobalPosition[block * numDimensions + d] - position));
newVelocity = fmaxf(_c_minPosition[d], fminf(_c_maxPosition[d], newVelocity));
_velocities[ptr] = newVelocity;
float newPosition = position + newVelocity;
newPosition = fmaxf(_c_minPosition[d], fminf(_c_maxPosition[d], newPosition));
_positions[ptr] = newPosition;
}
float newFitness = EvalBanana(&_positions[ptr_g]);
if (newFitness < bestFitness[p])
{
bestFitness[p] = newFitness;
for (int d = 0; d < numDimensions; ++d)
{
int ptr = ptr_g + d;
_bestPositions[ptr] = _positions[ptr];
}
}
}
__syncthreads();
	// Find the best (block-wide argmin)
ptrs[p] = p;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2)
{
if (p < s)
{
if (bestFitness[ptrs[p]] > bestFitness[ptrs[p + s]])
{
int tmp = ptrs[p + s];
ptrs[p + s] = ptrs[p];
ptrs[p] = tmp;
}
}
__syncthreads();
}
if (p == 0)
{
if (bestFitness[ptrs[0]] < bestGlobalFitness)
{
bestGlobalFitness = bestFitness[ptrs[0]];
for (int d = 0; d < numDimensions; ++d)
_bestGlobalPosition[block * numDimensions + d] = _positions[(ptr_g0 + ptrs[0]) * numDimensions + d];
}
}
__syncthreads();
if (gp < numParticles)
_bestFitness[gp] = bestFitness[p];
if (p == 0)
_bestGlobalFitness[block] = bestGlobalFitness;
}
__global__ void k_IterateSingleBlock(
int numParticles,
int numDimensions,
int numIterations,
float *_positions,
float *_velocities,
float *_bestPositions,
float *_bestFitness,
float *_bestGlobalPosition,
float *_bestGlobalFitness,
hiprandState_t *_s)
{
__shared__ int ptrs[1024];
__shared__ float bestFitness[1024];
float bestGlobalFitness;
int p = threadIdx.x;
int block = blockIdx.x;
if (p < numParticles)
bestFitness[p] = _bestFitness[p];
if (p == 0)
bestGlobalFitness = _bestGlobalFitness[0];
else if (p >= numParticles)
bestFitness[p] = FLT_MAX;
__syncthreads();
int ptr_g = p * numDimensions;
for (int it = 0; it < numIterations; ++it)
{
if (p < numParticles)
{
for (int d = 0; d < numDimensions; ++d)
{
float r1 = hiprand_uniform(&_s[p]);
float r2 = hiprand_uniform(&_s[p]);
int ptr = ptr_g + d;
float position = _positions[ptr];
float newVelocity = (W * _velocities[ptr]) +
(C1 * r1 * (_bestPositions[ptr] - position)) +
(C2 * r2 * (_bestGlobalPosition[block * numDimensions + d] - position));
newVelocity = fmaxf(_c_minPosition[d], fminf(_c_maxPosition[d], newVelocity));
_velocities[ptr] = newVelocity;
float newPosition = position + newVelocity;
newPosition = fmaxf(_c_minPosition[d], fminf(_c_maxPosition[d], newPosition));
_positions[ptr] = newPosition;
}
float newFitness = EvalBanana(&_positions[p * numDimensions]);
if (newFitness < bestFitness[p])
{
bestFitness[p] = newFitness;
for (int d = 0; d < numDimensions; ++d)
{
_bestPositions[ptr_g + d] = _positions[ptr_g + d];
}
}
}
__syncthreads();
		// Find the best (block-wide argmin)
ptrs[p] = p;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2)
{
if (p < s)
{
if (bestFitness[ptrs[p]] > bestFitness[ptrs[p + s]])
{
int tmp = ptrs[p + s];
ptrs[p + s] = ptrs[p];
ptrs[p] = tmp;
}
}
__syncthreads();
}
}
if (p == 0)
{
if (bestFitness[ptrs[0]] < bestGlobalFitness)
{
bestGlobalFitness = bestFitness[ptrs[0]];
for (int d = 0; d < numDimensions; ++d)
{
_bestGlobalPosition[block * numDimensions + d] = _positions[ptrs[0] * numDimensions + d];
}
}
}
__syncthreads();
if (p < numParticles)
_bestFitness[p] = bestFitness[p];
if (p == 0)
_bestGlobalFitness[block] = bestGlobalFitness;
}
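/* k_minimum below collapses the per-block bests produced above into a single winner: a shared-memory
   argmin over the _numBlocks fitness values, after which the winning position is copied into the first
   numDimensions entries of _position and the winning fitness into _fitness[0]. */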
__global__ void k_minimum(int _numBlocks, int numDimensions, float *_position, float *_fitness)
{
__shared__ float fitness[1024];
__shared__ int ptrs[1024];
int idx = threadIdx.x;
ptrs[idx] = idx;
if (idx >= _numBlocks)
fitness[idx] = FLT_MAX;
__syncthreads();
if (idx < _numBlocks)
fitness[idx] = _fitness[idx];
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2)
{
if (idx < s)
{
if (fitness[ptrs[idx]] > fitness[ptrs[idx + s]])
{
int tmp = ptrs[idx + s];
ptrs[idx + s] = ptrs[idx];
ptrs[idx] = tmp;
}
}
__syncthreads();
}
if (idx < numDimensions)
_position[idx] = _position[ptrs[0] * numDimensions + idx];
if (idx == 0)
_fitness[0] = _fitness[ptrs[0]];
}
PSOCuda::PSOCuda(int numParticles, int numDimensions, float *minPositions, float *maxPositions)
:
PSOBase(numParticles, numDimensions, minPositions, maxPositions),
_d_positions(_positions.size()),
_d_velocities(_velocities.size()),
_d_minPositions(_minPositions),
_d_maxPositions(_maxPositions),
_d_bestPositions(_bestPositions.size()),
_d_bestFitness(_bestFitness.size()),
_d_state(numParticles)
{
if (_numDimensions > MAX_DIMENSIONS)
throw new exception("_numDimensions > MAX_DIMENSIONS");
CalculateGeometry();
_d_bestGlobalPosition.resize(_numDimensions * _numBlocks);
_d_bestGlobalFitness.resize(_numBlocks);
_bestGlobalPosition.resize(_numDimensions * _numBlocks);
_bestGlobalFitness.resize(_numBlocks);
hipMemcpyToSymbol(_c_minPosition, _minPositions.data(), _minPositions.size() * sizeof(float));
hipMemcpyToSymbol(_c_maxPosition, _maxPositions.data(), _maxPositions.size() * sizeof(float));
}
void PSOCuda::Init()
{
int threadNumber = pow(2, ceil(log(_numThreads)/log(2)));
int blockNumber = pow(2, ceil(log(_numBlocks)/log(2)));
hipLaunchKernelGGL(( k_InitPSO), dim3(_numBlocks), dim3(threadNumber), 0, 0, _numParticles, _numDimensions,
raw_pointer_cast(_d_positions.data()),
raw_pointer_cast(_d_velocities.data()),
raw_pointer_cast(_d_bestPositions.data()),
raw_pointer_cast(_d_bestFitness.data()),
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()),
raw_pointer_cast(_d_state.data()));
hipDeviceSynchronize();
hipLaunchKernelGGL(( k_minimum), dim3(1), dim3(blockNumber), 0, 0, _numBlocks, _numDimensions,
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()));
UpdateHost();
}
void PSOCuda::Iterate(int n)
{
int threadNumber = pow(2, ceil(log(_numThreads)/log(2)));
int blockNumber = pow(2, ceil(log(_numBlocks)/log(2)));
if (blockNumber == 1)
{
hipLaunchKernelGGL(( k_IterateSingleBlock), dim3(_numBlocks), dim3(threadNumber), 0, 0, _numParticles, _numDimensions, n,
raw_pointer_cast(_d_positions.data()),
raw_pointer_cast(_d_velocities.data()),
raw_pointer_cast(_d_bestPositions.data()),
raw_pointer_cast(_d_bestFitness.data()),
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()),
raw_pointer_cast(_d_state.data()));
}
else
{
for (int i = 0; i < n; ++i)
{
hipLaunchKernelGGL(( k_IterateMultiBlock), dim3(_numBlocks), dim3(threadNumber), 0, 0, _numParticles, _numDimensions,
raw_pointer_cast(_d_positions.data()),
raw_pointer_cast(_d_velocities.data()),
raw_pointer_cast(_d_bestPositions.data()),
raw_pointer_cast(_d_bestFitness.data()),
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()),
raw_pointer_cast(_d_state.data()));
hipLaunchKernelGGL(( k_minimum), dim3(1), dim3(blockNumber), 0, 0, _numBlocks, _numDimensions,
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()));
}
}
_iteration += n;
UpdateHost();
}
void PSOCuda::UpdateHost()
{
_positions = _d_positions;
_velocities = _d_velocities;
_minPositions = _d_minPositions;
_maxPositions = _d_maxPositions;
_bestPositions = _d_bestPositions;
_bestFitness = _d_bestFitness;
_bestGlobalPosition = _d_bestGlobalPosition;
_bestGlobalFitness = _d_bestGlobalFitness;
}
void PSOCuda::CalculateGeometry()
{
int numDevices;
hipGetDeviceCount(&numDevices);
if (numDevices < 1)
		throw std::exception("No CUDA device");
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, 0);
int maxThreads = devProp.maxThreadsPerBlock;
_numThreads = (_numParticles + 31 ) / 32 * 32;
_numThreads = ::min(((_numThreads + 31)/32)*32, maxThreads);
_numBlocks = (_numParticles + _numThreads - 1) / _numThreads;
} | 84b474d768cc0fbd43c41d0ed5141890fc928c93.cu | #include "PSOCuda.cuh"
#include <stdexcept>
#include <algorithm>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/functional.h>
#include <thrust/extrema.h>
extern "C" __device__ __device_builtin__ void __syncthreads();
extern "C" __device__ __device_builtin__ float fminf(float x, float y);
extern "C" __device__ __device_builtin__ float fmaxf(float x, float y);
extern "C" __device__ __device_builtin__ unsigned int __uAtomicInc(unsigned int *address, unsigned int val);
extern "C" __device__ __device_builtin__ void __threadfence_system(void);
#define MAX_DIMENSIONS 20
__constant__ float _c_minPosition[MAX_DIMENSIONS];
__constant__ float _c_maxPosition[MAX_DIMENSIONS];
__forceinline__ __device__ float EvalBanana(float *position)
{
float x = position[0];
float y = position[1];
float a = y - x * x;
float b = 1 - x;
return 100 * a*a + b*b;
}
__global__ void k_InitPSO(
int numParticles,
int numDimensions,
float *_positions,
float *_velocities,
float *_bestPositions,
float *_bestFitness,
float *_bestGlobalPosition,
float *_bestGlobalFitness,
curandState *_s)
{
__shared__ float bestFitness[1024];
__shared__ int ptrs[1024];
int idx = threadIdx.x;
int ptr_g0 = blockDim.x * blockIdx.x;
int gidx = ptr_g0 + idx;
if (gidx >= numParticles)
bestFitness[idx] = FLT_MAX;
__syncthreads();
	int ptr_g = gidx * numDimensions; // position in memory
if (gidx < numParticles)
{
curand_init(threadIdx.x, 0, 0, &_s[idx]);
		// Calculate random pos & vel
for (int d = 0; d < numDimensions; ++d)
{
float min = _c_minPosition[d];
float max = _c_maxPosition[d];
_positions[ptr_g + d] = curand_uniform(&_s[idx])*(max - min) + min;
_velocities[ptr_g + d] = curand_uniform(&_s[idx])*(max - min) + min;
}
		// Initializes local bests
bestFitness[idx] = EvalBanana(&_positions[ptr_g]);
}
__syncthreads();
	// Find the best (block-wide argmin)
ptrs[idx] = idx;
for (int s = 1024 / 2; s > 0; s /= 2)
{
if (idx < s)
{
if (bestFitness[ptrs[idx]] > bestFitness[ptrs[idx + s]])
{
int tmp = ptrs[idx + s];
ptrs[idx + s] = ptrs[idx];
ptrs[idx] = tmp;
}
}
__syncthreads();
}
if (gidx < numParticles)
{
for (int d = 0; d < numDimensions; ++d)
_bestPositions[ptr_g + d] = _positions[ptr_g + d];
_bestFitness[gidx] = bestFitness[idx];
if (idx < numDimensions)
_bestGlobalPosition[blockIdx.x * numDimensions + idx] = _positions[(ptr_g0 + ptrs[0]) * numDimensions + idx];
if (idx == 0)
_bestGlobalFitness[blockIdx.x] = bestFitness[ptrs[0]];
}
}
__global__ void k_IterateMultiBlock(
int numParticles,
int numDimensions,
float *_positions,
float *_velocities,
float *_bestPositions,
float *_bestFitness,
float *_bestGlobalPosition,
float *_bestGlobalFitness,
curandState *_s)
{
__shared__ int ptrs[1024];
__shared__ float bestFitness[1024];
float bestGlobalFitness;
int p = threadIdx.x;
int block = blockIdx.x;
int ptr_g0 = blockDim.x * block;
int gp = ptr_g0 + p;
if (gp < numParticles)
{
bestFitness[p] = _bestFitness[gp];
}
if (p == 0)
bestGlobalFitness = _bestGlobalFitness[0];
else if (gp >= numParticles)
bestFitness[p] = FLT_MAX;
__syncthreads();
int ptr_g = gp * numDimensions;
if (gp < numParticles)
{
for (int d = 0; d < numDimensions; ++d)
{
float r1 = curand_uniform(&_s[p]);
float r2 = curand_uniform(&_s[p]);
int ptr = ptr_g + d;
float position = _positions[ptr];
float newVelocity = (W * _velocities[ptr]) +
(C1 * r1 * (_bestPositions[ptr] - position)) +
(C2 * r2 * (_bestGlobalPosition[block * numDimensions + d] - position));
newVelocity = fmaxf(_c_minPosition[d], fminf(_c_maxPosition[d], newVelocity));
_velocities[ptr] = newVelocity;
float newPosition = position + newVelocity;
newPosition = fmaxf(_c_minPosition[d], fminf(_c_maxPosition[d], newPosition));
_positions[ptr] = newPosition;
}
float newFitness = EvalBanana(&_positions[ptr_g]);
if (newFitness < bestFitness[p])
{
bestFitness[p] = newFitness;
for (int d = 0; d < numDimensions; ++d)
{
int ptr = ptr_g + d;
_bestPositions[ptr] = _positions[ptr];
}
}
}
__syncthreads();
	// Find the best (block-wide argmin)
ptrs[p] = p;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2)
{
if (p < s)
{
if (bestFitness[ptrs[p]] > bestFitness[ptrs[p + s]])
{
int tmp = ptrs[p + s];
ptrs[p + s] = ptrs[p];
ptrs[p] = tmp;
}
}
__syncthreads();
}
if (p == 0)
{
if (bestFitness[ptrs[0]] < bestGlobalFitness)
{
bestGlobalFitness = bestFitness[ptrs[0]];
for (int d = 0; d < numDimensions; ++d)
_bestGlobalPosition[block * numDimensions + d] = _positions[(ptr_g0 + ptrs[0]) * numDimensions + d];
}
}
__syncthreads();
if (gp < numParticles)
_bestFitness[gp] = bestFitness[p];
if (p == 0)
_bestGlobalFitness[block] = bestGlobalFitness;
}
__global__ void k_IterateSingleBlock(
int numParticles,
int numDimensions,
int numIterations,
float *_positions,
float *_velocities,
float *_bestPositions,
float *_bestFitness,
float *_bestGlobalPosition,
float *_bestGlobalFitness,
curandState *_s)
{
__shared__ int ptrs[1024];
__shared__ float bestFitness[1024];
float bestGlobalFitness;
int p = threadIdx.x;
int block = blockIdx.x;
if (p < numParticles)
bestFitness[p] = _bestFitness[p];
if (p == 0)
bestGlobalFitness = _bestGlobalFitness[0];
else if (p >= numParticles)
bestFitness[p] = FLT_MAX;
__syncthreads();
int ptr_g = p * numDimensions;
for (int it = 0; it < numIterations; ++it)
{
if (p < numParticles)
{
for (int d = 0; d < numDimensions; ++d)
{
float r1 = curand_uniform(&_s[p]);
float r2 = curand_uniform(&_s[p]);
int ptr = ptr_g + d;
float position = _positions[ptr];
float newVelocity = (W * _velocities[ptr]) +
(C1 * r1 * (_bestPositions[ptr] - position)) +
(C2 * r2 * (_bestGlobalPosition[block * numDimensions + d] - position));
newVelocity = fmaxf(_c_minPosition[d], fminf(_c_maxPosition[d], newVelocity));
_velocities[ptr] = newVelocity;
float newPosition = position + newVelocity;
newPosition = fmaxf(_c_minPosition[d], fminf(_c_maxPosition[d], newPosition));
_positions[ptr] = newPosition;
}
float newFitness = EvalBanana(&_positions[p * numDimensions]);
if (newFitness < bestFitness[p])
{
bestFitness[p] = newFitness;
for (int d = 0; d < numDimensions; ++d)
{
_bestPositions[ptr_g + d] = _positions[ptr_g + d];
}
}
}
__syncthreads();
		// Find the best (block-wide argmin)
ptrs[p] = p;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2)
{
if (p < s)
{
if (bestFitness[ptrs[p]] > bestFitness[ptrs[p + s]])
{
int tmp = ptrs[p + s];
ptrs[p + s] = ptrs[p];
ptrs[p] = tmp;
}
}
__syncthreads();
}
}
if (p == 0)
{
if (bestFitness[ptrs[0]] < bestGlobalFitness)
{
bestGlobalFitness = bestFitness[ptrs[0]];
for (int d = 0; d < numDimensions; ++d)
{
_bestGlobalPosition[block * numDimensions + d] = _positions[ptrs[0] * numDimensions + d];
}
}
}
__syncthreads();
if (p < numParticles)
_bestFitness[p] = bestFitness[p];
if (p == 0)
_bestGlobalFitness[block] = bestGlobalFitness;
}
__global__ void k_minimum(int _numBlocks, int numDimensions, float *_position, float *_fitness)
{
__shared__ float fitness[1024];
__shared__ int ptrs[1024];
int idx = threadIdx.x;
ptrs[idx] = idx;
if (idx >= _numBlocks)
fitness[idx] = FLT_MAX;
__syncthreads();
if (idx < _numBlocks)
fitness[idx] = _fitness[idx];
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2)
{
if (idx < s)
{
if (fitness[ptrs[idx]] > fitness[ptrs[idx + s]])
{
int tmp = ptrs[idx + s];
ptrs[idx + s] = ptrs[idx];
ptrs[idx] = tmp;
}
}
__syncthreads();
}
if (idx < numDimensions)
_position[idx] = _position[ptrs[0] * numDimensions + idx];
if (idx == 0)
_fitness[0] = _fitness[ptrs[0]];
}
PSOCuda::PSOCuda(int numParticles, int numDimensions, float *minPositions, float *maxPositions)
:
PSOBase(numParticles, numDimensions, minPositions, maxPositions),
_d_positions(_positions.size()),
_d_velocities(_velocities.size()),
_d_minPositions(_minPositions),
_d_maxPositions(_maxPositions),
_d_bestPositions(_bestPositions.size()),
_d_bestFitness(_bestFitness.size()),
_d_state(numParticles)
{
if (_numDimensions > MAX_DIMENSIONS)
throw new exception("_numDimensions > MAX_DIMENSIONS");
CalculateGeometry();
_d_bestGlobalPosition.resize(_numDimensions * _numBlocks);
_d_bestGlobalFitness.resize(_numBlocks);
_bestGlobalPosition.resize(_numDimensions * _numBlocks);
_bestGlobalFitness.resize(_numBlocks);
cudaMemcpyToSymbol(_c_minPosition, _minPositions.data(), _minPositions.size() * sizeof(float));
cudaMemcpyToSymbol(_c_maxPosition, _maxPositions.data(), _maxPositions.size() * sizeof(float));
}
void PSOCuda::Init()
{
int threadNumber = pow(2, ceil(log(_numThreads)/log(2)));
int blockNumber = pow(2, ceil(log(_numBlocks)/log(2)));
k_InitPSO<<<_numBlocks, threadNumber>>>(_numParticles, _numDimensions,
raw_pointer_cast(_d_positions.data()),
raw_pointer_cast(_d_velocities.data()),
raw_pointer_cast(_d_bestPositions.data()),
raw_pointer_cast(_d_bestFitness.data()),
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()),
raw_pointer_cast(_d_state.data()));
cudaDeviceSynchronize();
k_minimum<<<1, blockNumber>>>(_numBlocks, _numDimensions,
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()));
UpdateHost();
}
void PSOCuda::Iterate(int n)
{
int threadNumber = pow(2, ceil(log(_numThreads)/log(2)));
int blockNumber = pow(2, ceil(log(_numBlocks)/log(2)));
if (blockNumber == 1)
{
k_IterateSingleBlock<<<_numBlocks, threadNumber>>>(_numParticles, _numDimensions, n,
raw_pointer_cast(_d_positions.data()),
raw_pointer_cast(_d_velocities.data()),
raw_pointer_cast(_d_bestPositions.data()),
raw_pointer_cast(_d_bestFitness.data()),
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()),
raw_pointer_cast(_d_state.data()));
}
else
{
for (int i = 0; i < n; ++i)
{
k_IterateMultiBlock<<<_numBlocks, threadNumber>>>(_numParticles, _numDimensions,
raw_pointer_cast(_d_positions.data()),
raw_pointer_cast(_d_velocities.data()),
raw_pointer_cast(_d_bestPositions.data()),
raw_pointer_cast(_d_bestFitness.data()),
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()),
raw_pointer_cast(_d_state.data()));
k_minimum<<<1, blockNumber>>>(_numBlocks, _numDimensions,
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()));
}
}
_iteration += n;
UpdateHost();
}
void PSOCuda::UpdateHost()
{
_positions = _d_positions;
_velocities = _d_velocities;
_minPositions = _d_minPositions;
_maxPositions = _d_maxPositions;
_bestPositions = _d_bestPositions;
_bestFitness = _d_bestFitness;
_bestGlobalPosition = _d_bestGlobalPosition;
_bestGlobalFitness = _d_bestGlobalFitness;
}
void PSOCuda::CalculateGeometry()
{
int numDevices;
cudaGetDeviceCount(&numDevices);
if (numDevices < 1)
		throw std::exception("No CUDA device");
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, 0);
int maxThreads = devProp.maxThreadsPerBlock;
_numThreads = (_numParticles + 31 ) / 32 * 32;
_numThreads = std::min(((_numThreads + 31)/32)*32, maxThreads);
_numBlocks = (_numParticles + _numThreads - 1) / _numThreads;
} |
8654ef22de989267771af5f47ece9d4bf94a83df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
using Eigen::MatrixXd;
using Eigen::VectorXd;
using namespace std;
using namespace Eigen;
using namespace thrust;
namespace Leicester
{
namespace CudaLib
{
vector<MatrixXd> GaussianND(const MatrixXd &TP, const MatrixXd &CN, const MatrixXd &A, const MatrixXd &C)
{
vector<MatrixXd> result;// V, Vt, Vx Vxy
int Num = CN.rows();
int N = TP.rows();
int dimensions = TP.cols();
MatrixXd D(N, Num);
D.fill(1.0);
vector<MatrixXd> Derivatives;
Derivatives.push_back(D);
for (int d = 0; d < 3; d++)
{
MatrixXd Dx(N, Num);
Dx.fill(1.0);
Derivatives.push_back(Dx);
}
for (int j = 0; j < Num; j++)
{
vector<VectorXd> FAIn;
for (int d = 0; d < dimensions; d++)
{
VectorXd a1 = A(0, d)*(TP.col(d).array() - CN(j, d));
VectorXd FAI = (-((A(0, d)*(TP.col(d).array() - CN(j, d))).array() * (A(0, d)*(TP.col(d).array() - CN(j, d))).array()) / (C(0, d) *C(0, d))).array().exp();
Derivatives[0].col(j).array() *= FAI.array();
FAIn.push_back(FAI);
}
VectorXd vt = -2 * (A(0, 0) / C(0, 0)) * (A(0, 0) / C(0, 0)) * (TP.col(0).array() - CN(j, 0));
Derivatives[1].col(j) = vt;
VectorXd sumij = VectorXd::Zero(TP.rows());
MatrixXd dS(TP.rows(), dimensions - 1);
for (int d = 1; d < dimensions; d++)
{
dS.col(d - 1) = (-2 * (A(0, d) / C(0, d)) * (A(0, d) / C(0, d)) * (TP.col(d).array() - CN(j, d))).array() * TP.col(d).array();
VectorXd sumi = VectorXd::Zero(TP.rows());
for (int i = 1; i < TP.cols(); i++)
{
sumi.array() = sumi.array() + TP.col(d).array() * TP.col(i).array() * (-2 * (A(0, d) * A(0, d)) / (C(0, d) *C(0, d)) + (4 * (A(0, d) * A(0, d)* A(0, d) * A(0, d)) * ((TP.col(d).array() - CN(j, i)).array() * (TP.col(d).array() - CN(j, i)).array() / (C(0, d) *C(0, d)*C(0, d) *C(0, d)))).array()).array();
}
sumij.array() = sumij.array() + sumi.array();
}
VectorXd sum = dS.rowwise().sum();
Derivatives[2].col(j) = sum;
Derivatives[3].col(j) = sumij;
for (int d = 1; d < Derivatives.size(); d++)
Derivatives[d].col(j).array() *= Derivatives[0].col(j).array();
}
return Derivatives;
}
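		/* Summary of what GaussianND above assembles (read off the code rather than re-derived):
		   for every centre CN(j,:) the basis column is the anisotropic Gaussian
		       D(:,j) = prod_d exp( -A_d^2 (x_d - c_{j,d})^2 / C_d^2 ),
		   while Derivatives[1], [2] and [3] hold, respectively, a first-derivative factor in the first
		   coordinate, a coordinate-weighted sum of first-derivative factors over the remaining dimensions,
		   and a coordinate-weighted second-order term; each is multiplied by D(:,j) at the end of the j-loop. */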
__global__ void GaussianND_CUDA(double** result, double *TP, dim3 dTP, double *CN, dim3 dCN, double *A, dim3 dA, double *C, dim3 dC)
{
int i = blockDim.y * blockIdx.y + threadIdx.y;
int j = blockDim.x * blockIdx.x + threadIdx.x;
__syncthreads();
if (i == 0 & j == 0)
{
printf("start mqd2_CUDA i=%i j =%i \r\n", i, j);
double* D = (double *)malloc(sizeof(double) * dTP.x * dCN.y);
double* Dt = (double *)malloc(sizeof(double) * dTP.x *dCN.y);
double* Dx = (double *)malloc(sizeof(double) * dTP.x *dCN.y);
double* Dxx = (double *)malloc(sizeof(double) * dTP.x *dCN.y);
printf("allocated arrays mqd2_CUDA i=%i j =%i \r\n", i, j);
dim3 threads(32, 32);
//dim3 grid(CNx / threads.x, CNy / threads.y);
dim3 grid(1, 1);
dim3 dimTP(dTP.x, dTP.y);
dim3 dimCN(dCN.x, dCN.y);
dim3 dimA(dA.x, dA.y);
dim3 dimC(dC.x, dC.y);
//printf("TP size=%f", sizeof(TP));
//printMatrix_CUDA << < dim3(1, 1), dim3(1, 1) >> > (TP, dimTP);
//gpuErrchk << <1, 1 >> >(hipPeekAtLastError());
//gpuErrchk << <1, 1 >> >(hipDeviceSynchronize());
printf("dimTPx=%i dimTPy=%i dimCNx=%i dimCNy=%i dimAx=%i dimAy=%i dimCx=%i dimCy=%i\r\n", dimTP.x, dimTP.y, dimCN.x, dimCN.y, dimA.x, dimA.y, dimC.x, dimC.y);
Gaussian2d2_CUDA << <1, 1 >> > (D, Dt, Dx, Dxx, TP, dimTP, CN, dimCN, A, dimA, C, dimC);
gpuAssert << <1, 1 >> > (hipPeekAtLastError(), __FILE__, __LINE__);
gpuAssert << <1, 1 >> > (hipDeviceSynchronize(), __FILE__, __LINE__);
//printf("D size=%f", sizeof(D));
//printMatrix_CUDA << < dim3(1, 1), dim3(1, 1) >> > (D, dim3(dTP.y, dTP.y));
//gpuErrchk << <1, 1 >> >(hipPeekAtLastError());
//gpuErrchk << <1, 1 >> >(hipDeviceSynchronize());
//__syncthreads();
result[0] = D;
result[1] = Dt;
result[2] = Dx;
result[3] = Dxx;
}
__syncthreads();
//printf("end mqd2_CUDA");
}
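		/* GaussianND_CUDA above is a device-side launcher: only thread (0,0) does work, allocating the four
		   output matrices with in-kernel malloc, launching Gaussian2d2_CUDA (declared elsewhere in this
		   project) via dynamic parallelism, and publishing the four raw pointers through 'result'. */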
vector<MatrixXd> CudaRBF::Gaussian2D(const MatrixXd &TP, const MatrixXd &CN, const MatrixXd &A, const MatrixXd &C)
{
gpuAssert << <1, 1 >> > (hipDeviceSynchronize(), __FILE__, __LINE__);
//Allocate the input on host and device
const double *a, *c, *tx, *cn;
a = A.data();
c = C.data();
tx = TP.data();
cn = CN.data();
double *d_a, *d_c, *d_tx, *d_cn;
//size_t *pValue;
//hipDeviceGetLimit(pValue, hipLimit_t::hipLimitMallocHeapSize);
//printf("Heap limit=%i\r\n", &pValue);
hipError_t e = hipMalloc((void **)&d_a, A.rows() * A.cols() * sizeof(double));
if (e != hipSuccess)
printf("hipMalloc d_a returned error %s (code %d), line(%d) when allocating %i bytes\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * A.rows() * A.cols());
e = hipMalloc((void **)&d_c, C.rows() * C.cols() * sizeof(double));
if (e != hipSuccess)
printf("hipMalloc d_c returned error %s (code %d), line(%d) when allocating %i bytes\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * C.rows() * C.cols());
e = hipMalloc((void **)&d_tx, TP.rows() * TP.cols() * sizeof(double));
if (e != hipSuccess)
printf("hipMalloc d_tx returned error %s (code %d), line(%d) when allocating %i bytes\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * TP.rows() * TP.cols());
e = hipMalloc((void **)&d_cn, CN.rows() * CN.cols() * sizeof(double));
if (e != hipSuccess)
printf("hipMalloc d_tx1 returned error %s (code %d), line(%d) when allocating %i bytes\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * CN.rows() * CN.cols());
e = hipMemcpy(d_a, a, sizeof(double) * A.rows() * A.cols(), hipMemcpyKind::hipMemcpyHostToDevice);
if (e != hipSuccess)
printf("hipMemcpy d_a returned error %s (code %d), line(%d) when copying %i bytes\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * A.rows() * A.cols());
e = hipMemcpy(d_c, c, sizeof(double) * C.rows() * C.cols(), hipMemcpyKind::hipMemcpyHostToDevice);
if (e != hipSuccess)
printf("hipMemcpy d_c returned error %s (code %d), line(%d) when copying %i bytes\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * C.rows() * C.cols());
e = hipMemcpy(d_tx, tx, sizeof(double) * TP.rows() * TP.cols(), hipMemcpyKind::hipMemcpyHostToDevice);
if (e != hipSuccess)
printf("hipMemcpy d_tx returned error %s (code %d), line(%d) when copying %i bytes\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * TP.rows() * TP.cols());
			e = hipMemcpy(d_cn, cn, sizeof(double) * CN.rows() * CN.cols(), hipMemcpyKind::hipMemcpyHostToDevice);
if (e != hipSuccess)
printf("hipMemcpy d_tx1 returned error %s (code %d), line(%d) when copying %i bytes\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * CN.rows() * CN.cols());
//Allocate the output on host and device
double *d_FAI, *d_D, *d_Dt, *d_Dx, *d_Dxx;
double *h_FAI, *h_D, *h_Dt, *h_Dx, *h_Dxx;
int rows = CN.rows();
int cols = TP.rows();
//h_FAI = new double[rows * cols];
h_D = (double*)malloc(sizeof(double) * rows * cols);
h_Dt = (double*)malloc(sizeof(double) * rows * cols);
h_Dx = (double*)malloc(sizeof(double) * rows * cols);
h_Dxx = (double*)malloc(sizeof(double) * rows * cols);
/*e = hipMalloc((void **)&d_FAI, rows * cols * sizeof(double));
if (e != hipSuccess)
printf("hipMalloc d_FAI returned error %s (code %d), line(%d)\n", hipGetErrorString(e), e, __LINE__);*/
e = hipMalloc((void **)&d_D, rows * cols * sizeof(double));
if (e != hipSuccess)
printf("hipMalloc d_D returned error %s (code %d), line(%d) when allocating %i bytes\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
e = hipMalloc((void **)&d_Dt, rows * cols * sizeof(double));
if (e != hipSuccess)
printf("hipMalloc d_Dt returned error %s (code %d), line(%d) when allocating %i bytes\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
e = hipMalloc((void **)&d_Dx, rows * cols * sizeof(double));
if (e != hipSuccess)
printf("hipMalloc d_Dx returned error %s (code %d), line(%d) when allocating %i bytes\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
e = hipMalloc((void **)&d_Dxx, rows * cols * sizeof(double));
if (e != hipSuccess)
printf("hipMalloc d_Dxx returned error %s (code %d), line(%d) when allocating %i bytes\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, 0));
int block_size = (deviceProp.major < 2) ? 16 : 32;
dim3 dimTx(TP.cols(), TP.rows());
dim3 dimA(A.cols(), A.rows());
dim3 dimC(C.cols(), C.rows());
dim3 threads(block_size, block_size);
//dim3 grid(dimTx.x / threads.x, dimTx.y / threads.y);
dim3 grid(1, 1);
//test << < grid, threads >> > ();
//hipDeviceSynchronize();
//printMatrix(tx, dimTx);
Gaussian2d_CUDA << < grid, threads >> > (d_D, d_Dt, d_Dx, d_Dxx, d_tx, dimTx.x, dimTx.y, d_cn, dimTx.x, dimTx.y, d_a, dimA.x, dimA.y, d_c, dimC.x, dimC.y);
//mqd2_CUDA<32>(d_result, d_tx, dimTx.x, dimTx.y, d_tx1, dimTx.x, dimTx.y, d_a, dimA.x, dimA.y, d_c, dimC.x, dimC.y);
			gpuAssert<<<1, 1>>>(hipDeviceSynchronize(), __FILE__, __LINE__);
			gpuAssert<<<1, 1>>>(hipDeviceSynchronize(), __FILE__, __LINE__);
			gpuAssert<<<1, 1>>>(hipPeekAtLastError(), __FILE__, __LINE__);
e = hipMemcpy(h_D, d_D, sizeof(double) * rows * cols, hipMemcpyKind::hipMemcpyDeviceToHost);
if (e != hipSuccess)
printf("hipMemcpy d_D returned error %s (code %d), line(%d) when copying%i\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
e = hipMemcpy(h_Dt, d_Dt, sizeof(double) * rows * cols, hipMemcpyKind::hipMemcpyDeviceToHost);
if (e != hipSuccess)
printf("hipMemcpy d_Dt returned error %s (code %d), line(%d) when copying%i\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
e = hipMemcpy(h_Dx, d_Dx, sizeof(double) * rows * cols, hipMemcpyKind::hipMemcpyDeviceToHost);
if (e != hipSuccess)
printf("hipMemcpy d_Dx returned error %s (code %d), line(%d) when copying%i\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
e = hipMemcpy(h_Dxx, d_Dxx, sizeof(double) * rows * cols, hipMemcpyKind::hipMemcpyDeviceToHost);
if (e != hipSuccess)
printf("hipMemcpy d_Dxx returned error %s (code %d), line(%d) when copying%i\n", hipGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
//gpuAssert << <1, 1 >> > (hipDeviceSynchronize());
			gpuAssert<<<1, 1>>>(hipDeviceSynchronize(), __FILE__, __LINE__);
//printMatrix(h_D, dimTx);
MatrixXd D(rows, cols);
Eigen::Map<Eigen::MatrixXd> dataMapD(h_D, rows, cols);
D = dataMapD.eval();
MatrixXd Dt(rows, cols);
Eigen::Map<Eigen::MatrixXd> dataMapDt(h_Dt, rows, cols);
Dt = dataMapDt.eval();
// printMatrix(h_Dx, dim3(15, 15));
MatrixXd Dx(rows, cols);
Eigen::Map<Eigen::MatrixXd> dataMapDx(h_Dx, rows, cols);
Dx = dataMapDx.eval();
MatrixXd Dxx(rows, cols);
Eigen::Map<Eigen::MatrixXd> dataMapDxx(h_Dxx, rows, cols);
Dxx = dataMapDxx.eval();
free(h_D);
free(h_Dt);
free(h_Dx);
free(h_Dxx);
hipFree(d_D);
hipFree(d_Dt);
hipFree(d_Dx);
hipFree(d_Dxx);
hipFree(d_a);
hipFree(d_c);
hipFree(d_tx);
hipFree(d_cn);
//hipDeviceReset();
return { D, Dt, Dx, Dxx };
}
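		// PushAndQueue: returns a vector of length A.rows() + 2 with push placed before the entries of A and queue appended after them.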
VectorXd PushAndQueue(double push, VectorXd A, double queue)
{
VectorXd result(A.rows() + 2);
result[0] = push;
for (int i = 0; i < A.rows(); i++)
{
				result[i + 1] = A[i];
}
result[A.rows() + 1] = queue;
return result;
}
//int MethodOfLines::MoLiteration(double Tend, double Tdone, double dt, double *G, int GRows, int GCols, double *lamb, int lambRows, int lambCols, double inx2, double r, double K, MatrixXd A1, MatrixXd Aend, MatrixXd H)
//{
// int count = 0;
// while (Tend - Tdone > 1E-8)
// {
// Tdone += dt;
//
// int sizeG = GRows * GCols;
// int sizeLamb = lambRows * lambCols;
// int memG = sizeof(double) * sizeG;
// int memLamb = sizeof(double) * sizeLamb;
//
// double *d_G, *d_lamb, *d_FFF;
// int sizeFFF = GRows * lambCols;
// int memFFF = sizeof(double)* sizeFFF;
//
// double *h_FFF = (double *)malloc(memFFF);
// double *h_CUBLAS = (double *)malloc(memFFF);
//
// checkCudaErrors(hipMalloc((void **)&d_G, memG));
// checkCudaErrors(hipMalloc((void **)&d_lamb, memLamb));
// checkCudaErrors(hipMemcpy(d_G, G, memG, hipMemcpyHostToDevice));
// checkCudaErrors(hipMemcpy(d_lamb, lamb, memLamb, hipMemcpyHostToDevice));
// checkCudaErrors(hipMalloc((void **)&d_FFF, memFFF));
//
// hipblasHandle_t handle;
// checkCudaErrors(hipblasCreate(&handle));
// const double alpha = 1.0;
// const double beta = 1.0;
// checkCudaErrors(hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, GRows, lambCols, GCols, &alpha, d_G, GRows, d_lamb, lambRows, &beta, d_FFF, GRows));
//
// checkCudaErrors(hipMemcpy(h_FFF, d_FFF, memFFF, hipMemcpyDeviceToHost));
// printf("after hipblasDgemm:\r\n");
// //double i[] = h_FFF;
// VectorXd FFF = Map<VectorXd >(h_FFF, GRows, lambCols);
// VectorXd fff = PushAndQueue(0, FFF, inx2 - exp(-r*Tdone)*K);
// printf("after PushAndQueue:\r\n");
// MatrixXd HH(A1.cols(), A1.cols());
// HH.row(0) = A1;
// HH.middleRows(1, HH.rows() - 2) = H;
// HH.row(HH.rows() - 1) = Aend;
// printf("after HH construction:\r\n");
// //LLT<MatrixXd> lltOfA(HH);
// //lamb = lltOfA.solve(fff);
//
// hipsolverDnHandle_t cusolverH = NULL;
// hipblasHandle_t cublasH = NULL;
// hipblasStatus_t cublas_status = HIPBLAS_STATUS_SUCCESS;
// cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS;
// hipError_t cudaStat1 = hipSuccess;
// hipError_t cudaStat2 = hipSuccess;
// hipError_t cudaStat3 = hipSuccess;
// hipError_t cudaStat4 = hipSuccess;
// const int m = HH.rows(); const int lda = m; const int ldb = m; const int nrhs = 1; // number of right hand side vectors
// double *XC = new double[ldb*nrhs];
//
// double *d_A = NULL; // linear memory of GPU
// double *d_tau = NULL; // linear memory of GPU
// double *d_B = NULL; int *devInfo = NULL; // info in gpu (device copy)
// double *d_work = NULL;
// int lwork = 0;
// int info_gpu = 0;
// const double one = 1;
//
// cusolver_status = hipsolverDnCreate(&cusolverH);
// assert(CUSOLVER_STATUS_SUCCESS == cusolver_status);
// printf("after cusolver create:\r\n");
// cublas_status = hipblasCreate(&cublasH);
// assert(HIPBLAS_STATUS_SUCCESS == cublas_status);
// printf("after cublas create:\r\n");
//
// cudaStat1 = hipMalloc((void**)&d_A, sizeof(double) * lda * m);
// cudaStat2 = hipMalloc((void**)&d_tau, sizeof(double) * m);
// cudaStat3 = hipMalloc((void**)&d_B, sizeof(double) * ldb * nrhs);
// cudaStat4 = hipMalloc((void**)&devInfo, sizeof(int));
// assert(hipSuccess == cudaStat1);
// assert(hipSuccess == cudaStat2);
// assert(hipSuccess == cudaStat3);
// assert(hipSuccess == cudaStat4);
// cudaStat1 = hipMemcpy(d_A, HH.data(), sizeof(double) * lda * m, hipMemcpyHostToDevice);
// cudaStat2 = hipMemcpy(d_B, fff.data(), sizeof(double) * ldb * nrhs, hipMemcpyHostToDevice);
// assert(hipSuccess == cudaStat1); assert(hipSuccess == cudaStat2);
//
// // step 3: query working space of geqrf and ormqr
// cusolver_status = hipsolverDnDgeqrf_bufferSize( cusolverH, m, m, d_A, lda, &lwork);
// assert (cusolver_status == CUSOLVER_STATUS_SUCCESS);
// cudaStat1 = hipMalloc((void**)&d_work, sizeof(double)*lwork);
// printf("after initialisation:\r\n");
// assert(hipSuccess == cudaStat1);
// // step 4: compute QR factorization
// cusolver_status = hipsolverDnDgeqrf( cusolverH, m, m, d_A, lda, d_tau, d_work, lwork, devInfo);
// cudaStat1 = hipDeviceSynchronize();
// assert(CUSOLVER_STATUS_SUCCESS == cusolver_status);
// assert(hipSuccess == cudaStat1);
// printf("after QR factorization:\r\n");
// // check if QR is good or not
// cudaStat1 = hipMemcpy(&info_gpu, devInfo, sizeof(int), hipMemcpyDeviceToHost);
// assert(hipSuccess == cudaStat1);
// printf("after geqrf: info_gpu = %d\n", info_gpu);
// assert(0 == info_gpu);
// // step 5: compute Q^T*B
// cusolver_status= hipsolverDnDormqr( cusolverH, HIPBLAS_SIDE_LEFT, HIPBLAS_OP_T, m, nrhs, m, d_A, lda, d_tau, d_B, ldb, d_work, lwork, devInfo);
// cudaStat1 = hipDeviceSynchronize();
// assert(CUSOLVER_STATUS_SUCCESS == cusolver_status);
// assert(hipSuccess == cudaStat1);
//
// // check if QR is good or not
// cudaStat1 = hipMemcpy(&info_gpu, devInfo, sizeof(int), hipMemcpyDeviceToHost);
// assert(hipSuccess == cudaStat1);
// printf("after ormqr: info_gpu = %d\n", info_gpu);
// assert(0 == info_gpu);
// // step 6: compute x = R \ Q^T*B
// cublas_status = hipblasDtrsm( cublasH, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT, m, nrhs, &one, d_A, lda, d_B, ldb);
// cudaStat1 = hipDeviceSynchronize(); assert(HIPBLAS_STATUS_SUCCESS == cublas_status);
// assert(hipSuccess == cudaStat1);
// cudaStat1 = hipMemcpy(XC, d_B, sizeof(double)*ldb*nrhs, hipMemcpyDeviceToHost);
// assert(hipSuccess == cudaStat1);
//
// /*printf("X = (matlab base-1)\n");
// printMatrix(m, nrhs, XC, ldb, "X"); */
//
// // free resources
// if (d_A ) hipFree(d_A);
// if (d_tau ) hipFree(d_tau);
// if (d_B ) hipFree(d_B);
// if (devInfo) hipFree(devInfo);
// if (d_work ) hipFree(d_work);
// if (cublasH ) hipblasDestroy(cublasH);
// if (cusolverH) hipsolverDnDestroy(cusolverH);
// hipDeviceReset();
//
//
//
// count++;
// printf("%i\r\n", count);
// }
// return 0;
//}
}
}
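// SetupLevel2: sizes and fills TX1 and CN with the same 15 x 2 grid (3 distinct values in column 0, and 0..300 in steps of 75 in column 1) and sets the 1 x 2 parameter rows A and C.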
void SetupLevel2(Eigen::MatrixXd &TX1, Eigen::MatrixXd &CN, Eigen::MatrixXd &A, Eigen::MatrixXd &C)
{
	TX1.resize(15, 2);
	CN.resize(15, 2);
	C.resize(1, 2);
	C << 1.73, 600;
	A.resize(1, 2);
	A << 2, 4;
TX1(0, 0) = 0;
TX1(1, 0) = 0;
TX1(2, 0) = 0;
TX1(3, 0) = 0;
TX1(4, 0) = 0;
TX1(5, 0) = 0.432499999999999;
TX1(6, 0) = 0.432499999999999;
TX1(7, 0) = 0.432499999999999;
TX1(8, 0) = 0.432499999999999;
TX1(9, 0) = 0.432499999999999;
TX1(10, 0) = 0.864999999999999;
TX1(11, 0) = 0.864999999999999;
TX1(12, 0) = 0.864999999999999;
TX1(13, 0) = 0.864999999999999;
TX1(14, 0) = 0.864999999999999;
TX1(0, 1) = 0;
TX1(1, 1) = 75;
TX1(2, 1) = 150;
TX1(3, 1) = 225;
TX1(4, 1) = 300;
TX1(5, 1) = 0;
TX1(6, 1) = 75;
TX1(7, 1) = 150;
TX1(8, 1) = 225;
TX1(9, 1) = 300;
TX1(10, 1) = 0;
TX1(11, 1) = 75;
TX1(12, 1) = 150;
TX1(13, 1) = 225;
TX1(14, 1) = 300;
CN(0, 0) = 0;
CN(1, 0) = 0;
CN(2, 0) = 0;
CN(3, 0) = 0;
CN(4, 0) = 0;
CN(5, 0) = 0.432499999999999;
CN(6, 0) = 0.432499999999999;
CN(7, 0) = 0.432499999999999;
CN(8, 0) = 0.432499999999999;
CN(9, 0) = 0.432499999999999;
CN(10, 0) = 0.864999999999999;
CN(11, 0) = 0.864999999999999;
CN(12, 0) = 0.864999999999999;
CN(13, 0) = 0.864999999999999;
CN(14, 0) = 0.864999999999999;
CN(0, 1) = 0;
CN(1, 1) = 75;
CN(2, 1) = 150;
CN(3, 1) = 225;
CN(4, 1) = 300;
CN(5, 1) = 0;
CN(6, 1) = 75;
CN(7, 1) = 150;
CN(8, 1) = 225;
CN(9, 1) = 300;
CN(10, 1) = 0;
CN(11, 1) = 75;
CN(12, 1) = 150;
CN(13, 1) = 225;
CN(14, 1) = 300;
}
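// GetTX7: builds a 325 x 2 grid: column 0 takes 5 values from 0 to 0.865 (each repeated 65 times) and column 1 sweeps 0 to 300 in steps of 4.6875 within each block.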
MatrixXd GetTX7()
{
MatrixXd TX1(325, 2);
TX1(0, 0) = 0;
TX1(1, 0) = 0;
TX1(2, 0) = 0;
TX1(3, 0) = 0;
TX1(4, 0) = 0;
TX1(5, 0) = 0;
TX1(6, 0) = 0;
TX1(7, 0) = 0;
TX1(8, 0) = 0;
TX1(9, 0) = 0;
TX1(10, 0) = 0;
TX1(11, 0) = 0;
TX1(12, 0) = 0;
TX1(13, 0) = 0;
TX1(14, 0) = 0;
TX1(15, 0) = 0;
TX1(16, 0) = 0;
TX1(17, 0) = 0;
TX1(18, 0) = 0;
TX1(19, 0) = 0;
TX1(20, 0) = 0;
TX1(21, 0) = 0;
TX1(22, 0) = 0;
TX1(23, 0) = 0;
TX1(24, 0) = 0;
TX1(25, 0) = 0;
TX1(26, 0) = 0;
TX1(27, 0) = 0;
TX1(28, 0) = 0;
TX1(29, 0) = 0;
TX1(30, 0) = 0;
TX1(31, 0) = 0;
TX1(32, 0) = 0;
TX1(33, 0) = 0;
TX1(34, 0) = 0;
TX1(35, 0) = 0;
TX1(36, 0) = 0;
TX1(37, 0) = 0;
TX1(38, 0) = 0;
TX1(39, 0) = 0;
TX1(40, 0) = 0;
TX1(41, 0) = 0;
TX1(42, 0) = 0;
TX1(43, 0) = 0;
TX1(44, 0) = 0;
TX1(45, 0) = 0;
TX1(46, 0) = 0;
TX1(47, 0) = 0;
TX1(48, 0) = 0;
TX1(49, 0) = 0;
TX1(50, 0) = 0;
TX1(51, 0) = 0;
TX1(52, 0) = 0;
TX1(53, 0) = 0;
TX1(54, 0) = 0;
TX1(55, 0) = 0;
TX1(56, 0) = 0;
TX1(57, 0) = 0;
TX1(58, 0) = 0;
TX1(59, 0) = 0;
TX1(60, 0) = 0;
TX1(61, 0) = 0;
TX1(62, 0) = 0;
TX1(63, 0) = 0;
TX1(64, 0) = 0;
TX1(65, 0) = 0.21625;
TX1(66, 0) = 0.21625;
TX1(67, 0) = 0.21625;
TX1(68, 0) = 0.21625;
TX1(69, 0) = 0.21625;
TX1(70, 0) = 0.21625;
TX1(71, 0) = 0.21625;
TX1(72, 0) = 0.21625;
TX1(73, 0) = 0.21625;
TX1(74, 0) = 0.21625;
TX1(75, 0) = 0.21625;
TX1(76, 0) = 0.21625;
TX1(77, 0) = 0.21625;
TX1(78, 0) = 0.21625;
TX1(79, 0) = 0.21625;
TX1(80, 0) = 0.21625;
TX1(81, 0) = 0.21625;
TX1(82, 0) = 0.21625;
TX1(83, 0) = 0.21625;
TX1(84, 0) = 0.21625;
TX1(85, 0) = 0.21625;
TX1(86, 0) = 0.21625;
TX1(87, 0) = 0.21625;
TX1(88, 0) = 0.21625;
TX1(89, 0) = 0.21625;
TX1(90, 0) = 0.21625;
TX1(91, 0) = 0.21625;
TX1(92, 0) = 0.21625;
TX1(93, 0) = 0.21625;
TX1(94, 0) = 0.21625;
TX1(95, 0) = 0.21625;
TX1(96, 0) = 0.21625;
TX1(97, 0) = 0.21625;
TX1(98, 0) = 0.21625;
TX1(99, 0) = 0.21625;
TX1(100, 0) = 0.21625;
TX1(101, 0) = 0.21625;
TX1(102, 0) = 0.21625;
TX1(103, 0) = 0.21625;
TX1(104, 0) = 0.21625;
TX1(105, 0) = 0.21625;
TX1(106, 0) = 0.21625;
TX1(107, 0) = 0.21625;
TX1(108, 0) = 0.21625;
TX1(109, 0) = 0.21625;
TX1(110, 0) = 0.21625;
TX1(111, 0) = 0.21625;
TX1(112, 0) = 0.21625;
TX1(113, 0) = 0.21625;
TX1(114, 0) = 0.21625;
TX1(115, 0) = 0.21625;
TX1(116, 0) = 0.21625;
TX1(117, 0) = 0.21625;
TX1(118, 0) = 0.21625;
TX1(119, 0) = 0.21625;
TX1(120, 0) = 0.21625;
TX1(121, 0) = 0.21625;
TX1(122, 0) = 0.21625;
TX1(123, 0) = 0.21625;
TX1(124, 0) = 0.21625;
TX1(125, 0) = 0.21625;
TX1(126, 0) = 0.21625;
TX1(127, 0) = 0.21625;
TX1(128, 0) = 0.21625;
TX1(129, 0) = 0.21625;
TX1(130, 0) = 0.432499999999999;
TX1(131, 0) = 0.432499999999999;
TX1(132, 0) = 0.432499999999999;
TX1(133, 0) = 0.432499999999999;
TX1(134, 0) = 0.432499999999999;
TX1(135, 0) = 0.432499999999999;
TX1(136, 0) = 0.432499999999999;
TX1(137, 0) = 0.432499999999999;
TX1(138, 0) = 0.432499999999999;
TX1(139, 0) = 0.432499999999999;
TX1(140, 0) = 0.432499999999999;
TX1(141, 0) = 0.432499999999999;
TX1(142, 0) = 0.432499999999999;
TX1(143, 0) = 0.432499999999999;
TX1(144, 0) = 0.432499999999999;
TX1(145, 0) = 0.432499999999999;
TX1(146, 0) = 0.432499999999999;
TX1(147, 0) = 0.432499999999999;
TX1(148, 0) = 0.432499999999999;
TX1(149, 0) = 0.432499999999999;
TX1(150, 0) = 0.432499999999999;
TX1(151, 0) = 0.432499999999999;
TX1(152, 0) = 0.432499999999999;
TX1(153, 0) = 0.432499999999999;
TX1(154, 0) = 0.432499999999999;
TX1(155, 0) = 0.432499999999999;
TX1(156, 0) = 0.432499999999999;
TX1(157, 0) = 0.432499999999999;
TX1(158, 0) = 0.432499999999999;
TX1(159, 0) = 0.432499999999999;
TX1(160, 0) = 0.432499999999999;
TX1(161, 0) = 0.432499999999999;
TX1(162, 0) = 0.432499999999999;
TX1(163, 0) = 0.432499999999999;
TX1(164, 0) = 0.432499999999999;
TX1(165, 0) = 0.432499999999999;
TX1(166, 0) = 0.432499999999999;
TX1(167, 0) = 0.432499999999999;
TX1(168, 0) = 0.432499999999999;
TX1(169, 0) = 0.432499999999999;
TX1(170, 0) = 0.432499999999999;
TX1(171, 0) = 0.432499999999999;
TX1(172, 0) = 0.432499999999999;
TX1(173, 0) = 0.432499999999999;
TX1(174, 0) = 0.432499999999999;
TX1(175, 0) = 0.432499999999999;
TX1(176, 0) = 0.432499999999999;
TX1(177, 0) = 0.432499999999999;
TX1(178, 0) = 0.432499999999999;
TX1(179, 0) = 0.432499999999999;
TX1(180, 0) = 0.432499999999999;
TX1(181, 0) = 0.432499999999999;
TX1(182, 0) = 0.432499999999999;
TX1(183, 0) = 0.432499999999999;
TX1(184, 0) = 0.432499999999999;
TX1(185, 0) = 0.432499999999999;
TX1(186, 0) = 0.432499999999999;
TX1(187, 0) = 0.432499999999999;
TX1(188, 0) = 0.432499999999999;
TX1(189, 0) = 0.432499999999999;
TX1(190, 0) = 0.432499999999999;
TX1(191, 0) = 0.432499999999999;
TX1(192, 0) = 0.432499999999999;
TX1(193, 0) = 0.432499999999999;
TX1(194, 0) = 0.432499999999999;
TX1(195, 0) = 0.648749999999999;
TX1(196, 0) = 0.648749999999999;
TX1(197, 0) = 0.648749999999999;
TX1(198, 0) = 0.648749999999999;
TX1(199, 0) = 0.648749999999999;
TX1(200, 0) = 0.648749999999999;
TX1(201, 0) = 0.648749999999999;
TX1(202, 0) = 0.648749999999999;
TX1(203, 0) = 0.648749999999999;
TX1(204, 0) = 0.648749999999999;
TX1(205, 0) = 0.648749999999999;
TX1(206, 0) = 0.648749999999999;
TX1(207, 0) = 0.648749999999999;
TX1(208, 0) = 0.648749999999999;
TX1(209, 0) = 0.648749999999999;
TX1(210, 0) = 0.648749999999999;
TX1(211, 0) = 0.648749999999999;
TX1(212, 0) = 0.648749999999999;
TX1(213, 0) = 0.648749999999999;
TX1(214, 0) = 0.648749999999999;
TX1(215, 0) = 0.648749999999999;
TX1(216, 0) = 0.648749999999999;
TX1(217, 0) = 0.648749999999999;
TX1(218, 0) = 0.648749999999999;
TX1(219, 0) = 0.648749999999999;
TX1(220, 0) = 0.648749999999999;
TX1(221, 0) = 0.648749999999999;
TX1(222, 0) = 0.648749999999999;
TX1(223, 0) = 0.648749999999999;
TX1(224, 0) = 0.648749999999999;
TX1(225, 0) = 0.648749999999999;
TX1(226, 0) = 0.648749999999999;
TX1(227, 0) = 0.648749999999999;
TX1(228, 0) = 0.648749999999999;
TX1(229, 0) = 0.648749999999999;
TX1(230, 0) = 0.648749999999999;
TX1(231, 0) = 0.648749999999999;
TX1(232, 0) = 0.648749999999999;
TX1(233, 0) = 0.648749999999999;
TX1(234, 0) = 0.648749999999999;
TX1(235, 0) = 0.648749999999999;
TX1(236, 0) = 0.648749999999999;
TX1(237, 0) = 0.648749999999999;
TX1(238, 0) = 0.648749999999999;
TX1(239, 0) = 0.648749999999999;
TX1(240, 0) = 0.648749999999999;
TX1(241, 0) = 0.648749999999999;
TX1(242, 0) = 0.648749999999999;
TX1(243, 0) = 0.648749999999999;
TX1(244, 0) = 0.648749999999999;
TX1(245, 0) = 0.648749999999999;
TX1(246, 0) = 0.648749999999999;
TX1(247, 0) = 0.648749999999999;
TX1(248, 0) = 0.648749999999999;
TX1(249, 0) = 0.648749999999999;
TX1(250, 0) = 0.648749999999999;
TX1(251, 0) = 0.648749999999999;
TX1(252, 0) = 0.648749999999999;
TX1(253, 0) = 0.648749999999999;
TX1(254, 0) = 0.648749999999999;
TX1(255, 0) = 0.648749999999999;
TX1(256, 0) = 0.648749999999999;
TX1(257, 0) = 0.648749999999999;
TX1(258, 0) = 0.648749999999999;
TX1(259, 0) = 0.648749999999999;
TX1(260, 0) = 0.864999999999999;
TX1(261, 0) = 0.864999999999999;
TX1(262, 0) = 0.864999999999999;
TX1(263, 0) = 0.864999999999999;
TX1(264, 0) = 0.864999999999999;
TX1(265, 0) = 0.864999999999999;
TX1(266, 0) = 0.864999999999999;
TX1(267, 0) = 0.864999999999999;
TX1(268, 0) = 0.864999999999999;
TX1(269, 0) = 0.864999999999999;
TX1(270, 0) = 0.864999999999999;
TX1(271, 0) = 0.864999999999999;
TX1(272, 0) = 0.864999999999999;
TX1(273, 0) = 0.864999999999999;
TX1(274, 0) = 0.864999999999999;
TX1(275, 0) = 0.864999999999999;
TX1(276, 0) = 0.864999999999999;
TX1(277, 0) = 0.864999999999999;
TX1(278, 0) = 0.864999999999999;
TX1(279, 0) = 0.864999999999999;
TX1(280, 0) = 0.864999999999999;
TX1(281, 0) = 0.864999999999999;
TX1(282, 0) = 0.864999999999999;
TX1(283, 0) = 0.864999999999999;
TX1(284, 0) = 0.864999999999999;
TX1(285, 0) = 0.864999999999999;
TX1(286, 0) = 0.864999999999999;
TX1(287, 0) = 0.864999999999999;
TX1(288, 0) = 0.864999999999999;
TX1(289, 0) = 0.864999999999999;
TX1(290, 0) = 0.864999999999999;
TX1(291, 0) = 0.864999999999999;
TX1(292, 0) = 0.864999999999999;
TX1(293, 0) = 0.864999999999999;
TX1(294, 0) = 0.864999999999999;
TX1(295, 0) = 0.864999999999999;
TX1(296, 0) = 0.864999999999999;
TX1(297, 0) = 0.864999999999999;
TX1(298, 0) = 0.864999999999999;
TX1(299, 0) = 0.864999999999999;
TX1(300, 0) = 0.864999999999999;
TX1(301, 0) = 0.864999999999999;
TX1(302, 0) = 0.864999999999999;
TX1(303, 0) = 0.864999999999999;
TX1(304, 0) = 0.864999999999999;
TX1(305, 0) = 0.864999999999999;
TX1(306, 0) = 0.864999999999999;
TX1(307, 0) = 0.864999999999999;
TX1(308, 0) = 0.864999999999999;
TX1(309, 0) = 0.864999999999999;
TX1(310, 0) = 0.864999999999999;
TX1(311, 0) = 0.864999999999999;
TX1(312, 0) = 0.864999999999999;
TX1(313, 0) = 0.864999999999999;
TX1(314, 0) = 0.864999999999999;
TX1(315, 0) = 0.864999999999999;
TX1(316, 0) = 0.864999999999999;
TX1(317, 0) = 0.864999999999999;
TX1(318, 0) = 0.864999999999999;
TX1(319, 0) = 0.864999999999999;
TX1(320, 0) = 0.864999999999999;
TX1(321, 0) = 0.864999999999999;
TX1(322, 0) = 0.864999999999999;
TX1(323, 0) = 0.864999999999999;
TX1(324, 0) = 0.864999999999999;
TX1(0, 1) = 0;
TX1(1, 1) = 4.6875;
TX1(2, 1) = 9.375;
TX1(3, 1) = 14.0625;
TX1(4, 1) = 18.75;
TX1(5, 1) = 23.4375;
TX1(6, 1) = 28.125;
TX1(7, 1) = 32.8125;
TX1(8, 1) = 37.5;
TX1(9, 1) = 42.1875;
TX1(10, 1) = 46.875;
TX1(11, 1) = 51.5625;
TX1(12, 1) = 56.25;
TX1(13, 1) = 60.9375;
TX1(14, 1) = 65.625;
TX1(15, 1) = 70.3125;
TX1(16, 1) = 75;
TX1(17, 1) = 79.6875;
TX1(18, 1) = 84.375;
TX1(19, 1) = 89.0625;
TX1(20, 1) = 93.75;
TX1(21, 1) = 98.4375;
TX1(22, 1) = 103.125;
TX1(23, 1) = 107.8125;
TX1(24, 1) = 112.5;
TX1(25, 1) = 117.1875;
TX1(26, 1) = 121.875;
TX1(27, 1) = 126.5625;
TX1(28, 1) = 131.25;
TX1(29, 1) = 135.9375;
TX1(30, 1) = 140.625;
TX1(31, 1) = 145.3125;
TX1(32, 1) = 150;
TX1(33, 1) = 154.6875;
TX1(34, 1) = 159.375;
TX1(35, 1) = 164.0625;
TX1(36, 1) = 168.75;
TX1(37, 1) = 173.4375;
TX1(38, 1) = 178.125;
TX1(39, 1) = 182.8125;
TX1(40, 1) = 187.5;
TX1(41, 1) = 192.1875;
TX1(42, 1) = 196.875;
TX1(43, 1) = 201.5625;
TX1(44, 1) = 206.25;
TX1(45, 1) = 210.9375;
TX1(46, 1) = 215.625;
TX1(47, 1) = 220.3125;
TX1(48, 1) = 225;
TX1(49, 1) = 229.6875;
TX1(50, 1) = 234.375;
TX1(51, 1) = 239.0625;
TX1(52, 1) = 243.75;
TX1(53, 1) = 248.4375;
TX1(54, 1) = 253.125;
TX1(55, 1) = 257.8125;
TX1(56, 1) = 262.5;
TX1(57, 1) = 267.1875;
TX1(58, 1) = 271.875;
TX1(59, 1) = 276.5625;
TX1(60, 1) = 281.25;
TX1(61, 1) = 285.9375;
TX1(62, 1) = 290.625;
TX1(63, 1) = 295.3125;
TX1(64, 1) = 300;
TX1(65, 1) = 0;
TX1(66, 1) = 4.6875;
TX1(67, 1) = 9.375;
TX1(68, 1) = 14.0625;
TX1(69, 1) = 18.75;
TX1(70, 1) = 23.4375;
TX1(71, 1) = 28.125;
TX1(72, 1) = 32.8125;
TX1(73, 1) = 37.5;
TX1(74, 1) = 42.1875;
TX1(75, 1) = 46.875;
TX1(76, 1) = 51.5625;
TX1(77, 1) = 56.25;
TX1(78, 1) = 60.9375;
TX1(79, 1) = 65.625;
TX1(80, 1) = 70.3125;
TX1(81, 1) = 75;
TX1(82, 1) = 79.6875;
TX1(83, 1) = 84.375;
TX1(84, 1) = 89.0625;
TX1(85, 1) = 93.75;
TX1(86, 1) = 98.4375;
TX1(87, 1) = 103.125;
TX1(88, 1) = 107.8125;
TX1(89, 1) = 112.5;
TX1(90, 1) = 117.1875;
TX1(91, 1) = 121.875;
TX1(92, 1) = 126.5625;
TX1(93, 1) = 131.25;
TX1(94, 1) = 135.9375;
TX1(95, 1) = 140.625;
TX1(96, 1) = 145.3125;
TX1(97, 1) = 150;
TX1(98, 1) = 154.6875;
TX1(99, 1) = 159.375;
TX1(100, 1) = 164.0625;
TX1(101, 1) = 168.75;
TX1(102, 1) = 173.4375;
TX1(103, 1) = 178.125;
TX1(104, 1) = 182.8125;
TX1(105, 1) = 187.5;
TX1(106, 1) = 192.1875;
TX1(107, 1) = 196.875;
TX1(108, 1) = 201.5625;
TX1(109, 1) = 206.25;
TX1(110, 1) = 210.9375;
TX1(111, 1) = 215.625;
TX1(112, 1) = 220.3125;
TX1(113, 1) = 225;
TX1(114, 1) = 229.6875;
TX1(115, 1) = 234.375;
TX1(116, 1) = 239.0625;
TX1(117, 1) = 243.75;
TX1(118, 1) = 248.4375;
TX1(119, 1) = 253.125;
TX1(120, 1) = 257.8125;
TX1(121, 1) = 262.5;
TX1(122, 1) = 267.1875;
TX1(123, 1) = 271.875;
TX1(124, 1) = 276.5625;
TX1(125, 1) = 281.25;
TX1(126, 1) = 285.9375;
TX1(127, 1) = 290.625;
TX1(128, 1) = 295.3125;
TX1(129, 1) = 300;
TX1(130, 1) = 0;
TX1(131, 1) = 4.6875;
TX1(132, 1) = 9.375;
TX1(133, 1) = 14.0625;
TX1(134, 1) = 18.75;
TX1(135, 1) = 23.4375;
TX1(136, 1) = 28.125;
TX1(137, 1) = 32.8125;
TX1(138, 1) = 37.5;
TX1(139, 1) = 42.1875;
TX1(140, 1) = 46.875;
TX1(141, 1) = 51.5625;
TX1(142, 1) = 56.25;
TX1(143, 1) = 60.9375;
TX1(144, 1) = 65.625;
TX1(145, 1) = 70.3125;
TX1(146, 1) = 75;
TX1(147, 1) = 79.6875;
TX1(148, 1) = 84.375;
TX1(149, 1) = 89.0625;
TX1(150, 1) = 93.75;
TX1(151, 1) = 98.4375;
TX1(152, 1) = 103.125;
TX1(153, 1) = 107.8125;
TX1(154, 1) = 112.5;
TX1(155, 1) = 117.1875;
TX1(156, 1) = 121.875;
TX1(157, 1) = 126.5625;
TX1(158, 1) = 131.25;
TX1(159, 1) = 135.9375;
TX1(160, 1) = 140.625;
TX1(161, 1) = 145.3125;
TX1(162, 1) = 150;
TX1(163, 1) = 154.6875;
TX1(164, 1) = 159.375;
TX1(165, 1) = 164.0625;
TX1(166, 1) = 168.75;
TX1(167, 1) = 173.4375;
TX1(168, 1) = 178.125;
TX1(169, 1) = 182.8125;
TX1(170, 1) = 187.5;
TX1(171, 1) = 192.1875;
TX1(172, 1) = 196.875;
TX1(173, 1) = 201.5625;
TX1(174, 1) = 206.25;
TX1(175, 1) = 210.9375;
TX1(176, 1) = 215.625;
TX1(177, 1) = 220.3125;
TX1(178, 1) = 225;
TX1(179, 1) = 229.6875;
TX1(180, 1) = 234.375;
TX1(181, 1) = 239.0625;
TX1(182, 1) = 243.75;
TX1(183, 1) = 248.4375;
TX1(184, 1) = 253.125;
TX1(185, 1) = 257.8125;
TX1(186, 1) = 262.5;
TX1(187, 1) = 267.1875;
TX1(188, 1) = 271.875;
TX1(189, 1) = 276.5625;
TX1(190, 1) = 281.25;
TX1(191, 1) = 285.9375;
TX1(192, 1) = 290.625;
TX1(193, 1) = 295.3125;
TX1(194, 1) = 300;
TX1(195, 1) = 0;
TX1(196, 1) = 4.6875;
TX1(197, 1) = 9.375;
TX1(198, 1) = 14.0625;
TX1(199, 1) = 18.75;
TX1(200, 1) = 23.4375;
TX1(201, 1) = 28.125;
TX1(202, 1) = 32.8125;
TX1(203, 1) = 37.5;
TX1(204, 1) = 42.1875;
TX1(205, 1) = 46.875;
TX1(206, 1) = 51.5625;
TX1(207, 1) = 56.25;
TX1(208, 1) = 60.9375;
TX1(209, 1) = 65.625;
TX1(210, 1) = 70.3125;
TX1(211, 1) = 75;
TX1(212, 1) = 79.6875;
TX1(213, 1) = 84.375;
TX1(214, 1) = 89.0625;
TX1(215, 1) = 93.75;
TX1(216, 1) = 98.4375;
TX1(217, 1) = 103.125;
TX1(218, 1) = 107.8125;
TX1(219, 1) = 112.5;
TX1(220, 1) = 117.1875;
TX1(221, 1) = 121.875;
TX1(222, 1) = 126.5625;
TX1(223, 1) = 131.25;
TX1(224, 1) = 135.9375;
TX1(225, 1) = 140.625;
TX1(226, 1) = 145.3125;
TX1(227, 1) = 150;
TX1(228, 1) = 154.6875;
TX1(229, 1) = 159.375;
TX1(230, 1) = 164.0625;
TX1(231, 1) = 168.75;
TX1(232, 1) = 173.4375;
TX1(233, 1) = 178.125;
TX1(234, 1) = 182.8125;
TX1(235, 1) = 187.5;
TX1(236, 1) = 192.1875;
TX1(237, 1) = 196.875;
TX1(238, 1) = 201.5625;
TX1(239, 1) = 206.25;
TX1(240, 1) = 210.9375;
TX1(241, 1) = 215.625;
TX1(242, 1) = 220.3125;
TX1(243, 1) = 225;
TX1(244, 1) = 229.6875;
TX1(245, 1) = 234.375;
TX1(246, 1) = 239.0625;
TX1(247, 1) = 243.75;
TX1(248, 1) = 248.4375;
TX1(249, 1) = 253.125;
TX1(250, 1) = 257.8125;
TX1(251, 1) = 262.5;
TX1(252, 1) = 267.1875;
TX1(253, 1) = 271.875;
TX1(254, 1) = 276.5625;
TX1(255, 1) = 281.25;
TX1(256, 1) = 285.9375;
TX1(257, 1) = 290.625;
TX1(258, 1) = 295.3125;
TX1(259, 1) = 300;
TX1(260, 1) = 0;
TX1(261, 1) = 4.6875;
TX1(262, 1) = 9.375;
TX1(263, 1) = 14.0625;
TX1(264, 1) = 18.75;
TX1(265, 1) = 23.4375;
TX1(266, 1) = 28.125;
TX1(267, 1) = 32.8125;
TX1(268, 1) = 37.5;
TX1(269, 1) = 42.1875;
TX1(270, 1) = 46.875;
TX1(271, 1) = 51.5625;
TX1(272, 1) = 56.25;
TX1(273, 1) = 60.9375;
TX1(274, 1) = 65.625;
TX1(275, 1) = 70.3125;
TX1(276, 1) = 75;
TX1(277, 1) = 79.6875;
TX1(278, 1) = 84.375;
TX1(279, 1) = 89.0625;
TX1(280, 1) = 93.75;
TX1(281, 1) = 98.4375;
TX1(282, 1) = 103.125;
TX1(283, 1) = 107.8125;
TX1(284, 1) = 112.5;
TX1(285, 1) = 117.1875;
TX1(286, 1) = 121.875;
TX1(287, 1) = 126.5625;
TX1(288, 1) = 131.25;
TX1(289, 1) = 135.9375;
TX1(290, 1) = 140.625;
TX1(291, 1) = 145.3125;
TX1(292, 1) = 150;
TX1(293, 1) = 154.6875;
TX1(294, 1) = 159.375;
TX1(295, 1) = 164.0625;
TX1(296, 1) = 168.75;
TX1(297, 1) = 173.4375;
TX1(298, 1) = 178.125;
TX1(299, 1) = 182.8125;
TX1(300, 1) = 187.5;
TX1(301, 1) = 192.1875;
TX1(302, 1) = 196.875;
TX1(303, 1) = 201.5625;
TX1(304, 1) = 206.25;
TX1(305, 1) = 210.9375;
TX1(306, 1) = 215.625;
TX1(307, 1) = 220.3125;
TX1(308, 1) = 225;
TX1(309, 1) = 229.6875;
TX1(310, 1) = 234.375;
TX1(311, 1) = 239.0625;
TX1(312, 1) = 243.75;
TX1(313, 1) = 248.4375;
TX1(314, 1) = 253.125;
TX1(315, 1) = 257.8125;
TX1(316, 1) = 262.5;
TX1(317, 1) = 267.1875;
TX1(318, 1) = 271.875;
TX1(319, 1) = 276.5625;
TX1(320, 1) = 281.25;
TX1(321, 1) = 285.9375;
TX1(322, 1) = 290.625;
TX1(323, 1) = 295.3125;
TX1(324, 1) = 300;
return TX1;
}
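// SetupLevel7: placeholder that was meant to size CN for the 325-point grid; the A and C initialisation is left commented out.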
void SetupLevel7(Eigen::MatrixXd &TX1, Eigen::MatrixXd &CN, Eigen::MatrixXd &A, Eigen::MatrixXd &C)
{
	CN.resize(325, 2);
//C(1, 2);
//C(0, 1) = 1.73;
//C(0, 2) = 600;
//A(1, 2);
//A(0, 1) = 2;
//A(0, 2) = 64;
}
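// Builds the level-7 grid, uses the same points for TX1 and CN, fills the 1 x 2 parameter rows A and C, and calls CudaRBF::Gaussian2D once.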
int main() {
MatrixXd TX1 = GetTX7();
MatrixXd CN = GetTX7();
MatrixXd C(1, 2);
MatrixXd A(1, 2);
C << 1.73,600;
A << 2, 64;
for (int i = 0; i < 1; i++)
{
printf("i=%i", i);
Leicester::CudaLib::CudaRBF::Gaussian2D(TX1, CN, A, C);
}
return 0;
}
| 8654ef22de989267771af5f47ece9d4bf94a83df.cu | #include "kernel.h"
using Eigen::MatrixXd;
using Eigen::VectorXd;
using namespace std;
using namespace Eigen;
using namespace thrust;
namespace Leicester
{
namespace CudaLib
{
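		// GaussianND: CPU (Eigen) evaluation of an anisotropic Gaussian RBF on test points TP against centres CN; returns the basis matrix and its derivative matrices (the V, Vt, Vx, Vxy noted below), one column per centre.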
vector<MatrixXd> GaussianND(const MatrixXd &TP, const MatrixXd &CN, const MatrixXd &A, const MatrixXd &C)
{
vector<MatrixXd> result;// V, Vt, Vx Vxy
int Num = CN.rows();
int N = TP.rows();
int dimensions = TP.cols();
MatrixXd D(N, Num);
D.fill(1.0);
vector<MatrixXd> Derivatives;
Derivatives.push_back(D);
for (int d = 0; d < 3; d++)
{
MatrixXd Dx(N, Num);
Dx.fill(1.0);
Derivatives.push_back(Dx);
}
for (int j = 0; j < Num; j++)
{
vector<VectorXd> FAIn;
for (int d = 0; d < dimensions; d++)
{
VectorXd a1 = A(0, d)*(TP.col(d).array() - CN(j, d));
VectorXd FAI = (-((A(0, d)*(TP.col(d).array() - CN(j, d))).array() * (A(0, d)*(TP.col(d).array() - CN(j, d))).array()) / (C(0, d) *C(0, d))).array().exp();
Derivatives[0].col(j).array() *= FAI.array();
FAIn.push_back(FAI);
}
VectorXd vt = -2 * (A(0, 0) / C(0, 0)) * (A(0, 0) / C(0, 0)) * (TP.col(0).array() - CN(j, 0));
Derivatives[1].col(j) = vt;
VectorXd sumij = VectorXd::Zero(TP.rows());
MatrixXd dS(TP.rows(), dimensions - 1);
for (int d = 1; d < dimensions; d++)
{
dS.col(d - 1) = (-2 * (A(0, d) / C(0, d)) * (A(0, d) / C(0, d)) * (TP.col(d).array() - CN(j, d))).array() * TP.col(d).array();
VectorXd sumi = VectorXd::Zero(TP.rows());
for (int i = 1; i < TP.cols(); i++)
{
sumi.array() = sumi.array() + TP.col(d).array() * TP.col(i).array() * (-2 * (A(0, d) * A(0, d)) / (C(0, d) *C(0, d)) + (4 * (A(0, d) * A(0, d)* A(0, d) * A(0, d)) * ((TP.col(d).array() - CN(j, i)).array() * (TP.col(d).array() - CN(j, i)).array() / (C(0, d) *C(0, d)*C(0, d) *C(0, d)))).array()).array();
}
sumij.array() = sumij.array() + sumi.array();
}
VectorXd sum = dS.rowwise().sum();
Derivatives[2].col(j) = sum;
Derivatives[3].col(j) = sumij;
for (int d = 1; d < Derivatives.size(); d++)
Derivatives[d].col(j).array() *= Derivatives[0].col(j).array();
}
return Derivatives;
}
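		// GaussianND_CUDA: device-side wrapper; only thread (0,0) does any work, allocating the four output matrices on the device heap and launching Gaussian2d2_CUDA (presumably declared in kernel.h) to fill them.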
__global__ void GaussianND_CUDA(double** result, double *TP, dim3 dTP, double *CN, dim3 dCN, double *A, dim3 dA, double *C, dim3 dC)
{
int i = blockDim.y * blockIdx.y + threadIdx.y;
int j = blockDim.x * blockIdx.x + threadIdx.x;
__syncthreads();
if (i == 0 & j == 0)
{
printf("start mqd2_CUDA i=%i j =%i \r\n", i, j);
double* D = (double *)malloc(sizeof(double) * dTP.x * dCN.y);
double* Dt = (double *)malloc(sizeof(double) * dTP.x *dCN.y);
double* Dx = (double *)malloc(sizeof(double) * dTP.x *dCN.y);
double* Dxx = (double *)malloc(sizeof(double) * dTP.x *dCN.y);
printf("allocated arrays mqd2_CUDA i=%i j =%i \r\n", i, j);
dim3 threads(32, 32);
//dim3 grid(CNx / threads.x, CNy / threads.y);
dim3 grid(1, 1);
dim3 dimTP(dTP.x, dTP.y);
dim3 dimCN(dCN.x, dCN.y);
dim3 dimA(dA.x, dA.y);
dim3 dimC(dC.x, dC.y);
//printf("TP size=%f", sizeof(TP));
//printMatrix_CUDA << < dim3(1, 1), dim3(1, 1) >> > (TP, dimTP);
//gpuErrchk << <1, 1 >> >(cudaPeekAtLastError());
//gpuErrchk << <1, 1 >> >(cudaDeviceSynchronize());
printf("dimTPx=%i dimTPy=%i dimCNx=%i dimCNy=%i dimAx=%i dimAy=%i dimCx=%i dimCy=%i\r\n", dimTP.x, dimTP.y, dimCN.x, dimCN.y, dimA.x, dimA.y, dimC.x, dimC.y);
				Gaussian2d2_CUDA<<<1, 1>>>(D, Dt, Dx, Dxx, TP, dimTP, CN, dimCN, A, dimA, C, dimC);
				gpuAssert<<<1, 1>>>(cudaPeekAtLastError(), __FILE__, __LINE__);
				gpuAssert<<<1, 1>>>(cudaDeviceSynchronize(), __FILE__, __LINE__);
//printf("D size=%f", sizeof(D));
//printMatrix_CUDA << < dim3(1, 1), dim3(1, 1) >> > (D, dim3(dTP.y, dTP.y));
//gpuErrchk << <1, 1 >> >(cudaPeekAtLastError());
//gpuErrchk << <1, 1 >> >(cudaDeviceSynchronize());
//__syncthreads();
result[0] = D;
result[1] = Dt;
result[2] = Dx;
result[3] = Dxx;
}
__syncthreads();
//printf("end mqd2_CUDA");
}
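		// CudaRBF::Gaussian2D: host entry point; copies TP, CN, A and C to the device, launches Gaussian2d_CUDA to fill D, Dt, Dx and Dxx, then maps the results back into Eigen matrices and frees the device buffers.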
vector<MatrixXd> CudaRBF::Gaussian2D(const MatrixXd &TP, const MatrixXd &CN, const MatrixXd &A, const MatrixXd &C)
{
			gpuAssert<<<1, 1>>>(cudaDeviceSynchronize(), __FILE__, __LINE__);
//Allocate the input on host and device
const double *a, *c, *tx, *cn;
a = A.data();
c = C.data();
tx = TP.data();
cn = CN.data();
double *d_a, *d_c, *d_tx, *d_cn;
//size_t *pValue;
//cudaDeviceGetLimit(pValue, cudaLimit::cudaLimitMallocHeapSize);
//printf("Heap limit=%i\r\n", &pValue);
cudaError_t e = cudaMalloc((void **)&d_a, A.rows() * A.cols() * sizeof(double));
if (e != cudaSuccess)
printf("cudaMalloc d_a returned error %s (code %d), line(%d) when allocating %i bytes\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * A.rows() * A.cols());
e = cudaMalloc((void **)&d_c, C.rows() * C.cols() * sizeof(double));
if (e != cudaSuccess)
printf("cudaMalloc d_c returned error %s (code %d), line(%d) when allocating %i bytes\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * C.rows() * C.cols());
e = cudaMalloc((void **)&d_tx, TP.rows() * TP.cols() * sizeof(double));
if (e != cudaSuccess)
printf("cudaMalloc d_tx returned error %s (code %d), line(%d) when allocating %i bytes\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * TP.rows() * TP.cols());
e = cudaMalloc((void **)&d_cn, CN.rows() * CN.cols() * sizeof(double));
if (e != cudaSuccess)
printf("cudaMalloc d_tx1 returned error %s (code %d), line(%d) when allocating %i bytes\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * CN.rows() * CN.cols());
e = cudaMemcpy(d_a, a, sizeof(double) * A.rows() * A.cols(), cudaMemcpyKind::cudaMemcpyHostToDevice);
if (e != cudaSuccess)
printf("cudaMemcpy d_a returned error %s (code %d), line(%d) when copying %i bytes\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * A.rows() * A.cols());
e = cudaMemcpy(d_c, c, sizeof(double) * C.rows() * C.cols(), cudaMemcpyKind::cudaMemcpyHostToDevice);
if (e != cudaSuccess)
printf("cudaMemcpy d_c returned error %s (code %d), line(%d) when copying %i bytes\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * C.rows() * C.cols());
e = cudaMemcpy(d_tx, tx, sizeof(double) * TP.rows() * TP.cols(), cudaMemcpyKind::cudaMemcpyHostToDevice);
if (e != cudaSuccess)
printf("cudaMemcpy d_tx returned error %s (code %d), line(%d) when copying %i bytes\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * TP.rows() * TP.cols());
			e = cudaMemcpy(d_cn, cn, sizeof(double) * CN.rows() * CN.cols(), cudaMemcpyKind::cudaMemcpyHostToDevice);
			if (e != cudaSuccess)
				printf("cudaMemcpy d_cn returned error %s (code %d), line(%d) when copying %i bytes\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * CN.rows() * CN.cols());
//Allocate the output on host and device
double *d_FAI, *d_D, *d_Dt, *d_Dx, *d_Dxx;
double *h_FAI, *h_D, *h_Dt, *h_Dx, *h_Dxx;
int rows = CN.rows();
int cols = TP.rows();
//h_FAI = new double[rows * cols];
h_D = (double*)malloc(sizeof(double) * rows * cols);
h_Dt = (double*)malloc(sizeof(double) * rows * cols);
h_Dx = (double*)malloc(sizeof(double) * rows * cols);
h_Dxx = (double*)malloc(sizeof(double) * rows * cols);
/*e = cudaMalloc((void **)&d_FAI, rows * cols * sizeof(double));
if (e != cudaSuccess)
printf("cudaMalloc d_FAI returned error %s (code %d), line(%d)\n", cudaGetErrorString(e), e, __LINE__);*/
e = cudaMalloc((void **)&d_D, rows * cols * sizeof(double));
if (e != cudaSuccess)
printf("cudaMalloc d_D returned error %s (code %d), line(%d) when allocating %i bytes\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
e = cudaMalloc((void **)&d_Dt, rows * cols * sizeof(double));
if (e != cudaSuccess)
printf("cudaMalloc d_Dt returned error %s (code %d), line(%d) when allocating %i bytes\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
e = cudaMalloc((void **)&d_Dx, rows * cols * sizeof(double));
if (e != cudaSuccess)
printf("cudaMalloc d_Dx returned error %s (code %d), line(%d) when allocating %i bytes\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
e = cudaMalloc((void **)&d_Dxx, rows * cols * sizeof(double));
if (e != cudaSuccess)
printf("cudaMalloc d_Dxx returned error %s (code %d), line(%d) when allocating %i bytes\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, 0));
int block_size = (deviceProp.major < 2) ? 16 : 32;
dim3 dimTx(TP.cols(), TP.rows());
dim3 dimA(A.cols(), A.rows());
dim3 dimC(C.cols(), C.rows());
dim3 threads(block_size, block_size);
//dim3 grid(dimTx.x / threads.x, dimTx.y / threads.y);
dim3 grid(1, 1);
//test << < grid, threads >> > ();
//cudaDeviceSynchronize();
//printMatrix(tx, dimTx);
			Gaussian2d_CUDA<<<grid, threads>>>(d_D, d_Dt, d_Dx, d_Dxx, d_tx, dimTx.x, dimTx.y, d_cn, dimTx.x, dimTx.y, d_a, dimA.x, dimA.y, d_c, dimC.x, dimC.y);
//mqd2_CUDA<32>(d_result, d_tx, dimTx.x, dimTx.y, d_tx1, dimTx.x, dimTx.y, d_a, dimA.x, dimA.y, d_c, dimC.x, dimC.y);
			gpuAssert<<<1, 1>>>(cudaThreadSynchronize(), __FILE__, __LINE__);
			gpuAssert<<<1, 1>>>(cudaDeviceSynchronize(), __FILE__, __LINE__);
			gpuAssert<<<1, 1>>>(cudaPeekAtLastError(), __FILE__, __LINE__);
e = cudaMemcpy(h_D, d_D, sizeof(double) * rows * cols, cudaMemcpyKind::cudaMemcpyDeviceToHost);
if (e != cudaSuccess)
printf("cudaMemcpy d_D returned error %s (code %d), line(%d) when copying%i\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
e = cudaMemcpy(h_Dt, d_Dt, sizeof(double) * rows * cols, cudaMemcpyKind::cudaMemcpyDeviceToHost);
if (e != cudaSuccess)
printf("cudaMemcpy d_Dt returned error %s (code %d), line(%d) when copying%i\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
e = cudaMemcpy(h_Dx, d_Dx, sizeof(double) * rows * cols, cudaMemcpyKind::cudaMemcpyDeviceToHost);
if (e != cudaSuccess)
printf("cudaMemcpy d_Dx returned error %s (code %d), line(%d) when copying%i\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
e = cudaMemcpy(h_Dxx, d_Dxx, sizeof(double) * rows * cols, cudaMemcpyKind::cudaMemcpyDeviceToHost);
if (e != cudaSuccess)
printf("cudaMemcpy d_Dxx returned error %s (code %d), line(%d) when copying%i\n", cudaGetErrorString(e), e, __LINE__, sizeof(double) * rows * cols);
//gpuAssert << <1, 1 >> > (cudaThreadSynchronize());
			gpuAssert<<<1, 1>>>(cudaDeviceSynchronize(), __FILE__, __LINE__);
//printMatrix(h_D, dimTx);
MatrixXd D(rows, cols);
Eigen::Map<Eigen::MatrixXd> dataMapD(h_D, rows, cols);
D = dataMapD.eval();
MatrixXd Dt(rows, cols);
Eigen::Map<Eigen::MatrixXd> dataMapDt(h_Dt, rows, cols);
Dt = dataMapDt.eval();
// printMatrix(h_Dx, dim3(15, 15));
MatrixXd Dx(rows, cols);
Eigen::Map<Eigen::MatrixXd> dataMapDx(h_Dx, rows, cols);
Dx = dataMapDx.eval();
MatrixXd Dxx(rows, cols);
Eigen::Map<Eigen::MatrixXd> dataMapDxx(h_Dxx, rows, cols);
Dxx = dataMapDxx.eval();
free(h_D);
free(h_Dt);
free(h_Dx);
free(h_Dxx);
cudaFree(d_D);
cudaFree(d_Dt);
cudaFree(d_Dx);
cudaFree(d_Dxx);
cudaFree(d_a);
cudaFree(d_c);
cudaFree(d_tx);
cudaFree(d_cn);
//cudaDeviceReset();
return { D, Dt, Dx, Dxx };
}
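		// PushAndQueue: returns a vector of length A.rows() + 2 with push placed before the entries of A and queue appended after them.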
VectorXd PushAndQueue(double push, VectorXd A, double queue)
{
VectorXd result(A.rows() + 2);
result[0] = push;
for (int i = 0; i < A.rows(); i++)
{
				result[i + 1] = A[i];
}
result[A.rows() + 1] = queue;
return result;
}
//int MethodOfLines::MoLiteration(double Tend, double Tdone, double dt, double *G, int GRows, int GCols, double *lamb, int lambRows, int lambCols, double inx2, double r, double K, MatrixXd A1, MatrixXd Aend, MatrixXd H)
//{
// int count = 0;
// while (Tend - Tdone > 1E-8)
// {
// Tdone += dt;
//
// int sizeG = GRows * GCols;
// int sizeLamb = lambRows * lambCols;
// int memG = sizeof(double) * sizeG;
// int memLamb = sizeof(double) * sizeLamb;
//
// double *d_G, *d_lamb, *d_FFF;
// int sizeFFF = GRows * lambCols;
// int memFFF = sizeof(double)* sizeFFF;
//
// double *h_FFF = (double *)malloc(memFFF);
// double *h_CUBLAS = (double *)malloc(memFFF);
//
// checkCudaErrors(cudaMalloc((void **)&d_G, memG));
// checkCudaErrors(cudaMalloc((void **)&d_lamb, memLamb));
// checkCudaErrors(cudaMemcpy(d_G, G, memG, cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMemcpy(d_lamb, lamb, memLamb, cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMalloc((void **)&d_FFF, memFFF));
//
// cublasHandle_t handle;
// checkCudaErrors(cublasCreate(&handle));
// const double alpha = 1.0;
// const double beta = 1.0;
// checkCudaErrors(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, GRows, lambCols, GCols, &alpha, d_G, GRows, d_lamb, lambRows, &beta, d_FFF, GRows));
//
// checkCudaErrors(cudaMemcpy(h_FFF, d_FFF, memFFF, cudaMemcpyDeviceToHost));
// printf("after cublasDgemm:\r\n");
// //double i[] = h_FFF;
// VectorXd FFF = Map<VectorXd >(h_FFF, GRows, lambCols);
// VectorXd fff = PushAndQueue(0, FFF, inx2 - exp(-r*Tdone)*K);
// printf("after PushAndQueue:\r\n");
// MatrixXd HH(A1.cols(), A1.cols());
// HH.row(0) = A1;
// HH.middleRows(1, HH.rows() - 2) = H;
// HH.row(HH.rows() - 1) = Aend;
// printf("after HH construction:\r\n");
// //LLT<MatrixXd> lltOfA(HH);
// //lamb = lltOfA.solve(fff);
//
// cusolverDnHandle_t cusolverH = NULL;
// cublasHandle_t cublasH = NULL;
// cublasStatus_t cublas_status = CUBLAS_STATUS_SUCCESS;
// cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS;
// cudaError_t cudaStat1 = cudaSuccess;
// cudaError_t cudaStat2 = cudaSuccess;
// cudaError_t cudaStat3 = cudaSuccess;
// cudaError_t cudaStat4 = cudaSuccess;
// const int m = HH.rows(); const int lda = m; const int ldb = m; const int nrhs = 1; // number of right hand side vectors
// double *XC = new double[ldb*nrhs];
//
// double *d_A = NULL; // linear memory of GPU
// double *d_tau = NULL; // linear memory of GPU
// double *d_B = NULL; int *devInfo = NULL; // info in gpu (device copy)
// double *d_work = NULL;
// int lwork = 0;
// int info_gpu = 0;
// const double one = 1;
//
// cusolver_status = cusolverDnCreate(&cusolverH);
// assert(CUSOLVER_STATUS_SUCCESS == cusolver_status);
// printf("after cusolver create:\r\n");
// cublas_status = cublasCreate(&cublasH);
// assert(CUBLAS_STATUS_SUCCESS == cublas_status);
// printf("after cublas create:\r\n");
//
// cudaStat1 = cudaMalloc((void**)&d_A, sizeof(double) * lda * m);
// cudaStat2 = cudaMalloc((void**)&d_tau, sizeof(double) * m);
// cudaStat3 = cudaMalloc((void**)&d_B, sizeof(double) * ldb * nrhs);
// cudaStat4 = cudaMalloc((void**)&devInfo, sizeof(int));
// assert(cudaSuccess == cudaStat1);
// assert(cudaSuccess == cudaStat2);
// assert(cudaSuccess == cudaStat3);
// assert(cudaSuccess == cudaStat4);
// cudaStat1 = cudaMemcpy(d_A, HH.data(), sizeof(double) * lda * m, cudaMemcpyHostToDevice);
// cudaStat2 = cudaMemcpy(d_B, fff.data(), sizeof(double) * ldb * nrhs, cudaMemcpyHostToDevice);
// assert(cudaSuccess == cudaStat1); assert(cudaSuccess == cudaStat2);
//
// // step 3: query working space of geqrf and ormqr
// cusolver_status = cusolverDnDgeqrf_bufferSize( cusolverH, m, m, d_A, lda, &lwork);
// assert (cusolver_status == CUSOLVER_STATUS_SUCCESS);
// cudaStat1 = cudaMalloc((void**)&d_work, sizeof(double)*lwork);
// printf("after initialisation:\r\n");
// assert(cudaSuccess == cudaStat1);
// // step 4: compute QR factorization
// cusolver_status = cusolverDnDgeqrf( cusolverH, m, m, d_A, lda, d_tau, d_work, lwork, devInfo);
// cudaStat1 = cudaDeviceSynchronize();
// assert(CUSOLVER_STATUS_SUCCESS == cusolver_status);
// assert(cudaSuccess == cudaStat1);
// printf("after QR factorization:\r\n");
// // check if QR is good or not
// cudaStat1 = cudaMemcpy(&info_gpu, devInfo, sizeof(int), cudaMemcpyDeviceToHost);
// assert(cudaSuccess == cudaStat1);
// printf("after geqrf: info_gpu = %d\n", info_gpu);
// assert(0 == info_gpu);
// // step 5: compute Q^T*B
// cusolver_status= cusolverDnDormqr( cusolverH, CUBLAS_SIDE_LEFT, CUBLAS_OP_T, m, nrhs, m, d_A, lda, d_tau, d_B, ldb, d_work, lwork, devInfo);
// cudaStat1 = cudaDeviceSynchronize();
// assert(CUSOLVER_STATUS_SUCCESS == cusolver_status);
// assert(cudaSuccess == cudaStat1);
//
// // check if QR is good or not
// cudaStat1 = cudaMemcpy(&info_gpu, devInfo, sizeof(int), cudaMemcpyDeviceToHost);
// assert(cudaSuccess == cudaStat1);
// printf("after ormqr: info_gpu = %d\n", info_gpu);
// assert(0 == info_gpu);
// // step 6: compute x = R \ Q^T*B
// cublas_status = cublasDtrsm( cublasH, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, m, nrhs, &one, d_A, lda, d_B, ldb);
// cudaStat1 = cudaDeviceSynchronize(); assert(CUBLAS_STATUS_SUCCESS == cublas_status);
// assert(cudaSuccess == cudaStat1);
// cudaStat1 = cudaMemcpy(XC, d_B, sizeof(double)*ldb*nrhs, cudaMemcpyDeviceToHost);
// assert(cudaSuccess == cudaStat1);
//
// /*printf("X = (matlab base-1)\n");
// printMatrix(m, nrhs, XC, ldb, "X"); */
//
// // free resources
// if (d_A ) cudaFree(d_A);
// if (d_tau ) cudaFree(d_tau);
// if (d_B ) cudaFree(d_B);
// if (devInfo) cudaFree(devInfo);
// if (d_work ) cudaFree(d_work);
// if (cublasH ) cublasDestroy(cublasH);
// if (cusolverH) cusolverDnDestroy(cusolverH);
// cudaDeviceReset();
//
//
//
// count++;
// printf("%i\r\n", count);
// }
// return 0;
//}
}
}
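// SetupLevel2: sizes and fills TX1 and CN with the same 15 x 2 grid (3 distinct values in column 0, and 0..300 in steps of 75 in column 1) and sets the 1 x 2 parameter rows A and C.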
void SetupLevel2(Eigen::MatrixXd &TX1, Eigen::MatrixXd &CN, Eigen::MatrixXd &A, Eigen::MatrixXd &C)
{
	TX1.resize(15, 2);
	CN.resize(15, 2);
	C.resize(1, 2);
	C << 1.73, 600;
	A.resize(1, 2);
	A << 2, 4;
TX1(0, 0) = 0;
TX1(1, 0) = 0;
TX1(2, 0) = 0;
TX1(3, 0) = 0;
TX1(4, 0) = 0;
TX1(5, 0) = 0.432499999999999;
TX1(6, 0) = 0.432499999999999;
TX1(7, 0) = 0.432499999999999;
TX1(8, 0) = 0.432499999999999;
TX1(9, 0) = 0.432499999999999;
TX1(10, 0) = 0.864999999999999;
TX1(11, 0) = 0.864999999999999;
TX1(12, 0) = 0.864999999999999;
TX1(13, 0) = 0.864999999999999;
TX1(14, 0) = 0.864999999999999;
TX1(0, 1) = 0;
TX1(1, 1) = 75;
TX1(2, 1) = 150;
TX1(3, 1) = 225;
TX1(4, 1) = 300;
TX1(5, 1) = 0;
TX1(6, 1) = 75;
TX1(7, 1) = 150;
TX1(8, 1) = 225;
TX1(9, 1) = 300;
TX1(10, 1) = 0;
TX1(11, 1) = 75;
TX1(12, 1) = 150;
TX1(13, 1) = 225;
TX1(14, 1) = 300;
CN(0, 0) = 0;
CN(1, 0) = 0;
CN(2, 0) = 0;
CN(3, 0) = 0;
CN(4, 0) = 0;
CN(5, 0) = 0.432499999999999;
CN(6, 0) = 0.432499999999999;
CN(7, 0) = 0.432499999999999;
CN(8, 0) = 0.432499999999999;
CN(9, 0) = 0.432499999999999;
CN(10, 0) = 0.864999999999999;
CN(11, 0) = 0.864999999999999;
CN(12, 0) = 0.864999999999999;
CN(13, 0) = 0.864999999999999;
CN(14, 0) = 0.864999999999999;
CN(0, 1) = 0;
CN(1, 1) = 75;
CN(2, 1) = 150;
CN(3, 1) = 225;
CN(4, 1) = 300;
CN(5, 1) = 0;
CN(6, 1) = 75;
CN(7, 1) = 150;
CN(8, 1) = 225;
CN(9, 1) = 300;
CN(10, 1) = 0;
CN(11, 1) = 75;
CN(12, 1) = 150;
CN(13, 1) = 225;
CN(14, 1) = 300;
}
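// GetTX7: builds a 325 x 2 grid: column 0 takes 5 values from 0 to 0.865 (each repeated 65 times) and column 1 sweeps 0 to 300 in steps of 4.6875 within each block.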
MatrixXd GetTX7()
{
MatrixXd TX1(325, 2);
TX1(0, 0) = 0;
TX1(1, 0) = 0;
TX1(2, 0) = 0;
TX1(3, 0) = 0;
TX1(4, 0) = 0;
TX1(5, 0) = 0;
TX1(6, 0) = 0;
TX1(7, 0) = 0;
TX1(8, 0) = 0;
TX1(9, 0) = 0;
TX1(10, 0) = 0;
TX1(11, 0) = 0;
TX1(12, 0) = 0;
TX1(13, 0) = 0;
TX1(14, 0) = 0;
TX1(15, 0) = 0;
TX1(16, 0) = 0;
TX1(17, 0) = 0;
TX1(18, 0) = 0;
TX1(19, 0) = 0;
TX1(20, 0) = 0;
TX1(21, 0) = 0;
TX1(22, 0) = 0;
TX1(23, 0) = 0;
TX1(24, 0) = 0;
TX1(25, 0) = 0;
TX1(26, 0) = 0;
TX1(27, 0) = 0;
TX1(28, 0) = 0;
TX1(29, 0) = 0;
TX1(30, 0) = 0;
TX1(31, 0) = 0;
TX1(32, 0) = 0;
TX1(33, 0) = 0;
TX1(34, 0) = 0;
TX1(35, 0) = 0;
TX1(36, 0) = 0;
TX1(37, 0) = 0;
TX1(38, 0) = 0;
TX1(39, 0) = 0;
TX1(40, 0) = 0;
TX1(41, 0) = 0;
TX1(42, 0) = 0;
TX1(43, 0) = 0;
TX1(44, 0) = 0;
TX1(45, 0) = 0;
TX1(46, 0) = 0;
TX1(47, 0) = 0;
TX1(48, 0) = 0;
TX1(49, 0) = 0;
TX1(50, 0) = 0;
TX1(51, 0) = 0;
TX1(52, 0) = 0;
TX1(53, 0) = 0;
TX1(54, 0) = 0;
TX1(55, 0) = 0;
TX1(56, 0) = 0;
TX1(57, 0) = 0;
TX1(58, 0) = 0;
TX1(59, 0) = 0;
TX1(60, 0) = 0;
TX1(61, 0) = 0;
TX1(62, 0) = 0;
TX1(63, 0) = 0;
TX1(64, 0) = 0;
TX1(65, 0) = 0.21625;
TX1(66, 0) = 0.21625;
TX1(67, 0) = 0.21625;
TX1(68, 0) = 0.21625;
TX1(69, 0) = 0.21625;
TX1(70, 0) = 0.21625;
TX1(71, 0) = 0.21625;
TX1(72, 0) = 0.21625;
TX1(73, 0) = 0.21625;
TX1(74, 0) = 0.21625;
TX1(75, 0) = 0.21625;
TX1(76, 0) = 0.21625;
TX1(77, 0) = 0.21625;
TX1(78, 0) = 0.21625;
TX1(79, 0) = 0.21625;
TX1(80, 0) = 0.21625;
TX1(81, 0) = 0.21625;
TX1(82, 0) = 0.21625;
TX1(83, 0) = 0.21625;
TX1(84, 0) = 0.21625;
TX1(85, 0) = 0.21625;
TX1(86, 0) = 0.21625;
TX1(87, 0) = 0.21625;
TX1(88, 0) = 0.21625;
TX1(89, 0) = 0.21625;
TX1(90, 0) = 0.21625;
TX1(91, 0) = 0.21625;
TX1(92, 0) = 0.21625;
TX1(93, 0) = 0.21625;
TX1(94, 0) = 0.21625;
TX1(95, 0) = 0.21625;
TX1(96, 0) = 0.21625;
TX1(97, 0) = 0.21625;
TX1(98, 0) = 0.21625;
TX1(99, 0) = 0.21625;
TX1(100, 0) = 0.21625;
TX1(101, 0) = 0.21625;
TX1(102, 0) = 0.21625;
TX1(103, 0) = 0.21625;
TX1(104, 0) = 0.21625;
TX1(105, 0) = 0.21625;
TX1(106, 0) = 0.21625;
TX1(107, 0) = 0.21625;
TX1(108, 0) = 0.21625;
TX1(109, 0) = 0.21625;
TX1(110, 0) = 0.21625;
TX1(111, 0) = 0.21625;
TX1(112, 0) = 0.21625;
TX1(113, 0) = 0.21625;
TX1(114, 0) = 0.21625;
TX1(115, 0) = 0.21625;
TX1(116, 0) = 0.21625;
TX1(117, 0) = 0.21625;
TX1(118, 0) = 0.21625;
TX1(119, 0) = 0.21625;
TX1(120, 0) = 0.21625;
TX1(121, 0) = 0.21625;
TX1(122, 0) = 0.21625;
TX1(123, 0) = 0.21625;
TX1(124, 0) = 0.21625;
TX1(125, 0) = 0.21625;
TX1(126, 0) = 0.21625;
TX1(127, 0) = 0.21625;
TX1(128, 0) = 0.21625;
TX1(129, 0) = 0.21625;
TX1(130, 0) = 0.432499999999999;
TX1(131, 0) = 0.432499999999999;
TX1(132, 0) = 0.432499999999999;
TX1(133, 0) = 0.432499999999999;
TX1(134, 0) = 0.432499999999999;
TX1(135, 0) = 0.432499999999999;
TX1(136, 0) = 0.432499999999999;
TX1(137, 0) = 0.432499999999999;
TX1(138, 0) = 0.432499999999999;
TX1(139, 0) = 0.432499999999999;
TX1(140, 0) = 0.432499999999999;
TX1(141, 0) = 0.432499999999999;
TX1(142, 0) = 0.432499999999999;
TX1(143, 0) = 0.432499999999999;
TX1(144, 0) = 0.432499999999999;
TX1(145, 0) = 0.432499999999999;
TX1(146, 0) = 0.432499999999999;
TX1(147, 0) = 0.432499999999999;
TX1(148, 0) = 0.432499999999999;
TX1(149, 0) = 0.432499999999999;
TX1(150, 0) = 0.432499999999999;
TX1(151, 0) = 0.432499999999999;
TX1(152, 0) = 0.432499999999999;
TX1(153, 0) = 0.432499999999999;
TX1(154, 0) = 0.432499999999999;
TX1(155, 0) = 0.432499999999999;
TX1(156, 0) = 0.432499999999999;
TX1(157, 0) = 0.432499999999999;
TX1(158, 0) = 0.432499999999999;
TX1(159, 0) = 0.432499999999999;
TX1(160, 0) = 0.432499999999999;
TX1(161, 0) = 0.432499999999999;
TX1(162, 0) = 0.432499999999999;
TX1(163, 0) = 0.432499999999999;
TX1(164, 0) = 0.432499999999999;
TX1(165, 0) = 0.432499999999999;
TX1(166, 0) = 0.432499999999999;
TX1(167, 0) = 0.432499999999999;
TX1(168, 0) = 0.432499999999999;
TX1(169, 0) = 0.432499999999999;
TX1(170, 0) = 0.432499999999999;
TX1(171, 0) = 0.432499999999999;
TX1(172, 0) = 0.432499999999999;
TX1(173, 0) = 0.432499999999999;
TX1(174, 0) = 0.432499999999999;
TX1(175, 0) = 0.432499999999999;
TX1(176, 0) = 0.432499999999999;
TX1(177, 0) = 0.432499999999999;
TX1(178, 0) = 0.432499999999999;
TX1(179, 0) = 0.432499999999999;
TX1(180, 0) = 0.432499999999999;
TX1(181, 0) = 0.432499999999999;
TX1(182, 0) = 0.432499999999999;
TX1(183, 0) = 0.432499999999999;
TX1(184, 0) = 0.432499999999999;
TX1(185, 0) = 0.432499999999999;
TX1(186, 0) = 0.432499999999999;
TX1(187, 0) = 0.432499999999999;
TX1(188, 0) = 0.432499999999999;
TX1(189, 0) = 0.432499999999999;
TX1(190, 0) = 0.432499999999999;
TX1(191, 0) = 0.432499999999999;
TX1(192, 0) = 0.432499999999999;
TX1(193, 0) = 0.432499999999999;
TX1(194, 0) = 0.432499999999999;
TX1(195, 0) = 0.648749999999999;
TX1(196, 0) = 0.648749999999999;
TX1(197, 0) = 0.648749999999999;
TX1(198, 0) = 0.648749999999999;
TX1(199, 0) = 0.648749999999999;
TX1(200, 0) = 0.648749999999999;
TX1(201, 0) = 0.648749999999999;
TX1(202, 0) = 0.648749999999999;
TX1(203, 0) = 0.648749999999999;
TX1(204, 0) = 0.648749999999999;
TX1(205, 0) = 0.648749999999999;
TX1(206, 0) = 0.648749999999999;
TX1(207, 0) = 0.648749999999999;
TX1(208, 0) = 0.648749999999999;
TX1(209, 0) = 0.648749999999999;
TX1(210, 0) = 0.648749999999999;
TX1(211, 0) = 0.648749999999999;
TX1(212, 0) = 0.648749999999999;
TX1(213, 0) = 0.648749999999999;
TX1(214, 0) = 0.648749999999999;
TX1(215, 0) = 0.648749999999999;
TX1(216, 0) = 0.648749999999999;
TX1(217, 0) = 0.648749999999999;
TX1(218, 0) = 0.648749999999999;
TX1(219, 0) = 0.648749999999999;
TX1(220, 0) = 0.648749999999999;
TX1(221, 0) = 0.648749999999999;
TX1(222, 0) = 0.648749999999999;
TX1(223, 0) = 0.648749999999999;
TX1(224, 0) = 0.648749999999999;
TX1(225, 0) = 0.648749999999999;
TX1(226, 0) = 0.648749999999999;
TX1(227, 0) = 0.648749999999999;
TX1(228, 0) = 0.648749999999999;
TX1(229, 0) = 0.648749999999999;
TX1(230, 0) = 0.648749999999999;
TX1(231, 0) = 0.648749999999999;
TX1(232, 0) = 0.648749999999999;
TX1(233, 0) = 0.648749999999999;
TX1(234, 0) = 0.648749999999999;
TX1(235, 0) = 0.648749999999999;
TX1(236, 0) = 0.648749999999999;
TX1(237, 0) = 0.648749999999999;
TX1(238, 0) = 0.648749999999999;
TX1(239, 0) = 0.648749999999999;
TX1(240, 0) = 0.648749999999999;
TX1(241, 0) = 0.648749999999999;
TX1(242, 0) = 0.648749999999999;
TX1(243, 0) = 0.648749999999999;
TX1(244, 0) = 0.648749999999999;
TX1(245, 0) = 0.648749999999999;
TX1(246, 0) = 0.648749999999999;
TX1(247, 0) = 0.648749999999999;
TX1(248, 0) = 0.648749999999999;
TX1(249, 0) = 0.648749999999999;
TX1(250, 0) = 0.648749999999999;
TX1(251, 0) = 0.648749999999999;
TX1(252, 0) = 0.648749999999999;
TX1(253, 0) = 0.648749999999999;
TX1(254, 0) = 0.648749999999999;
TX1(255, 0) = 0.648749999999999;
TX1(256, 0) = 0.648749999999999;
TX1(257, 0) = 0.648749999999999;
TX1(258, 0) = 0.648749999999999;
TX1(259, 0) = 0.648749999999999;
TX1(260, 0) = 0.864999999999999;
TX1(261, 0) = 0.864999999999999;
TX1(262, 0) = 0.864999999999999;
TX1(263, 0) = 0.864999999999999;
TX1(264, 0) = 0.864999999999999;
TX1(265, 0) = 0.864999999999999;
TX1(266, 0) = 0.864999999999999;
TX1(267, 0) = 0.864999999999999;
TX1(268, 0) = 0.864999999999999;
TX1(269, 0) = 0.864999999999999;
TX1(270, 0) = 0.864999999999999;
TX1(271, 0) = 0.864999999999999;
TX1(272, 0) = 0.864999999999999;
TX1(273, 0) = 0.864999999999999;
TX1(274, 0) = 0.864999999999999;
TX1(275, 0) = 0.864999999999999;
TX1(276, 0) = 0.864999999999999;
TX1(277, 0) = 0.864999999999999;
TX1(278, 0) = 0.864999999999999;
TX1(279, 0) = 0.864999999999999;
TX1(280, 0) = 0.864999999999999;
TX1(281, 0) = 0.864999999999999;
TX1(282, 0) = 0.864999999999999;
TX1(283, 0) = 0.864999999999999;
TX1(284, 0) = 0.864999999999999;
TX1(285, 0) = 0.864999999999999;
TX1(286, 0) = 0.864999999999999;
TX1(287, 0) = 0.864999999999999;
TX1(288, 0) = 0.864999999999999;
TX1(289, 0) = 0.864999999999999;
TX1(290, 0) = 0.864999999999999;
TX1(291, 0) = 0.864999999999999;
TX1(292, 0) = 0.864999999999999;
TX1(293, 0) = 0.864999999999999;
TX1(294, 0) = 0.864999999999999;
TX1(295, 0) = 0.864999999999999;
TX1(296, 0) = 0.864999999999999;
TX1(297, 0) = 0.864999999999999;
TX1(298, 0) = 0.864999999999999;
TX1(299, 0) = 0.864999999999999;
TX1(300, 0) = 0.864999999999999;
TX1(301, 0) = 0.864999999999999;
TX1(302, 0) = 0.864999999999999;
TX1(303, 0) = 0.864999999999999;
TX1(304, 0) = 0.864999999999999;
TX1(305, 0) = 0.864999999999999;
TX1(306, 0) = 0.864999999999999;
TX1(307, 0) = 0.864999999999999;
TX1(308, 0) = 0.864999999999999;
TX1(309, 0) = 0.864999999999999;
TX1(310, 0) = 0.864999999999999;
TX1(311, 0) = 0.864999999999999;
TX1(312, 0) = 0.864999999999999;
TX1(313, 0) = 0.864999999999999;
TX1(314, 0) = 0.864999999999999;
TX1(315, 0) = 0.864999999999999;
TX1(316, 0) = 0.864999999999999;
TX1(317, 0) = 0.864999999999999;
TX1(318, 0) = 0.864999999999999;
TX1(319, 0) = 0.864999999999999;
TX1(320, 0) = 0.864999999999999;
TX1(321, 0) = 0.864999999999999;
TX1(322, 0) = 0.864999999999999;
TX1(323, 0) = 0.864999999999999;
TX1(324, 0) = 0.864999999999999;
TX1(0, 1) = 0;
TX1(1, 1) = 4.6875;
TX1(2, 1) = 9.375;
TX1(3, 1) = 14.0625;
TX1(4, 1) = 18.75;
TX1(5, 1) = 23.4375;
TX1(6, 1) = 28.125;
TX1(7, 1) = 32.8125;
TX1(8, 1) = 37.5;
TX1(9, 1) = 42.1875;
TX1(10, 1) = 46.875;
TX1(11, 1) = 51.5625;
TX1(12, 1) = 56.25;
TX1(13, 1) = 60.9375;
TX1(14, 1) = 65.625;
TX1(15, 1) = 70.3125;
TX1(16, 1) = 75;
TX1(17, 1) = 79.6875;
TX1(18, 1) = 84.375;
TX1(19, 1) = 89.0625;
TX1(20, 1) = 93.75;
TX1(21, 1) = 98.4375;
TX1(22, 1) = 103.125;
TX1(23, 1) = 107.8125;
TX1(24, 1) = 112.5;
TX1(25, 1) = 117.1875;
TX1(26, 1) = 121.875;
TX1(27, 1) = 126.5625;
TX1(28, 1) = 131.25;
TX1(29, 1) = 135.9375;
TX1(30, 1) = 140.625;
TX1(31, 1) = 145.3125;
TX1(32, 1) = 150;
TX1(33, 1) = 154.6875;
TX1(34, 1) = 159.375;
TX1(35, 1) = 164.0625;
TX1(36, 1) = 168.75;
TX1(37, 1) = 173.4375;
TX1(38, 1) = 178.125;
TX1(39, 1) = 182.8125;
TX1(40, 1) = 187.5;
TX1(41, 1) = 192.1875;
TX1(42, 1) = 196.875;
TX1(43, 1) = 201.5625;
TX1(44, 1) = 206.25;
TX1(45, 1) = 210.9375;
TX1(46, 1) = 215.625;
TX1(47, 1) = 220.3125;
TX1(48, 1) = 225;
TX1(49, 1) = 229.6875;
TX1(50, 1) = 234.375;
TX1(51, 1) = 239.0625;
TX1(52, 1) = 243.75;
TX1(53, 1) = 248.4375;
TX1(54, 1) = 253.125;
TX1(55, 1) = 257.8125;
TX1(56, 1) = 262.5;
TX1(57, 1) = 267.1875;
TX1(58, 1) = 271.875;
TX1(59, 1) = 276.5625;
TX1(60, 1) = 281.25;
TX1(61, 1) = 285.9375;
TX1(62, 1) = 290.625;
TX1(63, 1) = 295.3125;
TX1(64, 1) = 300;
TX1(65, 1) = 0;
TX1(66, 1) = 4.6875;
TX1(67, 1) = 9.375;
TX1(68, 1) = 14.0625;
TX1(69, 1) = 18.75;
TX1(70, 1) = 23.4375;
TX1(71, 1) = 28.125;
TX1(72, 1) = 32.8125;
TX1(73, 1) = 37.5;
TX1(74, 1) = 42.1875;
TX1(75, 1) = 46.875;
TX1(76, 1) = 51.5625;
TX1(77, 1) = 56.25;
TX1(78, 1) = 60.9375;
TX1(79, 1) = 65.625;
TX1(80, 1) = 70.3125;
TX1(81, 1) = 75;
TX1(82, 1) = 79.6875;
TX1(83, 1) = 84.375;
TX1(84, 1) = 89.0625;
TX1(85, 1) = 93.75;
TX1(86, 1) = 98.4375;
TX1(87, 1) = 103.125;
TX1(88, 1) = 107.8125;
TX1(89, 1) = 112.5;
TX1(90, 1) = 117.1875;
TX1(91, 1) = 121.875;
TX1(92, 1) = 126.5625;
TX1(93, 1) = 131.25;
TX1(94, 1) = 135.9375;
TX1(95, 1) = 140.625;
TX1(96, 1) = 145.3125;
TX1(97, 1) = 150;
TX1(98, 1) = 154.6875;
TX1(99, 1) = 159.375;
TX1(100, 1) = 164.0625;
TX1(101, 1) = 168.75;
TX1(102, 1) = 173.4375;
TX1(103, 1) = 178.125;
TX1(104, 1) = 182.8125;
TX1(105, 1) = 187.5;
TX1(106, 1) = 192.1875;
TX1(107, 1) = 196.875;
TX1(108, 1) = 201.5625;
TX1(109, 1) = 206.25;
TX1(110, 1) = 210.9375;
TX1(111, 1) = 215.625;
TX1(112, 1) = 220.3125;
TX1(113, 1) = 225;
TX1(114, 1) = 229.6875;
TX1(115, 1) = 234.375;
TX1(116, 1) = 239.0625;
TX1(117, 1) = 243.75;
TX1(118, 1) = 248.4375;
TX1(119, 1) = 253.125;
TX1(120, 1) = 257.8125;
TX1(121, 1) = 262.5;
TX1(122, 1) = 267.1875;
TX1(123, 1) = 271.875;
TX1(124, 1) = 276.5625;
TX1(125, 1) = 281.25;
TX1(126, 1) = 285.9375;
TX1(127, 1) = 290.625;
TX1(128, 1) = 295.3125;
TX1(129, 1) = 300;
TX1(130, 1) = 0;
TX1(131, 1) = 4.6875;
TX1(132, 1) = 9.375;
TX1(133, 1) = 14.0625;
TX1(134, 1) = 18.75;
TX1(135, 1) = 23.4375;
TX1(136, 1) = 28.125;
TX1(137, 1) = 32.8125;
TX1(138, 1) = 37.5;
TX1(139, 1) = 42.1875;
TX1(140, 1) = 46.875;
TX1(141, 1) = 51.5625;
TX1(142, 1) = 56.25;
TX1(143, 1) = 60.9375;
TX1(144, 1) = 65.625;
TX1(145, 1) = 70.3125;
TX1(146, 1) = 75;
TX1(147, 1) = 79.6875;
TX1(148, 1) = 84.375;
TX1(149, 1) = 89.0625;
TX1(150, 1) = 93.75;
TX1(151, 1) = 98.4375;
TX1(152, 1) = 103.125;
TX1(153, 1) = 107.8125;
TX1(154, 1) = 112.5;
TX1(155, 1) = 117.1875;
TX1(156, 1) = 121.875;
TX1(157, 1) = 126.5625;
TX1(158, 1) = 131.25;
TX1(159, 1) = 135.9375;
TX1(160, 1) = 140.625;
TX1(161, 1) = 145.3125;
TX1(162, 1) = 150;
TX1(163, 1) = 154.6875;
TX1(164, 1) = 159.375;
TX1(165, 1) = 164.0625;
TX1(166, 1) = 168.75;
TX1(167, 1) = 173.4375;
TX1(168, 1) = 178.125;
TX1(169, 1) = 182.8125;
TX1(170, 1) = 187.5;
TX1(171, 1) = 192.1875;
TX1(172, 1) = 196.875;
TX1(173, 1) = 201.5625;
TX1(174, 1) = 206.25;
TX1(175, 1) = 210.9375;
TX1(176, 1) = 215.625;
TX1(177, 1) = 220.3125;
TX1(178, 1) = 225;
TX1(179, 1) = 229.6875;
TX1(180, 1) = 234.375;
TX1(181, 1) = 239.0625;
TX1(182, 1) = 243.75;
TX1(183, 1) = 248.4375;
TX1(184, 1) = 253.125;
TX1(185, 1) = 257.8125;
TX1(186, 1) = 262.5;
TX1(187, 1) = 267.1875;
TX1(188, 1) = 271.875;
TX1(189, 1) = 276.5625;
TX1(190, 1) = 281.25;
TX1(191, 1) = 285.9375;
TX1(192, 1) = 290.625;
TX1(193, 1) = 295.3125;
TX1(194, 1) = 300;
TX1(195, 1) = 0;
TX1(196, 1) = 4.6875;
TX1(197, 1) = 9.375;
TX1(198, 1) = 14.0625;
TX1(199, 1) = 18.75;
TX1(200, 1) = 23.4375;
TX1(201, 1) = 28.125;
TX1(202, 1) = 32.8125;
TX1(203, 1) = 37.5;
TX1(204, 1) = 42.1875;
TX1(205, 1) = 46.875;
TX1(206, 1) = 51.5625;
TX1(207, 1) = 56.25;
TX1(208, 1) = 60.9375;
TX1(209, 1) = 65.625;
TX1(210, 1) = 70.3125;
TX1(211, 1) = 75;
TX1(212, 1) = 79.6875;
TX1(213, 1) = 84.375;
TX1(214, 1) = 89.0625;
TX1(215, 1) = 93.75;
TX1(216, 1) = 98.4375;
TX1(217, 1) = 103.125;
TX1(218, 1) = 107.8125;
TX1(219, 1) = 112.5;
TX1(220, 1) = 117.1875;
TX1(221, 1) = 121.875;
TX1(222, 1) = 126.5625;
TX1(223, 1) = 131.25;
TX1(224, 1) = 135.9375;
TX1(225, 1) = 140.625;
TX1(226, 1) = 145.3125;
TX1(227, 1) = 150;
TX1(228, 1) = 154.6875;
TX1(229, 1) = 159.375;
TX1(230, 1) = 164.0625;
TX1(231, 1) = 168.75;
TX1(232, 1) = 173.4375;
TX1(233, 1) = 178.125;
TX1(234, 1) = 182.8125;
TX1(235, 1) = 187.5;
TX1(236, 1) = 192.1875;
TX1(237, 1) = 196.875;
TX1(238, 1) = 201.5625;
TX1(239, 1) = 206.25;
TX1(240, 1) = 210.9375;
TX1(241, 1) = 215.625;
TX1(242, 1) = 220.3125;
TX1(243, 1) = 225;
TX1(244, 1) = 229.6875;
TX1(245, 1) = 234.375;
TX1(246, 1) = 239.0625;
TX1(247, 1) = 243.75;
TX1(248, 1) = 248.4375;
TX1(249, 1) = 253.125;
TX1(250, 1) = 257.8125;
TX1(251, 1) = 262.5;
TX1(252, 1) = 267.1875;
TX1(253, 1) = 271.875;
TX1(254, 1) = 276.5625;
TX1(255, 1) = 281.25;
TX1(256, 1) = 285.9375;
TX1(257, 1) = 290.625;
TX1(258, 1) = 295.3125;
TX1(259, 1) = 300;
TX1(260, 1) = 0;
TX1(261, 1) = 4.6875;
TX1(262, 1) = 9.375;
TX1(263, 1) = 14.0625;
TX1(264, 1) = 18.75;
TX1(265, 1) = 23.4375;
TX1(266, 1) = 28.125;
TX1(267, 1) = 32.8125;
TX1(268, 1) = 37.5;
TX1(269, 1) = 42.1875;
TX1(270, 1) = 46.875;
TX1(271, 1) = 51.5625;
TX1(272, 1) = 56.25;
TX1(273, 1) = 60.9375;
TX1(274, 1) = 65.625;
TX1(275, 1) = 70.3125;
TX1(276, 1) = 75;
TX1(277, 1) = 79.6875;
TX1(278, 1) = 84.375;
TX1(279, 1) = 89.0625;
TX1(280, 1) = 93.75;
TX1(281, 1) = 98.4375;
TX1(282, 1) = 103.125;
TX1(283, 1) = 107.8125;
TX1(284, 1) = 112.5;
TX1(285, 1) = 117.1875;
TX1(286, 1) = 121.875;
TX1(287, 1) = 126.5625;
TX1(288, 1) = 131.25;
TX1(289, 1) = 135.9375;
TX1(290, 1) = 140.625;
TX1(291, 1) = 145.3125;
TX1(292, 1) = 150;
TX1(293, 1) = 154.6875;
TX1(294, 1) = 159.375;
TX1(295, 1) = 164.0625;
TX1(296, 1) = 168.75;
TX1(297, 1) = 173.4375;
TX1(298, 1) = 178.125;
TX1(299, 1) = 182.8125;
TX1(300, 1) = 187.5;
TX1(301, 1) = 192.1875;
TX1(302, 1) = 196.875;
TX1(303, 1) = 201.5625;
TX1(304, 1) = 206.25;
TX1(305, 1) = 210.9375;
TX1(306, 1) = 215.625;
TX1(307, 1) = 220.3125;
TX1(308, 1) = 225;
TX1(309, 1) = 229.6875;
TX1(310, 1) = 234.375;
TX1(311, 1) = 239.0625;
TX1(312, 1) = 243.75;
TX1(313, 1) = 248.4375;
TX1(314, 1) = 253.125;
TX1(315, 1) = 257.8125;
TX1(316, 1) = 262.5;
TX1(317, 1) = 267.1875;
TX1(318, 1) = 271.875;
TX1(319, 1) = 276.5625;
TX1(320, 1) = 281.25;
TX1(321, 1) = 285.9375;
TX1(322, 1) = 290.625;
TX1(323, 1) = 295.3125;
TX1(324, 1) = 300;
return TX1;
}
void SetupLevel7(Eigen::MatrixXd &TX1, Eigen::MatrixXd &CN, Eigen::MatrixXd &A, Eigen::MatrixXd &C)
{
CN.resize(325, 2);
//C(1, 2);
//C(0, 1) = 1.73;
//C(0, 2) = 600;
//A(1, 2);
//A(0, 1) = 2;
//A(0, 2) = 64;
}
int main() {
MatrixXd TX1 = GetTX7();
MatrixXd CN = GetTX7();
MatrixXd C(1, 2);
MatrixXd A(1, 2);
C << 1.73,600;
A << 2, 64;
for (int i = 0; i < 1; i++)
{
printf("i=%i", i);
Leicester::CudaLib::CudaRBF::Gaussian2D(TX1, CN, A, C);
}
return 0;
}
|
64874c6b6bbfd4af575c97fbc45e6b8cc1f0e342.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#include <sys/time.h>
// Device code
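// Every launched thread performs the same full traversal: it walks the pitched 2D array
// row by row (rows are `pitch` bytes apart, hence the char* arithmetic) and squares each
// element in place. There is no per-thread indexing, so the kernel acts purely as a
// repeatable timing load for the host-side benchmark loop below.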
__global__ void kernel(int* d_A, int pitch,int height,int width)
{
for (int c = 0; c < height; ++c) {
for (int r = 0; r < width; ++r) {
int* row = (int*)((char*)d_A + r * pitch);
row[c] = row[c]*row[c];
}
}
}
//Host Code
int main()
{
int* d_A;
size_t pitch;
int *A;
int height,width;
height = width = 32;
int rows = height;
int cols = width;
A = (int *)malloc(rows*cols*sizeof(int));
for (int i = 0; i < rows*cols; i++) A[i] = i;
hipMallocPitch((void**)&d_A, &pitch, width * sizeof(int), height);
hipMemcpy2D(d_A, pitch, A, sizeof(int)*cols, sizeof(int)*cols, rows, hipMemcpyHostToDevice);
for(int v1=29;v1>=1;v1-=2){
for(int v2=10000;v2>=100;v2-=100){
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
for(int j=0;j<5000000;j++)
hipLaunchKernelGGL(( kernel), dim3(100), dim3(32), 0, 0, d_A, pitch,v2,v1);
gettimeofday(&tv2, NULL);
printf ("%d %d %f\n",v1,v2,
(double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
(double) (tv2.tv_sec - tv1.tv_sec));
hipDeviceSynchronize();
}}
// for(int i=0;i<rows*cols;i++)
// printf("%d %d\n",A[i],d_A[i]);
return 0;
}
| 64874c6b6bbfd4af575c97fbc45e6b8cc1f0e342.cu | #include<stdio.h>
#include<cuda.h>
#include <sys/time.h>
// Device code
__global__ void kernel(int* d_A, int pitch,int height,int width)
{
for (int c = 0; c < height; ++c) {
for (int r = 0; r < width; ++r) {
int* row = (int*)((char*)d_A + r * pitch);
row[c] = row[c]*row[c];
}
}
}
//Host Code
int main()
{
int* d_A;
size_t pitch;
int *A;
int height,width;
height = width = 32;
int rows = height;
int cols = width;
A = (int *)malloc(rows*cols*sizeof(int));
for (int i = 0; i < rows*cols; i++) A[i] = i;
cudaMallocPitch((void**)&d_A, &pitch, width * sizeof(int), height);
cudaMemcpy2D(d_A, pitch, A, sizeof(int)*cols, sizeof(int)*cols, rows, cudaMemcpyHostToDevice);
for(int v1=29;v1>=1;v1-=2){
for(int v2=10000;v2>=100;v2-=100){
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
for(int j=0;j<5000000;j++)
kernel<<<100, 32>>>(d_A, pitch,v2,v1);
gettimeofday(&tv2, NULL);
printf ("%d %d %f\n",v1,v2,
(double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
(double) (tv2.tv_sec - tv1.tv_sec));
cudaDeviceSynchronize();
}}
// for(int i=0;i<rows*cols;i++)
// printf("%d %d\n",A[i],d_A[i]);
return 0;
}
|
1cca2bc2ffaa2b835ef9198221b2e4b822a52be4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "obsToMod.h"
#include "kernel_common.h"
#include "mirrored_memory.h"
namespace dart {
static const LossFunctionType lossFunction = HuberLoss;
// -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=-
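// The kernels below implement the observation-to-model term: observed depth points are
// associated to the model's signed-distance-function grids (data association), and the
// associated points are then used to accumulate Gauss-Newton normal equations
// (JTJ, JTr and the robustified error) under a squared or Huber loss.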
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_errorAndDataAssociationObsToMod(const float4 * obsVertMap,
const float4 * obsNormMap,
const int width,
const int height,
const SE3 T_mc,
const SE3 * T_fms,
const int * sdfFrames,
const Grid3D<float> * sdfs,
const int nSdfs,
const float distanceThreshold,
const float normThreshold,
const float planeOffset,
const float3 planeNormal,
int * lastElement,
DataAssociatedPoint * pts,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height) {
const int index = x + y*width;
if (dbgDA) { debugDataAssociation[index] = -1; }
if (dbgErr) { debugError[index] = NAN; }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & xObs_c = obsVertMap[index];
if (xObs_c.w > 0) {
const float4 xObs_m = T_mc*xObs_c;
if (dot(make_float3(xObs_m),planeNormal) >= planeOffset) {
// calculate distance
float sdfError = 1e20;
int grid = -1;
for (int g=0; g < nSdfs; ++g) {
const int f = sdfFrames[g];
const float4 xObs_f = T_fms[f]*xObs_m;
const Grid3D<float> & sdf = sdfs[g];
const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f));
if (!sdf.isInBoundsGradientInterp(xObs_g)) {
continue;
}
const float d = (sdf.getValueInterpolated(xObs_g))*sdf.resolution;
//if (fabs(d) < fabs(sdf_error)) {
if (d < sdfError) {
sdfError = d;
grid = g;
}
}
// skip unassociated points and points beyond the distance threshold
if (sdfError*sdfError > distanceThreshold*distanceThreshold) { }
else {
const int f = sdfFrames[grid];
const float4 xObs_f = T_fms[f]*xObs_m;
const Grid3D<float> & sdf = sdfs[grid];
const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f));
// TODO: figure out what's going on with the -1
const float4 nPred = -1*(SE3Invert( T_fms[f]*T_mc )*normalize(make_float4(sdf.getGradientInterpolated(xObs_g),0)));
if (dbgNorm) { debugNorm[index] = nPred; }
float4 v = obsNormMap[index];
float3 nObs = make_float3(0,0,0);
if (v.w > 0.0) {
v.w = 0;
nObs = make_float3(v);
if (dot(nPred,v) < normThreshold ) {
return;
}
}
if (dbgDA) { debugDataAssociation[index] = grid; }
if (dbgErr) { debugError[index] = sdfError; }
int myElement = atomicAdd(lastElement,1);
DataAssociatedPoint dt;
dt.index = index;
dt.dataAssociation = grid;
dt.error = sdfError;
pts[myElement] = dt;
}
}
}
}
}
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_errorAndDataAssociationObsToModMultiModel(const float4 * obsVertMap,
const float4 * obsNormMap,
const int width,
const int height,
const int nModels,
const SE3 * T_mcs,
const SE3 * const * T_fms,
const int * const * sdfFrames,
const Grid3D<float> * const * sdfs,
const int * nSdfs,
const float * distanceThresholds,
const float * normThresholds,
const float * planeOffsets,
const float3 * planeNormals,
int * lastElement,
DataAssociatedPoint * * pts,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
if (dbgDA) { debugDataAssociation[index] = -1; }
if (dbgErr) { debugError[index] = NAN; }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & xObs_c = obsVertMap[index];
if (xObs_c.w > 0) {
float sdfError = 1e20;
int associatedModel = -1;
int associatedGrid = -1;
for (int m=0; m<nModels; ++m) {
const float4 xObs_m = T_mcs[m]*xObs_c;
const float & planeOffset = planeOffsets[m];
const float3 & planeNormal = planeNormals[m];
if (dot(make_float3(xObs_m),planeNormal) >= planeOffset) {
const int mNSdfs = nSdfs[m];
const int * mSdfFrames = sdfFrames[m];
const SE3 * mT_fms = T_fms[m];
const Grid3D<float> * mSdfs = sdfs[m];
for (int g=0; g<mNSdfs; ++g) {
const int f = mSdfFrames[g];
const float4 xObs_f = mT_fms[f]*xObs_m;
const Grid3D<float> & sdf = mSdfs[g];
//printf("model %d sdf %d is in frame %d\n",m,g,f);
//printf("%f ",sdf.resolution);
const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f));
if (!sdf.isInBoundsGradientInterp(xObs_g)) {
continue;
}
const float d = (sdf.getValueInterpolated(xObs_g))*sdf.resolution;
//printf("%f ",d);
// if (fabs(d) < fabs(sdfError) {
if (d < sdfError) {
//printf(".");
if (d*d < distanceThresholds[m]*distanceThresholds[m]) {
//printf("*");
sdfError = d;
associatedGrid = g;
associatedModel = m;
}
}
}
}
}
if (associatedModel != -1) {
// const int f = sdfFrames[associatedModel][associatedGrid];
// const float4 xObs_m = T_mcs[associatedModel]*xObs_c;
// const float4 xObs_f = T_fms[associatedModel][f]*xObs_m;
// const Grid3D<float> &sdf = sdfs[associatedModel][associatedGrid];
// const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f));
// const float4 nPred = 1*(SE3Invert( T_fms[associatedModel][f]*T_mcs[associatedModel] )*normalize(make_float4(sdf.getGradientInterpolated(xObs_g),0)));
// float4 v = obsNormMap[index];
// float3 nObs = make_float3(0,0,0);
// if (v.w > 0.0) {
// v.w = 0;
// nObs = make_float3(v);
// if (dot(nPred,v) >= normThresholds[associatedModel]) {
if (dbgDA) { debugDataAssociation[index] = ((associatedModel << 16) | associatedGrid); }
if (dbgErr) { debugError[index] = sdfError; }
if (dbgNorm) { debugNorm[index] = obsNormMap[index]; }
int myElement = atomicAdd(&lastElement[associatedModel],1);
DataAssociatedPoint * mPts = pts[associatedModel];
DataAssociatedPoint dt;
dt.index = index;
dt.dataAssociation = associatedGrid;
dt.error = sdfError;
mPts[myElement] = dt;
// }
// }
}
}
}
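// Normal-equation accumulation for the associated points: one thread per point computes
// the point's error Jacobian in shared memory (dims floats per thread) and atomically adds
// its contributions to JTr, the packed lower triangle of JTJ, and the total error stored
// in `result`, switching to the Huber weighting when the residual exceeds huberDelta.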
template <bool dbgJs>
__global__ void gpu_normEqnsObsToMod(const int dims,
const DataAssociatedPoint * pts,
const float4 * obsVertMap,
const int nPoints,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const int * sdfFrames,
const Grid3D<float> * sdfs,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const float huberDelta,
float * result,
float4 * debugJs) {
extern __shared__ float s[];
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= nPoints) {
return;
}
if (dbgJs) { debugJs[index] = make_float4(0); }
const float4 xObs_m = T_mc*obsVertMap[pts[index].index];
// array declarations
float * J = &s[threadIdx.x*dims];
int obsFrame = sdfFrames[pts[index].dataAssociation];
const float4 xObs_f = T_fms[obsFrame]*xObs_m;
// compute SDF gradient
const int g = pts[index].dataAssociation;
const Grid3D<float> & sdf = sdfs[g];
const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f));
const float3 sdfGrad_f = sdf.getGradientInterpolated(xObs_g);
const float3 sdfGrad_m = SE3Rotate(T_mfs[obsFrame],sdfGrad_f);
getErrorJacobianOfModelPoint(J,xObs_m,obsFrame,sdfGrad_m,dims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
if (dbgJs) {
debugJs[index*dims + 0] = make_float4(1,0,0,1);
debugJs[index*dims + 1] = make_float4(0,1,0,1);
debugJs[index*dims + 2] = make_float4(0,0,1,1);
debugJs[index*dims + 3] = make_float4( 0,-xObs_m.z, xObs_m.y,1);
debugJs[index*dims + 4] = make_float4( xObs_m.z, 0,-xObs_m.x,1);
debugJs[index*dims + 5] = make_float4(-xObs_m.y, xObs_m.x, 0,1);
}
const float residual = pts[index].error;
float * JTr = result;
float * JTJ = &result[dims];
float * e = &result[dims + JTJSize(dims)];
switch(lossFunction) {
case SquaredLoss:
{
computeSquaredLossResult(dims,-residual,J,e,JTr,JTJ); // TODO: why negative again?
}
break;
case HuberLoss:
{
if (fabs(pts[index].error) < huberDelta ) {
computeSquaredLossResult(dims,-residual,J,e,JTr,JTJ); // TODO: why negative again?
}
else {
float v = huberDelta;
if (pts[index].error < 0) {
v = -v;
}
for (int i=0; i<dims; i++) {
if( J[i]==0.0f) continue;
atomicAdd(&JTr[i],v*-J[i]);
for (int j=0; j<=i; j++) {
float v2 = J[i]*J[j];
atomicAdd(&JTJ[((i*(i+1))>>1) + j],v2);
}
}
atomicAdd(e,huberDelta * (fabs(pts[index].error) - 0.5*huberDelta));
}
}
break;
}
}
__global__ void gpu_normEqnsObsToModReduced(const int fullDims,
const int redDims,
const DataAssociatedPoint * pts,
const float4 * obsVertMap,
const int nPoints,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const int * sdfFrames,
const Grid3D<float> * sdfs,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const float huberDelta,
const float * dtheta_dalpha,
float * result) {
extern __shared__ float s[];
int index = blockIdx.x*blockDim.x + threadIdx.x;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (index >= nPoints) {
return;
}
const float4 xObs_m = T_mc*obsVertMap[pts[index].index];
// array declarations
float * de_dtheta = &s[tid*(fullDims+redDims)];
float * J = &s[tid*(fullDims+redDims) + fullDims];
int obsFrame = sdfFrames[pts[index].dataAssociation];
const float4 xObs_f = T_fms[obsFrame]*xObs_m;
// compute SDF gradient
const int g = pts[index].dataAssociation;
const Grid3D<float> & sdf = sdfs[g];
const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f));
const float3 sdfGrad_f = sdf.getGradientInterpolated(xObs_g);
const float3 sdfGrad_m = make_float3(SE3Rotate(T_mfs[obsFrame],make_float4(sdfGrad_f.x,sdfGrad_f.y,sdfGrad_f.z,0.0)));
getErrorJacobianOfModelPoint(de_dtheta,xObs_m,obsFrame,sdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
doPoseGradientReduction(J,de_dtheta,dtheta_dalpha,fullDims,redDims);
const float residual = pts[index].error;
float * JTr = result;
float * JTJ = &result[redDims];
float * e = &result[redDims + JTJSize(redDims)];
switch(lossFunction) {
case SquaredLoss:
{
computeSquaredLossResult(redDims,-residual,J,e,JTr,JTJ);
}
break;
case HuberLoss:
{
if (fabs(pts[index].error) < huberDelta ) {
computeSquaredLossResult(redDims,-residual,J,e,JTr,JTJ);
}
else {
float v = huberDelta;
if (pts[index].error < 0) {
v = -v;
}
for (int i=0; i<redDims; i++) {
if( J[i]==0.0f) continue;
atomicAdd(&JTr[i],v*-J[i]);
}
atomicAdd(e,huberDelta * (fabs(pts[index].error) - 0.5*huberDelta));
}
}
break;
}
}
__global__ void gpu_normEqnsObsToModParamMap(const int fullDims,
const int redDims,
const DataAssociatedPoint * pts,
const float4 * obsVertMap,
const int nPoints,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const int * sdfFrames,
const Grid3D<float> * sdfs,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const float huberDelta,
const int * dMapping,
float * result) {
extern __shared__ float s[];
int index = blockIdx.x*blockDim.x + threadIdx.x;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (index >= nPoints) {
return;
}
const float4 xObs_m = T_mc*obsVertMap[pts[index].index];
// array declarations
float * de_dtheta = &s[tid*(fullDims+redDims)];
float * J = &s[tid*(fullDims+redDims) + fullDims];
int obsFrame = sdfFrames[pts[index].dataAssociation];
const float4 xObs_f = T_fms[obsFrame]*xObs_m;
// compute SDF gradient
const int g = pts[index].dataAssociation;
const Grid3D<float> & sdf = sdfs[g];
const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f));
const float3 sdfGrad_f = sdf.getGradientInterpolated(xObs_g);
const float3 sdfGrad_m = make_float3(SE3Rotate(T_mfs[obsFrame],make_float4(sdfGrad_f.x,sdfGrad_f.y,sdfGrad_f.z,0.0)));
getErrorJacobianOfModelPoint(de_dtheta,xObs_m,obsFrame,sdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
doParamMapping(J,de_dtheta,dMapping,fullDims,redDims);
const float residual = pts[index].error;
float * JTr = result;
float * JTJ = &result[redDims];
float * e = &result[redDims + JTJSize(redDims)];
switch(lossFunction) {
case SquaredLoss:
{
computeSquaredLossResult(redDims,-residual,J,e,JTr,JTJ);
}
break;
case HuberLoss:
{
if (fabs(pts[index].error) < huberDelta ) {
computeSquaredLossResult(redDims,-residual,J,e,JTr,JTJ);
}
else {
float v = huberDelta;
if (pts[index].error < 0) {
v = -v;
}
for (int i=0; i<redDims; i++) {
if( J[i]==0.0f) continue;
atomicAdd(&JTr[i],v*-J[i]);
}
atomicAdd(e,huberDelta * (fabs(pts[index].error) - 0.5*huberDelta));
}
}
break;
}
}
void errorAndDataAssociation(const float4 * dObsVertMap,
const float4 * dObsNormMap,
const int width,
const int height,
const MirroredModel & model,
const OptimizationOptions & opts,
DataAssociatedPoint * dPts,
int * dLastElement,
int * hLastElement,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
dim3 block(16,8);
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
hipMemset(dLastElement,0,sizeof(int));
if (debugDataAssociation == 0) {
if (debugError == 0) {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToMod<false,false,false>), dim3(grid),dim3(block), 0, 0, dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToMod<false,false,true>), dim3(grid),dim3(block), 0, 0, dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
}
} else {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToMod<false,true,false>), dim3(grid),dim3(block), 0, 0, dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToMod<false,true,true>), dim3(grid),dim3(block), 0, 0, dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
}
}
}
else {
if (debugError == 0) {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToMod<true,false,false>), dim3(grid),dim3(block), 0, 0, dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToMod<true,false,true>), dim3(grid),dim3(block), 0, 0, dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
}
} else {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToMod<true,true,false>), dim3(grid),dim3(block), 0, 0, dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToMod<true,true,true>), dim3(grid),dim3(block), 0, 0, dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
}
}
}
hipMemcpy(hLastElement,dLastElement,sizeof(int),hipMemcpyDeviceToHost);
}
void errorAndDataAssociationMultiModel(const float4 * dObsVertMap,
const float4 * dObsNormMap,
const int width,
const int height,
const int nModels,
const SE3 * T_mcs,
const SE3 * const * T_fms,
const int * const * sdfFrames,
const Grid3D<float> * const * sdfs,
const int * nSdfs,
const float * distanceThresholds,
const float * normalThresholds,
const float * planeOffsets,
const float3 * planeNormals,
int * lastElements,
DataAssociatedPoint * * pts,
int * dDebugDataAssociation,
float * dDebugError,
float4 * dDebugNorm,
hipStream_t stream) {
hipMemset(lastElements,0,nModels*sizeof(int));
dim3 block;
if (height == 1) {
block.x = 128; block.y = block.z = 1;
}
else {
block.x = 16; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
if (dDebugDataAssociation == 0) {
if (dDebugError == 0) {
if (dDebugNorm == 0) {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToModMultiModel<false,false,false>), dim3(grid),dim3(block),0,stream, dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
} else {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToModMultiModel<false,false,true>), dim3(grid),dim3(block),0,stream, dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
}
} else {
if (dDebugNorm == 0) {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToModMultiModel<false,true,false>), dim3(grid),dim3(block),0,stream, dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
} else {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToModMultiModel<false,true,true>), dim3(grid),dim3(block),0,stream, dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
}
}
} else {
if (dDebugError == 0) {
if (dDebugNorm == 0) {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToModMultiModel<true,false,false>), dim3(grid),dim3(block),0,stream, dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
} else {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToModMultiModel<true,false,true>), dim3(grid),dim3(block),0,stream, dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
}
} else {
if (dDebugNorm == 0) {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToModMultiModel<true,true,false>), dim3(grid),dim3(block),0,stream, dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
} else {
hipLaunchKernelGGL(( gpu_errorAndDataAssociationObsToModMultiModel<true,true,true>), dim3(grid),dim3(block),0,stream, dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
}
}
}
}
void normEqnsObsToMod(const int dims,
const float4 * dObsVertMap,
const int width,
const int height,
const MirroredModel & model,
const OptimizationOptions & opts,
DataAssociatedPoint * dPts,
int nElements,
float * dResult,
float4 * debugJs) {
std::cout << nElements << " points associated to model " << model.getModelID() << std::endl;
dim3 block;
if (height == 1) {
block.x = 128; block.y = block.z = 1;
}
else {
block.x = 16; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
// initilize system to zero
hipMemset(dResult,0,(dims + JTJSize(dims) + 1)*sizeof(float));
if (nElements > 10) {
block = dim3(64,1,1);
grid = dim3(ceil((double)nElements/64),1,1);
{
if (debugJs == 0) {
hipLaunchKernelGGL(( gpu_normEqnsObsToMod<false>), dim3(grid),dim3(block),64*dims*sizeof(float), 0, dims,
dPts,
dObsVertMap,
nElements,
model.getTransformCameraToModel(),
model.getDeviceTransformsModelToFrame(),
model.getDeviceTransformsFrameToModel(),
model.getDeviceSdfFrames(),
model.getDeviceSdfs(),
model.getDeviceDependencies(),
model.getDeviceJointTypes(),
model.getDeviceJointAxes(),
opts.huberDelta,
dResult,
debugJs);
} else {
hipLaunchKernelGGL(( gpu_normEqnsObsToMod<true>), dim3(grid),dim3(block),64*dims*sizeof(float), 0, dims,
dPts,
dObsVertMap,
nElements,
model.getTransformCameraToModel(),
model.getDeviceTransformsModelToFrame(),
model.getDeviceTransformsFrameToModel(),
model.getDeviceSdfFrames(),
model.getDeviceSdfs(),
model.getDeviceDependencies(),
model.getDeviceJointTypes(),
model.getDeviceJointAxes(),
opts.huberDelta,
dResult,
debugJs);
}
#ifdef CUDA_ERR_CHECK
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("gpu_normEqnsObsToMod error: %s\n",hipGetErrorString(err));
}
#endif
}
}
}
void normEqnsObsToModReduced(const int dims,
const int reductionDims,
const float * d_dtheta_dalpha,
const float4 * dObsVertMap,
const int width,
const int height,
const MirroredModel & model,
const OptimizationOptions & opts,
DataAssociatedPoint * dPts,
int nElements,
float * dResult) {
dim3 block;
if (height == 1) {
block.x = 128; block.y = block.z = 1;
}
else {
block.x = 16; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
// initilize system to zero
hipMemset(dResult,0,(reductionDims + JTJSize(reductionDims) + 1)*sizeof(float));
if (nElements > 10) {
block = dim3(64,1,1);
grid = dim3(ceil((double)nElements/64),1,1);
{
hipLaunchKernelGGL(( gpu_normEqnsObsToModReduced), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims,
reductionDims,
dPts,
dObsVertMap,
nElements,
model.getTransformCameraToModel(),
model.getDeviceTransformsModelToFrame(),
model.getDeviceTransformsFrameToModel(),
model.getDeviceSdfFrames(),
model.getDeviceSdfs(),
model.getDeviceDependencies(),
model.getDeviceJointTypes(),
model.getDeviceJointAxes(),
opts.huberDelta,
d_dtheta_dalpha,
dResult);
#ifdef CUDA_ERR_CHECK
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("gpu_normEqnsObsToModReduced error: %s\n",hipGetErrorString(err));
}
#endif
}
}
}
void normEqnsObsToModParamMap(const int dims,
const int reductionDims,
const int * dMapping,
const float4 * dObsVertMap,
const int width,
const int height,
const MirroredModel & model,
const OptimizationOptions & opts,
DataAssociatedPoint * dPts,
int nElements,
float * dResult) {
dim3 block;
if (height == 1) {
block.x = 128; block.y = block.z = 1;
}
else {
block.x = 16; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
// initilize system to zero
hipMemset(dResult,0,(reductionDims + JTJSize(reductionDims) + 1)*sizeof(float));
if (nElements > 10) {
block = dim3(64,1,1);
grid = dim3(ceil((double)nElements/64),1,1);
{
hipLaunchKernelGGL(( gpu_normEqnsObsToModParamMap), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims,
reductionDims,
dPts,
dObsVertMap,
nElements,
model.getTransformCameraToModel(),
model.getDeviceTransformsModelToFrame(),
model.getDeviceTransformsFrameToModel(),
model.getDeviceSdfFrames(),
model.getDeviceSdfs(),
model.getDeviceDependencies(),
model.getDeviceJointTypes(),
model.getDeviceJointAxes(),
opts.huberDelta,
dMapping,
dResult);
#ifdef CUDA_ERR_CHECK
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("gpu_normEqnsObsToModReduced error: %s\n",hipGetErrorString(err));
}
#endif
}
}
}
}
| 1cca2bc2ffaa2b835ef9198221b2e4b822a52be4.cu | #include "obsToMod.h"
#include "kernel_common.h"
#include "mirrored_memory.h"
namespace dart {
static const LossFunctionType lossFunction = HuberLoss;
// -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=-
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_errorAndDataAssociationObsToMod(const float4 * obsVertMap,
const float4 * obsNormMap,
const int width,
const int height,
const SE3 T_mc,
const SE3 * T_fms,
const int * sdfFrames,
const Grid3D<float> * sdfs,
const int nSdfs,
const float distanceThreshold,
const float normThreshold,
const float planeOffset,
const float3 planeNormal,
int * lastElement,
DataAssociatedPoint * pts,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height) {
const int index = x + y*width;
if (dbgDA) { debugDataAssociation[index] = -1; }
if (dbgErr) { debugError[index] = NAN; }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & xObs_c = obsVertMap[index];
if (xObs_c.w > 0) {
const float4 xObs_m = T_mc*xObs_c;
if (dot(make_float3(xObs_m),planeNormal) >= planeOffset) {
// calculate distance
float sdfError = 1e20;
int grid = -1;
for (int g=0; g < nSdfs; ++g) {
const int f = sdfFrames[g];
const float4 xObs_f = T_fms[f]*xObs_m;
const Grid3D<float> & sdf = sdfs[g];
const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f));
if (!sdf.isInBoundsGradientInterp(xObs_g)) {
continue;
}
const float d = (sdf.getValueInterpolated(xObs_g))*sdf.resolution;
//if (fabs(d) < fabs(sdf_error)) {
if (d < sdfError) {
sdfError = d;
grid = g;
}
}
// skip unassociated points and points beyond the distance threshold
if (sdfError*sdfError > distanceThreshold*distanceThreshold) { }
else {
const int f = sdfFrames[grid];
const float4 xObs_f = T_fms[f]*xObs_m;
const Grid3D<float> & sdf = sdfs[grid];
const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f));
// TODO: figure out what's going on with the -1
const float4 nPred = -1*(SE3Invert( T_fms[f]*T_mc )*normalize(make_float4(sdf.getGradientInterpolated(xObs_g),0)));
if (dbgNorm) { debugNorm[index] = nPred; }
float4 v = obsNormMap[index];
float3 nObs = make_float3(0,0,0);
if (v.w > 0.0) {
v.w = 0;
nObs = make_float3(v);
if (dot(nPred,v) < normThreshold ) {
return;
}
}
if (dbgDA) { debugDataAssociation[index] = grid; }
if (dbgErr) { debugError[index] = sdfError; }
int myElement = atomicAdd(lastElement,1);
DataAssociatedPoint dt;
dt.index = index;
dt.dataAssociation = grid;
dt.error = sdfError;
pts[myElement] = dt;
}
}
}
}
}
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_errorAndDataAssociationObsToModMultiModel(const float4 * obsVertMap,
const float4 * obsNormMap,
const int width,
const int height,
const int nModels,
const SE3 * T_mcs,
const SE3 * const * T_fms,
const int * const * sdfFrames,
const Grid3D<float> * const * sdfs,
const int * nSdfs,
const float * distanceThresholds,
const float * normThresholds,
const float * planeOffsets,
const float3 * planeNormals,
int * lastElement,
DataAssociatedPoint * * pts,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
if (dbgDA) { debugDataAssociation[index] = -1; }
if (dbgErr) { debugError[index] = NAN; }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & xObs_c = obsVertMap[index];
if (xObs_c.w > 0) {
float sdfError = 1e20;
int associatedModel = -1;
int associatedGrid = -1;
for (int m=0; m<nModels; ++m) {
const float4 xObs_m = T_mcs[m]*xObs_c;
const float & planeOffset = planeOffsets[m];
const float3 & planeNormal = planeNormals[m];
if (dot(make_float3(xObs_m),planeNormal) >= planeOffset) {
const int mNSdfs = nSdfs[m];
const int * mSdfFrames = sdfFrames[m];
const SE3 * mT_fms = T_fms[m];
const Grid3D<float> * mSdfs = sdfs[m];
for (int g=0; g<mNSdfs; ++g) {
const int f = mSdfFrames[g];
const float4 xObs_f = mT_fms[f]*xObs_m;
const Grid3D<float> & sdf = mSdfs[g];
//printf("model %d sdf %d is in frame %d\n",m,g,f);
//printf("%f ",sdf.resolution);
const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f));
if (!sdf.isInBoundsGradientInterp(xObs_g)) {
continue;
}
const float d = (sdf.getValueInterpolated(xObs_g))*sdf.resolution;
//printf("%f ",d);
// if (fabs(d) < fabs(sdfError) {
if (d < sdfError) {
//printf(".");
if (d*d < distanceThresholds[m]*distanceThresholds[m]) {
//printf("*");
sdfError = d;
associatedGrid = g;
associatedModel = m;
}
}
}
}
}
if (associatedModel != -1) {
// const int f = sdfFrames[associatedModel][associatedGrid];
// const float4 xObs_m = T_mcs[associatedModel]*xObs_c;
// const float4 xObs_f = T_fms[associatedModel][f]*xObs_m;
// const Grid3D<float> &sdf = sdfs[associatedModel][associatedGrid];
// const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f));
// const float4 nPred = 1*(SE3Invert( T_fms[associatedModel][f]*T_mcs[associatedModel] )*normalize(make_float4(sdf.getGradientInterpolated(xObs_g),0)));
// float4 v = obsNormMap[index];
// float3 nObs = make_float3(0,0,0);
// if (v.w > 0.0) {
// v.w = 0;
// nObs = make_float3(v);
// if (dot(nPred,v) >= normThresholds[associatedModel]) {
if (dbgDA) { debugDataAssociation[index] = ((associatedModel << 16) | associatedGrid); }
if (dbgErr) { debugError[index] = sdfError; }
if (dbgNorm) { debugNorm[index] = obsNormMap[index]; }
int myElement = atomicAdd(&lastElement[associatedModel],1);
DataAssociatedPoint * mPts = pts[associatedModel];
DataAssociatedPoint dt;
dt.index = index;
dt.dataAssociation = associatedGrid;
dt.error = sdfError;
mPts[myElement] = dt;
// }
// }
}
}
}
template <bool dbgJs>
__global__ void gpu_normEqnsObsToMod(const int dims,
const DataAssociatedPoint * pts,
const float4 * obsVertMap,
const int nPoints,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const int * sdfFrames,
const Grid3D<float> * sdfs,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const float huberDelta,
float * result,
float4 * debugJs) {
extern __shared__ float s[];
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= nPoints) {
return;
}
if (dbgJs) { debugJs[index] = make_float4(0); }
const float4 xObs_m = T_mc*obsVertMap[pts[index].index];
// array declarations
float * J = &s[threadIdx.x*dims];
int obsFrame = sdfFrames[pts[index].dataAssociation];
const float4 xObs_f = T_fms[obsFrame]*xObs_m;
// compute SDF gradient
const int g = pts[index].dataAssociation;
const Grid3D<float> & sdf = sdfs[g];
const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f));
const float3 sdfGrad_f = sdf.getGradientInterpolated(xObs_g);
const float3 sdfGrad_m = SE3Rotate(T_mfs[obsFrame],sdfGrad_f);
getErrorJacobianOfModelPoint(J,xObs_m,obsFrame,sdfGrad_m,dims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
if (dbgJs) {
debugJs[index*dims + 0] = make_float4(1,0,0,1);
debugJs[index*dims + 1] = make_float4(0,1,0,1);
debugJs[index*dims + 2] = make_float4(0,0,1,1);
debugJs[index*dims + 3] = make_float4( 0,-xObs_m.z, xObs_m.y,1);
debugJs[index*dims + 4] = make_float4( xObs_m.z, 0,-xObs_m.x,1);
debugJs[index*dims + 5] = make_float4(-xObs_m.y, xObs_m.x, 0,1);
}
const float residual = pts[index].error;
float * JTr = result;
float * JTJ = &result[dims];
float * e = &result[dims + JTJSize(dims)];
switch(lossFunction) {
case SquaredLoss:
{
computeSquaredLossResult(dims,-residual,J,e,JTr,JTJ); // TODO: why negative again?
}
break;
case HuberLoss:
{
if (fabs(pts[index].error) < huberDelta ) {
computeSquaredLossResult(dims,-residual,J,e,JTr,JTJ); // TODO: why negative again?
}
else {
float v = huberDelta;
if (pts[index].error < 0) {
v = -v;
}
for (int i=0; i<dims; i++) {
if( J[i]==0.0f) continue;
atomicAdd(&JTr[i],v*-J[i]);
for (int j=0; j<=i; j++) {
float v2 = J[i]*J[j];
atomicAdd(&JTJ[((i*(i+1))>>1) + j],v2);
}
}
atomicAdd(e,huberDelta * (fabs(pts[index].error) - 0.5*huberDelta));
}
}
break;
}
}
__global__ void gpu_normEqnsObsToModReduced(const int fullDims,
const int redDims,
const DataAssociatedPoint * pts,
const float4 * obsVertMap,
const int nPoints,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const int * sdfFrames,
const Grid3D<float> * sdfs,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const float huberDelta,
const float * dtheta_dalpha,
float * result) {
extern __shared__ float s[];
int index = blockIdx.x*blockDim.x + threadIdx.x;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (index >= nPoints) {
return;
}
const float4 xObs_m = T_mc*obsVertMap[pts[index].index];
// array declarations
float * de_dtheta = &s[tid*(fullDims+redDims)];
float * J = &s[tid*(fullDims+redDims) + fullDims];
int obsFrame = sdfFrames[pts[index].dataAssociation];
const float4 xObs_f = T_fms[obsFrame]*xObs_m;
// compute SDF gradient
const int g = pts[index].dataAssociation;
const Grid3D<float> & sdf = sdfs[g];
const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f));
const float3 sdfGrad_f = sdf.getGradientInterpolated(xObs_g);
const float3 sdfGrad_m = make_float3(SE3Rotate(T_mfs[obsFrame],make_float4(sdfGrad_f.x,sdfGrad_f.y,sdfGrad_f.z,0.0)));
getErrorJacobianOfModelPoint(de_dtheta,xObs_m,obsFrame,sdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
doPoseGradientReduction(J,de_dtheta,dtheta_dalpha,fullDims,redDims);
const float residual = pts[index].error;
float * JTr = result;
float * JTJ = &result[redDims];
float * e = &result[redDims + JTJSize(redDims)];
switch(lossFunction) {
case SquaredLoss:
{
computeSquaredLossResult(redDims,-residual,J,e,JTr,JTJ);
}
break;
case HuberLoss:
{
if (fabs(pts[index].error) < huberDelta ) {
computeSquaredLossResult(redDims,-residual,J,e,JTr,JTJ);
}
else {
float v = huberDelta;
if (pts[index].error < 0) {
v = -v;
}
for (int i=0; i<redDims; i++) {
if( J[i]==0.0f) continue;
atomicAdd(&JTr[i],v*-J[i]);
}
atomicAdd(e,huberDelta * (fabs(pts[index].error) - 0.5*huberDelta));
}
}
break;
}
}
__global__ void gpu_normEqnsObsToModParamMap(const int fullDims,
const int redDims,
const DataAssociatedPoint * pts,
const float4 * obsVertMap,
const int nPoints,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const int * sdfFrames,
const Grid3D<float> * sdfs,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const float huberDelta,
const int * dMapping,
float * result) {
extern __shared__ float s[];
int index = blockIdx.x*blockDim.x + threadIdx.x;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (index >= nPoints) {
return;
}
const float4 xObs_m = T_mc*obsVertMap[pts[index].index];
// array declarations
float * de_dtheta = &s[tid*(fullDims+redDims)];
float * J = &s[tid*(fullDims+redDims) + fullDims];
int obsFrame = sdfFrames[pts[index].dataAssociation];
const float4 xObs_f = T_fms[obsFrame]*xObs_m;
// compute SDF gradient
const int g = pts[index].dataAssociation;
const Grid3D<float> & sdf = sdfs[g];
const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f));
const float3 sdfGrad_f = sdf.getGradientInterpolated(xObs_g);
const float3 sdfGrad_m = make_float3(SE3Rotate(T_mfs[obsFrame],make_float4(sdfGrad_f.x,sdfGrad_f.y,sdfGrad_f.z,0.0)));
getErrorJacobianOfModelPoint(de_dtheta,xObs_m,obsFrame,sdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
doParamMapping(J,de_dtheta,dMapping,fullDims,redDims);
const float residual = pts[index].error;
float * JTr = result;
float * JTJ = &result[redDims];
float * e = &result[redDims + JTJSize(redDims)];
switch(lossFunction) {
case SquaredLoss:
{
computeSquaredLossResult(redDims,-residual,J,e,JTr,JTJ);
}
break;
case HuberLoss:
{
if (fabs(pts[index].error) < huberDelta ) {
computeSquaredLossResult(redDims,-residual,J,e,JTr,JTJ);
}
else {
float v = huberDelta;
if (pts[index].error < 0) {
v = -v;
}
for (int i=0; i<redDims; i++) {
if( J[i]==0.0f) continue;
atomicAdd(&JTr[i],v*-J[i]);
}
atomicAdd(e,huberDelta * (fabs(pts[index].error) - 0.5*huberDelta));
}
}
break;
}
}
void errorAndDataAssociation(const float4 * dObsVertMap,
const float4 * dObsNormMap,
const int width,
const int height,
const MirroredModel & model,
const OptimizationOptions & opts,
DataAssociatedPoint * dPts,
int * dLastElement,
int * hLastElement,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
dim3 block(16,8);
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
cudaMemset(dLastElement,0,sizeof(int));
if (debugDataAssociation == 0) {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_errorAndDataAssociationObsToMod<false,false,false><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
} else {
gpu_errorAndDataAssociationObsToMod<false,false,true><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
}
} else {
if (debugNorm == 0) {
gpu_errorAndDataAssociationObsToMod<false,true,false><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
} else {
gpu_errorAndDataAssociationObsToMod<false,true,true><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
}
}
}
else {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_errorAndDataAssociationObsToMod<true,false,false><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
} else {
gpu_errorAndDataAssociationObsToMod<true,false,true><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
}
} else {
if (debugNorm == 0) {
gpu_errorAndDataAssociationObsToMod<true,true,false><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
} else {
gpu_errorAndDataAssociationObsToMod<true,true,true><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm);
}
}
}
cudaMemcpy(hLastElement,dLastElement,sizeof(int),cudaMemcpyDeviceToHost);
}
void errorAndDataAssociationMultiModel(const float4 * dObsVertMap,
const float4 * dObsNormMap,
const int width,
const int height,
const int nModels,
const SE3 * T_mcs,
const SE3 * const * T_fms,
const int * const * sdfFrames,
const Grid3D<float> * const * sdfs,
const int * nSdfs,
const float * distanceThresholds,
const float * normalThresholds,
const float * planeOffsets,
const float3 * planeNormals,
int * lastElements,
DataAssociatedPoint * * pts,
int * dDebugDataAssociation,
float * dDebugError,
float4 * dDebugNorm,
cudaStream_t stream) {
cudaMemset(lastElements,0,nModels*sizeof(int));
dim3 block;
if (height == 1) {
block.x = 128; block.y = block.z = 1;
}
else {
block.x = 16; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
if (dDebugDataAssociation == 0) {
if (dDebugError == 0) {
if (dDebugNorm == 0) {
gpu_errorAndDataAssociationObsToModMultiModel<false,false,false><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
} else {
gpu_errorAndDataAssociationObsToModMultiModel<false,false,true><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
}
} else {
if (dDebugNorm == 0) {
gpu_errorAndDataAssociationObsToModMultiModel<false,true,false><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
} else {
gpu_errorAndDataAssociationObsToModMultiModel<false,true,true><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
}
}
} else {
if (dDebugError == 0) {
if (dDebugNorm == 0) {
gpu_errorAndDataAssociationObsToModMultiModel<true,false,false><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
} else {
gpu_errorAndDataAssociationObsToModMultiModel<true,false,true><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
}
} else {
if (dDebugNorm == 0) {
gpu_errorAndDataAssociationObsToModMultiModel<true,true,false><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
} else {
gpu_errorAndDataAssociationObsToModMultiModel<true,true,true><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm);
}
}
}
}
void normEqnsObsToMod(const int dims,
const float4 * dObsVertMap,
const int width,
const int height,
const MirroredModel & model,
const OptimizationOptions & opts,
DataAssociatedPoint * dPts,
int nElements,
float * dResult,
float4 * debugJs) {
std::cout << nElements << " points associated to model " << model.getModelID() << std::endl;
dim3 block;
if (height == 1) {
block.x = 128; block.y = block.z = 1;
}
else {
block.x = 16; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
// initilize system to zero
cudaMemset(dResult,0,(dims + JTJSize(dims) + 1)*sizeof(float));
if (nElements > 10) {
block = dim3(64,1,1);
grid = dim3(ceil((double)nElements/64),1,1);
{
if (debugJs == 0) {
gpu_normEqnsObsToMod<false><<<grid,block,64*dims*sizeof(float)>>>(dims,
dPts,
dObsVertMap,
nElements,
model.getTransformCameraToModel(),
model.getDeviceTransformsModelToFrame(),
model.getDeviceTransformsFrameToModel(),
model.getDeviceSdfFrames(),
model.getDeviceSdfs(),
model.getDeviceDependencies(),
model.getDeviceJointTypes(),
model.getDeviceJointAxes(),
opts.huberDelta,
dResult,
debugJs);
} else {
gpu_normEqnsObsToMod<true><<<grid,block,64*dims*sizeof(float)>>>(dims,
dPts,
dObsVertMap,
nElements,
model.getTransformCameraToModel(),
model.getDeviceTransformsModelToFrame(),
model.getDeviceTransformsFrameToModel(),
model.getDeviceSdfFrames(),
model.getDeviceSdfs(),
model.getDeviceDependencies(),
model.getDeviceJointTypes(),
model.getDeviceJointAxes(),
opts.huberDelta,
dResult,
debugJs);
}
#ifdef CUDA_ERR_CHECK
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpu_normEqnsObsToMod error: %s\n",cudaGetErrorString(err));
}
#endif
}
}
}
void normEqnsObsToModReduced(const int dims,
const int reductionDims,
const float * d_dtheta_dalpha,
const float4 * dObsVertMap,
const int width,
const int height,
const MirroredModel & model,
const OptimizationOptions & opts,
DataAssociatedPoint * dPts,
int nElements,
float * dResult) {
dim3 block;
if (height == 1) {
block.x = 128; block.y = block.z = 1;
}
else {
block.x = 16; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
// initilize system to zero
cudaMemset(dResult,0,(reductionDims + JTJSize(reductionDims) + 1)*sizeof(float));
if (nElements > 10) {
block = dim3(64,1,1);
grid = dim3(ceil((double)nElements/64),1,1);
{
gpu_normEqnsObsToModReduced<<<grid,block,64*(dims+reductionDims)*sizeof(float)>>>(dims,
reductionDims,
dPts,
dObsVertMap,
nElements,
model.getTransformCameraToModel(),
model.getDeviceTransformsModelToFrame(),
model.getDeviceTransformsFrameToModel(),
model.getDeviceSdfFrames(),
model.getDeviceSdfs(),
model.getDeviceDependencies(),
model.getDeviceJointTypes(),
model.getDeviceJointAxes(),
opts.huberDelta,
d_dtheta_dalpha,
dResult);
#ifdef CUDA_ERR_CHECK
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpu_normEqnsObsToModReduced error: %s\n",cudaGetErrorString(err));
}
#endif
}
}
}
void normEqnsObsToModParamMap(const int dims,
const int reductionDims,
const int * dMapping,
const float4 * dObsVertMap,
const int width,
const int height,
const MirroredModel & model,
const OptimizationOptions & opts,
DataAssociatedPoint * dPts,
int nElements,
float * dResult) {
dim3 block;
if (height == 1) {
block.x = 128; block.y = block.z = 1;
}
else {
block.x = 16; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
// initilize system to zero
cudaMemset(dResult,0,(reductionDims + JTJSize(reductionDims) + 1)*sizeof(float));
if (nElements > 10) {
block = dim3(64,1,1);
grid = dim3(ceil((double)nElements/64),1,1);
{
gpu_normEqnsObsToModParamMap<<<grid,block,64*(dims+reductionDims)*sizeof(float)>>>(dims,
reductionDims,
dPts,
dObsVertMap,
nElements,
model.getTransformCameraToModel(),
model.getDeviceTransformsModelToFrame(),
model.getDeviceTransformsFrameToModel(),
model.getDeviceSdfFrames(),
model.getDeviceSdfs(),
model.getDeviceDependencies(),
model.getDeviceJointTypes(),
model.getDeviceJointAxes(),
opts.huberDelta,
dMapping,
dResult);
#ifdef CUDA_ERR_CHECK
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpu_normEqnsObsToModReduced error: %s\n",cudaGetErrorString(err));
}
#endif
}
}
}
}
|
f7444dc391d49d67478fb97dbea135937056bdd0.hip | // !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathReduce.cuh"
#include "THHTensor.hpp"
#include "../generic/THCTensorMathReduce.cu"
#include <THH/THHGenerateCharType.h>
| f7444dc391d49d67478fb97dbea135937056bdd0.cu | #include "../THCTensorMathReduce.cuh"
#include "THCTensor.hpp"
#include "../generic/THCTensorMathReduce.cu"
#include <THC/THCGenerateCharType.h>
|
a90c7a69d7e28de28e29eae69b28f4e1c17724b4.hip | // !!! This is a file automatically generated by hipify!!!
/*This program reads a CSV file with 64 LiDAR data packets and
returns a vector of 16384 floating-point range values for the scanned points*/
#define _USE_MATH_DEFINES
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define NUM_POINTS 16384
void print_cloud(FILE* document, float* cloud, const char* name);
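// Conversion kernel: one thread per range sample. theta combines the azimuth-block encoder
// count (90112 ticks per revolution, 88 ticks per azimuth block) with the channel's azimuth
// offset; phi is the channel's beam altitude. The output is packed as consecutive x,y,z
// triplets in point_cloud.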
__global__
void Conversion(float* r, unsigned long int* encoder_count, float* altitude, float* azimuth, float* point_cloud)
{
int azimuth_block, channel;
unsigned long int counter;
float theta, phi;
int i = blockIdx.x * blockDim.x + threadIdx.x;
azimuth_block = i / 16;
counter = (encoder_count[0] + azimuth_block * 88) % 90112;
channel = i % 16;
theta = (float)(2 * M_PI * (counter / 90112.0 + azimuth[channel] / 360.0));
phi = (float)(2 * M_PI * altitude[channel] / 360.0);
point_cloud[0 + 3 * i] = (float)(r[i] * cos(theta) * cos(phi));//x
point_cloud[1 + 3 * i] = (float)(-r[i] * sin(theta) * cos(phi));//y
point_cloud[2 + 3 * i] = (float)(r[i] * sin(phi));//z
}
__global__
void RyT(float* R, float* T, float* P, float* Q)
{
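// RyT ("rotation and translation"): applies the 3x3 matrix R and the offset T to every point
// of P in parallel, writing the transformed cloud to Q (points packed as x,y,z triplets).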
int i = blockIdx.x * blockDim.x + threadIdx.x;
Q[0 + i * 3] = R[0 + 0 * 3] * P[0 + i * 3] + R[0 + 1 * 3] * P[1 + i * 3] + R[0 + 2 * 3] * P[2 + i * 3] + T[0];
Q[1 + i * 3] = R[1 + 0 * 3] * P[0 + i * 3] + R[1 + 1 * 3] * P[1 + i * 3] + R[1 + 2 * 3] * P[2 + i * 3] + T[1];
Q[2 + i * 3] = R[2 + 0 * 3] * P[0 + i * 3] + R[2 + 1 * 3] * P[1 + i * 3] + R[2 + 2 * 3] * P[2 + i * 3] + T[2];
}
int main(void)
{
///////Block 1: Open and read the file with 64 LiDAR data packets//////
//int i = 0;
const int N_LINE = 128;
char line[N_LINE];
FILE* document;
document = fopen("Donut_1024x16.csv", "r");
if (!document) {
perror("File opening failed");
return (-1);
}
float* h_r = NULL;//ranges (radii)
size_t bytes_r = NUM_POINTS * sizeof(float);
h_r = (float*)malloc(bytes_r);
unsigned long int h_encoder_count = 0;//initial encoder counter (then grows with 88 ticks)
int offset = 0;
unsigned long int word = 0;
int channel = 2;
int azimuth_block = 0;
int lidar_packet = 0;
int idx_line;//index of the word to read
int j = 1;//line number
while (fgets(line, N_LINE, document) != NULL)
{
//get the first values of the encoder counter
if (j == 13) h_encoder_count = atoi(line);
if (j == 14) h_encoder_count = atoi(line) << 8 | h_encoder_count;
//read the ranges
idx_line = 17 + 12 * channel + 788 * azimuth_block + 12608 * lidar_packet;
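// The CSV stores one byte per line; the offsets assume an Ouster-style packet layout of
// 12 bytes per channel block, 788 bytes per azimuth block and 12608 bytes (16 azimuth blocks)
// per LiDAR packet. The 20-bit range word is assembled below from three consecutive bytes,
// keeping only the low nibble of the third byte.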
if (j == idx_line) word = (unsigned long int) atoi(line);
if (j == idx_line + 1) word = (unsigned long int) (atoi(line) << 8) | word;
if (j == idx_line + 2) word = (unsigned long int) ((atoi(line) & 0x0000000F) << 16) | word;
if (j > (idx_line + 2))//go to next channel
{
h_r[offset] = (float)word;
offset++;
channel += 4;
}
if (channel >= 64)//go to next azimuth block
{
channel = 2;
azimuth_block++;
}
if (azimuth_block >= 16)//go to next lidar packet
{
azimuth_block = 0;
lidar_packet++;
}
if (lidar_packet >= 64) break;//done
j++;
}
fclose(document);
//printf("%ld\n",h_encoder_count);
//for(i=0;i<100;i++) printf("%.3f\n",h_r[i]);
document = fopen("beam_intrinsics.csv", "r");
if (!document) {
perror("File opening failed");
return (-1);
}
float* h_altitude = NULL;
float* h_azimuth = NULL;
size_t bytes_angles = 16 * sizeof(float);//16 channels
h_altitude = (float*)malloc(bytes_angles);
h_azimuth = (float*)malloc(bytes_angles);
j = 1;
while (fgets(line, N_LINE, document) != NULL)
{
//read altitude angles
if (j == 2) offset = 0;
if (j >= 2 && j <= 65)
{
if (j % 4 == 0)
{
h_altitude[offset] = (float)atof(line);
offset++;
}
}
//read azimuth angles
if (j == 68) offset = 0;
if (j >= 68 && j <= 131)
{
if ((j - 66) % 4 == 0)
{
h_azimuth[offset] = (float)atof(line);
offset++;
}
}
j++;
}
fclose(document);
//for(i=0;i<16;i++) printf("%.3f\n",h_altitude[i]);
//for(i=0;i<16;i++) printf("%.3f\n",h_azimuth[i]);
///////End of Block 1///////
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float milliseconds1 = 0;//for "Conversion"
float milliseconds2 = 0;//for "RyT"
int GridSize = 16;
int BlockSize = NUM_POINTS / GridSize;
hipError_t err = hipSuccess;//for checking errors in kernels
///////Block 2: Conversion to Cartesian coordinates///////
//allocate memory for storing the cloud (format: x1y1z1 x2y2z2 x3y3z3 ...)
float* h_P = (float*)malloc(3 * bytes_r);
float* d_P = NULL;
float* d_r = NULL;
float* d_azimuth = NULL;
float* d_altitude = NULL;
unsigned long int* d_encoder_count;
hipMalloc(&d_P, 3 * bytes_r);
hipMalloc(&d_r, bytes_r);
hipMalloc(&d_azimuth, bytes_angles);
hipMalloc(&d_altitude, bytes_angles);
hipMalloc(&d_encoder_count, sizeof(unsigned long int));
//move data to GPU
hipMemcpy(d_r, h_r, bytes_r, hipMemcpyHostToDevice);
hipMemcpy(d_azimuth, h_azimuth, bytes_angles, hipMemcpyHostToDevice);
hipMemcpy(d_altitude, h_altitude, bytes_angles, hipMemcpyHostToDevice);
hipMemcpy(d_encoder_count, &h_encoder_count, sizeof(unsigned long int), hipMemcpyHostToDevice);
//Launch "Conversion" kernel
hipEventRecord(start);
Conversion << <GridSize, BlockSize >> > (d_r, d_encoder_count, d_altitude, d_azimuth, d_P);
err = hipGetLastError();
if (err != hipSuccess) printf("Error in Conversion kernel: %s\n", hipGetErrorString(err));
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds1, start, stop);
printf("Conversion kernel's elapsed time: %.3f ms\n", milliseconds1);
///////End of Block 2///////
///////Block 3: Compute the rotation and translation///////
//converted cloud
float* h_Q = (float*)malloc(3 * bytes_r);
float* d_Q = NULL;
hipMalloc(&d_Q, 3 * bytes_r);
//rotation matrix and translation vector
float* h_R = (float*)malloc(9 * sizeof(float));
float* h_T = (float*)malloc(3 * sizeof(float));
float* d_R, * d_T;
hipMalloc(&d_R, 9 * sizeof(float));
hipMalloc(&d_T, 3 * sizeof(float));
//Translation values
h_T[0] = 0.8f;//x
h_T[1] = -0.3f;//y
h_T[2] = 0.2f;//z
//Rotation values (rad)
float rx = 0.2f;//axis x
float ry = -0.2f;//axis y
float rz = 0.05f;//axis z
float cx = (float)cos(rx); float cy = (float)cos(ry); float cz = (float)cos(rz);
float sx = (float)sin(rx); float sy = (float)sin(ry); float sz = (float)sin(rz);
h_R[0] = cy * cz; h_R[1] = (cz * sx * sy) + (cx * sz); h_R[2] = -(cx * cz * sy) + (sx * sz);
h_R[3] = -cy * sz; h_R[4] = (cx * cz) - (sx * sy * sz); h_R[5] = (cx * sy * sz) + (cz * sx);
h_R[6] = sy; h_R[7] = -cy * sx; h_R[8] = cx * cy;
//move data to GPU
hipMemcpy(d_R, h_R, 9 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_T, h_T, 3 * sizeof(float), hipMemcpyHostToDevice);
hipEventRecord(start);
RyT << <GridSize, BlockSize >> > (d_R, d_T, d_P, d_Q);
err = hipGetLastError();
if (err != hipSuccess) printf("Error in RyT kernel: %s\n", hipGetErrorString(err));
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds2, start, stop);
printf("RyT kernel's elapsed time: %.3f ms\n", milliseconds2);
//move data back to the CPU
hipMemcpy(h_P, d_P, 3 * bytes_r, hipMemcpyDeviceToHost);
hipMemcpy(h_Q, d_Q, 3 * bytes_r, hipMemcpyDeviceToHost);
///////End of Block 3///////
/*Print clouds into a file*/
print_cloud(document, h_P, "GPU_Output_file_P.csv");
print_cloud(document, h_Q, "GPU_Output_file_Q.csv");
printf("Success!\n");
//Free variables
free(h_P), hipFree(d_P);
free(h_Q), hipFree(d_Q);
free(h_T), free(h_R);
hipFree(d_T), hipFree(d_R);
free(h_r), free(h_altitude), free(h_azimuth);
hipFree(d_r), hipFree(d_altitude), hipFree(d_azimuth), hipFree(d_encoder_count);
return 0;
}
void print_cloud(FILE* document, float* cloud, const char* name)
{
int i, j;
fopen_s(&document, name, "w");
if (!document) printf("Error: File opening failed\n");
for (i = 0; i < NUM_POINTS; i++)
{
for (j = 0; j < 2; j++) fprintf(document, "%.3f, ", cloud[j + i * 3]);
fprintf(document, "%.3f\n ", cloud[j + i * 3]);
}
fprintf(document, "\n");
fclose(document);
}
| a90c7a69d7e28de28e29eae69b28f4e1c17724b4.cu | /*This program receives a CSV file with 64 LiDAR data packets and
returns a vector of 16384 double values with the range information (radii) of the scanned points*/
#define _USE_MATH_DEFINES
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <math.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define NUM_POINTS 16384
void print_cloud(FILE* document, float* cloud, const char* name);
__global__
void Conversion(float* r, unsigned long int* encoder_count, float* altitude, float* azimuth, float* point_cloud)
{
int azimuth_block, channel;
unsigned long int counter;
float theta, phi;
int i = blockIdx.x * blockDim.x + threadIdx.x;
azimuth_block = i / 16;
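// Each azimuth block advances the encoder by 88 ticks; 1024 blocks * 88 ticks = 90112 ticks
// per full revolution, hence the modulo below.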
counter = (encoder_count[0] + azimuth_block * 88) % 90112;
channel = i % 16;
theta = (float)(2 * M_PI * (counter / 90112.0 + azimuth[channel] / 360.0));
phi = (float)(2 * M_PI * altitude[channel] / 360.0);
point_cloud[0 + 3 * i] = (float)(r[i] * cos(theta) * cos(phi));//x
point_cloud[1 + 3 * i] = (float)(-r[i] * sin(theta) * cos(phi));//y
point_cloud[2 + 3 * i] = (float)(r[i] * sin(phi));//z
}
__global__
void RyT(float* R, float* T, float* P, float* Q)
{
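// RyT ("rotation and translation"): applies the 3x3 matrix R and the offset T to every point
// of P in parallel, writing the transformed cloud to Q (points packed as x,y,z triplets).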
int i = blockIdx.x * blockDim.x + threadIdx.x;
Q[0 + i * 3] = R[0 + 0 * 3] * P[0 + i * 3] + R[0 + 1 * 3] * P[1 + i * 3] + R[0 + 2 * 3] * P[2 + i * 3] + T[0];
Q[1 + i * 3] = R[1 + 0 * 3] * P[0 + i * 3] + R[1 + 1 * 3] * P[1 + i * 3] + R[1 + 2 * 3] * P[2 + i * 3] + T[1];
Q[2 + i * 3] = R[2 + 0 * 3] * P[0 + i * 3] + R[2 + 1 * 3] * P[1 + i * 3] + R[2 + 2 * 3] * P[2 + i * 3] + T[2];
}
int main(void)
{
///////Block 1: Open and read the file with 64 LiDAR data packets//////
//int i = 0;
const int N_LINE = 128;
char line[N_LINE];
FILE* document;
document = fopen("Donut_1024x16.csv", "r");
if (!document) {
perror("File opening failed");
return (-1);
}
float* h_r = NULL;//ranges (radii)
size_t bytes_r = NUM_POINTS * sizeof(float);
h_r = (float*)malloc(bytes_r);
unsigned long int h_encoder_count = 0;//initial encoder counter (then grows with 88 ticks)
int offset = 0;
unsigned long int word = 0;
int channel = 2;
int azimuth_block = 0;
int lidar_packet = 0;
int idx_line;//index of the word to read
int j = 1;//line number
while (fgets(line, N_LINE, document) != NULL)
{
//get the first values of the encoder counter
if (j == 13) h_encoder_count = atoi(line);
if (j == 14) h_encoder_count = atoi(line) << 8 | h_encoder_count;
//read the ranges
idx_line = 17 + 12 * channel + 788 * azimuth_block + 12608 * lidar_packet;
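// The CSV stores one byte per line; the offsets assume an Ouster-style packet layout of
// 12 bytes per channel block, 788 bytes per azimuth block and 12608 bytes (16 azimuth blocks)
// per LiDAR packet. The 20-bit range word is assembled below from three consecutive bytes,
// keeping only the low nibble of the third byte.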
if (j == idx_line) word = (unsigned long int) atoi(line);
if (j == idx_line + 1) word = (unsigned long int) (atoi(line) << 8) | word;
if (j == idx_line + 2) word = (unsigned long int) ((atoi(line) & 0x0000000F) << 16) | word;
if (j > (idx_line + 2))//go to next channel
{
h_r[offset] = (float)word;
offset++;
channel += 4;
}
if (channel >= 64)//go to next azimuth block
{
channel = 2;
azimuth_block++;
}
if (azimuth_block >= 16)//go to next lidar packet
{
azimuth_block = 0;
lidar_packet++;
}
if (lidar_packet >= 64) break;//done
j++;
}
fclose(document);
//printf("%ld\n",h_encoder_count);
//for(i=0;i<100;i++) printf("%.3f\n",h_r[i]);
document = fopen("beam_intrinsics.csv", "r");
if (!document) {
perror("File opening failed");
return (-1);
}
float* h_altitude = NULL;
float* h_azimuth = NULL;
size_t bytes_angles = 16 * sizeof(float);//16 channels
h_altitude = (float*)malloc(bytes_angles);
h_azimuth = (float*)malloc(bytes_angles);
j = 1;
while (fgets(line, N_LINE, document) != NULL)
{
//read altitude angles
if (j == 2) offset = 0;
if (j >= 2 && j <= 65)
{
if (j % 4 == 0)
{
h_altitude[offset] = (float)atof(line);
offset++;
}
}
//read azimuth angles
if (j == 68) offset = 0;
if (j >= 68 && j <= 131)
{
if ((j - 66) % 4 == 0)
{
h_azimuth[offset] = (float)atof(line);
offset++;
}
}
j++;
}
fclose(document);
//for(i=0;i<16;i++) printf("%.3f\n",h_altitude[i]);
//for(i=0;i<16;i++) printf("%.3f\n",h_azimuth[i]);
///////End of Block 1///////
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds1 = 0;//for "Conversion"
float milliseconds2 = 0;//for "RyT"
int GridSize = 16;
int BlockSize = NUM_POINTS / GridSize;
cudaError_t err = cudaSuccess;//for checking errors in kernels
///////Block 2: Conversion to Cartesian coordinates///////
//allocate memory for storing the cloud (format: x1y1z1 x2y2z2 x3y3z3 ...)
float* h_P = (float*)malloc(3 * bytes_r);
float* d_P = NULL;
float* d_r = NULL;
float* d_azimuth = NULL;
float* d_altitude = NULL;
unsigned long int* d_encoder_count;
cudaMalloc(&d_P, 3 * bytes_r);
cudaMalloc(&d_r, bytes_r);
cudaMalloc(&d_azimuth, bytes_angles);
cudaMalloc(&d_altitude, bytes_angles);
cudaMalloc(&d_encoder_count, sizeof(unsigned long int));
//move data to GPU
cudaMemcpy(d_r, h_r, bytes_r, cudaMemcpyHostToDevice);
cudaMemcpy(d_azimuth, h_azimuth, bytes_angles, cudaMemcpyHostToDevice);
cudaMemcpy(d_altitude, h_altitude, bytes_angles, cudaMemcpyHostToDevice);
cudaMemcpy(d_encoder_count, &h_encoder_count, sizeof(unsigned long int), cudaMemcpyHostToDevice);
//Launch "Conversion" kernel
cudaEventRecord(start);
Conversion << <GridSize, BlockSize >> > (d_r, d_encoder_count, d_altitude, d_azimuth, d_P);
err = cudaGetLastError();
if (err != cudaSuccess) printf("Error in Conversion kernel: %s\n", cudaGetErrorString(err));
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds1, start, stop);
printf("Conversion kernel's elapsed time: %.3f ms\n", milliseconds1);
///////End of Block 2///////
///////Block 3: Compute the rotation and translation///////
//converted cloud
float* h_Q = (float*)malloc(3 * bytes_r);
float* d_Q = NULL;
cudaMalloc(&d_Q, 3 * bytes_r);
//rotation matrix and translation vector
float* h_R = (float*)malloc(9 * sizeof(float));
float* h_T = (float*)malloc(3 * sizeof(float));
float* d_R, * d_T;
cudaMalloc(&d_R, 9 * sizeof(float));
cudaMalloc(&d_T, 3 * sizeof(float));
//Translation values
h_T[0] = 0.8f;//x
h_T[1] = -0.3f;//y
h_T[2] = 0.2f;//z
//Rotation values (rad)
float rx = 0.2f;//axis x
float ry = -0.2f;//axis y
float rz = 0.05f;//axis z
float cx = (float)cos(rx); float cy = (float)cos(ry); float cz = (float)cos(rz);
float sx = (float)sin(rx); float sy = (float)sin(ry); float sz = (float)sin(rz);
h_R[0] = cy * cz; h_R[1] = (cz * sx * sy) + (cx * sz); h_R[2] = -(cx * cz * sy) + (sx * sz);
h_R[3] = -cy * sz; h_R[4] = (cx * cz) - (sx * sy * sz); h_R[5] = (cx * sy * sz) + (cz * sx);
h_R[6] = sy; h_R[7] = -cy * sx; h_R[8] = cx * cy;
//move data to GPU
cudaMemcpy(d_R, h_R, 9 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_T, h_T, 3 * sizeof(float), cudaMemcpyHostToDevice);
cudaEventRecord(start);
RyT << <GridSize, BlockSize >> > (d_R, d_T, d_P, d_Q);
err = cudaGetLastError();
if (err != cudaSuccess) printf("Error in RyT kernel: %s\n", cudaGetErrorString(err));
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds2, start, stop);
printf("RyT kernel's elapsed time: %.3f ms\n", milliseconds2);
//move data back to the CPU
cudaMemcpy(h_P, d_P, 3 * bytes_r, cudaMemcpyDeviceToHost);
cudaMemcpy(h_Q, d_Q, 3 * bytes_r, cudaMemcpyDeviceToHost);
///////End of Block 3///////
/*Print clouds into a file*/
print_cloud(document, h_P, "GPU_Output_file_P.csv");
print_cloud(document, h_Q, "GPU_Output_file_Q.csv");
printf("Success!\n");
//Free variables
free(h_P), cudaFree(d_P);
free(h_Q), cudaFree(d_Q);
free(h_T), free(h_R);
cudaFree(d_T), cudaFree(d_R);
free(h_r), free(h_altitude), free(h_azimuth);
cudaFree(d_r), cudaFree(d_altitude), cudaFree(d_azimuth), cudaFree(d_encoder_count);
return 0;
}
void print_cloud(FILE* document, float* cloud, const char* name)
{
int i, j;
fopen_s(&document, name, "w");
if (!document) printf("Error: File opening failed\n");
for (i = 0; i < NUM_POINTS; i++)
{
for (j = 0; j < 2; j++) fprintf(document, "%.3f, ", cloud[j + i * 3]);
fprintf(document, "%.3f\n ", cloud[j + i * 3]);
}
fprintf(document, "\n");
fclose(document);
}
|
783e0cd91885b2e0136de53494f67f8ca3b5924b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include "timer.c"
#include "common.h"
#include "kd.cu"
#if defined (GPU) || defined (GPU_KD)
// From the NVIDIA CUDA programming guide. No idea how it works
__device__ double atomicAdd(double* address, double val)
{
double old = *address, assumed;
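// Compare-and-swap emulation of a double-precision atomicAdd: reinterpret the double as a
// 64-bit integer, try to swap in (assumed + val), and retry whenever another thread has
// modified the value in the meantime.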
do {
assumed = old;
old = __longlong_as_double(atomicCAS((unsigned long long int*)address,
__double_as_longlong(assumed),
__double_as_longlong(val + assumed)));
} while (assumed != old);
return old;
}
__global__ void ResetCentroidForEachCluster(Cluster *clusters)
{
int pt = blockIdx.x*blockDim.x + threadIdx.x;
// Set clusters[].noOfPoints to 0. Only for K centroids
if (pt < K) {
clusters[pt].noOfPoints = 0;
}
}
__global__ void ComputeClusters(Point *points, Cluster *clusters, Point *tempPoints)
{
int pt = blockIdx.x*blockDim.x + threadIdx.x;
int i; double max; int inCluster;
if (pt >= N)
return;
// Save the old centroid and clear the x and y components of
// each point. We're going to use first K of these to store
// the sum of co-ordinates of points in this cluster.
// clusterId field is used to save old centroid for each point
// so that we know when to stop iterating.
tempPoints[pt].clusterId = points[pt].clusterId;
tempPoints[pt].loc[X_AXIS] = 0.0;
tempPoints[pt].loc[Y_AXIS] = 0.0;
// Compute the nearest centroid.
max = GetDistanceGPU(points[pt], clusters[0].pt);
inCluster = 0;
for (i = 0; i < K; i++) {
if (GetDistanceGPU(points[pt], clusters[i].pt) < max) {
inCluster = i;
max = GetDistanceGPU(points[pt], clusters[i].pt);
}
}
atomicAdd(&clusters[inCluster].noOfPoints, 1);
// Bottle neck I'm sure.
atomicAdd(&tempPoints[inCluster].loc[X_AXIS], points[pt].loc[X_AXIS]);
atomicAdd(&tempPoints[inCluster].loc[Y_AXIS], points[pt].loc[Y_AXIS]);
points[pt].clusterId = inCluster;
}
__global__ void ComputeCentroids(Cluster *clusters, Point *tempPoints)
{
int pt = blockIdx.x*blockDim.x + threadIdx.x;
// Now calculate the new centroids.
if (pt < K) {
clusters[pt].pt.loc[X_AXIS] = tempPoints[pt].loc[X_AXIS]/clusters[pt].noOfPoints;
clusters[pt].pt.loc[Y_AXIS] = tempPoints[pt].loc[Y_AXIS]/clusters[pt].noOfPoints;
}
}
__global__ void RepeatNeeded(Point *points, Point *tempPoints, unsigned int *key)
{
int pt = blockIdx.x*blockDim.x + threadIdx.x;
if (pt < N) {
if (points[pt].clusterId != tempPoints[pt].clusterId) {
*key = 1;
}
}
}
#if defined (GPU_KD)
__global__ void ComputeClustersKdTree(Point *points, Cluster *clusters, Point *tempPoints, KdTree *kdTree, char *visitedPerThread)
{
int pt = blockIdx.x*blockDim.x + threadIdx.x;
int inCluster;
Cluster *nearestCluster;
char *visitedForThisThread = visitedPerThread +(K*pt);
if (pt >= N)
return;
// Save the old centroid and clear the x and y components of
// each point. We're going to use first K of these to store
// the sum of co-ordinates of points in this cluster.
// clusterId field is used to save old centroid for each point
// so that we know when to stop iterating.
tempPoints[pt].clusterId = points[pt].clusterId;
tempPoints[pt].loc[X_AXIS] = 0.0;
tempPoints[pt].loc[Y_AXIS] = 0.0;
// Compute the nearest centroid.
nearestCluster = NearestNeighbourGPU
(kdTree, clusters, visitedForThisThread, points[pt], K);
inCluster = nearestCluster->pt.clusterId;
atomicAdd(&clusters[inCluster].noOfPoints, 1);
// Bottle neck I'm sure.
atomicAdd(&tempPoints[inCluster].loc[X_AXIS], points[pt].loc[X_AXIS]);
atomicAdd(&tempPoints[inCluster].loc[Y_AXIS], points[pt].loc[Y_AXIS]);
points[pt].clusterId = inCluster;
}
void DoKmeansGPUKdTree (Point *points, Cluster *clusters)
{
Point *dPoints, *dTempPoints;
Cluster *dClusters; unsigned int *repeat, repeatHost;
KdTree *kdTree, *dKdTree;
char *visitedPerThread;
hipMalloc ((void **)&dPoints, sizeof(Point)*N);
hipMalloc ((void **)&dClusters, sizeof(Cluster)*K);
hipMalloc ((void **)&dTempPoints, sizeof(Point)*N);
hipMalloc ((void **)&repeat, sizeof(unsigned int));
hipMalloc ((void **)&visitedPerThread, sizeof(char)*K*N);
hipMalloc ((void **)&dKdTree, sizeof(KdTree)*K);
hipMemcpy(dPoints, points, sizeof(Point)*N, hipMemcpyHostToDevice);
hipMemcpy(dClusters, clusters, sizeof(Cluster)*K, hipMemcpyHostToDevice);
dim3 threadsPerBlock (256);
dim3 blocksPerGrid (N/threadsPerBlock.x);
do {
hipLaunchKernelGGL(( ResetCentroidForEachCluster), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dClusters);
// Copy kdtree between host and device. (KdTree computation is done in host).
hipMemcpy(clusters, dClusters, sizeof(Cluster)*K, hipMemcpyDeviceToHost);
kdTree = BuildKdTree(clusters, K, false);
hipMemcpy(dKdTree, kdTree, sizeof(KdTree)*K, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ComputeClustersKdTree), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
dPoints, dClusters, dTempPoints, dKdTree, visitedPerThread);
hipLaunchKernelGGL(( ComputeCentroids), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dClusters, dTempPoints);
hipMemset(repeat, 0, sizeof(unsigned int));
hipLaunchKernelGGL(( RepeatNeeded), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dPoints, dTempPoints, repeat);
hipMemcpy(&repeatHost, repeat, sizeof(unsigned int), hipMemcpyDeviceToHost);
} while (repeatHost);
hipMemcpy(points, dPoints, sizeof(Point)*N, hipMemcpyDeviceToHost);
hipMemcpy(clusters, dClusters, sizeof(Cluster)*K, hipMemcpyDeviceToHost);
hipFree(dPoints);
hipFree(dClusters);
hipFree(dTempPoints);
hipFree(repeat);
hipFree(visitedPerThread);
hipFree(dKdTree);
}
#endif // defined (GPU_KD)
void DoKmeansGPU (Point *points, Cluster *clusters)
{
Point *dPoints, *dTempPoints;
Cluster *dClusters; unsigned int *repeat, repeatHost;
hipMalloc ((void **)&dPoints, sizeof(Point)*N);
hipMalloc ((void **)&dClusters, sizeof(Cluster)*K);
hipMalloc ((void **)&dTempPoints, sizeof(Point)*N);
hipMalloc ((void **)&repeat, sizeof(unsigned int));
hipMemcpy(dPoints, points, sizeof(Point)*N, hipMemcpyHostToDevice);
hipMemcpy(dClusters, clusters, sizeof(Cluster)*K, hipMemcpyHostToDevice);
dim3 threadsPerBlock (256);
dim3 blocksPerGrid (N/threadsPerBlock.x);
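// One Lloyd iteration per pass: reset the per-cluster counts, assign every point to its
// nearest centroid, recompute the centroids, and repeat until no point changes cluster.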
do {
hipLaunchKernelGGL(( ResetCentroidForEachCluster), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dClusters);
hipLaunchKernelGGL(( ComputeClusters), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dPoints, dClusters, dTempPoints);
hipLaunchKernelGGL(( ComputeCentroids), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dClusters, dTempPoints);
hipMemset(repeat, 0, sizeof(unsigned int));
hipLaunchKernelGGL(( RepeatNeeded), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dPoints, dTempPoints, repeat);
hipMemcpy(&repeatHost, repeat, sizeof(unsigned int), hipMemcpyDeviceToHost);
} while (repeatHost);
hipMemcpy(points, dPoints, sizeof(Point)*N, hipMemcpyDeviceToHost);
hipMemcpy(clusters, dClusters, sizeof(Cluster)*K, hipMemcpyDeviceToHost);
hipFree(dPoints);
hipFree(dClusters);
hipFree(dTempPoints);
hipFree(repeat);
}
#endif // defined(GPU) || defined(GPU_KD)
#if defined (CPU_KD)
void DoKmeansCPUKdTree (Point *points, Cluster *clusters)
{
int i, inCluster;
bool changed;
Point *tempPoints;
Cluster *nearestCluster;
KdTree *kdTree;
// One for each cluster (and point). For cluster, use x and y, for point, clusterId.
tempPoints = (Point *) malloc (sizeof(Point)*N);
do {
memset(tempPoints, 0, sizeof(Point)*N);
for (i = 0; i < K; i++) {
clusters[i].noOfPoints = 0;
}
// Save the old clusterId for each point. Reusing tempPoints
for (i = 0; i < N; i++) {
tempPoints[i].clusterId = points[i].clusterId;
}
kdTree = BuildKdTree(clusters, K, false);
// For each point, find the nearest centroid.
for (i = 0; i < N; i++) {
nearestCluster = NearestNeighbour(kdTree, clusters, points[i]);
inCluster = nearestCluster->pt.clusterId;
clusters[inCluster].noOfPoints++;
tempPoints[inCluster].loc[X_AXIS] += points[i].loc[X_AXIS];
tempPoints[inCluster].loc[Y_AXIS] += points[i].loc[Y_AXIS];
points[i].clusterId = inCluster;
}
// Compute new centroid for each cluster
for (i = 0; i < K; i++) {
// Assuming that each cluster has at least one point in it.
assert(clusters[i].noOfPoints != 0);
clusters[i].pt.loc[X_AXIS] = tempPoints[i].loc[X_AXIS]/clusters[i].noOfPoints;
clusters[i].pt.loc[Y_AXIS] = tempPoints[i].loc[Y_AXIS]/clusters[i].noOfPoints;
}
// Check if anything has changed
changed = false;
for (i = 0; i < N; i++) {
if (points[i].clusterId != tempPoints[i].clusterId) {
changed = true;
break;
}
}
} while (changed);
}
#endif // #if defined (CPU_KD)
void DoKmeansCPU (Point *points, Cluster *clusters)
{
double max;
int i, j, inCluster;
bool changed;
Point *tempPoints;
// One for each cluster (and point). For cluster, use x and y, for point, clusterId.
tempPoints = (Point *) malloc (sizeof(Point)*N);
do {
memset(tempPoints, 0, sizeof(Point)*N);
for (i = 0; i < K; i++) {
clusters[i].noOfPoints = 0;
}
// Save the old clusterId for each point. Reusing tempPoints
for (i = 0; i < N; i++) {
tempPoints[i].clusterId = points[i].clusterId;
}
// For each point, find the nearest centroid.
for (i = 0; i < N; i++) {
max = GetDistance(points[i], clusters[0].pt);
inCluster = 0;
for (j = 0; j < K; j++) {
if (GetDistance(points[i], clusters[j].pt) < max) {
inCluster = j;
// TODO: We should next store these distances, instead of re-computing
// (I don't mean from above call, I mean totally for the program).
max = GetDistance(points[i], clusters[j].pt);
}
}
clusters[inCluster].noOfPoints++;
tempPoints[inCluster].loc[X_AXIS] += points[i].loc[X_AXIS];
tempPoints[inCluster].loc[Y_AXIS] += points[i].loc[Y_AXIS];
points[i].clusterId = inCluster;
}
// Compute new centroid for each cluster
for (i = 0; i < K; i++) {
// Assuming that each cluster has at least one point in it.
assert(clusters[i].noOfPoints != 0);
clusters[i].pt.loc[X_AXIS] = tempPoints[i].loc[X_AXIS]/clusters[i].noOfPoints;
clusters[i].pt.loc[Y_AXIS] = tempPoints[i].loc[Y_AXIS]/clusters[i].noOfPoints;
}
// Check if anything has changed
changed = false;
for (i = 0; i < N; i++) {
if (points[i].clusterId != tempPoints[i].clusterId) {
changed = true;
break;
}
}
} while (changed);
}
int main (int argc, char *argv[])
{
Point *pointsCPU;
Cluster *clustersCPU;
int i, j;
#ifdef GPU
Point *pointsGPU;
Cluster *clustersGPU;
#endif
#ifdef GPU_KD
Point *pointsGPUKdTree;
Cluster *clustersGPUKdTree;
#endif
#ifdef CPU_KD
Point *pointsCPUKdTree;
Cluster *clustersCPUKdTree;
#endif
srandom(time(NULL));
pointsCPU = (Point *) malloc (sizeof(Point)*N);
clustersCPU = (Cluster *) malloc (sizeof(Cluster)*K);
// Get the points randomly
for (i = 0; i < N; i++) {
pointsCPU[i].loc[X_AXIS] = (random()/1021322);
pointsCPU[i].loc[Y_AXIS] = (random()/1021322);
pointsCPU[i].clusterId = -1;
}
// Initialize clusters
for (i = 0; i < K; i++) {
clustersCPU[i].pt.clusterId = i;
clustersCPU[i].noOfPoints = 0;
j = random()%N;
if (pointsCPU[j].clusterId != -1) {
i--; continue;
// Potential infinite loop
}
pointsCPU[j].clusterId = i;
clustersCPU[i].pt.loc[X_AXIS] = pointsCPU[j].loc[X_AXIS];
clustersCPU[i].pt.loc[Y_AXIS] = pointsCPU[j].loc[Y_AXIS];
}
#ifdef DEBUG
printf ("Initial points:\n");
for (i = 0; i < N; i++) {
printf ("x=%.2f,y=%.2f,clusterId=%d\n", pointsCPU[i].loc[X_AXIS], pointsCPU[i].loc[Y_AXIS], pointsCPU[i].clusterId);
}
printf ("Initial clusters:\n");
for (i = 0; i < K; i++) {
printf("clusterId=%d,noOfPoints=%d,centroidX=%.2f,centroidY=%.2f\n", clustersCPU[i].pt.clusterId,
clustersCPU[i].noOfPoints, clustersCPU[i].pt.loc[X_AXIS], clustersCPU[i].pt.loc[Y_AXIS]);
}
#endif // DEBUG
#ifdef GPU_KD
pointsGPUKdTree = (Point *) malloc (sizeof(Point)*N);
clustersGPUKdTree = (Cluster *) malloc (sizeof(Cluster)*K);
memcpy(pointsGPUKdTree, pointsCPU, sizeof(Point)*N);
memcpy(clustersGPUKdTree, clustersCPU, sizeof(Cluster)*K);
tstart();
DoKmeansGPUKdTree(pointsGPUKdTree, clustersGPUKdTree);
tend();
printf("%f seconds on GPU KdTree.\n", tval());
#endif
#ifdef GPU
pointsGPU = (Point *) malloc (sizeof(Point)*N);
clustersGPU = (Cluster *) malloc (sizeof(Cluster)*K);
memcpy(pointsGPU, pointsCPU, sizeof(Point)*N);
memcpy(clustersGPU, clustersCPU, sizeof(Cluster)*K);
tstart();
DoKmeansGPU(pointsGPU, clustersGPU);
tend();
printf("%f seconds on GPU.\n", tval());
#endif
#ifdef CPU_KD
pointsCPUKdTree = (Point *) malloc (sizeof(Point)*N);
clustersCPUKdTree = (Cluster *) malloc (sizeof(Cluster)*K);
memcpy(pointsCPUKdTree, pointsCPU, sizeof(Point)*N);
memcpy(clustersCPUKdTree, clustersCPU, sizeof(Cluster)*K);
tstart();
DoKmeansCPUKdTree(pointsCPUKdTree, clustersCPUKdTree);
tend();
printf("%f seconds on CPU KdTree.\n", tval());
#endif
// Note plain CPU should always be at the end. Data for other versions are
// copied from here. So don't want it to change before copying.
tstart();
DoKmeansCPU(pointsCPU, clustersCPU);
tend();
printf("%f seconds on CPU.\n", tval());
#ifdef PRETTY_PRINT
#if defined (GPU_KD)
// Showing GPU_KD dumps
FILE *fp; char buf[20];
system ("rm /tmp/*plot");
for (i = 0; i < N; i++) {
sprintf(buf, "/tmp/%d.plot", pointsGPUKdTree[i].clusterId);
fp = fopen (buf, "a");
if (fp) {
fprintf (fp, "%.2f %.2f #%d GPUKd\n", pointsGPUKdTree[i].loc[X_AXIS], pointsGPUKdTree[i].loc[Y_AXIS], pointsGPUKdTree[i].clusterId);
fclose(fp);
}
}
#elif defined (GPU)
// Showing GPU dumps
FILE *fp; char buf[20];
system ("rm /tmp/*plot");
for (i = 0; i < N; i++) {
sprintf(buf, "/tmp/%d.plot", pointsGPU[i].clusterId);
fp = fopen (buf, "a");
if (fp) {
fprintf (fp, "%.2f %.2f #%d GPU\n", pointsGPU[i].loc[X_AXIS], pointsGPU[i].loc[Y_AXIS], pointsGPU[i].clusterId);
fclose(fp);
}
}
#elif defined(CPU_KD)
// Showing CPU_KD dumps
FILE *fp; char buf[20];
system ("rm /tmp/*plot");
for (i = 0; i < N; i++) {
sprintf(buf, "/tmp/%d.plot", pointsCPUKdTree[i].clusterId);
fp = fopen (buf, "a");
if (fp) {
fprintf (fp, "%.2f %.2f #%d CPUKd\n", pointsCPUKdTree[i].loc[X_AXIS], pointsCPUKdTree[i].loc[Y_AXIS], pointsCPUKdTree[i].clusterId);
fclose(fp);
}
}
#else
// Showing CPU dumps
FILE *fp; char buf[20];
system ("rm /tmp/*plot");
for (i = 0; i < N; i++) {
sprintf(buf, "/tmp/%d.plot", pointsCPU[i].clusterId);
fp = fopen (buf, "a");
if (fp) {
fprintf (fp, "%.2f %.2f #%d CPU\n", pointsCPU[i].loc[X_AXIS], pointsCPU[i].loc[Y_AXIS], pointsCPU[i].clusterId);
fclose(fp);
}
}
#endif // #if defined (GPU_KD)
#endif // PRETTY_PRINT
return 0;
}
#if 0
/********** Pretty print script ***********
// string=""
// for plot in /tmp/*.plot
// do
// string="${string},\"$plot\""
// done
//
// string=`cut -c 2- <<EOF
/ $string
// EOF`
//
// echo "set key off" > /tmp/plot
// echo "plot $string" >> /tmp/plot
// gnuplot -persist < /tmp/plot
//
// # ah
************** End script ****************/
#endif
| 783e0cd91885b2e0136de53494f67f8ca3b5924b.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include "timer.c"
#include "common.h"
#include "kd.cu"
#if defined (GPU) || defined (GPU_KD)
// From the NVIDIA CUDA programming guide. No idea how it works
__device__ double atomicAdd(double* address, double val)
{
double old = *address, assumed;
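// Compare-and-swap emulation of a double-precision atomicAdd: reinterpret the double as a
// 64-bit integer, try to swap in (assumed + val), and retry whenever another thread has
// modified the value in the meantime.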
do {
assumed = old;
old = __longlong_as_double(atomicCAS((unsigned long long int*)address,
__double_as_longlong(assumed),
__double_as_longlong(val + assumed)));
} while (assumed != old);
return old;
}
__global__ void ResetCentroidForEachCluster(Cluster *clusters)
{
int pt = blockIdx.x*blockDim.x + threadIdx.x;
// Set clusters[].noOfPoints to 0. Only for K centroids
if (pt < K) {
clusters[pt].noOfPoints = 0;
}
}
__global__ void ComputeClusters(Point *points, Cluster *clusters, Point *tempPoints)
{
int pt = blockIdx.x*blockDim.x + threadIdx.x;
int i; double max; int inCluster;
if (pt >= N)
return;
// Save the old centroid and clear the x and y components of
// each point. We're going to use first K of these to store
// the sum of co-ordinates of points in this cluster.
// clusterId field is used to save old centroid for each point
// so that we know when to stop iterating.
tempPoints[pt].clusterId = points[pt].clusterId;
tempPoints[pt].loc[X_AXIS] = 0.0;
tempPoints[pt].loc[Y_AXIS] = 0.0;
// Compute the nearest centroid.
max = GetDistanceGPU(points[pt], clusters[0].pt);
inCluster = 0;
for (i = 0; i < K; i++) {
if (GetDistanceGPU(points[pt], clusters[i].pt) < max) {
inCluster = i;
max = GetDistanceGPU(points[pt], clusters[i].pt);
}
}
atomicAdd(&clusters[inCluster].noOfPoints, 1);
// Bottle neck I'm sure.
atomicAdd(&tempPoints[inCluster].loc[X_AXIS], points[pt].loc[X_AXIS]);
atomicAdd(&tempPoints[inCluster].loc[Y_AXIS], points[pt].loc[Y_AXIS]);
points[pt].clusterId = inCluster;
}
__global__ void ComputeCentroids(Cluster *clusters, Point *tempPoints)
{
int pt = blockIdx.x*blockDim.x + threadIdx.x;
// Now calculate the new centroids.
if (pt < K) {
clusters[pt].pt.loc[X_AXIS] = tempPoints[pt].loc[X_AXIS]/clusters[pt].noOfPoints;
clusters[pt].pt.loc[Y_AXIS] = tempPoints[pt].loc[Y_AXIS]/clusters[pt].noOfPoints;
}
}
__global__ void RepeatNeeded(Point *points, Point *tempPoints, unsigned int *key)
{
int pt = blockIdx.x*blockDim.x + threadIdx.x;
if (pt < N) {
if (points[pt].clusterId != tempPoints[pt].clusterId) {
*key = 1;
}
}
}
#if defined (GPU_KD)
__global__ void ComputeClustersKdTree(Point *points, Cluster *clusters, Point *tempPoints, KdTree *kdTree, char *visitedPerThread)
{
int pt = blockIdx.x*blockDim.x + threadIdx.x;
int inCluster;
Cluster *nearestCluster;
char *visitedForThisThread = visitedPerThread +(K*pt);
if (pt >= N)
return;
// Save the old centroid and clear the x and y components of
// each point. We're going to use first K of these to store
// the sum of co-ordinates of points in this cluster.
// clusterId field is used to save old centroid for each point
// so that we know when to stop iterating.
tempPoints[pt].clusterId = points[pt].clusterId;
tempPoints[pt].loc[X_AXIS] = 0.0;
tempPoints[pt].loc[Y_AXIS] = 0.0;
// Compute the nearest centroid.
nearestCluster = NearestNeighbourGPU
(kdTree, clusters, visitedForThisThread, points[pt], K);
inCluster = nearestCluster->pt.clusterId;
atomicAdd(&clusters[inCluster].noOfPoints, 1);
// Bottle neck I'm sure.
atomicAdd(&tempPoints[inCluster].loc[X_AXIS], points[pt].loc[X_AXIS]);
atomicAdd(&tempPoints[inCluster].loc[Y_AXIS], points[pt].loc[Y_AXIS]);
points[pt].clusterId = inCluster;
}
void DoKmeansGPUKdTree (Point *points, Cluster *clusters)
{
Point *dPoints, *dTempPoints;
Cluster *dClusters; unsigned int *repeat, repeatHost;
KdTree *kdTree, *dKdTree;
char *visitedPerThread;
cudaMalloc ((void **)&dPoints, sizeof(Point)*N);
cudaMalloc ((void **)&dClusters, sizeof(Cluster)*K);
cudaMalloc ((void **)&dTempPoints, sizeof(Point)*N);
cudaMalloc ((void **)&repeat, sizeof(unsigned int));
cudaMalloc ((void **)&visitedPerThread, sizeof(char)*K*N);
cudaMalloc ((void **)&dKdTree, sizeof(KdTree)*K);
cudaMemcpy(dPoints, points, sizeof(Point)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dClusters, clusters, sizeof(Cluster)*K, cudaMemcpyHostToDevice);
dim3 threadsPerBlock (256);
dim3 blocksPerGrid (N/threadsPerBlock.x);
do {
ResetCentroidForEachCluster<<<blocksPerGrid, threadsPerBlock>>>(dClusters);
// Copy kdtree between host and device. (KdTree computation is done in host).
cudaMemcpy(clusters, dClusters, sizeof(Cluster)*K, cudaMemcpyDeviceToHost);
kdTree = BuildKdTree(clusters, K, false);
cudaMemcpy(dKdTree, kdTree, sizeof(KdTree)*K, cudaMemcpyHostToDevice);
ComputeClustersKdTree<<<blocksPerGrid, threadsPerBlock>>>
(dPoints, dClusters, dTempPoints, dKdTree, visitedPerThread);
ComputeCentroids<<<blocksPerGrid, threadsPerBlock>>>(dClusters, dTempPoints);
cudaMemset(repeat, 0, sizeof(unsigned int));
RepeatNeeded<<<blocksPerGrid, threadsPerBlock>>>(dPoints, dTempPoints, repeat);
cudaMemcpy(&repeatHost, repeat, sizeof(unsigned int), cudaMemcpyDeviceToHost);
} while (repeatHost);
cudaMemcpy(points, dPoints, sizeof(Point)*N, cudaMemcpyDeviceToHost);
cudaMemcpy(clusters, dClusters, sizeof(Cluster)*K, cudaMemcpyDeviceToHost);
cudaFree(dPoints);
cudaFree(dClusters);
cudaFree(dTempPoints);
cudaFree(repeat);
cudaFree(visitedPerThread);
cudaFree(dKdTree);
}
#endif // defined (GPU_KD)
void DoKmeansGPU (Point *points, Cluster *clusters)
{
Point *dPoints, *dTempPoints;
Cluster *dClusters; unsigned int *repeat, repeatHost;
cudaMalloc ((void **)&dPoints, sizeof(Point)*N);
cudaMalloc ((void **)&dClusters, sizeof(Cluster)*K);
cudaMalloc ((void **)&dTempPoints, sizeof(Point)*N);
cudaMalloc ((void **)&repeat, sizeof(unsigned int));
cudaMemcpy(dPoints, points, sizeof(Point)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dClusters, clusters, sizeof(Cluster)*K, cudaMemcpyHostToDevice);
dim3 threadsPerBlock (256);
dim3 blocksPerGrid (N/threadsPerBlock.x);
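// One Lloyd iteration per pass: reset the per-cluster counts, assign every point to its
// nearest centroid, recompute the centroids, and repeat until no point changes cluster.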
do {
ResetCentroidForEachCluster<<<blocksPerGrid, threadsPerBlock>>>(dClusters);
ComputeClusters<<<blocksPerGrid, threadsPerBlock>>>(dPoints, dClusters, dTempPoints);
ComputeCentroids<<<blocksPerGrid, threadsPerBlock>>>(dClusters, dTempPoints);
cudaMemset(repeat, 0, sizeof(unsigned int));
RepeatNeeded<<<blocksPerGrid, threadsPerBlock>>>(dPoints, dTempPoints, repeat);
cudaMemcpy(&repeatHost, repeat, sizeof(unsigned int), cudaMemcpyDeviceToHost);
} while (repeatHost);
cudaMemcpy(points, dPoints, sizeof(Point)*N, cudaMemcpyDeviceToHost);
cudaMemcpy(clusters, dClusters, sizeof(Cluster)*K, cudaMemcpyDeviceToHost);
cudaFree(dPoints);
cudaFree(dClusters);
cudaFree(dTempPoints);
cudaFree(repeat);
}
#endif // defined(GPU) || defined(GPU_KD)
#if defined (CPU_KD)
void DoKmeansCPUKdTree (Point *points, Cluster *clusters)
{
int i, inCluster;
bool changed;
Point *tempPoints;
Cluster *nearestCluster;
KdTree *kdTree;
// One for each cluster (and point). For cluster, use x and y, for point, clusterId.
tempPoints = (Point *) malloc (sizeof(Point)*N);
do {
memset(tempPoints, 0, sizeof(Point)*N);
for (i = 0; i < K; i++) {
clusters[i].noOfPoints = 0;
}
// Save the old clusterId for each point. Reusing tempPoints
for (i = 0; i < N; i++) {
tempPoints[i].clusterId = points[i].clusterId;
}
kdTree = BuildKdTree(clusters, K, false);
// For each point, find the nearest centroid.
for (i = 0; i < N; i++) {
nearestCluster = NearestNeighbour(kdTree, clusters, points[i]);
inCluster = nearestCluster->pt.clusterId;
clusters[inCluster].noOfPoints++;
tempPoints[inCluster].loc[X_AXIS] += points[i].loc[X_AXIS];
tempPoints[inCluster].loc[Y_AXIS] += points[i].loc[Y_AXIS];
points[i].clusterId = inCluster;
}
// Compute new centroid for each cluster
for (i = 0; i < K; i++) {
// Assuming that each cluster has at least one point in it.
assert(clusters[i].noOfPoints != 0);
clusters[i].pt.loc[X_AXIS] = tempPoints[i].loc[X_AXIS]/clusters[i].noOfPoints;
clusters[i].pt.loc[Y_AXIS] = tempPoints[i].loc[Y_AXIS]/clusters[i].noOfPoints;
}
// Check if anything has changed
changed = false;
for (i = 0; i < N; i++) {
if (points[i].clusterId != tempPoints[i].clusterId) {
changed = true;
break;
}
}
} while (changed);
}
#endif // #if defined (CPU_KD)
void DoKmeansCPU (Point *points, Cluster *clusters)
{
double max;
int i, j, inCluster;
bool changed;
Point *tempPoints;
// One for each cluster (and point). For cluster, use x and y, for point, clusterId.
tempPoints = (Point *) malloc (sizeof(Point)*N);
do {
memset(tempPoints, 0, sizeof(Point)*N);
for (i = 0; i < K; i++) {
clusters[i].noOfPoints = 0;
}
// Save the old clusterId for each point. Reusing tempPoints
for (i = 0; i < N; i++) {
tempPoints[i].clusterId = points[i].clusterId;
}
// For each point, find the nearest centroid.
for (i = 0; i < N; i++) {
max = GetDistance(points[i], clusters[0].pt);
inCluster = 0;
for (j = 0; j < K; j++) {
if (GetDistance(points[i], clusters[j].pt) < max) {
inCluster = j;
// TODO: We should next store these distances, instead of re-computing
// (I don't mean from above call, I mean totally for the program).
max = GetDistance(points[i], clusters[j].pt);
}
}
clusters[inCluster].noOfPoints++;
tempPoints[inCluster].loc[X_AXIS] += points[i].loc[X_AXIS];
tempPoints[inCluster].loc[Y_AXIS] += points[i].loc[Y_AXIS];
points[i].clusterId = inCluster;
}
// Compute new centroid for each cluster
for (i = 0; i < K; i++) {
// Assuming that each cluster has at least one point in it.
assert(clusters[i].noOfPoints != 0);
clusters[i].pt.loc[X_AXIS] = tempPoints[i].loc[X_AXIS]/clusters[i].noOfPoints;
clusters[i].pt.loc[Y_AXIS] = tempPoints[i].loc[Y_AXIS]/clusters[i].noOfPoints;
}
// Check if anything has changed
changed = false;
for (i = 0; i < N; i++) {
if (points[i].clusterId != tempPoints[i].clusterId) {
changed = true;
break;
}
}
} while (changed);
}
int main (int argc, char *argv[])
{
Point *pointsCPU;
Cluster *clustersCPU;
int i, j;
#ifdef GPU
Point *pointsGPU;
Cluster *clustersGPU;
#endif
#ifdef GPU_KD
Point *pointsGPUKdTree;
Cluster *clustersGPUKdTree;
#endif
#ifdef CPU_KD
Point *pointsCPUKdTree;
Cluster *clustersCPUKdTree;
#endif
srandom(time(NULL));
pointsCPU = (Point *) malloc (sizeof(Point)*N);
clustersCPU = (Cluster *) malloc (sizeof(Cluster)*K);
// Get the points randomly
for (i = 0; i < N; i++) {
pointsCPU[i].loc[X_AXIS] = (random()/1021322);
pointsCPU[i].loc[Y_AXIS] = (random()/1021322);
pointsCPU[i].clusterId = -1;
}
// Initialize clusters
for (i = 0; i < K; i++) {
clustersCPU[i].pt.clusterId = i;
clustersCPU[i].noOfPoints = 0;
j = random()%N;
if (pointsCPU[j].clusterId != -1) {
i--; continue;
// Potential infinite loop
}
pointsCPU[j].clusterId = i;
clustersCPU[i].pt.loc[X_AXIS] = pointsCPU[j].loc[X_AXIS];
clustersCPU[i].pt.loc[Y_AXIS] = pointsCPU[j].loc[Y_AXIS];
}
#ifdef DEBUG
printf ("Initial points:\n");
for (i = 0; i < N; i++) {
printf ("x=%.2f,y=%.2f,clusterId=%d\n", pointsCPU[i].loc[X_AXIS], pointsCPU[i].loc[Y_AXIS], pointsCPU[i].clusterId);
}
printf ("Initial clusters:\n");
for (i = 0; i < K; i++) {
printf("clusterId=%d,noOfPoints=%d,centroidX=%.2f,centroidY=%.2f\n", clustersCPU[i].pt.clusterId,
clustersCPU[i].noOfPoints, clustersCPU[i].pt.loc[X_AXIS], clustersCPU[i].pt.loc[Y_AXIS]);
}
#endif // DEBUG
#ifdef GPU_KD
pointsGPUKdTree = (Point *) malloc (sizeof(Point)*N);
clustersGPUKdTree = (Cluster *) malloc (sizeof(Cluster)*K);
memcpy(pointsGPUKdTree, pointsCPU, sizeof(Point)*N);
memcpy(clustersGPUKdTree, clustersCPU, sizeof(Cluster)*K);
tstart();
DoKmeansGPUKdTree(pointsGPUKdTree, clustersGPUKdTree);
tend();
printf("%f seconds on GPU KdTree.\n", tval());
#endif
#ifdef GPU
pointsGPU = (Point *) malloc (sizeof(Point)*N);
clustersGPU = (Cluster *) malloc (sizeof(Cluster)*K);
memcpy(pointsGPU, pointsCPU, sizeof(Point)*N);
memcpy(clustersGPU, clustersCPU, sizeof(Cluster)*K);
tstart();
DoKmeansGPU(pointsGPU, clustersGPU);
tend();
printf("%f seconds on GPU.\n", tval());
#endif
#ifdef CPU_KD
pointsCPUKdTree = (Point *) malloc (sizeof(Point)*N);
clustersCPUKdTree = (Cluster *) malloc (sizeof(Cluster)*K);
memcpy(pointsCPUKdTree, pointsCPU, sizeof(Point)*N);
memcpy(clustersCPUKdTree, clustersCPU, sizeof(Cluster)*K);
tstart();
DoKmeansCPUKdTree(pointsCPUKdTree, clustersCPUKdTree);
tend();
printf("%f seconds on CPU KdTree.\n", tval());
#endif
// Note plain CPU should always be at the end. Data for other versions are
// copied from here. So don't want it to change before copying.
tstart();
DoKmeansCPU(pointsCPU, clustersCPU);
tend();
printf("%f seconds on CPU.\n", tval());
#ifdef PRETTY_PRINT
#if defined (GPU_KD)
// Showing GPU_KD dumps
FILE *fp; char buf[20];
system ("rm /tmp/*plot");
for (i = 0; i < N; i++) {
sprintf(buf, "/tmp/%d.plot", pointsGPUKdTree[i].clusterId);
fp = fopen (buf, "a");
if (fp) {
fprintf (fp, "%.2f %.2f #%d GPUKd\n", pointsGPUKdTree[i].loc[X_AXIS], pointsGPUKdTree[i].loc[Y_AXIS], pointsGPUKdTree[i].clusterId);
fclose(fp);
}
}
#elif defined (GPU)
// Showing GPU dumps
FILE *fp; char buf[20];
system ("rm /tmp/*plot");
for (i = 0; i < N; i++) {
sprintf(buf, "/tmp/%d.plot", pointsGPU[i].clusterId);
fp = fopen (buf, "a");
if (fp) {
fprintf (fp, "%.2f %.2f #%d GPU\n", pointsGPU[i].loc[X_AXIS], pointsGPU[i].loc[Y_AXIS], pointsGPU[i].clusterId);
fclose(fp);
}
}
#elif defined(CPU_KD)
// Showing CPU_KD dumps
FILE *fp; char buf[20];
system ("rm /tmp/*plot");
for (i = 0; i < N; i++) {
sprintf(buf, "/tmp/%d.plot", pointsCPUKdTree[i].clusterId);
fp = fopen (buf, "a");
if (fp) {
fprintf (fp, "%.2f %.2f #%d CPUKd\n", pointsCPUKdTree[i].loc[X_AXIS], pointsCPUKdTree[i].loc[Y_AXIS], pointsCPUKdTree[i].clusterId);
fclose(fp);
}
}
#else
// Showing CPU dumps
FILE *fp; char buf[20];
system ("rm /tmp/*plot");
for (i = 0; i < N; i++) {
sprintf(buf, "/tmp/%d.plot", pointsCPU[i].clusterId);
fp = fopen (buf, "a");
if (fp) {
fprintf (fp, "%.2f %.2f #%d CPU\n", pointsCPU[i].loc[X_AXIS], pointsCPU[i].loc[Y_AXIS], pointsCPU[i].clusterId);
fclose(fp);
}
}
#endif // #if defined (GPU_KD)
#endif // PRETTY_PRINT
return 0;
}
#if 0
/********** Pretty print script ***********
// string=""
// for plot in /tmp/*.plot
// do
// string="${string},\"$plot\""
// done
//
// string=`cut -c 2- <<EOF
/ $string
// EOF`
//
// echo "set key off" > /tmp/plot
// echo "plot $string" >> /tmp/plot
// gnuplot -persist < /tmp/plot
//
// # ah
************** End script ****************/
#endif
|
e6cb582f5eba431177364ea29815fc8895a5e71b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SoftmaxRegression.h"
#include "iomanip"
SoftmaxRegression::SoftmaxRegression(std::shared_ptr<SoftmaxSettings> aSettings,
std::shared_ptr<SoftmaxData> aData,
std::shared_ptr<GLInstance> aGLInstance) :
mSettings(aSettings),
mData(aData),
mGL(aGLInstance)
{
initialize();
hipLaunchKernelGGL(( assignPointQuadIndices), dim3(mRegressionBlocks), dim3(mRegressionTPB), 0, 0,
mData->devPointsPtr, mData->devQuadIndicesPtr,
mSettings->numPoints, mSettings->numClasses,
mSettings->windowWidth, mSettings->windowHeight);
}
SoftmaxRegression::~SoftmaxRegression()
{
}
void SoftmaxRegression::execute()
{
int steps = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float fpsTime = 0.0;
float mapMin = 0;
float mapMax = 0;
std::vector<std::vector<float>> dLogLSums(mSettings->numClasses, std::vector<float>(mSettings->numFeatures, 0.0)); // numClasses x numFeatures sum of divLogL terms to set on each regression step
std::vector<float> probNorms;
probNorms.resize(mSettings->numClasses);
std::string frameName;
while(!glfwWindowShouldClose(mGL->window))
{
hipEventRecord(start, 0);
float3 *colorsPtr;
gpuErrchk(hipGraphicsMapResources(1, &mGL->cudaColorResource, 0));
size_t numBytes;
gpuErrchk(hipGraphicsResourceGetMappedPointer((void**)&colorsPtr, &numBytes,
*&mGL->cudaColorResource));
// Take dLogL for each class, filling each derivative vector
for (int classIdx = 0; classIdx < mSettings->numClasses; ++classIdx)
{
hipLaunchKernelGGL(( divLogLikelihood), dim3(mRegressionBlocks), dim3(mRegressionTPB), 0, 0,
mData->devPointsPtr,
mData->devDivLogLPtrs[classIdx][0], // x divs
mData->devDivLogLPtrs[classIdx][1], // y divs
mData->devWeightsPtr,
mSettings->numPoints,
classIdx,
mSettings->numClasses);
gpuErrchk(hipDeviceSynchronize());
}
gpuErrchk(hipDeviceSynchronize());
// Sum the derivative vectors
for (int classIdx = 0; classIdx < mSettings->numClasses; ++classIdx)
{
for (int featureIdx = 0; featureIdx < mSettings->numFeatures; ++featureIdx)
{
dLogLSums[classIdx][featureIdx] = thrust::reduce(mData->devDivLogLTerms[classIdx][featureIdx].begin(),
mData->devDivLogLTerms[classIdx][featureIdx].end());
}
}
gpuErrchk(hipDeviceSynchronize());
// reset the derivative vectors for the next iteration
for (int classIdx = 0; classIdx < mSettings->numClasses; ++classIdx)
{
for (int f = 0; f < mSettings->numFeatures; ++f)
{
thrust::fill(mData->devDivLogLTerms[classIdx][f].begin(),
mData->devDivLogLTerms[classIdx][f].end(),
0.0);
}
}
// update the weights using the sums and scaling factors
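// Gradient ascent on the log-likelihood: each class's weights move along the summed
// derivative for that class, scaled by its per-axis learning rate (alpha).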
for (int classIdx = 0; classIdx < mSettings->numClasses; ++classIdx)
{
mData->hostWeights[classIdx].x += mData->hostAlphas[classIdx].x * dLogLSums[classIdx][0];
mData->hostWeights[classIdx].y += mData->hostAlphas[classIdx].y * dLogLSums[classIdx][1];
}
// copy the updated weights from the host to the device for the next iteration
mData->devWeights = mData->hostWeights;
mData->devWeightsPtr = thrust::raw_pointer_cast(mData->devWeights.data());
///
/// The rest is visualization code
///
gpuErrchk(hipDeviceSynchronize());
// Populate a probability field for each set of class weights. Because they vary in magnitude,
// normalize them and scale them to the same maximum (1.0) before combining to plot.
for (int classIdx = 0; classIdx < mSettings->numClasses; ++classIdx)
{
hipLaunchKernelGGL(( CalculateProbability), dim3(mColorBlocks), dim3(mColorTPB) , 0, 0,
mData->devProbFieldPtrs[classIdx],
mData->devWeightsPtr,
classIdx,
mSettings->numClasses,
mSettings->windowWidth,
mSettings->windowHeight);
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipPeekAtLastError());
probNorms[classIdx] = std::sqrt(thrust::inner_product(mData->devProbFields[classIdx].begin(),
mData->devProbFields[classIdx].end(),
mData->devProbFields[classIdx].begin(), 0.0f));
using namespace thrust::placeholders;
thrust::transform(mData->devProbFields[classIdx].begin(), mData->devProbFields[classIdx].end(), mData->devProbFields[classIdx].begin(), _1 /= probNorms[classIdx]);
auto minMaxPtrs = thrust::minmax_element(mData->devProbFields[classIdx].begin(), mData->devProbFields[classIdx].end());
mapMax = *minMaxPtrs.second;
mapMin = *minMaxPtrs.first;
thrust::transform(mData->devProbFields[classIdx].begin(), mData->devProbFields[classIdx].end(), mData->devProbFields[classIdx].begin(), _1 /= mapMax);
hipLaunchKernelGGL(( RemoveLowers), dim3(mColorBlocks), dim3(mColorTPB) , 0, 0,
mData->devProbFieldPtrs[classIdx],
mSettings->windowWidth,
mSettings->windowHeight,
mapMin,
mapMax);
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipPeekAtLastError());
}
// sum them in place in the zero'th field
for (int classIdx = 1; classIdx < mSettings->numClasses; ++classIdx)
{
thrust::transform(mData->devProbFields[0].begin(), mData->devProbFields[0].end(), mData->devProbFields[classIdx].begin(), mData->devProbFields[0].begin(), thrust::plus<float>());
gpuErrchk(hipDeviceSynchronize());
}
auto minMaxPtrs = thrust::minmax_element(mData->devProbFields[0].begin(), mData->devProbFields[0].end());
mapMin = *minMaxPtrs.first;
mapMax = *minMaxPtrs.second;
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( Color), dim3(mColorBlocks), dim3(mColorTPB) , 0, 0,
colorsPtr,
mData->devProbFieldPtrs[0],
mData->devColorMapPtr,
mData->devPointsPtr,
mSettings->windowWidth,
mSettings->windowHeight,
mapMin,
mapMax);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( ColorPointQuads), dim3(mRegressionBlocks), dim3(mRegressionTPB) , 0, 0,
colorsPtr,
mData->devQuadIndicesPtr,
mSettings->numPoints,
mSettings->numClasses,
mSettings->windowWidth,
mSettings->windowHeight);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipGraphicsUnmapResources(1, &mGL->cudaColorResource, 0));
Draw();
if (mSettings->recording && steps < mSettings->frames)
{
frameName = FrameNameGen(steps, mSettings->frames);
hipLaunchKernelGGL(( FormPNGData), dim3(mColorBlocks), dim3(mColorTPB) , 0, 0, colorsPtr,
mData->devPixelDataPtr,
mSettings->windowWidth,
mSettings->windowHeight);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipMemcpy(mData->hostPixelData.data(),
mData->devPixelDataPtr,
mSettings->windowWidth * mSettings->windowHeight * 3 * sizeof(unsigned char),
hipMemcpyDeviceToHost));
gpuErrchk(hipPeekAtLastError());
WritePNG(mData->hostPixelData.data(),
frameName,
mSettings->windowWidth,
mSettings->windowHeight);
}
steps++;
hipEventRecord(stop, 0);
hipEventElapsedTime(&fpsTime, start, stop);
char title[512];
sprintf(title, "Cuda Softmax Regression: %12.2f fps, point count: %u, steps taken: %d", 1.0f/(fpsTime/1000.0f), mSettings->numPoints*mSettings->numClasses, steps);
glfwSetWindowTitle(mGL->window, title);
//|| steps == 1
if(glfwGetKey(mGL->window, GLFW_KEY_ESCAPE)) {
glfwSetWindowShouldClose(mGL->window, 1);
std::cout << "Window closed" << std::endl;
}
}
return;
}
void SoftmaxRegression::Draw()
{
mGL->Draw();
}
void SoftmaxRegression::initialize()
{
// Essentially attempt to have
// 256 threads per block as per occupancy optimization. Though in all
// honesty I have never benchmarked a thing.
// Also hard coded for my device (3.5) specs which is a no-no
int totalPoints = mSettings->numPoints * mSettings->numClasses;
if (totalPoints <= 2 << 7)
{
mRegressionBlocks = 1;
}
else if (totalPoints <= 2 << 9)
{
mRegressionBlocks = 4;
}
else if (totalPoints <= 2 << 11)
{
mRegressionBlocks = 16;
}
else if (totalPoints <= 2 << 13)
{
mRegressionBlocks = 64;
}
else if (totalPoints <= 2 << 14)
{
mRegressionBlocks = 128;
}
else if (totalPoints <= 2 << 15)
{
mRegressionBlocks = 256;
}
else if (totalPoints <= 2 << 16)
{
mRegressionBlocks = 512;
}
else if (totalPoints <= 2 << 17)
{
mRegressionBlocks = 1024; // need y blocks past this point I believe.
}
mRegressionTPB = totalPoints/mRegressionBlocks;
std::cout << mSettings->windowWidth << " " << mSettings->windowHeight << std::endl;
switch(mSettings->windowWidth * mSettings->windowHeight)
{
// 128 x 128
case 16384:
mColorTPB.x = mSettings->windowWidth/1;
mColorTPB.y = mSettings->windowHeight/128;
mColorBlocks.x = 1;
mColorBlocks.y = 128;
break;
// 256 x 256
case 65536:
mColorTPB.x = mSettings->windowWidth/1;
mColorTPB.y = mSettings->windowHeight/256;
mColorBlocks.x = 1;
mColorBlocks.y = 256;
break;
// 512 x 512
case 262144:
mColorTPB.x = mSettings->windowWidth/2;
mColorTPB.y = mSettings->windowHeight/256;
mColorBlocks.x = 2;
mColorBlocks.y = 256;
break;
case 1024 * 1024:
mColorTPB.x = mSettings->windowWidth/4;
mColorTPB.y = mSettings->windowHeight/256;
mColorBlocks.x = 4;
mColorBlocks.y = 256;
break;
default:
std::cout<<"Bad Dimensions"<<std::endl;
exit(1);
}
std::cout<<" Calling path algorithm kernels with:"<<std::endl
<<" mRegressionTPB: ["<<mRegressionTPB<<"]"<<std::endl
<<" On a Grid of: ["<<mRegressionBlocks<<"] Blocks"<<std::endl<<std::endl;
std::cout<<" Calling painting kernels with:"<<std::endl
<<" mColorTPB: ["<<mColorTPB.x<<","<<mColorTPB.y<<"]"<<std::endl
<<" On a Grid of: ["<<mColorBlocks.x<<","<<mColorBlocks.y<<"]"<<std::endl;
}
| e6cb582f5eba431177364ea29815fc8895a5e71b.cu | #include "SoftmaxRegression.h"
#include "iomanip"
SoftmaxRegression::SoftmaxRegression(std::shared_ptr<SoftmaxSettings> aSettings,
std::shared_ptr<SoftmaxData> aData,
std::shared_ptr<GLInstance> aGLInstance) :
mSettings(aSettings),
mData(aData),
mGL(aGLInstance)
{
initialize();
assignPointQuadIndices<<<mRegressionBlocks, mRegressionTPB>>>
(mData->devPointsPtr, mData->devQuadIndicesPtr,
mSettings->numPoints, mSettings->numClasses,
mSettings->windowWidth, mSettings->windowHeight);
}
SoftmaxRegression::~SoftmaxRegression()
{
}
void SoftmaxRegression::execute()
{
int steps = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float fpsTime = 0.0;
float mapMin = 0;
float mapMax = 0;
std::vector<std::vector<float>> dLogLSums(mSettings->numClasses, std::vector<float>(mSettings->numFeatures, 0.0)); // numClasses x numFeatures sum of divLogL terms to set on each regression step
std::vector<float> probNorms;
probNorms.resize(mSettings->numClasses);
std::string frameName;
while(!glfwWindowShouldClose(mGL->window))
{
cudaEventRecord(start, 0);
float3 *colorsPtr;
gpuErrchk(cudaGraphicsMapResources(1, &mGL->cudaColorResource, 0));
size_t numBytes;
gpuErrchk(cudaGraphicsResourceGetMappedPointer((void**)&colorsPtr, &numBytes,
*&mGL->cudaColorResource));
// Take dLogL for each class, filling each derivative vector
for (int classIdx = 0; classIdx < mSettings->numClasses; ++classIdx)
{
divLogLikelihood<<<mRegressionBlocks, mRegressionTPB>>>
(mData->devPointsPtr,
mData->devDivLogLPtrs[classIdx][0], // x divs
mData->devDivLogLPtrs[classIdx][1], // y divs
mData->devWeightsPtr,
mSettings->numPoints,
classIdx,
mSettings->numClasses);
gpuErrchk(cudaDeviceSynchronize());
}
gpuErrchk(cudaDeviceSynchronize());
// Sum the derivative vectors
for (int classIdx = 0; classIdx < mSettings->numClasses; ++classIdx)
{
for (int featureIdx = 0; featureIdx < mSettings->numFeatures; ++featureIdx)
{
dLogLSums[classIdx][featureIdx] = thrust::reduce(mData->devDivLogLTerms[classIdx][featureIdx].begin(),
mData->devDivLogLTerms[classIdx][featureIdx].end());
}
}
gpuErrchk(cudaDeviceSynchronize());
// reset the derivative vectors for the next iteration
for (int classIdx = 0; classIdx < mSettings->numClasses; ++classIdx)
{
for (int f = 0; f < mSettings->numFeatures; ++f)
{
thrust::fill(mData->devDivLogLTerms[classIdx][f].begin(),
mData->devDivLogLTerms[classIdx][f].end(),
0.0);
}
}
// update the weights using the sums and scaling factors
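// Gradient ascent on the log-likelihood: each class's weights move along the summed
// derivative for that class, scaled by its per-axis learning rate (alpha).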
for (int classIdx = 0; classIdx < mSettings->numClasses; ++classIdx)
{
mData->hostWeights[classIdx].x += mData->hostAlphas[classIdx].x * dLogLSums[classIdx][0];
mData->hostWeights[classIdx].y += mData->hostAlphas[classIdx].y * dLogLSums[classIdx][1];
}
// copy the updated weights from the host to the device for the next iteration
mData->devWeights = mData->hostWeights;
mData->devWeightsPtr = thrust::raw_pointer_cast(mData->devWeights.data());
///
/// The rest is visualization code
///
gpuErrchk(cudaDeviceSynchronize());
// Populate a probability field for each set of class weights. Because they vary in magnitude,
// normalize them and scale them to the same maximum (1.0) before combining to plot.
for (int classIdx = 0; classIdx < mSettings->numClasses; ++classIdx)
{
CalculateProbability<<< mColorBlocks, mColorTPB >>>
(mData->devProbFieldPtrs[classIdx],
mData->devWeightsPtr,
classIdx,
mSettings->numClasses,
mSettings->windowWidth,
mSettings->windowHeight);
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaPeekAtLastError());
probNorms[classIdx] = std::sqrt(thrust::inner_product(mData->devProbFields[classIdx].begin(),
mData->devProbFields[classIdx].end(),
mData->devProbFields[classIdx].begin(), 0.0f));
using namespace thrust::placeholders;
thrust::transform(mData->devProbFields[classIdx].begin(), mData->devProbFields[classIdx].end(), mData->devProbFields[classIdx].begin(), _1 /= probNorms[classIdx]);
auto minMaxPtrs = thrust::minmax_element(mData->devProbFields[classIdx].begin(), mData->devProbFields[classIdx].end());
mapMax = *minMaxPtrs.second;
mapMin = *minMaxPtrs.first;
thrust::transform(mData->devProbFields[classIdx].begin(), mData->devProbFields[classIdx].end(), mData->devProbFields[classIdx].begin(), _1 /= mapMax);
RemoveLowers<<< mColorBlocks, mColorTPB >>>
(mData->devProbFieldPtrs[classIdx],
mSettings->windowWidth,
mSettings->windowHeight,
mapMin,
mapMax);
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaPeekAtLastError());
}
// sum them in place in the zero'th field
for (int classIdx = 1; classIdx < mSettings->numClasses; ++classIdx)
{
thrust::transform(mData->devProbFields[0].begin(), mData->devProbFields[0].end(), mData->devProbFields[classIdx].begin(), mData->devProbFields[0].begin(), thrust::plus<float>());
gpuErrchk(cudaDeviceSynchronize());
}
auto minMaxPtrs = thrust::minmax_element(mData->devProbFields[0].begin(), mData->devProbFields[0].end());
mapMin = *minMaxPtrs.first;
mapMax = *minMaxPtrs.second;
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
Color<<< mColorBlocks, mColorTPB >>>
(colorsPtr,
mData->devProbFieldPtrs[0],
mData->devColorMapPtr,
mData->devPointsPtr,
mSettings->windowWidth,
mSettings->windowHeight,
mapMin,
mapMax);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
ColorPointQuads<<< mRegressionBlocks, mRegressionTPB >>>
(colorsPtr,
mData->devQuadIndicesPtr,
mSettings->numPoints,
mSettings->numClasses,
mSettings->windowWidth,
mSettings->windowHeight);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaGraphicsUnmapResources(1, &mGL->cudaColorResource, 0));
Draw();
if (mSettings->recording && steps < mSettings->frames)
{
frameName = FrameNameGen(steps, mSettings->frames);
FormPNGData<<< mColorBlocks, mColorTPB >>> (colorsPtr,
mData->devPixelDataPtr,
mSettings->windowWidth,
mSettings->windowHeight);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaMemcpy(mData->hostPixelData.data(),
mData->devPixelDataPtr,
mSettings->windowWidth * mSettings->windowHeight * 3 * sizeof(unsigned char),
cudaMemcpyDeviceToHost));
gpuErrchk(cudaPeekAtLastError());
WritePNG(mData->hostPixelData.data(),
frameName,
mSettings->windowWidth,
mSettings->windowHeight);
}
steps++;
cudaEventRecord(stop, 0);
cudaEventElapsedTime(&fpsTime, start, stop);
char title[512];
sprintf(title, "Cuda Softmax Regression: %12.2f fps, point count: %u, steps taken: %d", 1.0f/(fpsTime/1000.0f), mSettings->numPoints*mSettings->numClasses, steps);
glfwSetWindowTitle(mGL->window, title);
//|| steps == 1
if(glfwGetKey(mGL->window, GLFW_KEY_ESCAPE)) {
glfwSetWindowShouldClose(mGL->window, 1);
std::cout << "Window closed" << std::endl;
}
}
return;
}
void SoftmaxRegression::Draw()
{
mGL->Draw();
}
void SoftmaxRegression::initialize()
{
// Essentially attempt to have
// 256 threads per block as per occupation optimization. Though in all
// honesty I have never benchmarked a thing.
// Also hard coded for my device (3.5) specs which is a no-no
int totalPoints = mSettings->numPoints * mSettings->numClasses;
if (totalPoints <= 2 << 7)
{
mRegressionBlocks = 1;
}
else if (totalPoints <= 2 << 9)
{
mRegressionBlocks = 4;
}
else if (totalPoints <= 2 << 11)
{
mRegressionBlocks = 16;
}
else if (totalPoints <= 2 << 13)
{
mRegressionBlocks = 64;
}
else if (totalPoints <= 2 << 14)
{
mRegressionBlocks = 128;
}
else if (totalPoints <= 2 << 15)
{
mRegressionBlocks = 256;
}
else if (totalPoints <= 2 << 16)
{
mRegressionBlocks = 512;
}
else if (totalPoints <= 2 << 17)
{
mRegressionBlocks = 1024; // need y blocks past this point I believe.
}
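// Integer division: exact coverage (blocks * threads-per-block == totalPoints) assumes totalPoints is a multiple of mRegressionBlocks.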
mRegressionTPB = totalPoints/mRegressionBlocks;
std::cout << mSettings->windowWidth << " " << mSettings->windowHeight << std::endl;
switch(mSettings->windowWidth * mSettings->windowHeight)
{
// 128 x 128
case 16384:
mColorTPB.x = mSettings->windowWidth/1;
mColorTPB.y = mSettings->windowHeight/128;
mColorBlocks.x = 1;
mColorBlocks.y = 128;
break;
// 256 x 256
case 65536:
mColorTPB.x = mSettings->windowWidth/1;
mColorTPB.y = mSettings->windowHeight/256;
mColorBlocks.x = 1;
mColorBlocks.y = 256;
break;
// 512 x 512
case 262144:
mColorTPB.x = mSettings->windowWidth/2;
mColorTPB.y = mSettings->windowHeight/256;
mColorBlocks.x = 2;
mColorBlocks.y = 256;
break;
case 1024 * 1024:
mColorTPB.x = mSettings->windowWidth/4;
mColorTPB.y = mSettings->windowHeight/256;
mColorBlocks.x = 4;
mColorBlocks.y = 256;
break;
default:
std::cout<<"Bad Dimensions"<<std::endl;
exit(1);
}
std::cout<<" Calling path algorithm kernels with:"<<std::endl
<<" mRegressionTPB: ["<<mRegressionTPB<<"]"<<std::endl
<<" On a Grid of: ["<<mRegressionBlocks<<"] Blocks"<<std::endl<<std::endl;
std::cout<<" Calling painting kernels with:"<<std::endl
<<" mColorTPB: ["<<mColorTPB.x<<","<<mColorTPB.y<<"]"<<std::endl
<<" On a Grid of: ["<<mColorBlocks.x<<","<<mColorBlocks.y<<"]"<<std::endl;
}
|
62f48da46cb64aa55eb65af34466e0bc7e9c65f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "argmaxwithvalue_impl.cuh"
#include "device/gpu/cuda_common.h"
#include "include/hip/hip_fp16.h"
template <typename T, typename S>
__global__ void ArgmaxWithValue(size_t size, const T* input, const int bound, int outerSize, int innerSize,
S* index, T* output) {
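// Treats the input as [outerSize, bound, innerSize]: for each (outer, inner) pair, scan the
// 'bound' axis, writing the maximum value to 'output' and its index along that axis to 'index'.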
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
for (int i = 0; i < outerSize; i++) {
int inputOutterOffset = i * innerSize * bound;
int outputOutterOffset = i * innerSize;
for (int j = 0; j < innerSize; j++) {
auto outputInnerOffset = outputOutterOffset + j;
S idx = 0;
T maxData = input[j + inputOutterOffset];
for (S c = 0; c < bound; c++) {
int offset = j + c * innerSize;
auto inputData = input[inputOutterOffset + offset];
idx = inputData > maxData ? c : idx;
maxData = inputData > maxData ? inputData : maxData;
}
output[outputInnerOffset] = maxData;
index[outputInnerOffset] = idx;
}
}
}
return;
}
template <typename T, typename S>
void CalArgmaxWithValue(size_t size, const T* input, const int bound_, const int outerSize_, const int innerSize_,
S* index, T* output, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( ArgmaxWithValue), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, input, bound_, outerSize_, innerSize_,
index, output);
return;
}
template void CalArgmaxWithValue<float, int>(size_t size, const float* input, const int bound_, const int outerSize_,
const int innerSize_, int* index, float* output,
hipStream_t cuda_stream);
template void CalArgmaxWithValue<half, int>(size_t size, const half* input, const int bound_, const int outerSize_,
const int innerSize_, int* index, half* output,
hipStream_t cuda_stream);
| 62f48da46cb64aa55eb65af34466e0bc7e9c65f3.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "argmaxwithvalue_impl.cuh"
#include "device/gpu/cuda_common.h"
#include "include/cuda_fp16.h"
template <typename T, typename S>
__global__ void ArgmaxWithValue(size_t size, const T* input, const int bound, int outerSize, int innerSize,
S* index, T* output) {
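// Treats the input as [outerSize, bound, innerSize]: for each (outer, inner) pair, scan the
// 'bound' axis, writing the maximum value to 'output' and its index along that axis to 'index'.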
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
for (int i = 0; i < outerSize; i++) {
int inputOutterOffset = i * innerSize * bound;
int outputOutterOffset = i * innerSize;
for (int j = 0; j < innerSize; j++) {
auto outputInnerOffset = outputOutterOffset + j;
S idx = 0;
T maxData = input[j + inputOutterOffset];
for (S c = 0; c < bound; c++) {
int offset = j + c * innerSize;
auto inputData = input[inputOutterOffset + offset];
idx = inputData > maxData ? c : idx;
maxData = inputData > maxData ? inputData : maxData;
}
output[outputInnerOffset] = maxData;
index[outputInnerOffset] = idx;
}
}
}
return;
}
template <typename T, typename S>
void CalArgmaxWithValue(size_t size, const T* input, const int bound_, const int outerSize_, const int innerSize_,
S* index, T* output, cudaStream_t cuda_stream) {
ArgmaxWithValue<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input, bound_, outerSize_, innerSize_,
index, output);
return;
}
template void CalArgmaxWithValue<float, int>(size_t size, const float* input, const int bound_, const int outerSize_,
const int innerSize_, int* index, float* output,
cudaStream_t cuda_stream);
template void CalArgmaxWithValue<half, int>(size_t size, const half* input, const int bound_, const int outerSize_,
const int innerSize_, int* index, half* output,
cudaStream_t cuda_stream);
|
cb42b13d28873761d2a5ad8ba5e25dd9bd7d0c71.hip | // !!! This is a file automatically generated by hipify!!!
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include <hip/hip_runtime.h>
#include <cuda_utils.h>
#include <Box-Pistons-Orthogonal/AndersenBarostat.h>
template<typename T, int N>
struct H_D{
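// Minimal host/device buffer pair of N elements: h lives on the host, d on the device; c2d()/c2h() copy between them.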
T* h;
T* d;
H_D(){
h= (T*)malloc(N*sizeof(T));
hipMalloc((void**)&d,N*sizeof(T));
}
void c2d(){
cudaCheck(hipMemcpy(d,h,N*sizeof(T),hipMemcpyHostToDevice));
}
void c2h(){
cudaCheck(hipMemcpy(h,d,N*sizeof(T),hipMemcpyDeviceToHost));
}
};
TEST_CASE("Constant pressure piston drift"){
H_D<double3,1> virial;
H_D<double3,1> comke;
H_D<double3,1> box;
H_D<double3,1> box_dot;
H_D<double3,1> v_prescale;
H_D<double3,1> v_scale;
H_D<double3,1> r_prescale;
//initialize everything to zero
double3 zero3;
zero3.x=0;
zero3.y=0;
zero3.z=0;
double3 one3;
one3.x=1;
one3.y=1;
one3.z=1;
boxpiston::AndersenBarostat piston;
piston.gamma= 0;
piston.piston_mass= 1;
piston.pressure=1;
piston.k= one3;
*virial.h= zero3;
*comke.h= one3;
*box.h= one3;
*box_dot.h= zero3;
double timestep= 0.01;
SECTION("NPH ideal gas simulation"){
double initial_total_energy= piston.pe(*box.h)+piston.ke(*box.h,*box_dot.h)+
comke.h->x+comke.h->y+comke.h->z;
for (int i=0;i<1000000;i++){
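// One symmetric step: half virial kick, half drift, thermostat, half drift, half virial kick,
// then require that the total energy (piston PE + piston KE + COM KE) is conserved to ~1e-4.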
piston.virialKick(*virial.h,*box.h,*box_dot.h,*box_dot.h,timestep*0.5);
piston.drift(*comke.h,*box.h,*box_dot.h,*comke.h,*box.h,*box_dot.h,
*v_scale.h,*v_prescale.h,*r_prescale.h,timestep*0.5);
piston.thermostat(*box.h,*box_dot.h,*box_dot.h,85,timestep,0,0,i*256);
piston.drift(*comke.h,*box.h,*box_dot.h,*comke.h,*box.h,*box_dot.h,
*v_scale.h,*v_prescale.h,*r_prescale.h,timestep*0.5);
piston.virialKick(*virial.h,*box.h,*box_dot.h,*box_dot.h,timestep*0.5);
double final_total_energy= piston.pe(*box.h)+piston.ke(*box.h,*box_dot.h)+
comke.h->x+comke.h->y+comke.h->z;
REQUIRE(final_total_energy==Approx(initial_total_energy).epsilon(1E-4));
}
piston.pressure=5;
initial_total_energy= piston.pe(*box.h)+piston.ke(*box.h,*box_dot.h)+
comke.h->x+comke.h->y+comke.h->z;
for (int i=0;i<1000000;i++){
piston.virialKick(*virial.h,*box.h,*box_dot.h,*box_dot.h,timestep*0.5);
piston.drift(*comke.h,*box.h,*box_dot.h,*comke.h,*box.h,*box_dot.h,
*v_scale.h,*v_prescale.h,*r_prescale.h,timestep*0.5);
piston.thermostat(*box.h,*box_dot.h,*box_dot.h,85,timestep,0,1,i*256);
piston.drift(*comke.h,*box.h,*box_dot.h,*comke.h,*box.h,*box_dot.h,
*v_scale.h,*v_prescale.h,*r_prescale.h,timestep*0.5);
piston.virialKick(*virial.h,*box.h,*box_dot.h,*box_dot.h,timestep*0.5);
double final_total_energy= piston.pe(*box.h)+piston.ke(*box.h,*box_dot.h)+
comke.h->x+comke.h->y+comke.h->z;
REQUIRE(final_total_energy==Approx(initial_total_energy).epsilon(1E-4));
}
}
}
| cb42b13d28873761d2a5ad8ba5e25dd9bd7d0c71.cu | #define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include <cuda_runtime.h>
#include <cuda_utils.h>
#include <Box-Pistons-Orthogonal/AndersenBarostat.h>
template<typename T, int N>
struct H_D{
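// Minimal host/device buffer pair of N elements: h lives on the host, d on the device; c2d()/c2h() copy between them.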
T* h;
T* d;
H_D(){
h= (T*)malloc(N*sizeof(T));
cudaMalloc((void**)&d,N*sizeof(T));
}
void c2d(){
cudaCheck(cudaMemcpy(d,h,N*sizeof(T),cudaMemcpyHostToDevice));
}
void c2h(){
cudaCheck(cudaMemcpy(h,d,N*sizeof(T),cudaMemcpyDeviceToHost));
}
};
TEST_CASE("Constant pressure piston drift"){
H_D<double3,1> virial;
H_D<double3,1> comke;
H_D<double3,1> box;
H_D<double3,1> box_dot;
H_D<double3,1> v_prescale;
H_D<double3,1> v_scale;
H_D<double3,1> r_prescale;
//initialize everything to zero
double3 zero3;
zero3.x=0;
zero3.y=0;
zero3.z=0;
double3 one3;
one3.x=1;
one3.y=1;
one3.z=1;
boxpiston::AndersenBarostat piston;
piston.gamma= 0;
piston.piston_mass= 1;
piston.pressure=1;
piston.k= one3;
*virial.h= zero3;
*comke.h= one3;
*box.h= one3;
*box_dot.h= zero3;
double timestep= 0.01;
SECTION("NPH ideal gas simulation"){
double initial_total_energy= piston.pe(*box.h)+piston.ke(*box.h,*box_dot.h)+
comke.h->x+comke.h->y+comke.h->z;
for (int i=0;i<1000000;i++){
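// One symmetric step: half virial kick, half drift, thermostat, half drift, half virial kick,
// then require that the total energy (piston PE + piston KE + COM KE) is conserved to ~1e-4.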
piston.virialKick(*virial.h,*box.h,*box_dot.h,*box_dot.h,timestep*0.5);
piston.drift(*comke.h,*box.h,*box_dot.h,*comke.h,*box.h,*box_dot.h,
*v_scale.h,*v_prescale.h,*r_prescale.h,timestep*0.5);
piston.thermostat(*box.h,*box_dot.h,*box_dot.h,85,timestep,0,0,i*256);
piston.drift(*comke.h,*box.h,*box_dot.h,*comke.h,*box.h,*box_dot.h,
*v_scale.h,*v_prescale.h,*r_prescale.h,timestep*0.5);
piston.virialKick(*virial.h,*box.h,*box_dot.h,*box_dot.h,timestep*0.5);
double final_total_energy= piston.pe(*box.h)+piston.ke(*box.h,*box_dot.h)+
comke.h->x+comke.h->y+comke.h->z;
REQUIRE(final_total_energy==Approx(initial_total_energy).epsilon(1E-4));
}
piston.pressure=5;
initial_total_energy= piston.pe(*box.h)+piston.ke(*box.h,*box_dot.h)+
comke.h->x+comke.h->y+comke.h->z;
for (int i=0;i<1000000;i++){
piston.virialKick(*virial.h,*box.h,*box_dot.h,*box_dot.h,timestep*0.5);
piston.drift(*comke.h,*box.h,*box_dot.h,*comke.h,*box.h,*box_dot.h,
*v_scale.h,*v_prescale.h,*r_prescale.h,timestep*0.5);
piston.thermostat(*box.h,*box_dot.h,*box_dot.h,85,timestep,0,1,i*256);
piston.drift(*comke.h,*box.h,*box_dot.h,*comke.h,*box.h,*box_dot.h,
*v_scale.h,*v_prescale.h,*r_prescale.h,timestep*0.5);
piston.virialKick(*virial.h,*box.h,*box_dot.h,*box_dot.h,timestep*0.5);
double final_total_energy= piston.pe(*box.h)+piston.ke(*box.h,*box_dot.h)+
comke.h->x+comke.h->y+comke.h->z;
REQUIRE(final_total_energy==Approx(initial_total_energy).epsilon(1E-4));
}
}
}
|
50dfded74956058bf32c9611665c6e9d58ef0b44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/operators/geometry/coord_flip.h"
#include <utility>
#include <vector>
namespace dali {
namespace {
template <typename T = float>
struct SampleDesc {
float* out = nullptr;
const float* in = nullptr;
int64_t size = 0;
uint8_t flip_dim_mask = 0;
float mirrored_origin[3];
};
template <typename T = float>
__global__ void CoordFlipKernel(const SampleDesc<T>* samples, int ndim) {
int64_t block_size = blockDim.x;
int64_t grid_size = gridDim.x * block_size;
int sample_idx = blockIdx.y;
const auto &sample = samples[sample_idx];
int64_t offset = block_size * blockIdx.x;
int64_t tid = threadIdx.x;
for (int64_t idx = offset + tid; idx < sample.size; idx += grid_size) {
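// idx walks the flattened coordinate array; d = idx % ndim selects the coordinate dimension.
// Flipped dimensions are mirrored about the configured center (mirrored_origin holds 2*center).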
int d = idx % ndim;
bool flip = sample.flip_dim_mask & (1 << d);
sample.out[idx] = flip ? sample.mirrored_origin[d] - sample.in[idx] : sample.in[idx];
}
}
} // namespace
class CoordFlipGPU : public CoordFlip<GPUBackend> {
public:
explicit CoordFlipGPU(const OpSpec &spec)
: CoordFlip<GPUBackend>(spec) {}
~CoordFlipGPU() override = default;
DISABLE_COPY_MOVE_ASSIGN(CoordFlipGPU);
void RunImpl(workspace_t<GPUBackend> &ws) override;
USE_OPERATOR_MEMBERS();
using Operator<GPUBackend>::RunImpl;
using CoordFlip<GPUBackend>::layout_;
private:
std::vector<SampleDesc<float>> sample_descs_;
Tensor<GPUBackend> scratchpad_;
};
void CoordFlipGPU::RunImpl(workspace_t<GPUBackend> &ws) {
const auto &input = ws.Input<GPUBackend>(0);
auto &output = ws.Output<GPUBackend>(0);
auto curr_batch_size = ws.GetInputBatchSize(0);
sample_descs_.clear();
sample_descs_.reserve(max_batch_size_);
for (int sample_id = 0; sample_id < curr_batch_size; sample_id++) {
SampleDesc<float> sample_desc;
sample_desc.in = input.tensor<float>(sample_id);
sample_desc.out = output.mutable_tensor<float>(sample_id);
sample_desc.size = volume(input.tensor_shape(sample_id));
assert(sample_desc.size == volume(output.tensor_shape(sample_id)));
bool flip_x = spec_.GetArgument<int>("flip_x", &ws, sample_id);
bool flip_y = spec_.GetArgument<int>("flip_y", &ws, sample_id);
bool flip_z = spec_.GetArgument<int>("flip_z", &ws, sample_id);
if (flip_x) {
sample_desc.flip_dim_mask |= (1 << x_dim_);
}
if (flip_y) {
sample_desc.flip_dim_mask |= (1 << y_dim_);
}
if (flip_z) {
sample_desc.flip_dim_mask |= (1 << z_dim_);
}
sample_desc.mirrored_origin[x_dim_] =
2.0f * spec_.GetArgument<float>("center_x", &ws, sample_id);
sample_desc.mirrored_origin[y_dim_] =
2.0f * spec_.GetArgument<float>("center_y", &ws, sample_id);
sample_desc.mirrored_origin[z_dim_] =
2.0f * spec_.GetArgument<float>("center_z", &ws, sample_id);
sample_descs_.emplace_back(std::move(sample_desc));
}
int64_t sz = curr_batch_size * sizeof(SampleDesc<float>);
scratchpad_.Resize({sz}, DALI_UINT8);
auto sample_descs_gpu_ = reinterpret_cast<SampleDesc<float>*>(
scratchpad_.mutable_data<uint8_t>());
auto stream = ws.stream();
CUDA_CALL(
hipMemcpyAsync(sample_descs_gpu_, sample_descs_.data(), sz, hipMemcpyHostToDevice, stream));
int block = 1024;
auto blocks_per_sample = ::max(32, 1024 / curr_batch_size);
dim3 grid(blocks_per_sample, curr_batch_size);
hipLaunchKernelGGL(( CoordFlipKernel), dim3(grid), dim3(block), 0, stream, sample_descs_gpu_, ndim_);
}
DALI_REGISTER_OPERATOR(CoordFlip, CoordFlipGPU, GPU);
} // namespace dali
| 50dfded74956058bf32c9611665c6e9d58ef0b44.cu | // Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/operators/geometry/coord_flip.h"
#include <utility>
#include <vector>
namespace dali {
namespace {
template <typename T = float>
struct SampleDesc {
float* out = nullptr;
const float* in = nullptr;
int64_t size = 0;
uint8_t flip_dim_mask = 0;
float mirrored_origin[3];
};
template <typename T = float>
__global__ void CoordFlipKernel(const SampleDesc<T>* samples, int ndim) {
int64_t block_size = blockDim.x;
int64_t grid_size = gridDim.x * block_size;
int sample_idx = blockIdx.y;
const auto &sample = samples[sample_idx];
int64_t offset = block_size * blockIdx.x;
int64_t tid = threadIdx.x;
for (int64_t idx = offset + tid; idx < sample.size; idx += grid_size) {
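// idx walks the flattened coordinate array; d = idx % ndim selects the coordinate dimension.
// Flipped dimensions are mirrored about the configured center (mirrored_origin holds 2*center).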
int d = idx % ndim;
bool flip = sample.flip_dim_mask & (1 << d);
sample.out[idx] = flip ? sample.mirrored_origin[d] - sample.in[idx] : sample.in[idx];
}
}
} // namespace
class CoordFlipGPU : public CoordFlip<GPUBackend> {
public:
explicit CoordFlipGPU(const OpSpec &spec)
: CoordFlip<GPUBackend>(spec) {}
~CoordFlipGPU() override = default;
DISABLE_COPY_MOVE_ASSIGN(CoordFlipGPU);
void RunImpl(workspace_t<GPUBackend> &ws) override;
USE_OPERATOR_MEMBERS();
using Operator<GPUBackend>::RunImpl;
using CoordFlip<GPUBackend>::layout_;
private:
std::vector<SampleDesc<float>> sample_descs_;
Tensor<GPUBackend> scratchpad_;
};
void CoordFlipGPU::RunImpl(workspace_t<GPUBackend> &ws) {
const auto &input = ws.Input<GPUBackend>(0);
auto &output = ws.Output<GPUBackend>(0);
auto curr_batch_size = ws.GetInputBatchSize(0);
sample_descs_.clear();
sample_descs_.reserve(max_batch_size_);
for (int sample_id = 0; sample_id < curr_batch_size; sample_id++) {
SampleDesc<float> sample_desc;
sample_desc.in = input.tensor<float>(sample_id);
sample_desc.out = output.mutable_tensor<float>(sample_id);
sample_desc.size = volume(input.tensor_shape(sample_id));
assert(sample_desc.size == volume(output.tensor_shape(sample_id)));
bool flip_x = spec_.GetArgument<int>("flip_x", &ws, sample_id);
bool flip_y = spec_.GetArgument<int>("flip_y", &ws, sample_id);
bool flip_z = spec_.GetArgument<int>("flip_z", &ws, sample_id);
if (flip_x) {
sample_desc.flip_dim_mask |= (1 << x_dim_);
}
if (flip_y) {
sample_desc.flip_dim_mask |= (1 << y_dim_);
}
if (flip_z) {
sample_desc.flip_dim_mask |= (1 << z_dim_);
}
sample_desc.mirrored_origin[x_dim_] =
2.0f * spec_.GetArgument<float>("center_x", &ws, sample_id);
sample_desc.mirrored_origin[y_dim_] =
2.0f * spec_.GetArgument<float>("center_y", &ws, sample_id);
sample_desc.mirrored_origin[z_dim_] =
2.0f * spec_.GetArgument<float>("center_z", &ws, sample_id);
sample_descs_.emplace_back(std::move(sample_desc));
}
int64_t sz = curr_batch_size * sizeof(SampleDesc<float>);
scratchpad_.Resize({sz}, DALI_UINT8);
auto sample_descs_gpu_ = reinterpret_cast<SampleDesc<float>*>(
scratchpad_.mutable_data<uint8_t>());
auto stream = ws.stream();
CUDA_CALL(
cudaMemcpyAsync(sample_descs_gpu_, sample_descs_.data(), sz, cudaMemcpyHostToDevice, stream));
int block = 1024;
auto blocks_per_sample = std::max(32, 1024 / curr_batch_size);
dim3 grid(blocks_per_sample, curr_batch_size);
CoordFlipKernel<<<grid, block, 0, stream>>>(sample_descs_gpu_, ndim_);
}
DALI_REGISTER_OPERATOR(CoordFlip, CoordFlipGPU, GPU);
} // namespace dali
|
03e82ddda8ce1a627ec4c7baeb0d715426b05731.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//The program finds the path for a UAV using the Probabilistic Roadmap Method (PRM) - serial version.
// nvcc PRM_parallel_17.cu -use_fast_math -Xptxas -v -arch=sm_30 -lcurand -I/home/roksana/Dropbox/ann/include -L/home/roksana/Dropbox/ann/lib -lANN
//nvcc PRM_parallel_17.cu -use_fast_math -Xptxas -v -arch=sm_21 -lcurand -I/home/shimu/Dropbox/ann/include -L/home/shimu/Dropbox/ann/lib -lANN
//nvcc main1.cu -use_fast_math -Xptxas -v -arch=sm_21 -lcurand -I/home/shimu/ann/include -L/home/shimu/ann/lib -lANN
//input: start & destination location
//output: a set of locations that includes start location, intermediate locations and destination location
#include<iostream>
#include <stdint.h>
#include <inttypes.h>
#include <cmath>
#include <sstream>
#include "predefined_variables.h"
#include "data_structures.h"
#include "milestones.h"
#include "update_z_number.h"
#include "update_k_neighbors.h"
#include "ann_update_k_neighbors.h"
#include "find_shortest_path.h"
#include "device_BitonicSort_prm.cuh"
#include "device_codes.cuh"
#include "function_codes.cuh"
#include "obstacle_generation_file.cuh"
using namespace std;
int main(){
//generate_input_nodes();
bool parallel=false;//true;//false; //false; //true;
// read the list of start and end nodes
// the nodes are given as A, B, C, D. we need to read the nodes coordinate first. Then find the path between A to B, B to C, C to D.
vector<position> input_nodes=read_input_nodes();
if (input_nodes.size()<=1){
cout<<"Error: Not enough nodes to find path"<<endl;
return 0;
}
//for(int i=0; i<input_nodes.size();i++)cout<<input_nodes[i].x<< " "<<input_nodes[i].y<< " "<<input_nodes[i].z<< endl;
//****************************read obstacles************************************************
//generate_obstacle(input_nodes);
clock_t read_obstacle_tStart = clock();
vector<Sphere>obstacles; obstacles.resize(number_of_obstacles);
read_the_obstacles(obstacles);
clock_t read_obstacle_tEnd = clock();
printf("from the loop: Time taken to read the obstacle: %.6fs\n", (double)(read_obstacle_tEnd - read_obstacle_tStart)/CLOCKS_PER_SEC);
clock_t tStart = clock();
double total_time_sample_generation=0;
vector<vector<local_planner_node*> > ALL_sample_local_planner_node;
for(int n=0; n<input_nodes.size()-1; n++){
//******************initialize the start and the destination location***********************
local_planner_node* start= new local_planner_node();
start->set_local_planner_node(input_nodes[n].x, input_nodes[n].y, input_nodes[n].z);
cout<<"start: " <<start->p.x<<" "<<start->p.y<<" "<<start->p.z<<endl;
start->dist_from_start=0;
local_planner_node* destination = new local_planner_node();
destination->set_local_planner_node(input_nodes[n+1].x, input_nodes[n+1].y, input_nodes[n+1].z);
cout<<"destination: "<<destination->p.x<<" "<<destination->p.y<<" "<<destination->p.z<<endl;
//************************boundary conditions***********************************************
//check if the start and destination location collides with any obstacles
if(!boundary_condition_satisfied(start, destination, obstacles)){
cout<<"boundary condition doesn't satisfied either for start or destination"<<endl;
//return 0;
}
//******************* find the grid size for putting the samples****************************
vector<float> xyz_limit(6);
vector<int> sample_grid_size =find_the_sample_grid_size(start->p, destination->p, xyz_limit);
cout<<" nodes_in_x = "<<sample_grid_size[0]<<" nodes_in_y = "<<sample_grid_size[1]<<" nodes_in_z = "<<sample_grid_size[2]<<endl;
//cout<<"x_start="<<xyz_limit[0]<<", x_end="<<xyz_limit[1]<<", y_start="<<xyz_limit[2]<<", y_end="<<xyz_limit[3]<<", z_start="<<xyz_limit[4]<<", z_end="<<xyz_limit[5]<<endl;
//**************************generate the sample locations***********************************
vector<local_planner_node*> sample_local_planner_node;
//cout<<"Select one of the following options:"<<endl<<" 1. Generate Random Samples"<<endl<<" 2. Generate Grid Samples"<<endl<<" 3. Generate Exact Grid Samples"<<endl;
clock_t sample_generation_tStart = clock();
int a=2;
//cin>>a;
if(a==1){ // Generate Random Samples
sample_local_planner_node=generate_milestone_sample_local_planner_node(obstacles, start, destination, sample_grid_size, xyz_limit);
}else if(a==2){ //Generate Grid Samples
sample_local_planner_node=generate_grid_milestone_sample_local_planner_node(obstacles, start, destination, sample_grid_size, xyz_limit );
}else if(a==3){ //Generate exact Grid Samples
sample_local_planner_node=generate_exact_grid_milestone_sample_local_planner_node(obstacles, start, destination, sample_grid_size, xyz_limit);
}else{
cout<<"Invaild selection!"<<endl<<"Program is terminating."<<endl;
//return 0;
}
clock_t sample_generation_tEnd = clock();
total_time_sample_generation= total_time_sample_generation+(double)(sample_generation_tEnd - sample_generation_tStart)/CLOCKS_PER_SEC;
//printf("from the loop: Time taken in the sample generation process: %.6fs\n", (double)(sample_generation_tEnd - sample_generation_tStart)/CLOCKS_PER_SEC);
ALL_sample_local_planner_node.push_back(sample_local_planner_node);
//write_the_sample_nodes_in_a_file(sample_local_planner_node, generate_sample_nodes_file_name(n));
}
//**************************************************************************************************
//**********************************Serial codes - ann lib******************************************
//**************************************************************************************************
int path_not_found=0;
double total_time_path_find_gpu=0;
if(!parallel){
double total_time_knn_ann=0;
double total_time_path_find_ann=0;
int path_not_found=0;
for(int n=0; n<input_nodes.size()-1; n++){
vector<local_planner_node*> sample_local_planner_node=ALL_sample_local_planner_node[n];
local_planner_node* start=sample_local_planner_node[init_number_of_samples-2];
local_planner_node* destination =sample_local_planner_node[init_number_of_samples-1];
//******************************** NEIGHBOR SELECTION PROCESS*******************************
//for debugging
for(int k =0; k<sample_local_planner_node.size();k++){
sample_local_planner_node[k]->index=k;
}
clock_t knn_process_tStart=clock();
//Select neighbors using ANN library
//Nearest Neighbors
update_kth_neighbors_with_ann(sample_local_planner_node, obstacles);
//print_neighbors_update(sample_local_planner_node);
clock_t knn_process_tEnd = clock();
//********************************** find the shortest path ********************************
clock_t path_find_tStart = clock();
if (find_the_shortest_path(start, destination, sample_local_planner_node)){
//write_in_a_file(sample_local_planner_node, start, destination, generate_path_output_file_name(n));
}else{
cout<<"n="<<n<<" couldn't found the path"<<endl;
path_not_found++;
}
clock_t path_find_tEnd = clock();
//******************************************************************************************
total_time_knn_ann=total_time_knn_ann+(double)(knn_process_tEnd - knn_process_tStart)/CLOCKS_PER_SEC;
total_time_path_find_ann=total_time_path_find_ann+(double)(path_find_tEnd - path_find_tStart)/CLOCKS_PER_SEC;
}
cout<<"TOTAL TIME FOR KNN-ANN="<<total_time_knn_ann<<endl;
cout<<"TOTAL TIME FOR path find-ANN="<<total_time_path_find_ann<<endl;
cout<<"Total path not found = "<<path_not_found<<endl;
}else{
//**************************************************************************************************
//**********************************parallel codes - gpu *******************************************
//**************************************************************************************************
double total_time_knn_gpu=0;
//******************************** NEIGHBOR SELECTION PROCESS*******************************
//for debugging
for(int i=0;i<input_nodes.size()-1;i++){
for(int k =0; k<ALL_sample_local_planner_node[i].size();k++){
ALL_sample_local_planner_node[i][k]->index=k;
}
}
clock_t knn_process_tStart=clock();
if(number_of_waypoints-1<=paths_in_each_loop){
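// All waypoint pairs fit in one GPU batch: pack every path's sample coordinates into a single
// flat array, run the KNN kernel once over all of them, then search each path on the host.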
//x,y,z coordinates of sample nodes
const int element_size_array=((int) ceil((double)init_number_of_samples/warp_size)*warp_size)*(input_nodes.size()-1);
float xyz[element_size_array*3]; copy_the_coordinates(xyz, ALL_sample_local_planner_node);//host
float *d_xyz; hipMalloc((void**)&d_xyz, sizeof(float)*element_size_array*3) ; // device
//obstacles
float h_obstacles[number_of_obstacles*4]; copy_obstacles(h_obstacles,obstacles);
float *d_obstacles; hipMalloc((void**)&d_obstacles, sizeof(float)*number_of_obstacles*4) ; // device
//knn_neighbour_nodes_no ->serial no in the vector
size_t neighbors_serial_no_size= sizeof(int) *init_number_of_samples*number_of_elements_for_knn * (input_nodes.size()-1);
int h_neighbors_serial_no[init_number_of_samples*number_of_elements_for_knn * (input_nodes.size()-1)];
int *d_neighbors_serial_no; hipMalloc((void**)&d_neighbors_serial_no, neighbors_serial_no_size ) ; // device
//knn_neighbour_corresponding_distances
size_t neighbors_sq_distance_size= sizeof(float) * init_number_of_samples * number_of_elements_for_knn * (input_nodes.size()-1);
float h_neighbors_sq_distance[init_number_of_samples*number_of_elements_for_knn * (input_nodes.size()-1)];
float *d_neighbors_sq_distance; hipMalloc((void**)&d_neighbors_sq_distance, neighbors_sq_distance_size) ; // device
hipMemcpy(d_xyz, xyz, sizeof(float)*element_size_array*3, hipMemcpyHostToDevice);
hipMemcpy(d_obstacles, h_obstacles, sizeof(float)*number_of_obstacles*4, hipMemcpyHostToDevice);
hipDeviceSynchronize();
int blocks=number_of_blocks*(input_nodes.size()-1);
hipLaunchKernelGGL(( gpu_calculation), dim3(blocks), dim3(threads_per_block) , 0, 0, d_xyz, d_obstacles, d_neighbors_serial_no, d_neighbors_sq_distance);
hipDeviceSynchronize();
hipMemcpy(h_neighbors_serial_no,d_neighbors_serial_no, neighbors_serial_no_size, hipMemcpyDeviceToHost);
hipMemcpy(h_neighbors_sq_distance,d_neighbors_sq_distance, neighbors_sq_distance_size, hipMemcpyDeviceToHost);
//UPDATE THE DATA
typedef pair<local_planner_node*, float> apair;
int counter=0;
//cout<<endl<<endl;
for(int k=0; k<(input_nodes.size()-1); k++){ //2; k++){
//int k=0;
for(int i=0;i<init_number_of_samples; i++){
for(int j=0;j<number_of_elements_for_knn; j++){
ALL_sample_local_planner_node[k][i]->knn.push_back(apair( ALL_sample_local_planner_node[k][h_neighbors_serial_no[counter]], h_neighbors_sq_distance[counter] ) );
counter++;
}
//cout<<endl<<endl;
}
}
//path search
for(int n=0; n<input_nodes.size()-1; n++){
vector<local_planner_node*> sample_local_planner_node=ALL_sample_local_planner_node[n];
local_planner_node* start=sample_local_planner_node[init_number_of_samples-2];
local_planner_node* destination =sample_local_planner_node[init_number_of_samples-1];
//********************************** find the shortest path ********************************
clock_t path_find_tStart = clock();
//cout<<"finding path for n="<<n<<endl;
if (find_the_shortest_path(start, destination, sample_local_planner_node)){
write_in_a_file(sample_local_planner_node, start, destination, generate_path_output_file_name(n));
}else{
cout<<"couldn't found the path"<<endl;
path_not_found++;
}
clock_t path_find_tEnd = clock();
total_time_path_find_gpu=total_time_path_find_gpu+(double)(path_find_tEnd - path_find_tStart)/CLOCKS_PER_SEC;
}
}else{
//obstacles
double total_time_path_find_gpu=0;
for(int p=0;p<ALL_sample_local_planner_node.size()-1;p=p+paths_in_each_loop){
int start_index=p; int end_index;
if((p+paths_in_each_loop-1) <= (ALL_sample_local_planner_node.size()-1)){
end_index=p+paths_in_each_loop-1;
}else{
end_index=ALL_sample_local_planner_node.size()-1;
}
vector<vector<local_planner_node*> > temp_ALL_sample_node;
for(int q=start_index; q<=end_index;q++){
temp_ALL_sample_node.push_back(ALL_sample_local_planner_node[q]);
}
cout<<"temp_ALL_sample_node size="<<temp_ALL_sample_node.size()<<endl;
float h_obstacles[number_of_obstacles*4]; copy_obstacles(h_obstacles,obstacles);
float *d_obstacles; hipMalloc((void**)&d_obstacles, sizeof(float)*number_of_obstacles*4) ; // device
//x,y,z coordinates of sample nodes
const int element_size_array=((int) ceil((double)init_number_of_samples/warp_size)*warp_size)*(temp_ALL_sample_node.size());
float xyz[element_size_array*3]; copy_the_coordinates(xyz, temp_ALL_sample_node);//host
float *d_xyz; hipMalloc((void**)&d_xyz, sizeof(float)*element_size_array*3) ; // device
//knn_neighbour_nodes_no ->serial no in the vector
size_t neighbors_serial_no_size= sizeof(int) *init_number_of_samples*number_of_elements_for_knn * (temp_ALL_sample_node.size());
int h_neighbors_serial_no[init_number_of_samples*number_of_elements_for_knn * (temp_ALL_sample_node.size())];
int *d_neighbors_serial_no; hipMalloc((void**)&d_neighbors_serial_no, neighbors_serial_no_size ) ; // device
//knn_neighbour_corresponding_distances
size_t neighbors_sq_distance_size= sizeof(float) * init_number_of_samples * number_of_elements_for_knn * (temp_ALL_sample_node.size());
float h_neighbors_sq_distance[init_number_of_samples*number_of_elements_for_knn * (temp_ALL_sample_node.size())];
float *d_neighbors_sq_distance; hipMalloc((void**)&d_neighbors_sq_distance, neighbors_sq_distance_size) ; // device
hipMemcpy(d_xyz, xyz, sizeof(float)*element_size_array*3, hipMemcpyHostToDevice);
hipMemcpy(d_obstacles, h_obstacles, sizeof(float)*number_of_obstacles*4, hipMemcpyHostToDevice);
hipDeviceSynchronize();
int blocks=number_of_blocks*(temp_ALL_sample_node.size());
hipLaunchKernelGGL(( gpu_calculation), dim3(blocks), dim3(threads_per_block) , 0, 0, d_xyz, d_obstacles, d_neighbors_serial_no, d_neighbors_sq_distance);
hipDeviceSynchronize();
hipMemcpy(h_neighbors_serial_no,d_neighbors_serial_no, neighbors_serial_no_size, hipMemcpyDeviceToHost);
hipMemcpy(h_neighbors_sq_distance,d_neighbors_sq_distance, neighbors_sq_distance_size, hipMemcpyDeviceToHost);
//UPDATE THE DATA
typedef pair<local_planner_node*, float> apair;
int counter=0;
//cout<<endl<<endl;
//int q=start_index;
for(int k=0; k<(temp_ALL_sample_node.size()); k++){ //2; k++){
//int k=0;
for(int i=0;i<init_number_of_samples; i++){
for(int j=0;j<number_of_elements_for_knn; j++){
temp_ALL_sample_node[k][i]->knn.push_back(apair( temp_ALL_sample_node[k][h_neighbors_serial_no[counter]], h_neighbors_sq_distance[counter] ) );
counter++;
}
//cout<<endl<<endl;
}
// path find
vector<local_planner_node*> sample_local_planner_node=temp_ALL_sample_node[k];
local_planner_node* start=sample_local_planner_node[init_number_of_samples-2];
local_planner_node* destination =sample_local_planner_node[init_number_of_samples-1];
//********************************** find the shortest path ********************************
clock_t path_find_tStart = clock();
//cout<<"finding path for n="<<n<<endl;
if (find_the_shortest_path(start, destination, sample_local_planner_node)){
write_in_a_file(sample_local_planner_node, start, destination, generate_path_output_file_name(k+p));
}else{
cout<<"couldn't found the path"<<endl;
path_not_found++;
}
clock_t path_find_tEnd = clock();
total_time_path_find_gpu=total_time_path_find_gpu+(double)(path_find_tEnd - path_find_tStart)/CLOCKS_PER_SEC;
}
}
}
clock_t knn_process_tEnd = clock();
total_time_knn_gpu=total_time_knn_gpu+(double)(knn_process_tEnd - knn_process_tStart)/CLOCKS_PER_SEC -total_time_path_find_gpu;
cout<<"TOTAL TIME FOR path find-GPU="<<total_time_path_find_gpu<<"sec"<<endl;
cout<<"Total path not found = "<<path_not_found<<"sec"<<endl;
cout<<"TOTAL TIME FOR KNN-GPU="<<total_time_knn_gpu<<"sec"<<endl;
}
clock_t tEnd = clock();
cout<<" total_time_sample_generation= "<<total_time_sample_generation<<"sec"<<endl;
printf("At the end: Total Time taken: %.6fs\n", (double)(tEnd - tStart)/CLOCKS_PER_SEC);
cout<<"ALL_sample_local_planner_node.size() = "<< ALL_sample_local_planner_node.size()<<endl;
//obstacle volume
float volume=0;
for(int i=0; i<obstacles.size(); i++){
volume=volume+(4.0/3)*3.14*obstacles[i].radius*obstacles[i].radius*obstacles[i].radius;
}
cout<<"obstacle volume="<<volume<<endl;
cout<<"world volume="<<(map_x-map_x_start) * ( map_y-map_y_start) * (map_z-map_z_start)<<endl;
return 0;
}
03e82ddda8ce1a627ec4c7baeb0d715426b05731.cu | //The program finds the path for a UAV using the Probabilistic Roadmap Method (PRM) - serial version.
// nvcc PRM_parallel_17.cu -use_fast_math -Xptxas -v -arch=sm_30 -lcurand -I/home/roksana/Dropbox/ann/include -L/home/roksana/Dropbox/ann/lib -lANN
//nvcc PRM_parallel_17.cu -use_fast_math -Xptxas -v -arch=sm_21 -lcurand -I/home/shimu/Dropbox/ann/include -L/home/shimu/Dropbox/ann/lib -lANN
//nvcc main1.cu -use_fast_math -Xptxas -v -arch=sm_21 -lcurand -I/home/shimu/ann/include -L/home/shimu/ann/lib -lANN
//input: start & destination location
//output: a set of locations that includes start location, intermediate locations and destination location
#include<iostream>
#include <stdint.h>
#include <inttypes.h>
#include <cmath>
#include <sstream>
#include "predefined_variables.h"
#include "data_structures.h"
#include "milestones.h"
#include "update_z_number.h"
#include "update_k_neighbors.h"
#include "ann_update_k_neighbors.h"
#include "find_shortest_path.h"
#include "device_BitonicSort_prm.cuh"
#include "device_codes.cuh"
#include "function_codes.cuh"
#include "obstacle_generation_file.cuh"
using namespace std;
int main(){
//generate_input_nodes();
bool parallel=false;//true;//false; //false; //true;
// read the list of start and end nodes
// the nodes are given as A, B, C, D. we need to read the nodes coordinate first. Then find the path between A to B, B to C, C to D.
vector<position> input_nodes=read_input_nodes();
if (input_nodes.size()<=1){
cout<<"Error: Not enough nodes to find path"<<endl;
return 0;
}
//for(int i=0; i<input_nodes.size();i++)cout<<input_nodes[i].x<< " "<<input_nodes[i].y<< " "<<input_nodes[i].z<< endl;
//****************************read obstacles************************************************
//generate_obstacle(input_nodes);
clock_t read_obstacle_tStart = clock();
vector<Sphere>obstacles; obstacles.resize(number_of_obstacles);
read_the_obstacles(obstacles);
clock_t read_obstacle_tEnd = clock();
printf("from the loop: Time taken to read the obstacle: %.6fs\n", (double)(read_obstacle_tEnd - read_obstacle_tStart)/CLOCKS_PER_SEC);
clock_t tStart = clock();
double total_time_sample_generation=0;
vector<vector<local_planner_node*> > ALL_sample_local_planner_node;
for(int n=0; n<input_nodes.size()-1; n++){
//******************initialize the start and the destination location***********************
local_planner_node* start= new local_planner_node();
start->set_local_planner_node(input_nodes[n].x, input_nodes[n].y, input_nodes[n].z);
cout<<"start: " <<start->p.x<<" "<<start->p.y<<" "<<start->p.z<<endl;
start->dist_from_start=0;
local_planner_node* destination = new local_planner_node();
destination->set_local_planner_node(input_nodes[n+1].x, input_nodes[n+1].y, input_nodes[n+1].z);
cout<<"destination: "<<destination->p.x<<" "<<destination->p.y<<" "<<destination->p.z<<endl;
//************************boundary conditions***********************************************
//check if the start and destination location collides with any obstacles
if(!boundary_condition_satisfied(start, destination, obstacles)){
cout<<"boundary condition doesn't satisfied either for start or destination"<<endl;
//return 0;
}
//******************* find the grid size for putting the samples****************************
vector<float> xyz_limit(6);
vector<int> sample_grid_size =find_the_sample_grid_size(start->p, destination->p, xyz_limit);
cout<<" nodes_in_x = "<<sample_grid_size[0]<<" nodes_in_y = "<<sample_grid_size[1]<<" nodes_in_z = "<<sample_grid_size[2]<<endl;
//cout<<"x_start="<<xyz_limit[0]<<", x_end="<<xyz_limit[1]<<", y_start="<<xyz_limit[2]<<", y_end="<<xyz_limit[3]<<", z_start="<<xyz_limit[4]<<", z_end="<<xyz_limit[5]<<endl;
//**************************generate the sample locations***********************************
vector<local_planner_node*> sample_local_planner_node;
//cout<<"Select one of the following options:"<<endl<<" 1. Generate Random Samples"<<endl<<" 2. Generate Grid Samples"<<endl<<" 3. Generate Exact Grid Samples"<<endl;
clock_t sample_generation_tStart = clock();
int a=2;
//cin>>a;
if(a==1){ // Generate Random Samples
sample_local_planner_node=generate_milestone_sample_local_planner_node(obstacles, start, destination, sample_grid_size, xyz_limit);
}else if(a==2){ //Generate Grid Samples
sample_local_planner_node=generate_grid_milestone_sample_local_planner_node(obstacles, start, destination, sample_grid_size, xyz_limit );
}else if(a==3){ //Generate exact Grid Samples
sample_local_planner_node=generate_exact_grid_milestone_sample_local_planner_node(obstacles, start, destination, sample_grid_size, xyz_limit);
}else{
cout<<"Invaild selection!"<<endl<<"Program is terminating."<<endl;
//return 0;
}
clock_t sample_generation_tEnd = clock();
total_time_sample_generation= total_time_sample_generation+(double)(sample_generation_tEnd - sample_generation_tStart)/CLOCKS_PER_SEC;
//printf("from the loop: Time taken in the sample generation process: %.6fs\n", (double)(sample_generation_tEnd - sample_generation_tStart)/CLOCKS_PER_SEC);
ALL_sample_local_planner_node.push_back(sample_local_planner_node);
//write_the_sample_nodes_in_a_file(sample_local_planner_node, generate_sample_nodes_file_name(n));
}
//**************************************************************************************************
//**********************************Serial codes - ann lib******************************************
//**************************************************************************************************
int path_not_found=0;
double total_time_path_find_gpu=0;
if(!parallel){
double total_time_knn_ann=0;
double total_time_path_find_ann=0;
int path_not_found=0;
for(int n=0; n<input_nodes.size()-1; n++){
vector<local_planner_node*> sample_local_planner_node=ALL_sample_local_planner_node[n];
local_planner_node* start=sample_local_planner_node[init_number_of_samples-2];
local_planner_node* destination =sample_local_planner_node[init_number_of_samples-1];
//******************************** NEIGHBOR SELECTION PROCESS*******************************
//for debugging
for(int k =0; k<sample_local_planner_node.size();k++){
sample_local_planner_node[k]->index=k;
}
clock_t knn_process_tStart=clock();
//Select neighbors using ANN library
//Nearest Neighbors
update_kth_neighbors_with_ann(sample_local_planner_node, obstacles);
//print_neighbors_update(sample_local_planner_node);
clock_t knn_process_tEnd = clock();
//********************************** find the shortest path ********************************
clock_t path_find_tStart = clock();
if (find_the_shortest_path(start, destination, sample_local_planner_node)){
//write_in_a_file(sample_local_planner_node, start, destination, generate_path_output_file_name(n));
}else{
cout<<"n="<<n<<" couldn't found the path"<<endl;
path_not_found++;
}
clock_t path_find_tEnd = clock();
//******************************************************************************************
total_time_knn_ann=total_time_knn_ann+(double)(knn_process_tEnd - knn_process_tStart)/CLOCKS_PER_SEC;
total_time_path_find_ann=total_time_path_find_ann+(double)(path_find_tEnd - path_find_tStart)/CLOCKS_PER_SEC;
}
cout<<"TOTAL TIME FOR KNN-ANN="<<total_time_knn_ann<<endl;
cout<<"TOTAL TIME FOR path find-ANN="<<total_time_path_find_ann<<endl;
cout<<"Total path not found = "<<path_not_found<<endl;
}else{
//**************************************************************************************************
//**********************************parallel codes - gpu *******************************************
//**************************************************************************************************
double total_time_knn_gpu=0;
//******************************** NEIGHBOR SELECTION PROCESS*******************************
//for debugging
for(int i=0;i<input_nodes.size()-1;i++){
for(int k =0; k<ALL_sample_local_planner_node[i].size();k++){
ALL_sample_local_planner_node[i][k]->index=k;
}
}
clock_t knn_process_tStart=clock();
if(number_of_waypoints-1<=paths_in_each_loop){
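// All waypoint pairs fit in one GPU batch: pack every path's sample coordinates into a single
// flat array, run the KNN kernel once over all of them, then search each path on the host.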
//x,y,z coordinates of sample nodes
const int element_size_array=((int) ceil((double)init_number_of_samples/warp_size)*warp_size)*(input_nodes.size()-1);
float xyz[element_size_array*3]; copy_the_coordinates(xyz, ALL_sample_local_planner_node);//host
float *d_xyz; cudaMalloc((void**)&d_xyz, sizeof(float)*element_size_array*3) ; // device
//obstacles
float h_obstacles[number_of_obstacles*4]; copy_obstacles(h_obstacles,obstacles);
float *d_obstacles; cudaMalloc((void**)&d_obstacles, sizeof(float)*number_of_obstacles*4) ; // device
//knn_neighbour_nodes_no ->serial no in the vector
size_t neighbors_serial_no_size= sizeof(int) *init_number_of_samples*number_of_elements_for_knn * (input_nodes.size()-1);
int h_neighbors_serial_no[init_number_of_samples*number_of_elements_for_knn * (input_nodes.size()-1)];
int *d_neighbors_serial_no; cudaMalloc((void**)&d_neighbors_serial_no, neighbors_serial_no_size ) ; // device
//knn_neighbour_corresponding_distances
size_t neighbors_sq_distance_size= sizeof(float) * init_number_of_samples * number_of_elements_for_knn * (input_nodes.size()-1);
float h_neighbors_sq_distance[init_number_of_samples*number_of_elements_for_knn * (input_nodes.size()-1)];
float *d_neighbors_sq_distance; cudaMalloc((void**)&d_neighbors_sq_distance, neighbors_sq_distance_size) ; // device
cudaMemcpy(d_xyz, xyz, sizeof(float)*element_size_array*3, cudaMemcpyHostToDevice);
cudaMemcpy(d_obstacles, h_obstacles, sizeof(float)*number_of_obstacles*4, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
int blocks=number_of_blocks*(input_nodes.size()-1);
gpu_calculation<<< blocks, threads_per_block >>>(d_xyz, d_obstacles, d_neighbors_serial_no, d_neighbors_sq_distance);
cudaDeviceSynchronize();
cudaMemcpy(h_neighbors_serial_no,d_neighbors_serial_no, neighbors_serial_no_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_neighbors_sq_distance,d_neighbors_sq_distance, neighbors_sq_distance_size, cudaMemcpyDeviceToHost);
//UPDATE THE DATA
typedef pair<local_planner_node*, float> apair;
int counter=0;
//cout<<endl<<endl;
for(int k=0; k<(input_nodes.size()-1); k++){ //2; k++){
//int k=0;
for(int i=0;i<init_number_of_samples; i++){
for(int j=0;j<number_of_elements_for_knn; j++){
ALL_sample_local_planner_node[k][i]->knn.push_back(apair( ALL_sample_local_planner_node[k][h_neighbors_serial_no[counter]], h_neighbors_sq_distance[counter] ) );
counter++;
}
//cout<<endl<<endl;
}
}
//path search
for(int n=0; n<input_nodes.size()-1; n++){
vector<local_planner_node*> sample_local_planner_node=ALL_sample_local_planner_node[n];
local_planner_node* start=sample_local_planner_node[init_number_of_samples-2];
local_planner_node* destination =sample_local_planner_node[init_number_of_samples-1];
//********************************** find the shortest path ********************************
clock_t path_find_tStart = clock();
//cout<<"finding path for n="<<n<<endl;
if (find_the_shortest_path(start, destination, sample_local_planner_node)){
write_in_a_file(sample_local_planner_node, start, destination, generate_path_output_file_name(n));
}else{
cout<<"couldn't found the path"<<endl;
path_not_found++;
}
clock_t path_find_tEnd = clock();
total_time_path_find_gpu=total_time_path_find_gpu+(double)(path_find_tEnd - path_find_tStart)/CLOCKS_PER_SEC;
}
}else{
//obstacles
double total_time_path_find_gpu=0;
for(int p=0;p<ALL_sample_local_planner_node.size()-1;p=p+paths_in_each_loop){
int start_index=p; int end_index;
if((p+paths_in_each_loop-1) <= (ALL_sample_local_planner_node.size()-1)){
end_index=p+paths_in_each_loop-1;
}else{
end_index=ALL_sample_local_planner_node.size()-1;
}
vector<vector<local_planner_node*> > temp_ALL_sample_node;
for(int q=start_index; q<=end_index;q++){
temp_ALL_sample_node.push_back(ALL_sample_local_planner_node[q]);
}
cout<<"temp_ALL_sample_node size="<<temp_ALL_sample_node.size()<<endl;
float h_obstacles[number_of_obstacles*4]; copy_obstacles(h_obstacles,obstacles);
float *d_obstacles; cudaMalloc((void**)&d_obstacles, sizeof(float)*number_of_obstacles*4) ; // device
//x,y,z coordinates of sample nodes
const int element_size_array=((int) ceil((double)init_number_of_samples/warp_size)*warp_size)*(temp_ALL_sample_node.size());
float xyz[element_size_array*3]; copy_the_coordinates(xyz, temp_ALL_sample_node);//host
float *d_xyz; cudaMalloc((void**)&d_xyz, sizeof(float)*element_size_array*3) ; // device
//knn_neighbour_nodes_no ->serial no in the vector
size_t neighbors_serial_no_size= sizeof(int) *init_number_of_samples*number_of_elements_for_knn * (temp_ALL_sample_node.size());
int h_neighbors_serial_no[init_number_of_samples*number_of_elements_for_knn * (temp_ALL_sample_node.size())];
int *d_neighbors_serial_no; cudaMalloc((void**)&d_neighbors_serial_no, neighbors_serial_no_size ) ; // device
//knn_neighbour_corresponding_distances
size_t neighbors_sq_distance_size= sizeof(float) * init_number_of_samples * number_of_elements_for_knn * (temp_ALL_sample_node.size());
float h_neighbors_sq_distance[init_number_of_samples*number_of_elements_for_knn * (temp_ALL_sample_node.size())];
float *d_neighbors_sq_distance; cudaMalloc((void**)&d_neighbors_sq_distance, neighbors_sq_distance_size) ; // device
cudaMemcpy(d_xyz, xyz, sizeof(float)*element_size_array*3, cudaMemcpyHostToDevice);
cudaMemcpy(d_obstacles, h_obstacles, sizeof(float)*number_of_obstacles*4, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
int blocks=number_of_blocks*(temp_ALL_sample_node.size());
gpu_calculation<<< blocks, threads_per_block >>>(d_xyz, d_obstacles, d_neighbors_serial_no, d_neighbors_sq_distance);
cudaDeviceSynchronize();
cudaMemcpy(h_neighbors_serial_no,d_neighbors_serial_no, neighbors_serial_no_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_neighbors_sq_distance,d_neighbors_sq_distance, neighbors_sq_distance_size, cudaMemcpyDeviceToHost);
//UPDATE THE DATA
typedef pair<local_planner_node*, float> apair;
int counter=0;
//cout<<endl<<endl;
//int q=start_index;
for(int k=0; k<(temp_ALL_sample_node.size()); k++){ //2; k++){
//int k=0;
for(int i=0;i<init_number_of_samples; i++){
for(int j=0;j<number_of_elements_for_knn; j++){
temp_ALL_sample_node[k][i]->knn.push_back(apair( temp_ALL_sample_node[k][h_neighbors_serial_no[counter]], h_neighbors_sq_distance[counter] ) );
counter++;
}
//cout<<endl<<endl;
}
// path find
vector<local_planner_node*> sample_local_planner_node=temp_ALL_sample_node[k];
local_planner_node* start=sample_local_planner_node[init_number_of_samples-2];
local_planner_node* destination =sample_local_planner_node[init_number_of_samples-1];
//********************************** find the shortest path ********************************
clock_t path_find_tStart = clock();
//cout<<"finding path for n="<<n<<endl;
if (find_the_shortest_path(start, destination, sample_local_planner_node)){
write_in_a_file(sample_local_planner_node, start, destination, generate_path_output_file_name(k+p));
}else{
cout<<"couldn't found the path"<<endl;
path_not_found++;
}
clock_t path_find_tEnd = clock();
total_time_path_find_gpu=total_time_path_find_gpu+(double)(path_find_tEnd - path_find_tStart)/CLOCKS_PER_SEC;
}
}
}
clock_t knn_process_tEnd = clock();
total_time_knn_gpu=total_time_knn_gpu+(double)(knn_process_tEnd - knn_process_tStart)/CLOCKS_PER_SEC -total_time_path_find_gpu;
cout<<"TOTAL TIME FOR path find-GPU="<<total_time_path_find_gpu<<"sec"<<endl;
cout<<"Total path not found = "<<path_not_found<<"sec"<<endl;
cout<<"TOTAL TIME FOR KNN-GPU="<<total_time_knn_gpu<<"sec"<<endl;
}
clock_t tEnd = clock();
cout<<" total_time_sample_generation= "<<total_time_sample_generation<<"sec"<<endl;
printf("At the end: Total Time taken: %.6fs\n", (double)(tEnd - tStart)/CLOCKS_PER_SEC);
cout<<"ALL_sample_local_planner_node.size() = "<< ALL_sample_local_planner_node.size()<<endl;
//obstacle volume
float volume=0;
for(int i=0; i<obstacles.size(); i++){
volume=volume+(4.0/3)*3.14*obstacles[i].radius*obstacles[i].radius*obstacles[i].radius;
}
cout<<"obstacle volume="<<volume<<endl;
cout<<"world volume="<<(map_x-map_x_start) * ( map_y-map_y_start) * (map_z-map_z_start)<<endl;
return 0;
}
|
d9a4b74f43a9a33ce4010dffaad98ce1aaa64b41.hip | // !!! This is a file automatically generated by hipify!!!
/*
* ------------------------------------------------------------------------------
*
* MIT License
*
* Copyright (c) 2021 Parallel Applications Modelling Group - GMAP
* GMAP website: https://gmap.pucrs.br
*
* Pontifical Catholic University of Rio Grande do Sul (PUCRS)
* Av. Ipiranga, 6681, Porto Alegre - Brazil, 90619-900
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* ------------------------------------------------------------------------------
*
* The original NPB 3.4 version was written in Fortran and belongs to:
* http://www.nas.nasa.gov/Software/NPB/
*
* ------------------------------------------------------------------------------
*
* The serial C++ version is a translation of the original NPB 3.4
* Serial C++ version: https://github.com/GMAP/NPB-CPP/tree/master/NPB-SER
*
* Authors of the C++ code:
* Dalvan Griebler <[email protected]>
* Gabriell Araujo <[email protected]>
 *      Júnior Löff <[email protected]>
*
* ------------------------------------------------------------------------------
*/
#include "wtime.hpp"
#include <cstdlib>
#include <hip/hip_runtime.h>
/* prototype */
void wtime(double*);
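/*
 * Usage sketch (editorial note, not part of the original NPB source): the timers
 * are cumulative, so a slot is cleared once and may accumulate over many
 * start/stop pairs. Note that timer_stop(int) synchronizes the device before
 * reading the clock, while the argument-less timer_stop() does not.
 *
 *   timer_clear(1);
 *   timer_start(1);
 *   // ... launch kernels ...
 *   timer_stop(1);
 *   double t = timer_read(1);
 */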
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
double elapsed_time(void){
double t;
wtime(&t);
return(t);
}
double start[64], elapsed[64];
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
void timer_clear(int n){
elapsed[n] = 0.0;
}
void timer_clear(){
elapsed[0] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
void timer_start(int n){
start[n] = elapsed_time();
}
void timer_start(){
start[0] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
void timer_stop(int n){
hipDeviceSynchronize();
double t, now;
now = elapsed_time();
t = now - start[n];
elapsed[n] += t;
}
void timer_stop(){
double t, now;
now = elapsed_time();
t = now - start[0];
elapsed[0] += t;
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
double timer_read(int n){
return(elapsed[n]);
}
double timer_read(){
return(elapsed[0]);
}
| d9a4b74f43a9a33ce4010dffaad98ce1aaa64b41.cu | /*
* ------------------------------------------------------------------------------
*
* MIT License
*
* Copyright (c) 2021 Parallel Applications Modelling Group - GMAP
* GMAP website: https://gmap.pucrs.br
*
* Pontifical Catholic University of Rio Grande do Sul (PUCRS)
* Av. Ipiranga, 6681, Porto Alegre - Brazil, 90619-900
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* ------------------------------------------------------------------------------
*
* The original NPB 3.4 version was written in Fortran and belongs to:
* http://www.nas.nasa.gov/Software/NPB/
*
* ------------------------------------------------------------------------------
*
* The serial C++ version is a translation of the original NPB 3.4
* Serial C++ version: https://github.com/GMAP/NPB-CPP/tree/master/NPB-SER
*
* Authors of the C++ code:
* Dalvan Griebler <[email protected]>
* Gabriell Araujo <[email protected]>
* Júnior Löff <[email protected]>
*
* ------------------------------------------------------------------------------
*/
#include "wtime.hpp"
#include <cstdlib>
#include <cuda.h>
/* prototype */
void wtime(double*);
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
double elapsed_time(void){
double t;
wtime(&t);
return(t);
}
double start[64], elapsed[64];
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
void timer_clear(int n){
elapsed[n] = 0.0;
}
void timer_clear(){
elapsed[0] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
void timer_start(int n){
start[n] = elapsed_time();
}
void timer_start(){
start[0] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
void timer_stop(int n){
cudaDeviceSynchronize();
double t, now;
now = elapsed_time();
t = now - start[n];
elapsed[n] += t;
}
void timer_stop(){
double t, now;
now = elapsed_time();
t = now - start[0];
elapsed[0] += t;
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
double timer_read(int n){
return(elapsed[n]);
}
double timer_read(){
return(elapsed[0]);
}
|
c6f6705263e4a757829e69e87a3f070480077cd3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_invertDVF.cuh"
__global__ void kernel_invertDVF(float *mx2, float *my2, float *mz2, hipTextureObject_t mx, hipTextureObject_t my, hipTextureObject_t mz, int nx, int ny, int nz, int niter)
{
int ix = BLOCKWIDTH * blockIdx.x + threadIdx.x;
int iy = BLOCKHEIGHT * blockIdx.y + threadIdx.y;
int iz = BLOCKDEPTH * blockIdx.z + threadIdx.z;
if (ix >= nx || iy >= ny || iz >= nz)
return;
int id = ix + iy * nx + iz * nx * ny;
float x = 0.0f, y = 0.0f, z = 0.0f;
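    // Fixed-point iteration for DVF inversion: starting from zero displacement,
    // repeatedly evaluate the forward field at the displaced position and negate
    // it, x_{k+1} = -m(p + x_k); after niter steps (x, y, z) approximates the
    // inverse displacement at voxel p. The +0.5f offsets sample at texel centers
    // (unnormalized texture coordinates).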
for (int iter = 0; iter < niter; iter ++){
x = - tex3D<float>(mx, (x + ix + 0.5f), (y + iy + 0.5f), (z + iz + 0.5f));
y = - tex3D<float>(my, (x + ix + 0.5f), (y + iy + 0.5f), (z + iz + 0.5f));
z = - tex3D<float>(mz, (x + ix + 0.5f), (y + iy + 0.5f), (z + iz + 0.5f));
}
mx2[id] = x;
my2[id] = y;
mz2[id] = z;
} | c6f6705263e4a757829e69e87a3f070480077cd3.cu | #include "kernel_invertDVF.cuh"
__global__ void kernel_invertDVF(float *mx2, float *my2, float *mz2, cudaTextureObject_t mx, cudaTextureObject_t my, cudaTextureObject_t mz, int nx, int ny, int nz, int niter)
{
int ix = BLOCKWIDTH * blockIdx.x + threadIdx.x;
int iy = BLOCKHEIGHT * blockIdx.y + threadIdx.y;
int iz = BLOCKDEPTH * blockIdx.z + threadIdx.z;
if (ix >= nx || iy >= ny || iz >= nz)
return;
int id = ix + iy * nx + iz * nx * ny;
float x = 0.0f, y = 0.0f, z = 0.0f;
for (int iter = 0; iter < niter; iter ++){
x = - tex3D<float>(mx, (x + ix + 0.5f), (y + iy + 0.5f), (z + iz + 0.5f));
y = - tex3D<float>(my, (x + ix + 0.5f), (y + iy + 0.5f), (z + iz + 0.5f));
z = - tex3D<float>(mz, (x + ix + 0.5f), (y + iy + 0.5f), (z + iz + 0.5f));
}
mx2[id] = x;
my2[id] = y;
mz2[id] = z;
} |
dcf5e3539be1438cfbe163df152c01fb82de8435.hip | // !!! This is a file automatically generated by hipify!!!
/*
CUDA BarnesHut v1.1: Simulation of the gravitational forces
in a galactic cluster using the Barnes-Hut n-body algorithm
Copyright (c) 2010 The University of Texas at Austin
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA, or see <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html>.
Author: Dr. Martin Burtscher
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>
// thread count
#define THREADS0 512
#define THREADS1 512
#define THREADS2 288
#define THREADS3 256
#define THREADS4 512
#define THREADS5 384
#define THREADS6 512
// block count = factor * SMs
#define FACTOR0 2
#define FACTOR1 1
#define FACTOR2 2
#define FACTOR3 1
#define FACTOR4 1
#define FACTOR5 2
#define FACTOR6 1
#define WARPSIZE 32
#define MAXDEPTH 26
/************************************************************************************/
// input generation
#define MULT 1103515245
#define ADD 12345
#define MASK 0x7FFFFFFF
#define TWOTO31 2147483648.0
static int A = 1;
static int B = 0;
static int randx = 1;
static int lastrand;
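// Editorial note: drndset()/drnd() implement a 31-bit linear congruential
// generator using the classic ANSI C rand() constants (1103515245, 12345).
// drndset() seeds the state, and drnd() returns the current state scaled to
// [0,1) before advancing it, so runs with the same seed reproduce the same
// body distribution.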
static void drndset(int seed)
{
A = 1;
B = 0;
randx = (A * seed + B) & MASK;
A = (MULT * A) & MASK;
B = (MULT * B + ADD) & MASK;
}
static double drnd()
{
lastrand = randx;
randx = (A * randx + B) & MASK;
return (double)lastrand / TWOTO31;
}
/************************************************************************************/
// childd is aliased with velxd, velyd, velzd, accxd, accyd, acczd, and sortd but they never use the same memory locations
__constant__ int nnodesd, nbodiesd, *errd, *sortd, *childd, *countd, *startd;
__constant__ float dtimed, dthfd, epssqd, itolsqd;
__constant__ float *massd, *posxd, *posyd, *poszd, *velxd, *velyd, *velzd, *accxd, *accyd, *acczd;
__constant__ float *maxxd, *maxyd, *maxzd, *minxd, *minyd, *minzd;
__device__ int stepd, bottomd, maxdepthd;
__device__ unsigned int blkcntd;
__device__ float radiusd;
/************************************************************************************/
/*** initialize memory **************************************************************/
/************************************************************************************/
__global__ void InitializationKernel()
{
register int i, inc;
i = threadIdx.x + blockIdx.x * blockDim.x;
if (i == 0) {
*errd = 0;
stepd = -1;
maxdepthd = 1;
blkcntd = 0;
}
inc = blockDim.x * gridDim.x;
for (; i < nbodiesd; i += inc) {
accxd[i] = 0.0f;
accyd[i] = 0.0f;
acczd[i] = 0.0f;
}
}
/************************************************************************************/
/*** compute center and radius ******************************************************/
/************************************************************************************/
__global__ void BoundingBoxKernel()
{
register int i, j, inc;
register float tmp;
__shared__ float minx[THREADS1], miny[THREADS1], minz[THREADS1];
__shared__ float maxx[THREADS1], maxy[THREADS1], maxz[THREADS1];
i = threadIdx.x;
if (i == 0) {
minx[0] = posxd[0];
miny[0] = posyd[0];
minz[0] = poszd[0];
}
__syncthreads();
// initialize with valid data (in case #bodies < #threads)
minx[i] = maxx[i] = minx[0];
miny[i] = maxy[i] = miny[0];
minz[i] = maxz[i] = minz[0];
inc = blockDim.x * gridDim.x;
j = i + blockIdx.x * blockDim.x;
// scan bodies
while (j < nbodiesd) {
tmp = posxd[j];
minx[i] = min(minx[i], tmp);
maxx[i] = max(maxx[i], tmp);
tmp = posyd[j];
miny[i] = min(miny[i], tmp);
maxy[i] = max(maxy[i], tmp);
tmp = poszd[j];
minz[i] = min(minz[i], tmp);
maxz[i] = max(maxz[i], tmp);
j += inc; // move on to next body
}
// reduction in shared memory
j = blockDim.x >> 1;
while (j > 0) {
__syncthreads();
if (i < j) {
minx[i] = min(minx[i], minx[i+j]);
miny[i] = min(miny[i], miny[i+j]);
minz[i] = min(minz[i], minz[i+j]);
maxx[i] = max(maxx[i], maxx[i+j]);
maxy[i] = max(maxy[i], maxy[i+j]);
maxz[i] = max(maxz[i], maxz[i+j]);
}
j >>= 1;
}
if (i == 0) {
// write block result to global memory
j = blockIdx.x;
minxd[j] = minx[0];
minyd[j] = miny[0];
minzd[j] = minz[0];
maxxd[j] = maxx[0];
maxyd[j] = maxy[0];
maxzd[j] = maxz[0];
__threadfence();
inc = gridDim.x - 1;
if (inc == atomicInc(&blkcntd, inc)) {
// I'm the last block, so combine all block results
for (j = 0; j <= inc; j++) {
minx[0] = min(minx[0], minxd[j]);
miny[0] = min(miny[0], minyd[j]);
minz[0] = min(minz[0], minzd[j]);
maxx[0] = max(maxx[0], maxxd[j]);
maxy[0] = max(maxy[0], maxyd[j]);
maxz[0] = max(maxz[0], maxzd[j]);
}
// compute radius
tmp = max(maxx[0] - minx[0], maxy[0] - miny[0]);
radiusd = max(tmp, maxz[0] - minz[0]) * 0.5f;
// create root node
j = nnodesd;
massd[j] = -1.0f;
startd[j] = 0;
posxd[j] = (minx[0] + maxx[0]) * 0.5f;
posyd[j] = (miny[0] + maxy[0]) * 0.5f;
poszd[j] = (minz[0] + maxz[0]) * 0.5f;
#pragma unroll 8
for (i = 0; i < 8; i++) childd[j*8+i] = -1;
bottomd = j;
stepd++;
}
}
}
/************************************************************************************/
/*** build tree *********************************************************************/
/************************************************************************************/
__global__ void TreeBuildingKernel()
{
register int i, j, k, depth, localmaxdepth, skip, inc;
register float x, y, z, r;
register float px, py, pz;
register int ch, n, cell, locked, patch;
__shared__ float radius, rootx, rooty, rootz;
i = threadIdx.x;
if (i == 0) {
// cache root data
radius = radiusd;
rootx = posxd[nnodesd];
rooty = posyd[nnodesd];
rootz = poszd[nnodesd];
}
__syncthreads();
localmaxdepth = 1;
skip = 1;
inc = blockDim.x * gridDim.x;
i += blockIdx.x * blockDim.x;
// iterate over all bodies assigned to thread
while (i < nbodiesd) {
if (skip != 0) {
// new body, so start traversing at root
skip = 0;
px = posxd[i];
py = posyd[i];
pz = poszd[i];
n = nnodesd;
depth = 1;
r = radius;
j = 0;
// determine which child to follow
if (rootx < px) j = 1;
if (rooty < py) j += 2;
if (rootz < pz) j += 4;
}
ch = childd[n*8+j];
// follow path to leaf cell
while (ch >= nbodiesd) {
n = ch;
depth++;
r *= 0.5f;
j = 0;
// determine which child to follow
if (posxd[n] < px) j = 1;
if (posyd[n] < py) j += 2;
if (poszd[n] < pz) j += 4;
ch = childd[n*8+j];
}
if (ch != -2) { // skip if child pointer is locked and try again later
locked = n*8+j;
if (ch == atomicCAS(&childd[locked], ch, -2)) { // try to lock
if (ch == -1) {
// if null, just insert the new body
childd[locked] = i;
} else { // there already is a body in this position
patch = -1;
// create new cell(s) and insert the old and new body
do {
depth++;
cell = atomicSub(&bottomd, 1) - 1;
if (cell <= nbodiesd) {
*errd = 1;
bottomd = nnodesd;
}
patch = max(patch, cell);
x = (j & 1) * r;
y = ((j >> 1) & 1) * r;
z = ((j >> 2) & 1) * r;
r *= 0.5f;
massd[cell] = -1.0f;
startd[cell] = -1;
x = posxd[cell] = posxd[n] - r + x;
y = posyd[cell] = posyd[n] - r + y;
z = poszd[cell] = poszd[n] - r + z;
#pragma unroll 8
for (k = 0; k < 8; k++) childd[cell*8+k] = -1;
if (patch != cell) {
childd[n*8+j] = cell;
}
j = 0;
if (x < posxd[ch]) j = 1;
if (y < posyd[ch]) j += 2;
if (z < poszd[ch]) j += 4;
childd[cell*8+j] = ch;
n = cell;
j = 0;
if (x < px) j = 1;
if (y < py) j += 2;
if (z < pz) j += 4;
ch = childd[n*8+j];
// repeat until the two bodies are different children
} while (ch >= 0);
childd[n*8+j] = i;
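          // The fence below makes the newly built cells (and their child entries)
          // globally visible before the locked pointer is overwritten with `patch`;
          // otherwise another thread could follow the published pointer and read
          // stale child data.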
__threadfence();
childd[locked] = patch;
}
localmaxdepth = max(depth, localmaxdepth);
i += inc; // move on to next body
skip = 1;
}
}
__syncthreads();
}
atomicMax(&maxdepthd, localmaxdepth);
}
/************************************************************************************/
/*** compute center of mass *********************************************************/
/************************************************************************************/
__global__ void SummarizationKernel()
{
register int i, j, k, ch, inc, missing, cnt;
register float m, cm, px, py, pz;
__shared__ int bottom, child[THREADS3 * 8];
if (0 == threadIdx.x) {
bottom = bottomd;
}
__syncthreads();
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
missing = 0;
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (missing == 0) {
// new cell, so initialize
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
j = 0;
#pragma unroll 8
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= 0) {
if (i != j) {
// move children to front (needed later for speed)
childd[k*8+i] = -1;
childd[k*8+j] = ch;
}
child[missing*THREADS3+threadIdx.x] = ch; // cache missing children
m = massd[ch];
missing++;
if (m >= 0.0f) {
// child is ready
missing--;
if (ch >= nbodiesd) { // count bodies (needed later)
cnt += countd[ch] - 1;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
j++;
}
}
cnt += j;
}
if (missing != 0) {
do {
// poll missing child
ch = child[(missing-1)*THREADS3+threadIdx.x];
m = massd[ch];
if (m >= 0.0f) {
// child is now ready
missing--;
if (ch >= nbodiesd) {
// count bodies (needed later)
cnt += countd[ch] - 1;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
// repeat until we are done or child is not ready
} while ((m >= 0.0f) && (missing != 0));
}
if (missing == 0) {
// all children are ready, so store computed information
countd[k] = cnt;
m = 1.0f / cm;
posxd[k] = px * m;
posyd[k] = py * m;
poszd[k] = pz * m;
__threadfence();
massd[k] = cm;
k += inc; // move on to next cell
}
}
}
/************************************************************************************/
/*** sort bodies ********************************************************************/
/************************************************************************************/
__global__ void SortKernel()
{
register int i, k, ch, dec, start, bottom;
__shared__ int bottoms;
if (0 == threadIdx.x) {
bottoms = bottomd;
}
__syncthreads();
bottom = bottoms;
dec = blockDim.x * gridDim.x;
k = nnodesd + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all cells assigned to thread
while (k >= bottom) {
start = startd[k];
if (start >= 0) {
#pragma unroll 8
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= nbodiesd) {
// child is a cell
startd[ch] = start; // set start ID of child
start += countd[ch]; // add #bodies in subtree
} else if (ch >= 0) {
// child is a body
sortd[start] = ch; // record body in sorted array
start++;
}
}
k -= dec; // move on to next cell
}
}
}
/************************************************************************************/
/*** compute force ******************************************************************/
/************************************************************************************/
__global__ void ForceCalculationKernel()
{
register int i, j, k, n, depth, base, sbase, diff;
register float px, py, pz, ax, ay, az, dx, dy, dz, tmp;
__shared__ int step, maxdepth;
__shared__ int ch[THREADS5/WARPSIZE];
__shared__ int pos[MAXDEPTH * THREADS5/WARPSIZE], node[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ float dq[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ float nx[THREADS5/WARPSIZE], ny[THREADS5/WARPSIZE], nz[THREADS5/WARPSIZE], nm[THREADS5/WARPSIZE];
if (0 == threadIdx.x) {
step = stepd;
maxdepth = maxdepthd;
tmp = radiusd;
// precompute values that depend only on tree level
dq[0] = tmp * tmp * itolsqd;
for (i = 1; i < maxdepth; i++) {
dq[i] = dq[i - 1] * 0.25f;
}
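    // Editorial note: dq[d] is effectively (cell size at tree level d)^2 / theta^2
    // (itolsqd = 1/theta^2, and cells shrink by half per level, hence the 0.25f
    // factor). The traversal test `tmp >= dq[depth]` below is the Barnes-Hut
    // opening-angle (multipole acceptance) criterion: a cell that is far enough
    // away relative to its size is approximated by its center of mass.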
if (maxdepth > MAXDEPTH) {
*errd = maxdepth;
}
}
__syncthreads();
if (maxdepth <= MAXDEPTH) {
// figure out first thread in each warp
base = threadIdx.x / WARPSIZE;
sbase = base * WARPSIZE;
j = base * MAXDEPTH;
diff = threadIdx.x - sbase;
// make multiple copies to avoid index calculations later
if (diff < MAXDEPTH) {
dq[diff+j] = dq[diff];
}
__syncthreads();
// iterate over all bodies assigned to thread
for (k = threadIdx.x + blockIdx.x * blockDim.x; k < nbodiesd; k += blockDim.x * gridDim.x) {
i = sortd[k]; // get permuted index
// cache position info
px = posxd[i];
py = posyd[i];
pz = poszd[i];
ax = 0.0f;
ay = 0.0f;
az = 0.0f;
// initialize iteration stack, i.e., push root node onto stack
depth = j;
if (sbase == threadIdx.x) {
node[j] = nnodesd;
pos[j] = 0;
}
__threadfence_block();
while (depth >= j) {
// stack is not empty
while (pos[depth] < 8) {
// node on top of stack has more children to process
if (sbase == threadIdx.x) {
// I'm the first thread in the warp
n = childd[node[depth]*8+pos[depth]]; // load child pointer
pos[depth]++;
ch[base] = n; // cache child pointer
if (n >= 0) {
// cache position and mass
nx[base] = posxd[n];
ny[base] = posyd[n];
nz[base] = poszd[n];
nm[base] = massd[n];
}
}
__threadfence_block();
// all threads retrieve cached data
n = ch[base];
if (n >= 0) {
dx = nx[base] - px;
dy = ny[base] - py;
dz = nz[base] - pz;
tmp = dx*dx + dy*dy + dz*dz; // compute distance squared
if ((n < nbodiesd) || __all(tmp >= dq[depth])) { // check if all threads agree that cell is far enough away (or is a body)
if (n != i) {
tmp = rsqrtf(tmp + epssqd); // compute distance
tmp = nm[base] * tmp * tmp * tmp;
ax += dx * tmp;
ay += dy * tmp;
az += dz * tmp;
}
} else {
// push cell onto stack
depth++;
if (sbase == threadIdx.x) {
node[depth] = n;
pos[depth] = 0;
}
__threadfence_block();
}
} else {
depth = max(j, depth - 1); // early out because all remaining children are also zero
}
}
depth--; // done with this level
}
if (step > 0) {
velxd[i] += (ax - accxd[i]) * dthfd;
velyd[i] += (ay - accyd[i]) * dthfd;
velzd[i] += (az - acczd[i]) * dthfd;
}
// save computed acceleration
accxd[i] = ax;
accyd[i] = ay;
acczd[i] = az;
}
}
}
/************************************************************************************/
/*** advance bodies *****************************************************************/
/************************************************************************************/
__global__ void IntegrationKernel()
{
register int i, inc;
register float dvelx, dvely, dvelz;
register float velhx, velhy, velhz;
inc = blockDim.x * gridDim.x;
// iterate over all bodies assigned to thread
for (i = threadIdx.x + blockIdx.x * blockDim.x; i < nbodiesd; i += inc) {
// integrate
dvelx = accxd[i] * dthfd;
dvely = accyd[i] * dthfd;
dvelz = acczd[i] * dthfd;
velhx = velxd[i] + dvelx;
velhy = velyd[i] + dvely;
velhz = velzd[i] + dvelz;
posxd[i] += velhx * dtimed;
posyd[i] += velhy * dtimed;
poszd[i] += velhz * dtimed;
velxd[i] = velhx + dvelx;
velyd[i] = velhy + dvely;
velzd[i] = velhz + dvelz;
}
}
/************************************************************************************/
static void CudaTest(char *msg)
{
hipError_t e;
hipDeviceSynchronize();
if (hipSuccess != (e = hipGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", hipGetErrorString(e));
exit(-1);
}
}
/************************************************************************************/
int main(int argc, char *argv[])
{
register int i, run, blocks;
register int nnodes, nbodies, step, timesteps;
register int runtime, mintime;
int error;
register float dtime, dthf, epssq, itolsq;
float time, timing[7];
clock_t starttime, endtime;
hipEvent_t start, stop;
float *mass, *posx, *posy, *posz, *velx, *vely, *velz;
int *errl, *sortl, *childl, *countl, *startl;
float *massl;
float *posxl, *posyl, *poszl;
float *velxl, *velyl, *velzl;
float *accxl, *accyl, *acczl;
float *maxxl, *maxyl, *maxzl;
float *minxl, *minyl, *minzl;
register double rsc, vsc, r, v, x, y, z, sq, scale;
// perform some checks
fprintf(stderr, "CUDA BarnesHut v1.1\nCopyright (c) 2010 The University of Texas at Austin\n");
if (argc != 3) {
fprintf(stderr, "\n");
fprintf(stderr, "arguments: number_of_bodies number_of_timesteps\n");
exit(-1);
}
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "There is no device supporting CUDA\n");
exit(-1);
}
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {
fprintf(stderr, "There is no CUDA capable device\n");
exit(-1);
}
if ((deviceProp.major < 1) || ((deviceProp.major == 1) && (deviceProp.minor < 2))) {
fprintf(stderr, "Need at least compute capability 1.2\n");
exit(-1);
}
if (deviceProp.warpSize != WARPSIZE) {
fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize);
exit(-1);
}
if (deviceProp.warpSize != WARPSIZE) {
fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize);
exit(-1);
}
blocks = deviceProp.multiProcessorCount;
fprintf(stderr, "blocks = %d\n", blocks);
  if ((WARPSIZE <= 0) || ((WARPSIZE & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "Warp size must be greater than zero and a power of two\n");
exit(-1);
}
if (WARPSIZE < MAXDEPTH) {
fprintf(stderr, "Warp size must be greater than or equal to MAXDEPTH\n");
exit(-1);
}
if ((THREADS0 <= 0) || ((THREADS0 & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "THREADS0 must be greater than zero and an integer multiple of the warp size\n");
exit(-1);
}
if ((THREADS1 <= 0) || ((THREADS1 & (WARPSIZE-1)) != 0) || ((THREADS1 & (THREADS1-1)) != 0)) {
fprintf(stderr, "THREADS1 must be greater than zero, an integer multiple of the warp size, and a power of two\n");
exit(-1);
}
if ((THREADS2 <= 0) || ((THREADS2 & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "THREADS2 must be greater than zero and an integer multiple of the warp size\n");
exit(-1);
}
if ((THREADS3 <= 0) || ((THREADS3 & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "THREADS3 must be greater than zero and an integer multiple of the warp size\n");
exit(-1);
}
if ((THREADS4 <= 0) || ((THREADS4 & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "THREADS4 must be greater than zero and an integer multiple of the warp size\n");
exit(-1);
}
if ((THREADS5 <= 0) || ((THREADS5 & (WARPSIZE-1)) != 0)) { /* must be a multiple of the warp size */
fprintf(stderr, "THREADS5 must be greater than zero and an integer multiple of the warp size\n");
exit(-1);
}
if ((THREADS6 <= 0) || ((THREADS6 & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "THREADS6 must be greater than zero and an integer multiple of the warp size\n");
exit(-1);
}
hipGetLastError(); // reset error value
for (run = 0; run < 1; run++) {
for (i = 0; i < 7; i++) timing[i] = 0.0f;
nbodies = atoi(argv[1]);
if (nbodies < 1) {
fprintf(stderr, "nbodies is too small: %d\n", nbodies);
exit(-1);
}
if (nbodies > (1 << 30)) {
fprintf(stderr, "nbodies is too large: %d\n", nbodies);
exit(-1);
}
nnodes = nbodies * 2;
if (nnodes < 1024*blocks) nnodes = 1024*blocks;
while ((nnodes & (WARPSIZE-1)) != 0) nnodes++;
nnodes--;
timesteps = atoi(argv[2]);
dtime = 0.025; dthf = dtime * 0.5f;
epssq = 0.05 * 0.05;
itolsq = 1.0f / (0.5 * 0.5);
// allocate memory
if (run == 0) {
fprintf(stderr, "nodes = %d\n", nnodes+1);
fprintf(stderr, "configuration: %d bodies, %d time steps\n", nbodies, timesteps);
mass = (float *)malloc(sizeof(float) * nbodies);
if (mass == NULL) {fprintf(stderr, "cannot allocate mass\n"); exit(-1);}
posx = (float *)malloc(sizeof(float) * nbodies);
if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
posy = (float *)malloc(sizeof(float) * nbodies);
if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
posz = (float *)malloc(sizeof(float) * nbodies);
if (posz == NULL) {fprintf(stderr, "cannot allocate posz\n"); exit(-1);}
velx = (float *)malloc(sizeof(float) * nbodies);
if (velx == NULL) {fprintf(stderr, "cannot allocate velx\n"); exit(-1);}
vely = (float *)malloc(sizeof(float) * nbodies);
if (vely == NULL) {fprintf(stderr, "cannot allocate vely\n"); exit(-1);}
velz = (float *)malloc(sizeof(float) * nbodies);
if (velz == NULL) {fprintf(stderr, "cannot allocate velz\n"); exit(-1);}
if (hipSuccess != hipMalloc((void **)&errl, sizeof(int))) fprintf(stderr, "could not allocate errd\n"); CudaTest("couldn't allocate errd");
if (hipSuccess != hipMalloc((void **)&childl, sizeof(int) * (nnodes+1) * 8)) fprintf(stderr, "could not allocate childd\n"); CudaTest("couldn't allocate childd");
if (hipSuccess != hipMalloc((void **)&massl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate massd\n"); CudaTest("couldn't allocate massd");
if (hipSuccess != hipMalloc((void **)&posxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posxd\n"); CudaTest("couldn't allocate posxd");
if (hipSuccess != hipMalloc((void **)&posyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posyd\n"); CudaTest("couldn't allocate posyd");
if (hipSuccess != hipMalloc((void **)&poszl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate poszd\n"); CudaTest("couldn't allocate poszd");
if (hipSuccess != hipMalloc((void **)&countl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate countd\n"); CudaTest("couldn't allocate countd");
if (hipSuccess != hipMalloc((void **)&startl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate startd\n"); CudaTest("couldn't allocate startd");
// alias arrays
int inc = (nbodies + WARPSIZE - 1) & (-WARPSIZE);
velxl = (float *)&childl[0*inc];
velyl = (float *)&childl[1*inc];
velzl = (float *)&childl[2*inc];
accxl = (float *)&childl[3*inc];
accyl = (float *)&childl[4*inc];
acczl = (float *)&childl[5*inc];
sortl = (int *)&childl[6*inc];
if (hipSuccess != hipMalloc((void **)&maxxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxxd\n"); CudaTest("couldn't allocate maxxd");
if (hipSuccess != hipMalloc((void **)&maxyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxyd\n"); CudaTest("couldn't allocate maxyd");
if (hipSuccess != hipMalloc((void **)&maxzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxzd\n"); CudaTest("couldn't allocate maxzd");
if (hipSuccess != hipMalloc((void **)&minxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minxd\n"); CudaTest("couldn't allocate minxd");
if (hipSuccess != hipMalloc((void **)&minyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minyd\n"); CudaTest("couldn't allocate minyd");
if (hipSuccess != hipMalloc((void **)&minzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minzd\n"); CudaTest("couldn't allocate minzd");
if (hipSuccess != hipMemcpyToSymbol(nnodesd, &nnodes, sizeof(int))) fprintf(stderr, "copying of nnodes to device failed\n"); CudaTest("nnode copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(nbodiesd, &nbodies, sizeof(int))) fprintf(stderr, "copying of nbodies to device failed\n"); CudaTest("nbody copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(errd, &errl, sizeof(int))) fprintf(stderr, "copying of err to device failed\n"); CudaTest("err copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(dtimed, &dtime, sizeof(float))) fprintf(stderr, "copying of dtime to device failed\n"); CudaTest("dtime copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(dthfd, &dthf, sizeof(float))) fprintf(stderr, "copying of dthf to device failed\n"); CudaTest("dthf copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(epssqd, &epssq, sizeof(float))) fprintf(stderr, "copying of epssq to device failed\n"); CudaTest("epssq copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(itolsqd, &itolsq, sizeof(float))) fprintf(stderr, "copying of itolsq to device failed\n"); CudaTest("itolsq copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(sortd, &sortl, sizeof(int))) fprintf(stderr, "copying of sortl to device failed\n"); CudaTest("sortl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(countd, &countl, sizeof(int))) fprintf(stderr, "copying of countl to device failed\n"); CudaTest("countl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(startd, &startl, sizeof(int))) fprintf(stderr, "copying of startl to device failed\n"); CudaTest("startl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(childd, &childl, sizeof(int))) fprintf(stderr, "copying of childl to device failed\n"); CudaTest("childl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(massd, &massl, sizeof(int))) fprintf(stderr, "copying of massl to device failed\n"); CudaTest("massl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(posxd, &posxl, sizeof(int))) fprintf(stderr, "copying of posxl to device failed\n"); CudaTest("posxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(posyd, &posyl, sizeof(int))) fprintf(stderr, "copying of posyl to device failed\n"); CudaTest("posyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(poszd, &poszl, sizeof(int))) fprintf(stderr, "copying of poszl to device failed\n"); CudaTest("poszl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(velxd, &velxl, sizeof(int))) fprintf(stderr, "copying of velxl to device failed\n"); CudaTest("velxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(velyd, &velyl, sizeof(int))) fprintf(stderr, "copying of velyl to device failed\n"); CudaTest("velyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(velzd, &velzl, sizeof(int))) fprintf(stderr, "copying of velzl to device failed\n"); CudaTest("velzl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(accxd, &accxl, sizeof(int))) fprintf(stderr, "copying of accxl to device failed\n"); CudaTest("accxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(accyd, &accyl, sizeof(int))) fprintf(stderr, "copying of accyl to device failed\n"); CudaTest("accyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(acczd, &acczl, sizeof(int))) fprintf(stderr, "copying of acczl to device failed\n"); CudaTest("acczl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(maxxd, &maxxl, sizeof(int))) fprintf(stderr, "copying of maxxl to device failed\n"); CudaTest("maxxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(maxyd, &maxyl, sizeof(int))) fprintf(stderr, "copying of maxyl to device failed\n"); CudaTest("maxyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(maxzd, &maxzl, sizeof(int))) fprintf(stderr, "copying of maxzl to device failed\n"); CudaTest("maxzl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(minxd, &minxl, sizeof(int))) fprintf(stderr, "copying of minxl to device failed\n"); CudaTest("minxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(minyd, &minyl, sizeof(int))) fprintf(stderr, "copying of minyl to device failed\n"); CudaTest("minyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(minzd, &minzl, sizeof(int))) fprintf(stderr, "copying of minzl to device failed\n"); CudaTest("minzl copy to device failed");
}
// generate input
drndset(7);
rsc = (3 * M_PI) / 16;
vsc = sqrt(1.0 / rsc);
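    // Editorial note: the loop below appears to follow the standard Plummer-model
    // initialization used by Barnes' treecode test-data generator: equal masses,
    // radii drawn from the Plummer cumulative mass profile, isotropic directions
    // by rejection sampling inside the unit sphere, and speeds drawn from the
    // Plummer velocity distribution by von Neumann rejection.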
for (i = 0; i < nbodies; i++) {
mass[i] = 1.0 / nbodies;
r = 1.0 / sqrt(pow(drnd()*0.999, -2.0/3.0) - 1);
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = rsc * r / sqrt(sq);
posx[i] = x * scale;
posy[i] = y * scale;
posz[i] = z * scale;
do {
x = drnd();
y = drnd() * 0.1;
} while (y > x*x * pow(1 - x*x, 3.5));
v = x * sqrt(2.0 / sqrt(1 + r*r));
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = vsc * v / sqrt(sq);
velx[i] = x * scale;
vely[i] = y * scale;
velz[i] = z * scale;
}
if (hipSuccess != hipMemcpy(massl, mass, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of mass to device failed\n"); CudaTest("mass copy to device failed");
if (hipSuccess != hipMemcpy(posxl, posx, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posx to device failed\n"); CudaTest("posx copy to device failed");
if (hipSuccess != hipMemcpy(posyl, posy, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posy to device failed\n"); CudaTest("posy copy to device failed");
if (hipSuccess != hipMemcpy(poszl, posz, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posz to device failed\n"); CudaTest("posz copy to device failed");
if (hipSuccess != hipMemcpy(velxl, velx, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of velx to device failed\n"); CudaTest("velx copy to device failed");
if (hipSuccess != hipMemcpy(velyl, vely, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of vely to device failed\n"); CudaTest("vely copy to device failed");
if (hipSuccess != hipMemcpy(velzl, velz, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of velz to device failed\n"); CudaTest("velz copy to device failed");
// run timesteps (lauch GPU kernels)
hipEventCreate(&start); hipEventCreate(&stop);
starttime = clock();
hipEventRecord(start, 0);
hipLaunchKernelGGL(( InitializationKernel), dim3(blocks*FACTOR0), dim3(THREADS0), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[0] += time;
for (step = 0; step < timesteps; step++) {
hipEventRecord(start, 0);
hipLaunchKernelGGL(( BoundingBoxKernel), dim3(blocks*FACTOR1), dim3(THREADS1), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[1] += time;
//CudaTest("kernel 1 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( TreeBuildingKernel), dim3(blocks*FACTOR2), dim3(THREADS2), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[2] += time;
//CudaTest("kernel 2 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( SummarizationKernel), dim3(blocks*FACTOR3), dim3(THREADS3), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[3] += time;
//CudaTest("kernel 3 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( SortKernel), dim3(blocks*FACTOR4), dim3(512), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[4] += time;
//CudaTest("kernel 4 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( ForceCalculationKernel), dim3(blocks*FACTOR5), dim3(THREADS5), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[5] += time;
//CudaTest("kernel 5 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( IntegrationKernel), dim3(blocks*FACTOR6), dim3(THREADS6), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[6] += time;
//CudaTest("kernel 6 launch failed");
}
endtime = clock();
CudaTest("kernel launch failed");
hipEventDestroy(start); hipEventDestroy(stop);
// transfer result back to CPU
if (hipSuccess != hipMemcpy(&error, errl, sizeof(int), hipMemcpyDeviceToHost)) fprintf(stderr, "copying of err from device failed\n"); CudaTest("err copy from device failed");
if (hipSuccess != hipMemcpy(posx, posxl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posx from device failed\n"); CudaTest("posx copy from device failed");
if (hipSuccess != hipMemcpy(posy, posyl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posy from device failed\n"); CudaTest("posy copy from device failed");
if (hipSuccess != hipMemcpy(posz, poszl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posz from device failed\n"); CudaTest("posz copy from device failed");
if (hipSuccess != hipMemcpy(velx, velxl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of velx from device failed\n"); CudaTest("velx copy from device failed");
if (hipSuccess != hipMemcpy(vely, velyl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of vely from device failed\n"); CudaTest("vely copy from device failed");
if (hipSuccess != hipMemcpy(velz, velzl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of velz from device failed\n"); CudaTest("velz copy from device failed");
runtime = (int) (1000.0f * (endtime - starttime) / CLOCKS_PER_SEC);
fprintf(stderr, "runtime: %ld ms (", runtime);
time = 0;
for (i = 1; i < 7; i++) {
fprintf(stderr, " %.1f ", timing[i]);
time += timing[i];
}
if (error == 0) {
fprintf(stderr, ") = %.1f\n", time);
} else {
fprintf(stderr, ") = %.1f FAILED %d\n", time, error);
}
if ((run == 0) || (mintime > runtime)) mintime = runtime;
}
fprintf(stderr, "mintime: %ld ms\n", mintime);
// print output
for (i = 0; i < nbodies; i++) {
printf("%.2e %.2e %.2e\n", posx[i], posy[i], posz[i]);
}
return 0;
}
| dcf5e3539be1438cfbe163df152c01fb82de8435.cu | /*
CUDA BarnesHut v1.1: Simulation of the gravitational forces
in a galactic cluster using the Barnes-Hut n-body algorithm
Copyright (c) 2010 The University of Texas at Austin
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA, or see <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html>.
Author: Dr. Martin Burtscher
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
// thread count
#define THREADS0 512
#define THREADS1 512
#define THREADS2 288
#define THREADS3 256
#define THREADS4 512
#define THREADS5 384
#define THREADS6 512
// block count = factor * SMs
#define FACTOR0 2
#define FACTOR1 1
#define FACTOR2 2
#define FACTOR3 1
#define FACTOR4 1
#define FACTOR5 2
#define FACTOR6 1
#define WARPSIZE 32
#define MAXDEPTH 26
/************************************************************************************/
// input generation
#define MULT 1103515245
#define ADD 12345
#define MASK 0x7FFFFFFF
#define TWOTO31 2147483648.0
static int A = 1;
static int B = 0;
static int randx = 1;
static int lastrand;
static void drndset(int seed)
{
A = 1;
B = 0;
randx = (A * seed + B) & MASK;
A = (MULT * A) & MASK;
B = (MULT * B + ADD) & MASK;
}
static double drnd()
{
lastrand = randx;
randx = (A * randx + B) & MASK;
return (double)lastrand / TWOTO31;
}
/************************************************************************************/
// childd is aliased with velxd, velyd, velzd, accxd, accyd, acczd, and sortd but they never use the same memory locations
__constant__ int nnodesd, nbodiesd, *errd, *sortd, *childd, *countd, *startd;
__constant__ float dtimed, dthfd, epssqd, itolsqd;
__constant__ float *massd, *posxd, *posyd, *poszd, *velxd, *velyd, *velzd, *accxd, *accyd, *acczd;
__constant__ float *maxxd, *maxyd, *maxzd, *minxd, *minyd, *minzd;
__device__ int stepd, bottomd, maxdepthd;
__device__ unsigned int blkcntd;
__device__ float radiusd;
/************************************************************************************/
/*** initialize memory **************************************************************/
/************************************************************************************/
__global__ void InitializationKernel()
{
register int i, inc;
i = threadIdx.x + blockIdx.x * blockDim.x;
if (i == 0) {
*errd = 0;
stepd = -1;
maxdepthd = 1;
blkcntd = 0;
}
inc = blockDim.x * gridDim.x;
for (; i < nbodiesd; i += inc) {
accxd[i] = 0.0f;
accyd[i] = 0.0f;
acczd[i] = 0.0f;
}
}
/************************************************************************************/
/*** compute center and radius ******************************************************/
/************************************************************************************/
__global__ void BoundingBoxKernel()
{
register int i, j, inc;
register float tmp;
__shared__ float minx[THREADS1], miny[THREADS1], minz[THREADS1];
__shared__ float maxx[THREADS1], maxy[THREADS1], maxz[THREADS1];
i = threadIdx.x;
if (i == 0) {
minx[0] = posxd[0];
miny[0] = posyd[0];
minz[0] = poszd[0];
}
__syncthreads();
// initialize with valid data (in case #bodies < #threads)
minx[i] = maxx[i] = minx[0];
miny[i] = maxy[i] = miny[0];
minz[i] = maxz[i] = minz[0];
inc = blockDim.x * gridDim.x;
j = i + blockIdx.x * blockDim.x;
// scan bodies
while (j < nbodiesd) {
tmp = posxd[j];
minx[i] = min(minx[i], tmp);
maxx[i] = max(maxx[i], tmp);
tmp = posyd[j];
miny[i] = min(miny[i], tmp);
maxy[i] = max(maxy[i], tmp);
tmp = poszd[j];
minz[i] = min(minz[i], tmp);
maxz[i] = max(maxz[i], tmp);
j += inc; // move on to next body
}
// reduction in shared memory
j = blockDim.x >> 1;
while (j > 0) {
__syncthreads();
if (i < j) {
minx[i] = min(minx[i], minx[i+j]);
miny[i] = min(miny[i], miny[i+j]);
minz[i] = min(minz[i], minz[i+j]);
maxx[i] = max(maxx[i], maxx[i+j]);
maxy[i] = max(maxy[i], maxy[i+j]);
maxz[i] = max(maxz[i], maxz[i+j]);
}
j >>= 1;
}
if (i == 0) {
// write block result to global memory
j = blockIdx.x;
minxd[j] = minx[0];
minyd[j] = miny[0];
minzd[j] = minz[0];
maxxd[j] = maxx[0];
maxyd[j] = maxy[0];
maxzd[j] = maxz[0];
__threadfence();
inc = gridDim.x - 1;
if (inc == atomicInc(&blkcntd, inc)) {
// I'm the last block, so combine all block results
for (j = 0; j <= inc; j++) {
minx[0] = min(minx[0], minxd[j]);
miny[0] = min(miny[0], minyd[j]);
minz[0] = min(minz[0], minzd[j]);
maxx[0] = max(maxx[0], maxxd[j]);
maxy[0] = max(maxy[0], maxyd[j]);
maxz[0] = max(maxz[0], maxzd[j]);
}
// compute radius
tmp = max(maxx[0] - minx[0], maxy[0] - miny[0]);
radiusd = max(tmp, maxz[0] - minz[0]) * 0.5f;
// create root node
j = nnodesd;
massd[j] = -1.0f;
startd[j] = 0;
posxd[j] = (minx[0] + maxx[0]) * 0.5f;
posyd[j] = (miny[0] + maxy[0]) * 0.5f;
poszd[j] = (minz[0] + maxz[0]) * 0.5f;
#pragma unroll 8
for (i = 0; i < 8; i++) childd[j*8+i] = -1;
bottomd = j;
stepd++;
}
}
}
/************************************************************************************/
/*** build tree *********************************************************************/
/************************************************************************************/
__global__ void TreeBuildingKernel()
{
register int i, j, k, depth, localmaxdepth, skip, inc;
register float x, y, z, r;
register float px, py, pz;
register int ch, n, cell, locked, patch;
__shared__ float radius, rootx, rooty, rootz;
i = threadIdx.x;
if (i == 0) {
// cache root data
radius = radiusd;
rootx = posxd[nnodesd];
rooty = posyd[nnodesd];
rootz = poszd[nnodesd];
}
__syncthreads();
localmaxdepth = 1;
skip = 1;
inc = blockDim.x * gridDim.x;
i += blockIdx.x * blockDim.x;
// iterate over all bodies assigned to thread
while (i < nbodiesd) {
if (skip != 0) {
// new body, so start traversing at root
skip = 0;
px = posxd[i];
py = posyd[i];
pz = poszd[i];
n = nnodesd;
depth = 1;
r = radius;
j = 0;
// determine which child to follow
if (rootx < px) j = 1;
if (rooty < py) j += 2;
if (rootz < pz) j += 4;
}
ch = childd[n*8+j];
// follow path to leaf cell
while (ch >= nbodiesd) {
n = ch;
depth++;
r *= 0.5f;
j = 0;
// determine which child to follow
if (posxd[n] < px) j = 1;
if (posyd[n] < py) j += 2;
if (poszd[n] < pz) j += 4;
ch = childd[n*8+j];
}
if (ch != -2) { // skip if child pointer is locked and try again later
locked = n*8+j;
if (ch == atomicCAS(&childd[locked], ch, -2)) { // try to lock
if (ch == -1) {
// if null, just insert the new body
childd[locked] = i;
} else { // there already is a body in this position
patch = -1;
// create new cell(s) and insert the old and new body
do {
depth++;
cell = atomicSub(&bottomd, 1) - 1;
if (cell <= nbodiesd) {
*errd = 1;
bottomd = nnodesd;
}
patch = max(patch, cell);
x = (j & 1) * r;
y = ((j >> 1) & 1) * r;
z = ((j >> 2) & 1) * r;
r *= 0.5f;
massd[cell] = -1.0f;
startd[cell] = -1;
x = posxd[cell] = posxd[n] - r + x;
y = posyd[cell] = posyd[n] - r + y;
z = poszd[cell] = poszd[n] - r + z;
#pragma unroll 8
for (k = 0; k < 8; k++) childd[cell*8+k] = -1;
if (patch != cell) {
childd[n*8+j] = cell;
}
j = 0;
if (x < posxd[ch]) j = 1;
if (y < posyd[ch]) j += 2;
if (z < poszd[ch]) j += 4;
childd[cell*8+j] = ch;
n = cell;
j = 0;
if (x < px) j = 1;
if (y < py) j += 2;
if (z < pz) j += 4;
ch = childd[n*8+j];
// repeat until the two bodies are different children
} while (ch >= 0);
childd[n*8+j] = i;
__threadfence();
childd[locked] = patch;
}
localmaxdepth = max(depth, localmaxdepth);
i += inc; // move on to next body
skip = 1;
}
}
__syncthreads();
}
atomicMax(&maxdepthd, localmaxdepth);
}
/************************************************************************************/
/*** compute center of mass *********************************************************/
/************************************************************************************/
__global__ void SummarizationKernel()
{
register int i, j, k, ch, inc, missing, cnt;
register float m, cm, px, py, pz;
__shared__ int bottom, child[THREADS3 * 8];
if (0 == threadIdx.x) {
bottom = bottomd;
}
__syncthreads();
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
missing = 0;
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (missing == 0) {
// new cell, so initialize
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
j = 0;
#pragma unroll 8
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= 0) {
if (i != j) {
// move children to front (needed later for speed)
childd[k*8+i] = -1;
childd[k*8+j] = ch;
}
child[missing*THREADS3+threadIdx.x] = ch; // cache missing children
m = massd[ch];
missing++;
if (m >= 0.0f) {
// child is ready
missing--;
if (ch >= nbodiesd) { // count bodies (needed later)
cnt += countd[ch] - 1;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
j++;
}
}
cnt += j;
}
if (missing != 0) {
do {
// poll missing child
ch = child[(missing-1)*THREADS3+threadIdx.x];
m = massd[ch];
if (m >= 0.0f) {
// child is now ready
missing--;
if (ch >= nbodiesd) {
// count bodies (needed later)
cnt += countd[ch] - 1;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
// repeat until we are done or child is not ready
} while ((m >= 0.0f) && (missing != 0));
}
if (missing == 0) {
// all children are ready, so store computed information
countd[k] = cnt;
m = 1.0f / cm;
posxd[k] = px * m;
posyd[k] = py * m;
poszd[k] = pz * m;
__threadfence();
massd[k] = cm;
k += inc; // move on to next cell
}
}
}
/************************************************************************************/
/*** sort bodies ********************************************************************/
/************************************************************************************/
__global__ void SortKernel()
{
register int i, k, ch, dec, start, bottom;
__shared__ int bottoms;
if (0 == threadIdx.x) {
bottoms = bottomd;
}
__syncthreads();
bottom = bottoms;
dec = blockDim.x * gridDim.x;
k = nnodesd + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all cells assigned to thread
while (k >= bottom) {
start = startd[k];
if (start >= 0) {
#pragma unroll 8
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= nbodiesd) {
// child is a cell
startd[ch] = start; // set start ID of child
start += countd[ch]; // add #bodies in subtree
} else if (ch >= 0) {
// child is a body
sortd[start] = ch; // record body in sorted array
start++;
}
}
k -= dec; // move on to next cell
}
}
}
/************************************************************************************/
/*** compute force ******************************************************************/
/************************************************************************************/
__global__ void ForceCalculationKernel()
{
register int i, j, k, n, depth, base, sbase, diff;
register float px, py, pz, ax, ay, az, dx, dy, dz, tmp;
__shared__ int step, maxdepth;
__shared__ int ch[THREADS5/WARPSIZE];
__shared__ int pos[MAXDEPTH * THREADS5/WARPSIZE], node[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ float dq[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ float nx[THREADS5/WARPSIZE], ny[THREADS5/WARPSIZE], nz[THREADS5/WARPSIZE], nm[THREADS5/WARPSIZE];
if (0 == threadIdx.x) {
step = stepd;
maxdepth = maxdepthd;
tmp = radiusd;
// precompute values that depend only on tree level
dq[0] = tmp * tmp * itolsqd;
for (i = 1; i < maxdepth; i++) {
dq[i] = dq[i - 1] * 0.25f;
}
if (maxdepth > MAXDEPTH) {
*errd = maxdepth;
}
}
__syncthreads();
if (maxdepth <= MAXDEPTH) {
// figure out first thread in each warp
base = threadIdx.x / WARPSIZE;
sbase = base * WARPSIZE;
j = base * MAXDEPTH;
diff = threadIdx.x - sbase;
// make multiple copies to avoid index calculations later
if (diff < MAXDEPTH) {
dq[diff+j] = dq[diff];
}
__syncthreads();
// iterate over all bodies assigned to thread
for (k = threadIdx.x + blockIdx.x * blockDim.x; k < nbodiesd; k += blockDim.x * gridDim.x) {
i = sortd[k]; // get permuted index
// cache position info
px = posxd[i];
py = posyd[i];
pz = poszd[i];
ax = 0.0f;
ay = 0.0f;
az = 0.0f;
// initialize iteration stack, i.e., push root node onto stack
depth = j;
if (sbase == threadIdx.x) {
node[j] = nnodesd;
pos[j] = 0;
}
__threadfence_block();
while (depth >= j) {
// stack is not empty
while (pos[depth] < 8) {
// node on top of stack has more children to process
if (sbase == threadIdx.x) {
// I'm the first thread in the warp
n = childd[node[depth]*8+pos[depth]]; // load child pointer
pos[depth]++;
ch[base] = n; // cache child pointer
if (n >= 0) {
// cache position and mass
nx[base] = posxd[n];
ny[base] = posyd[n];
nz[base] = poszd[n];
nm[base] = massd[n];
}
}
__threadfence_block();
// all threads retrieve cached data
n = ch[base];
if (n >= 0) {
dx = nx[base] - px;
dy = ny[base] - py;
dz = nz[base] - pz;
tmp = dx*dx + dy*dy + dz*dz; // compute distance squared
if ((n < nbodiesd) || __all(tmp >= dq[depth])) { // check if all threads agree that cell is far enough away (or is a body)
if (n != i) {
tmp = rsqrtf(tmp + epssqd); // compute distance
tmp = nm[base] * tmp * tmp * tmp;
ax += dx * tmp;
ay += dy * tmp;
az += dz * tmp;
}
} else {
// push cell onto stack
depth++;
if (sbase == threadIdx.x) {
node[depth] = n;
pos[depth] = 0;
}
__threadfence_block();
}
} else {
depth = max(j, depth - 1); // early out because all remaining children are also zero
}
}
depth--; // done with this level
}
if (step > 0) {
velxd[i] += (ax - accxd[i]) * dthfd;
velyd[i] += (ay - accyd[i]) * dthfd;
velzd[i] += (az - acczd[i]) * dthfd;
}
// save computed acceleration
accxd[i] = ax;
accyd[i] = ay;
acczd[i] = az;
}
}
}
/************************************************************************************/
/*** advance bodies *****************************************************************/
/************************************************************************************/
__global__ void IntegrationKernel()
{
register int i, inc;
register float dvelx, dvely, dvelz;
register float velhx, velhy, velhz;
inc = blockDim.x * gridDim.x;
// iterate over all bodies assigned to thread
for (i = threadIdx.x + blockIdx.x * blockDim.x; i < nbodiesd; i += inc) {
// integrate
dvelx = accxd[i] * dthfd;
dvely = accyd[i] * dthfd;
dvelz = acczd[i] * dthfd;
velhx = velxd[i] + dvelx;
velhy = velyd[i] + dvely;
velhz = velzd[i] + dvelz;
posxd[i] += velhx * dtimed;
posyd[i] += velhy * dtimed;
poszd[i] += velhz * dtimed;
velxd[i] = velhx + dvelx;
velyd[i] = velhy + dvely;
velzd[i] = velhz + dvelz;
}
}
/************************************************************************************/
static void CudaTest(char *msg)
{
cudaError_t e;
cudaThreadSynchronize();
if (cudaSuccess != (e = cudaGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", cudaGetErrorString(e));
exit(-1);
}
}
/************************************************************************************/
int main(int argc, char *argv[])
{
register int i, run, blocks;
register int nnodes, nbodies, step, timesteps;
register int runtime, mintime;
int error;
register float dtime, dthf, epssq, itolsq;
float time, timing[7];
clock_t starttime, endtime;
cudaEvent_t start, stop;
float *mass, *posx, *posy, *posz, *velx, *vely, *velz;
int *errl, *sortl, *childl, *countl, *startl;
float *massl;
float *posxl, *posyl, *poszl;
float *velxl, *velyl, *velzl;
float *accxl, *accyl, *acczl;
float *maxxl, *maxyl, *maxzl;
float *minxl, *minyl, *minzl;
register double rsc, vsc, r, v, x, y, z, sq, scale;
// perform some checks
fprintf(stderr, "CUDA BarnesHut v1.1\nCopyright (c) 2010 The University of Texas at Austin\n");
if (argc != 3) {
fprintf(stderr, "\n");
fprintf(stderr, "arguments: number_of_bodies number_of_timesteps\n");
exit(-1);
}
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "There is no device supporting CUDA\n");
exit(-1);
}
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {
fprintf(stderr, "There is no CUDA capable device\n");
exit(-1);
}
if ((deviceProp.major < 1) || ((deviceProp.major == 1) && (deviceProp.minor < 2))) {
fprintf(stderr, "Need at least compute capability 1.2\n");
exit(-1);
}
if (deviceProp.warpSize != WARPSIZE) {
fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize);
exit(-1);
}
blocks = deviceProp.multiProcessorCount;
fprintf(stderr, "blocks = %d\n", blocks);
if ((WARPSIZE <= 0) || ((WARPSIZE & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "Warp size must be greater than zero and a power of two\n");
exit(-1);
}
if (WARPSIZE < MAXDEPTH) {
fprintf(stderr, "Warp size must be greater than or equal to MAXDEPTH\n");
exit(-1);
}
if ((THREADS0 <= 0) || ((THREADS0 & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "THREADS0 must be greater than zero and an integer multiple of the warp size\n");
exit(-1);
}
if ((THREADS1 <= 0) || ((THREADS1 & (WARPSIZE-1)) != 0) || ((THREADS1 & (THREADS1-1)) != 0)) {
fprintf(stderr, "THREADS1 must be greater than zero, an integer multiple of the warp size, and a power of two\n");
exit(-1);
}
if ((THREADS2 <= 0) || ((THREADS2 & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "THREADS2 must be greater than zero and an integer multiple of the warp size\n");
exit(-1);
}
if ((THREADS3 <= 0) || ((THREADS3 & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "THREADS3 must be greater than zero and an integer multiple of the warp size\n");
exit(-1);
}
if ((THREADS4 <= 0) || ((THREADS4 & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "THREADS4 must be greater than zero and an integer multiple of the warp size\n");
exit(-1);
}
if ((THREADS5 <= 0) || ((THREADS5 & (WARPSIZE-1)) != 0)) { /* must be a multiple of the warp size */
fprintf(stderr, "THREADS5 must be greater than zero and an integer multiple of the warp size\n");
exit(-1);
}
if ((THREADS6 <= 0) || ((THREADS6 & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "THREADS6 must be greater than zero and an integer multiple of the warp size\n");
exit(-1);
}
cudaGetLastError(); // reset error value
for (run = 0; run < 1; run++) {
for (i = 0; i < 7; i++) timing[i] = 0.0f;
nbodies = atoi(argv[1]);
if (nbodies < 1) {
fprintf(stderr, "nbodies is too small: %d\n", nbodies);
exit(-1);
}
if (nbodies > (1 << 30)) {
fprintf(stderr, "nbodies is too large: %d\n", nbodies);
exit(-1);
}
nnodes = nbodies * 2;
if (nnodes < 1024*blocks) nnodes = 1024*blocks;
while ((nnodes & (WARPSIZE-1)) != 0) nnodes++;
nnodes--;
timesteps = atoi(argv[2]);
dtime = 0.025; dthf = dtime * 0.5f;
epssq = 0.05 * 0.05;
itolsq = 1.0f / (0.5 * 0.5);
// allocate memory
if (run == 0) {
fprintf(stderr, "nodes = %d\n", nnodes+1);
fprintf(stderr, "configuration: %d bodies, %d time steps\n", nbodies, timesteps);
mass = (float *)malloc(sizeof(float) * nbodies);
if (mass == NULL) {fprintf(stderr, "cannot allocate mass\n"); exit(-1);}
posx = (float *)malloc(sizeof(float) * nbodies);
if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
posy = (float *)malloc(sizeof(float) * nbodies);
if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
posz = (float *)malloc(sizeof(float) * nbodies);
if (posz == NULL) {fprintf(stderr, "cannot allocate posz\n"); exit(-1);}
velx = (float *)malloc(sizeof(float) * nbodies);
if (velx == NULL) {fprintf(stderr, "cannot allocate velx\n"); exit(-1);}
vely = (float *)malloc(sizeof(float) * nbodies);
if (vely == NULL) {fprintf(stderr, "cannot allocate vely\n"); exit(-1);}
velz = (float *)malloc(sizeof(float) * nbodies);
if (velz == NULL) {fprintf(stderr, "cannot allocate velz\n"); exit(-1);}
if (cudaSuccess != cudaMalloc((void **)&errl, sizeof(int))) fprintf(stderr, "could not allocate errd\n"); CudaTest("couldn't allocate errd");
if (cudaSuccess != cudaMalloc((void **)&childl, sizeof(int) * (nnodes+1) * 8)) fprintf(stderr, "could not allocate childd\n"); CudaTest("couldn't allocate childd");
if (cudaSuccess != cudaMalloc((void **)&massl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate massd\n"); CudaTest("couldn't allocate massd");
if (cudaSuccess != cudaMalloc((void **)&posxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posxd\n"); CudaTest("couldn't allocate posxd");
if (cudaSuccess != cudaMalloc((void **)&posyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posyd\n"); CudaTest("couldn't allocate posyd");
if (cudaSuccess != cudaMalloc((void **)&poszl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate poszd\n"); CudaTest("couldn't allocate poszd");
if (cudaSuccess != cudaMalloc((void **)&countl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate countd\n"); CudaTest("couldn't allocate countd");
if (cudaSuccess != cudaMalloc((void **)&startl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate startd\n"); CudaTest("couldn't allocate startd");
// alias arrays
int inc = (nbodies + WARPSIZE - 1) & (-WARPSIZE);
velxl = (float *)&childl[0*inc];
velyl = (float *)&childl[1*inc];
velzl = (float *)&childl[2*inc];
accxl = (float *)&childl[3*inc];
accyl = (float *)&childl[4*inc];
acczl = (float *)&childl[5*inc];
sortl = (int *)&childl[6*inc];
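/* Memory-reuse note: the seven aliases above carve per-body arrays of `inc` elements
   out of childl instead of allocating them separately. This relies on child slots being
   used only for cells (bodies never have children), so the low, body-indexed portion of
   childl -- 8 ints per body -- is otherwise unused. */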
if (cudaSuccess != cudaMalloc((void **)&maxxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxxd\n"); CudaTest("couldn't allocate maxxd");
if (cudaSuccess != cudaMalloc((void **)&maxyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxyd\n"); CudaTest("couldn't allocate maxyd");
if (cudaSuccess != cudaMalloc((void **)&maxzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxzd\n"); CudaTest("couldn't allocate maxzd");
if (cudaSuccess != cudaMalloc((void **)&minxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minxd\n"); CudaTest("couldn't allocate minxd");
if (cudaSuccess != cudaMalloc((void **)&minyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minyd\n"); CudaTest("couldn't allocate minyd");
if (cudaSuccess != cudaMalloc((void **)&minzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minzd\n"); CudaTest("couldn't allocate minzd");
if (cudaSuccess != cudaMemcpyToSymbol(nnodesd, &nnodes, sizeof(int))) fprintf(stderr, "copying of nnodes to device failed\n"); CudaTest("nnode copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(nbodiesd, &nbodies, sizeof(int))) fprintf(stderr, "copying of nbodies to device failed\n"); CudaTest("nbody copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(errd, &errl, sizeof(void *))) fprintf(stderr, "copying of err to device failed\n"); CudaTest("err copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(dtimed, &dtime, sizeof(float))) fprintf(stderr, "copying of dtime to device failed\n"); CudaTest("dtime copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(dthfd, &dthf, sizeof(float))) fprintf(stderr, "copying of dthf to device failed\n"); CudaTest("dthf copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(epssqd, &epssq, sizeof(float))) fprintf(stderr, "copying of epssq to device failed\n"); CudaTest("epssq copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(itolsqd, &itolsq, sizeof(float))) fprintf(stderr, "copying of itolsq to device failed\n"); CudaTest("itolsq copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(sortd, &sortl, sizeof(void *))) fprintf(stderr, "copying of sortl to device failed\n"); CudaTest("sortl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(countd, &countl, sizeof(void *))) fprintf(stderr, "copying of countl to device failed\n"); CudaTest("countl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(startd, &startl, sizeof(void *))) fprintf(stderr, "copying of startl to device failed\n"); CudaTest("startl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(childd, &childl, sizeof(void *))) fprintf(stderr, "copying of childl to device failed\n"); CudaTest("childl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(massd, &massl, sizeof(void *))) fprintf(stderr, "copying of massl to device failed\n"); CudaTest("massl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(posxd, &posxl, sizeof(void *))) fprintf(stderr, "copying of posxl to device failed\n"); CudaTest("posxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(posyd, &posyl, sizeof(void *))) fprintf(stderr, "copying of posyl to device failed\n"); CudaTest("posyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(poszd, &poszl, sizeof(void *))) fprintf(stderr, "copying of poszl to device failed\n"); CudaTest("poszl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(velxd, &velxl, sizeof(void *))) fprintf(stderr, "copying of velxl to device failed\n"); CudaTest("velxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(velyd, &velyl, sizeof(void *))) fprintf(stderr, "copying of velyl to device failed\n"); CudaTest("velyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(velzd, &velzl, sizeof(void *))) fprintf(stderr, "copying of velzl to device failed\n"); CudaTest("velzl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(accxd, &accxl, sizeof(void *))) fprintf(stderr, "copying of accxl to device failed\n"); CudaTest("accxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(accyd, &accyl, sizeof(void *))) fprintf(stderr, "copying of accyl to device failed\n"); CudaTest("accyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(acczd, &acczl, sizeof(void *))) fprintf(stderr, "copying of acczl to device failed\n"); CudaTest("acczl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(maxxd, &maxxl, sizeof(void *))) fprintf(stderr, "copying of maxxl to device failed\n"); CudaTest("maxxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(maxyd, &maxyl, sizeof(void *))) fprintf(stderr, "copying of maxyl to device failed\n"); CudaTest("maxyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(maxzd, &maxzl, sizeof(void *))) fprintf(stderr, "copying of maxzl to device failed\n"); CudaTest("maxzl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(minxd, &minxl, sizeof(void *))) fprintf(stderr, "copying of minxl to device failed\n"); CudaTest("minxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(minyd, &minyl, sizeof(void *))) fprintf(stderr, "copying of minyl to device failed\n"); CudaTest("minyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(minzd, &minzl, sizeof(void *))) fprintf(stderr, "copying of minzl to device failed\n"); CudaTest("minzl copy to device failed");
}
// generate input
drndset(7);
rsc = (3 * M_PI) / 16;
vsc = sqrt(1.0 / rsc);
for (i = 0; i < nbodies; i++) {
mass[i] = 1.0 / nbodies;
r = 1.0 / sqrt(pow(drnd()*0.999, -2.0/3.0) - 1);
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = rsc * r / sqrt(sq);
posx[i] = x * scale;
posy[i] = y * scale;
posz[i] = z * scale;
do {
x = drnd();
y = drnd() * 0.1;
} while (y > x*x * pow(1 - x*x, 3.5));
v = x * sqrt(2.0 / sqrt(1 + r*r));
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = vsc * v / sqrt(sq);
velx[i] = x * scale;
vely[i] = y * scale;
velz[i] = z * scale;
}
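/* The loop above draws the initial conditions from a Plummer model (the usual N-body
   benchmark distribution): radii follow r = (m^(-2/3) - 1)^(-1/2) with m uniform in
   (0,1), directions are picked by rejection sampling inside the unit sphere, and speeds
   are sampled from q^2 * (1 - q^2)^(7/2) scaled by the local escape speed
   sqrt(2) * (1 + r^2)^(-1/4); rsc and vsc convert to standard (virial) units. */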
if (cudaSuccess != cudaMemcpy(massl, mass, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of mass to device failed\n"); CudaTest("mass copy to device failed");
if (cudaSuccess != cudaMemcpy(posxl, posx, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posx to device failed\n"); CudaTest("posx copy to device failed");
if (cudaSuccess != cudaMemcpy(posyl, posy, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posy to device failed\n"); CudaTest("posy copy to device failed");
if (cudaSuccess != cudaMemcpy(poszl, posz, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posz to device failed\n"); CudaTest("posz copy to device failed");
if (cudaSuccess != cudaMemcpy(velxl, velx, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of velx to device failed\n"); CudaTest("velx copy to device failed");
if (cudaSuccess != cudaMemcpy(velyl, vely, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of vely to device failed\n"); CudaTest("vely copy to device failed");
if (cudaSuccess != cudaMemcpy(velzl, velz, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of velz to device failed\n"); CudaTest("velz copy to device failed");
// run timesteps (launch GPU kernels)
cudaEventCreate(&start); cudaEventCreate(&stop);
starttime = clock();
cudaEventRecord(start, 0);
InitializationKernel<<<blocks*FACTOR0, THREADS0>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[0] += time;
for (step = 0; step < timesteps; step++) {
cudaEventRecord(start, 0);
BoundingBoxKernel<<<blocks*FACTOR1, THREADS1>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[1] += time;
//CudaTest("kernel 1 launch failed");
cudaEventRecord(start, 0);
TreeBuildingKernel<<<blocks*FACTOR2, THREADS2>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[2] += time;
//CudaTest("kernel 2 launch failed");
cudaEventRecord(start, 0);
SummarizationKernel<<<blocks*FACTOR3, THREADS3>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[3] += time;
//CudaTest("kernel 3 launch failed");
cudaEventRecord(start, 0);
SortKernel<<<blocks*FACTOR4, 512>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[4] += time;
//CudaTest("kernel 4 launch failed");
cudaEventRecord(start, 0);
ForceCalculationKernel<<<blocks*FACTOR5, THREADS5>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[5] += time;
//CudaTest("kernel 5 launch failed");
cudaEventRecord(start, 0);
IntegrationKernel<<<blocks*FACTOR6, THREADS6>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[6] += time;
//CudaTest("kernel 6 launch failed");
}
endtime = clock();
CudaTest("kernel launch failed");
cudaEventDestroy(start); cudaEventDestroy(stop);
// transfer result back to CPU
if (cudaSuccess != cudaMemcpy(&error, errl, sizeof(int), cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of err from device failed\n"); CudaTest("err copy from device failed");
if (cudaSuccess != cudaMemcpy(posx, posxl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posx from device failed\n"); CudaTest("posx copy from device failed");
if (cudaSuccess != cudaMemcpy(posy, posyl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posy from device failed\n"); CudaTest("posy copy from device failed");
if (cudaSuccess != cudaMemcpy(posz, poszl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posz from device failed\n"); CudaTest("posz copy from device failed");
if (cudaSuccess != cudaMemcpy(velx, velxl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of velx from device failed\n"); CudaTest("velx copy from device failed");
if (cudaSuccess != cudaMemcpy(vely, velyl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of vely from device failed\n"); CudaTest("vely copy from device failed");
if (cudaSuccess != cudaMemcpy(velz, velzl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of velz from device failed\n"); CudaTest("velz copy from device failed");
runtime = (int) (1000.0f * (endtime - starttime) / CLOCKS_PER_SEC);
fprintf(stderr, "runtime: %ld ms (", runtime);
time = 0;
for (i = 1; i < 7; i++) {
fprintf(stderr, " %.1f ", timing[i]);
time += timing[i];
}
if (error == 0) {
fprintf(stderr, ") = %.1f\n", time);
} else {
fprintf(stderr, ") = %.1f FAILED %d\n", time, error);
}
if ((run == 0) || (mintime > runtime)) mintime = runtime;
}
fprintf(stderr, "mintime: %ld ms\n", mintime);
// print output
for (i = 0; i < nbodies; i++) {
printf("%.2e %.2e %.2e\n", posx[i], posy[i], posz[i]);
}
return 0;
}
|
fc3ed68dce6bad6be4ece4770389ec37983f9a7c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "libvideo.h"
#define SEUIL 50
#define RED 0
#define GREEN 1
#define BLUE 2
__global__ void kernel_grey(char * frame, int height, int width)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int red, green, blue;
for(; i<width * height; i+= ( gridDim.x * blockDim.x))
{
red = frame[i*3 + RED];
green = frame[i*3 + GREEN];
blue = frame[i*3 + BLUE];
float moy = 0.3*red + 0.6*green + 0.1*blue;
frame[i*3 + RED] = (char)moy;
frame[i*3 + GREEN] = (char)moy;
frame[i*3 + BLUE] = (char)moy;
}
}
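/* The 0.3/0.6/0.1 weights above are a rounded form of the usual Rec. 601 luma
   coefficients (0.299, 0.587, 0.114) for RGB-to-greyscale conversion. */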
__global__ void kernel_sobel(char * frame_in, char * frame_out, int height, int width)
{
int i = threadIdx.x+1;
int j = blockIdx.x+1;
float no, n, ne, so, s, se, o, e;
float deltaX, deltaY;
for(; j<height-1; j+=gridDim.x)
{
for(;i<width-1; i+=blockDim.x)
{
no = frame_in[((j+1)*width+(i-1))*3+RED];
n = frame_in[((j+1)*width+(i ))*3+RED];
ne = frame_in[((j+1)*width+(i+1))*3+RED];
so = frame_in[((j-1)*width+(i-1))*3+RED];
s = frame_in[((j-1)*width+(i ))*3+RED];
se = frame_in[((j-1)*width+(i+1))*3+RED];
o = frame_in[((j )*width+(i-1))*3+RED];
e = frame_in[((j )*width+(i+1))*3+RED];
deltaX = -no+ne-2*o+2*e-so+se;
deltaY = se+2*s+so-ne-2*n-no;
float val_red = sqrt(deltaX*deltaX + deltaY*deltaY);
no = frame_in[((j+1)*width+(i-1))*3+GREEN];
n = frame_in[((j+1)*width+(i ))*3+GREEN];
ne = frame_in[((j+1)*width+(i+1))*3+GREEN];
so = frame_in[((j-1)*width+(i-1))*3+GREEN];
s = frame_in[((j-1)*width+(i ))*3+GREEN];
se = frame_in[((j-1)*width+(i+1))*3+GREEN];
o = frame_in[((j )*width+(i-1))*3+GREEN];
e = frame_in[((j )*width+(i+1))*3+GREEN];
deltaX = -no+ne-2*o+2*e-so+se;
deltaY = se+2*s+so-ne-2*n-no;
float val_green = sqrt(deltaX*deltaX + deltaY*deltaY);
no = frame_in[((j+1)*width+(i-1))*3+BLUE];
n = frame_in[((j+1)*width+(i ))*3+BLUE];
ne = frame_in[((j+1)*width+(i+1))*3+BLUE];
so = frame_in[((j-1)*width+(i-1))*3+BLUE];
s = frame_in[((j-1)*width+(i ))*3+BLUE];
se = frame_in[((j-1)*width+(i+1))*3+BLUE];
o = frame_in[((j )*width+(i-1))*3+BLUE];
e = frame_in[((j )*width+(i+1))*3+BLUE];
deltaX = -no+ne-2*o+2*e-so+se;
deltaY = se+2*s+so-ne-2*n-no;
float val_blue = sqrt(deltaX*deltaX + deltaY*deltaY);
float sobel_val = (val_red + val_green + val_blue)/3;
if(sobel_val > SEUIL)
{
frame_out[(j*width+i)*3+RED] = (char)255;
frame_out[(j*width+i)*3+GREEN] = (char)255;
frame_out[(j*width+i)*3+BLUE] = (char)255;
}
else
{
frame_out[(j*width+i)*3+RED] = (char)0;
frame_out[(j*width+i)*3+GREEN] = (char)0;
frame_out[(j*width+i)*3+BLUE] = (char)0;
}
}
}
}
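/* deltaX and deltaY above are the responses of the standard 3x3 Sobel masks
     Gx = [ -1 0 +1 ; -2 0 +2 ; -1 0 +1 ]   Gy = [ +1 +2 +1 ; 0 0 0 ; -1 -2 -1 ]
   applied per colour channel; the per-channel gradient magnitudes sqrt(Gx^2 + Gy^2)
   are averaged over R, G, B and thresholded against SEUIL to produce a binary edge map. */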
int main (int argc, char * argv[])
{
int cpt_frame;
int frame_count;
int width, height;
int nGPUs;
hipGetDeviceCount(&nGPUs);
printf("Opening videos - read and write\n"); fflush(stdout);
OpenReadAndWriteVideo("./Wildlife.wmv", "./Wildlife_sobel.wmv");
printf("----------------------------------------\n");
frame_count = getFrameCount();
width = getWidth();
height = getHeight();
printf("Frame count = %d\n", frame_count); fflush(stdout);
printf("Width of frames: %d\n", width); fflush(stdout);
printf("Height of frames: %d\n", height); fflush(stdout);
// char * frames = (char *) malloc( sizeof(char) * frame_count * width * height * 3);
char * frame1 = (char *) malloc( sizeof(char) * width * height * 3);
/******************************/
/**** TP3 - QUESTION 4 ****/
/******************************/
char * cuda_frame_in, * cuda_frame_out;
for(int i=0 ; i<nGPUs ; i++)
{
hipSetDevice(i);
hipMalloc((void **)&cuda_frame_in , sizeof(char) * width * height * 3);
hipMalloc((void **)&cuda_frame_out, sizeof(char) * width * height * 3);
}
hipSetDevice(0);
for(cpt_frame = 0; cpt_frame < 500 && cpt_frame < frame_count; cpt_frame ++)
{
printf("%d - Read frame with index\n", cpt_frame); fflush(stdout);
readFrame_with_index(frame1, cpt_frame);
dd origin [email protected]:Germainf/APM.git if(cpt_frame > 200 && cpt_frame < 300)
{
hipSetDevice(cpt_frame%nGPUs);
printf("%d - GREY\n", cpt_frame); fflush(stdout);
hipMemcpy(cuda_frame_in, frame1, sizeof(char) * width * height * 3, hipMemcpyHostToDevice);
dim3 mygrid;
mygrid.x = height/4;
dim3 myblock;
myblock.x = width/4;
hipLaunchKernelGGL(( kernel_grey), dim3(mygrid), dim3(myblock), 0, 0, cuda_frame_in, height, width);
hipMemcpy(frame1, cuda_frame_in, sizeof(char) * width * height * 3, hipMemcpyDeviceToHost);
}
if(cpt_frame >= 300 && cpt_frame < 800)
{
printf("%d - SOBEL\n", cpt_frame); fflush(stdout);
hipMemcpy(cuda_frame_in, frame1, sizeof(char) * width * height * 3, hipMemcpyHostToDevice);
dim3 mygrid;
mygrid.x = (height)/1;
dim3 myblock;
myblock.x = (width)/16;
hipLaunchKernelGGL(( kernel_sobel), dim3(mygrid), dim3(myblock), 0, 0, cuda_frame_in, cuda_frame_out, height, width);
hipMemcpy(frame1, cuda_frame_out, sizeof(char) * width * height * 3, hipMemcpyDeviceToHost);
}
writeFrame (frame1);
}
printf("ECRITURE VIDEO FINIE\n");
hipFree(cuda_frame_in);
hipFree(cuda_frame_out);
/******************************/
/**** TP3 - END OF QUESTION 4 ****/
/******************************/
free(frame1);
return 0;
}
| fc3ed68dce6bad6be4ece4770389ec37983f9a7c.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include "libvideo.h"
#define SEUIL 50
#define RED 0
#define GREEN 1
#define BLUE 2
__global__ void kernel_grey(char * frame, int height, int width)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int red, green, blue;
for(; i<width * height; i+= ( gridDim.x * blockDim.x))
{
red = frame[i*3 + RED];
green = frame[i*3 + GREEN];
blue = frame[i*3 + BLUE];
float moy = 0.3*red + 0.6*green + 0.1*blue;
frame[i*3 + RED] = (char)moy;
frame[i*3 + GREEN] = (char)moy;
frame[i*3 + BLUE] = (char)moy;
}
}
__global__ void kernel_sobel(char * frame_in, char * frame_out, int height, int width)
{
int i = threadIdx.x+1;
int j = blockIdx.x+1;
float no, n, ne, so, s, se, o, e;
float deltaX, deltaY;
for(; j<height-1; j+=gridDim.x)
{
for(;i<width-1; i+=blockDim.x)
{
no = frame_in[((j+1)*width+(i-1))*3+RED];
n = frame_in[((j+1)*width+(i ))*3+RED];
ne = frame_in[((j+1)*width+(i+1))*3+RED];
so = frame_in[((j-1)*width+(i-1))*3+RED];
s = frame_in[((j-1)*width+(i ))*3+RED];
se = frame_in[((j-1)*width+(i+1))*3+RED];
o = frame_in[((j )*width+(i-1))*3+RED];
e = frame_in[((j )*width+(i+1))*3+RED];
deltaX = -no+ne-2*o+2*e-so+se;
deltaY = se+2*s+so-ne-2*n-no;
float val_red = sqrt(deltaX*deltaX + deltaY*deltaY);
no = frame_in[((j+1)*width+(i-1))*3+GREEN];
n = frame_in[((j+1)*width+(i ))*3+GREEN];
ne = frame_in[((j+1)*width+(i+1))*3+GREEN];
so = frame_in[((j-1)*width+(i-1))*3+GREEN];
s = frame_in[((j-1)*width+(i ))*3+GREEN];
se = frame_in[((j-1)*width+(i+1))*3+GREEN];
o = frame_in[((j )*width+(i-1))*3+GREEN];
e = frame_in[((j )*width+(i+1))*3+GREEN];
deltaX = -no+ne-2*o+2*e-so+se;
deltaY = se+2*s+so-ne-2*n-no;
float val_green = sqrt(deltaX*deltaX + deltaY*deltaY);
no = frame_in[((j+1)*width+(i-1))*3+BLUE];
n = frame_in[((j+1)*width+(i ))*3+BLUE];
ne = frame_in[((j+1)*width+(i+1))*3+BLUE];
so = frame_in[((j-1)*width+(i-1))*3+BLUE];
s = frame_in[((j-1)*width+(i ))*3+BLUE];
se = frame_in[((j-1)*width+(i+1))*3+BLUE];
o = frame_in[((j )*width+(i-1))*3+BLUE];
e = frame_in[((j )*width+(i+1))*3+BLUE];
deltaX = -no+ne-2*o+2*e-so+se;
deltaY = se+2*s+so-ne-2*n-no;
float val_blue = sqrt(deltaX*deltaX + deltaY*deltaY);
float sobel_val = (val_red + val_green + val_blue)/3;
if(sobel_val > SEUIL)
{
frame_out[(j*width+i)*3+RED] = (char)255;
frame_out[(j*width+i)*3+GREEN] = (char)255;
frame_out[(j*width+i)*3+BLUE] = (char)255;
}
else
{
frame_out[(j*width+i)*3+RED] = (char)0;
frame_out[(j*width+i)*3+GREEN] = (char)0;
frame_out[(j*width+i)*3+BLUE] = (char)0;
}
}
}
}
int main (int argc, char * argv[])
{
int cpt_frame;
int frame_count;
int width, height;
int nGPUs;
cudaGetDeviceCount(&nGPUs);
printf("Opening videos - read and write\n"); fflush(stdout);
OpenReadAndWriteVideo("./Wildlife.wmv", "./Wildlife_sobel.wmv");
printf("----------------------------------------\n");
frame_count = getFrameCount();
width = getWidth();
height = getHeight();
printf("Frame count = %d\n", frame_count); fflush(stdout);
printf("Width of frames: %d\n", width); fflush(stdout);
printf("Height of frames: %d\n", height); fflush(stdout);
// char * frames = (char *) malloc( sizeof(char) * frame_count * width * height * 3);
char * frame1 = (char *) malloc( sizeof(char) * width * height * 3);
/******************************/
/**** TP3 - QUESTION 4 ****/
/******************************/
char * cuda_frame_in, * cuda_frame_out;
for(int i=0 ; i<nGPUs ; i++)
{
cudaSetDevice(i);
cudaMalloc((void **)&cuda_frame_in , sizeof(char) * width * height * 3);
cudaMalloc((void **)&cuda_frame_out, sizeof(char) * width * height * 3);
}
cudaSetDevice(0);
for(cpt_frame = 0; cpt_frame < 500 && cpt_frame < frame_count; cpt_frame ++)
{
printf("%d - Read frame with index\n", cpt_frame); fflush(stdout);
readFrame_with_index(frame1, cpt_frame);
dd origin [email protected]:Germainf/APM.git if(cpt_frame > 200 && cpt_frame < 300)
{
cudaSetDevice(cpt_frame%nGPUs);
printf("%d - GREY\n", cpt_frame); fflush(stdout);
cudaMemcpy(cuda_frame_in, frame1, sizeof(char) * width * height * 3, cudaMemcpyHostToDevice);
dim3 mygrid;
mygrid.x = height/4;
dim3 myblock;
myblock.x = width/4;
kernel_grey<<<mygrid, myblock>>>(cuda_frame_in, height, width);
cudaMemcpy(frame1, cuda_frame_in, sizeof(char) * width * height * 3, cudaMemcpyDeviceToHost);
}
if(cpt_frame >= 300 && cpt_frame < 800)
{
printf("%d - SOBEL\n", cpt_frame); fflush(stdout);
cudaMemcpy(cuda_frame_in, frame1, sizeof(char) * width * height * 3, cudaMemcpyHostToDevice);
dim3 mygrid;
mygrid.x = (height)/1;
dim3 myblock;
myblock.x = (width)/16;
kernel_sobel<<<mygrid, myblock>>>(cuda_frame_in, cuda_frame_out, height, width);
cudaMemcpy(frame1, cuda_frame_out, sizeof(char) * width * height * 3, cudaMemcpyDeviceToHost);
}
writeFrame (frame1);
}
printf("ECRITURE VIDEO FINIE\n");
cudaFree(cuda_frame_in);
cudaFree(cuda_frame_out);
/******************************/
/**** TP3 - END OF QUESTION 4 ****/
/******************************/
free(frame1);
return 0;
}
|
e88f215f600068d48237b471eb243f6ef658145d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/NumericLimits.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/NumericUtils.h>
#include <ATen/native/Resize.h>
#include <c10/util/accumulate.h>
#include <THH/THHGeneral.h>
#include <THH/THHNumerics.cuh>
#include <ATen/hip/cub.cuh>
namespace at { namespace native {
template <typename integer>
constexpr inline integer ceil_div(integer n, integer m) {
return (n + m - 1) / m;
}
template<typename scalar_t, typename idx_t, typename BinaryOperation>
__device__ void binary_op_update(const scalar_t lhs, scalar_t& rhs, const idx_t lhs_idx, idx_t& rhs_idx, BinaryOperation binary_op) {
if(!THCNumerics<scalar_t>::isnan(rhs) && (THCNumerics<scalar_t>::isnan(lhs) || !binary_op(rhs, lhs))) {
rhs = lhs;
rhs_idx = lhs_idx;
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
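/* Worked example for the with-indices variant below (cummax, binary_op = greater_equal):
 * with num_threads_x = 4 and one row [3, 1, 4, 1, 5, 9, 2, 6], the up-sweep leaves
 * partial maxima at stride-2^k offsets, the down-sweep completes the inclusive prefix,
 * and the row is written back as values [3, 3, 4, 4, 5, 9, 9, 9] with indices
 * [0, 0, 2, 2, 4, 5, 5, 5]. block_total / block_idx_final carry the running result into
 * the next chunk of 2 * num_threads_x columns when rows are longer than one chunk.
 */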
template<typename scalar_t, int num_threads_x, int num_threads_y, class BinaryFunction>
__global__ void tensor_kernel_scan_innermost_dim_with_indices(const scalar_t *self_, scalar_t *values_, int64_t *indices_,
int num_rows, int row_size,
scalar_t init, BinaryFunction binary_op) {
__shared__ scalar_t vbuf[num_threads_y][2 * num_threads_x];
__shared__ int64_t ibuf[num_threads_y][2 * num_threads_x];
scalar_t* row_buf = vbuf[threadIdx.y];
int64_t* row_idx_buf = ibuf[threadIdx.y];
for (int block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
int row = block_row + threadIdx.y;
const scalar_t *row_self = self_ + row * row_size;
scalar_t *row_values = values_ + row * row_size;
int64_t *row_indices = indices_ + row * row_size;
scalar_t block_total = init;
int64_t block_idx_final = 0;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (int block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
int col1 = block_col + threadIdx.x;
int col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_self[col1];
row_idx_buf[threadIdx.x] = col1;
} else {
row_buf[threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_self[col2];
row_idx_buf[num_threads_x + threadIdx.x] = col2;
} else {
row_buf[num_threads_x + threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
binary_op_update(block_total, row_buf[0], block_idx_final, row_idx_buf[0], binary_op);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (int s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
int offset = (2 * threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Down-sweep.
for (int s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
int offset = 2 * (threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size){
row_values[col1] = row_buf[threadIdx.x];
row_indices[col1] = row_idx_buf[threadIdx.x];
}
if (col2 < row_size) {
row_values[col2] = row_buf[num_threads_x + threadIdx.x];
row_indices[col2] = row_idx_buf[num_threads_x + threadIdx.x];
}
}
block_total = row_buf[2 * num_threads_x - 1];
block_idx_final = row_idx_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to scan;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryFunction>
__global__ void tensor_kernel_scan_outer_dim_with_indices(scalar_t *self_, scalar_t *values_, int64_t *indices_,
const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size, scalar_t init, BinaryFunction binary_op) {
for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *self = self_ + orow * row_size * num_irows + irow;
scalar_t *values = values_ + orow * row_size * num_irows + irow;
int64_t *indices = indices_ + orow * row_size * num_irows + irow;
scalar_t out = init;
int64_t out_idx = 0;
for (auto col = decltype(row_size){0}; col < row_size; ++col) {
if(THCNumerics<scalar_t>::isnan(*self) || (!THCNumerics<scalar_t>::isnan(out) && binary_op(*self, out))) {
out = *self;
out_idx = col;
}
*values = out;
*indices = out_idx;
self += num_irows;
values += num_irows;
indices += num_irows;
}
}
}
}
void check_fits_in_unsigned(int64_t val, const char* name) {
constexpr auto umax = std::numeric_limits<uint32_t>::max();
TORCH_CHECK(
val >= 0 && val <= umax, name, " must fit in a 32-bit uint32_t value");
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices,
int dim, scalar_t init, BinaryFunction binary_op) {
int64_t row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim);
// Treat all inner dimensions (i.e. dim > dimension) as one.
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end());
//for performance reasons, cuda kernels use uint32_t for loops over irows, orows and row,
//make sure that input is not bigger than supported by uint32_t
check_fits_in_unsigned(num_irows, "num_irows");
check_fits_in_unsigned(num_orows, "num_orows");
check_fits_in_unsigned(row_size, "row_size");
dim3 threads(::min(512, int(num_irows)));
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
dim3 grid(::min(maxGridDim, num_orows), ::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
hipLaunchKernelGGL(( tensor_kernel_scan_outer_dim_with_indices<scalar_t>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_orows, num_irows, row_size, init, binary_op);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t, class BinaryFunction>
__host__ void scan_innermost_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int row_size = self.size(ndim - 1);
int num_rows = self.numel() / row_size;
dim3 threads(16, 32);
dim3 grid(::min(at::cuda::getCurrentDeviceProperties()->maxGridSize[0], ceil_div(num_rows, int(threads.y))));
hipLaunchKernelGGL(( tensor_kernel_scan_innermost_dim_with_indices<scalar_t, 16, 32>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_rows, row_size, init, binary_op);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template<typename scalar_t, typename BinaryFunction>
void scan_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, //int64_t dim) {
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
Tensor self_ = self.contiguous();
Tensor values_ = values.contiguous();
Tensor indices_ = indices.contiguous();
bool copy_values = !values.is_contiguous();
bool copy_indices = !indices.is_contiguous();
if (dim == ndim - 1) {
scan_innermost_dim_with_indices<scalar_t>(self_, values_, indices_, init, binary_op);
} else {
scan_outer_dim_with_indices<scalar_t>(self_, values_, indices_, dim, init, binary_op);
}
if (copy_values){
values.copy_(values_);
}
if (copy_indices){
indices.copy_(indices_);
}
}
void cummax_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {
TensorArg output_arg{ values, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ self, "input", 3 };
checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg});
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "cummax_cuda", [&]() {
scalar_t init = self.is_floating_point() ? (-1*std::numeric_limits<scalar_t>::infinity()) : std::numeric_limits<scalar_t>::lowest();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::greater_equal<scalar_t>());
});
}
void cummin_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {
TensorArg output_arg{ values, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ self, "input", 3 };
checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg});
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "cummin_cuda", [&]() {
scalar_t init = self.is_floating_point() ? std::numeric_limits<scalar_t>::infinity() : std::numeric_limits<scalar_t>::max();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::less_equal<scalar_t>());
});
}
// TODO: The implementation of `tensor_kernel_scan_outer_dim` and
// `tensor_kernel_scan_innermost_dim` is similar to
// `tensor_kernel_scan_outer_dim_with_indices`
// `tensor_kernel_scan_innermost_dim_with_indices` and should be refactored to
// remove the duplication.
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to scan;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
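/* Concretely, element (orow, col, irow) of this flattened view lives at offset
 * orow * row_size * num_irows + col * num_irows + irow, so each thread below starts at
 * orow * row_size * num_irows + irow and walks the scanned dimension with a stride of
 * num_irows, carrying a single running accumulator in a register.
 */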
template<typename scalar_t, class BinaryOp>
__global__ void tensor_kernel_scan_outer_dim(scalar_t *tgt_, scalar_t *src_,
const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size,
const scalar_t init, BinaryOp binary_op)
{
for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *src = src_ + orow * row_size * num_irows + irow;
scalar_t *tgt = tgt_ + orow * row_size * num_irows + irow;
scalar_t acc = init;
for (uint32_t col = 0; col < row_size; ++col) {
acc = binary_op(acc, *src);
*tgt = acc;
src += num_irows;
tgt += num_irows;
}
}
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<typename T, int num_threads_x, int num_threads_y, class BinaryFunction>
__device__ void tensor_kernel_scan_innermost_dim_impl(T* row_buf, T *tgt_, T *src_,
const uint32_t num_rows, const uint32_t row_size,
T init, BinaryFunction binary_op){
for (uint32_t block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
uint32_t row = block_row + threadIdx.y;
T block_total = init;
T *row_src = src_ + row * row_size;
T *row_tgt = tgt_ + row * row_size;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (uint32_t block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
uint32_t col1 = block_col + threadIdx.x;
uint32_t col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_src[col1];
} else {
row_buf[threadIdx.x] = init;
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_src[col2];
} else {
row_buf[num_threads_x + threadIdx.x] = init;
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
row_buf[0] = binary_op(row_buf[0], block_total);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (uint32_t s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
uint32_t offset = (2 * threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Down-sweep.
for (uint32_t s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
uint32_t offset = 2 * (threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x];
if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x];
}
block_total = row_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
template <
typename T,
int num_threads_x,
int num_threads_y,
class BinaryFunction>
__global__ typename std::enable_if<!c10::is_complex<T>::value, void>::type
tensor_kernel_scan_innermost_dim(
T* tgt_,
T* src_,
const uint32_t num_rows,
const uint32_t row_size,
T init,
BinaryFunction binary_op) {
__shared__ T sbuf[num_threads_y][2 * num_threads_x];
T* row_buf = sbuf[threadIdx.y];
tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>(
row_buf, tgt_, src_, num_rows, row_size, init, binary_op);
}
template <
typename T,
int num_threads_x,
int num_threads_y,
class BinaryFunction>
__global__ typename std::enable_if<c10::is_complex<T>::value, void>::type
tensor_kernel_scan_innermost_dim(
T* tgt_,
T* src_,
const uint32_t num_rows,
const uint32_t row_size,
T init,
BinaryFunction binary_op) {
// As we cannot directly initialize shared array for complex types
// Reference:
// `error: initializer not allowed for __shared__ variable`
// We instead get the base scalar type and allocate twice number of
// elements required of base type and reinterpret them as complex.
using base_t = typename scalar_value_type<T>::type;
__shared__ base_t sbuf[num_threads_y][4 * num_threads_x];
T* row_buf = reinterpret_cast<T*>(sbuf[threadIdx.y]);
tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>(
row_buf, tgt_, src_, num_rows, row_size, init, binary_op);
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim(const Tensor& self, Tensor& result,
int dim, scalar_t init, BinaryFunction binary_op) {
const int64_t row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim);
// Treat all inner dimensions (i.e. dim > dimension) as one.
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end());
dim3 threads(::min(512, int(num_irows)));
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
dim3 grid(::min(maxGridDim, num_orows), ::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
check_fits_in_unsigned(num_irows, "num_irows");
check_fits_in_unsigned(num_orows, "num_orows");
check_fits_in_unsigned(row_size, "row_size");
hipLaunchKernelGGL(( tensor_kernel_scan_outer_dim<scalar_t>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_orows, num_irows, row_size, init, binary_op);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t, class BinaryFunction>
void scan_innermost_dim(const Tensor& self, Tensor& result, scalar_t init, BinaryFunction binary_op) {
int64_t ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int64_t row_size = self.size(ndim - 1);
int64_t num_rows = self.numel() / row_size;
dim3 threads(16, 32);
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0];
dim3 grid(::min(maxGridDim, ceil_div(num_rows, int64_t{threads.y})));
check_fits_in_unsigned(num_rows, "Number of rows (self.numel()/self.size(self.dim()-1))");
check_fits_in_unsigned(row_size, "row_size");
hipLaunchKernelGGL(( tensor_kernel_scan_innermost_dim<scalar_t, 16, 32>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_rows, row_size, init, binary_op);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template<typename scalar_t, typename BinaryFunction>
void scan_dim(const Tensor& self, Tensor& result,
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
Tensor self_ = self.contiguous();
bool copy_result = !result.is_contiguous();
Tensor result_ = result.contiguous();
if (self.numel() == self.size(dim)) {
cuda::cub::inclusive_scan(self_.data_ptr<scalar_t>(), result_.data_ptr<scalar_t>(), binary_op, self.numel());
} else if (dim == ndim - 1) {
scan_innermost_dim<scalar_t>(self_, result_, init, binary_op);
} else {
scan_outer_dim<scalar_t>(self_, result_, dim, init, binary_op);
}
if (copy_result) {
result.copy_(result_);
}
}
Tensor& _logcumsumexp_out_cuda(const Tensor& self, int64_t dim, Tensor& result) {
result.resize_(self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
TensorArg output_arg{ result, "output", 1 };
TensorArg input_arg{ self, "input", 2 };
checkAllSameGPU(__func__, {output_arg, input_arg});
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
self.scalar_type(), "logcumsumexp_cuda",
[&]() {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t init = -std::numeric_limits<scalar_t>::infinity();
auto log_add_exp = [] C10_HOST_DEVICE (const scalar_t x, const scalar_t y) -> scalar_t {
scalar_t min = at::_isnan(y) ? y : std::min<scalar_t>(x,y); //std::min returns first arg if one of the args is nan
scalar_t max = at::_isnan(y) ? y : std::max<scalar_t>(x,y); //std::max returns first arg if one of the args is nan
if (min != max || ::isfinite(static_cast<accscalar_t>(min))) {
// nan will be propagated here
return ::log1p(::exp(min - max)) + max;
} else {
// special case to correctly handle infinite inputs
return x;
}
};
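// The lambda above is the numerically stable log-add-exp:
//   log(exp(x) + exp(y)) = max(x, y) + log1p(exp(min(x, y) - max(x, y)))
// with NaNs propagated through the min/max choice and the non-finite special case
// keeping (-inf, -inf) and (+inf, +inf) inputs from turning into NaN.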
scan_dim<scalar_t>(self, result, wrap_dim, init, log_add_exp);
});
return result;
}
Tensor _logcumsumexp_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return _logcumsumexp_out_cuda(self, dim, result);
}
Tensor& _cumsum_out_cuda(const Tensor& self, int64_t dim, Tensor& result) {
TensorArg output_arg{result, "output", 1};
TensorArg input_arg{self, "input", 2};
checkAllSameGPU(__func__, {output_arg, input_arg});
checkSameType("cumsum", output_arg, input_arg);
at::native::resize_output(result, self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
ScalarType::Half, ScalarType::BFloat16,
self.scalar_type(), "cumsum_cuda",
[&]() {
scalar_t init = 0;
scan_dim<scalar_t>(
self,
result,
wrap_dim,
init,
std::plus<scalar_t>());
});
return result;
}
Tensor _cumsum_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return at::native::_cumsum_out_cuda(self, dim, result);
}
Tensor& _cumprod_out_cuda(const Tensor& self, int64_t dim, Tensor& result) {
TensorArg output_arg{result, "output", 1};
TensorArg input_arg{self, "input", 2};
checkAllSameGPU(__func__, {output_arg, input_arg});
checkSameType(__func__, output_arg, input_arg);
at::native::resize_output(result, self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "cumprod_cuda", [&]() {
scalar_t init = 1;
scan_dim<scalar_t>(
self,
result,
wrap_dim,
init,
std::multiplies<scalar_t>());
});
return result;
}
Tensor _cumprod_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return at::native::_cumprod_out_cuda(self, dim, result);
}
}} // namespace at::native
| e88f215f600068d48237b471eb243f6ef658145d.cu | #include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/NumericUtils.h>
#include <ATen/native/Resize.h>
#include <c10/util/accumulate.h>
#include <THC/THCGeneral.h>
#include <THC/THCNumerics.cuh>
#include <ATen/cuda/cub.cuh>
namespace at { namespace native {
template <typename integer>
constexpr inline integer ceil_div(integer n, integer m) {
return (n + m - 1) / m;
}
template<typename scalar_t, typename idx_t, typename BinaryOperation>
__device__ void binary_op_update(const scalar_t lhs, scalar_t& rhs, const idx_t lhs_idx, idx_t& rhs_idx, BinaryOperation binary_op) {
if(!THCNumerics<scalar_t>::isnan(rhs) && (THCNumerics<scalar_t>::isnan(lhs) || !binary_op(rhs, lhs))) {
rhs = lhs;
rhs_idx = lhs_idx;
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<typename scalar_t, int num_threads_x, int num_threads_y, class BinaryFunction>
__global__ void tensor_kernel_scan_innermost_dim_with_indices(const scalar_t *self_, scalar_t *values_, int64_t *indices_,
int num_rows, int row_size,
scalar_t init, BinaryFunction binary_op) {
__shared__ scalar_t vbuf[num_threads_y][2 * num_threads_x];
__shared__ int64_t ibuf[num_threads_y][2 * num_threads_x];
scalar_t* row_buf = vbuf[threadIdx.y];
int64_t* row_idx_buf = ibuf[threadIdx.y];
for (int block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
int row = block_row + threadIdx.y;
const scalar_t *row_self = self_ + row * row_size;
scalar_t *row_values = values_ + row * row_size;
int64_t *row_indices = indices_ + row * row_size;
scalar_t block_total = init;
int64_t block_idx_final = 0;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (int block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
int col1 = block_col + threadIdx.x;
int col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_self[col1];
row_idx_buf[threadIdx.x] = col1;
} else {
row_buf[threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_self[col2];
row_idx_buf[num_threads_x + threadIdx.x] = col2;
} else {
row_buf[num_threads_x + threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
binary_op_update(block_total, row_buf[0], block_idx_final, row_idx_buf[0], binary_op);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (int s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
int offset = (2 * threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Down-sweep.
for (int s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
int offset = 2 * (threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size){
row_values[col1] = row_buf[threadIdx.x];
row_indices[col1] = row_idx_buf[threadIdx.x];
}
if (col2 < row_size) {
row_values[col2] = row_buf[num_threads_x + threadIdx.x];
row_indices[col2] = row_idx_buf[num_threads_x + threadIdx.x];
}
}
block_total = row_buf[2 * num_threads_x - 1];
block_idx_final = row_idx_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to scan;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryFunction>
__global__ void tensor_kernel_scan_outer_dim_with_indices(scalar_t *self_, scalar_t *values_, int64_t *indices_,
const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size, scalar_t init, BinaryFunction binary_op) {
for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *self = self_ + orow * row_size * num_irows + irow;
scalar_t *values = values_ + orow * row_size * num_irows + irow;
int64_t *indices = indices_ + orow * row_size * num_irows + irow;
scalar_t out = init;
int64_t out_idx = 0;
for (auto col = decltype(row_size){0}; col < row_size; ++col) {
if(THCNumerics<scalar_t>::isnan(*self) || (!THCNumerics<scalar_t>::isnan(out) && binary_op(*self, out))) {
out = *self;
out_idx = col;
}
*values = out;
*indices = out_idx;
self += num_irows;
values += num_irows;
indices += num_irows;
}
}
}
}
void check_fits_in_unsigned(int64_t val, const char* name) {
constexpr auto umax = std::numeric_limits<uint32_t>::max();
TORCH_CHECK(
val >= 0 && val <= umax, name, " must fit in a 32-bit uint32_t value");
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices,
int dim, scalar_t init, BinaryFunction binary_op) {
int64_t row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim);
// Treat all inner dimensions (i.e. dim > dimension) as one.
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end());
//for performance reasons, cuda kernels use uint32_t for loops over irows, orows and row,
//make sure that input is not bigger than supported by uint32_t
check_fits_in_unsigned(num_irows, "num_irows");
check_fits_in_unsigned(num_orows, "num_orows");
check_fits_in_unsigned(row_size, "row_size");
dim3 threads(std::min(512, int(num_irows)));
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
dim3 grid(std::min(maxGridDim, num_orows), std::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
tensor_kernel_scan_outer_dim_with_indices<scalar_t><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_orows, num_irows, row_size, init, binary_op);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t, class BinaryFunction>
__host__ void scan_innermost_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int row_size = self.size(ndim - 1);
int num_rows = self.numel() / row_size;
dim3 threads(16, 32);
dim3 grid(std::min(at::cuda::getCurrentDeviceProperties()->maxGridSize[0], ceil_div(num_rows, int(threads.y))));
tensor_kernel_scan_innermost_dim_with_indices<scalar_t, 16, 32><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_rows, row_size, init, binary_op);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template<typename scalar_t, typename BinaryFunction>
void scan_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, //int64_t dim) {
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
Tensor self_ = self.contiguous();
Tensor values_ = values.contiguous();
Tensor indices_ = indices.contiguous();
bool copy_values = !values.is_contiguous();
bool copy_indices = !indices.is_contiguous();
if (dim == ndim - 1) {
scan_innermost_dim_with_indices<scalar_t>(self_, values_, indices_, init, binary_op);
} else {
scan_outer_dim_with_indices<scalar_t>(self_, values_, indices_, dim, init, binary_op);
}
if (copy_values){
values.copy_(values_);
}
if (copy_indices){
indices.copy_(indices_);
}
}
void cummax_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {
TensorArg output_arg{ values, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ self, "input", 3 };
checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg});
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "cummax_cuda", [&]() {
scalar_t init = self.is_floating_point() ? (-1*std::numeric_limits<scalar_t>::infinity()) : std::numeric_limits<scalar_t>::lowest();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::greater_equal<scalar_t>());
});
}
void cummin_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {
TensorArg output_arg{ values, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ self, "input", 3 };
checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg});
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "cummin_cuda", [&]() {
scalar_t init = self.is_floating_point() ? std::numeric_limits<scalar_t>::infinity() : std::numeric_limits<scalar_t>::max();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::less_equal<scalar_t>());
});
}
// TODO: The implementation of `tensor_kernel_scan_outer_dim` and
// `tensor_kernel_scan_innermost_dim` is similar to
// `tensor_kernel_scan_outer_dim_with_indices`
// `tensor_kernel_scan_innermost_dim_with_indices` and should be refactored to
// remove the duplication.
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to scan;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
 * Thread blocks with the same blockIdx.x process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryOp>
__global__ void tensor_kernel_scan_outer_dim(scalar_t *tgt_, scalar_t *src_,
const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size,
const scalar_t init, BinaryOp binary_op)
{
for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *src = src_ + orow * row_size * num_irows + irow;
scalar_t *tgt = tgt_ + orow * row_size * num_irows + irow;
scalar_t acc = init;
for (uint32_t col = 0; col < row_size; ++col) {
acc = binary_op(acc, *src);
*tgt = acc;
src += num_irows;
tgt += num_irows;
}
}
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<typename T, int num_threads_x, int num_threads_y, class BinaryFunction>
__device__ void tensor_kernel_scan_innermost_dim_impl(T* row_buf, T *tgt_, T *src_,
const uint32_t num_rows, const uint32_t row_size,
T init, BinaryFunction binary_op){
for (uint32_t block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
uint32_t row = block_row + threadIdx.y;
T block_total = init;
T *row_src = src_ + row * row_size;
T *row_tgt = tgt_ + row * row_size;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (uint32_t block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
uint32_t col1 = block_col + threadIdx.x;
uint32_t col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_src[col1];
} else {
row_buf[threadIdx.x] = init;
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_src[col2];
} else {
row_buf[num_threads_x + threadIdx.x] = init;
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
row_buf[0] = binary_op(row_buf[0], block_total);
}
}
__syncthreads();
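// The up-sweep/down-sweep pair below performs a work-efficient, tree-based
// inclusive scan over the 2 * num_threads_x values staged in shared memory.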
// Parallel reduction (up-sweep).
for (uint32_t s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
uint32_t offset = (2 * threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Down-sweep.
for (uint32_t s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
uint32_t offset = 2 * (threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x];
if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x];
}
block_total = row_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
template <
typename T,
int num_threads_x,
int num_threads_y,
class BinaryFunction>
__global__ typename std::enable_if<!c10::is_complex<T>::value, void>::type
tensor_kernel_scan_innermost_dim(
T* tgt_,
T* src_,
const uint32_t num_rows,
const uint32_t row_size,
T init,
BinaryFunction binary_op) {
__shared__ T sbuf[num_threads_y][2 * num_threads_x];
T* row_buf = sbuf[threadIdx.y];
tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>(
row_buf, tgt_, src_, num_rows, row_size, init, binary_op);
}
template <
typename T,
int num_threads_x,
int num_threads_y,
class BinaryFunction>
__global__ typename std::enable_if<c10::is_complex<T>::value, void>::type
tensor_kernel_scan_innermost_dim(
T* tgt_,
T* src_,
const uint32_t num_rows,
const uint32_t row_size,
T init,
BinaryFunction binary_op) {
// We cannot directly declare the shared array with a complex element type;
// doing so triggers:
// `error: initializer not allowed for __shared__ variable`
// Instead we take the underlying scalar type, allocate twice the number of
// elements of that base type, and reinterpret the buffer as complex.
using base_t = typename scalar_value_type<T>::type;
__shared__ base_t sbuf[num_threads_y][4 * num_threads_x];
T* row_buf = reinterpret_cast<T*>(sbuf[threadIdx.y]);
tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>(
row_buf, tgt_, src_, num_rows, row_size, init, binary_op);
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim(const Tensor& self, Tensor& result,
int dim, scalar_t init, BinaryFunction binary_op) {
const int64_t row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim);
// Treat all inner dimensions (i.e. dim_ > dim) as one.
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end());
dim3 threads(std::min(512, int(num_irows)));
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
dim3 grid(std::min(maxGridDim, num_orows), std::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
check_fits_in_unsigned(num_irows, "num_irows");
check_fits_in_unsigned(num_orows, "num_orows");
check_fits_in_unsigned(row_size, "row_size");
tensor_kernel_scan_outer_dim<scalar_t><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_orows, num_irows, row_size, init, binary_op);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t, class BinaryFunction>
void scan_innermost_dim(const Tensor& self, Tensor& result, scalar_t init, BinaryFunction binary_op) {
int64_t ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int64_t row_size = self.size(ndim - 1);
int64_t num_rows = self.numel() / row_size;
dim3 threads(16, 32);
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0];
dim3 grid(std::min(maxGridDim, ceil_div(num_rows, int64_t{threads.y})));
check_fits_in_unsigned(num_rows, "Number of rows (self.numel()/self.size(self.dim()-1))");
check_fits_in_unsigned(row_size, "row_size");
tensor_kernel_scan_innermost_dim<scalar_t, 16, 32><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_rows, row_size, init, binary_op);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
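// Runs a plain inclusive scan along `dim`: a scan over the whole (effectively 1-D) tensor
// goes through cub's inclusive_scan, the innermost dimension uses the tiled shared-memory
// kernel, and any other dimension uses the outer-dim kernel.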
template<typename scalar_t, typename BinaryFunction>
void scan_dim(const Tensor& self, Tensor& result,
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
Tensor self_ = self.contiguous();
bool copy_result = !result.is_contiguous();
Tensor result_ = result.contiguous();
if (self.numel() == self.size(dim)) {
cuda::cub::inclusive_scan(self_.data_ptr<scalar_t>(), result_.data_ptr<scalar_t>(), binary_op, self.numel());
} else if (dim == ndim - 1) {
scan_innermost_dim<scalar_t>(self_, result_, init, binary_op);
} else {
scan_outer_dim<scalar_t>(self_, result_, dim, init, binary_op);
}
if (copy_result) {
result.copy_(result_);
}
}
Tensor& _logcumsumexp_out_cuda(const Tensor& self, int64_t dim, Tensor& result) {
result.resize_(self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
TensorArg output_arg{ result, "output", 1 };
TensorArg input_arg{ self, "input", 2 };
checkAllSameGPU(__func__, {output_arg, input_arg});
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
self.scalar_type(), "logcumsumexp_cuda",
[&]() {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t init = -std::numeric_limits<scalar_t>::infinity();
auto log_add_exp = [] C10_HOST_DEVICE (const scalar_t x, const scalar_t y) -> scalar_t {
scalar_t min = at::_isnan(y) ? y : std::min<scalar_t>(x,y); //std::min returns first arg if one of the args is nan
scalar_t max = at::_isnan(y) ? y : std::max<scalar_t>(x,y); //std::max returns first arg if one of the args is nan
if (min != max || ::isfinite(static_cast<accscalar_t>(min))) {
// nan will be propagated here
return ::log1p(std::exp(min - max)) + max;
} else {
// special case to correctly handle infinite inputs
return x;
}
};
scan_dim<scalar_t>(self, result, wrap_dim, init, log_add_exp);
});
return result;
}
Tensor _logcumsumexp_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return _logcumsumexp_out_cuda(self, dim, result);
}
Tensor& _cumsum_out_cuda(const Tensor& self, int64_t dim, Tensor& result) {
TensorArg output_arg{result, "output", 1};
TensorArg input_arg{self, "input", 2};
checkAllSameGPU(__func__, {output_arg, input_arg});
checkSameType("cumsum", output_arg, input_arg);
at::native::resize_output(result, self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
ScalarType::Half, ScalarType::BFloat16,
self.scalar_type(), "cumsum_cuda",
[&]() {
scalar_t init = 0;
scan_dim<scalar_t>(
self,
result,
wrap_dim,
init,
std::plus<scalar_t>());
});
return result;
}
Tensor _cumsum_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return at::native::_cumsum_out_cuda(self, dim, result);
}
Tensor& _cumprod_out_cuda(const Tensor& self, int64_t dim, Tensor& result) {
TensorArg output_arg{result, "output", 1};
TensorArg input_arg{self, "input", 2};
checkAllSameGPU(__func__, {output_arg, input_arg});
checkSameType(__func__, output_arg, input_arg);
at::native::resize_output(result, self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "cumprod_cuda", [&]() {
scalar_t init = 1;
scan_dim<scalar_t>(
self,
result,
wrap_dim,
init,
std::multiplies<scalar_t>());
});
return result;
}
Tensor _cumprod_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return at::native::_cumprod_out_cuda(self, dim, result);
}
}} // namespace at::native
|
a7a8e5ca023d5c3a22b836ce6dff34b7f1b8adb5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hipfft.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include "../inc/g_array.h"
typedef struct{
int n_big;
int sub_sz;
int phase_sz;
int image_sz;
int subx;
int suby;
float* d_phase;
int* d_pupil;
float* d_image;
hipfftComplex* d_fft_input;
hipfftComplex* d_fft_output;
hipfftHandle fftplanfwd;
} CCDPLAN;
int g_n_ccd = 0;
CCDPLAN** ccds;
void ccd_plan(int n_big, int sub_sz, int phase_sz, int image_sz, int subx, int suby,
int* pupil){
g_n_ccd += 1;
CCDPLAN** ccds0 = ccds;
ccds = (CCDPLAN**) malloc(g_n_ccd * sizeof(CCDPLAN*));
for(int i = 0; i < g_n_ccd - 1; i++){
ccds[i] = ccds0[i];
}
free(ccds0);
CCDPLAN* cp = (CCDPLAN *) malloc(sizeof(CCDPLAN));
ccds[g_n_ccd - 1] = cp;
cp->n_big = n_big;
cp->sub_sz = sub_sz;
cp->phase_sz = phase_sz;
cp->image_sz = image_sz;
cp->subx = subx;
cp->suby = suby;
checkCudaErrors(
hipMalloc((void**)&cp->d_phase, phase_sz * phase_sz * sizeof(float)));
hipMalloc((void**)&cp->d_fft_input, n_big * n_big * sizeof(hipfftComplex));
hipMemset(cp->d_fft_input, 0, n_big * n_big * sizeof(hipfftComplex));
hipMalloc((void**)&cp->d_fft_output, n_big * n_big * sizeof(hipfftComplex));
hipMalloc((void**)&cp->d_pupil, sub_sz * sub_sz * sizeof(int));
hipMalloc((void**)&cp->d_image, image_sz * image_sz * sizeof(float));
hipMemcpy(cp->d_pupil, pupil, sub_sz * sub_sz * sizeof(int), hipMemcpyHostToDevice);
hipfftPlan2d(&(cp->fftplanfwd), n_big, n_big, HIPFFT_C2C);
}
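// Fills the zero-padded n_big x n_big FFT input: each subaperture pixel inside the pupil
// gets the complex phasor exp(i*phase), written into the top-left sub_sz x sub_sz corner.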
__global__ void setFFTInput(hipfftComplex* fft_out, float* phase, int* pupil, int subx, int suby,
int n_big, int sub_sz, int phase_sz){
int i = blockDim.x *blockIdx.x + threadIdx.x;
if(i < sub_sz * sub_sz){
int ix = i / sub_sz;
int iy = i % sub_sz;
int ip = ix * phase_sz + iy;
fft_out[ix * n_big + iy].x = cos(phase[ip]) * pupil[i];
fft_out[ix * n_big + iy].y = sin(phase[ip]) * pupil[i];
}
}
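// Copies the central image_sz x image_sz window of the FFT power spectrum (|F|^2) out of
// the n_big x n_big transform, using fftshift-style index wrapping to center DC.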
__global__ void getFFTimg(float* img, hipfftComplex* fft_output,
int image_sz, int n_big){
int i = blockDim.x *blockIdx.x + threadIdx.x;
int fx;
int fy;
if(i < image_sz * image_sz){
int ix = i / image_sz;
int iy = i % image_sz;
if(ix >= image_sz / 2){
fx = ix - image_sz/2;
}else{
fx = n_big + ix - image_sz/2;
}
if(iy >= image_sz / 2){
fy = iy - image_sz / 2;
}else{
fy = n_big + iy - image_sz / 2;
}
img[i] = fft_output[fy + fx * n_big].x * fft_output[fy + fx * n_big].x;
img[i] += fft_output[fy + fx * n_big].y * fft_output[fy + fx * n_big].y;
}
}
void ccd_run_single(float* image, float* phase, int ccd_id){
CCDPLAN* cp = ccds[ccd_id];
checkCudaErrors(
hipMemcpy(cp->d_phase, phase, cp->phase_sz * cp->phase_sz * sizeof(float), hipMemcpyHostToDevice));
int threadsPerBlock = 128;
int blocksPerGrid = (cp->sub_sz * cp->sub_sz + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( setFFTInput), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, cp->d_fft_input, cp->d_phase, cp->d_pupil,
cp->subx, cp->suby, cp->n_big, cp->sub_sz, cp->phase_sz);
hipDeviceSynchronize();
checkCudaErrors(
hipfftExecC2C(cp->fftplanfwd, cp->d_fft_input, cp->d_fft_output, HIPFFT_FORWARD));
blocksPerGrid = (cp->image_sz * cp->image_sz + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( getFFTimg), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, cp->d_image, cp->d_fft_output, cp->image_sz, cp->n_big);
hipDeviceSynchronize();
// hipfftComplex* fft_input = (hipfftComplex*) malloc(cp->n_big * cp->n_big * sizeof(hipfftComplex));
// hipMemcpy(fft_input, cp->d_fft_output, cp->n_big * cp->n_big * sizeof(hipfftComplex), hipMemcpyDeviceToHost);
// ARRAY* fftres = array_zeros(2, cp->n_big, cp->n_big);
// for(int i = 0; i < fftres->size; i++){
// fftres->data[i] = fft_input[i].x * fft_input[i].x + fft_input[i].y * fft_input[i].y;
// }
checkCudaErrors(
hipMemcpy(image, cp->d_image, cp->image_sz * cp->image_sz * sizeof(float), hipMemcpyDeviceToHost));
hipDeviceSynchronize();
}
int main(){
int sub_sz = 30;
int phase_sz = 40;
int pupil[sub_sz * sub_sz];
int n_big = 400;
for(int i = 0; i < sub_sz * sub_sz; i++){
pupil[i] = 1;
}
int image_sz = 51;
int subx = 5;
int suby = 5;
ccd_plan(n_big, sub_sz, phase_sz, image_sz, subx, suby, pupil);
ccd_plan(n_big*2, sub_sz, phase_sz, image_sz, subx, suby, pupil);
float phase[phase_sz * phase_sz];
for(int i = 0; i < phase_sz * phase_sz; i++){
phase[i] = 0;
}
ARRAY * image = array_zeros(2, image_sz, image_sz);
ccd_run_single(image->data, phase, 0);
ccd_run_single(image->data, phase, 1);
FILE *fp;
if(!(fp = fopen("output.bin", "wb"))){
printf("array file error!");
}
array_save(fp, image);
fclose(fp);
} | a7a8e5ca023d5c3a22b836ce6dff34b7f1b8adb5.cu | #include <stdio.h>
#include <cufft.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include "../inc/g_array.h"
typedef struct{
int n_big;
int sub_sz;
int phase_sz;
int image_sz;
int subx;
int suby;
float* d_phase;
int* d_pupil;
float* d_image;
cufftComplex* d_fft_input;
cufftComplex* d_fft_output;
cufftHandle fftplanfwd;
} CCDPLAN;
int g_n_ccd = 0;
CCDPLAN** ccds;
void ccd_plan(int n_big, int sub_sz, int phase_sz, int image_sz, int subx, int suby,
int* pupil){
g_n_ccd += 1;
CCDPLAN** ccds0 = ccds;
ccds = (CCDPLAN**) malloc(g_n_ccd * sizeof(CCDPLAN*));
for(int i = 0; i < g_n_ccd - 1; i++){
ccds[i] = ccds0[i];
}
free(ccds0);
CCDPLAN* cp = (CCDPLAN *) malloc(sizeof(CCDPLAN));
ccds[g_n_ccd - 1] = cp;
cp->n_big = n_big;
cp->sub_sz = sub_sz;
cp->phase_sz = phase_sz;
cp->image_sz = image_sz;
cp->subx = subx;
cp->suby = suby;
checkCudaErrors(
cudaMalloc((void**)&cp->d_phase, phase_sz * phase_sz * sizeof(float)));
cudaMalloc((void**)&cp->d_fft_input, n_big * n_big * sizeof(cufftComplex));
cudaMemset(cp->d_fft_input, 0, n_big * n_big * sizeof(cufftComplex));
cudaMalloc((void**)&cp->d_fft_output, n_big * n_big * sizeof(cufftComplex));
cudaMalloc((void**)&cp->d_pupil, sub_sz * sub_sz * sizeof(int));
cudaMalloc((void**)&cp->d_image, image_sz * image_sz * sizeof(float));
cudaMemcpy(cp->d_pupil, pupil, sub_sz * sub_sz * sizeof(int), cudaMemcpyHostToDevice);
cufftPlan2d(&(cp->fftplanfwd), n_big, n_big, CUFFT_C2C);
}
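// Fills the zero-padded n_big x n_big FFT input: each subaperture pixel inside the pupil
// gets the complex phasor exp(i*phase), written into the top-left sub_sz x sub_sz corner.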
__global__ void setFFTInput(cufftComplex* fft_out, float* phase, int* pupil, int subx, int suby,
int n_big, int sub_sz, int phase_sz){
int i = blockDim.x *blockIdx.x + threadIdx.x;
if(i < sub_sz * sub_sz){
int ix = i / sub_sz;
int iy = i % sub_sz;
int ip = ix * phase_sz + iy;
fft_out[ix * n_big + iy].x = cos(phase[ip]) * pupil[i];
fft_out[ix * n_big + iy].y = sin(phase[ip]) * pupil[i];
}
}
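// Copies the central image_sz x image_sz window of the FFT power spectrum (|F|^2) out of
// the n_big x n_big transform, using fftshift-style index wrapping to center DC.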
__global__ void getFFTimg(float* img, cufftComplex* fft_output,
int image_sz, int n_big){
int i = blockDim.x *blockIdx.x + threadIdx.x;
int fx;
int fy;
if(i < image_sz * image_sz){
int ix = i / image_sz;
int iy = i % image_sz;
if(ix >= image_sz / 2){
fx = ix - image_sz/2;
}else{
fx = n_big + ix - image_sz/2;
}
if(iy >= image_sz / 2){
fy = iy - image_sz / 2;
}else{
fy = n_big + iy - image_sz / 2;
}
img[i] = fft_output[fy + fx * n_big].x * fft_output[fy + fx * n_big].x;
img[i] += fft_output[fy + fx * n_big].y * fft_output[fy + fx * n_big].y;
}
}
void ccd_run_single(float* image, float* phase, int ccd_id){
CCDPLAN* cp = ccds[ccd_id];
checkCudaErrors(
cudaMemcpy(cp->d_phase, phase, cp->phase_sz * cp->phase_sz * sizeof(float), cudaMemcpyHostToDevice));
int threadsPerBlock = 128;
int blocksPerGrid = (cp->sub_sz * cp->sub_sz + threadsPerBlock - 1) / threadsPerBlock;
setFFTInput<<<blocksPerGrid, threadsPerBlock>>>(cp->d_fft_input, cp->d_phase, cp->d_pupil,
cp->subx, cp->suby, cp->n_big, cp->sub_sz, cp->phase_sz);
cudaDeviceSynchronize();
checkCudaErrors(
cufftExecC2C(cp->fftplanfwd, cp->d_fft_input, cp->d_fft_output, CUFFT_FORWARD));
blocksPerGrid = (cp->image_sz * cp->image_sz + threadsPerBlock - 1) / threadsPerBlock;
getFFTimg<<<blocksPerGrid, threadsPerBlock>>>(cp->d_image, cp->d_fft_output, cp->image_sz, cp->n_big);
cudaDeviceSynchronize();
// cufftComplex* fft_input = (cufftComplex*) malloc(cp->n_big * cp->n_big * sizeof(cufftComplex));
// cudaMemcpy(fft_input, cp->d_fft_output, cp->n_big * cp->n_big * sizeof(cufftComplex), cudaMemcpyDeviceToHost);
// ARRAY* fftres = array_zeros(2, cp->n_big, cp->n_big);
// for(int i = 0; i < fftres->size; i++){
// fftres->data[i] = fft_input[i].x * fft_input[i].x + fft_input[i].y * fft_input[i].y;
// }
checkCudaErrors(
cudaMemcpy(image, cp->d_image, cp->image_sz * cp->image_sz * sizeof(float), cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
}
int main(){
int sub_sz = 30;
int phase_sz = 40;
int pupil[sub_sz * sub_sz];
int n_big = 400;
for(int i = 0; i < sub_sz * sub_sz; i++){
pupil[i] = 1;
}
int image_sz = 51;
int subx = 5;
int suby = 5;
ccd_plan(n_big, sub_sz, phase_sz, image_sz, subx, suby, pupil);
ccd_plan(n_big*2, sub_sz, phase_sz, image_sz, subx, suby, pupil);
float phase[phase_sz * phase_sz];
for(int i = 0; i < phase_sz * phase_sz; i++){
phase[i] = 0;
}
ARRAY * image = array_zeros(2, image_sz, image_sz);
ccd_run_single(image->data, phase, 0);
ccd_run_single(image->data, phase, 1);
FILE *fp;
if(!(fp = fopen("output.bin", "wb"))){
printf("array file error!");
}
array_save(fp, image);
fclose(fp);
} |
9479000a1f456e799e0fd115f87b2592f154e24b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/map_then_reduce.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace raft {
namespace linalg {
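// Reference implementation: each thread atomically adds map(in[idx]) into out[0],
// producing the expected sum that mapThenSumReduce is checked against.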
template <typename Type, typename MapOp>
__global__ void naiveMapReduceKernel(Type *out, const Type *in, size_t len,
MapOp map) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
raft::myAtomicAdd(out, map(in[idx]));
}
}
template <typename Type, typename MapOp>
void naiveMapReduce(Type *out, const Type *in, size_t len, MapOp map,
hipStream_t stream) {
static const int TPB = 64;
int nblks = raft::ceildiv(len, (size_t)TPB);
hipLaunchKernelGGL(( naiveMapReduceKernel<Type, MapOp>)
, dim3(nblks), dim3(TPB), 0, stream, out, in, len, map);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename T>
struct MapReduceInputs {
T tolerance;
size_t len;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const MapReduceInputs<T> &dims) {
return os;
}
// Defined outside the test class, or else we get the following compilation error:
// "an extended __device__ lambda cannot have private or protected access
// within its class"
template <typename T>
void mapReduceLaunch(T *out_ref, T *out, const T *in, size_t len,
hipStream_t stream) {
auto op = [] __device__(T in) { return in; };
naiveMapReduce(out_ref, in, len, op, stream);
mapThenSumReduce(out, len, op, 0, in);
}
template <typename T>
class MapReduceTest : public ::testing::TestWithParam<MapReduceInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MapReduceInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
auto len = params.len;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0), stream);
mapReduceLaunch(out_ref, out, in, len, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
}
protected:
MapReduceInputs<T> params;
T *in, *out_ref, *out;
};
const std::vector<MapReduceInputs<float>> inputsf = {
{0.001f, 1024 * 1024, 1234ULL}};
typedef MapReduceTest<float> MapReduceTestF;
TEST_P(MapReduceTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MapReduceTests, MapReduceTestF,
::testing::ValuesIn(inputsf));
const std::vector<MapReduceInputs<double>> inputsd = {
{0.000001, 1024 * 1024, 1234ULL}};
typedef MapReduceTest<double> MapReduceTestD;
TEST_P(MapReduceTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MapReduceTests, MapReduceTestD,
::testing::ValuesIn(inputsd));
} // end namespace linalg
} // end namespace raft
| 9479000a1f456e799e0fd115f87b2592f154e24b.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/map_then_reduce.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace raft {
namespace linalg {
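// Reference implementation: each thread atomically adds map(in[idx]) into out[0],
// producing the expected sum that mapThenSumReduce is checked against.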
template <typename Type, typename MapOp>
__global__ void naiveMapReduceKernel(Type *out, const Type *in, size_t len,
MapOp map) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
raft::myAtomicAdd(out, map(in[idx]));
}
}
template <typename Type, typename MapOp>
void naiveMapReduce(Type *out, const Type *in, size_t len, MapOp map,
cudaStream_t stream) {
static const int TPB = 64;
int nblks = raft::ceildiv(len, (size_t)TPB);
naiveMapReduceKernel<Type, MapOp>
<<<nblks, TPB, 0, stream>>>(out, in, len, map);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename T>
struct MapReduceInputs {
T tolerance;
size_t len;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const MapReduceInputs<T> &dims) {
return os;
}
// Defined outside the test class, or else we get the following compilation error:
// "an extended __device__ lambda cannot have private or protected access
// within its class"
template <typename T>
void mapReduceLaunch(T *out_ref, T *out, const T *in, size_t len,
cudaStream_t stream) {
auto op = [] __device__(T in) { return in; };
naiveMapReduce(out_ref, in, len, op, stream);
mapThenSumReduce(out, len, op, 0, in);
}
template <typename T>
class MapReduceTest : public ::testing::TestWithParam<MapReduceInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MapReduceInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
auto len = params.len;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0), stream);
mapReduceLaunch(out_ref, out, in, len, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
}
protected:
MapReduceInputs<T> params;
T *in, *out_ref, *out;
};
const std::vector<MapReduceInputs<float>> inputsf = {
{0.001f, 1024 * 1024, 1234ULL}};
typedef MapReduceTest<float> MapReduceTestF;
TEST_P(MapReduceTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MapReduceTests, MapReduceTestF,
::testing::ValuesIn(inputsf));
const std::vector<MapReduceInputs<double>> inputsd = {
{0.000001, 1024 * 1024, 1234ULL}};
typedef MapReduceTest<double> MapReduceTestD;
TEST_P(MapReduceTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MapReduceTests, MapReduceTestD,
::testing::ValuesIn(inputsd));
} // end namespace linalg
} // end namespace raft
|
3ccba220a03d4a3bb8a3931a87e74b1cb47d8115.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include <cutil.h>
#include <hip/hip_runtime.h>
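// Floor of the base-`base` logarithm of `num`, computed in single precision.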
__device__ __host__
unsigned int uintFloorLog(unsigned int base, unsigned int num)
{
//unsigned int result = 0;
//for(unsigned int temp = 0; temp <= num; temp *= base)
// result++;
//return result;
return (unsigned int)(logf(float(num)) / logf(float(base)));
}
__device__ __host__
unsigned int uintCeilingLog(unsigned int base, unsigned int num)
{
unsigned int result = 0;
for(unsigned int temp = 1; temp < num; temp *= base)
result++;
return result;
}
__device__ __host__
unsigned int uintPower(unsigned int base, unsigned int pow)
{
unsigned int result = 1;
for(; pow; pow--)
result *= base;
return result;
}
__device__ __host__
unsigned int uintCeilingDiv(unsigned int dividend, unsigned int divisor)
{
return (dividend + divisor - 1) / divisor;
}
#define divRoundDown(n,s) ((n) / (s))
#define divRoundUp(n,s) (((n) / (s)) + ((((n) % (s)) > 0) ? 1 : 0))
| 3ccba220a03d4a3bb8a3931a87e74b1cb47d8115.cu | #pragma once
#include <cutil.h>
#include <cuda.h>
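// Floor of the base-`base` logarithm of `num`, computed in single precision.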
__device__ __host__
unsigned int uintFloorLog(unsigned int base, unsigned int num)
{
//unsigned int result = 0;
//for(unsigned int temp = 0; temp <= num; temp *= base)
// result++;
//return result;
return (unsigned int)(logf(float(num)) / logf(float(base)));
}
__device__ __host__
unsigned int uintCeilingLog(unsigned int base, unsigned int num)
{
unsigned int result = 0;
for(unsigned int temp = 1; temp < num; temp *= base)
result++;
return result;
}
__device__ __host__
unsigned int uintPower(unsigned int base, unsigned int pow)
{
unsigned int result = 1;
for(; pow; pow--)
result *= base;
return result;
}
__device__ __host__
unsigned int uintCeilingDiv(unsigned int dividend, unsigned int divisor)
{
return (dividend + divisor - 1) / divisor;
}
#define divRoundDown(n,s) ((n) / (s))
#define divRoundUp(n,s) (((n) / (s)) + ((((n) % (s)) > 0) ? 1 : 0))
|
393da3b1c2eb04e73a54068d6508e243dafe3a63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include <hipcub/hipcub.hpp>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#if defined(PADDLE_WITH_CUDA)
#include <hip/hip_fp16.h>
#endif
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_device_function.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
using float16 = phi::dtype::float16;
template <typename T>
static __device__ __forceinline__ T Relu(T x) {
return static_cast<T>(fmaxf(0.f, x));
}
static __device__ __forceinline__ float RealSqrt(float x) { return sqrtf(x); }
static __device__ __forceinline__ double RealSqrt(double x) { return sqrt(x); }
template <typename T>
struct PairForLayerNorm {
__device__ __forceinline__ PairForLayerNorm() {}
__device__ __forceinline__ PairForLayerNorm(const T& first, const T& second)
: first_(first), second_(second) {}
T first_;
T second_;
};
template <typename T>
struct PairForLayerNormAddFunctor {
__device__ __forceinline__ PairForLayerNorm<T> operator()(
const PairForLayerNorm<T>& p1, const PairForLayerNorm<T>& p2) {
return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_);
}
};
template <typename T, bool DoRelu, int BlockDim>
__global__ void InplaceAddReluAddLayerNormKernel(const T* y,
const T* bias_0,
const T* bias_1,
const T* scale,
T* out,
T* mean,
T* variance,
int M,
int N,
float epsilon) {
using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<T>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T shared_mem[BlockDim + 2];
for (int i = blockIdx.x; i < M; i += gridDim.x) {
int index = i * N + threadIdx.x;
// The first BlockDim elements will be saved to shared memory.
int save_index = threadIdx.x;
T* save_ptr = shared_mem;
T sum_i = 0;
T square_sum_i = 0;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
T tmp_0 = out[index];
// Add bias
T tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0;
// Relu
T tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
T tmp_3 = tmp_2 + y[index];
// Save
save_ptr[save_index] = tmp_3;
save_ptr = out;
index += blockDim.x;
save_index = index;
// For layer_norm, reduce to calculate mean and std
sum_i += tmp_3;
square_sum_i += (tmp_3 * tmp_3);
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<T>(sum_i, square_sum_i),
PairForLayerNormAddFunctor<T>());
if (threadIdx.x == 0) {
T mean_i = static_cast<T>(pair.first_ / N);
T variance_i = static_cast<T>(pair.second_ / N - mean_i * mean_i);
shared_mem[BlockDim] = mean_i;
shared_mem[BlockDim + 1] = variance_i;
if (mean) {
mean[blockIdx.x] = mean_i;
}
if (variance) {
variance[blockIdx.x] = variance_i;
}
}
__syncthreads();
T mean_i = shared_mem[BlockDim];
T std_i = static_cast<T>(RealSqrt(shared_mem[BlockDim + 1] + epsilon));
index = i * N + threadIdx.x;
// The first BlockDim elements are loaded from shared memory.
save_index = threadIdx.x;
save_ptr = shared_mem;
// For layer_norm, calculate out
for (int j = threadIdx.x; j < N; j += blockDim.x) {
T tmp_0 = (save_ptr[save_index] - mean_i) / std_i;
T tmp_1 = scale ? scale[j] * tmp_0 : tmp_0;
out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1;
save_ptr = out;
index += blockDim.x;
save_index = index;
}
}
}
template <bool DoRelu, int BlockDim>
__global__ void InplaceAddReluAddLayerNormKernel(const float16* y_data,
const float16* bias_0_data,
const float16* bias_1_data,
const float16* scale_data,
float16* out_data,
float16* mean_data,
float16* variance_data,
int M,
int N,
float epsilon) {
#if defined(PADDLE_WITH_CUDA)
const half* y = reinterpret_cast<const half*>(y_data);
const half* bias_0 = reinterpret_cast<const half*>(bias_0_data);
const half* bias_1 = reinterpret_cast<const half*>(bias_1_data);
const half* scale = reinterpret_cast<const half*>(scale_data);
half* out = reinterpret_cast<half*>(out_data);
half* mean = reinterpret_cast<half*>(mean_data);
half* variance = reinterpret_cast<half*>(variance_data);
#else
const float16* y = y_data;
const float16* bias_0 = bias_0_data;
const float16* bias_1 = bias_1_data;
const float16* scale = scale_data;
float16* out = out_data;
float16* mean = mean_data;
float16* variance = variance_data;
#endif
using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<float>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
#if defined(PADDLE_WITH_CUDA)
__shared__ half shared_mem[BlockDim + 2];
#else
__shared__ float16 shared_mem[BlockDim + 2];
#endif
for (int i = blockIdx.x; i < M; i += gridDim.x) {
int index = i * N + threadIdx.x;
// The first BlockDim elements will be saved to shared memory.
int save_index = threadIdx.x;
#if defined(PADDLE_WITH_CUDA)
half* save_ptr = shared_mem;
#else
float16* save_ptr = shared_mem;
#endif
float sum_i = 0;
float square_sum_i = 0;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
#if defined(PADDLE_WITH_CUDA)
half tmp_0 = out[index];
// Add bias
half tmp_1;
if (bias_0 != nullptr) {
tmp_1 = __hadd(tmp_0, bias_0[j]);
} else {
tmp_1 = tmp_0;
}
// Relu
half tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
half tmp_3 = __hadd(tmp_2, y[index]);
#else
float16 tmp_0 = out[index];
// Add bias
float16 tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0;
// Relu
float16 tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
float16 tmp_3 = tmp_2 + y[index];
#endif
// Save
save_ptr[save_index] = tmp_3;
save_ptr = out;
index += blockDim.x;
save_index = index;
// For layer_norm, reduce to calculate mean and std
sum_i += static_cast<float>(tmp_3);
square_sum_i += static_cast<float>(tmp_3) * static_cast<float>(tmp_3);
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<float>(sum_i, square_sum_i),
PairForLayerNormAddFunctor<float>());
if (threadIdx.x == 0) {
#if defined(PADDLE_WITH_CUDA)
half mean_i = static_cast<half>(pair.first_ / N);
#if __CUDA_ARCH__ >= 530
half variance_i = static_cast<half>(
pair.second_ / N - static_cast<float>(__hmul(mean_i, mean_i)));
#else
half variance_i =
static_cast<half>(pair.second_ / N - static_cast<float>(mean_i) *
static_cast<float>(mean_i));
#endif
#else
float16 mean_i = static_cast<float16>(pair.first_ / N);
float16 variance_i = static_cast<float16>(
pair.second_ / N - static_cast<float>(mean_i * mean_i));
#endif
shared_mem[BlockDim] = mean_i;
shared_mem[BlockDim + 1] = variance_i;
if (mean) {
mean[blockIdx.x] = mean_i;
}
if (variance) {
variance[blockIdx.x] = variance_i;
}
}
__syncthreads();
#if defined(PADDLE_WITH_CUDA)
half mean_i = shared_mem[BlockDim];
half std_i = static_cast<half>(
RealSqrt(static_cast<float>(shared_mem[BlockDim + 1]) + epsilon));
#else
float16 mean_i = shared_mem[BlockDim];
float16 std_i = static_cast<float16>(
RealSqrt(static_cast<float>(shared_mem[BlockDim + 1]) + epsilon));
#endif
index = i * N + threadIdx.x;
// The first BlockDim elements are loaded from shared memory.
save_index = threadIdx.x;
save_ptr = shared_mem;
// For layer_norm, calculate out
for (int j = threadIdx.x; j < N; j += blockDim.x) {
#if defined(PADDLE_WITH_CUDA)
#if __CUDA_ARCH__ >= 530
half tmp_0 = __hdiv(__hsub(save_ptr[save_index], mean_i), std_i);
half tmp_1 = scale ? __hmul(scale[j], tmp_0) : tmp_0;
#else
half tmp_0 = static_cast<half>((static_cast<float>(save_ptr[save_index]) -
static_cast<float>(mean_i)) /
static_cast<float>(std_i));
half tmp_1 = scale ? static_cast<half>(static_cast<float>(scale[j]) *
static_cast<float>(tmp_0))
: tmp_0;
#endif
if (bias_1 != nullptr) {
out[index] = __hadd(tmp_1, bias_1[j]);
} else {
out[index] = tmp_1;
}
#else
float16 tmp_0 = (save_ptr[save_index] - mean_i) / std_i;
float16 tmp_1 = scale ? scale[j] * tmp_0 : tmp_0;
out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1;
#endif
save_ptr = out;
index += blockDim.x;
save_index = index;
}
}
}
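// Host launcher: picks a power-of-two block size from N via RoundToPowerOfTwo and launches
// the in-place fused bias + (optional) ReLU + residual-add + LayerNorm kernel.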
template <typename T>
void AddReluAddLayerNorm(gpuStream_t stream,
bool with_relu,
int max_threads,
const T* y,
const T* bias_0,
const T* bias_1,
const T* scale,
T* out,
T* mean,
T* variance,
int M,
int N,
float epsilon) {
if (with_relu) {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel<T, true, kPowerOfTwoDim>)
, dim3(::max(max_threads / kPowerOfTwoDim, 1)),
dim3(kPowerOfTwoDim),
0,
stream,
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
} else {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel<T, false, kPowerOfTwoDim>)
, dim3(::max(max_threads / kPowerOfTwoDim, 1)),
dim3(kPowerOfTwoDim),
0,
stream,
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
}
}
template <>
void AddReluAddLayerNorm(gpuStream_t stream,
bool with_relu,
int max_threads,
const float16* y,
const float16* bias_0,
const float16* bias_1,
const float16* scale,
float16* out,
float16* mean,
float16* variance,
int M,
int N,
float epsilon) {
if (with_relu) {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel<true, kPowerOfTwoDim>)
, dim3(::max(max_threads / kPowerOfTwoDim, 1)),
dim3(kPowerOfTwoDim),
0,
stream,
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
} else {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel<false, kPowerOfTwoDim>)
, dim3(::max(max_threads / kPowerOfTwoDim, 1)),
dim3(kPowerOfTwoDim),
0,
stream,
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
}
}
template <typename T>
class FusedFCElementwiseLayerNormOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<phi::DenseTensor>("X");
auto* w = ctx.Input<phi::DenseTensor>("W");
auto* out = ctx.Output<phi::DenseTensor>("Out");
auto w_dims = w->dims();
int N = w_dims[1];
int K = w_dims[0];
int M = phi::product(x->dims()) / K;
const T* x_data = x->data<T>();
const T* w_data = w->data<T>();
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
auto* out_data = dev_ctx.template Alloc<T>(out, out->numel() * sizeof(T));
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx);
blas.GEMM(CblasNoTrans,
CblasNoTrans,
M,
N,
K,
static_cast<T>(1.0),
x_data,
w_data,
static_cast<T>(0.0),
out_data);
auto* y = ctx.Input<phi::DenseTensor>("Y");
auto* bias_0 = ctx.Input<phi::DenseTensor>("Bias0");
auto* bias_1 = ctx.Input<phi::DenseTensor>("Bias1");
auto* scale = ctx.Input<phi::DenseTensor>("Scale");
const T* y_data = y->data<T>();
const T* bias_0_data = bias_0 ? bias_0->data<T>() : nullptr;
const T* bias_1_data = bias_1 ? bias_1->data<T>() : nullptr;
const T* scale_data = scale ? scale->data<T>() : nullptr;
auto* mean = ctx.Output<phi::DenseTensor>("Mean");
auto* variance = ctx.Output<phi::DenseTensor>("Variance");
T* mean_data =
mean ? dev_ctx.template Alloc<T>(mean, mean->numel() * sizeof(T))
: nullptr;
T* variance_data = variance ? dev_ctx.template Alloc<T>(
variance, variance->numel() * sizeof(T))
: nullptr;
bool with_relu =
(ctx.Attr<std::string>("activation_type") == "relu") ? true : false;
float epsilon = ctx.Attr<float>("epsilon");
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
AddReluAddLayerNorm(dev_ctx.stream(),
with_relu,
max_threads,
y_data,
bias_0_data,
bias_1_data,
scale_data,
out_data,
mean_data,
variance_data,
M,
N,
epsilon);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
fused_fc_elementwise_layernorm,
ops::FusedFCElementwiseLayerNormOpKernel<phi::dtype::float16>,
ops::FusedFCElementwiseLayerNormOpKernel<float>,
ops::FusedFCElementwiseLayerNormOpKernel<double>);
| 393da3b1c2eb04e73a54068d6508e243dafe3a63.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include <cub/cub.cuh>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#if defined(PADDLE_WITH_CUDA)
#include <cuda_fp16.h>
#endif
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_device_function.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
using float16 = phi::dtype::float16;
template <typename T>
static __device__ __forceinline__ T Relu(T x) {
return static_cast<T>(fmaxf(0.f, x));
}
static __device__ __forceinline__ float RealSqrt(float x) { return sqrtf(x); }
static __device__ __forceinline__ double RealSqrt(double x) { return sqrt(x); }
template <typename T>
struct PairForLayerNorm {
__device__ __forceinline__ PairForLayerNorm() {}
__device__ __forceinline__ PairForLayerNorm(const T& first, const T& second)
: first_(first), second_(second) {}
T first_;
T second_;
};
template <typename T>
struct PairForLayerNormAddFunctor {
__device__ __forceinline__ PairForLayerNorm<T> operator()(
const PairForLayerNorm<T>& p1, const PairForLayerNorm<T>& p2) {
return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_);
}
};
template <typename T, bool DoRelu, int BlockDim>
__global__ void InplaceAddReluAddLayerNormKernel(const T* y,
const T* bias_0,
const T* bias_1,
const T* scale,
T* out,
T* mean,
T* variance,
int M,
int N,
float epsilon) {
using BlockReduce = cub::BlockReduce<PairForLayerNorm<T>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T shared_mem[BlockDim + 2];
for (int i = blockIdx.x; i < M; i += gridDim.x) {
int index = i * N + threadIdx.x;
// The first BlockDim elements will be saved to shared memory.
int save_index = threadIdx.x;
T* save_ptr = shared_mem;
T sum_i = 0;
T square_sum_i = 0;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
T tmp_0 = out[index];
// Add bias
T tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0;
// Relu
T tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
T tmp_3 = tmp_2 + y[index];
// Save
save_ptr[save_index] = tmp_3;
save_ptr = out;
index += blockDim.x;
save_index = index;
// For layer_norm, reduce to calculate mean and std
sum_i += tmp_3;
square_sum_i += (tmp_3 * tmp_3);
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<T>(sum_i, square_sum_i),
PairForLayerNormAddFunctor<T>());
if (threadIdx.x == 0) {
T mean_i = static_cast<T>(pair.first_ / N);
T variance_i = static_cast<T>(pair.second_ / N - mean_i * mean_i);
shared_mem[BlockDim] = mean_i;
shared_mem[BlockDim + 1] = variance_i;
if (mean) {
mean[blockIdx.x] = mean_i;
}
if (variance) {
variance[blockIdx.x] = variance_i;
}
}
__syncthreads();
T mean_i = shared_mem[BlockDim];
T std_i = static_cast<T>(RealSqrt(shared_mem[BlockDim + 1] + epsilon));
index = i * N + threadIdx.x;
// The first BlockDim elements are loaded from shared memory.
save_index = threadIdx.x;
save_ptr = shared_mem;
// For layer_norm, calculate out
for (int j = threadIdx.x; j < N; j += blockDim.x) {
T tmp_0 = (save_ptr[save_index] - mean_i) / std_i;
T tmp_1 = scale ? scale[j] * tmp_0 : tmp_0;
out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1;
save_ptr = out;
index += blockDim.x;
save_index = index;
}
}
}
template <bool DoRelu, int BlockDim>
__global__ void InplaceAddReluAddLayerNormKernel(const float16* y_data,
const float16* bias_0_data,
const float16* bias_1_data,
const float16* scale_data,
float16* out_data,
float16* mean_data,
float16* variance_data,
int M,
int N,
float epsilon) {
#if defined(PADDLE_WITH_CUDA)
const half* y = reinterpret_cast<const half*>(y_data);
const half* bias_0 = reinterpret_cast<const half*>(bias_0_data);
const half* bias_1 = reinterpret_cast<const half*>(bias_1_data);
const half* scale = reinterpret_cast<const half*>(scale_data);
half* out = reinterpret_cast<half*>(out_data);
half* mean = reinterpret_cast<half*>(mean_data);
half* variance = reinterpret_cast<half*>(variance_data);
#else
const float16* y = y_data;
const float16* bias_0 = bias_0_data;
const float16* bias_1 = bias_1_data;
const float16* scale = scale_data;
float16* out = out_data;
float16* mean = mean_data;
float16* variance = variance_data;
#endif
using BlockReduce = cub::BlockReduce<PairForLayerNorm<float>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
#if defined(PADDLE_WITH_CUDA)
__shared__ half shared_mem[BlockDim + 2];
#else
__shared__ float16 shared_mem[BlockDim + 2];
#endif
for (int i = blockIdx.x; i < M; i += gridDim.x) {
int index = i * N + threadIdx.x;
// The first BlockDim elements will be saved to shared memory.
int save_index = threadIdx.x;
#if defined(PADDLE_WITH_CUDA)
half* save_ptr = shared_mem;
#else
float16* save_ptr = shared_mem;
#endif
float sum_i = 0;
float square_sum_i = 0;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
#if defined(PADDLE_WITH_CUDA)
half tmp_0 = out[index];
// Add bias
half tmp_1;
if (bias_0 != nullptr) {
tmp_1 = __hadd(tmp_0, bias_0[j]);
} else {
tmp_1 = tmp_0;
}
// Relu
half tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
half tmp_3 = __hadd(tmp_2, y[index]);
#else
float16 tmp_0 = out[index];
// Add bias
float16 tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0;
// Relu
float16 tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
float16 tmp_3 = tmp_2 + y[index];
#endif
// Save
save_ptr[save_index] = tmp_3;
save_ptr = out;
index += blockDim.x;
save_index = index;
// For layer_norm, reduce to calculate mean and std
sum_i += static_cast<float>(tmp_3);
square_sum_i += static_cast<float>(tmp_3) * static_cast<float>(tmp_3);
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<float>(sum_i, square_sum_i),
PairForLayerNormAddFunctor<float>());
if (threadIdx.x == 0) {
#if defined(PADDLE_WITH_CUDA)
half mean_i = static_cast<half>(pair.first_ / N);
#if __CUDA_ARCH__ >= 530
half variance_i = static_cast<half>(
pair.second_ / N - static_cast<float>(__hmul(mean_i, mean_i)));
#else
half variance_i =
static_cast<half>(pair.second_ / N - static_cast<float>(mean_i) *
static_cast<float>(mean_i));
#endif
#else
float16 mean_i = static_cast<float16>(pair.first_ / N);
float16 variance_i = static_cast<float16>(
pair.second_ / N - static_cast<float>(mean_i * mean_i));
#endif
shared_mem[BlockDim] = mean_i;
shared_mem[BlockDim + 1] = variance_i;
if (mean) {
mean[blockIdx.x] = mean_i;
}
if (variance) {
variance[blockIdx.x] = variance_i;
}
}
__syncthreads();
#if defined(PADDLE_WITH_CUDA)
half mean_i = shared_mem[BlockDim];
half std_i = static_cast<half>(
RealSqrt(static_cast<float>(shared_mem[BlockDim + 1]) + epsilon));
#else
float16 mean_i = shared_mem[BlockDim];
float16 std_i = static_cast<float16>(
RealSqrt(static_cast<float>(shared_mem[BlockDim + 1]) + epsilon));
#endif
index = i * N + threadIdx.x;
// The first BlockDim elements are loaded from shared memory.
save_index = threadIdx.x;
save_ptr = shared_mem;
// For layer_norm, calculate out
for (int j = threadIdx.x; j < N; j += blockDim.x) {
#if defined(PADDLE_WITH_CUDA)
#if __CUDA_ARCH__ >= 530
half tmp_0 = __hdiv(__hsub(save_ptr[save_index], mean_i), std_i);
half tmp_1 = scale ? __hmul(scale[j], tmp_0) : tmp_0;
#else
half tmp_0 = static_cast<half>((static_cast<float>(save_ptr[save_index]) -
static_cast<float>(mean_i)) /
static_cast<float>(std_i));
half tmp_1 = scale ? static_cast<half>(static_cast<float>(scale[j]) *
static_cast<float>(tmp_0))
: tmp_0;
#endif
if (bias_1 != nullptr) {
out[index] = __hadd(tmp_1, bias_1[j]);
} else {
out[index] = tmp_1;
}
#else
float16 tmp_0 = (save_ptr[save_index] - mean_i) / std_i;
float16 tmp_1 = scale ? scale[j] * tmp_0 : tmp_0;
out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1;
#endif
save_ptr = out;
index += blockDim.x;
save_index = index;
}
}
}
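// Host launcher: picks a power-of-two block size from N via RoundToPowerOfTwo and launches
// the in-place fused bias + (optional) ReLU + residual-add + LayerNorm kernel.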
template <typename T>
void AddReluAddLayerNorm(gpuStream_t stream,
bool with_relu,
int max_threads,
const T* y,
const T* bias_0,
const T* bias_1,
const T* scale,
T* out,
T* mean,
T* variance,
int M,
int N,
float epsilon) {
if (with_relu) {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
InplaceAddReluAddLayerNormKernel<T, true, kPowerOfTwoDim>
<<<std::max(max_threads / kPowerOfTwoDim, 1),
kPowerOfTwoDim,
0,
stream>>>(
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
} else {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
InplaceAddReluAddLayerNormKernel<T, false, kPowerOfTwoDim>
<<<std::max(max_threads / kPowerOfTwoDim, 1),
kPowerOfTwoDim,
0,
stream>>>(
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
}
}
template <>
void AddReluAddLayerNorm(gpuStream_t stream,
bool with_relu,
int max_threads,
const float16* y,
const float16* bias_0,
const float16* bias_1,
const float16* scale,
float16* out,
float16* mean,
float16* variance,
int M,
int N,
float epsilon) {
if (with_relu) {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
InplaceAddReluAddLayerNormKernel<true, kPowerOfTwoDim>
<<<std::max(max_threads / kPowerOfTwoDim, 1),
kPowerOfTwoDim,
0,
stream>>>(
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
} else {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
InplaceAddReluAddLayerNormKernel<false, kPowerOfTwoDim>
<<<std::max(max_threads / kPowerOfTwoDim, 1),
kPowerOfTwoDim,
0,
stream>>>(
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
}
}
template <typename T>
class FusedFCElementwiseLayerNormOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<phi::DenseTensor>("X");
auto* w = ctx.Input<phi::DenseTensor>("W");
auto* out = ctx.Output<phi::DenseTensor>("Out");
auto w_dims = w->dims();
int N = w_dims[1];
int K = w_dims[0];
int M = phi::product(x->dims()) / K;
const T* x_data = x->data<T>();
const T* w_data = w->data<T>();
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
auto* out_data = dev_ctx.template Alloc<T>(out, out->numel() * sizeof(T));
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx);
blas.GEMM(CblasNoTrans,
CblasNoTrans,
M,
N,
K,
static_cast<T>(1.0),
x_data,
w_data,
static_cast<T>(0.0),
out_data);
auto* y = ctx.Input<phi::DenseTensor>("Y");
auto* bias_0 = ctx.Input<phi::DenseTensor>("Bias0");
auto* bias_1 = ctx.Input<phi::DenseTensor>("Bias1");
auto* scale = ctx.Input<phi::DenseTensor>("Scale");
const T* y_data = y->data<T>();
const T* bias_0_data = bias_0 ? bias_0->data<T>() : nullptr;
const T* bias_1_data = bias_1 ? bias_1->data<T>() : nullptr;
const T* scale_data = scale ? scale->data<T>() : nullptr;
auto* mean = ctx.Output<phi::DenseTensor>("Mean");
auto* variance = ctx.Output<phi::DenseTensor>("Variance");
T* mean_data =
mean ? dev_ctx.template Alloc<T>(mean, mean->numel() * sizeof(T))
: nullptr;
T* variance_data = variance ? dev_ctx.template Alloc<T>(
variance, variance->numel() * sizeof(T))
: nullptr;
bool with_relu =
(ctx.Attr<std::string>("activation_type") == "relu") ? true : false;
float epsilon = ctx.Attr<float>("epsilon");
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
AddReluAddLayerNorm(dev_ctx.stream(),
with_relu,
max_threads,
y_data,
bias_0_data,
bias_1_data,
scale_data,
out_data,
mean_data,
variance_data,
M,
N,
epsilon);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
fused_fc_elementwise_layernorm,
ops::FusedFCElementwiseLayerNormOpKernel<phi::dtype::float16>,
ops::FusedFCElementwiseLayerNormOpKernel<float>,
ops::FusedFCElementwiseLayerNormOpKernel<double>);
|
c02b82518478781ca30bb18a6640cac5f9ba5f02.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* histogram.cu
*
* Microbenchmark for histogram, a statistical computation
* for image processing.
*
* Build with: nvcc -I ../chLib <options> histogram.cu ..\chLib\pgm.cu -lnpp -lpthread -lrt
*
* Make sure to include pgm.cu for the image file I/O support.
*
* To avoid warnings about double precision support, specify the
* target gpu-architecture, e.g.:
* nvcc --gpu-architecture sm_13 -I ../chLib <options> histogram.cu ..\chLib\pgm.cu
*
* Requires: SM 1.1, for global atomics.
*
* Copyright (c) 2013, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <chError.h>
#include <chCommandLine.h>
#include <chAssert.h>
#include <chThread.h>
#include <chTimer.h>
#include <chUtil.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include "pgm.h"
texture<unsigned char, 2> texImage;
#include "histogramPerGrid.cuh"
#include "histogramPerBlock.cuh"
#include "histogramPerBlockOffset.cuh"
#include "histogramPerBlockReduce.cuh"
#include "histogramPerThread64.cuh"
#include "histogramPerThread4x64.cuh"
#include "histogramPerThread4x32.cuh"
#include "histogramNPP.cuh"
using namespace cudahandbook::threading;
workerThread *g_CPUThreadPool;
int g_numCPUCores;
int
bCompareHistograms( const unsigned int *p, const unsigned int *q, int N )
{
for ( int i = 0; i < N; i++ ) {
if ( p[i] != q[i] ) {
printf( "Histogram mismatch at %d: p[%d] == %d, q[%d] == %d\n", i, i, p[i], i, q[i] );
return 1;
}
}
return 0;
}
void
histCPU(
unsigned int *pHist,
int w, int h,
unsigned char *img, int imgPitch )
{
memset( pHist, 0, 256*sizeof(int) );
for ( int row = 0; row < h; row += 1 ) {
unsigned char *pi = img+row*imgPitch;
for ( int col = 0; col < w; col += 1 ) {
pHist[pi[col]] += 1;
}
}
}
float
hist1DCPU(
unsigned int *pHist,
unsigned char *p, size_t N )
{
chTimerTimestamp start, end;
chTimerGetTime( &start );
memset( pHist, 0, 256*sizeof(int) );
for ( size_t i = 0; i < N; i++ ) {
pHist[ p[i] ] += 1;
}
chTimerGetTime( &end );
return (float) chTimerElapsedTime( &start, &end ) * 1000.0f;
}
struct histDelegation {
// input data for this thread only
unsigned char *pData;
size_t N;
// output histogram for this thread only
unsigned int privateHist[256];
};
static void
histWorkerThread( void *_p )
{
histDelegation *p = (histDelegation *) _p;
unsigned char *pData = p->pData;
memset( p->privateHist, 0, sizeof(p->privateHist) );
for (size_t i = 0; i < p->N; i++ ) {
p->privateHist[ pData[i] ] += 1;
}
}
float
hist1DCPU_threaded(
unsigned int *pHist,
unsigned char *p, size_t N )
{
chTimerTimestamp start, end;
chTimerGetTime( &start );
histDelegation *phist = new histDelegation[ g_numCPUCores ];
size_t elementsPerCore = INTDIVIDE_CEILING( N, g_numCPUCores );
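    // Hand each CPU worker a contiguous chunk and a private histogram; the
    // per-thread histograms are merged into pHist after waitAll() below.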
for ( size_t i = 0; i < g_numCPUCores; i++ ) {
phist[i].pData = p;
phist[i].N = (N) ? elementsPerCore : 0;
p += elementsPerCore;
N -= elementsPerCore;
g_CPUThreadPool[i].delegateAsynchronous(
histWorkerThread,
&phist[i] );
}
workerThread::waitAll( g_CPUThreadPool, g_numCPUCores );
memset( pHist, 0, 256*sizeof(unsigned int) );
for ( size_t i = 0; i < g_numCPUCores; i++ ) {
for ( int j = 0; j < 256; j++ ) {
pHist[j] += phist[i].privateHist[j];
}
}
delete[] phist;
chTimerGetTime( &end );
return (float) chTimerElapsedTime( &start, &end ) * 1000.0f;
}
bool
TestHistogram(
double *pixelsPerSecond, // passback to report performance
const char *name,
const unsigned char *dptrBase, size_t dPitch,
int w, int h, // width and height of input
const unsigned int *hrefHist, // host reference data
dim3 threads,
void (*pfnHistogram)(
float *ms,
unsigned int *pHist,
const unsigned char *dptrBase, size_t dPitch,
int xUL, int yUL, int w, int h,
dim3 threads ),
int cIterations = 1,
const char *outputFilename = NULL
)
{
hipError_t status;
bool ret = false;
// Histogram for 8-bit grayscale image (2^8=256)
unsigned int hHist[256];
unsigned int *dHist = NULL;
float ms;
CUDART_CHECK( hipMalloc( (void **) &dHist, 256*sizeof(int) ) );
CUDART_CHECK( hipMemset( dHist, 0, 256*sizeof(int) ) );
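    // First run validates the GPU result against the CPU reference before timing.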
pfnHistogram( &ms, dHist, dptrBase, dPitch, 0, 0, w, h, threads );
CUDART_CHECK( hipMemcpy( hHist, dHist, sizeof(hHist), hipMemcpyDeviceToHost ) );
if ( bCompareHistograms( hHist, hrefHist, 256 ) ) {
printf( "%s: Histograms miscompare\n", name );
goto Error;
}
for ( int i = 0; i < cIterations; i++ ) {
pfnHistogram( &ms, dHist, dptrBase, dPitch, 0, 0, w, h, threads );
}
*pixelsPerSecond = (double) w*h*cIterations*1000.0 / ms;
CUDART_CHECK( hipMemcpy( hHist, dHist, sizeof(hHist), hipMemcpyDeviceToHost ) );
if ( outputFilename ) {
FILE *f = fopen( outputFilename, "w" );
if ( ! f )
goto Error;
for ( int i = 0; i < 256; i++ ) {
fprintf( f, "%d\t", hHist[i] );
}
fprintf( f, "\n" );
fclose( f );
}
ret = true;
Error:
hipFree( dHist );
return ret;
}
int
main(int argc, char *argv[])
{
int ret = 1;
hipError_t status;
unsigned char *hidata = NULL;
unsigned char *didata = NULL;
unsigned int cpuHist[256];
unsigned int HostPitch, DevicePitch;
int w, h;
bool bTesla = false;
dim3 threads;
char *inputFilename = "coins.pgm";
char *outputFilename = NULL;
hipArray *pArrayImage = NULL;
hipChannelFormatDesc desc = hipCreateChannelDesc<unsigned char>();
{
g_numCPUCores = processorCount();
g_CPUThreadPool = new workerThread[g_numCPUCores];
for ( size_t i = 0; i < g_numCPUCores; i++ ) {
if ( ! g_CPUThreadPool[i].initialize( ) ) {
fprintf( stderr, "Error initializing thread pool\n" );
return 1;
}
}
}
if ( chCommandLineGetBool( "help", argc, argv ) ) {
printf( "Usage:\n" );
printf( " --input <filename>: specify input filename (must be PGM)\n" );
printf( " --output <filename>: Write PGM of correlation values (0..255) to <filename>.\n" );
printf( " --padWidth <value>: pad input image width to specified value\n" );
printf( " --padHeight <value>: pad input image height to specified value\n" );
printf( " --random <numvalues>: overrides input filename and fills image with random data in the range [0..numvalues)\n" );
printf( " --stride <value>: specifies stride for random values (e.g., 2 means use even values only)\n" );
printf( " The random parameter must be in the range 1..256, and random/stride must be 256 or less.\n" );
printf( "\nDefault values are coins.pgm and no output file or padding\n" );
return 0;
}
CUDART_CHECK( hipSetDeviceFlags( hipDeviceMapHost ) );
CUDART_CHECK( hipDeviceSetCacheConfig( hipFuncCachePreferShared ) );
if ( chCommandLineGet( &inputFilename, "input", argc, argv ) ) {
printf( "Reading from image file %s\n", inputFilename );
}
chCommandLineGet( &outputFilename, "output", argc, argv );
{
int padWidth = 1024;//0;
int padHeight = 1024;//0;
int numvalues = 0;
if ( chCommandLineGet( &padWidth, "padWidth", argc, argv ) ) {
if ( ! chCommandLineGet( &padHeight, "padHeight", argc, argv ) ) {
printf( "Must specify both --padWidth and --padHeight\n" );
goto Error;
}
}
else {
if ( chCommandLineGet( &padHeight, "padHeight", argc, argv ) ) {
printf( "Must specify both --padWidth and --padHeight\n" );
goto Error;
}
}
if ( chCommandLineGet( &numvalues, "random", argc, argv ) ) {
int stride = 1;
if ( chCommandLineGet( &stride, "stride", argc, argv ) ) {
if ( numvalues*stride > 256 ) {
printf( "stride*random must be <= 256\n" );
goto Error;
}
}
if ( 0==padWidth || 0==padHeight ) {
printf( "--random requires --padWidth and padHeight (to specify input size)\n" );
goto Error;
}
printf( "%d pixels, random, %d values with stride %d\n",
padWidth*padHeight, numvalues, stride );
w = padWidth;
            h = padHeight;
hidata = (unsigned char *) malloc( w*h );
if ( ! hidata )
goto Error;
size_t dPitch;
CUDART_CHECK( hipMallocPitch( &didata, &dPitch, padWidth, padHeight ) );
DevicePitch = dPitch;
srand(time(NULL));
for ( int row = 0; row < h; row++ ) {
unsigned char *p = hidata+row*w;
for ( int col = 0; col < w; col++ ) {
int val = rand() % numvalues;
val *= stride;
p[col] = (unsigned char) val;
}
}
CUDART_CHECK( hipMemcpy2D( didata, DevicePitch, hidata, padWidth, padWidth, padHeight, hipMemcpyHostToDevice ) );
}
else {
if ( pgmLoad( inputFilename, &hidata, &HostPitch, &didata, &DevicePitch, &w, &h, padWidth, padHeight) )
goto Error;
printf( "%d pixels, sourced from image file %s\n", w*h, inputFilename );
}
}
CUDART_CHECK( hipMallocArray( &pArrayImage, &desc, w, h ) );
CUDART_CHECK( hipMemcpyToArray( pArrayImage, 0, 0, hidata, w*h, hipMemcpyHostToDevice ) );
CUDART_CHECK( hipBindTextureToArray( texImage, pArrayImage ) );
{
hipDeviceProp_t prop;
CUDART_CHECK( hipGetDeviceProperties( &prop, 0 ) );
if ( prop.major < 2 ) {
bTesla = true;
}
}
histCPU( cpuHist, w, h, hidata, w );
{
unsigned int cpuHist2[256], cpuHist3[256];
float timeST = hist1DCPU( cpuHist2, hidata, w*h );
if ( bCompareHistograms( cpuHist, cpuHist2, 256 ) ) {
printf( "Linear and 2D histograms do not agree\n" );
exit(1);
}
printf("Single-threaded: %.2f Mpix/s\n", w*h/timeST/1e3 );
float timeMT = hist1DCPU_threaded( cpuHist3, hidata, w*h );
if ( bCompareHistograms( cpuHist, cpuHist3, 256 ) ) {
printf( "Multithreaded and 2D histograms do not agree\n" );
exit(1);
}
double pixPerms = w*h/timeMT;
printf( "Multithreaded (%d cores) is %.2fx faster (%.2f Mpix/s)\n",
g_numCPUCores,
timeST/timeMT,
pixPerms/1e3 );
}
#define TEST_VECTOR( baseName, bPrintNeighborhood, cIterations, outfile ) \
{ \
double pixelsPerSecond; \
if ( ! TestHistogram( &pixelsPerSecond, \
#baseName, \
didata, DevicePitch, \
w, h, \
cpuHist, \
threads, \
baseName, \
cIterations, outfile ) ) { \
printf( "Error\n" ); \
ret = 1; \
goto Error; \
} \
printf( "%s: %.2f Mpix/s\n", \
#baseName, pixelsPerSecond/1e6 ); \
}
if ( w != DevicePitch ) {
printf( "1D versions only work if width and pitch are the same\n" );
}
threads = dim3( 32, 8, 1 );
TEST_VECTOR( GPUhistogramPerGrid, false, 1, NULL );
threads = dim3( 16, 4, 1 );
TEST_VECTOR( GPUhistogramPerBlock, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlock4x, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlockOffset, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlock4xOffset, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlockReduce, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlockReduceOffset, false, 1, NULL );
threads = dim3( 16, 4, 1 );
if ( ! bTesla ) {
TEST_VECTOR( GPUhistogramPerThread64, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x64, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x64_PeriodicMerge, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x32, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x32_PeriodicMerge, false, 1, NULL );
}
TEST_VECTOR( GPUhistogramNPP, false, 1, NULL );
ret = 0;
Error:
free( hidata );
hipFree(didata);
hipFreeArray(pArrayImage);
return ret;
}
| c02b82518478781ca30bb18a6640cac5f9ba5f02.cu | /*
*
* histogram.cu
*
* Microbenchmark for histogram, a statistical computation
* for image processing.
*
* Build with: nvcc -I ../chLib <options> histogram.cu ..\chLib\pgm.cu -lnpp -lpthread -lrt
*
* Make sure to include pgm.cu for the image file I/O support.
*
* To avoid warnings about double precision support, specify the
* target gpu-architecture, e.g.:
* nvcc --gpu-architecture sm_13 -I ../chLib <options> histogram.cu ..\chLib\pgm.cu
*
* Requires: SM 1.1, for global atomics.
*
* Copyright (c) 2013, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <chError.h>
#include <chCommandLine.h>
#include <chAssert.h>
#include <chThread.h>
#include <chTimer.h>
#include <chUtil.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include "pgm.h"
texture<unsigned char, 2> texImage;
#include "histogramPerGrid.cuh"
#include "histogramPerBlock.cuh"
#include "histogramPerBlockOffset.cuh"
#include "histogramPerBlockReduce.cuh"
#include "histogramPerThread64.cuh"
#include "histogramPerThread4x64.cuh"
#include "histogramPerThread4x32.cuh"
#include "histogramNPP.cuh"
using namespace cudahandbook::threading;
workerThread *g_CPUThreadPool;
int g_numCPUCores;
int
bCompareHistograms( const unsigned int *p, const unsigned int *q, int N )
{
for ( int i = 0; i < N; i++ ) {
if ( p[i] != q[i] ) {
printf( "Histogram mismatch at %d: p[%d] == %d, q[%d] == %d\n", i, i, p[i], i, q[i] );
return 1;
}
}
return 0;
}
void
histCPU(
unsigned int *pHist,
int w, int h,
unsigned char *img, int imgPitch )
{
memset( pHist, 0, 256*sizeof(int) );
for ( int row = 0; row < h; row += 1 ) {
unsigned char *pi = img+row*imgPitch;
for ( int col = 0; col < w; col += 1 ) {
pHist[pi[col]] += 1;
}
}
}
float
hist1DCPU(
unsigned int *pHist,
unsigned char *p, size_t N )
{
chTimerTimestamp start, end;
chTimerGetTime( &start );
memset( pHist, 0, 256*sizeof(int) );
for ( size_t i = 0; i < N; i++ ) {
pHist[ p[i] ] += 1;
}
chTimerGetTime( &end );
return (float) chTimerElapsedTime( &start, &end ) * 1000.0f;
}
struct histDelegation {
// input data for this thread only
unsigned char *pData;
size_t N;
// output histogram for this thread only
unsigned int privateHist[256];
};
static void
histWorkerThread( void *_p )
{
histDelegation *p = (histDelegation *) _p;
unsigned char *pData = p->pData;
memset( p->privateHist, 0, sizeof(p->privateHist) );
for (size_t i = 0; i < p->N; i++ ) {
p->privateHist[ pData[i] ] += 1;
}
}
float
hist1DCPU_threaded(
unsigned int *pHist,
unsigned char *p, size_t N )
{
chTimerTimestamp start, end;
chTimerGetTime( &start );
histDelegation *phist = new histDelegation[ g_numCPUCores ];
size_t elementsPerCore = INTDIVIDE_CEILING( N, g_numCPUCores );
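    // Hand each CPU worker a contiguous chunk and a private histogram; the
    // per-thread histograms are merged into pHist after waitAll() below.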
for ( size_t i = 0; i < g_numCPUCores; i++ ) {
phist[i].pData = p;
phist[i].N = (N) ? elementsPerCore : 0;
p += elementsPerCore;
N -= elementsPerCore;
g_CPUThreadPool[i].delegateAsynchronous(
histWorkerThread,
&phist[i] );
}
workerThread::waitAll( g_CPUThreadPool, g_numCPUCores );
memset( pHist, 0, 256*sizeof(unsigned int) );
for ( size_t i = 0; i < g_numCPUCores; i++ ) {
for ( int j = 0; j < 256; j++ ) {
pHist[j] += phist[i].privateHist[j];
}
}
delete[] phist;
chTimerGetTime( &end );
return (float) chTimerElapsedTime( &start, &end ) * 1000.0f;
}
bool
TestHistogram(
double *pixelsPerSecond, // passback to report performance
const char *name,
const unsigned char *dptrBase, size_t dPitch,
int w, int h, // width and height of input
const unsigned int *hrefHist, // host reference data
dim3 threads,
void (*pfnHistogram)(
float *ms,
unsigned int *pHist,
const unsigned char *dptrBase, size_t dPitch,
int xUL, int yUL, int w, int h,
dim3 threads ),
int cIterations = 1,
const char *outputFilename = NULL
)
{
cudaError_t status;
bool ret = false;
// Histogram for 8-bit grayscale image (2^8=256)
unsigned int hHist[256];
unsigned int *dHist = NULL;
float ms;
CUDART_CHECK( cudaMalloc( (void **) &dHist, 256*sizeof(int) ) );
CUDART_CHECK( cudaMemset( dHist, 0, 256*sizeof(int) ) );
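    // First run validates the GPU result against the CPU reference before timing.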
pfnHistogram( &ms, dHist, dptrBase, dPitch, 0, 0, w, h, threads );
CUDART_CHECK( cudaMemcpy( hHist, dHist, sizeof(hHist), cudaMemcpyDeviceToHost ) );
if ( bCompareHistograms( hHist, hrefHist, 256 ) ) {
printf( "%s: Histograms miscompare\n", name );
goto Error;
}
for ( int i = 0; i < cIterations; i++ ) {
pfnHistogram( &ms, dHist, dptrBase, dPitch, 0, 0, w, h, threads );
}
*pixelsPerSecond = (double) w*h*cIterations*1000.0 / ms;
CUDART_CHECK( cudaMemcpy( hHist, dHist, sizeof(hHist), cudaMemcpyDeviceToHost ) );
if ( outputFilename ) {
FILE *f = fopen( outputFilename, "w" );
if ( ! f )
goto Error;
for ( int i = 0; i < 256; i++ ) {
fprintf( f, "%d\t", hHist[i] );
}
fprintf( f, "\n" );
fclose( f );
}
ret = true;
Error:
cudaFree( dHist );
return ret;
}
int
main(int argc, char *argv[])
{
int ret = 1;
cudaError_t status;
unsigned char *hidata = NULL;
unsigned char *didata = NULL;
unsigned int cpuHist[256];
unsigned int HostPitch, DevicePitch;
int w, h;
bool bTesla = false;
dim3 threads;
char *inputFilename = "coins.pgm";
char *outputFilename = NULL;
cudaArray *pArrayImage = NULL;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
{
g_numCPUCores = processorCount();
g_CPUThreadPool = new workerThread[g_numCPUCores];
for ( size_t i = 0; i < g_numCPUCores; i++ ) {
if ( ! g_CPUThreadPool[i].initialize( ) ) {
fprintf( stderr, "Error initializing thread pool\n" );
return 1;
}
}
}
if ( chCommandLineGetBool( "help", argc, argv ) ) {
printf( "Usage:\n" );
printf( " --input <filename>: specify input filename (must be PGM)\n" );
printf( " --output <filename>: Write PGM of correlation values (0..255) to <filename>.\n" );
printf( " --padWidth <value>: pad input image width to specified value\n" );
printf( " --padHeight <value>: pad input image height to specified value\n" );
printf( " --random <numvalues>: overrides input filename and fills image with random data in the range [0..numvalues)\n" );
printf( " --stride <value>: specifies stride for random values (e.g., 2 means use even values only)\n" );
printf( " The random parameter must be in the range 1..256, and random/stride must be 256 or less.\n" );
printf( "\nDefault values are coins.pgm and no output file or padding\n" );
return 0;
}
CUDART_CHECK( cudaSetDeviceFlags( cudaDeviceMapHost ) );
CUDART_CHECK( cudaDeviceSetCacheConfig( cudaFuncCachePreferShared ) );
if ( chCommandLineGet( &inputFilename, "input", argc, argv ) ) {
printf( "Reading from image file %s\n", inputFilename );
}
chCommandLineGet( &outputFilename, "output", argc, argv );
{
int padWidth = 1024;//0;
int padHeight = 1024;//0;
int numvalues = 0;
if ( chCommandLineGet( &padWidth, "padWidth", argc, argv ) ) {
if ( ! chCommandLineGet( &padHeight, "padHeight", argc, argv ) ) {
printf( "Must specify both --padWidth and --padHeight\n" );
goto Error;
}
}
else {
if ( chCommandLineGet( &padHeight, "padHeight", argc, argv ) ) {
printf( "Must specify both --padWidth and --padHeight\n" );
goto Error;
}
}
if ( chCommandLineGet( &numvalues, "random", argc, argv ) ) {
int stride = 1;
if ( chCommandLineGet( &stride, "stride", argc, argv ) ) {
if ( numvalues*stride > 256 ) {
printf( "stride*random must be <= 256\n" );
goto Error;
}
}
if ( 0==padWidth || 0==padHeight ) {
printf( "--random requires --padWidth and padHeight (to specify input size)\n" );
goto Error;
}
printf( "%d pixels, random, %d values with stride %d\n",
padWidth*padHeight, numvalues, stride );
w = padWidth;
            h = padHeight;
hidata = (unsigned char *) malloc( w*h );
if ( ! hidata )
goto Error;
size_t dPitch;
CUDART_CHECK( cudaMallocPitch( &didata, &dPitch, padWidth, padHeight ) );
DevicePitch = dPitch;
srand(time(NULL));
for ( int row = 0; row < h; row++ ) {
unsigned char *p = hidata+row*w;
for ( int col = 0; col < w; col++ ) {
int val = rand() % numvalues;
val *= stride;
p[col] = (unsigned char) val;
}
}
CUDART_CHECK( cudaMemcpy2D( didata, DevicePitch, hidata, padWidth, padWidth, padHeight, cudaMemcpyHostToDevice ) );
}
else {
if ( pgmLoad( inputFilename, &hidata, &HostPitch, &didata, &DevicePitch, &w, &h, padWidth, padHeight) )
goto Error;
printf( "%d pixels, sourced from image file %s\n", w*h, inputFilename );
}
}
CUDART_CHECK( cudaMallocArray( &pArrayImage, &desc, w, h ) );
CUDART_CHECK( cudaMemcpyToArray( pArrayImage, 0, 0, hidata, w*h, cudaMemcpyHostToDevice ) );
CUDART_CHECK( cudaBindTextureToArray( texImage, pArrayImage ) );
{
cudaDeviceProp prop;
CUDART_CHECK( cudaGetDeviceProperties( &prop, 0 ) );
if ( prop.major < 2 ) {
bTesla = true;
}
}
histCPU( cpuHist, w, h, hidata, w );
{
unsigned int cpuHist2[256], cpuHist3[256];
float timeST = hist1DCPU( cpuHist2, hidata, w*h );
if ( bCompareHistograms( cpuHist, cpuHist2, 256 ) ) {
printf( "Linear and 2D histograms do not agree\n" );
exit(1);
}
printf("Single-threaded: %.2f Mpix/s\n", w*h/timeST/1e3 );
float timeMT = hist1DCPU_threaded( cpuHist3, hidata, w*h );
if ( bCompareHistograms( cpuHist, cpuHist3, 256 ) ) {
printf( "Multithreaded and 2D histograms do not agree\n" );
exit(1);
}
double pixPerms = w*h/timeMT;
printf( "Multithreaded (%d cores) is %.2fx faster (%.2f Mpix/s)\n",
g_numCPUCores,
timeST/timeMT,
pixPerms/1e3 );
}
#define TEST_VECTOR( baseName, bPrintNeighborhood, cIterations, outfile ) \
{ \
double pixelsPerSecond; \
if ( ! TestHistogram( &pixelsPerSecond, \
#baseName, \
didata, DevicePitch, \
w, h, \
cpuHist, \
threads, \
baseName, \
cIterations, outfile ) ) { \
printf( "Error\n" ); \
ret = 1; \
goto Error; \
} \
printf( "%s: %.2f Mpix/s\n", \
#baseName, pixelsPerSecond/1e6 ); \
}
if ( w != DevicePitch ) {
printf( "1D versions only work if width and pitch are the same\n" );
}
threads = dim3( 32, 8, 1 );
TEST_VECTOR( GPUhistogramPerGrid, false, 1, NULL );
threads = dim3( 16, 4, 1 );
TEST_VECTOR( GPUhistogramPerBlock, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlock4x, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlockOffset, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlock4xOffset, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlockReduce, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerBlockReduceOffset, false, 1, NULL );
threads = dim3( 16, 4, 1 );
if ( ! bTesla ) {
TEST_VECTOR( GPUhistogramPerThread64, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x64, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x64_PeriodicMerge, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x32, false, 1, NULL );
TEST_VECTOR( GPUhistogramPerThread4x32_PeriodicMerge, false, 1, NULL );
}
TEST_VECTOR( GPUhistogramNPP, false, 1, NULL );
ret = 0;
Error:
free( hidata );
cudaFree(didata);
cudaFreeArray(pArrayImage);
return ret;
}
|
87cefcf852ce722a2440dfb151f631d21fe36520.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) MONAI Consortium
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Adapted from https://github.com/abadams/permutohedral
which has the following license...
MIT License
Copyright (c) 2020 Andrew Adams
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#define BLOCK_SIZE 32
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <torch/extension.h>
#include <THH/THHAtomics.cuh>
#include "hash_table.cuh"
#include "permutohedral.h"
#include "utils/meta_macros.h"
template <typename scalar_t>
struct MatrixEntry {
int index;
scalar_t weight;
};
template <typename scalar_t, int pd>
__global__ static void createMatrix(
const int elementCount,
const scalar_t* positions,
const scalar_t* values,
const scalar_t* scaleFactor,
MatrixEntry<scalar_t>* matrix) {
const int threadId = threadIdx.x;
const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE;
const bool outOfBounds = idx >= elementCount;
scalar_t myElevated[pd + 1];
const scalar_t* myPosition = positions + idx * pd;
int myGreedy[pd + 1];
int myRank[pd + 1];
scalar_t myBarycentric[pd + 2];
__shared__ short keys[pd * BLOCK_SIZE];
short* myKey = keys + threadId * pd;
if (!outOfBounds) {
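    // Elevate the pd-dimensional position onto the (pd+1)-dimensional hyperplane of
    // the permutohedral lattice, folding in the per-axis scale factors.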
myElevated[pd] = -pd * myPosition[pd - 1] * scaleFactor[pd - 1];
for (int i = pd - 1; i > 0; i--) {
myElevated[i] =
myElevated[i + 1] - i * (myPosition[i - 1]) * scaleFactor[i - 1] + (i + 2) * myPosition[i] * scaleFactor[i];
}
myElevated[0] = myElevated[1] + 2 * myPosition[0] * scaleFactor[0];
// find the closest zero-colored lattice point
// greedily search for the closest zero-colored lattice point
signed short sum = 0;
for (int i = 0; i <= pd; i++) {
scalar_t v = myElevated[i] * (1.0f / (pd + 1));
scalar_t up = ceilf(v) * (pd + 1);
scalar_t down = floorf(v) * (pd + 1);
myGreedy[i] = (signed short)(up - myElevated[i] < myElevated[i] - down ? up : down);
sum += myGreedy[i];
}
sum /= pd + 1;
// sort differential to find the permutation between this simplex and the canonical one
for (int i = 0; i <= pd; i++) {
myRank[i] = 0;
for (int j = 0; j <= pd; j++) {
scalar_t iDiff = myElevated[i] - myGreedy[i];
scalar_t jDiff = myElevated[j] - myGreedy[j];
if (iDiff < jDiff || (iDiff == jDiff && i > j)) {
myRank[i]++;
}
}
}
if (sum > 0) // sum too large, need to bring down the ones with the smallest differential
{
for (int i = 0; i <= pd; i++) {
if (myRank[i] >= pd + 1 - sum) {
myGreedy[i] -= (pd + 1);
myRank[i] += sum - (pd + 1);
} else {
myRank[i] += sum;
}
}
} else if (sum < 0) // sum too small, need to bring up the ones with largest differential
{
for (int i = 0; i <= pd; i++) {
if (myRank[i] < -sum) {
myGreedy[i] += (pd + 1);
myRank[i] += sum + (pd + 1);
} else {
myRank[i] += sum;
}
}
}
#ifdef LINEAR_D_MEMORY
for (int i = 0; i <= pd; i++) {
table_zeros[idx * (pd + 1) + i] = myGreedy[i];
table_rank[idx * (pd + 1) + i] = myRank[i];
}
#endif
// turn delta into barycentric coords
for (int i = 0; i <= pd + 1; i++) {
myBarycentric[i] = 0;
}
for (int i = 0; i <= pd; i++) {
scalar_t delta = (myElevated[i] - myGreedy[i]) * (1.0f / (pd + 1));
myBarycentric[pd - myRank[i]] += delta;
myBarycentric[pd + 1 - myRank[i]] -= delta;
}
myBarycentric[0] += 1.0f + myBarycentric[pd + 1];
}
#ifdef USE_ADDITIVE_HASH
unsigned int cumulative_hash = hash<pd>(myGreedy);
#endif
for (int color = 0; color <= pd; color++) {
// Compute the location of the lattice point explicitly (all but
// the last coordinate - it's redundant because they sum to zero)
if (!outOfBounds) {
for (int i = 0; i < pd; i++) {
myKey[i] = myGreedy[i] + color;
if (myRank[i] > pd - color) {
myKey[i] -= (pd + 1);
}
}
}
#ifdef USE_ADDITIVE_HASH
for (int i = 0; i < pd; i++) {
if (myRank[i] == pd - color) {
cumulative_hash += hOffset[i];
}
}
#endif
if (!outOfBounds) {
MatrixEntry<scalar_t> r;
#ifdef USE_ADDITIVE_HASH
r.index = hashTableInsert<pd>(cumulative_hash, myKey, idx * (pd + 1) + color);
#else
r.index = hashTableInsert<pd>(myKey, idx * (pd + 1) + color);
#endif
r.weight = myBarycentric[color];
matrix[idx * (pd + 1) + color] = r;
}
}
}
template <typename scalar_t, int kd>
__global__ static void cleanHashTable(const int elementCount, MatrixEntry<scalar_t>* matrix) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= elementCount)
return;
// find my hash table entry
int* e = table_entries + idx;
// Check if I created my own key in the previous phase
if (*e >= 0) {
// Rehash my key and reset the pointer in order to merge with
// any other pixel that created a different entry under the
// same key. If the computation was serial this would never
// happen, but sometimes race conditions can make the same key
// be inserted twice. hashTableRetrieve always returns the
// earlier, so it's no problem as long as we rehash now.
#ifdef LINEAR_D_MEMORY
// Get my key
short myKey[kd];
generateKey<kd>(*e, myKey);
*e = hashTableRetrieve<kd>(myKey);
#else
*e = hashTableRetrieve<kd>(table_keys + *e * kd);
#endif
}
}
template <typename scalar_t, int pd, int vd>
__global__ static void splat(
const int elementCount,
scalar_t* values,
MatrixEntry<scalar_t>* matrix,
scalar_t* table_values) {
const int color = threadIdx.y;
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
const bool outOfBounds = idx >= elementCount;
if (outOfBounds) {
return;
}
scalar_t* myValue = values + idx * vd;
MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + color];
matrix[idx * (pd + 1) + color].index = r.index = table_entries[r.index];
scalar_t* val = table_values + r.index * (vd + 1);
for (int j = 0; j < vd; j++) {
gpuAtomicAdd(val + j, myValue[j] * r.weight);
}
gpuAtomicAdd(val + vd, r.weight);
}
// splat splits by color, so extend the y coordinate to our blocks to represent that
// dim3 oldblocks((w-1)/8+1, (h-1)/8+1, 1);
// dim3 oldblockSize(8, 8, 1);
// oldblocks.y *= pd+1;
// splatCache<pd, vd><<<oldblocks, oldblockSize>>>(w, h, values, matrix);
// int blockCount = (elementCount + 1) / BLOCK_SIZE + 1;
// int blockSize = BLOCK_SIZE;
// splatCache<pd, vd><<<dim3(blockCount, 1), dim3(blockSize, pd+1)>>>(elementCount, values, matrix);
template <typename scalar_t, int pd, int vd>
__global__ static void splatCache(
const int elementCount,
scalar_t* values,
MatrixEntry<scalar_t>* matrix,
scalar_t* table_values) {
// const int x = threadIdx.x + blockIdx.x * blockDim.x;
// const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y;
// const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
// const int color = blockIdx.y % (pd+1);
// const int idx = y*w + x;
const int threadId = threadIdx.x;
const int color = threadIdx.y;
const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE;
const bool outOfBounds = idx >= elementCount;
__shared__ int sharedOffsets[BLOCK_SIZE];
__shared__ scalar_t sharedValues[BLOCK_SIZE * (vd + 1)];
int myOffset = -1;
scalar_t* myValue = sharedValues + threadId * (vd + 1);
if (!outOfBounds) {
scalar_t* value = values + idx * vd;
MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + color];
// convert the matrix entry from a pointer into the entries array to a pointer into the keys/values array
matrix[idx * (pd + 1) + color].index = r.index = table_entries[r.index];
// record the offset into the keys/values array in shared space
myOffset = sharedOffsets[threadId] = r.index * (vd + 1);
for (int j = 0; j < vd; j++) {
myValue[j] = value[j] * r.weight;
}
myValue[vd] = r.weight;
} else {
sharedOffsets[threadId] = -1;
}
__syncthreads();
// am I the first thread in this block to care about this key?
if (outOfBounds)
return;
for (int i = 0; i < BLOCK_SIZE; i++) {
if (i < threadId) {
if (myOffset == sharedOffsets[i]) {
// somebody else with higher priority cares about this key
return;
}
} else if (i > threadId) {
if (myOffset == sharedOffsets[i]) {
// someone else with lower priority cares about this key, accumulate it into mine
for (int j = 0; j <= vd; j++) {
sharedValues[threadId * (vd + 1) + j] += sharedValues[i * (vd + 1) + j];
}
}
}
}
// only the threads with something to write to main memory are still going
scalar_t* val = table_values + myOffset;
for (int j = 0; j <= vd; j++) {
gpuAtomicAdd(val + j, myValue[j]);
}
}
template <typename scalar_t, int pd, int vd>
__global__ static void blur(
int n,
scalar_t* newValues,
MatrixEntry<scalar_t>* matrix,
int color,
scalar_t* table_values) {
const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
if (idx >= n)
return;
// Check if I'm valid
if (matrix[idx].index != idx)
return;
// find my key and the keys of my neighbours
short myKey[pd + 1];
short np[pd + 1];
short nm[pd + 1];
#ifdef LINEAR_D_MEMORY
generateKey<pd>(idx, myKey);
for (int i = 0; i < pd; i++) {
np[i] = myKey[i] + 1;
nm[i] = myKey[i] - 1;
}
#else
for (int i = 0; i < pd; i++) {
myKey[i] = table_keys[idx * pd + i];
np[i] = myKey[i] + 1;
nm[i] = myKey[i] - 1;
}
#endif
np[color] -= pd + 1;
nm[color] += pd + 1;
#ifdef USE_ADDITIVE_HASH
unsigned int hCurrent = hash<pd>(myKey);
int offNp = hashTableRetrieveWithHash<pd>(hCurrent + hOffset[color], np);
int offNm = hashTableRetrieveWithHash<pd>(hCurrent - hOffset[color], nm);
#else
int offNp = hashTableRetrieve<pd>(np);
int offNm = hashTableRetrieve<pd>(nm);
#endif
scalar_t* valMe = table_values + (vd + 1) * idx;
scalar_t* valNp = table_values + (vd + 1) * offNp;
scalar_t* valNm = table_values + (vd + 1) * offNm;
scalar_t* valOut = newValues + (vd + 1) * idx;
if (offNp >= 0 && offNm >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNp[i] + (valMe[i] * 2) + valNm[i]) / 4;
}
} else if (offNp >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNp[i] + (valMe[i] * 2)) / 4;
}
} else if (offNm >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNm[i] + (valMe[i] * 2)) / 4;
}
} else {
for (int i = 0; i <= vd; i++) {
valOut[i] = valMe[i] * 2;
}
}
}
template <typename scalar_t, int pd, int vd>
__global__ static void slice(
const int elementCount,
scalar_t* values,
MatrixEntry<scalar_t>* matrix,
scalar_t* table_values) {
const int threadId = threadIdx.x;
const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE;
const bool outOfBounds = idx >= elementCount;
if (outOfBounds)
return;
__shared__ scalar_t localValue[BLOCK_SIZE * vd];
scalar_t* myValue = localValue + threadId * vd;
scalar_t myWeight = 0;
for (int i = 0; i < vd; i++) {
myValue[i] = 0;
}
for (int i = 0; i <= pd; i++) {
MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + i];
scalar_t* val = table_values + r.index * (vd + 1);
for (int j = 0; j < vd; j++) {
myValue[j] += r.weight * val[j];
}
myWeight += r.weight * val[vd];
}
myWeight = 1.0f / myWeight;
for (int j = 0; j < vd; j++) {
values[idx * vd + j] = myValue[j] * myWeight;
}
}
template <typename scalar_t, int vd, int pd>
void PermutohedralCuda(scalar_t* values, scalar_t* positions, int elementCount, bool accurate) {
scalar_t blurVariance = accurate ? 0.5 : 0;
scalar_t* scaleFactor;
hipMalloc(&scaleFactor, pd * sizeof(scalar_t));
scalar_t scaleFactorHost[pd];
for (int i = 0; i < pd; i++) {
scaleFactorHost[i] = (pd + 1) * sqrtf((1.0 / 6 + blurVariance) / ((i + 1) * (i + 2)));
}
hipMemcpy(scaleFactor, scaleFactorHost, pd * sizeof(scalar_t), hipMemcpyHostToDevice);
MatrixEntry<scalar_t>* matrix;
hipMalloc(&matrix, elementCount * (pd + 1) * sizeof(MatrixEntry<scalar_t>));
scalar_t* table_values = createHashTable<scalar_t, pd, vd + 1>(elementCount * (pd + 1));
// Populate constant memory for hash helpers
unsigned long long int __host_two32 = ((unsigned long long int)1) << 32;
unsigned int __host_div_c = 2 * (elementCount * (pd + 1));
unsigned int __host_div_l = ceilf(logf((float)__host_div_c) / logf(2.0f));
unsigned int __host_div_m = (__host_two32 << __host_div_l) / __host_div_c - __host_two32 + 1;
hipMemcpyToSymbol(__div_c, &__host_div_c, sizeof(unsigned int));
hipMemcpyToSymbol(__div_l, &__host_div_l, sizeof(unsigned int));
hipMemcpyToSymbol(__div_m, &__host_div_m, sizeof(unsigned int));
// Populate constant memory with hash of offset vectors
unsigned int hOffset_host[pd + 1];
signed short offset[pd + 1];
for (int i = 0; i < pd; offset[i] = 1, i++)
;
for (int i = 0; i <= pd; i++) {
offset[i] -= pd + 1;
hOffset_host[i] = hash<pd>(offset);
offset[i] += pd + 1;
}
hipMemcpyToSymbol(hOffset, &hOffset_host, sizeof(unsigned int) * (pd + 1));
int blockCount = (elementCount + 1) / BLOCK_SIZE + 1;
int blockSize = BLOCK_SIZE;
hipLaunchKernelGGL(( createMatrix<scalar_t, pd>), dim3(blockCount), dim3(blockSize), 0, 0, elementCount, positions, values, scaleFactor, matrix);
// fix duplicate hash table entries
int tableSize = elementCount * 2 * (pd + 1);
int cleanBlockSize = 32;
int cleanBlocks = (tableSize - 1) / cleanBlockSize + 1;
hipLaunchKernelGGL(( cleanHashTable<scalar_t, pd>), dim3(cleanBlocks), dim3(cleanBlockSize), 0, 0, tableSize, matrix);
hipLaunchKernelGGL(( splat<scalar_t, pd, vd>), dim3(dim3(blockCount, 1)), dim3(dim3(blockSize, pd + 1)), 0, 0, elementCount, values, matrix, table_values);
if (accurate) {
scalar_t* newValues;
hipMalloc(&newValues, elementCount * (pd + 1) * (vd + 1) * sizeof(scalar_t));
hipMemset(newValues, 0, elementCount * (pd + 1) * (vd + 1) * sizeof(scalar_t));
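    // Accurate mode: one [1 2 1]/4 blur pass along each lattice direction,
    // ping-ponging between table_values and newValues.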
for (int color = 0; color <= pd; color++) {
hipLaunchKernelGGL(( blur<scalar_t, pd, vd>)
, dim3(cleanBlocks), dim3(cleanBlockSize), 0, 0, elementCount * (pd + 1), newValues, matrix, color, table_values);
scalar_t* swap = newValues;
newValues = table_values;
table_values = swap;
}
hipFree(newValues);
}
hipLaunchKernelGGL(( slice<scalar_t, pd, vd>), dim3(blockCount), dim3(blockSize), 0, 0, elementCount, values, matrix, table_values);
destroyHashTable<scalar_t>();
hipFree(table_values);
hipFree(scaleFactor);
hipFree(matrix);
}
#define DECLARATION(dc, fc) \
template void PermutohedralCuda<float, dc, fc>(float* values, float* positions, int elementCount, bool accurate); \
template void PermutohedralCuda<double, dc, fc>(double* values, double* positions, int elementCount, bool accurate);
DO_FOR_AB(DECLARATION, 16, 19)
| 87cefcf852ce722a2440dfb151f631d21fe36520.cu | /*
Copyright (c) MONAI Consortium
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Adapted from https://github.com/abadams/permutohedral
which has the following license...
MIT License
Copyright (c) 2020 Andrew Adams
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#define BLOCK_SIZE 32
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <torch/extension.h>
#include <THC/THCAtomics.cuh>
#include "hash_table.cuh"
#include "permutohedral.h"
#include "utils/meta_macros.h"
template <typename scalar_t>
struct MatrixEntry {
int index;
scalar_t weight;
};
template <typename scalar_t, int pd>
__global__ static void createMatrix(
const int elementCount,
const scalar_t* positions,
const scalar_t* values,
const scalar_t* scaleFactor,
MatrixEntry<scalar_t>* matrix) {
const int threadId = threadIdx.x;
const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE;
const bool outOfBounds = idx >= elementCount;
scalar_t myElevated[pd + 1];
const scalar_t* myPosition = positions + idx * pd;
int myGreedy[pd + 1];
int myRank[pd + 1];
scalar_t myBarycentric[pd + 2];
__shared__ short keys[pd * BLOCK_SIZE];
short* myKey = keys + threadId * pd;
if (!outOfBounds) {
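    // Elevate the pd-dimensional position onto the (pd+1)-dimensional hyperplane of
    // the permutohedral lattice, folding in the per-axis scale factors.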
myElevated[pd] = -pd * myPosition[pd - 1] * scaleFactor[pd - 1];
for (int i = pd - 1; i > 0; i--) {
myElevated[i] =
myElevated[i + 1] - i * (myPosition[i - 1]) * scaleFactor[i - 1] + (i + 2) * myPosition[i] * scaleFactor[i];
}
myElevated[0] = myElevated[1] + 2 * myPosition[0] * scaleFactor[0];
// find the closest zero-colored lattice point
// greedily search for the closest zero-colored lattice point
signed short sum = 0;
for (int i = 0; i <= pd; i++) {
scalar_t v = myElevated[i] * (1.0f / (pd + 1));
scalar_t up = ceilf(v) * (pd + 1);
scalar_t down = floorf(v) * (pd + 1);
myGreedy[i] = (signed short)(up - myElevated[i] < myElevated[i] - down ? up : down);
sum += myGreedy[i];
}
sum /= pd + 1;
// sort differential to find the permutation between this simplex and the canonical one
for (int i = 0; i <= pd; i++) {
myRank[i] = 0;
for (int j = 0; j <= pd; j++) {
scalar_t iDiff = myElevated[i] - myGreedy[i];
scalar_t jDiff = myElevated[j] - myGreedy[j];
if (iDiff < jDiff || (iDiff == jDiff && i > j)) {
myRank[i]++;
}
}
}
if (sum > 0) // sum too large, need to bring down the ones with the smallest differential
{
for (int i = 0; i <= pd; i++) {
if (myRank[i] >= pd + 1 - sum) {
myGreedy[i] -= (pd + 1);
myRank[i] += sum - (pd + 1);
} else {
myRank[i] += sum;
}
}
} else if (sum < 0) // sum too small, need to bring up the ones with largest differential
{
for (int i = 0; i <= pd; i++) {
if (myRank[i] < -sum) {
myGreedy[i] += (pd + 1);
myRank[i] += sum + (pd + 1);
} else {
myRank[i] += sum;
}
}
}
#ifdef LINEAR_D_MEMORY
for (int i = 0; i <= pd; i++) {
table_zeros[idx * (pd + 1) + i] = myGreedy[i];
table_rank[idx * (pd + 1) + i] = myRank[i];
}
#endif
// turn delta into barycentric coords
for (int i = 0; i <= pd + 1; i++) {
myBarycentric[i] = 0;
}
for (int i = 0; i <= pd; i++) {
scalar_t delta = (myElevated[i] - myGreedy[i]) * (1.0f / (pd + 1));
myBarycentric[pd - myRank[i]] += delta;
myBarycentric[pd + 1 - myRank[i]] -= delta;
}
myBarycentric[0] += 1.0f + myBarycentric[pd + 1];
}
#ifdef USE_ADDITIVE_HASH
unsigned int cumulative_hash = hash<pd>(myGreedy);
#endif
for (int color = 0; color <= pd; color++) {
// Compute the location of the lattice point explicitly (all but
// the last coordinate - it's redundant because they sum to zero)
if (!outOfBounds) {
for (int i = 0; i < pd; i++) {
myKey[i] = myGreedy[i] + color;
if (myRank[i] > pd - color) {
myKey[i] -= (pd + 1);
}
}
}
#ifdef USE_ADDITIVE_HASH
for (int i = 0; i < pd; i++) {
if (myRank[i] == pd - color) {
cumulative_hash += hOffset[i];
}
}
#endif
if (!outOfBounds) {
MatrixEntry<scalar_t> r;
#ifdef USE_ADDITIVE_HASH
r.index = hashTableInsert<pd>(cumulative_hash, myKey, idx * (pd + 1) + color);
#else
r.index = hashTableInsert<pd>(myKey, idx * (pd + 1) + color);
#endif
r.weight = myBarycentric[color];
matrix[idx * (pd + 1) + color] = r;
}
}
}
template <typename scalar_t, int kd>
__global__ static void cleanHashTable(const int elementCount, MatrixEntry<scalar_t>* matrix) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= elementCount)
return;
// find my hash table entry
int* e = table_entries + idx;
// Check if I created my own key in the previous phase
if (*e >= 0) {
// Rehash my key and reset the pointer in order to merge with
// any other pixel that created a different entry under the
// same key. If the computation was serial this would never
// happen, but sometimes race conditions can make the same key
// be inserted twice. hashTableRetrieve always returns the
// earlier, so it's no problem as long as we rehash now.
#ifdef LINEAR_D_MEMORY
// Get my key
short myKey[kd];
generateKey<kd>(*e, myKey);
*e = hashTableRetrieve<kd>(myKey);
#else
*e = hashTableRetrieve<kd>(table_keys + *e * kd);
#endif
}
}
template <typename scalar_t, int pd, int vd>
__global__ static void splat(
const int elementCount,
scalar_t* values,
MatrixEntry<scalar_t>* matrix,
scalar_t* table_values) {
const int color = threadIdx.y;
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
const bool outOfBounds = idx >= elementCount;
if (outOfBounds) {
return;
}
scalar_t* myValue = values + idx * vd;
MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + color];
matrix[idx * (pd + 1) + color].index = r.index = table_entries[r.index];
scalar_t* val = table_values + r.index * (vd + 1);
for (int j = 0; j < vd; j++) {
gpuAtomicAdd(val + j, myValue[j] * r.weight);
}
gpuAtomicAdd(val + vd, r.weight);
}
// splat splits by color, so extend the y coordinate to our blocks to represent that
// dim3 oldblocks((w-1)/8+1, (h-1)/8+1, 1);
// dim3 oldblockSize(8, 8, 1);
// oldblocks.y *= pd+1;
// splatCache<pd, vd><<<oldblocks, oldblockSize>>>(w, h, values, matrix);
// int blockCount = (elementCount + 1) / BLOCK_SIZE + 1;
// int blockSize = BLOCK_SIZE;
// splatCache<pd, vd><<<dim3(blockCount, 1), dim3(blockSize, pd+1)>>>(elementCount, values, matrix);
template <typename scalar_t, int pd, int vd>
__global__ static void splatCache(
const int elementCount,
scalar_t* values,
MatrixEntry<scalar_t>* matrix,
scalar_t* table_values) {
// const int x = threadIdx.x + blockIdx.x * blockDim.x;
// const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y;
// const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
// const int color = blockIdx.y % (pd+1);
// const int idx = y*w + x;
const int threadId = threadIdx.x;
const int color = threadIdx.y;
const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE;
const bool outOfBounds = idx >= elementCount;
__shared__ int sharedOffsets[BLOCK_SIZE];
__shared__ scalar_t sharedValues[BLOCK_SIZE * (vd + 1)];
int myOffset = -1;
scalar_t* myValue = sharedValues + threadId * (vd + 1);
if (!outOfBounds) {
scalar_t* value = values + idx * vd;
MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + color];
// convert the matrix entry from a pointer into the entries array to a pointer into the keys/values array
matrix[idx * (pd + 1) + color].index = r.index = table_entries[r.index];
// record the offset into the keys/values array in shared space
myOffset = sharedOffsets[threadId] = r.index * (vd + 1);
for (int j = 0; j < vd; j++) {
myValue[j] = value[j] * r.weight;
}
myValue[vd] = r.weight;
} else {
sharedOffsets[threadId] = -1;
}
__syncthreads();
// am I the first thread in this block to care about this key?
if (outOfBounds)
return;
for (int i = 0; i < BLOCK_SIZE; i++) {
if (i < threadId) {
if (myOffset == sharedOffsets[i]) {
// somebody else with higher priority cares about this key
return;
}
} else if (i > threadId) {
if (myOffset == sharedOffsets[i]) {
// someone else with lower priority cares about this key, accumulate it into mine
for (int j = 0; j <= vd; j++) {
sharedValues[threadId * (vd + 1) + j] += sharedValues[i * (vd + 1) + j];
}
}
}
}
// only the threads with something to write to main memory are still going
scalar_t* val = table_values + myOffset;
for (int j = 0; j <= vd; j++) {
gpuAtomicAdd(val + j, myValue[j]);
}
}
template <typename scalar_t, int pd, int vd>
__global__ static void blur(
int n,
scalar_t* newValues,
MatrixEntry<scalar_t>* matrix,
int color,
scalar_t* table_values) {
const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
if (idx >= n)
return;
// Check if I'm valid
if (matrix[idx].index != idx)
return;
// find my key and the keys of my neighbours
short myKey[pd + 1];
short np[pd + 1];
short nm[pd + 1];
#ifdef LINEAR_D_MEMORY
generateKey<pd>(idx, myKey);
for (int i = 0; i < pd; i++) {
np[i] = myKey[i] + 1;
nm[i] = myKey[i] - 1;
}
#else
for (int i = 0; i < pd; i++) {
myKey[i] = table_keys[idx * pd + i];
np[i] = myKey[i] + 1;
nm[i] = myKey[i] - 1;
}
#endif
np[color] -= pd + 1;
nm[color] += pd + 1;
#ifdef USE_ADDITIVE_HASH
unsigned int hCurrent = hash<pd>(myKey);
int offNp = hashTableRetrieveWithHash<pd>(hCurrent + hOffset[color], np);
int offNm = hashTableRetrieveWithHash<pd>(hCurrent - hOffset[color], nm);
#else
int offNp = hashTableRetrieve<pd>(np);
int offNm = hashTableRetrieve<pd>(nm);
#endif
scalar_t* valMe = table_values + (vd + 1) * idx;
scalar_t* valNp = table_values + (vd + 1) * offNp;
scalar_t* valNm = table_values + (vd + 1) * offNm;
scalar_t* valOut = newValues + (vd + 1) * idx;
if (offNp >= 0 && offNm >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNp[i] + (valMe[i] * 2) + valNm[i]) / 4;
}
} else if (offNp >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNp[i] + (valMe[i] * 2)) / 4;
}
} else if (offNm >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNm[i] + (valMe[i] * 2)) / 4;
}
} else {
for (int i = 0; i <= vd; i++) {
valOut[i] = valMe[i] * 2;
}
}
}
template <typename scalar_t, int pd, int vd>
__global__ static void slice(
const int elementCount,
scalar_t* values,
MatrixEntry<scalar_t>* matrix,
scalar_t* table_values) {
const int threadId = threadIdx.x;
const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE;
const bool outOfBounds = idx >= elementCount;
if (outOfBounds)
return;
__shared__ scalar_t localValue[BLOCK_SIZE * vd];
scalar_t* myValue = localValue + threadId * vd;
scalar_t myWeight = 0;
for (int i = 0; i < vd; i++) {
myValue[i] = 0;
}
for (int i = 0; i <= pd; i++) {
MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + i];
scalar_t* val = table_values + r.index * (vd + 1);
for (int j = 0; j < vd; j++) {
myValue[j] += r.weight * val[j];
}
myWeight += r.weight * val[vd];
}
myWeight = 1.0f / myWeight;
for (int j = 0; j < vd; j++) {
values[idx * vd + j] = myValue[j] * myWeight;
}
}
template <typename scalar_t, int vd, int pd>
void PermutohedralCuda(scalar_t* values, scalar_t* positions, int elementCount, bool accurate) {
scalar_t blurVariance = accurate ? 0.5 : 0;
scalar_t* scaleFactor;
cudaMalloc(&scaleFactor, pd * sizeof(scalar_t));
scalar_t scaleFactorHost[pd];
for (int i = 0; i < pd; i++) {
scaleFactorHost[i] = (pd + 1) * sqrtf((1.0 / 6 + blurVariance) / ((i + 1) * (i + 2)));
}
cudaMemcpy(scaleFactor, scaleFactorHost, pd * sizeof(scalar_t), cudaMemcpyHostToDevice);
MatrixEntry<scalar_t>* matrix;
cudaMalloc(&matrix, elementCount * (pd + 1) * sizeof(MatrixEntry<scalar_t>));
scalar_t* table_values = createHashTable<scalar_t, pd, vd + 1>(elementCount * (pd + 1));
// Populate constant memory for hash helpers
unsigned long long int __host_two32 = ((unsigned long long int)1) << 32;
unsigned int __host_div_c = 2 * (elementCount * (pd + 1));
unsigned int __host_div_l = ceilf(logf((float)__host_div_c) / logf(2.0f));
unsigned int __host_div_m = (__host_two32 << __host_div_l) / __host_div_c - __host_two32 + 1;
cudaMemcpyToSymbol(__div_c, &__host_div_c, sizeof(unsigned int));
cudaMemcpyToSymbol(__div_l, &__host_div_l, sizeof(unsigned int));
cudaMemcpyToSymbol(__div_m, &__host_div_m, sizeof(unsigned int));
// Populate constant memory with hash of offset vectors
unsigned int hOffset_host[pd + 1];
signed short offset[pd + 1];
for (int i = 0; i < pd; offset[i] = 1, i++)
;
for (int i = 0; i <= pd; i++) {
offset[i] -= pd + 1;
hOffset_host[i] = hash<pd>(offset);
offset[i] += pd + 1;
}
cudaMemcpyToSymbol(hOffset, &hOffset_host, sizeof(unsigned int) * (pd + 1));
int blockCount = (elementCount + 1) / BLOCK_SIZE + 1;
int blockSize = BLOCK_SIZE;
createMatrix<scalar_t, pd><<<blockCount, blockSize>>>(elementCount, positions, values, scaleFactor, matrix);
// fix duplicate hash table entries
int tableSize = elementCount * 2 * (pd + 1);
int cleanBlockSize = 32;
int cleanBlocks = (tableSize - 1) / cleanBlockSize + 1;
cleanHashTable<scalar_t, pd><<<cleanBlocks, cleanBlockSize>>>(tableSize, matrix);
splat<scalar_t, pd, vd><<<dim3(blockCount, 1), dim3(blockSize, pd + 1)>>>(elementCount, values, matrix, table_values);
if (accurate) {
scalar_t* newValues;
cudaMalloc(&newValues, elementCount * (pd + 1) * (vd + 1) * sizeof(scalar_t));
cudaMemset(newValues, 0, elementCount * (pd + 1) * (vd + 1) * sizeof(scalar_t));
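    // Accurate mode: one [1 2 1]/4 blur pass along each lattice direction,
    // ping-ponging between table_values and newValues.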
for (int color = 0; color <= pd; color++) {
blur<scalar_t, pd, vd>
<<<cleanBlocks, cleanBlockSize>>>(elementCount * (pd + 1), newValues, matrix, color, table_values);
scalar_t* swap = newValues;
newValues = table_values;
table_values = swap;
}
cudaFree(newValues);
}
slice<scalar_t, pd, vd><<<blockCount, blockSize>>>(elementCount, values, matrix, table_values);
destroyHashTable<scalar_t>();
cudaFree(table_values);
cudaFree(scaleFactor);
cudaFree(matrix);
}
#define DECLARATION(dc, fc) \
template void PermutohedralCuda<float, dc, fc>(float* values, float* positions, int elementCount, bool accurate); \
template void PermutohedralCuda<double, dc, fc>(double* values, double* positions, int elementCount, bool accurate);
DO_FOR_AB(DECLARATION, 16, 19)
|
d5c700f8f3fd42e0885f712ceb441e47047628c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TS 32 // Tile size
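// Naive (untiled) GEMV: one thread per output element computes
// y[idx] = alpha * dot(row-or-column idx of A, x) + beta * y[idx]; `trans` selects
// whether the walk through A strides by lda or is contiguous. TS is currently unused.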
template<typename Int, typename Alpha, typename TypeA, typename Beta, typename TypeX,
typename TypeY>
__global__ void gemv(bool trans, Int m, Int n, Alpha alpha, TypeA *a, Int lda, TypeX *x, Int incx,
Beta beta, TypeY *y, Int incy) {
const Int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < m) {
TypeY acc = 0;
if (trans) {
for (Int i = 0; i < n; i++) { acc += a[idx + i * lda] * x[i * incx]; }
} else {
for (Int i = 0; i < n; i++) { acc += a[i + idx * lda] * x[i * incx]; }
}
y[idx * incy] = alpha * acc + beta * y[idx * incy];
}
}
| d5c700f8f3fd42e0885f712ceb441e47047628c2.cu | #define TS 32 // Tile size
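// Naive (untiled) GEMV: one thread per output element computes
// y[idx] = alpha * dot(row-or-column idx of A, x) + beta * y[idx]; `trans` selects
// whether the walk through A strides by lda or is contiguous. TS is currently unused.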
template<typename Int, typename Alpha, typename TypeA, typename Beta, typename TypeX,
typename TypeY>
__global__ void gemv(bool trans, Int m, Int n, Alpha alpha, TypeA *a, Int lda, TypeX *x, Int incx,
Beta beta, TypeY *y, Int incy) {
const Int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < m) {
TypeY acc = 0;
if (trans) {
for (Int i = 0; i < n; i++) { acc += a[idx + i * lda] * x[i * incx]; }
} else {
for (Int i = 0; i < n; i++) { acc += a[i + idx * lda] * x[i * incx]; }
}
y[idx * incy] = alpha * acc + beta * y[idx * incy];
}
}
|
a7f7f010ff9ef38fc29cde8a038304837ac2f1af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "reduction.h"
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
__global__ void
reduction_kernel(float *g_out, float *g_in, unsigned int size)
{
unsigned int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ float s_data[];
// cumulates input with grid-stride loop and save to share memory
float input = 0.f;
for (int i = idx_x; i < size; i += blockDim.x * gridDim.x)
input += g_in[i];
s_data[threadIdx.x] = input;
__syncthreads();
// do reduction
for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (threadIdx.x < stride)
s_data[threadIdx.x] += s_data[threadIdx.x + stride];
__syncthreads();
}
if (threadIdx.x == 0) {
g_out[blockIdx.x] = s_data[0];
}
}
int reduction(float *g_outPtr, float *g_inPtr, int size, int n_threads)
{
int num_sms;
int num_blocks_per_sm;
hipDeviceGetAttribute(&num_sms, hipDeviceAttributeMultiprocessorCount, 0);
hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm, reduction_kernel, n_threads, n_threads*sizeof(float));
int n_blocks = min(num_blocks_per_sm * num_sms, (size + n_threads - 1) / n_threads);
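    // Two-pass reduction: the first launch leaves one partial sum per block in
    // g_outPtr; the second, single-block launch reduces those partials into g_outPtr[0].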
hipLaunchKernelGGL(( reduction_kernel), dim3(n_blocks), dim3(n_threads), n_threads * sizeof(float), 0, g_outPtr, g_inPtr, size);
    hipLaunchKernelGGL(( reduction_kernel), dim3(1), dim3(n_threads), n_threads * sizeof(float), 0, g_outPtr, g_outPtr, n_blocks);
return 1;
} | a7f7f010ff9ef38fc29cde8a038304837ac2f1af.cu | #include <stdio.h>
#include "reduction.h"
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
__global__ void
reduction_kernel(float *g_out, float *g_in, unsigned int size)
{
unsigned int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ float s_data[];
// cumulates input with grid-stride loop and save to share memory
float input = 0.f;
for (int i = idx_x; i < size; i += blockDim.x * gridDim.x)
input += g_in[i];
s_data[threadIdx.x] = input;
__syncthreads();
// do reduction
for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (threadIdx.x < stride)
s_data[threadIdx.x] += s_data[threadIdx.x + stride];
__syncthreads();
}
if (threadIdx.x == 0) {
g_out[blockIdx.x] = s_data[0];
}
}
int reduction(float *g_outPtr, float *g_inPtr, int size, int n_threads)
{
int num_sms;
int num_blocks_per_sm;
cudaDeviceGetAttribute(&num_sms, cudaDevAttrMultiProcessorCount, 0);
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm, reduction_kernel, n_threads, n_threads*sizeof(float));
int n_blocks = min(num_blocks_per_sm * num_sms, (size + n_threads - 1) / n_threads);
reduction_kernel<<<n_blocks, n_threads, n_threads * sizeof(float), 0>>>(g_outPtr, g_inPtr, size);
reduction_kernel<<<1, n_threads, n_threads * sizeof(float), 0>>>(g_outPtr, g_inPtr, n_blocks);
return 1;
} |
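// Illustrative host-side usage sketch for the reduction() entry point above.
// The buffer names and the 256-thread block size are assumptions; the only
// contract relied on is that reduction() leaves the final sum in g_outPtr[0].
static float reduce_on_device_sketch(const float *h_in, int size) {
    float *d_in = nullptr, *d_out = nullptr, result = 0.f;
    cudaMalloc(&d_in, size * sizeof(float));
    cudaMalloc(&d_out, size * sizeof(float));   // enough room for one partial per block
    cudaMemcpy(d_in, h_in, size * sizeof(float), cudaMemcpyHostToDevice);
    reduction(d_out, d_in, size, 256 /* threads per block, power of 2 */);
    cudaMemcpy(&result, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
    return result;
}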
2ba701b5f7beb6f9db813d1bd740cdb5ffe89d46.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CChanel_AWGN_SIMD.h"
#define CURAND_CALL(x) do { if((x) != HIPRAND_STATUS_SUCCESS) { \
printf("Error (%d) at %s:%d\n", x, __FILE__,__LINE__); \
exit(0);}} while(0)
__global__ void GenerateNoiseAndTransform(const float *A, const float *B, int *C, float SigB, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
float vSin, vCos, x, y;
union {char c[4]; unsigned int i;} res_a = {0, 0, 0, 0};
union {char c[4]; unsigned int i;} res_b = {0, 0, 0, 0};
for(int p=0; p<4; p++){
x = sqrt(-2.0 * log( A[i + p * N] ));
y = B[i + p * N];
sincosf(_2pi * y, &vSin, &vCos);
float v1 = (-1.0 + (x * vSin) * SigB);
float v2 = (-1.0 + (x * vCos) * SigB);
res_a.c[p] = (char)fminf( fmaxf(8.0f * v1, -31.0f), 31.0f);
res_b.c[p] = (char)fminf( fmaxf(8.0f * v2, -31.0f), 31.0f);
}
C[i] = res_a.i;
C[i+N] = res_b.i;
}
}
//#define SEQ_LEVEL 1
CChanel_AWGN_SIMD::CChanel_AWGN_SIMD(CTrame *t, int _BITS_LLR, bool QPSK, bool Es_N0) : CChanel(t, _BITS_LLR, QPSK, Es_N0){
hiprandStatus_t Status;
SEQ_LEVEL = 1 + ((_data > 10000) ? 3 : 0);
unsigned int nb_ech = (_frames * _data) / SEQ_LEVEL;
Status = hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT);
CURAND_CALL(Status);
Status = hiprandSetPseudoRandomGeneratorSeed(generator, 1234ULL);
CURAND_CALL(Status);
CUDA_MALLOC_DEVICE(&device_A, nb_ech/2,__FILE__, __LINE__);
CUDA_MALLOC_DEVICE(&device_B, nb_ech/2,__FILE__, __LINE__);
CUDA_MALLOC_DEVICE(&device_R, nb_ech ,__FILE__, __LINE__);
}
CChanel_AWGN_SIMD::~CChanel_AWGN_SIMD(){
hipError_t Status;
Status = hipFree(device_A);
ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = hipFree(device_B);
ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = hipFree(device_R);
ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
hiprandStatus_t eStatus;
eStatus = hiprandDestroyGenerator(generator);
CURAND_CALL(eStatus);
// printf("(DD) STOP CChanel_AWGN_SIMD::~CChanel_AWGN_SIMD() !\n");
}
void CChanel_AWGN_SIMD::configure(double _Eb_N0) {
rendement = (float) (_vars) / (float) (_data);
if (es_n0) {
Eb_N0 = _Eb_N0 - 10.0 * log10(2 * rendement);
} else {
Eb_N0 = _Eb_N0;
}
double interm = 10.0 * log10(rendement);
interm = -0.1*((double)Eb_N0+interm);
SigB = sqrt(pow(10.0,interm)/2);
}
#include <limits.h>
#define MAX_RANDOM LONG_MAX /* Maximum value of random() */
double CChanel_AWGN_SIMD::awgn(double amp)
{
return 0.00;
}
#define QPSK 0.707106781
#define BPSK 1.0
void CChanel_AWGN_SIMD::generate()
{
size_t nb_rand_data = _frames*_data / 2 / SEQ_LEVEL;
CURAND_CALL( hiprandGenerateUniform( generator, device_A, nb_rand_data ) );
CURAND_CALL( hiprandGenerateUniform( generator, device_B, nb_rand_data ) );
for(int i=0; i<4 * SEQ_LEVEL; i++){
        // CHANNEL NOISE GENERATION
size_t nb_noise_sample = nb_rand_data / 2;
int threadsPerBlock = 1024;
size_t blocksPerGrid = (nb_noise_sample + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( GenerateNoiseAndTransform), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, device_A, device_B, (int*)device_R, (float)SigB, nb_noise_sample/2);
hipError_t eStatus = hipMemcpyAsync(&t_noise_data[i * nb_noise_sample], device_R, nb_noise_sample * sizeof(float), hipMemcpyDeviceToHost);
if( i != 3 ){
CURAND_CALL( hiprandGenerateUniform( generator, device_A, nb_rand_data ) );
CURAND_CALL( hiprandGenerateUniform( generator, device_B, nb_rand_data ) );
}
hipDeviceSynchronize();
ERROR_CHECK(hipGetLastError(), __FILE__, __LINE__);
}
}
| 2ba701b5f7beb6f9db813d1bd740cdb5ffe89d46.cu | #include "CChanel_AWGN_SIMD.h"
#define CURAND_CALL(x) do { if((x) != CURAND_STATUS_SUCCESS) { \
printf("Error (%d) at %s:%d\n", x, __FILE__,__LINE__); \
exit(0);}} while(0)
__global__ void GenerateNoiseAndTransform(const float *A, const float *B, int *C, float SigB, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
float vSin, vCos, x, y;
union {char c[4]; unsigned int i;} res_a = {0, 0, 0, 0};
union {char c[4]; unsigned int i;} res_b = {0, 0, 0, 0};
for(int p=0; p<4; p++){
x = sqrt(-2.0 * log( A[i + p * N] ));
y = B[i + p * N];
sincosf(_2pi * y, &vSin, &vCos);
float v1 = (-1.0 + (x * vSin) * SigB);
float v2 = (-1.0 + (x * vCos) * SigB);
res_a.c[p] = (char)fminf( fmaxf(8.0f * v1, -31.0f), 31.0f);
res_b.c[p] = (char)fminf( fmaxf(8.0f * v2, -31.0f), 31.0f);
}
C[i] = res_a.i;
C[i+N] = res_b.i;
}
}
//#define SEQ_LEVEL 1
CChanel_AWGN_SIMD::CChanel_AWGN_SIMD(CTrame *t, int _BITS_LLR, bool QPSK, bool Es_N0) : CChanel(t, _BITS_LLR, QPSK, Es_N0){
curandStatus_t Status;
SEQ_LEVEL = 1 + ((_data > 10000) ? 3 : 0);
unsigned int nb_ech = (_frames * _data) / SEQ_LEVEL;
Status = curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT);
CURAND_CALL(Status);
Status = curandSetPseudoRandomGeneratorSeed(generator, 1234ULL);
CURAND_CALL(Status);
CUDA_MALLOC_DEVICE(&device_A, nb_ech/2,__FILE__, __LINE__);
CUDA_MALLOC_DEVICE(&device_B, nb_ech/2,__FILE__, __LINE__);
CUDA_MALLOC_DEVICE(&device_R, nb_ech ,__FILE__, __LINE__);
}
CChanel_AWGN_SIMD::~CChanel_AWGN_SIMD(){
cudaError_t Status;
Status = cudaFree(device_A);
ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = cudaFree(device_B);
ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = cudaFree(device_R);
ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
curandStatus_t eStatus;
eStatus = curandDestroyGenerator(generator);
CURAND_CALL(eStatus);
// printf("(DD) STOP CChanel_AWGN_SIMD::~CChanel_AWGN_SIMD() !\n");
}
void CChanel_AWGN_SIMD::configure(double _Eb_N0) {
rendement = (float) (_vars) / (float) (_data);
if (es_n0) {
Eb_N0 = _Eb_N0 - 10.0 * log10(2 * rendement);
} else {
Eb_N0 = _Eb_N0;
}
double interm = 10.0 * log10(rendement);
interm = -0.1*((double)Eb_N0+interm);
SigB = sqrt(pow(10.0,interm)/2);
}
#include <limits.h>
#define MAX_RANDOM LONG_MAX /* Maximum value of random() */
double CChanel_AWGN_SIMD::awgn(double amp)
{
return 0.00;
}
#define QPSK 0.707106781
#define BPSK 1.0
void CChanel_AWGN_SIMD::generate()
{
size_t nb_rand_data = _frames*_data / 2 / SEQ_LEVEL;
CURAND_CALL( curandGenerateUniform( generator, device_A, nb_rand_data ) );
CURAND_CALL( curandGenerateUniform( generator, device_B, nb_rand_data ) );
for(int i=0; i<4 * SEQ_LEVEL; i++){
        // CHANNEL NOISE GENERATION
size_t nb_noise_sample = nb_rand_data / 2;
int threadsPerBlock = 1024;
size_t blocksPerGrid = (nb_noise_sample + threadsPerBlock - 1) / threadsPerBlock;
GenerateNoiseAndTransform<<<blocksPerGrid, threadsPerBlock>>>(device_A, device_B, (int*)device_R, (float)SigB, nb_noise_sample/2);
cudaError_t eStatus = cudaMemcpyAsync(&t_noise_data[i * nb_noise_sample], device_R, nb_noise_sample * sizeof(float), cudaMemcpyDeviceToHost);
if( i != 3 ){
CURAND_CALL( curandGenerateUniform( generator, device_A, nb_rand_data ) );
CURAND_CALL( curandGenerateUniform( generator, device_B, nb_rand_data ) );
}
cudaDeviceSynchronize();
ERROR_CHECK(cudaGetLastError(), __FILE__, __LINE__);
}
}
|
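// Host-side reference of the Box-Muller mapping used by GenerateNoiseAndTransform
// in the pair above: two uniform samples u1, u2 in (0, 1] become two independent
// N(0, sigma^2) values, which the kernel then adds to the BPSK symbol -1 before
// quantizing. Illustrative only; TWO_PI stands in for the _2pi constant the
// kernel relies on.
#include <math.h>
static void box_muller_reference(float u1, float u2, float sigma,
                                 float *n1, float *n2) {
    const float TWO_PI = 6.2831853f;
    const float r = sqrtf(-2.0f * logf(u1));    // radius from the first uniform
    *n1 = sigma * r * sinf(TWO_PI * u2);        // first Gaussian sample
    *n2 = sigma * r * cosf(TWO_PI * u2);        // second Gaussian sample
}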
590b308cf6c9fd9483ee9f1a8a346ce8c8937c4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cu_normals.h"
#include "launch_utils.h"
namespace roo
{
//////////////////////////////////////////////////////
// Normals from VBO
//////////////////////////////////////////////////////
__global__ void KernNormalsFromVbo(Image<float4> dN, const Image<float4> dV)
{
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
if( u < dN.w && v < dN.h) {
if( u+1 < dN.w && v+1 < dN.h) {
const float4 Vc = dV(u,v);
const float4 Vr = dV(u+1,v);
const float4 Vu = dV(u,v+1);
const float4 a = Vr - Vc;
const float4 b = Vu - Vc;
const float3 axb = make_float3(
a.y*b.z - a.z*b.y,
a.z*b.x - a.x*b.z,
a.x*b.y - a.y*b.x
);
const float magaxb = length(axb);
const float4 N = make_float4(-axb.x/magaxb, -axb.y/magaxb, -axb.z/magaxb,1);
dN(u,v) = N;
}else{
dN(u,v) = make_float4(0,0,0,0);
}
}
}
void NormalsFromVbo(Image<float4> dN, const Image<float4> dV)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim, gridDim, dN);
hipLaunchKernelGGL(( KernNormalsFromVbo), dim3(gridDim),dim3(blockDim), 0, 0, dN, dV);
}
}
| 590b308cf6c9fd9483ee9f1a8a346ce8c8937c4e.cu | #include "cu_normals.h"
#include "launch_utils.h"
namespace roo
{
//////////////////////////////////////////////////////
// Normals from VBO
//////////////////////////////////////////////////////
__global__ void KernNormalsFromVbo(Image<float4> dN, const Image<float4> dV)
{
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
if( u < dN.w && v < dN.h) {
if( u+1 < dN.w && v+1 < dN.h) {
const float4 Vc = dV(u,v);
const float4 Vr = dV(u+1,v);
const float4 Vu = dV(u,v+1);
const float4 a = Vr - Vc;
const float4 b = Vu - Vc;
const float3 axb = make_float3(
a.y*b.z - a.z*b.y,
a.z*b.x - a.x*b.z,
a.x*b.y - a.y*b.x
);
const float magaxb = length(axb);
const float4 N = make_float4(-axb.x/magaxb, -axb.y/magaxb, -axb.z/magaxb,1);
dN(u,v) = N;
}else{
dN(u,v) = make_float4(0,0,0,0);
}
}
}
void NormalsFromVbo(Image<float4> dN, const Image<float4> dV)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim, gridDim, dN);
KernNormalsFromVbo<<<gridDim,blockDim>>>(dN, dV);
}
}
|
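// Scalar reference of the normal computation in KernNormalsFromVbo above: the
// normal at (u,v) is the normalized cross product of the forward differences
// Vr - Vc and Vu - Vc, negated. Illustrative only; plain float[3] arrays are
// used so the snippet stands on its own without the float3/float4 helpers.
#include <math.h>
static void normal_from_vertices_reference(const float Vc[3], const float Vr[3],
                                           const float Vu[3], float N[3]) {
    const float a[3] = {Vr[0] - Vc[0], Vr[1] - Vc[1], Vr[2] - Vc[2]};
    const float b[3] = {Vu[0] - Vc[0], Vu[1] - Vc[1], Vu[2] - Vc[2]};
    const float axb[3] = {a[1] * b[2] - a[2] * b[1],
                          a[2] * b[0] - a[0] * b[2],
                          a[0] * b[1] - a[1] * b[0]};
    const float len = sqrtf(axb[0] * axb[0] + axb[1] * axb[1] + axb[2] * axb[2]);
    N[0] = -axb[0] / len;
    N[1] = -axb[1] / len;
    N[2] = -axb[2] / len;
}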
961d4105ae8f4020cb614a7cf1fb9b6e43a34e40.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctime>
#include <iostream>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
        c[id] = a[id] + b[id];
}
void norm_add()
{
using namespace std;
int n = 1000;
double a[n];
double b[n];
double c[n];
clock_t begin = clock();
for(int i = 0 ; i< n ;i++)
{
c[i] = a[i] + b[i];
}
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    std::cout << "norm add elapsed time: " << elapsed_secs << std::endl;
}
void add(double * h_a , double * h_b , double *h_c,int n)
{
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
// Copy host vectors to device
hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
// Copy array back to host
hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost );
// Release device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
}
int addition( )
{
using namespace std;
// Size of vectors
clock_t begin = clock();
int n = 1000000;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
/* for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}*/
// Copy host vectors to device
hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
// Copy array back to host
hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
// Release device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
    std::cout << "kernel add elapsed time: " << elapsed_secs << std::endl;
return 0;
}
| 961d4105ae8f4020cb614a7cf1fb9b6e43a34e40.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctime>
#include <iostream>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
        c[id] = a[id] + b[id];
}
void norm_add()
{
using namespace std;
int n = 1000;
double a[n];
double b[n];
double c[n];
clock_t begin = clock();
for(int i = 0 ; i< n ;i++)
{
c[i] = a[i] + b[i];
}
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    std::cout << "norm add elapsed time: " << elapsed_secs << std::endl;
}
void add(double * h_a , double * h_b , double *h_c,int n)
{
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
// Copy host vectors to device
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
// Copy array back to host
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
}
int addition( )
{
using namespace std;
// Size of vectors
clock_t begin = clock();
int n = 1000000;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
/* for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}*/
// Copy host vectors to device
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
// Copy array back to host
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
    std::cout << "kernel add elapsed time: " << elapsed_secs << std::endl;
return 0;
}
|
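// The launches above round the grid size up with (int)ceil((float)n/blockSize).
// An equivalent pure-integer form, shown here as an illustrative alternative,
// avoids any float rounding concerns when n grows large:
static inline int grid_size_for(int n, int blockSize) {
    return (n + blockSize - 1) / blockSize;     // ceil(n / blockSize) in integers
}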
7754e32f59ea150f629166de7612609cc281a5ce.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include "../DataBuffer.h"
#include <array/DataTypeUtils.h>
#include <system/op_boilerplate.h>
#include <exceptions/cuda_exception.h>
#include <execution/AffinityManager.h>
#include <memory/MemoryCounter.h>
#include <exceptions/allocation_exception.h>
namespace sd {
void DataBuffer::expand(const uint64_t size) {
if (size > _lenInBytes) {
// allocate new buffer
int8_t *newBuffer = nullptr;
int8_t *newSpecialBuffer = nullptr;
ALLOCATE_SPECIAL(newSpecialBuffer, _workspace, size, int8_t);
// copy data from existing buffer
if (_primaryBuffer != nullptr) {
// there's non-zero chance that primary buffer doesn't exist yet
ALLOCATE(newBuffer, _workspace, size, int8_t);
std::memcpy(newBuffer, _primaryBuffer, _lenInBytes);
if (_isOwnerPrimary) {
auto ipb = reinterpret_cast<int8_t *>(_primaryBuffer);
RELEASE(ipb, _workspace);
}
_primaryBuffer = newBuffer;
_isOwnerPrimary = true;
}
hipMemcpy(newSpecialBuffer, _specialBuffer, _lenInBytes, hipMemcpyDeviceToDevice);
if (_isOwnerSpecial) {
auto isb = reinterpret_cast<int8_t *>(_specialBuffer);
RELEASE_SPECIAL(isb, _workspace);
}
_specialBuffer = newSpecialBuffer;
_lenInBytes = size;
_isOwnerSpecial = true;
}
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::allocateSpecial() {
if (_specialBuffer == nullptr && getLenInBytes() > 0) {
auto deviceId = sd::AffinityManager::currentDeviceId();
if (_workspace == nullptr)
if (!sd::memory::MemoryCounter::getInstance().validate(getLenInBytes()))
throw sd::allocation_exception::build("Requested amount exceeds device limits", sd::memory::MemoryCounter::getInstance().deviceLimit(deviceId), getLenInBytes());
ALLOCATE_SPECIAL(_specialBuffer, _workspace, getLenInBytes(), int8_t);
_isOwnerSpecial = true;
if (_workspace == nullptr) {
sd::memory::MemoryCounter::getInstance().countIn(deviceId, getLenInBytes());
sd::memory::MemoryCounter::getInstance().countIn(sd::memory::MemoryType::DEVICE, getLenInBytes());
}
}
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::syncToPrimary(const LaunchContext* context, const bool forceSync) {
if(isPrimaryActual() && !forceSync) {
return;
}
allocatePrimary();
auto res = hipStreamSynchronize(*context->getCudaStream());
if (res != 0)
        throw cuda_exception::build("DataBuffer::syncToPrimary failed due to some previous kernel failure", res);
res = hipMemcpy(_primaryBuffer, _specialBuffer, getLenInBytes(), hipMemcpyDeviceToHost);
if (res != 0)
throw cuda_exception::build("DataBuffer::syncToPrimary hipMemcpy failed", res);
readPrimary();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::syncToSpecial(const bool forceSync) {
// in this case there's nothing to do here
if (_primaryBuffer == nullptr)
return;
if(isSpecialActual() && !forceSync) {
return;
}
allocateSpecial();
auto res = hipMemcpy(_specialBuffer, _primaryBuffer, getLenInBytes(), hipMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::syncToSpecial hipMemcpy failed", res);
readSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::deleteSpecial() {
if(_isOwnerSpecial && _specialBuffer != nullptr && getLenInBytes() != 0) {
auto p = reinterpret_cast<int8_t*>(_specialBuffer);
RELEASE_SPECIAL(p, _workspace);
_specialBuffer = nullptr;
_isOwnerSpecial = false;
// count out towards DataBuffer device, only if we're not in workspace
if (_workspace == nullptr) {
sd::memory::MemoryCounter::getInstance().countOut(_deviceId, getLenInBytes());
sd::memory::MemoryCounter::getInstance().countOut(sd::memory::MemoryType::DEVICE, getLenInBytes());
}
}
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setCountersToZero() {
_counter.store(0L);
_writePrimary.store(0L);
_writeSpecial.store(0L);
_readPrimary.store(0L);
_readSpecial.store(0L);
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::copyCounters(const DataBuffer& other) {
_counter.store(other._counter);
_writePrimary.store(other._readSpecial);
_writeSpecial.store(other._readPrimary);
_readPrimary.store(other._writeSpecial);
_readSpecial.store(other._writePrimary);
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::copyBufferFrom(const DataBuffer& other, size_t sizeToCopyinBytes, const Nd4jLong offsetThis, const Nd4jLong offsetOther) { // copies only to special buffer
if(other._primaryBuffer == nullptr && other._specialBuffer == nullptr)
return;
if(sizeToCopyinBytes == 0)
sizeToCopyinBytes = other.getLenInBytes();
if(sizeToCopyinBytes == 0)
return;
if(other.isPrimaryActual()) {
auto res = hipMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType), static_cast<const int8_t*>(other._primaryBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType), sizeToCopyinBytes, hipMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyHostToDevice failed!", res);
other.readPrimary();
}
else {
auto res = hipMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType), static_cast<const int8_t*>(other._specialBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType), sizeToCopyinBytes, hipMemcpyDeviceToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyDeviceToDevice failed!", res);
other.readSpecial();
}
writeSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::copyBufferFromHost(const void* hostBuffer, size_t sizeToCopyinBytes, const Nd4jLong offsetThis, const Nd4jLong offsetHostBuffer) { // copies only to special buffer
if(hostBuffer == nullptr)
return;
if(sizeToCopyinBytes == 0)
sizeToCopyinBytes = getLenInBytes();
if(sizeToCopyinBytes == 0)
return;
auto res = hipMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType), static_cast<const int8_t*>(hostBuffer) + offsetHostBuffer * DataTypeUtils::sizeOfElement(_dataType), sizeToCopyinBytes, hipMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFromHost: cudaMemcpy_cudaMemcpyHostToDevice failed!", res);
writeSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setSpecial(void* special, const bool isOwnerSpecial) {
deleteSpecial();
_specialBuffer = special;
_isOwnerSpecial = isOwnerSpecial;
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::allocateBuffers(const bool allocBoth) { // always allocate special buffer only (cuda case)
allocateSpecial();
if(allocBoth)
allocatePrimary();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setToZeroBuffers(const bool both) {
hipMemsetAsync(special(), 0, getLenInBytes(), *LaunchContext::defaultContext()->getCudaStream());
auto res = hipStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream());
if (res != 0)
throw cuda_exception::build("DataBuffer::setToZeroBuffers: streamSync failed!", res);
writeSpecial();
if(both) {
memset(primary(), 0, getLenInBytes());
readPrimary();
}
}
/////////////////////////
void DataBuffer::memcpy(const DataBuffer &dst, const DataBuffer &src) {
if (src._lenInBytes > dst._lenInBytes)
throw std::runtime_error("DataBuffer::memcpy: Source data buffer is larger than destination");
int res = 0;
if (src.isSpecialActual()) {
res = hipMemcpyAsync(dst._specialBuffer, src._specialBuffer, src.getLenInBytes(), hipMemcpyDeviceToDevice, *LaunchContext::defaultContext()->getCudaStream());
} else if (src.isPrimaryActual()) {
res = hipMemcpyAsync(dst._specialBuffer, src._primaryBuffer, src.getLenInBytes(), hipMemcpyHostToDevice, *LaunchContext::defaultContext()->getCudaStream());
}
if (res != 0)
throw cuda_exception::build("DataBuffer::memcpy: hipMemcpyAsync failed!", res);
res = hipStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream());
if (res != 0)
throw cuda_exception::build("DataBuffer::memcpy: streamSync failed!", res);
dst.writeSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::migrate() {
memory::Workspace* newWorkspace = nullptr;
void* newBuffer;
ALLOCATE_SPECIAL(newBuffer, newWorkspace, getLenInBytes(), int8_t);
auto res = hipMemcpy(newBuffer, _specialBuffer, getLenInBytes(), hipMemcpyDeviceToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::migrate: hipMemcpyAsync failed!", res);
if (_isOwnerSpecial) {
// now we're releasing original buffer
RELEASE_SPECIAL(_specialBuffer, _workspace);
}
_isOwnerSpecial = true;
_specialBuffer = newBuffer;
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::writePrimary() const {_writePrimary = ++_counter; }
void DataBuffer::writeSpecial() const { _writeSpecial = ++_counter; }
void DataBuffer::readPrimary() const { _readPrimary = ++_counter; }
void DataBuffer::readSpecial() const { _readSpecial = ++_counter; }
bool DataBuffer::isPrimaryActual() const { return (_writePrimary.load() > _writeSpecial.load() || _readPrimary.load() > _writeSpecial.load()); }
bool DataBuffer::isSpecialActual() const { return (_writeSpecial.load() > _writePrimary.load() || _readSpecial.load() > _writePrimary.load()); }
}
| 7754e32f59ea150f629166de7612609cc281a5ce.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include "../DataBuffer.h"
#include <array/DataTypeUtils.h>
#include <system/op_boilerplate.h>
#include <exceptions/cuda_exception.h>
#include <execution/AffinityManager.h>
#include <memory/MemoryCounter.h>
#include <exceptions/allocation_exception.h>
namespace sd {
void DataBuffer::expand(const uint64_t size) {
if (size > _lenInBytes) {
// allocate new buffer
int8_t *newBuffer = nullptr;
int8_t *newSpecialBuffer = nullptr;
ALLOCATE_SPECIAL(newSpecialBuffer, _workspace, size, int8_t);
// copy data from existing buffer
if (_primaryBuffer != nullptr) {
// there's non-zero chance that primary buffer doesn't exist yet
ALLOCATE(newBuffer, _workspace, size, int8_t);
std::memcpy(newBuffer, _primaryBuffer, _lenInBytes);
if (_isOwnerPrimary) {
auto ipb = reinterpret_cast<int8_t *>(_primaryBuffer);
RELEASE(ipb, _workspace);
}
_primaryBuffer = newBuffer;
_isOwnerPrimary = true;
}
cudaMemcpy(newSpecialBuffer, _specialBuffer, _lenInBytes, cudaMemcpyDeviceToDevice);
if (_isOwnerSpecial) {
auto isb = reinterpret_cast<int8_t *>(_specialBuffer);
RELEASE_SPECIAL(isb, _workspace);
}
_specialBuffer = newSpecialBuffer;
_lenInBytes = size;
_isOwnerSpecial = true;
}
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::allocateSpecial() {
if (_specialBuffer == nullptr && getLenInBytes() > 0) {
auto deviceId = sd::AffinityManager::currentDeviceId();
if (_workspace == nullptr)
if (!sd::memory::MemoryCounter::getInstance().validate(getLenInBytes()))
throw sd::allocation_exception::build("Requested amount exceeds device limits", sd::memory::MemoryCounter::getInstance().deviceLimit(deviceId), getLenInBytes());
ALLOCATE_SPECIAL(_specialBuffer, _workspace, getLenInBytes(), int8_t);
_isOwnerSpecial = true;
if (_workspace == nullptr) {
sd::memory::MemoryCounter::getInstance().countIn(deviceId, getLenInBytes());
sd::memory::MemoryCounter::getInstance().countIn(sd::memory::MemoryType::DEVICE, getLenInBytes());
}
}
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::syncToPrimary(const LaunchContext* context, const bool forceSync) {
if(isPrimaryActual() && !forceSync) {
return;
}
allocatePrimary();
auto res = cudaStreamSynchronize(*context->getCudaStream());
if (res != 0)
        throw cuda_exception::build("DataBuffer::syncToPrimary failed due to some previous kernel failure", res);
res = cudaMemcpy(_primaryBuffer, _specialBuffer, getLenInBytes(), cudaMemcpyDeviceToHost);
if (res != 0)
throw cuda_exception::build("DataBuffer::syncToPrimary cudaMemcpy failed", res);
readPrimary();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::syncToSpecial(const bool forceSync) {
// in this case there's nothing to do here
if (_primaryBuffer == nullptr)
return;
if(isSpecialActual() && !forceSync) {
return;
}
allocateSpecial();
auto res = cudaMemcpy(_specialBuffer, _primaryBuffer, getLenInBytes(), cudaMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::syncToSpecial cudaMemcpy failed", res);
readSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::deleteSpecial() {
if(_isOwnerSpecial && _specialBuffer != nullptr && getLenInBytes() != 0) {
auto p = reinterpret_cast<int8_t*>(_specialBuffer);
RELEASE_SPECIAL(p, _workspace);
_specialBuffer = nullptr;
_isOwnerSpecial = false;
// count out towards DataBuffer device, only if we're not in workspace
if (_workspace == nullptr) {
sd::memory::MemoryCounter::getInstance().countOut(_deviceId, getLenInBytes());
sd::memory::MemoryCounter::getInstance().countOut(sd::memory::MemoryType::DEVICE, getLenInBytes());
}
}
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setCountersToZero() {
_counter.store(0L);
_writePrimary.store(0L);
_writeSpecial.store(0L);
_readPrimary.store(0L);
_readSpecial.store(0L);
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::copyCounters(const DataBuffer& other) {
_counter.store(other._counter);
_writePrimary.store(other._readSpecial);
_writeSpecial.store(other._readPrimary);
_readPrimary.store(other._writeSpecial);
_readSpecial.store(other._writePrimary);
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::copyBufferFrom(const DataBuffer& other, size_t sizeToCopyinBytes, const Nd4jLong offsetThis, const Nd4jLong offsetOther) { // copies only to special buffer
if(other._primaryBuffer == nullptr && other._specialBuffer == nullptr)
return;
if(sizeToCopyinBytes == 0)
sizeToCopyinBytes = other.getLenInBytes();
if(sizeToCopyinBytes == 0)
return;
if(other.isPrimaryActual()) {
auto res = cudaMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType), static_cast<const int8_t*>(other._primaryBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType), sizeToCopyinBytes, cudaMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyHostToDevice failed!", res);
other.readPrimary();
}
else {
auto res = cudaMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType), static_cast<const int8_t*>(other._specialBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType), sizeToCopyinBytes, cudaMemcpyDeviceToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyDeviceToDevice failed!", res);
other.readSpecial();
}
writeSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::copyBufferFromHost(const void* hostBuffer, size_t sizeToCopyinBytes, const Nd4jLong offsetThis, const Nd4jLong offsetHostBuffer) { // copies only to special buffer
if(hostBuffer == nullptr)
return;
if(sizeToCopyinBytes == 0)
sizeToCopyinBytes = getLenInBytes();
if(sizeToCopyinBytes == 0)
return;
auto res = cudaMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType), static_cast<const int8_t*>(hostBuffer) + offsetHostBuffer * DataTypeUtils::sizeOfElement(_dataType), sizeToCopyinBytes, cudaMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFromHost: cudaMemcpy_cudaMemcpyHostToDevice failed!", res);
writeSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setSpecial(void* special, const bool isOwnerSpecial) {
deleteSpecial();
_specialBuffer = special;
_isOwnerSpecial = isOwnerSpecial;
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::allocateBuffers(const bool allocBoth) { // always allocate special buffer only (cuda case)
allocateSpecial();
if(allocBoth)
allocatePrimary();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setToZeroBuffers(const bool both) {
cudaMemsetAsync(special(), 0, getLenInBytes(), *LaunchContext::defaultContext()->getCudaStream());
auto res = cudaStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream());
if (res != 0)
throw cuda_exception::build("DataBuffer::setToZeroBuffers: streamSync failed!", res);
writeSpecial();
if(both) {
memset(primary(), 0, getLenInBytes());
readPrimary();
}
}
/////////////////////////
void DataBuffer::memcpy(const DataBuffer &dst, const DataBuffer &src) {
if (src._lenInBytes > dst._lenInBytes)
throw std::runtime_error("DataBuffer::memcpy: Source data buffer is larger than destination");
int res = 0;
if (src.isSpecialActual()) {
res = cudaMemcpyAsync(dst._specialBuffer, src._specialBuffer, src.getLenInBytes(), cudaMemcpyDeviceToDevice, *LaunchContext::defaultContext()->getCudaStream());
} else if (src.isPrimaryActual()) {
res = cudaMemcpyAsync(dst._specialBuffer, src._primaryBuffer, src.getLenInBytes(), cudaMemcpyHostToDevice, *LaunchContext::defaultContext()->getCudaStream());
}
if (res != 0)
throw cuda_exception::build("DataBuffer::memcpy: cudaMemcpyAsync failed!", res);
res = cudaStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream());
if (res != 0)
throw cuda_exception::build("DataBuffer::memcpy: streamSync failed!", res);
dst.writeSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::migrate() {
memory::Workspace* newWorkspace = nullptr;
void* newBuffer;
ALLOCATE_SPECIAL(newBuffer, newWorkspace, getLenInBytes(), int8_t);
auto res = cudaMemcpy(newBuffer, _specialBuffer, getLenInBytes(), cudaMemcpyDeviceToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::migrate: cudaMemcpyAsync failed!", res);
if (_isOwnerSpecial) {
// now we're releasing original buffer
RELEASE_SPECIAL(_specialBuffer, _workspace);
}
_isOwnerSpecial = true;
_specialBuffer = newBuffer;
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::writePrimary() const {_writePrimary = ++_counter; }
void DataBuffer::writeSpecial() const { _writeSpecial = ++_counter; }
void DataBuffer::readPrimary() const { _readPrimary = ++_counter; }
void DataBuffer::readSpecial() const { _readSpecial = ++_counter; }
bool DataBuffer::isPrimaryActual() const { return (_writePrimary.load() > _writeSpecial.load() || _readPrimary.load() > _writeSpecial.load()); }
bool DataBuffer::isSpecialActual() const { return (_writeSpecial.load() > _writePrimary.load() || _readSpecial.load() > _writePrimary.load()); }
}
|
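// Standalone model of the read/write counter protocol at the end of the pair
// above (illustrative only; plain longs replace the atomics). A side is
// "actual" when its latest write or read is newer than the other side's latest
// write, which is exactly what isPrimaryActual()/isSpecialActual() test before
// deciding whether syncToPrimary()/syncToSpecial() actually needs to copy.
struct CounterProtocolSketch {
    long counter = 0, writePrimary = 0, writeSpecial = 0,
         readPrimary = 0, readSpecial = 0;
    void hostWrite() { writePrimary = ++counter; }    // mirrors writePrimary()
    void deviceWrite() { writeSpecial = ++counter; }  // mirrors writeSpecial()
    void hostRead() { readPrimary = ++counter; }      // mirrors readPrimary()
    void deviceRead() { readSpecial = ++counter; }    // mirrors readSpecial()
    bool primaryActual() const {
        return writePrimary > writeSpecial || readPrimary > writeSpecial;
    }
    bool specialActual() const {
        return writeSpecial > writePrimary || readSpecial > writePrimary;
    }
};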
2d04e4b98e2adeb1458306955d58334278b37e73.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file color_app.cu
*
* @brief Graph Coloring Gunrock Application
*/
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definitions
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#include <gunrock/graphio/graphio.cuh>
// Graph Coloring
#include <gunrock/app/color/color_enactor.cuh>
#include <gunrock/app/color/color_test.cuh>
// Others
#include <cstdio>
namespace gunrock {
namespace app {
namespace color {
hipError_t UseParameters(util::Parameters ¶meters) {
hipError_t retval = hipSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(UseParameters_test(parameters));
GUARD_CU(parameters.Use<unsigned int>(
"num-colors",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::INTERNAL_PARAMETER,
0, "number of output colors", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"loop-color", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true,
"Serially compare rand to all node neighbor, disable to use advance \
neighbor reduce (default=false)",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"min-color", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true,
"Enable coloring with minimum independent set as well as \
maximum(default=true)",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"test-run", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true,
"Perform test run to atomically generate max iteration (default=true)",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"user-iter",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
3, "Number of iterations color should run for (default=3).", __FILE__,
__LINE__));
GUARD_CU(parameters.Use<bool>(
"JPL", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, false,
"Use JPL exact coloring method (true=use JPL).", __FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"no-conflict", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0,
"Resolve color conflict, 0 to skip check, 1 to check at end of\
every iteration with random,\
2 to check at end of every iteration with degree(default = 0).",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"prohibit-size", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0,
"Needed to allocate memory for hash function, if parameter is\
positive,\
hash coloring is used instead of random coloring (default = 0).",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"seed", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, time(NULL),
"seed for random number generator", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"LBCOLOR", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, false,
"load balancing enabled for graph coloring (true=neighbor_reduce)",
__FILE__, __LINE__));
return retval;
}
/**
* @brief Run color tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return hipError_t error message(s), if any
*/
template <typename GraphT>
hipError_t RunTests(util::Parameters ¶meters, GraphT &graph,
bool color_balance, typename GraphT::VertexT *ref_colors,
util::Location target) {
hipError_t retval = hipSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("color", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
VertexT *h_colors = new VertexT[graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
int num_colors = 0;
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(target));
GUARD_CU(enactor.Reset(target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_colors));
SizeT num_errors = Validate_Results(parameters, graph, h_colors,
ref_colors, false);
}
}
cpu_timer.Start();
GUARD_CU(problem.Extract(h_colors));
if (validation == "last") {
SizeT num_errors = Validate_Results(parameters, graph, h_colors, ref_colors,
false);
}
// count number of colors
std::unordered_set<int> set;
for (SizeT v = 0; v < graph.nodes; v++) {
int c = h_colors[v];
if (set.find(c) == set.end()) {
set.insert(c);
num_colors++;
}
}
util::PrintMsg("Number of colors: " + std::to_string(num_colors), !quiet_mode);
parameters.Set("num-colors", num_colors);
// compute running statistics
info.ComputeTraversalStats(enactor, (VertexT *)NULL);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(enactor);
#endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_colors;
h_colors = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace color
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_color function
* @tparam GraphT Type of the graph
* @tparam VertexT Type of the colors
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] colors Return generated colors for each run
* @param[out] num_colors Return number of colors generated for each run
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename VertexT = typename GraphT::VertexT,
typename SizeT = typename GraphT::SizeT>
double gunrock_color(gunrock::util::Parameters ¶meters, GraphT &graph,
VertexT **colors, SizeT *num_colors) {
typedef gunrock::app::color::Problem<GraphT> ProblemT;
typedef gunrock::app::color::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
problem.Init(graph, target);
enactor.Init(problem, target);
int num_runs = parameters.Get<int>("num-runs");
for (int run_num = 0; run_num < num_runs; ++run_num) {
problem.Reset(target);
enactor.Reset(target);
cpu_timer.Start();
enactor.Enact();
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
problem.Extract(colors[run_num]);
// count number of colors
std::unordered_set<int> set;
for (SizeT v = 0; v < graph.nodes; v++) {
int c = colors[run_num][v];
if (set.find(c) == set.end()) {
set.insert(c);
num_colors[run_num] += 1;
}
}
}
enactor.Release(target);
problem.Release(target);
return total_time;
}
/*
* @brief Entry of gunrock_color function
* @tparam VertexT Type of the colors
* @tparam SizeT Type of the num_colors
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] colors Return generated colors for each run
* @param[out] num_colors Return number of colors generated for each run
* \return double Return accumulated elapsed times for all runs
*/
template <typename VertexT = int, typename SizeT = int,
typename GValueT = unsigned int>
double color(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
const int num_runs, int **colors, int *num_colors,
const GValueT edge_values = NULL) {
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("color");
gunrock::graphio::UseParameters(parameters);
gunrock::app::color::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("num-runs", num_runs);
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
// Assign pointers into gunrock graph format
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1,
gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges,
gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
gunrock::graphio::LoadGraph(parameters, graph);
// Run the graph coloring
double elapsed_time = gunrock_color(parameters, graph, colors, num_colors);
// Cleanup
graph.Release();
return elapsed_time;
}
/*
* @brief Entry of gunrock_color function
* @tparam VertexT Type of the colors
* @tparam SizeT Type of the num_colors
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] colors Return generated colors for each run
* @param[out] num_colors Return number of colors generated for each run
* \return double Return accumulated elapsed times for all runs
*/
double color(const int num_nodes, const int num_edges, const int *row_offsets,
const int *col_indices, int *colors, int num_colors) {
return color(num_nodes, num_edges, row_offsets, col_indices, 1 /* num_runs */,
&colors, &num_colors);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| 2d04e4b98e2adeb1458306955d58334278b37e73.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file color_app.cu
*
* @brief Graph Coloring Gunrock Application
*/
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definitions
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#include <gunrock/graphio/graphio.cuh>
// Graph Coloring
#include <gunrock/app/color/color_enactor.cuh>
#include <gunrock/app/color/color_test.cuh>
// Others
#include <cstdio>
namespace gunrock {
namespace app {
namespace color {
cudaError_t UseParameters(util::Parameters ¶meters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(UseParameters_test(parameters));
GUARD_CU(parameters.Use<unsigned int>(
"num-colors",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::INTERNAL_PARAMETER,
0, "number of output colors", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"loop-color", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true,
"Serially compare rand to all node neighbor, disable to use advance \
neighbor reduce (default=false)",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"min-color", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true,
"Enable coloring with minimum independent set as well as \
maximum(default=true)",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"test-run", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true,
"Perform test run to atomically generate max iteration (default=true)",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"user-iter",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
3, "Number of iterations color should run for (default=3).", __FILE__,
__LINE__));
GUARD_CU(parameters.Use<bool>(
"JPL", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, false,
"Use JPL exact coloring method (true=use JPL).", __FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"no-conflict", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0,
"Resolve color conflict, 0 to skip check, 1 to check at end of\
every iteration with random,\
2 to check at end of every iteration with degree(default = 0).",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"prohibit-size", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0,
"Needed to allocate memory for hash function, if parameter is\
positive,\
hash coloring is used instead of random coloring (default = 0).",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"seed", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, time(NULL),
"seed for random number generator", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"LBCOLOR", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, false,
"load balancing enabled for graph coloring (true=neighbor_reduce)",
__FILE__, __LINE__));
return retval;
}
/**
* @brief Run color tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return cudaError_t error message(s), if any
*/
template <typename GraphT>
cudaError_t RunTests(util::Parameters ¶meters, GraphT &graph,
bool color_balance, typename GraphT::VertexT *ref_colors,
util::Location target) {
cudaError_t retval = cudaSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("color", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
VertexT *h_colors = new VertexT[graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
int num_colors = 0;
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(target));
GUARD_CU(enactor.Reset(target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_colors));
SizeT num_errors = Validate_Results(parameters, graph, h_colors,
ref_colors, false);
}
}
cpu_timer.Start();
GUARD_CU(problem.Extract(h_colors));
if (validation == "last") {
SizeT num_errors = Validate_Results(parameters, graph, h_colors, ref_colors,
false);
}
// count number of colors
std::unordered_set<int> set;
for (SizeT v = 0; v < graph.nodes; v++) {
int c = h_colors[v];
if (set.find(c) == set.end()) {
set.insert(c);
num_colors++;
}
}
util::PrintMsg("Number of colors: " + std::to_string(num_colors), !quiet_mode);
parameters.Set("num-colors", num_colors);
// compute running statistics
info.ComputeTraversalStats(enactor, (VertexT *)NULL);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(enactor);
#endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_colors;
h_colors = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace color
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_color function
* @tparam GraphT Type of the graph
* @tparam VertexT Type of the colors
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] colors Return generated colors for each run
* @param[out] num_colors Return number of colors generated for each run
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename VertexT = typename GraphT::VertexT,
typename SizeT = typename GraphT::SizeT>
double gunrock_color(gunrock::util::Parameters ¶meters, GraphT &graph,
VertexT **colors, SizeT *num_colors) {
typedef gunrock::app::color::Problem<GraphT> ProblemT;
typedef gunrock::app::color::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
problem.Init(graph, target);
enactor.Init(problem, target);
int num_runs = parameters.Get<int>("num-runs");
for (int run_num = 0; run_num < num_runs; ++run_num) {
problem.Reset(target);
enactor.Reset(target);
cpu_timer.Start();
enactor.Enact();
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
problem.Extract(colors[run_num]);
// count number of colors
std::unordered_set<int> set;
for (SizeT v = 0; v < graph.nodes; v++) {
int c = colors[run_num][v];
if (set.find(c) == set.end()) {
set.insert(c);
num_colors[run_num] += 1;
}
}
}
enactor.Release(target);
problem.Release(target);
return total_time;
}
/*
* @brief Entry of gunrock_color function
* @tparam VertexT Type of the colors
* @tparam SizeT Type of the num_colors
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] colors Return generated colors for each run
* @param[out] num_colors Return number of colors generated for each run
* \return double Return accumulated elapsed times for all runs
*/
template <typename VertexT = int, typename SizeT = int,
typename GValueT = unsigned int>
double color(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
const int num_runs, int **colors, int *num_colors,
const GValueT edge_values = NULL) {
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("color");
gunrock::graphio::UseParameters(parameters);
gunrock::app::color::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("num-runs", num_runs);
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
// Assign pointers into gunrock graph format
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1,
gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges,
gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
gunrock::graphio::LoadGraph(parameters, graph);
// Run the graph coloring
double elapsed_time = gunrock_color(parameters, graph, colors, num_colors);
// Cleanup
graph.Release();
return elapsed_time;
}
/*
* @brief Entry of gunrock_color function
* @tparam VertexT Type of the colors
* @tparam SizeT Type of the num_colors
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] colors Return generated colors for each run
* @param[out] num_colors Return number of colors generated for each run
* \return double Return accumulated elapsed times for all runs
*/
double color(const int num_nodes, const int num_edges, const int *row_offsets,
const int *col_indices, int *colors, int num_colors) {
return color(num_nodes, num_edges, row_offsets, col_indices, 1 /* num_runs */,
&colors, &num_colors);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
8dcb13de2f752a4168fc171db78741917a1b2fca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
*/
// Graph generation
// Author: Ramakrishna Prabhu [email protected]
#include <stdio.h>
#include <string>
#include <omp.h>
// Utilities and correctness-checking
#include <gunrock/util/multithread_utils.cuh>
#include <gunrock/util/sort_omp.cuh>
#include <gunrock/csr.cuh>
#include <gunrock/graphio/grmat.cuh>
#include <gunrock/coo.cuh>
#include <moderngpu.cuh>
// boost includes
#include <boost/config.hpp>
#include <boost/utility.hpp>
#include <gunrock/util/shared_utils.cuh>
#include <cudf/cudf.h>
#include <thrust/extrema.h>
#include "utilities/error_utils.h"
#include "graph_utils.cuh"
#include <rmm_utils.h>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::graphio;
using namespace gunrock::graphio::grmat;
template <typename VertexId, typename Value, typename SizeT>
__global__ void Remove_Self_Loops (VertexId* row, VertexId* col, Value* val, SizeT edges)
{
SizeT i = (SizeT)blockIdx.x * blockDim.x + threadIdx.x;
if (i < edges)
{
if (row[i] == col[i])
{
col[i] = 0;
}
}
}
// rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)
// Generate R-MAT graph as input
// --rmat_scale=<vertex-scale>
// --rmat_nodes=<number-nodes>
// --rmat_edgefactor=<edge-factor>
// --rmat_edges=<number-edges>
// --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>
// --rmat_self_loops If this option is supplied, then self loops will be retained
// --rmat_undirected If this option is not mentioned, then the graphs will be undirected
// Optional arguments:
// [--device=<device_index>] Set GPU(s) for testing (Default: 0).
// [--quiet] No output (unless --json is specified).
// [--random_seed] This will enable usage of random seed, else it will use same seed
// [--normalized]\n
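// Illustrative call (hypothetical values, nothing below is defined in this block):
// given `gdf_column src, dest; size_t v = 0, e = 0;`, the entry point defined at
// the bottom of this file can be driven as
// gdf_grmat_gen("grmat --rmat_scale=16 --rmat_edgefactor=32 --device=0 --quiet",
// v, e, &src, &dest, nullptr);
// The leading "grmat" token only stands in for a program name: the string is
// tokenized on spaces before being handed to CommandLineArgs.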
template<
typename VertexId,
typename SizeT,
typename Value>
gdf_error main_(gdf_column *src, gdf_column *dest, gdf_column *val, CommandLineArgs *args, size_t &vertices, size_t &edges)
{
CpuTimer cpu_timer, cpu_timer2;
SizeT rmat_nodes = 1 << 10;
SizeT rmat_edges = 1 << 10;
SizeT rmat_scale = 10;
SizeT rmat_edgefactor = 48;
double rmat_a = 0.57;
double rmat_b = 0.19;
double rmat_c = 0.19;
double rmat_d = 1 - (rmat_a + rmat_b + rmat_c);
double rmat_vmin = 1;
double rmat_vmultipiler = 64;
int rmat_seed = 888;
bool undirected = false;
bool self_loops = false;
SizeT rmat_all_edges = rmat_edges;
std::string file_name;
bool quiet = false;
typedef Coo_nv<VertexId, Value> EdgeTupleType;
cpu_timer.Start();
if (args->CheckCmdLineFlag ("rmat_scale") && args->CheckCmdLineFlag ("rmat_nodes"))
{
printf ("Please mention scale or nodes, not both \n");
return GDF_UNSUPPORTED_METHOD;
}
else if (args->CheckCmdLineFlag ("rmat_edgefactor") && args->CheckCmdLineFlag ("rmat_edges"))
{
printf ("Please mention edgefactor or edge, not both \n");
return GDF_UNSUPPORTED_METHOD;
}
self_loops = args->CheckCmdLineFlag ("rmat_self_loops");
// graph construction or generation related parameters
if (args -> CheckCmdLineFlag("normalized"))
undirected = args -> CheckCmdLineFlag("rmat_undirected");
else undirected = true; // require undirected input graph when unnormalized
quiet = args->CheckCmdLineFlag("quiet");
args->GetCmdLineArgument("rmat_scale", rmat_scale);
rmat_nodes = 1 << rmat_scale;
args->GetCmdLineArgument("rmat_nodes", rmat_nodes);
args->GetCmdLineArgument("rmat_edgefactor", rmat_edgefactor);
rmat_edges = rmat_nodes * rmat_edgefactor;
args->GetCmdLineArgument("rmat_edges", rmat_edges);
args->GetCmdLineArgument("rmat_a", rmat_a);
args->GetCmdLineArgument("rmat_b", rmat_b);
args->GetCmdLineArgument("rmat_c", rmat_c);
rmat_d = 1 - (rmat_a + rmat_b + rmat_c);
args->GetCmdLineArgument("rmat_d", rmat_d);
args->GetCmdLineArgument("rmat_vmin", rmat_vmin);
args->GetCmdLineArgument("rmat_vmultipiler", rmat_vmultipiler);
args->GetCmdLineArgument("file_name", file_name);
if (args->CheckCmdLineFlag("random_seed"))
{
rmat_seed = -1;
}
EdgeTupleType coo;
if (undirected == true)
{
rmat_all_edges = 2 * rmat_edges;
}
else
{
rmat_all_edges = rmat_edges;
}
std::vector<int> temp_devices;
if (args->CheckCmdLineFlag("device")) // parse device list
{
args->GetCmdLineArguments<int>("device", temp_devices);
}
else // use single device with index 0
{
int gpu_idx;
util::GRError(hipGetDevice(&gpu_idx),
"hipGetDevice failed", __FILE__, __LINE__);
temp_devices.push_back(gpu_idx);
}
int *gpu_idx = new int[temp_devices.size()];
for (unsigned int i=0; i<temp_devices.size(); i++)
gpu_idx[i] = temp_devices[i];
if (!quiet)
{
printf ("---------Graph properties-------\n"
" Undirected : %s\n"
" Nodes : %lld\n"
" Edges : %lld\n"
" a = %f, b = %f, c = %f, d = %f\n\n\n", ((undirected == true)? "True": "False"), (long long)rmat_nodes,
(long long)(rmat_edges * ((undirected == true)? 2: 1)), rmat_a, rmat_b, rmat_c, rmat_d);
}
if (util::SetDevice(gpu_idx[0]))
return GDF_CUDA_ERROR;
hipStream_t stream {nullptr};
ALLOC_TRY((void**)&coo.row, sizeof(VertexId) * rmat_all_edges, stream);
ALLOC_TRY((void**)&coo.col, sizeof(VertexId) * rmat_all_edges, stream);
if (val != nullptr)
{
ALLOC_TRY((void**)&coo.val, sizeof(Value) * rmat_all_edges, stream);
}
if ((coo.row == NULL) ||(coo.col == NULL))
{
if (!quiet)
printf ("Error: Cuda malloc failed \n");
if (coo.row != nullptr)
ALLOC_FREE_TRY(coo.row, stream);
if (coo.col != nullptr)
ALLOC_FREE_TRY(coo.col, stream);
return GDF_CUDA_ERROR;
}
cpu_timer2.Start();
hipError_t status = hipSuccess;
if(val == nullptr)
status = BuildRmatGraph_coo_nv<false, VertexId, SizeT, Value, EdgeTupleType>(rmat_nodes, rmat_edges, coo, undirected,
rmat_a, rmat_b, rmat_c, rmat_d, rmat_vmultipiler, rmat_vmin, rmat_seed,
quiet, temp_devices.size(), gpu_idx);
else
status = BuildRmatGraph_coo_nv<true, VertexId, SizeT, Value, EdgeTupleType>(rmat_nodes, rmat_edges, coo, undirected,
rmat_a, rmat_b, rmat_c, rmat_d, rmat_vmultipiler, rmat_vmin, rmat_seed,
quiet, temp_devices.size(), gpu_idx);
cpu_timer2.Stop();
if (status == hipSuccess)
{
if (!quiet)
printf ("Graph has been generated \n");
}
else
{
if (coo.row != nullptr)
ALLOC_FREE_TRY(coo.row, stream);
if (coo.col != nullptr)
ALLOC_FREE_TRY(coo.col, stream);
if (coo.val != nullptr)
ALLOC_FREE_TRY(coo.val, stream);
return GDF_CUDA_ERROR;
}
int block_size = (sizeof(VertexId) == 4) ? 1024 : 512;
int grid_size = rmat_all_edges / block_size + 1;
if (util::SetDevice(gpu_idx[0]))
return GDF_CUDA_ERROR;
if ((self_loops != false) && (val != nullptr))
{
hipLaunchKernelGGL((Remove_Self_Loops<VertexId, Value, SizeT>),
dim3(grid_size), dim3(block_size), 0, 0,
coo.row, coo.col, coo.val, rmat_all_edges);
}
cugraph::detail::remove_duplicate (coo.row, coo.col, coo.val, rmat_all_edges);
thrust::device_ptr<VertexId> tmp;
VertexId nodes_row = 0;
VertexId nodes_col = 0;
hipMemcpy((void*)&nodes_row, (void*)&(coo.row[rmat_all_edges-1]), sizeof(VertexId), hipMemcpyDeviceToHost);
tmp = thrust::max_element(rmm::exec_policy(stream)->on(stream),
thrust::device_pointer_cast((VertexId*)(coo.col)),
thrust::device_pointer_cast((VertexId*)(coo.col + rmat_all_edges)));
nodes_col = tmp[0];
VertexId max_nodes = (nodes_row > nodes_col)? nodes_row: nodes_col;
cpu_timer.Stop();
if ((src != nullptr) && (dest != nullptr))
{
src->data = coo.row;
src->size = rmat_all_edges;
src->valid = nullptr;
dest->data = coo.col;
dest->size = rmat_all_edges;
dest->valid = nullptr;
}
else
{
if (coo.row != nullptr)
ALLOC_FREE_TRY(coo.row, stream);
if (coo.col != nullptr)
ALLOC_FREE_TRY(coo.col, stream);
if (coo.val != nullptr)
ALLOC_FREE_TRY(coo.val, stream);
if (!quiet)
printf ("Error : Pointers for gdf column are null, releasing allocated memory for graph\n");
return GDF_CUDA_ERROR;
}
if (val != nullptr)
{
val->data = coo.val;
val->size = rmat_all_edges;
val->valid = nullptr;
}
vertices = max_nodes+1;
edges = rmat_all_edges;
if (!quiet)
printf ("Time to generate the graph %f ms\n"
"Total time %f ms\n", cpu_timer2.ElapsedMillis(), cpu_timer.ElapsedMillis());
return GDF_SUCCESS;
}
void free_args (char argc, char** args)
{
for (int i = 0; i < argc; i++)
free(args[i]);
}
gdf_error gdf_grmat_gen (const char* argv, size_t& vertices, size_t& edges, gdf_column *src, gdf_column *dest, gdf_column *val)
{
int argc = 0;
char* arg[32] = {0};
char* tmp = nullptr;
char tmp_argv [1024] = {0};
strcpy(tmp_argv, argv);
tmp = strtok (tmp_argv, " ");
for (int i = 0; tmp != nullptr; i++)
{
arg[i] = (char*) malloc (sizeof(char)*(strlen(tmp)+1));
strcpy(arg[i], tmp);
argc += 1;
tmp = strtok(NULL, " ");
}
CommandLineArgs args(argc, arg);
int graph_args = argc - args.ParsedArgc() - 1;
gdf_error status = GDF_CUDA_ERROR;
if (src == nullptr || dest == nullptr)
{
free_args(argc, arg);
return GDF_DATASET_EMPTY;
}
CUGRAPH_EXPECTS ((src->dtype == dest->dtype), GDF_DTYPE_MISMATCH);
CUGRAPH_EXPECTS (src->null_count == 0, "Column must be valid");
if (argc < 2 || args.CheckCmdLineFlag("help"))
{
free_args(argc, arg);
return GDF_UNSUPPORTED_METHOD;
}
if (src->dtype == GDF_INT64)
{
if ((val != nullptr) && (val->dtype == GDF_FLOAT64))
{
status = main_<long long, long long, double> (src, dest, val, &args, vertices, edges);
}
else
{
status = main_<long long, long long, float> (src, dest, val, &args, vertices, edges);
}
}
else
{
if ((val != nullptr) && (val->dtype == GDF_FLOAT64))
{
status = main_ <int, int, double> (src, dest, val, &args, vertices, edges);
}
else
{
status = main_ <int, int, float> (src, dest, val, &args, vertices, edges);
}
}
free_args(argc, arg);
CUGRAPH_EXPECTS((src->size == dest->size), "Column size mismatch");
CUGRAPH_EXPECTS ((src->dtype == dest->dtype), GDF_DTYPE_MISMATCH);
CUGRAPH_EXPECTS (src->null_count == 0, "Column must be valid");
return status;
}
| 8dcb13de2f752a4168fc171db78741917a1b2fca.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
*/
// Graph generation
// Author: Ramakrishna Prabhu [email protected]
#include <stdio.h>
#include <string>
#include <omp.h>
// Utilities and correctness-checking
#include <gunrock/util/multithread_utils.cuh>
#include <gunrock/util/sort_omp.cuh>
#include <gunrock/csr.cuh>
#include <gunrock/graphio/grmat.cuh>
#include <gunrock/coo.cuh>
#include <moderngpu.cuh>
// boost includes
#include <boost/config.hpp>
#include <boost/utility.hpp>
#include <gunrock/util/shared_utils.cuh>
#include <cudf/cudf.h>
#include <thrust/extrema.h>
#include "utilities/error_utils.h"
#include "graph_utils.cuh"
#include <rmm_utils.h>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::graphio;
using namespace gunrock::graphio::grmat;
template <typename VertexId, typename Value, typename SizeT>
__global__ void Remove_Self_Loops (VertexId* row, VertexId* col, Value* val, SizeT edges)
{
SizeT i = (SizeT)blockIdx.x * blockDim.x + threadIdx.x;
if (i < edges)
{
if (row[i] == col[i])
{
col[i] = 0;
}
}
}
// rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)
// Generate R-MAT graph as input
// --rmat_scale=<vertex-scale>
// --rmat_nodes=<number-nodes>
// --rmat_edgefactor=<edge-factor>
// --rmat_edges=<number-edges>
// --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>
// --rmat_self_loops If this option is supplied, then self loops will be retained
// --rmat_undirected If this option is not mentioned, then the graphs will be undirected
// Optional arguments:
// [--device=<device_index>] Set GPU(s) for testing (Default: 0).
// [--quiet] No output (unless --json is specified).
// [--random_seed] This will enable usage of random seed, else it will use same seed
// [--normalized]\n
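// Illustrative call (hypothetical values, nothing below is defined in this block):
// given `gdf_column src, dest; size_t v = 0, e = 0;`, the entry point defined at
// the bottom of this file can be driven as
// gdf_grmat_gen("grmat --rmat_scale=16 --rmat_edgefactor=32 --device=0 --quiet",
// v, e, &src, &dest, nullptr);
// The leading "grmat" token only stands in for a program name: the string is
// tokenized on spaces before being handed to CommandLineArgs.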
template<
typename VertexId,
typename SizeT,
typename Value>
gdf_error main_(gdf_column *src, gdf_column *dest, gdf_column *val, CommandLineArgs *args, size_t &vertices, size_t &edges)
{
CpuTimer cpu_timer, cpu_timer2;
SizeT rmat_nodes = 1 << 10;
SizeT rmat_edges = 1 << 10;
SizeT rmat_scale = 10;
SizeT rmat_edgefactor = 48;
double rmat_a = 0.57;
double rmat_b = 0.19;
double rmat_c = 0.19;
double rmat_d = 1 - (rmat_a + rmat_b + rmat_c);
double rmat_vmin = 1;
double rmat_vmultipiler = 64;
int rmat_seed = 888;
bool undirected = false;
bool self_loops = false;
SizeT rmat_all_edges = rmat_edges;
std::string file_name;
bool quiet = false;
typedef Coo_nv<VertexId, Value> EdgeTupleType;
cpu_timer.Start();
if (args->CheckCmdLineFlag ("rmat_scale") && args->CheckCmdLineFlag ("rmat_nodes"))
{
printf ("Please mention scale or nodes, not both \n");
return GDF_UNSUPPORTED_METHOD;
}
else if (args->CheckCmdLineFlag ("rmat_edgefactor") && args->CheckCmdLineFlag ("rmat_edges"))
{
printf ("Please mention edgefactor or edge, not both \n");
return GDF_UNSUPPORTED_METHOD;
}
self_loops = args->CheckCmdLineFlag ("rmat_self_loops");
// graph construction or generation related parameters
if (args -> CheckCmdLineFlag("normalized"))
undirected = args -> CheckCmdLineFlag("rmat_undirected");
else undirected = true; // require undirected input graph when unnormalized
quiet = args->CheckCmdLineFlag("quiet");
args->GetCmdLineArgument("rmat_scale", rmat_scale);
rmat_nodes = 1 << rmat_scale;
args->GetCmdLineArgument("rmat_nodes", rmat_nodes);
args->GetCmdLineArgument("rmat_edgefactor", rmat_edgefactor);
rmat_edges = rmat_nodes * rmat_edgefactor;
args->GetCmdLineArgument("rmat_edges", rmat_edges);
args->GetCmdLineArgument("rmat_a", rmat_a);
args->GetCmdLineArgument("rmat_b", rmat_b);
args->GetCmdLineArgument("rmat_c", rmat_c);
rmat_d = 1 - (rmat_a + rmat_b + rmat_c);
args->GetCmdLineArgument("rmat_d", rmat_d);
args->GetCmdLineArgument("rmat_vmin", rmat_vmin);
args->GetCmdLineArgument("rmat_vmultipiler", rmat_vmultipiler);
args->GetCmdLineArgument("file_name", file_name);
if (args->CheckCmdLineFlag("random_seed"))
{
rmat_seed = -1;
}
EdgeTupleType coo;
if (undirected == true)
{
rmat_all_edges = 2 * rmat_edges;
}
else
{
rmat_all_edges = rmat_edges;
}
std::vector<int> temp_devices;
if (args->CheckCmdLineFlag("device")) // parse device list
{
args->GetCmdLineArguments<int>("device", temp_devices);
}
else // use single device with index 0
{
int gpu_idx;
util::GRError(cudaGetDevice(&gpu_idx),
"cudaGetDevice failed", __FILE__, __LINE__);
temp_devices.push_back(gpu_idx);
}
int *gpu_idx = new int[temp_devices.size()];
for (unsigned int i=0; i<temp_devices.size(); i++)
gpu_idx[i] = temp_devices[i];
if (!quiet)
{
printf ("---------Graph properties-------\n"
" Undirected : %s\n"
" Nodes : %lld\n"
" Edges : %lld\n"
" a = %f, b = %f, c = %f, d = %f\n\n\n", ((undirected == true)? "True": "False"), (long long)rmat_nodes,
(long long)(rmat_edges * ((undirected == true)? 2: 1)), rmat_a, rmat_b, rmat_c, rmat_d);
}
if (util::SetDevice(gpu_idx[0]))
return GDF_CUDA_ERROR;
cudaStream_t stream {nullptr};
ALLOC_TRY((void**)&coo.row, sizeof(VertexId) * rmat_all_edges, stream);
ALLOC_TRY((void**)&coo.col, sizeof(VertexId) * rmat_all_edges, stream);
if (val != nullptr)
{
ALLOC_TRY((void**)&coo.val, sizeof(Value) * rmat_all_edges, stream);
}
if ((coo.row == NULL) ||(coo.col == NULL))
{
if (!quiet)
printf ("Error: Cuda malloc failed \n");
if (coo.row != nullptr)
ALLOC_FREE_TRY(coo.row, stream);
if (coo.col != nullptr)
ALLOC_FREE_TRY(coo.col, stream);
return GDF_CUDA_ERROR;
}
cpu_timer2.Start();
cudaError_t status = cudaSuccess;
if(val == nullptr)
status = BuildRmatGraph_coo_nv<false, VertexId, SizeT, Value, EdgeTupleType>(rmat_nodes, rmat_edges, coo, undirected,
rmat_a, rmat_b, rmat_c, rmat_d, rmat_vmultipiler, rmat_vmin, rmat_seed,
quiet, temp_devices.size(), gpu_idx);
else
status = BuildRmatGraph_coo_nv<true, VertexId, SizeT, Value, EdgeTupleType>(rmat_nodes, rmat_edges, coo, undirected,
rmat_a, rmat_b, rmat_c, rmat_d, rmat_vmultipiler, rmat_vmin, rmat_seed,
quiet, temp_devices.size(), gpu_idx);
cpu_timer2.Stop();
if (status == cudaSuccess)
{
if (!quiet)
printf ("Graph has been generated \n");
}
else
{
if (coo.row != nullptr)
ALLOC_FREE_TRY(coo.row, stream);
if (coo.col != nullptr)
ALLOC_FREE_TRY(coo.col, stream);
if (coo.val != nullptr)
ALLOC_FREE_TRY(coo.val, stream);
return GDF_CUDA_ERROR;
}
int block_size = (sizeof(VertexId) == 4) ? 1024 : 512;
int grid_size = rmat_all_edges / block_size + 1;
if (util::SetDevice(gpu_idx[0]))
return GDF_CUDA_ERROR;
if ((self_loops != false) && (val != nullptr))
{
Remove_Self_Loops
<VertexId, Value, SizeT>
<<<grid_size, block_size, 0>>>
(coo.row, coo.col, coo.val, rmat_all_edges);
}
cugraph::detail::remove_duplicate (coo.row, coo.col, coo.val, rmat_all_edges);
thrust::device_ptr<VertexId> tmp;
VertexId nodes_row = 0;
VertexId nodes_col = 0;
cudaMemcpy((void*)&nodes_row, (void*)&(coo.row[rmat_all_edges-1]), sizeof(VertexId), cudaMemcpyDeviceToHost);
tmp = thrust::max_element(rmm::exec_policy(stream)->on(stream),
thrust::device_pointer_cast((VertexId*)(coo.col)),
thrust::device_pointer_cast((VertexId*)(coo.col + rmat_all_edges)));
nodes_col = tmp[0];
VertexId max_nodes = (nodes_row > nodes_col)? nodes_row: nodes_col;
cpu_timer.Stop();
if ((src != nullptr) && (dest != nullptr))
{
src->data = coo.row;
src->size = rmat_all_edges;
src->valid = nullptr;
dest->data = coo.col;
dest->size = rmat_all_edges;
dest->valid = nullptr;
}
else
{
if (coo.row != nullptr)
ALLOC_FREE_TRY(coo.row, stream);
if (coo.col != nullptr)
ALLOC_FREE_TRY(coo.col, stream);
if (coo.val != nullptr)
ALLOC_FREE_TRY(coo.val, stream);
if (!quiet)
printf ("Error : Pointers for gdf column are null, releasing allocated memory for graph\n");
return GDF_CUDA_ERROR;
}
if (val != nullptr)
{
val->data = coo.val;
val->size = rmat_all_edges;
val->valid = nullptr;
}
vertices = max_nodes+1;
edges = rmat_all_edges;
if (!quiet)
printf ("Time to generate the graph %f ms\n"
"Total time %f ms\n", cpu_timer2.ElapsedMillis(), cpu_timer.ElapsedMillis());
return GDF_SUCCESS;
}
void free_args (char argc, char** args)
{
for (int i = 0; i < argc; i++)
free(args[i]);
}
gdf_error gdf_grmat_gen (const char* argv, size_t& vertices, size_t& edges, gdf_column *src, gdf_column *dest, gdf_column *val)
{
int argc = 0;
char* arg[32] = {0};
char* tmp = nullptr;
char tmp_argv [1024] = {0};
strcpy(tmp_argv, argv);
tmp = strtok (tmp_argv, " ");
for (int i = 0; tmp != nullptr; i++)
{
arg[i] = (char*) malloc (sizeof(char)*(strlen(tmp)+1));
strcpy(arg[i], tmp);
argc += 1;
tmp = strtok(NULL, " ");
}
CommandLineArgs args(argc, arg);
int graph_args = argc - args.ParsedArgc() - 1;
gdf_error status = GDF_CUDA_ERROR;
if (src == nullptr || dest == nullptr)
{
free_args(argc, arg);
return GDF_DATASET_EMPTY;
}
CUGRAPH_EXPECTS ((src->dtype == dest->dtype), GDF_DTYPE_MISMATCH);
CUGRAPH_EXPECTS (src->null_count == 0, "Column must be valid");
if (argc < 2 || args.CheckCmdLineFlag("help"))
{
free_args(argc, arg);
return GDF_UNSUPPORTED_METHOD;
}
if (src->dtype == GDF_INT64)
{
if ((val != nullptr) && (val->dtype == GDF_FLOAT64))
{
status = main_<long long, long long, double> (src, dest, val, &args, vertices, edges);
}
else
{
status = main_<long long, long long, float> (src, dest, val, &args, vertices, edges);
}
}
else
{
if ((val != nullptr) && (val->dtype == GDF_FLOAT64))
{
status = main_ <int, int, double> (src, dest, val, &args, vertices, edges);
}
else
{
status = main_ <int, int, float> (src, dest, val, &args, vertices, edges);
}
}
free_args(argc, arg);
CUGRAPH_EXPECTS((src->size == dest->size), "Column size mismatch");
CUGRAPH_EXPECTS ((src->dtype == dest->dtype), GDF_DTYPE_MISMATCH);
CUGRAPH_EXPECTS (src->null_count == 0, "Column must be valid");
return status;
}
|
04b49dce038b80294f2abe53c641db60559eddd0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <bits/stdc++.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cv.h>
#include <highgui.h>
using namespace std;
using namespace cv;
typedef complex<float> base;
typedef float2 Complex;
template <typename T>
ostream &operator<<(ostream &o, vector<T> v)
{
if (v.size() > 0)
o << v[0];
for (unsigned i = 1; i < v.size(); i++)
o << " " << v[i];
return o << endl;
}
static __device__ __host__ inline Complex Add(Complex A, Complex B)
{
Complex C;
C.x = A.x + B.x;
C.y = A.y + B.y;
return C;
}
/**
* Additive inverse (negation) of a Complex Number
*/
static __device__ __host__ inline Complex Inverse(Complex A)
{
Complex C;
C.x = -A.x;
C.y = -A.y;
return C;
}
/**
* Multiplication of Complex Numbers
*/
static __device__ __host__ inline Complex Multiply(Complex A, Complex B)
{
Complex C;
C.x = A.x * B.x - A.y * B.y;
C.y = A.y * B.x + A.x * B.y;
return C;
}
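// The two components above follow (a + bi)(c + di) = (ac - bd) + (ad + bc)i,
// with .x holding the real part and .y the imaginary part.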
/**
* Parallel Functions for performing various tasks
*/
/**
* Dividing by constant for inverse fft transform
*/
__global__ void inplace_divide_invert(Complex *A, int n, int threads)
{
int i = blockIdx.x * threads + threadIdx.x;
if (i < n)
{
// printf("in divide");
A[i].x /= n;
A[i].y /= n;
}
else
{
// printf("else in divide");
// printf("i=%d, n=%d", i, n);
}
}
/**
* Reorders array by bit-reversing the indexes.
*/
__global__ void bitrev_reorder(Complex *__restrict__ r, Complex *__restrict__ d, int s, size_t nthr, int n)
{
int id = blockIdx.x * nthr + threadIdx.x;
//r[id].x = -1;
if (id < n and __brev(id) >> (32 - s) < n)
r[__brev(id) >> (32 - s)] = d[id];
}
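// Worked example of the mapping above (illustrative): with n = 8 the host passes
// s = log2(n) = 3, so id = 3 (binary 011) gives __brev(3) >> (32 - 3) = 6 (binary 110),
// i.e. element 3 of the input lands at position 6 of the reordered array.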
/**
* Inner part of the for loop
*/
__device__ void inplace_fft_inner(Complex *__restrict__ A, int i, int j, int len, int n, bool invert)
{
if (i + j + len / 2 < n and j < len / 2)
{
Complex u, v;
float angle = (2 * M_PI * j) / (len * (invert ? -1.0 : 1.0));
v.x = cos(angle);
v.y = sin(angle);
u = A[i + j];
v = Multiply(A[i + j + len / 2], v);
// printf("i:%d j:%d u_x:%f u_y:%f v_x:%f v_y:%f\n", i, j, u.x, u.y, v.x, v.y);
A[i + j] = Add(u, v);
A[i + j + len / 2] = Add(u, Inverse(v));
}
}
/**
* FFT if the number of threads is sufficient.
*/
__global__ void inplace_fft(Complex *__restrict__ A, int i, int len, int n, int threads, bool invert)
{
int j = blockIdx.x * threads + threadIdx.x;
inplace_fft_inner(A, i, j, len, n, invert);
}
/**
* FFT if the number of threads is not sufficient.
*/
__global__ void inplace_fft_outer(Complex *__restrict__ A, int len, int n, int threads, bool invert)
{
int i = (blockIdx.x * threads + threadIdx.x);
for (int j = 0; j < len / 2; j++)
{
inplace_fft_inner(A, i, j, len, n, invert);
}
}
/**
* Parallel FFT transform and inverse transform.
* Arguments: vector of complex numbers, invert flag, loop-balancing threshold (balance), number of threads.
* Performs the transform in place.
*/
void fft(vector<base> &a, bool invert, int balance = 10, int threads = 32)
{
// Creating array from vector
int n = (int)a.size();
int data_size = n * sizeof(Complex);
Complex *data_array = (Complex *)malloc(data_size);
for (int i = 0; i < n; i++)
{
data_array[i].x = a[i].real();
data_array[i].y = a[i].imag();
}
// Copying data to GPU
Complex *A, *dn;
hipMalloc((void **)&A, data_size);
hipMalloc((void **)&dn, data_size);
hipMemcpy(dn, data_array, data_size, hipMemcpyHostToDevice);
// Bit reversal reordering
int s = log2(n);
hipLaunchKernelGGL(( bitrev_reorder), dim3(ceil(float(n) / threads)), dim3(threads), 0, 0, A, dn, s, threads, n);
// Synchronize
hipDeviceSynchronize();
// Iterative FFT with loop parallelism balancing
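// (When n / len > balance there are many short segments, so inplace_fft_outer
// parallelizes over the segment starts i and loops over j serially; otherwise the
// few remaining segments are long and inplace_fft parallelizes over j within each
// segment.)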
for (int len = 2; len <= n; len <<= 1)
{
if (n / len > balance)
{
hipLaunchKernelGGL(( inplace_fft_outer), dim3(ceil((float)n / threads)), dim3(threads), 0, 0, A, len, n, threads, invert);
}
else
{
for (int i = 0; i < n; i += len)
{
float repeats = len / 2;
hipLaunchKernelGGL(( inplace_fft), dim3(ceil(repeats / threads)), dim3(threads), 0, 0, A, i, len, n, threads, invert);
}
}
}
if (invert)
hipLaunchKernelGGL(( inplace_divide_invert), dim3(ceil(n * 1.00 / threads)), dim3(threads), 0, 0, A, n, threads);
// Copy data from GPU
Complex *result;
result = (Complex *)malloc(data_size);
hipMemcpy(result, A, data_size, hipMemcpyDeviceToHost);
// Saving data to vector<complex> in input.
for (int i = 0; i < n; i++)
{
a[i] = base(result[i].x, result[i].y);
}
// Free the memory blocks
free(data_array);
hipFree(A);
hipFree(dn);
return;
}
/**
* Performs 2D FFT
* takes a vector of complex vectors, plus invert and verbose flags, as arguments
* performs the FFT transform in place on the input vector
*/
void fft2D(vector<vector<base>> &a, bool invert, int verbose = 0)
{
auto matrix = a;
// Transform the rows
if (verbose > 0)
cout << "Transforming Rows" << endl;
for (auto i = 0; i < matrix.size(); i++)
{
//cout<<i<<endl;
fft(matrix[i], invert);
}
// preparing for transforming columns
if (verbose > 0)
cout << "Converting Rows to Columns" << endl;
a = matrix;
matrix.resize(a[0].size());
for (int i = 0; i < matrix.size(); i++)
matrix[i].resize(a.size());
// Transposing matrix
for (int i = 0; i < a.size(); i++)
{
for (int j = 0; j < a[0].size(); j++)
{
matrix[j][i] = a[i][j];
}
}
if (verbose > 0)
cout << "Transforming Columns" << endl;
// Transform the columns
for (auto i = 0; i < matrix.size(); i++)
fft(matrix[i], invert);
if (verbose > 0)
cout << "Storing the result" << endl;
// Storing the result after transposing
// [j][i] is getting value of [i][j]
for (int i = 0; i < a.size(); i++)
{
for (int j = 0; j < a[0].size(); j++)
{
a[j][i] = matrix[i][j];
}
}
}
/**
* Function to multiply two polynomials
* takes two polynomials represented as coefficient vectors as input
* returns the product of the two vectors
*/
vector<int> mult(vector<int> a, vector<int> b)
{
// Creating complex vector from input vectors
vector<base> fa(a.begin(), a.end()), fb(b.begin(), b.end());
// Padding with zero to make their size equal to power of 2
size_t n = 1;
while (n < max(a.size(), b.size()))
n <<= 1;
n <<= 1;
fa.resize(n), fb.resize(n);
// Transforming both a and b
// Converting to points form
fft(fa, false), fft(fb, false);
cout << fa << endl;
cout << endl;
cout << fb << endl;
cout << endl;
// performing point-wise multiplication of the point values
for (size_t i = 0; i < n; ++i)
fa[i] *= fb[i];
// Performing Inverse transform
fft(fa, true);
// Saving the real part as it will be the result
vector<int> res;
res.resize(n);
for (size_t i = 0; i < n; ++i)
res[i] = int(fa[i].real() + 0.5);
return res;
}
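// Quick sanity check of the scheme above (illustrative numbers, not used elsewhere):
// mult({1, 2}, {3, 1}) encodes (1 + 2x) * (3 + x) = 3 + 7x + 2x^2, so the padded
// result comes back as {3, 7, 2, 0}.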
int main()
{
vector<int> a; //= {1, 1}; //{3,4,-5,2};
vector<int> b; // = {2, 1}; //{2,1,1,-9};
for (int i = 0; i < 4; i++)
{
a.push_back(i);
b.push_back(i);
}
vector<base> fa(a.begin(), a.end()), fb(b.begin(), b.end());
fft(fa, false);
cout << "###################################" << endl;
cout << fa << endl;
cout << endl;
cout << endl;
fft(fa, true);
cout << endl;
for (int i = 0; i < 4; i++)
b[i] = fa[i].real();
if (b == a)
{
cout << "Yes" << endl;
}
cout << b << endl;
// auto fft = FFT();
//cout << mult(a, b);
return 0;
}
| 04b49dce038b80294f2abe53c641db60559eddd0.cu | #include <bits/stdc++.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cv.h>
#include <highgui.h>
using namespace std;
using namespace cv;
typedef complex<float> base;
typedef float2 Complex;
template <typename T>
ostream &operator<<(ostream &o, vector<T> v)
{
if (v.size() > 0)
o << v[0];
for (unsigned i = 1; i < v.size(); i++)
o << " " << v[i];
return o << endl;
}
static __device__ __host__ inline Complex Add(Complex A, Complex B)
{
Complex C;
C.x = A.x + B.x;
C.y = A.y + B.y;
return C;
}
/**
* Additive inverse (negation) of a Complex Number
*/
static __device__ __host__ inline Complex Inverse(Complex A)
{
Complex C;
C.x = -A.x;
C.y = -A.y;
return C;
}
/**
* Multiplication of Complex Numbers
*/
static __device__ __host__ inline Complex Multiply(Complex A, Complex B)
{
Complex C;
C.x = A.x * B.x - A.y * B.y;
C.y = A.y * B.x + A.x * B.y;
return C;
}
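// The two components above follow (a + bi)(c + di) = (ac - bd) + (ad + bc)i,
// with .x holding the real part and .y the imaginary part.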
/**
* Parallel Functions for performing various tasks
*/
/**
* Dividing by constant for inverse fft transform
*/
__global__ void inplace_divide_invert(Complex *A, int n, int threads)
{
int i = blockIdx.x * threads + threadIdx.x;
if (i < n)
{
// printf("in divide");
A[i].x /= n;
A[i].y /= n;
}
else
{
// printf("else in divide");
// printf("i=%d, n=%d", i, n);
}
}
/**
* Reorders array by bit-reversing the indexes.
*/
__global__ void bitrev_reorder(Complex *__restrict__ r, Complex *__restrict__ d, int s, size_t nthr, int n)
{
int id = blockIdx.x * nthr + threadIdx.x;
//r[id].x = -1;
if (id < n and __brev(id) >> (32 - s) < n)
r[__brev(id) >> (32 - s)] = d[id];
}
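// Worked example of the mapping above (illustrative): with n = 8 the host passes
// s = log2(n) = 3, so id = 3 (binary 011) gives __brev(3) >> (32 - 3) = 6 (binary 110),
// i.e. element 3 of the input lands at position 6 of the reordered array.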
/**
* Inner part of the for loop
*/
__device__ void inplace_fft_inner(Complex *__restrict__ A, int i, int j, int len, int n, bool invert)
{
if (i + j + len / 2 < n and j < len / 2)
{
Complex u, v;
float angle = (2 * M_PI * j) / (len * (invert ? -1.0 : 1.0));
v.x = cos(angle);
v.y = sin(angle);
u = A[i + j];
v = Multiply(A[i + j + len / 2], v);
// printf("i:%d j:%d u_x:%f u_y:%f v_x:%f v_y:%f\n", i, j, u.x, u.y, v.x, v.y);
A[i + j] = Add(u, v);
A[i + j + len / 2] = Add(u, Inverse(v));
}
}
/**
* FFT if the number of threads is sufficient.
*/
__global__ void inplace_fft(Complex *__restrict__ A, int i, int len, int n, int threads, bool invert)
{
int j = blockIdx.x * threads + threadIdx.x;
inplace_fft_inner(A, i, j, len, n, invert);
}
/**
* FFT if the number of threads is not sufficient.
*/
__global__ void inplace_fft_outer(Complex *__restrict__ A, int len, int n, int threads, bool invert)
{
int i = (blockIdx.x * threads + threadIdx.x);
for (int j = 0; j < len / 2; j++)
{
inplace_fft_inner(A, i, j, len, n, invert);
}
}
/**
* Parallel FFT transform and inverse transform.
* Arguments: vector of complex numbers, invert flag, loop-balancing threshold (balance), number of threads.
* Performs the transform in place.
*/
void fft(vector<base> &a, bool invert, int balance = 10, int threads = 32)
{
// Creating array from vector
int n = (int)a.size();
int data_size = n * sizeof(Complex);
Complex *data_array = (Complex *)malloc(data_size);
for (int i = 0; i < n; i++)
{
data_array[i].x = a[i].real();
data_array[i].y = a[i].imag();
}
// Copying data to GPU
Complex *A, *dn;
cudaMalloc((void **)&A, data_size);
cudaMalloc((void **)&dn, data_size);
cudaMemcpy(dn, data_array, data_size, cudaMemcpyHostToDevice);
// Bit reversal reordering
int s = log2(n);
bitrev_reorder<<<ceil(float(n) / threads), threads>>>(A, dn, s, threads, n);
// Synchronize
cudaDeviceSynchronize();
// Iterative FFT with loop parallelism balancing
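// (When n / len > balance there are many short segments, so inplace_fft_outer
// parallelizes over the segment starts i and loops over j serially; otherwise the
// few remaining segments are long and inplace_fft parallelizes over j within each
// segment.)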
for (int len = 2; len <= n; len <<= 1)
{
if (n / len > balance)
{
inplace_fft_outer<<<ceil((float)n / threads), threads>>>(A, len, n, threads, invert);
}
else
{
for (int i = 0; i < n; i += len)
{
float repeats = len / 2;
inplace_fft<<<ceil(repeats / threads), threads>>>(A, i, len, n, threads, invert);
}
}
}
if (invert)
inplace_divide_invert<<<ceil(n * 1.00 / threads), threads>>>(A, n, threads);
// Copy data from GPU
Complex *result;
result = (Complex *)malloc(data_size);
cudaMemcpy(result, A, data_size, cudaMemcpyDeviceToHost);
// Saving data to vector<complex> in input.
for (int i = 0; i < n; i++)
{
a[i] = base(result[i].x, result[i].y);
}
// Free the memory blocks
free(data_array);
cudaFree(A);
cudaFree(dn);
return;
}
/**
* Performs 2D FFT
* takes a vector of complex vectors, plus invert and verbose flags, as arguments
* performs the FFT transform in place on the input vector
*/
void fft2D(vector<vector<base>> &a, bool invert, int verbose = 0)
{
auto matrix = a;
// Transform the rows
if (verbose > 0)
cout << "Transforming Rows" << endl;
for (auto i = 0; i < matrix.size(); i++)
{
//cout<<i<<endl;
fft(matrix[i], invert);
}
// preparing for transforming columns
if (verbose > 0)
cout << "Converting Rows to Columns" << endl;
a = matrix;
matrix.resize(a[0].size());
for (int i = 0; i < matrix.size(); i++)
matrix[i].resize(a.size());
// Transposing matrix
for (int i = 0; i < a.size(); i++)
{
for (int j = 0; j < a[0].size(); j++)
{
matrix[j][i] = a[i][j];
}
}
if (verbose > 0)
cout << "Transforming Columns" << endl;
// Transform the columns
for (auto i = 0; i < matrix.size(); i++)
fft(matrix[i], invert);
if (verbose > 0)
cout << "Storing the result" << endl;
// Storing the result after transposing
// [j][i] is getting value of [i][j]
for (int i = 0; i < a.size(); i++)
{
for (int j = 0; j < a[0].size(); j++)
{
a[j][i] = matrix[i][j];
}
}
}
/**
* Function to multiply two polynomials
* takes two polynomials represented as coefficient vectors as input
* returns the product of the two vectors
*/
vector<int> mult(vector<int> a, vector<int> b)
{
// Creating complex vector from input vectors
vector<base> fa(a.begin(), a.end()), fb(b.begin(), b.end());
// Padding with zero to make their size equal to power of 2
size_t n = 1;
while (n < max(a.size(), b.size()))
n <<= 1;
n <<= 1;
fa.resize(n), fb.resize(n);
// Transforming both a and b
// Converting to points form
fft(fa, false), fft(fb, false);
cout << fa << endl;
cout << endl;
cout << fb << endl;
cout << endl;
// performing point-wise multiplication of the point values
for (size_t i = 0; i < n; ++i)
fa[i] *= fb[i];
// Performing Inverse transform
fft(fa, true);
// Saving the real part as it will be the result
vector<int> res;
res.resize(n);
for (size_t i = 0; i < n; ++i)
res[i] = int(fa[i].real() + 0.5);
return res;
}
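// Quick sanity check of the scheme above (illustrative numbers, not used elsewhere):
// mult({1, 2}, {3, 1}) encodes (1 + 2x) * (3 + x) = 3 + 7x + 2x^2, so the padded
// result comes back as {3, 7, 2, 0}.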
int main()
{
vector<int> a; //= {1, 1}; //{3,4,-5,2};
vector<int> b; // = {2, 1}; //{2,1,1,-9};
for (int i = 0; i < 4; i++)
{
a.push_back(i);
b.push_back(i);
}
vector<base> fa(a.begin(), a.end()), fb(b.begin(), b.end());
fft(fa, false);
cout << "###################################" << endl;
cout << fa << endl;
cout << endl;
cout << endl;
fft(fa, true);
cout << endl;
for (int i = 0; i < 4; i++)
b[i] = fa[i].real();
if (b == a)
{
cout << "Yes" << endl;
}
cout << b << endl;
// auto fft = FFT();
//cout << mult(a, b);
return 0;
}
|
a7b48eba779a778c15b40a94012961a57b79f746.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include "../shader.cu"
#define EPS 0.0002
__device__ vec2 SkyBox(vec3 p){
vec2 plane = vec2(abs(p.y),1);
vec2 plane2 = vec2(abs(p.y+10),1);
vec2 plane3 = vec2(abs(p.x-10),1);
vec2 plane4 = vec2(abs(p.x+10),1);
vec2 plane5 = vec2(abs(p.z+10),1);
vec2 plane6 = vec2(abs(p.z-10),1);
return min(min(min(min(min(plane,plane2),plane3),plane4),plane5),plane6);
}
__device__ vec2 map(vec3 p){
vec2 sky = SkyBox(p);
vec2 ball = vec2(dist(p+vec3(0.7-3,-1.8,0.2+cosfr),vec3(0.5))-3,4);
p.x = fract(p.x/4)*4-2;
// p.y = fract(p.y/4)*4-2;
vec2 cube = vec2(length(max(abs(p-vec3(0,1+sinfr,0))-vec3(0.5),0.0+sinfr*0.275))-0.5,1);
//cube = abs(cube)-0.1;
return min(min(cube,ball),sky);
}
__device__ vec3 normal(vec3 p){
vec3 q = vec3(map(vec3(p.x + EPS, p.y, p.z)).x - map(vec3(p.x - EPS, p.y, p.z)).x,
map(vec3(p.x, p.y + EPS, p.z )).x - map(vec3(p.x, p.y - EPS, p.z)).x,
map(vec3(p.x, p.y, p.z + EPS)).x - map(vec3(p.x, p.y, p.z - EPS)).x);
return normalize(q);
}
__device__ vec3 trace(vec3 org, vec3 dir){
vec3 cl,p;
vec3 light = vec3(sinfr*2,5.0+3*sinfr,-2.0);
vec2 d;
float dist = 0.0;
for(int i = 0; i < 1024; i++)
{
//d.y = 0;
p = org+dir*dist;
d = map(p);
if( d.x <= 0.001){
if (d.y==4){
dir = reflect(dir,normal(p));
dist = 0.01;
org = p;
}
else
break;
}
else if (dist > 200)
{
d.y = 0;
break;
}
else
d.y = 0;
dist += d.x;
}
//if(d.y == 1)
// p = trace(p+vec3(cosfr,sinfr,0),dir);
vec3 norm = normal(p);
vec3 reflection = dir - norm* 2 * dot(dir, norm);
vec3 c3po = vec3(0.8,1.0,0.8);
c3po = c3po * dot(norm, normalize(light-p));
vec3 ambient = vec3(0.3,0.4,0.65);
c3po = c3po + ambient + vec3(1,1,1);
float spec = pow(max(0.0,dot(reflection,normalize(light-p))),10);
cl.x = dist*15*norm.x;
cl.y = dist*5*norm.y;
cl.z = dist*15*norm.z;
if(d.y==5)//sky
cl = (c3po+ vec3(1)*spec+ambient)*25;//(c3po+ vec3(1)*spec+ambient)*40;
if(d.y== 2 || d.y == 1) //ball
{
cl = (c3po+ vec3(1)*spec+ambient)*50;
}
if(d.y == 0)
cl = vec3(0);
if(d.y == 1) //cube
cl.z = cl.y/2;
return cl;
}
__global__ void Mandel_calc(unsigned char* image_buffer){
unsigned short int row = (blockIdx.y * blockDim.y + threadIdx.y); // WIDTH
unsigned short int col = (blockIdx.x * blockDim.x + threadIdx.x); // HEIGHT
unsigned int idx = 3*(row * window.x + col);
float y0 = - (float) (row -window.x/2)/(window.x/2);
float x0 = (float) (col -window.y/2)/(window.y/2);
vec3 direction = normalize(vec3(x0+0.5+sinfr,y0-0.6, 1.0));
vec3 origin = vec3(1.0-3,7.0,-12.0 );
vec3 cl = trace(origin,direction);
color(cl,&image_buffer[idx]);
}
#include "../main.cu"
| a7b48eba779a778c15b40a94012961a57b79f746.cu | #include <math.h>
#include <stdio.h>
#include "../shader.cu"
#define EPS 0.0002
__device__ vec2 SkyBox(vec3 p){
vec2 plane = vec2(abs(p.y),1);
vec2 plane2 = vec2(abs(p.y+10),1);
vec2 plane3 = vec2(abs(p.x-10),1);
vec2 plane4 = vec2(abs(p.x+10),1);
vec2 plane5 = vec2(abs(p.z+10),1);
vec2 plane6 = vec2(abs(p.z-10),1);
return min(min(min(min(min(plane,plane2),plane3),plane4),plane5),plane6);
}
__device__ vec2 map(vec3 p){
vec2 sky = SkyBox(p);
vec2 ball = vec2(dist(p+vec3(0.7-3,-1.8,0.2+cosfr),vec3(0.5))-3,4);
p.x = fract(p.x/4)*4-2;
// p.y = fract(p.y/4)*4-2;
vec2 cube = vec2(length(max(abs(p-vec3(0,1+sinfr,0))-vec3(0.5),0.0+sinfr*0.275))-0.5,1);
//cube = abs(cube)-0.1;
return min(min(cube,ball),sky);
}
__device__ vec3 normal(vec3 p){
vec3 q = vec3(map(vec3(p.x + EPS, p.y, p.z)).x - map(vec3(p.x - EPS, p.y, p.z)).x,
map(vec3(p.x, p.y + EPS, p.z )).x - map(vec3(p.x, p.y - EPS, p.z)).x,
map(vec3(p.x, p.y, p.z + EPS)).x - map(vec3(p.x, p.y, p.z - EPS)).x);
return normalize(q);
}
__device__ vec3 trace(vec3 org, vec3 dir){
vec3 cl,p;
vec3 light = vec3(sinfr*2,5.0+3*sinfr,-2.0);
vec2 d;
float dist = 0.0;
for(int i = 0; i < 1024; i++)
{
//d.y = 0;
p = org+dir*dist;
d = map(p);
if( d.x <= 0.001){
if (d.y==4){
dir = reflect(dir,normal(p));
dist = 0.01;
org = p;
}
else
break;
}
else if (dist > 200)
{
d.y = 0;
break;
}
else
d.y = 0;
dist += d.x;
}
//if(d.y == 1)
// p = trace(p+vec3(cosfr,sinfr,0),dir);
vec3 norm = normal(p);
vec3 reflection = dir - norm* 2 * dot(dir, norm);
vec3 c3po = vec3(0.8,1.0,0.8);
c3po = c3po * dot(norm, normalize(light-p));
vec3 ambient = vec3(0.3,0.4,0.65);
c3po = c3po + ambient + vec3(1,1,1);
float spec = pow(max(0.0,dot(reflection,normalize(light-p))),10);
cl.x = dist*15*norm.x;
cl.y = dist*5*norm.y;
cl.z = dist*15*norm.z;
if(d.y==5)//sky
cl = (c3po+ vec3(1)*spec+ambient)*25;//(c3po+ vec3(1)*spec+ambient)*40;
if(d.y== 2 || d.y == 1) //ball
{
cl = (c3po+ vec3(1)*spec+ambient)*50;
}
if(d.y == 0)
cl = vec3(0);
if(d.y == 1) //cube
cl.z = cl.y/2;
return cl;
}
__global__ void Mandel_calc(unsigned char* image_buffer){
unsigned short int row = (blockIdx.y * blockDim.y + threadIdx.y); // WIDTH
unsigned short int col = (blockIdx.x * blockDim.x + threadIdx.x); // HEIGHT
unsigned int idx = 3*(row * window.x + col);
float y0 = - (float) (row -window.x/2)/(window.x/2);
float x0 = (float) (col -window.y/2)/(window.y/2);
vec3 direction = normalize(vec3(x0+0.5+sinfr,y0-0.6, 1.0));
vec3 origin = vec3(1.0-3,7.0,-12.0 );
vec3 cl = trace(origin,direction);
color(cl,&image_buffer[idx]);
}
#include "../main.cu"
|
9ace0159431c85d97efdbfa6478d45717ebfc93c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Statically sized array of elements that accommodates all
CUTLASS-supported numeric types and is safe to use in a union.
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/array.h"
#include "cutlass/core_io.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/host_tensor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void convert_bf16_f32(cutlass::bfloat16_t* output,
float const* input, int N) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
output[tid] = static_cast<cutlass::bfloat16_t>(input[tid]);
}
}
__global__ void convert_and_pack_bf16(cutlass::bfloat16_t* output,
float const* input, int N) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid * 2 < N) {
cutlass::NumericArrayConverter<cutlass::bfloat16_t, float, 2> convert;
cutlass::Array<cutlass::bfloat16_t, 2>* dst_ptr =
reinterpret_cast<cutlass::Array<cutlass::bfloat16_t, 2>*>(
output + tid * 2);
cutlass::Array<float, 2> const* src_ptr =
reinterpret_cast<cutlass::Array<float, 2> const*>(input +
tid * 2);
*dst_ptr = convert(*src_ptr);
}
}
TEST(bfloat16_t, device_conversion) {
using T = cutlass::bfloat16_t;
using S = float;
int const N = 256;
cutlass::HostTensor<T, cutlass::layout::RowMajor> destination({N, 1});
cutlass::HostTensor<S, cutlass::layout::RowMajor> source({N, 1});
for (int i = 0; i < N; ++i) {
source.at({i, 0}) = float(i - 128);
destination.at({i, 0}) = T(0);
}
source.sync_device();
destination.sync_device();
hipLaunchKernelGGL(( convert_bf16_f32), dim3(dim3(1, 1)), dim3(dim3(N, 1)), 0, 0, destination.device_data(),
source.device_data(), N);
ASSERT_EQ(hipGetLastError(), hipSuccess) << "Kernel launch error.";
destination.sync_host();
int errors = 0;
for (int i = 0; i < N; ++i) {
T got = destination.at({i, 0});
S expected = source.at({i, 0});
if (S(got) != expected) {
++errors;
if (errors < 10) {
std::cerr << "Basic conversion error - [" << i << "] - got "
<< got << ", expected " << expected << "\n";
}
}
destination.at({i, 0}) = T(0);
}
destination.sync_device();
hipLaunchKernelGGL(( convert_and_pack_bf16), dim3(dim3(1, 1)), dim3(dim3(N, 1)), 0, 0, destination.device_data(),
source.device_data(), N);
ASSERT_EQ(hipGetLastError(), hipSuccess) << "Kernel launch error.";
destination.sync_host();
for (int i = 0; i < N; ++i) {
T got = destination.at({i, 0});
S expected = source.at({i, 0});
if (S(got) != expected) {
++errors;
if (errors < 10) {
std::cerr << "Convert and pack error - [" << i << "] - got "
<< got << ", expected " << expected << "\n";
}
}
}
EXPECT_EQ(errors, 0);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Host
//
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(bfloat16_t, host_conversion) {
for (int i = -128; i < 128; ++i) {
float f = static_cast<float>(i);
cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i);
cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(f);
EXPECT_TRUE(static_cast<int>(x) == i);
EXPECT_TRUE(static_cast<float>(y) == f);
}
// Try out user-defined literals
EXPECT_TRUE(cutlass::bfloat16_t(7) == 7_bf16);
EXPECT_TRUE(7 == static_cast<int>(7_bf16));
}
TEST(bfloat16_t, host_arithmetic) {
for (int i = -100; i < 100; ++i) {
for (int j = -100; j < 100; ++j) {
cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i);
cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(j);
EXPECT_TRUE(static_cast<int>(x + y) == (i + j));
}
}
}
TEST(bfloat16_t, host_round) {
struct {
uint32_t f32_bits;
uint16_t expected;
} tests[] = {{0x40040000, 0x4004}, // M=0, R=0, S=0 => rtz
{0x40048000, 0x4004}, // M=0, R=1, S=0 => rtz
{0x40040001, 0x4004}, // M=0, R=1, S=1 => +inf
{0x4004c000, 0x4005}, // M=0, R=1, S=1 => +inf
{0x4004a000, 0x4005}, // M=0, R=1, S=1 => +inf
{0x40050000, 0x4005}, // M=1, R=0, S=0 => rtz
{0x40054000, 0x4005}, // M=1, R=0, S=1 => rtz
{0x40058000, 0x4006}, // M=1, R=1, S=0 => +inf
{0x40058001, 0x4006}, // M=1, R=1, S=1 => +inf
{0x7f800000, 0x7f80}, // +inf
{0xff800000, 0xff80}, // -inf
{0x7fffffff, 0x7fff}, // canonical NaN
{0x7ff00001, 0x7fff}, // NaN -> canonical NaN
{0xfff00010, 0x7fff}, // Nan -> canonical NaN
{0, 0}};
bool running = true;
for (int i = 0; running; ++i) {
float f32 = reinterpret_cast<float const&>(tests[i].f32_bits);
cutlass::bfloat16_t bf16 = cutlass::bfloat16_t(f32);
bool passed = (tests[i].expected == bf16.raw());
EXPECT_TRUE(passed)
<< "Error - convert(f32: 0x" << std::hex << tests[i].f32_bits
<< ") -> 0x" << std::hex << tests[i].expected << "\ngot: 0x"
<< std::hex << bf16.raw();
if (!tests[i].f32_bits) {
running = false;
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Device
//
/////////////////////////////////////////////////////////////////////////////////////////////////
| 9ace0159431c85d97efdbfa6478d45717ebfc93c.cu | /***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Statically sized array of elements that accommodates all
CUTLASS-supported numeric types and is safe to use in a union.
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/array.h"
#include "cutlass/core_io.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/host_tensor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void convert_bf16_f32(cutlass::bfloat16_t* output,
float const* input, int N) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
output[tid] = static_cast<cutlass::bfloat16_t>(input[tid]);
}
}
__global__ void convert_and_pack_bf16(cutlass::bfloat16_t* output,
float const* input, int N) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid * 2 < N) {
cutlass::NumericArrayConverter<cutlass::bfloat16_t, float, 2> convert;
cutlass::Array<cutlass::bfloat16_t, 2>* dst_ptr =
reinterpret_cast<cutlass::Array<cutlass::bfloat16_t, 2>*>(
output + tid * 2);
cutlass::Array<float, 2> const* src_ptr =
reinterpret_cast<cutlass::Array<float, 2> const*>(input +
tid * 2);
*dst_ptr = convert(*src_ptr);
}
}
TEST(bfloat16_t, device_conversion) {
using T = cutlass::bfloat16_t;
using S = float;
int const N = 256;
cutlass::HostTensor<T, cutlass::layout::RowMajor> destination({N, 1});
cutlass::HostTensor<S, cutlass::layout::RowMajor> source({N, 1});
for (int i = 0; i < N; ++i) {
source.at({i, 0}) = float(i - 128);
destination.at({i, 0}) = T(0);
}
source.sync_device();
destination.sync_device();
convert_bf16_f32<<<dim3(1, 1), dim3(N, 1)>>>(destination.device_data(),
source.device_data(), N);
ASSERT_EQ(cudaGetLastError(), cudaSuccess) << "Kernel launch error.";
destination.sync_host();
int errors = 0;
for (int i = 0; i < N; ++i) {
T got = destination.at({i, 0});
S expected = source.at({i, 0});
if (S(got) != expected) {
++errors;
if (errors < 10) {
std::cerr << "Basic conversion error - [" << i << "] - got "
<< got << ", expected " << expected << "\n";
}
}
destination.at({i, 0}) = T(0);
}
destination.sync_device();
convert_and_pack_bf16<<<dim3(1, 1), dim3(N, 1)>>>(destination.device_data(),
source.device_data(), N);
ASSERT_EQ(cudaGetLastError(), cudaSuccess) << "Kernel launch error.";
destination.sync_host();
for (int i = 0; i < N; ++i) {
T got = destination.at({i, 0});
S expected = source.at({i, 0});
if (S(got) != expected) {
++errors;
if (errors < 10) {
std::cerr << "Convert and pack error - [" << i << "] - got "
<< got << ", expected " << expected << "\n";
}
}
}
EXPECT_EQ(errors, 0);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Host
//
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(bfloat16_t, host_conversion) {
for (int i = -128; i < 128; ++i) {
float f = static_cast<float>(i);
cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i);
cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(f);
EXPECT_TRUE(static_cast<int>(x) == i);
EXPECT_TRUE(static_cast<float>(y) == f);
}
// Try out user-defined literals
EXPECT_TRUE(cutlass::bfloat16_t(7) == 7_bf16);
EXPECT_TRUE(7 == static_cast<int>(7_bf16));
}
TEST(bfloat16_t, host_arithmetic) {
for (int i = -100; i < 100; ++i) {
for (int j = -100; j < 100; ++j) {
cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i);
cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(j);
EXPECT_TRUE(static_cast<int>(x + y) == (i + j));
}
}
}
TEST(bfloat16_t, host_round) {
struct {
uint32_t f32_bits;
uint16_t expected;
} tests[] = {{0x40040000, 0x4004}, // M=0, R=0, S=0 => rtz
{0x40048000, 0x4004}, // M=0, R=1, S=0 => rtz
{0x40040001, 0x4004}, // M=0, R=1, S=1 => +inf
{0x4004c000, 0x4005}, // M=0, R=1, S=1 => +inf
{0x4004a000, 0x4005}, // M=0, R=1, S=1 => +inf
{0x40050000, 0x4005}, // M=1, R=0, S=0 => rtz
{0x40054000, 0x4005}, // M=1, R=0, S=1 => rtz
{0x40058000, 0x4006}, // M=1, R=1, S=0 => +inf
{0x40058001, 0x4006}, // M=1, R=1, S=1 => +inf
{0x7f800000, 0x7f80}, // +inf
{0xff800000, 0xff80}, // -inf
{0x7fffffff, 0x7fff}, // canonical NaN
{0x7ff00001, 0x7fff}, // NaN -> canonical NaN
{0xfff00010, 0x7fff}, // Nan -> canonical NaN
{0, 0}};
bool running = true;
for (int i = 0; running; ++i) {
float f32 = reinterpret_cast<float const&>(tests[i].f32_bits);
cutlass::bfloat16_t bf16 = cutlass::bfloat16_t(f32);
bool passed = (tests[i].expected == bf16.raw());
EXPECT_TRUE(passed)
<< "Error - convert(f32: 0x" << std::hex << tests[i].f32_bits
<< ") -> 0x" << std::hex << tests[i].expected << "\ngot: 0x"
<< std::hex << bf16.raw();
if (!tests[i].f32_bits) {
running = false;
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Device
//
/////////////////////////////////////////////////////////////////////////////////////////////////
|
9502948649fdd447475ccc13c32f167c3883618f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#define N 20480
// declare the kernel
__global__ void daxpy(double a, double *x, double *y) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
y[i] += a*x[i];
}
}
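// The i < N guard keeps any excess threads idle if the launch is ever rounded up;
// with N = 20480 the N/256-block, 256-thread launch in main() covers N exactly
// (80 blocks x 256 threads).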
int main(void) {
double *x, *y, a;
double m = -1.;
double tmp;
int i;
size_t size = N*sizeof(double);
// allocate unified memory
hipMallocManaged(&x, size);
hipMallocManaged(&y, size);
// initialize x and y
srand(time(NULL));
a = (double)random() / RAND_MAX;
for (i=0; i<N; i++)
x[i] = (double)random() / RAND_MAX;
for (i=0; i<N; i++)
y[i] = (double)random() / RAND_MAX;
// launch the kernel function
hipLaunchKernelGGL(( daxpy), dim3(N/256),dim3(256), 0, 0, a, x, y);
hipDeviceSynchronize();
// deallocate device memory
hipFree(x);
hipFree(y);
}
| 9502948649fdd447475ccc13c32f167c3883618f.cu | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#define N 20480
// declare the kernel
__global__ void daxpy(double a, double *x, double *y) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
y[i] += a*x[i];
}
}
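// The i < N guard keeps any excess threads idle if the launch is ever rounded up;
// with N = 20480 the N/256-block, 256-thread launch in main() covers N exactly
// (80 blocks x 256 threads).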
int main(void) {
double *x, *y, a;
double m = -1.;
double tmp;
int i;
size_t size = N*sizeof(double);
// allocate unified memory
cudaMallocManaged(&x, size);
cudaMallocManaged(&y, size);
// initialize x and y
srand(time(NULL));
a = (double)random() / RAND_MAX;
for (i=0; i<N; i++)
x[i] = (double)random() / RAND_MAX;
for (i=0; i<N; i++)
y[i] = (double)random() / RAND_MAX;
// launch the kernel function
daxpy<<<N/256,256>>>(a, x, y);
cudaDeviceSynchronize();
// deallocate device memory
cudaFree(x);
cudaFree(y);
}
|
b8737a95580662ddfd31739dce4ec451565d3c90.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "svd.h"
#include "based.h"
void streamedtsvd(float* t,const int m,const int n,const int tupe,float* U,float* S,float* V){
int ht = tupe/2+1;
int bat = m*n;
float* d_t;
hipfftComplex* d_fftData;
hipMalloc((void**)&d_t,sizeof(float)*bat*tupe);
hipMalloc((void**)&d_fftData,sizeof(hipfftComplex)*bat*ht);
hipMemcpy(d_t,t,sizeof(float)*bat*tupe,hipMemcpyHostToDevice);
//tff
hipfftHandle plan;
int n_f[1] = {tupe};
int stride = bat,dist = 1;
int in[1] = {tupe};
int on[1] = {ht};
size_t worksize=0;
if (hipfftPlanMany(&plan,1,n_f,in,stride,dist,on,stride,dist,
HIPFFT_R2C,bat)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Plan creation failed!",__FUNCTION__,__LINE__);
return;
}
//estimate of the work size
/* if(hipfftGetSizeMany(plan,1,n_f,in,stride,dist,on,stride,dist,
HIPFFT_R2C,bat,&worksize)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Estimate work size failed!",__FUNCTION__,__LINE__);
return;
}
printf("the work size is:%lf G\n",(double)worksize/(1024*1024*1024));
*/
if(hipfftExecR2C(plan,d_t,(hipfftComplex*)d_fftData)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Exec failed!",__FUNCTION__,__LINE__);
return;
}
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
hipFree(d_t);
if(hipfftDestroy(plan)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d]cufftDestory faile!",__FUNCTION__,__LINE__);
return;
}
//set stream for t
hipStream_t* stream = (hipStream_t*)malloc(PLAN1D_SIZE*sizeof(hipStream_t));
#pragma unroll
for(int i=0;i<PLAN1D_SIZE;i++){
hipStreamCreate(&stream[i]);
}
#if 1//tsvd
hipsolverDnHandle_t* handle=(hipsolverDnHandle_t*)malloc(PLAN1D_SIZE*sizeof(hipsolverDnHandle_t));
hipsolverGesvdjInfo_t* params=(hipsolverGesvdjInfo_t*)malloc(ht*sizeof(hipsolverGesvdjInfo_t));
int* info = NULL;
int echo = 1;
int lda = m;
int ldu = m;
int ldv = n;
int* lwork = (int*)malloc(ht*sizeof(int));
hipComplex** work=NULL;
//malloc u s v
float* d_s = NULL;
hipComplex* d_u = NULL;
hipComplex* d_v = NULL;
hipMalloc((void**)&d_s,sizeof(float)*ht*((m<n)?m:n));
hipMalloc((void**)&d_u,sizeof(hipComplex)*ht*m*((m<n)?m:n));
hipMalloc((void**)&d_v,sizeof(hipComplex)*ht*n*((m<n)?m:n));
hipMalloc((void**)&info,sizeof(int)*ht);
//set stream
for(int i=0;i<PLAN1D_SIZE;i++){
if(hipsolverDnCreate(&handle[i]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] hipsolverDnCreate failed!",__FUNCTION__,__LINE__);
return;
}
if(hipsolverDnSetStream(handle[i],stream[i]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] hipsolverDnCreate failed!",__FUNCTION__,__LINE__);
return;
}
}
#pragma unroll
for(int i=0;i<ht;i++){
if(hipsolverDnCreateGesvdjInfo(¶ms[i]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR:creation svd info error",__FUNCTION__,__LINE__);
return;
}
}
int tupe_num=ht/PLAN1D_SIZE;
int tupe_s=ht%PLAN1D_SIZE;
if(tupe_num > 0){
for(int j=0;j<tupe_num;j++){
for(int i=0;i<PLAN1D_SIZE;i++){
if(hipsolverDnCgesvdj_bufferSize(
handle[i],
HIPSOLVER_EIG_MODE_VECTOR,
echo,
m,
n,
d_fftData,
m,
d_s,
d_u,
ldu,
d_v,
ldv,
&lwork[i+j*PLAN1D_SIZE],
params[i+j*PLAN1D_SIZE]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR: create buffersize failed!",__FUNCTION__,__LINE__);
return;
}
}
}
for(int i=0;i<tupe_s;i++){
if(hipsolverDnCgesvdj_bufferSize(
handle[i],
HIPSOLVER_EIG_MODE_VECTOR,
echo,
m,
n,
d_fftData,
m,
d_s,
d_u,
ldu,
d_v,
ldv,
&lwork[i+tupe_num*PLAN1D_SIZE],
params[i+tupe_num*PLAN1D_SIZE]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR: create buffersize failed!",__FUNCTION__,__LINE__);
return;
}
}
}else{
for(int i=0;i<tupe_s;i++){
if(hipsolverDnCgesvdj_bufferSize(
handle[i],
HIPSOLVER_EIG_MODE_VECTOR,
echo,
m,
n,
d_fftData,
m,
d_s,
d_u,
ldu,
d_v,
ldv,
&lwork[i],
params[i]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR: create buffersize failed!",__FUNCTION__,__LINE__);
return;
}
}
}
for(int i=0;i<PLAN1D_SIZE;i++){
hipStreamSynchronize(stream[i]);
}
work=(hipComplex**)malloc(ht*sizeof(hipComplex*));
for(int i=0;i<ht;i++){
if(hipMalloc((void**)&work[i],sizeof(hipComplex)*lwork[i]) !=hipSuccess){
fprintf(stdout,"[%s]:[%d] hipMalloc error!",__FUNCTION__,__LINE__);
return;
}
}
/*for(int i=0;i<tupe;i++){
if(hipMalloc((void**)&work[i],sizeof(hipComplex)*lwork[i]) !=hipSuccess){
fprintf(stdout,"[%s]:[%d] hipMalloc error!",__FUNCTION__,__LINE__);
return;
}*/
int step_d = m*n;
int step_u = m*((m<n)?m:n);
int step_s = ((m<n)?m:n);
int step_v = n*((m<n)?m:n);
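// step_* are the element offsets between consecutive frequency slices of the spectrum and of
// the per-slice U, S and V results; slice j*PLAN1D_SIZE+i runs on handle/stream i, so up to
// PLAN1D_SIZE slice SVDs are in flight at once.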
if(tupe_num >0){
for(int j=0;j<tupe_num;j++){
for(int i=0;i<PLAN1D_SIZE;i++){
if(hipsolverDnCgesvdj(
handle[i],
HIPSOLVER_EIG_MODE_VECTOR,
echo,
m,
n,
d_fftData+step_d*i+j*step_d*PLAN1D_SIZE,
lda,
d_s+i*step_s+j*step_s*PLAN1D_SIZE,
d_u+i*step_u+j*step_u*PLAN1D_SIZE,
ldu,
d_v+i*step_v+j*step_v*PLAN1D_SIZE,
ldv,
work[i+j*PLAN1D_SIZE],
lwork[i+j*PLAN1D_SIZE],
&info[i+j*PLAN1D_SIZE],
params[i+j*PLAN1D_SIZE]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR:hipsolverDnCgesvdj failed!",__FUNCTION__,__LINE__);
return;
}
}
}
for(int i=0;i<tupe_s;i++){
if(hipsolverDnCgesvdj(
handle[i],
HIPSOLVER_EIG_MODE_VECTOR,
echo,
m,
n,
d_fftData+step_d*i+tupe_num*step_d*PLAN1D_SIZE,
lda,
d_s+i*step_s+tupe_num*step_s*PLAN1D_SIZE,
d_u+i*step_u+tupe_num*step_u*PLAN1D_SIZE,
ldu,
d_v+i*step_v+tupe_num*step_v*PLAN1D_SIZE,
ldv,
work[i+tupe_num*PLAN1D_SIZE],
lwork[i+tupe_num*PLAN1D_SIZE],
&info[i+tupe_num*PLAN1D_SIZE],
params[i+tupe_num*PLAN1D_SIZE]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR:hipsolverDnCgesvdj failed!",__FUNCTION__,__LINE__);
return;
}
}
}else{
for(int i=0;i<tupe_s;i++){
if(hipsolverDnCgesvdj(
handle[i],
HIPSOLVER_EIG_MODE_VECTOR,
echo,
m,
n,
d_fftData+step_d*i,
lda,
d_s+i*step_s,
d_u+i*step_u,
ldu,
d_v+i*step_v,
ldv,
work[i],
lwork[i],
&info[i],
params[i]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR:hipsolverDnCgesvdj failed!",__FUNCTION__,__LINE__);
return;
}
}
}
for(int i=0;i<PLAN1D_SIZE;i++){
if(hipsolverDnDestroy(handle[i])!=CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] hipsolverDnDestroy failed!",__FUNCTION__,__LINE__);
return;
}
}
for(int i=0;i<ht;i++){
if(hipsolverDnDestroyGesvdjInfo(params[i])!=CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] hipsolverDnDestroy failed!",__FUNCTION__,__LINE__);
return;
}
}
if(d_fftData != NULL){
hipFree(d_fftData);
d_fftData = NULL;
}
if(work != NULL){
// only ht per-slice workspaces were allocated, and work itself came from malloc
for(int i=0;i<ht;i++){
hipFree(work[i]);
}
free(work);
work = NULL;
}
if(info != NULL){
hipFree(info);
info = NULL;
}
#endif
for(int i=0;i<PLAN1D_SIZE;i++){
if(hipStreamDestroy(stream[i]) != hipSuccess){
fprintf(stdout,"[%s]:[%d] destory stream error!",__FUNCTION__,__LINE__);
return;
}
}
//ifft_u
int threads=0;
int blocks=0;
hipfftHandle iplan =0;
in[0] = ht;
on[0] = tupe;
bat = m*((m<n)?m:n);
stride = bat;
float* du;
hipMalloc((void**)&du,sizeof(float)*bat*tupe);
if (hipfftPlanMany(&iplan,1,n_f,in,stride,dist,on,stride,dist,
HIPFFT_C2R,bat)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Plan creation failed!",__FUNCTION__,__LINE__);
return;
}
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
//estimate of the work size
if(hipfftGetSizeMany(iplan,1,n_f,in,stride,dist,on,stride,dist,
HIPFFT_C2R,bat,&worksize)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Estimate work size failed!",__FUNCTION__,__LINE__);
return;
}
//printf("the work size is:%ld G\n",(double)worksize/(1024*1024*1024));
if(hipfftExecC2R(iplan,(hipfftComplex*)d_u,du)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Exec failed!",__FUNCTION__,__LINE__);
return;
}
int num=0;
num=bat*tupe;
if(num<512){
threads=num;
blocks=1;
}else{
threads=512;
blocks=((num%512 ==0)?num/512:num/512+1);
}
hipLaunchKernelGGL(( fftResultProcess), dim3(blocks),dim3(threads), 0, 0, du,num,tupe);
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
hipMemcpy(U,du,sizeof(float)*bat*tupe,hipMemcpyDeviceToHost);
hipFree(du);
hipFree(d_u);
//ifft_v
in[0] = ht;
on[0] = tupe;
bat = n*((m<n)?m:n);
stride = bat;
float* dv;
hipMalloc((void**)&dv,sizeof(float)*bat*tupe);
if (hipfftPlanMany(&iplan,1,n_f,in,stride,dist,on,stride,dist,
HIPFFT_C2R,bat)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Plan creation failed!",__FUNCTION__,__LINE__);
return;
}
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
//estimate of the work size
if(hipfftGetSizeMany(iplan,1,n_f,in,stride,dist,on,stride,dist,
HIPFFT_C2R,bat,&worksize)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Estimate work size failed!",__FUNCTION__,__LINE__);
return;
}
//printf("the work size is:%ld G\n",(double)worksize/(1024*1024*1024));
if(hipfftExecC2R(iplan,(hipfftComplex*)d_v,dv)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Exec failed!",__FUNCTION__,__LINE__);
return;
}
num=bat*tupe;
if(num<512){
threads=num;
blocks=1;
}else{
threads=512;
blocks=((num%512 ==0)?num/512:num/512+1);
}
hipLaunchKernelGGL(( fftResultProcess), dim3(blocks),dim3(threads), 0, 0, dv,num,tupe);
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
hipMemcpy(V,dv,sizeof(float)*bat*tupe,hipMemcpyDeviceToHost);
hipFree(dv);
hipFree(d_v);
//ifft_s
bat = ((m<n)?m:n);
stride = bat;
hipComplex* d_s2;
hipMalloc((void**)&d_s2,sizeof(hipComplex)*ht*bat);
float* d_s3;
hipMalloc((void**)&d_s3,sizeof(float)*tupe*bat);
num=bat*ht;
if(ht*bat<512){
threads=num;
blocks=1;
}else{
threads=512;
blocks=((num%512 ==0)?num/512:num/512+1);
}
hipLaunchKernelGGL(( float2cuComplex), dim3(blocks),dim3(threads), 0, 0, d_s,ht*bat,d_s2);
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
hipFree(d_s);
if (hipfftPlanMany(&iplan,1,n_f,in,stride,dist,on,stride,dist,
HIPFFT_C2R,bat)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Plan creation failed!",__FUNCTION__,__LINE__);
return;
}
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
if(hipfftExecC2R(iplan,(hipfftComplex*)d_s2,d_s3) != HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Exec failed!",__FUNCTION__,__LINE__);
return;
}
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
num=bat*tupe;
if(num<512){
threads=num;
blocks=1;
}else{
threads=512;
blocks=((num%512 ==0)?num/512:num/512+1);
}
hipLaunchKernelGGL(( fftResultProcess), dim3(blocks),dim3(threads), 0, 0, d_s3,bat*tupe,tupe);
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
hipMemcpy(S,d_s3,sizeof(float)*tupe*bat,hipMemcpyDeviceToHost);
if(hipfftDestroy(iplan)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d]cufftDestory failed!",__FUNCTION__,__LINE__);
return;
}
hipFree(d_s3);
hipFree(d_s2);
}
| b8737a95580662ddfd31739dce4ec451565d3c90.cu | #include "svd.h"
#include "based.h"
void streamedtsvd(float* t,const int m,const int n,const int tupe,float* U,float* S,float* V){
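// t-SVD pipeline: (1) a batched real-to-complex FFT along the tube (third) dimension of the
// m*n tubes of length tupe, (2) an independent complex SVD of each of the ht = tupe/2+1
// frontal slices of the spectrum, streamed across PLAN1D_SIZE solver handles, and
// (3) inverse FFTs that bring U, S and V back to the spatial domain.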
int ht = tupe/2+1;
int bat = m*n;
float* d_t;
cufftComplex* d_fftData;
cudaMalloc((void**)&d_t,sizeof(float)*bat*tupe);
cudaMalloc((void**)&d_fftData,sizeof(cufftComplex)*bat*ht);
cudaMemcpy(d_t,t,sizeof(float)*bat*tupe,cudaMemcpyHostToDevice);
// forward FFT of the tubes
cufftHandle plan;
int n_f[1] = {tupe};
int stride = bat,dist = 1;
int in[1] = {tupe};
int on[1] = {ht};
size_t worksize=0;
if (cufftPlanMany(&plan,1,n_f,in,stride,dist,on,stride,dist,
CUFFT_R2C,bat)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Plan creation failed!",__FUNCTION__,__LINE__);
return;
}
//estimate of the work size
/* if(cufftGetSizeMany(plan,1,n_f,in,stride,dist,on,stride,dist,
CUFFT_R2C,bat,&worksize)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Estimate work size failed!",__FUNCTION__,__LINE__);
return;
}
printf("the work size is:%lf G\n",(double)worksize/(1024*1024*1024));
*/
if(cufftExecR2C(plan,d_t,(cufftComplex*)d_fftData)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Exec failed!",__FUNCTION__,__LINE__);
return;
}
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
cudaFree(d_t);
if(cufftDestroy(plan)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d]cufftDestory faile!",__FUNCTION__,__LINE__);
return;
}
//set stream for t
cudaStream_t* stream = (cudaStream_t*)malloc(PLAN1D_SIZE*sizeof(cudaStream_t));
#pragma unroll
for(int i=0;i<PLAN1D_SIZE;i++){
cudaStreamCreate(&stream[i]);
}
#if 1//tsvd
cusolverDnHandle_t* handle=(cusolverDnHandle_t*)malloc(PLAN1D_SIZE*sizeof(cusolverDnHandle_t));
gesvdjInfo_t* params=(gesvdjInfo_t*)malloc(ht*sizeof(gesvdjInfo_t));
int* info = NULL;
int echo = 1;
int lda = m;
int ldu = m;
int ldv = n;
int* lwork = (int*)malloc(ht*sizeof(int));
cuComplex** work=NULL;
//malloc u s v
float* d_s = NULL;
cuComplex* d_u = NULL;
cuComplex* d_v = NULL;
cudaMalloc((void**)&d_s,sizeof(float)*ht*((m<n)?m:n));
cudaMalloc((void**)&d_u,sizeof(cuComplex)*ht*m*((m<n)?m:n));
cudaMalloc((void**)&d_v,sizeof(cuComplex)*ht*n*((m<n)?m:n));
cudaMalloc((void**)&info,sizeof(int)*ht);
//set stream
for(int i=0;i<PLAN1D_SIZE;i++){
if(cusolverDnCreate(&handle[i]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] cusolverDnCreate failed!",__FUNCTION__,__LINE__);
return;
}
if(cusolverDnSetStream(handle[i],stream[i]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] cusolverDnCreate failed!",__FUNCTION__,__LINE__);
return;
}
}
#pragma unroll
for(int i=0;i<ht;i++){
if(cusolverDnCreateGesvdjInfo(¶ms[i]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR:creation svd info error",__FUNCTION__,__LINE__);
return;
}
}
int tupe_num=ht/PLAN1D_SIZE;
int tupe_s=ht%PLAN1D_SIZE;
if(tupe_num > 0){
for(int j=0;j<tupe_num;j++){
for(int i=0;i<PLAN1D_SIZE;i++){
if(cusolverDnCgesvdj_bufferSize(
handle[i],
CUSOLVER_EIG_MODE_VECTOR,
echo,
m,
n,
d_fftData,
m,
d_s,
d_u,
ldu,
d_v,
ldv,
&lwork[i+j*PLAN1D_SIZE],
params[i+j*PLAN1D_SIZE]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR: create buffersize failed!",__FUNCTION__,__LINE__);
return;
}
}
}
for(int i=0;i<tupe_s;i++){
if(cusolverDnCgesvdj_bufferSize(
handle[i],
CUSOLVER_EIG_MODE_VECTOR,
echo,
m,
n,
d_fftData,
m,
d_s,
d_u,
ldu,
d_v,
ldv,
&lwork[i+tupe_num*PLAN1D_SIZE],
params[i+tupe_num*PLAN1D_SIZE]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR: create buffersize failed!",__FUNCTION__,__LINE__);
return;
}
}
}else{
for(int i=0;i<tupe_s;i++){
if(cusolverDnCgesvdj_bufferSize(
handle[i],
CUSOLVER_EIG_MODE_VECTOR,
echo,
m,
n,
d_fftData,
m,
d_s,
d_u,
ldu,
d_v,
ldv,
&lwork[i],
params[i]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR: create buffersize failed!",__FUNCTION__,__LINE__);
return;
}
}
}
for(int i=0;i<PLAN1D_SIZE;i++){
cudaStreamSynchronize(stream[i]);
}
work=(cuComplex**)malloc(ht*sizeof(cuComplex*));
for(int i=0;i<ht;i++){
if(cudaMalloc((void**)&work[i],sizeof(cuComplex)*lwork[i]) !=cudaSuccess){
fprintf(stdout,"[%s]:[%d] cudaMalloc error!",__FUNCTION__,__LINE__);
return;
}
}
/*for(int i=0;i<tupe;i++){
if(cudaMalloc((void**)&work[i],sizeof(cuComplex)*lwork[i]) !=cudaSuccess){
fprintf(stdout,"[%s]:[%d] cudaMalloc error!",__FUNCTION__,__LINE__);
return;
}*/
int step_d = m*n;
int step_u = m*((m<n)?m:n);
int step_s = ((m<n)?m:n);
int step_v = n*((m<n)?m:n);
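// step_* are the element offsets between consecutive frequency slices of the spectrum and of
// the per-slice U, S and V results; slice j*PLAN1D_SIZE+i runs on handle/stream i, so up to
// PLAN1D_SIZE slice SVDs are in flight at once.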
if(tupe_num >0){
for(int j=0;j<tupe_num;j++){
for(int i=0;i<PLAN1D_SIZE;i++){
if(cusolverDnCgesvdj(
handle[i],
CUSOLVER_EIG_MODE_VECTOR,
echo,
m,
n,
d_fftData+step_d*i+j*step_d*PLAN1D_SIZE,
lda,
d_s+i*step_s+j*step_s*PLAN1D_SIZE,
d_u+i*step_u+j*step_u*PLAN1D_SIZE,
ldu,
d_v+i*step_v+j*step_v*PLAN1D_SIZE,
ldv,
work[i+j*PLAN1D_SIZE],
lwork[i+j*PLAN1D_SIZE],
&info[i+j*PLAN1D_SIZE],
params[i+j*PLAN1D_SIZE]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR:cusolverDnCgesvdj failed!",__FUNCTION__,__LINE__);
return;
}
}
}
for(int i=0;i<tupe_s;i++){
if(cusolverDnCgesvdj(
handle[i],
CUSOLVER_EIG_MODE_VECTOR,
echo,
m,
n,
d_fftData+step_d*i+tupe_num*step_d*PLAN1D_SIZE,
lda,
d_s+i*step_s+tupe_num*step_s*PLAN1D_SIZE,
d_u+i*step_u+tupe_num*step_u*PLAN1D_SIZE,
ldu,
d_v+i*step_v+tupe_num*step_v*PLAN1D_SIZE,
ldv,
work[i+tupe_num*PLAN1D_SIZE],
lwork[i+tupe_num*PLAN1D_SIZE],
&info[i+tupe_num*PLAN1D_SIZE],
params[i+tupe_num*PLAN1D_SIZE]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR:cusolverDnCgesvdj failed!",__FUNCTION__,__LINE__);
return;
}
}
}else{
for(int i=0;i<tupe_s;i++){
if(cusolverDnCgesvdj(
handle[i],
CUSOLVER_EIG_MODE_VECTOR,
echo,
m,
n,
d_fftData+step_d*i,
lda,
d_s+i*step_s,
d_u+i*step_u,
ldu,
d_v+i*step_v,
ldv,
work[i],
lwork[i],
&info[i],
params[i]) != CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR:cusolverDnCgesvdj failed!",__FUNCTION__,__LINE__);
return;
}
}
}
for(int i=0;i<PLAN1D_SIZE;i++){
if(cusolverDnDestroy(handle[i])!=CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] cusolverDnDestroy failed!",__FUNCTION__,__LINE__);
return;
}
}
for(int i=0;i<ht;i++){
if(cusolverDnDestroyGesvdjInfo(params[i])!=CUSOLVER_STATUS_SUCCESS){
fprintf(stdout,"[%s]:[%d] cusolverDnDestroy failed!",__FUNCTION__,__LINE__);
return;
}
}
if(d_fftData != NULL){
cudaFree(d_fftData);
d_fftData = NULL;
}
if(work != NULL){
// only ht per-slice workspaces were allocated, and work itself came from malloc
for(int i=0;i<ht;i++){
cudaFree(work[i]);
}
free(work);
work = NULL;
}
if(info != NULL){
cudaFree(info);
info = NULL;
}
#endif
for(int i=0;i<PLAN1D_SIZE;i++){
if(cudaStreamDestroy(stream[i]) != cudaSuccess){
fprintf(stdout,"[%s]:[%d] destory stream error!",__FUNCTION__,__LINE__);
return;
}
}
//ifft_u
int threads=0;
int blocks=0;
cufftHandle iplan =0;
in[0] = ht;
on[0] = tupe;
bat = m*((m<n)?m:n);
stride = bat;
float* du;
cudaMalloc((void**)&du,sizeof(float)*bat*tupe);
if (cufftPlanMany(&iplan,1,n_f,in,stride,dist,on,stride,dist,
CUFFT_C2R,bat)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Plan creation failed!",__FUNCTION__,__LINE__);
return;
}
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
//estimate of the work size
if(cufftGetSizeMany(iplan,1,n_f,in,stride,dist,on,stride,dist,
CUFFT_C2R,bat,&worksize)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Estimate work size failed!",__FUNCTION__,__LINE__);
return;
}
//printf("the work size is:%ld G\n",(double)worksize/(1024*1024*1024));
if(cufftExecC2R(iplan,(cufftComplex*)d_u,du)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Exec failed!",__FUNCTION__,__LINE__);
return;
}
int num=0;
num=bat*tupe;
if(num<512){
threads=num;
blocks=1;
}else{
threads=512;
blocks=((num%512 ==0)?num/512:num/512+1);
}
fftResultProcess<<<blocks,threads>>>(du,num,tupe);
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
cudaMemcpy(U,du,sizeof(float)*bat*tupe,cudaMemcpyDeviceToHost);
cudaFree(du);
cudaFree(d_u);
//ifft_v
in[0] = ht;
on[0] = tupe;
bat = n*((m<n)?m:n);
stride = bat;
float* dv;
cudaMalloc((void**)&dv,sizeof(float)*bat*tupe);
if (cufftPlanMany(&iplan,1,n_f,in,stride,dist,on,stride,dist,
CUFFT_C2R,bat)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Plan creation failed!",__FUNCTION__,__LINE__);
return;
}
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
//estimate of the work size
if(cufftGetSizeMany(iplan,1,n_f,in,stride,dist,on,stride,dist,
CUFFT_C2R,bat,&worksize)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Estimate work size failed!",__FUNCTION__,__LINE__);
return;
}
//printf("the work size is:%ld G\n",(double)worksize/(1024*1024*1024));
if(cufftExecC2R(iplan,(cufftComplex*)d_v,dv)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Exec failed!",__FUNCTION__,__LINE__);
return;
}
num=bat*tupe;
if(num<512){
threads=num;
blocks=1;
}else{
threads=512;
blocks=((num%512 ==0)?num/512:num/512+1);
}
fftResultProcess<<<blocks,threads>>>(dv,num,tupe);
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
cudaMemcpy(V,dv,sizeof(float)*bat*tupe,cudaMemcpyDeviceToHost);
cudaFree(dv);
cudaFree(d_v);
//ifft_s
bat = ((m<n)?m:n);
stride = bat;
cuComplex* d_s2;
cudaMalloc((void**)&d_s2,sizeof(cuComplex)*ht*bat);
float* d_s3;
cudaMalloc((void**)&d_s3,sizeof(float)*tupe*bat);
num=bat*ht;
if(ht*bat<512){
threads=num;
blocks=1;
}else{
threads=512;
blocks=((num%512 ==0)?num/512:num/512+1);
}
float2cuComplex<<<blocks,threads>>>(d_s,ht*bat,d_s2);
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
cudaFree(d_s);
if (cufftPlanMany(&iplan,1,n_f,in,stride,dist,on,stride,dist,
CUFFT_C2R,bat)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Plan creation failed!",__FUNCTION__,__LINE__);
return;
}
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
if(cufftExecC2R(iplan,(cufftComplex*)d_s2,d_s3) != CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Exec failed!",__FUNCTION__,__LINE__);
return;
}
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
num=bat*tupe;
if(num<512){
threads=num;
blocks=1;
}else{
threads=512;
blocks=((num%512 ==0)?num/512:num/512+1);
}
fftResultProcess<<<blocks,threads>>>(d_s3,bat*tupe,tupe);
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
cudaMemcpy(S,d_s3,sizeof(float)*tupe*bat,cudaMemcpyDeviceToHost);
if(cufftDestroy(iplan)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d]cufftDestory failed!",__FUNCTION__,__LINE__);
return;
}
cudaFree(d_s3);
cudaFree(d_s2);
}
|
e3eb843469e284dc966014b6efc5b5d87af4ba2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Accelereyes
Monte Carlo Pi Estimation
Estimate pi by calculating the ratio of points that fell inside of a
unit circle with the points that did not.
*/
#include <iostream>
#include <vector>
#include "../../common.h"
using namespace std;
// Kernel 1: per-thread hit counts go to shared memory and thread 0 sums them into block_sums[blockIdx.x]
__global__
void pi(float* randx, float* randy, int *block_sums, int nsamples) {
extern __shared__ int sums[];
int id = blockIdx.x * blockDim.x + threadIdx.x;
int count = 0;
int stride = gridDim.x * blockDim.x;
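// grid-stride loop: each thread visits every stride-th sample and counts the points that
// fall inside the quarter unit circle (x*x + y*y < 1)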
for(int index = id; index < nsamples; index += stride) {
float x = randx[index];
float y = randy[index];
count += (x*x + y*y) < 1.0f;
}
sums[threadIdx.x] = count;
__syncthreads();
if(threadIdx.x == 0) {
int sum = 0;
for(int i = 0; i < blockDim.x; i++) {
sum += sums[i];
}
block_sums[blockIdx.x] = sum;
}
}
// Kernel 2: same counting loop, reduced in shared memory down to 32 partials that are added atomically to block_sums[0]
__global__
void pi_reduce(float* randx, float* randy, int *block_sums, int nsamples) {
extern __shared__ int sums[];
int id = blockIdx.x * blockDim.x + threadIdx.x;
int count = 0;
int stride = gridDim.x * blockDim.x;
for(int index = id; index < nsamples; index += stride) {
float x = randx[index];
float y = randy[index];
count += (x*x + y*y) < 1.0f;
}
sums[threadIdx.x] = count;
int offset = blockDim.x >> 1;
while (offset >= 32) {
__syncthreads();
if (threadIdx.x < offset) {
sums[threadIdx.x] += sums[threadIdx.x + offset];
}
offset >>= 1;
}
if(threadIdx.x < 32)
atomicAdd(block_sums, sums[threadIdx.x]);
}
// Kernel 3: hit counts accumulated with atomics into 32 shared slots, then atomically into block_sums[0]
__global__
void pi_reduce_atomic(float* randx, float* randy, int *block_sums, int nsamples) {
__shared__ int sums[32];
if(threadIdx.x < 32) sums[threadIdx.x] = 0;
int id = blockIdx.x * blockDim.x + threadIdx.x;
int count = 0;
int stride = gridDim.x * blockDim.x;
for(int index = id; index < nsamples; index += stride) {
if(index < nsamples) {
float x = randx[index];
float y = randy[index];
count += (x*x + y*y) < 1.0f;
}
}
int widx = threadIdx.x % 32;
atomicAdd(sums + widx, count);
__syncthreads();
if(threadIdx.x < 32) {
atomicAdd(block_sums, sums[threadIdx.x]);
}
}
int nsamples = 1e8;
int main(void)
{
// allocate space to hold random values
vector<float> h_randNumsX(nsamples);
vector<float> h_randNumsY(nsamples);
srand(time(NULL)); // seed with system clock
//Initialize vector with random values
for (int i = 0; i < h_randNumsX.size(); ++i) {
h_randNumsX[i] = float(rand()) / RAND_MAX;
h_randNumsY[i] = float(rand()) / RAND_MAX;
}
// Send random values to the GPU
size_t size = nsamples * sizeof(float);
float* d_randNumsX;
float* d_randNumsY;
hipMalloc(&d_randNumsX, size); // TODO check return cuda* return codes
hipMalloc(&d_randNumsY, size);
hipMemcpy(d_randNumsX, h_randNumsX.data(), size, hipMemcpyHostToDevice);
hipMemcpy(d_randNumsY, h_randNumsY.data(), size, hipMemcpyHostToDevice);
int samples_per_thread = 1000;
int threads = 256;
int blocks = nsamples /(threads * samples_per_thread);
int* block_sums;
hipMalloc(&block_sums, blocks * sizeof(int));
hipLaunchKernelGGL(( pi) , dim3(blocks), dim3(threads), threads * sizeof(int), 0, d_randNumsX, d_randNumsY, block_sums, nsamples);
hipMemset(block_sums, 0, sizeof(int));
hipLaunchKernelGGL(( pi_reduce) , dim3(blocks), dim3(threads), threads * sizeof(int), 0, d_randNumsX, d_randNumsY, block_sums, nsamples);
hipMemset(block_sums, 0, sizeof(int));
hipLaunchKernelGGL(( pi_reduce_atomic) , dim3(blocks), dim3(threads), 0, 0, d_randNumsX, d_randNumsY, block_sums, nsamples);
vector<int> h_block_sums(blocks, 0);
hipMemcpy(h_block_sums.data(), block_sums, sizeof(int), hipMemcpyDeviceToHost);
int nsamples_in_circle = 0;
for(int sum : h_block_sums) {
nsamples_in_circle += sum;
}
// fraction that fell within (quarter) of unit circle
float estimatedValue = 4.0 * float(nsamples_in_circle) / nsamples;
cout << "Estimated Value: " << estimatedValue << endl;
} | e3eb843469e284dc966014b6efc5b5d87af4ba2d.cu | /*
Accelereyes
Monte Carlo Pi Estimation
Estimate pi by calculating the ratio of points that fell inside of a
unit circle with the points that did not.
*/
#include <iostream>
#include <vector>
#include "../../common.h"
using namespace std;
// Kernel 1: per-thread hit counts go to shared memory and thread 0 sums them into block_sums[blockIdx.x]
__global__
void pi(float* randx, float* randy, int *block_sums, int nsamples) {
extern __shared__ int sums[];
int id = blockIdx.x * blockDim.x + threadIdx.x;
int count = 0;
int stride = gridDim.x * blockDim.x;
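// grid-stride loop: each thread visits every stride-th sample and counts the points that
// fall inside the quarter unit circle (x*x + y*y < 1)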
for(int index = id; index < nsamples; index += stride) {
float x = randx[index];
float y = randy[index];
count += (x*x + y*y) < 1.0f;
}
sums[threadIdx.x] = count;
__syncthreads();
if(threadIdx.x == 0) {
int sum = 0;
for(int i = 0; i < blockDim.x; i++) {
sum += sums[i];
}
block_sums[blockIdx.x] = sum;
}
}
// Kernel 2: same counting loop, reduced in shared memory down to 32 partials that are added atomically to block_sums[0]
__global__
void pi_reduce(float* randx, float* randy, int *block_sums, int nsamples) {
extern __shared__ int sums[];
int id = blockIdx.x * blockDim.x + threadIdx.x;
int count = 0;
int stride = gridDim.x * blockDim.x;
for(int index = id; index < nsamples; index += stride) {
float x = randx[index];
float y = randy[index];
count += (x*x + y*y) < 1.0f;
}
sums[threadIdx.x] = count;
int offset = blockDim.x >> 1;
while (offset >= 32) {
__syncthreads();
if (threadIdx.x < offset) {
sums[threadIdx.x] += sums[threadIdx.x + offset];
}
offset >>= 1;
}
if(threadIdx.x < 32)
atomicAdd(block_sums, sums[threadIdx.x]);
}
// Kernel 3: hit counts accumulated with atomics into 32 shared slots, then atomically into block_sums[0]
__global__
void pi_reduce_atomic(float* randx, float* randy, int *block_sums, int nsamples) {
__shared__ int sums[32];
if(threadIdx.x < 32) sums[threadIdx.x] = 0;
int id = blockIdx.x * blockDim.x + threadIdx.x;
int count = 0;
int stride = gridDim.x * blockDim.x;
for(int index = id; index < nsamples; index += stride) {
if(index < nsamples) {
float x = randx[index];
float y = randy[index];
count += (x*x + y*y) < 1.0f;
}
}
int widx = threadIdx.x % 32;
atomicAdd(sums + widx, count);
__syncthreads();
if(threadIdx.x < 32) {
atomicAdd(block_sums, sums[threadIdx.x]);
}
}
int nsamples = 1e8;
int main(void)
{
// allocate space to hold random values
vector<float> h_randNumsX(nsamples);
vector<float> h_randNumsY(nsamples);
srand(time(NULL)); // seed with system clock
//Initialize vector with random values
for (int i = 0; i < h_randNumsX.size(); ++i) {
h_randNumsX[i] = float(rand()) / RAND_MAX;
h_randNumsY[i] = float(rand()) / RAND_MAX;
}
// Send random values to the GPU
size_t size = nsamples * sizeof(float);
float* d_randNumsX;
float* d_randNumsY;
cudaMalloc(&d_randNumsX, size); // TODO check return cuda* return codes
cudaMalloc(&d_randNumsY, size);
cudaMemcpy(d_randNumsX, h_randNumsX.data(), size, cudaMemcpyHostToDevice);
cudaMemcpy(d_randNumsY, h_randNumsY.data(), size, cudaMemcpyHostToDevice);
int samples_per_thread = 1000;
int threads = 256;
int blocks = nsamples /(threads * samples_per_thread);
int* block_sums;
cudaMalloc(&block_sums, blocks * sizeof(int));
pi <<< blocks, threads, threads * sizeof(int)>>> (d_randNumsX, d_randNumsY, block_sums, nsamples);
cudaMemset(block_sums, 0, sizeof(int));
pi_reduce <<< blocks, threads, threads * sizeof(int)>>> (d_randNumsX, d_randNumsY, block_sums, nsamples);
cudaMemset(block_sums, 0, sizeof(int));
pi_reduce_atomic <<< blocks, threads>>> (d_randNumsX, d_randNumsY, block_sums, nsamples);
vector<int> h_block_sums(blocks, 0);
cudaMemcpy(h_block_sums.data(), block_sums, sizeof(int), cudaMemcpyDeviceToHost);
int nsamples_in_circle = 0;
for(int sum : h_block_sums) {
nsamples_in_circle += sum;
}
// fraction that fell within (quarter) of unit circle
float estimatedValue = 4.0 * float(nsamples_in_circle) / nsamples;
cout << "Estimated Value: " << estimatedValue << endl;
} |
da9920fd25f34046ba8fad5b254ff77298b0fe99.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Variable.h"
Variable::Variable (const int Nx, const int Ny) {
// Allocate on Host
h_var = (double*)malloc(Nx*Ny*sizeof(double));
h_varx = (double*)malloc(Nx*Ny*sizeof(double));
h_vary = (double*)malloc(Nx*Ny*sizeof(double));
// Allocate on Device
hipMalloc(&d_var,Nx*Ny*sizeof(double));
hipMalloc(&d_varx,Nx*Ny*sizeof(double));
hipMalloc(&d_vary,Nx*Ny*sizeof(double));
hipMalloc(&VAR,(Nx/2+1)*Ny*sizeof(hipDoubleComplex));
hipMalloc(&VARX,(Nx/2+1)*Ny*sizeof(hipDoubleComplex));
hipMalloc(&VARY,(Nx/2+1)*Ny*sizeof(hipDoubleComplex));
// CUFFT Plans
hipfftPlan2d(&fft_var, Ny, Nx, HIPFFT_D2Z);
hipfftPlan2d(&ifft_VARX, Ny, Nx, HIPFFT_Z2D);
hipfftPlan2d(&ifft_VARY, Ny, Nx, HIPFFT_Z2D);
hipfftPlan2d(&ifft_VAR, Ny, Nx, HIPFFT_Z2D);
};
Variable::~Variable () {
hipfftDestroy(fft_var);
hipfftDestroy(ifft_VARX);
hipfftDestroy(ifft_VARY);
free(h_var);
free(h_varx);
free(h_vary);
hipFree(d_var);
hipFree(d_varx);
hipFree(d_vary);
hipFree(VAR);
hipFree(VARX);
hipFree(VARY);
};
double Variable::MaxVar (const int Nx, const int Ny) {
// Calculate the maximum velocity for adaptive dt
double max_var = fabs(h_var[0]);
for (int ii=1; ii<Nx*Ny; ii++) {
if (max(fabs(h_var[ii]),max_var) != max_var) {
max_var = fabs(h_var[ii]);
}
}
return max_var;
}
void Variable::ComputeDerivatives (const double* k, const double* l, const int Nx, const int Ny, const int tpb, const int nblks) {
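// Spectral differentiation: forward D2Z transform of d_var, then CalcDerivs (defined elsewhere)
// builds the x/y derivative spectra from the wavenumber arrays k and l, and two Z2D transforms
// return d_varx and d_vary to physical space. Note hipFFT transforms are unnormalized, so the
// 1/(Nx*Ny) factor presumably lives in CalcDerivs or in the wavenumber arrays.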
// Fourier transform
hipfftExecD2Z(fft_var, d_var, VAR);
// Calculate derivatives
hipLaunchKernelGGL(( CalcDerivs), dim3(nblks),dim3(tpb), 0, 0, VAR, VARX, VARY, k, l, Nx, Ny);
// Transform back
hipfftExecZ2D(ifft_VARX, VARX, d_varx);
hipfftExecZ2D(ifft_VARY, VARY, d_vary);
}
void Variable::Filter (const double* filter, const int Nx, const int Ny, const int tpb, const int nblks) {
// Fourier transform
hipfftExecD2Z(fft_var, d_var, VAR);
// Apply filter
hipLaunchKernelGGL(( takeFilter), dim3(nblks),dim3(tpb), 0, 0, VAR, filter, Nx, Ny);
// Transform back
hipfftExecZ2D(ifft_VAR, VAR, d_var);
} | da9920fd25f34046ba8fad5b254ff77298b0fe99.cu | #include "Variable.h"
Variable::Variable (const int Nx, const int Ny) {
// Allocate on Host
h_var = (double*)malloc(Nx*Ny*sizeof(double));
h_varx = (double*)malloc(Nx*Ny*sizeof(double));
h_vary = (double*)malloc(Nx*Ny*sizeof(double));
// Allocate on Device
cudaMalloc(&d_var,Nx*Ny*sizeof(double));
cudaMalloc(&d_varx,Nx*Ny*sizeof(double));
cudaMalloc(&d_vary,Nx*Ny*sizeof(double));
cudaMalloc(&VAR,(Nx/2+1)*Ny*sizeof(cuDoubleComplex));
cudaMalloc(&VARX,(Nx/2+1)*Ny*sizeof(cuDoubleComplex));
cudaMalloc(&VARY,(Nx/2+1)*Ny*sizeof(cuDoubleComplex));
// CUFFT Plans
cufftPlan2d(&fft_var, Ny, Nx, CUFFT_D2Z);
cufftPlan2d(&ifft_VARX, Ny, Nx, CUFFT_Z2D);
cufftPlan2d(&ifft_VARY, Ny, Nx, CUFFT_Z2D);
cufftPlan2d(&ifft_VAR, Ny, Nx, CUFFT_Z2D);
};
Variable::~Variable () {
cufftDestroy(fft_var);
cufftDestroy(ifft_VARX);
cufftDestroy(ifft_VARY);
free(h_var);
free(h_varx);
free(h_vary);
cudaFree(d_var);
cudaFree(d_varx);
cudaFree(d_vary);
cudaFree(VAR);
cudaFree(VARX);
cudaFree(VARY);
};
double Variable::MaxVar (const int Nx, const int Ny) {
// Calculate the maximum velocity for adaptive dt
double max_var = fabs(h_var[0]);
for (int ii=1; ii<Nx*Ny; ii++) {
if (max(fabs(h_var[ii]),max_var) != max_var) {
max_var = fabs(h_var[ii]);
}
}
return max_var;
}
void Variable::ComputeDerivatives (const double* k, const double* l, const int Nx, const int Ny, const int tpb, const int nblks) {
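// Spectral differentiation: forward D2Z transform of d_var, then CalcDerivs (defined elsewhere)
// builds the x/y derivative spectra from the wavenumber arrays k and l, and two Z2D transforms
// return d_varx and d_vary to physical space. Note cuFFT transforms are unnormalized, so the
// 1/(Nx*Ny) factor presumably lives in CalcDerivs or in the wavenumber arrays.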
// Fourier transform
cufftExecD2Z(fft_var, d_var, VAR);
// Calculate derivatives
CalcDerivs<<<nblks,tpb>>>(VAR, VARX, VARY, k, l, Nx, Ny);
// Transform back
cufftExecZ2D(ifft_VARX, VARX, d_varx);
cufftExecZ2D(ifft_VARY, VARY, d_vary);
}
void Variable::Filter (const double* filter, const int Nx, const int Ny, const int tpb, const int nblks) {
// Fourier transform
cufftExecD2Z(fft_var, d_var, VAR);
// Apply filter
takeFilter<<<nblks,tpb>>>(VAR, filter, Nx, Ny);
// Transform back
cufftExecZ2D(ifft_VAR, VAR, d_var);
} |
58508f47e9837fe5975a795358a1df1a79ed9674.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/Resize.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/multi_margin_loss_native.h>
#include <ATen/ops/multi_margin_loss_backward_native.h>
#endif
namespace at::native {
namespace {
constexpr int MULTIMARGIN_THREADS = 128;
template <int P, typename scalar_t>
__global__ void MultiMarginLoss_forward_kernel(
scalar_t *output, const scalar_t *input, const int64_t *target, const scalar_t *weights,
int nframe, int dim, bool sizeAverage, scalar_t margin, TORCH_DSA_KERNEL_ARGS) {
using acc_t = at::acc_type<scalar_t, true>;
__shared__ acc_t buffer[MULTIMARGIN_THREADS];
int k = blockIdx.x;
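// one block per sample: block k reduces the multi-margin losses of row k over the class dimension dim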
const scalar_t *input_k = input + k*dim;
scalar_t *output_k = output + k;
int target_k = static_cast<int>(target[k]);
CUDA_KERNEL_ASSERT2(target_k >= 0 && target_k < dim && "target index is out of bounds");
scalar_t input_target_k = input_k[target_k];
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
buffer[threadIdx.x] = 0;
for (int i = i_start; i < i_end; i += i_step) {
scalar_t z = margin - input_target_k + input_k[i];
if (i == target_k) {
continue;
}
if (z > 0) {
scalar_t h = (P==1) ? z : z*z;
if (weights) {
h *= weights[target_k];
}
buffer[threadIdx.x] += h;
}
}
__syncthreads();
// reduce
if (threadIdx.x == 0) {
acc_t sum = 0;
for (int i=0; i < blockDim.x; i++)
sum += buffer[i];
const int denom = sizeAverage ? nframe * dim : dim;
*output_k = static_cast<scalar_t>(sum / denom);
}
}
template <int P, typename scalar_t>
__global__ void MultiMarginLoss_backward_kernel(
scalar_t *gradInput, const scalar_t *gradOutput, const scalar_t *input, const int64_t *target,
const scalar_t *weights, int nframe, int dim, bool sizeAverage, scalar_t margin,
bool reduce) {
using acc_t = at::acc_type<scalar_t, true>;
__shared__ acc_t buffer[MULTIMARGIN_THREADS];
int k = blockIdx.x;
const scalar_t *input_k = input + k*dim;
scalar_t *gradInput_k = gradInput + k*dim;
int target_k = static_cast<int>(target[k]);
scalar_t input_target_k = input_k[target_k];
const scalar_t *gradOutput_k = gradOutput;
if (!reduce) {
gradOutput_k += k;
}
const int denom = sizeAverage && reduce ? nframe * dim : dim;
const acc_t g = acc_t(1) / static_cast<acc_t>(denom);
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
buffer[threadIdx.x] = 0;
for (int i=i_start; i<i_end; i+=i_step) {
scalar_t z = margin - input_target_k + input_k[i];
if (i == target_k) {
continue;
}
if (z > 0) {
acc_t h = (P == 1) ? g : 2*g*z;
if (weights) {
h *= weights[target_k];
}
buffer[threadIdx.x] -= static_cast<scalar_t>(h);
gradInput_k[i] = static_cast<scalar_t>(h);
} else {
gradInput_k[i] = static_cast<scalar_t>(0);
}
}
__syncthreads();
// reduce
if (threadIdx.x == 0) {
acc_t gradInput_target_k = 0;
for (int i=0; i<blockDim.x; i++) {
gradInput_target_k += buffer[i];
}
gradInput_k[target_k] = static_cast<scalar_t>(gradInput_target_k);
}
for (int i=i_start; i<i_end; i+= i_step) {
gradInput_k[i] *= * gradOutput_k;
}
}
void multi_margin_loss_shape_check(int &nframe,
const Tensor &input, const Tensor &target) {
auto in_sizes = input.sizes();
auto dims = in_sizes.size();
TORCH_CHECK(
(dims == 2 && in_sizes[1] != 0) || (dims == 1 && in_sizes[0] != 0) || dims == 0,
"Expected non-empty vector or matrix with optional 0-dim batch size, but got: ",
in_sizes);
nframe = dims <= 1 ? 1 : in_sizes[0];
TORCH_CHECK(
target.dim() <= 1 && target.numel() == nframe,
"inconsistent target size, expected ", nframe, " but got ",
target.sizes());
}
} // namespace (anonymous)
Tensor& multi_margin_loss_cuda_out(
const Tensor &input_, const Tensor &target_, const Scalar &p_, const Scalar &margin_,
const c10::optional<Tensor> &weights_, int64_t reduction, Tensor& out_) {
auto p = p_.toLong();
TORCH_CHECK(p == 1 || p == 2, "multi_margin_loss: Invalid p, expected 1 or 2 but got ", p);
int nframe;
multi_margin_loss_shape_check(nframe, input_, target_);
// produce a scalar output for 1d input
if (reduction == Reduction::None && target_.dim() > 0) {
resize_output(out_, {nframe});
} else {
resize_output(out_, {});
}
if (input_.numel() == 0) {
return out_;
}
auto input = input_.contiguous();
auto target = target_.contiguous();
Tensor weights;
if (weights_ && weights_->defined()) {
weights = weights_->contiguous();
}
auto out = (out_.is_contiguous() ? out_ :
at::empty(out_.sizes(), input.options()));
const auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "multi_margin_loss_cuda", [&] {
const scalar_t margin = margin_.to<scalar_t>();
if (input.dim() <= 1) {
TORCH_CHECK(target.dim() <= 1 && target.numel() == nframe, "inconsistent target size");
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1) {
TORCH_DSA_KERNEL_LAUNCH(
(MultiMarginLoss_forward_kernel<1>),
blocks,
threads,
0,
stream,
out.mutable_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
1,
input.dim() < 1 ? input.numel() : input.sizes()[0],
reduction == at::Reduction::Mean,
margin);
} else if (p == 2) {
TORCH_DSA_KERNEL_LAUNCH((MultiMarginLoss_forward_kernel<2>),
blocks,
threads,
0,
stream,
out.mutable_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
1,
input.dim() < 1 ? input.numel() : input.sizes()[0],
reduction == at::Reduction::Mean,
margin);
}
} else {
auto in_sizes = input.sizes();
TORCH_INTERNAL_ASSERT(in_sizes.size() == 2);
// allow zero-dim target for 2D input.
TORCH_CHECK(in_sizes[1] != 0 && target.dim() <= 1 && target.numel() == nframe,
"inconsistent target size");
dim3 blocks(nframe);
dim3 threads(MULTIMARGIN_THREADS);
if (reduction == at::Reduction::None) {
if (p == 1) {
TORCH_DSA_KERNEL_LAUNCH(MultiMarginLoss_forward_kernel<1>,
blocks,
threads,
0,
stream,
out.mutable_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
false,
margin);
} else if (p == 2) {
TORCH_DSA_KERNEL_LAUNCH(MultiMarginLoss_forward_kernel<2>,
blocks,
threads,
0,
stream,
out.mutable_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
false,
margin);
}
} else {
auto tmp_output = at::empty({nframe}, input.options());
if (p == 1) {
TORCH_DSA_KERNEL_LAUNCH(MultiMarginLoss_forward_kernel<1>,
blocks,
threads,
0,
stream,
tmp_output.mutable_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == Reduction::Mean,
margin);
} else if (p == 2) {
TORCH_DSA_KERNEL_LAUNCH(MultiMarginLoss_forward_kernel<2>,
blocks,
threads,
0,
stream,
tmp_output.mutable_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == Reduction::Mean,
margin);
}
at::sum_out(out, tmp_output, IntArrayRef{});
}
}
});
if (!out.is_alias_of(out_)) {
out_.copy_(out);
}
return out_;
}
Tensor multi_margin_loss_cuda(
const Tensor &input, const Tensor &target, const Scalar &p, const Scalar &margin,
const c10::optional<Tensor> &weights, int64_t reduction) {
auto out = at::empty({0}, input.options());
multi_margin_loss_cuda_out(input, target, p, margin, weights, reduction, out);
return out;
}
Tensor& multi_margin_loss_cuda_backward_out(
const Tensor &grad_output_,const Tensor &input_, const Tensor &target_,
const Scalar &p_, const Scalar &margin_, const c10::optional<Tensor> &weights_,
int64_t reduction, Tensor &grad_input_) {
auto p = p_.toLong();
TORCH_CHECK(p == 1 || p == 2,
"multi_margin_loss_backward: Invalid p, expected 1 or 2 but got ", p);
int nframe;
multi_margin_loss_shape_check(nframe, input_, target_);
resize_output(grad_input_, input_.sizes());
if (input_.numel() == 0) {
return grad_input_;
}
auto input = input_.contiguous();
auto grad_input = (grad_input_.is_contiguous() ? grad_input_ :
at::empty(grad_input_.sizes(), input.options()));
auto grad_output = grad_output_.contiguous();
auto target = target_.contiguous();
Tensor weights;
if (weights_ && weights_->defined()) {
weights = weights_->contiguous();
}
const auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"multi_margin_loss_backward_cuda", [&] {
const scalar_t margin = margin_.to<scalar_t>();
if (input.dim() <= 1) {
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1) {
hipLaunchKernelGGL(( MultiMarginLoss_backward_kernel<1>) , dim3(blocks), dim3(threads), 0, stream,
grad_input.mutable_data_ptr<scalar_t>(),
grad_output.const_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
1,
input.dim() == 0 ? 1 : input.sizes()[0],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else if (p == 2) {
hipLaunchKernelGGL(( MultiMarginLoss_backward_kernel<2>) , dim3(blocks), dim3(threads), 0, stream,
grad_input.mutable_data_ptr<scalar_t>(),
grad_output.const_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
1,
input.dim() == 0 ? 1 : input.sizes()[0],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
} else {
auto in_sizes = input.sizes();
TORCH_INTERNAL_ASSERT(in_sizes.size() == 2);
TORCH_CHECK((in_sizes[1] != 0) && (target.dim() <= 1) && (target.numel() == nframe),
"inconsistent target size");
dim3 blocks(in_sizes[0]);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1) {
hipLaunchKernelGGL(( MultiMarginLoss_backward_kernel<1>) , dim3(blocks), dim3(threads), 0, stream,
grad_input.mutable_data_ptr<scalar_t>(),
grad_output.const_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else if (p == 2) {
hipLaunchKernelGGL(( MultiMarginLoss_backward_kernel<2>) , dim3(blocks), dim3(threads), 0, stream,
grad_input.mutable_data_ptr<scalar_t>(),
grad_output.const_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
});
if (!grad_input.is_alias_of(grad_input_)) {
grad_input_.copy_(grad_input);
}
return grad_input_;
}
Tensor multi_margin_loss_cuda_backward(
const Tensor &grad_output, const Tensor &input, const Tensor &target,
const Scalar &p, const Scalar &margin, const c10::optional<Tensor> &weights,
int64_t reduction) {
auto grad_input = at::empty({0}, input.options());
multi_margin_loss_cuda_backward_out(
grad_output, input, target, p, margin, weights, reduction, grad_input);
return grad_input;
}
} // namespace at::native
| 58508f47e9837fe5975a795358a1df1a79ed9674.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/Resize.h>
#include <c10/cuda/CUDAStream.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/multi_margin_loss_native.h>
#include <ATen/ops/multi_margin_loss_backward_native.h>
#endif
namespace at::native {
namespace {
constexpr int MULTIMARGIN_THREADS = 128;
template <int P, typename scalar_t>
__global__ void MultiMarginLoss_forward_kernel(
scalar_t *output, const scalar_t *input, const int64_t *target, const scalar_t *weights,
int nframe, int dim, bool sizeAverage, scalar_t margin, TORCH_DSA_KERNEL_ARGS) {
using acc_t = at::acc_type<scalar_t, true>;
__shared__ acc_t buffer[MULTIMARGIN_THREADS];
int k = blockIdx.x;
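// one block per sample: block k reduces the multi-margin losses of row k over the class dimension dim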
const scalar_t *input_k = input + k*dim;
scalar_t *output_k = output + k;
int target_k = static_cast<int>(target[k]);
CUDA_KERNEL_ASSERT2(target_k >= 0 && target_k < dim && "target index is out of bounds");
scalar_t input_target_k = input_k[target_k];
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
buffer[threadIdx.x] = 0;
for (int i = i_start; i < i_end; i += i_step) {
scalar_t z = margin - input_target_k + input_k[i];
if (i == target_k) {
continue;
}
if (z > 0) {
scalar_t h = (P==1) ? z : z*z;
if (weights) {
h *= weights[target_k];
}
buffer[threadIdx.x] += h;
}
}
__syncthreads();
// reduce
if (threadIdx.x == 0) {
acc_t sum = 0;
for (int i=0; i < blockDim.x; i++)
sum += buffer[i];
const int denom = sizeAverage ? nframe * dim : dim;
*output_k = static_cast<scalar_t>(sum / denom);
}
}
template <int P, typename scalar_t>
__global__ void MultiMarginLoss_backward_kernel(
scalar_t *gradInput, const scalar_t *gradOutput, const scalar_t *input, const int64_t *target,
const scalar_t *weights, int nframe, int dim, bool sizeAverage, scalar_t margin,
bool reduce) {
using acc_t = at::acc_type<scalar_t, true>;
__shared__ acc_t buffer[MULTIMARGIN_THREADS];
int k = blockIdx.x;
const scalar_t *input_k = input + k*dim;
scalar_t *gradInput_k = gradInput + k*dim;
int target_k = static_cast<int>(target[k]);
scalar_t input_target_k = input_k[target_k];
const scalar_t *gradOutput_k = gradOutput;
if (!reduce) {
gradOutput_k += k;
}
const int denom = sizeAverage && reduce ? nframe * dim : dim;
const acc_t g = acc_t(1) / static_cast<acc_t>(denom);
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
buffer[threadIdx.x] = 0;
for (int i=i_start; i<i_end; i+=i_step) {
scalar_t z = margin - input_target_k + input_k[i];
if (i == target_k) {
continue;
}
if (z > 0) {
acc_t h = (P == 1) ? g : 2*g*z;
if (weights) {
h *= weights[target_k];
}
buffer[threadIdx.x] -= static_cast<scalar_t>(h);
gradInput_k[i] = static_cast<scalar_t>(h);
} else {
gradInput_k[i] = static_cast<scalar_t>(0);
}
}
__syncthreads();
// reduce
if (threadIdx.x == 0) {
acc_t gradInput_target_k = 0;
for (int i=0; i<blockDim.x; i++) {
gradInput_target_k += buffer[i];
}
gradInput_k[target_k] = static_cast<scalar_t>(gradInput_target_k);
}
for (int i=i_start; i<i_end; i+= i_step) {
gradInput_k[i] *= * gradOutput_k;
}
}
void multi_margin_loss_shape_check(int &nframe,
const Tensor &input, const Tensor &target) {
auto in_sizes = input.sizes();
auto dims = in_sizes.size();
TORCH_CHECK(
(dims == 2 && in_sizes[1] != 0) || (dims == 1 && in_sizes[0] != 0) || dims == 0,
"Expected non-empty vector or matrix with optional 0-dim batch size, but got: ",
in_sizes);
nframe = dims <= 1 ? 1 : in_sizes[0];
TORCH_CHECK(
target.dim() <= 1 && target.numel() == nframe,
"inconsistent target size, expected ", nframe, " but got ",
target.sizes());
}
} // namespace (anonymous)
Tensor& multi_margin_loss_cuda_out(
const Tensor &input_, const Tensor &target_, const Scalar &p_, const Scalar &margin_,
const c10::optional<Tensor> &weights_, int64_t reduction, Tensor& out_) {
auto p = p_.toLong();
TORCH_CHECK(p == 1 || p == 2, "multi_margin_loss: Invalid p, expected 1 or 2 but got ", p);
int nframe;
multi_margin_loss_shape_check(nframe, input_, target_);
// produce a scalar output for 1d input
if (reduction == Reduction::None && target_.dim() > 0) {
resize_output(out_, {nframe});
} else {
resize_output(out_, {});
}
if (input_.numel() == 0) {
return out_;
}
auto input = input_.contiguous();
auto target = target_.contiguous();
Tensor weights;
if (weights_ && weights_->defined()) {
weights = weights_->contiguous();
}
auto out = (out_.is_contiguous() ? out_ :
at::empty(out_.sizes(), input.options()));
const auto stream = c10::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "multi_margin_loss_cuda", [&] {
const scalar_t margin = margin_.to<scalar_t>();
if (input.dim() <= 1) {
TORCH_CHECK(target.dim() <= 1 && target.numel() == nframe, "inconsistent target size");
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1) {
TORCH_DSA_KERNEL_LAUNCH(
(MultiMarginLoss_forward_kernel<1>),
blocks,
threads,
0,
stream,
out.mutable_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
1,
input.dim() < 1 ? input.numel() : input.sizes()[0],
reduction == at::Reduction::Mean,
margin);
} else if (p == 2) {
TORCH_DSA_KERNEL_LAUNCH((MultiMarginLoss_forward_kernel<2>),
blocks,
threads,
0,
stream,
out.mutable_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
1,
input.dim() < 1 ? input.numel() : input.sizes()[0],
reduction == at::Reduction::Mean,
margin);
}
} else {
auto in_sizes = input.sizes();
TORCH_INTERNAL_ASSERT(in_sizes.size() == 2);
// allow zero-dim target for 2D input.
TORCH_CHECK(in_sizes[1] != 0 && target.dim() <= 1 && target.numel() == nframe,
"inconsistent target size");
dim3 blocks(nframe);
dim3 threads(MULTIMARGIN_THREADS);
if (reduction == at::Reduction::None) {
if (p == 1) {
TORCH_DSA_KERNEL_LAUNCH(MultiMarginLoss_forward_kernel<1>,
blocks,
threads,
0,
stream,
out.mutable_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
false,
margin);
} else if (p == 2) {
TORCH_DSA_KERNEL_LAUNCH(MultiMarginLoss_forward_kernel<2>,
blocks,
threads,
0,
stream,
out.mutable_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
false,
margin);
}
} else {
auto tmp_output = at::empty({nframe}, input.options());
if (p == 1) {
TORCH_DSA_KERNEL_LAUNCH(MultiMarginLoss_forward_kernel<1>,
blocks,
threads,
0,
stream,
tmp_output.mutable_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == Reduction::Mean,
margin);
} else if (p == 2) {
TORCH_DSA_KERNEL_LAUNCH(MultiMarginLoss_forward_kernel<2>,
blocks,
threads,
0,
stream,
tmp_output.mutable_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == Reduction::Mean,
margin);
}
at::sum_out(out, tmp_output, IntArrayRef{});
}
}
});
if (!out.is_alias_of(out_)) {
out_.copy_(out);
}
return out_;
}
Tensor multi_margin_loss_cuda(
const Tensor &input, const Tensor &target, const Scalar &p, const Scalar &margin,
const c10::optional<Tensor> &weights, int64_t reduction) {
auto out = at::empty({0}, input.options());
multi_margin_loss_cuda_out(input, target, p, margin, weights, reduction, out);
return out;
}
Tensor& multi_margin_loss_cuda_backward_out(
const Tensor &grad_output_,const Tensor &input_, const Tensor &target_,
const Scalar &p_, const Scalar &margin_, const c10::optional<Tensor> &weights_,
int64_t reduction, Tensor &grad_input_) {
auto p = p_.toLong();
TORCH_CHECK(p == 1 || p == 2,
"multi_margin_loss_backward: Invalid p, expected 1 or 2 but got ", p);
int nframe;
multi_margin_loss_shape_check(nframe, input_, target_);
resize_output(grad_input_, input_.sizes());
if (input_.numel() == 0) {
return grad_input_;
}
auto input = input_.contiguous();
auto grad_input = (grad_input_.is_contiguous() ? grad_input_ :
at::empty(grad_input_.sizes(), input.options()));
auto grad_output = grad_output_.contiguous();
auto target = target_.contiguous();
Tensor weights;
if (weights_ && weights_->defined()) {
weights = weights_->contiguous();
}
const auto stream = c10::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"multi_margin_loss_backward_cuda", [&] {
const scalar_t margin = margin_.to<scalar_t>();
if (input.dim() <= 1) {
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1) {
MultiMarginLoss_backward_kernel<1> <<<blocks, threads, 0, stream>>>(
grad_input.mutable_data_ptr<scalar_t>(),
grad_output.const_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
1,
input.dim() == 0 ? 1 : input.sizes()[0],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else if (p == 2) {
MultiMarginLoss_backward_kernel<2> <<<blocks, threads, 0, stream>>>(
grad_input.mutable_data_ptr<scalar_t>(),
grad_output.const_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
1,
input.dim() == 0 ? 1 : input.sizes()[0],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} else {
auto in_sizes = input.sizes();
TORCH_INTERNAL_ASSERT(in_sizes.size() == 2);
TORCH_CHECK((in_sizes[1] != 0) && (target.dim() <= 1) && (target.numel() == nframe),
"inconsistent target size");
dim3 blocks(in_sizes[0]);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1) {
MultiMarginLoss_backward_kernel<1> <<<blocks, threads, 0, stream>>>(
grad_input.mutable_data_ptr<scalar_t>(),
grad_output.const_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else if (p == 2) {
MultiMarginLoss_backward_kernel<2> <<<blocks, threads, 0, stream>>>(
grad_input.mutable_data_ptr<scalar_t>(),
grad_output.const_data_ptr<scalar_t>(),
input.const_data_ptr<scalar_t>(),
target.const_data_ptr<int64_t>(),
weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
});
if (!grad_input.is_alias_of(grad_input_)) {
grad_input_.copy_(grad_input);
}
return grad_input_;
}
Tensor multi_margin_loss_cuda_backward(
const Tensor &grad_output, const Tensor &input, const Tensor &target,
const Scalar &p, const Scalar &margin, const c10::optional<Tensor> &weights,
int64_t reduction) {
auto grad_input = at::empty({0}, input.options());
multi_margin_loss_cuda_backward_out(
grad_output, input, target, p, margin, weights, reduction, grad_input);
return grad_input;
}
} // namespace at::native
|
ceae912ee95fb4892fd500a88c3a27f1bdb662af.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpurecon.h"
#ifdef __cplusplus
extern "C"
{
#endif
///Pointer to the reconstructor on the GPU, allocated and populated by calling init_gpurecon
float * rec_gpu;
///Pointer to memory for slopes on the GPU, allocated by calling init_gpurecon, updated with each call to gpurecon
float * slopes_gpu;
///Pointer to the amplitude vector on the GPU, allocated by calling init_gpurecon, filled with result of sgemv
float * amps_gpu;
///Number of modes in reconstructor (matrix rows, length or amp vector)
int n_modes;
///Number of slopes in reconstructor (matrix columns, length of slopes vector)
int n_slopes;
///Used bye cublas library
hipblasHandle_t handle;
///Get the current time as a double.
double get_curr_t()
{
struct timespec tsp;
clock_gettime(CLOCK_REALTIME, &tsp);
return ((double)tsp.tv_sec) + ((double)tsp.tv_nsec)/1e9;
}
int init_gpurecon(int nm, int ns, float *rec_host)
{
hipError_t cudaStat;
hipblasStatus_t stat;
//Initialize the cublas library
stat = hipblasCreate(&handle);
if ( stat != HIPBLAS_STATUS_SUCCESS )
{
fprintf (stderr, "CUBLAS initialization failed\n" ) ;
return EXIT_FAILURE;
}
n_modes = nm;
n_slopes = ns;
//Convert to column major storage.
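//hipBLAS (like cuBLAS) expects Fortran-style column-major storage, while
//rec_host is row-major n_modes x n_slopes, so element (i,j) is moved from
//rec_host[i*ns + j] to colmaj[j*nm + i] before being uploaded with SetMatrix.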
float *colmaj = (float *) malloc(nm*ns*sizeof(float));
if(colmaj == 0)
{
fprintf(stderr, "Allocation of main memory for reconstructor col-major failed.\n");
return EXIT_FAILURE;
}
for(int i=0;i<nm;i++)
{
for(int j=0;j<ns;j++)
{
colmaj[j*nm +i] = rec_host[i*ns + j];
}
}
cudaStat = hipMalloc((void **)&rec_gpu, n_modes*n_slopes*sizeof(float));
if( cudaStat != hipSuccess )
{
fprintf(stderr, "GPU memory allocation failed\n") ;
return EXIT_FAILURE ;
}
cudaStat = hipMalloc((void **)&slopes_gpu, n_slopes * sizeof(float));
if( cudaStat != hipSuccess )
{
fprintf(stderr, "GPU memory allocation failed\n") ;
return EXIT_FAILURE ;
}
cudaStat = hipMalloc((void **) &amps_gpu, n_modes*sizeof(float));
if( cudaStat != hipSuccess )
{
fprintf(stderr, "GPU memory allocation failed\n") ;
return EXIT_FAILURE ;
}
//stat = hipblasSetMatrix(M, N, sizeof(*a), a, M, devPtrA, M);
stat = hipblasSetMatrix(n_modes, n_slopes, sizeof(float), colmaj, n_modes, rec_gpu, n_modes);
if ( stat != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "Error loading reconstructor onto GPU\n") ;
return EXIT_FAILURE ;
}
free(colmaj);
return EXIT_SUCCESS;
}
int free_gpurecon()
{
hipFree(rec_gpu);
hipFree(slopes_gpu);
hipFree(amps_gpu);
hipblasDestroy(handle);
return 0;
}
int gpurecon(float *slopes_host, float *amps_host)
{
hipblasStatus_t stat;
float alpha = 1.0f, beta = 0.0f;
//double t0, t1, t2;
//t0 = get_curr_t();
stat = hipblasSetVector(n_slopes, sizeof(float), slopes_host, 1, slopes_gpu, 1);
if(stat != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "Error sending slopes vector to GPU.\n");
return EXIT_FAILURE;
}
//t1 = get_curr_t();
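//Sgemv computes y = alpha*A*x + beta*y; with alpha = 1 and beta = 0 this is
//simply amps = R * slopes for the n_modes x n_slopes reconstructor R.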
stat = hipblasSgemv(handle, HIPBLAS_OP_N, n_modes, n_slopes, &alpha, rec_gpu, n_modes, slopes_gpu, 1, &beta, amps_gpu, 1);
if(stat != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "Error during matrix-vector multiply.\n");
return EXIT_FAILURE;
}
//t2 = get_curr_t();
stat = hipblasGetVector(n_modes, sizeof(float), amps_gpu, 1, amps_host, 1);
if(stat != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "Error getting amplitudes vector from GPU.\n");
return EXIT_FAILURE;
}
/*if(tw)
{
*tr = get_curr_t() - t2;
*tmm = t2 - t1;
*tw = t1 - t0;
}*/
return EXIT_SUCCESS;
}
#ifdef __cplusplus
}//extern "C"
#endif
| ceae912ee95fb4892fd500a88c3a27f1bdb662af.cu |
#include "gpurecon.h"
#ifdef __cplusplus
extern "C"
{
#endif
///Pointer to the reconstructor on the GPU, allocated and populated by calling init_gpurecon
float * rec_gpu;
///Pointer to memory for slopes on the GPU, allocated by calling init_gpurecon, updated with each call to gpurecon
float * slopes_gpu;
///Pointer to the amplitude vector on the GPU, allocated by calling init_gpurecon, filled with result of sgemv
float * amps_gpu;
///Number of modes in reconstructor (matrix rows, length of the amps vector)
int n_modes;
///Number of slopes in reconstructor (matrix columns, length of slopes vector)
int n_slopes;
///Used by the cublas library
cublasHandle_t handle;
///Get the current time as a double.
double get_curr_t()
{
struct timespec tsp;
clock_gettime(CLOCK_REALTIME, &tsp);
return ((double)tsp.tv_sec) + ((double)tsp.tv_nsec)/1e9;
}
int init_gpurecon(int nm, int ns, float *rec_host)
{
cudaError_t cudaStat;
cublasStatus_t stat;
//Initialize the cublas library
stat = cublasCreate(&handle);
if ( stat != CUBLAS_STATUS_SUCCESS )
{
fprintf (stderr, "CUBLAS initialization failed\n" ) ;
return EXIT_FAILURE;
}
n_modes = nm;
n_slopes = ns;
//Convert to column major storage.
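//cuBLAS expects Fortran-style column-major storage, while rec_host is
//row-major n_modes x n_slopes, so element (i,j) is moved from
//rec_host[i*ns + j] to colmaj[j*nm + i] before being uploaded with cublasSetMatrix.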
float *colmaj = (float *) malloc(nm*ns*sizeof(float));
if(colmaj == 0)
{
fprintf(stderr, "Allocation of main memory for reconstructor col-major failed.\n");
return EXIT_FAILURE;
}
for(int i=0;i<nm;i++)
{
for(int j=0;j<ns;j++)
{
colmaj[j*nm +i] = rec_host[i*ns + j];
}
}
cudaStat = cudaMalloc((void **)&rec_gpu, n_modes*n_slopes*sizeof(float));
if( cudaStat != cudaSuccess )
{
fprintf(stderr, "GPU memory allocation failed\n") ;
return EXIT_FAILURE ;
}
cudaStat = cudaMalloc((void **)&slopes_gpu, n_slopes * sizeof(float));
if( cudaStat != cudaSuccess )
{
fprintf(stderr, "GPU memory allocation failed\n") ;
return EXIT_FAILURE ;
}
cudaStat = cudaMalloc((void **) &amps_gpu, n_modes*sizeof(float));
if( cudaStat != cudaSuccess )
{
fprintf(stderr, "GPU memory allocation failed\n") ;
return EXIT_FAILURE ;
}
//stat = cublasSetMatrix(M, N, sizeof(*a), a, M, devPtrA, M);
stat = cublasSetMatrix(n_modes, n_slopes, sizeof(float), colmaj, n_modes, rec_gpu, n_modes);
if ( stat != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "Error loading reconstructor onto GPU\n") ;
return EXIT_FAILURE ;
}
free(colmaj);
return EXIT_SUCCESS;
}
int free_gpurecon()
{
cudaFree(rec_gpu);
cudaFree(slopes_gpu);
cudaFree(amps_gpu);
cublasDestroy(handle);
return 0;
}
int gpurecon(float *slopes_host, float *amps_host)
{
cublasStatus_t stat;
float alpha = 1.0f, beta = 0.0f;
//double t0, t1, t2;
//t0 = get_curr_t();
stat = cublasSetVector(n_slopes, sizeof(float), slopes_host, 1, slopes_gpu, 1);
if(stat != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "Error sending slopes vector to GPU.\n");
return EXIT_FAILURE;
}
//t1 = get_curr_t();
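//Sgemv computes y = alpha*A*x + beta*y; with alpha = 1 and beta = 0 this is
//simply amps = R * slopes for the n_modes x n_slopes reconstructor R.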
stat = cublasSgemv(handle, CUBLAS_OP_N, n_modes, n_slopes, &alpha, rec_gpu, n_modes, slopes_gpu, 1, &beta, amps_gpu, 1);
if(stat != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "Error during matrix-vector multiply.\n");
return EXIT_FAILURE;
}
//t2 = get_curr_t();
stat = cublasGetVector(n_modes, sizeof(float), amps_gpu, 1, amps_host, 1);
if(stat != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "Error getting amplitudes vector from GPU.\n");
return EXIT_FAILURE;
}
/*if(tw)
{
*tr = get_curr_t() - t2;
*tmm = t2 - t1;
*tw = t1 - t0;
}*/
return EXIT_SUCCESS;
}
#ifdef __cplusplus
}//extern "C"
#endif
|
7d57fa66b2f8febcbf815fc95bc048598df38626.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include "error.cuh"
#define BX blockIdx.x
#define BY blockIdx.y
#define TX threadIdx.x
#define TY threadIdx.y
#define DX blockDim.x
#define DY blockDim.y
#define TIDX blockDim.x * blockIdx.x + threadIdx.x
#define TIDY blockDim.y * blockIdx.y + threadIdx.y
template<typename T> __global__ void mm_kernel(const T * a, const T * b, T * c, const int ra, const int ca, const int cb);
template<typename T>
void mm_device(const T * a, const T * b, T * c, const int ra, const int ca, const int cb) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
T * ad; T * bd; T * cd;
size_t sz_a = ra * ca * sizeof(T);
size_t sz_b = ca * cb * sizeof(T);
size_t sz_c = ra * cb * sizeof(T);
gpuCheckErr(hipMalloc((void**)&ad, sz_a));
gpuCheckErr(hipMalloc((void**)&bd, sz_b));
gpuCheckErr(hipMalloc((void**)&cd, sz_c));
gpuCheckErr(hipMemcpy(ad, a, sz_a, hipMemcpyHostToDevice));
gpuCheckErr(hipMemcpy(bd, b, sz_b, hipMemcpyHostToDevice));
gpuCheckErr(hipMemcpy(cd, c, sz_c, hipMemcpyHostToDevice));
dim3 Threads(16, 16);
int bx = (cb + Threads.x - 1) / Threads.x; bx = bx < 1024 ? bx : 1024;
int by = (ra + Threads.y - 1) / Threads.y; by = by < 1024 ? by : 1024;
dim3 Grid(bx, by, 1);
//printf("..dbg a(%d, %d), b(%d, %d), grid(%d,%d), threads(%d,%d)\n", ra, ca, ca, cb, bx, by, Threads.x, Threads.y);
hipEventRecord(start);
hipLaunchKernelGGL(( mm_kernel<T>), dim3(Grid), dim3(Threads), 0, 0, ad, bd, cd, ra, ca, cb);
gpuCheckErr(hipPeekAtLastError());
gpuCheckErr(hipDeviceSynchronize());
hipEventRecord(stop);
hipMemcpy(c, cd, sz_c, hipMemcpyDeviceToHost);
hipFree(ad);
hipFree(bd);
hipFree(cd);
float ms = 0;
hipEventElapsedTime(&ms, start, stop);
printf("..gpu_mm(%3.1fms)\n", ms);
}
template<typename T> __global__ void mm_kernel(const T * a, const T * b, T * c, const int ra, const int ca, const int cb) {
// note : assumed that thread indices cover matrix
int tx = TIDX; // col
int ty = TIDY; // row
if (tx >= cb || ty >= ra) return;
const int r_ca = ca - ca / DX * DX;
int num_mults = ca / DX;
int mm = (r_ca > 0 ? num_mults + 1 : num_mults);
int cidx = ty * cb + tx;
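// walk the shared dimension (ca) in DX-wide chunks: for chunk i, sm_a points at
// this block-row's tile of A and sm_b at the matching tile of B, and each thread
// accumulates one element of C from that tile pair. note : no shared memory is
// used here, the "sub-matrices" are just base pointers into global memory.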
for (int i = 0; i < mm; ++i) {
int sa = DY * (i + ca * BY); // move to "right" in matrix "A" by 16x16 chunks
int sb = DX * (i * cb + BX); // move "down" matrix B by 16x16 chunks
const T * sm_a = &(a[sa]); // collect sub-matrix of A
const T * sm_b = &(b[sb]); // collect sub-matrix of B
// fill one element of result matrix "c"
int mx = i >= num_mults ? r_ca : DX;
int cc = ca * TY;
for (int j = 0; j < mx; ++j) {
c[cidx] += sm_a[cc + j] * sm_b[cb * j + TX];
}
//__syncthreads();
}
}
//----------------------------------
// template specializations
//----------------------------------
template void mm_device(const char * a, const char * b, char * c, const int ra, const int ca, const int cb);
template void mm_device(const int * a, const int * b, int * c, const int ra, const int ca, const int cb);
template void mm_device(const float * a, const float * b, float * c, const int ra, const int ca, const int cb);
template void mm_device(const double * a, const double * b, double * c, const int ra, const int ca, const int cb);
template __global__ void mm_kernel(const char * a, const char * b, char * c, const int ra, const int ca, const int cb);
template __global__ void mm_kernel(const int * a, const int * b, int * c, const int ra, const int ca, const int cb);
template __global__ void mm_kernel(const float * a, const float * b, float * c, const int ra, const int ca, const int cb);
template __global__ void mm_kernel(const double * a, const double * b, double * c, const int ra, const int ca, const int cb);
| 7d57fa66b2f8febcbf815fc95bc048598df38626.cu | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include "error.cuh"
#define BX blockIdx.x
#define BY blockIdx.y
#define TX threadIdx.x
#define TY threadIdx.y
#define DX blockDim.x
#define DY blockDim.y
#define TIDX blockDim.x * blockIdx.x + threadIdx.x
#define TIDY blockDim.y * blockIdx.y + threadIdx.y
template<typename T> __global__ void mm_kernel(const T * a, const T * b, T * c, const int ra, const int ca, const int cb);
template<typename T>
void mm_device(const T * a, const T * b, T * c, const int ra, const int ca, const int cb) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
T * ad; T * bd; T * cd;
size_t sz_a = ra * ca * sizeof(T);
size_t sz_b = ca * cb * sizeof(T);
size_t sz_c = ra * cb * sizeof(T);
gpuCheckErr(cudaMalloc((void**)&ad, sz_a));
gpuCheckErr(cudaMalloc((void**)&bd, sz_b));
gpuCheckErr(cudaMalloc((void**)&cd, sz_c));
gpuCheckErr(cudaMemcpy(ad, a, sz_a, cudaMemcpyHostToDevice));
gpuCheckErr(cudaMemcpy(bd, b, sz_b, cudaMemcpyHostToDevice));
gpuCheckErr(cudaMemcpy(cd, c, sz_c, cudaMemcpyHostToDevice));
dim3 Threads(16, 16);
int bx = (cb + Threads.x - 1) / Threads.x; bx = bx < 1024 ? bx : 1024;
int by = (ra + Threads.y - 1) / Threads.y; by = by < 1024 ? by : 1024;
dim3 Grid(bx, by, 1);
//printf("..dbg a(%d, %d), b(%d, %d), grid(%d,%d), threads(%d,%d)\n", ra, ca, ca, cb, bx, by, Threads.x, Threads.y);
cudaEventRecord(start);
mm_kernel<T><<<Grid, Threads>>>(ad, bd, cd, ra, ca, cb);
gpuCheckErr(cudaPeekAtLastError());
gpuCheckErr(cudaDeviceSynchronize());
cudaEventRecord(stop);
cudaMemcpy(c, cd, sz_c, cudaMemcpyDeviceToHost);
cudaFree(ad);
cudaFree(bd);
cudaFree(cd);
float ms = 0;
cudaEventElapsedTime(&ms, start, stop);
printf("..gpu_mm(%3.1fms)\n", ms);
}
template<typename T> __global__ void mm_kernel(const T * a, const T * b, T * c, const int ra, const int ca, const int cb) {
// note : assumed that thread indices cover matrix
int tx = TIDX; // col
int ty = TIDY; // row
if (tx >= cb || ty >= ra) return;
const int r_ca = ca - ca / DX * DX;
int num_mults = ca / DX;
int mm = (r_ca > 0 ? num_mults + 1 : num_mults);
int cidx = ty * cb + tx;
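// walk the shared dimension (ca) in DX-wide chunks: for chunk i, sm_a points at
// this block-row's tile of A and sm_b at the matching tile of B, and each thread
// accumulates one element of C from that tile pair. note : no shared memory is
// used here, the "sub-matrices" are just base pointers into global memory.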
for (int i = 0; i < mm; ++i) {
int sa = DY * (i + ca * BY); // move to "right" in matrix "A" by 16x16 chunks
int sb = DX * (i * cb + BX); // move "down" matrix B by 16x16 chunks
const T * sm_a = &(a[sa]); // collect sub-matrix of A
const T * sm_b = &(b[sb]); // collect sub-matrix of B
// fill one element of result matrix "c"
int mx = i >= num_mults ? r_ca : DX;
int cc = ca * TY;
for (int j = 0; j < mx; ++j) {
c[cidx] += sm_a[cc + j] * sm_b[cb * j + TX];
}
//__syncthreads();
}
}
//----------------------------------
// template specializations
//----------------------------------
template void mm_device(const char * a, const char * b, char * c, const int ra, const int ca, const int cb);
template void mm_device(const int * a, const int * b, int * c, const int ra, const int ca, const int cb);
template void mm_device(const float * a, const float * b, float * c, const int ra, const int ca, const int cb);
template void mm_device(const double * a, const double * b, double * c, const int ra, const int ca, const int cb);
template __global__ void mm_kernel(const char * a, const char * b, char * c, const int ra, const int ca, const int cb);
template __global__ void mm_kernel(const int * a, const int * b, int * c, const int ra, const int ca, const int cb);
template __global__ void mm_kernel(const float * a, const float * b, float * c, const int ra, const int ca, const int cb);
template __global__ void mm_kernel(const double * a, const double * b, double * c, const int ra, const int ca, const int cb);
|
65c4135537b7179564dd1ff85a673c2d252f9464.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
"Persistent packet traversal" kernel used in:
"Understanding the Efficiency of Ray Traversal on GPUs",
Timo Aila and Samuli Laine,
Proc. High-Performance Graphics 2009
Adds persistent threads to the algorithm explained in:
"Realtime ray tracing on GPU with BVH-based packet",
Johannes Guenther, Stefan Popov, Hans-Peter Seidel and Philipp Slusallek,
Proc. IEEE/Eurographics Symposium on Interactive Ray Tracing 2007, 113–118.
*/
#include "CudaTracerKernels.hpp"
//------------------------------------------------------------------------
#define NODES_ARRAY_OF_STRUCTURES // Define for AOS, comment out for SOA.
#define TRIANGLES_ARRAY_OF_STRUCTURES // Define for AOS, comment out for SOA.
#define LOAD_BALANCER_BATCH_SIZE 96 // Number of rays to fetch at a time. Must be a multiple of 32.
#define STACK_SIZE (23+32) // Size of the traversal stack in shared memory.
extern "C" {__device__ int g_warpCounter;} // Work counter for persistent threads.
//------------------------------------------------------------------------
__device__ void reduceSum(int* red, int tidx) // Warp-wide integer sum.
{
red[tidx] += red[tidx ^ 1];
red[tidx] += red[tidx ^ 2];
red[tidx] += red[tidx ^ 4];
red[tidx] += red[tidx ^ 8];
red[tidx] += red[tidx ^ 16];
}
//------------------------------------------------------------------------
extern "C" __global__ void queryConfig(void)
{
#if (defined(NODES_ARRAY_OF_STRUCTURES) && defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_AOS_AOS;
#elif (defined(NODES_ARRAY_OF_STRUCTURES) && !defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_AOS_SOA;
#elif (!defined(NODES_ARRAY_OF_STRUCTURES) && defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_SOA_AOS;
#elif (!defined(NODES_ARRAY_OF_STRUCTURES) && !defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_SOA_SOA;
#endif
g_config.blockWidth = 32; // One warp per row.
g_config.blockHeight = 6; // 6*32 = 192 threads, optimal for GTX285.
g_config.usePersistentThreads = 1;
}
//------------------------------------------------------------------------
TRACE_FUNC_BVH
{
// Shared memory arrays.
__shared__ RayStruct shared[32 * MaxBlockHeight + 1];
__shared__ volatile int s_stack[STACK_SIZE * MaxBlockHeight];
__shared__ volatile int s_stackPtr[MaxBlockHeight]; // NOTE: This could equally well be in a register.
__shared__ volatile int s_red[32 * MaxBlockHeight];
RayStruct* aux = shared + threadIdx.x + (blockDim.x * threadIdx.y);
volatile int* stack = s_stack + STACK_SIZE * threadIdx.y;
volatile int* red = s_red + 32 * threadIdx.y;
volatile int& stackPtr = s_stackPtr[threadIdx.y];
// Live state during traversal, stored in registers.
int tidx = threadIdx.x; // Lane index within warp.
int widx = threadIdx.y; // Warp index within block.
int rayidx; // Ray index.
float origx, origy, origz; // Ray origin.
bool valid; // False if the ray is degenerate.
int nodeAddr; // Current node, negative if leaf.
bool terminated; // Whether the traversal has been terminated.
float hitT; // t-value of the closest intersection.
float u, v; // UV barycentric coordinates
// Initialize persistent threads.
__shared__ volatile int nextRayArray[MaxBlockHeight]; // Current ray index in global buffer.
__shared__ volatile int rayCountArray[MaxBlockHeight]; // Number of rays in the local pool.
nextRayArray[threadIdx.y] = 0;
rayCountArray[threadIdx.y] = 0;
// Persistent threads: fetch and process rays in a loop.
do
{
volatile int& localPoolRayCount = rayCountArray[widx];
volatile int& localPoolNextRay = nextRayArray[widx];
// Local pool is empty => fetch new rays from the global pool using lane 0.
if (tidx == 0 && localPoolRayCount <= 0)
{
localPoolNextRay = atomicAdd(&g_warpCounter, LOAD_BALANCER_BATCH_SIZE);
localPoolRayCount = LOAD_BALANCER_BATCH_SIZE;
}
// Pick 32 rays from the local pool.
// Out of work => done.
{
rayidx = localPoolNextRay + tidx;
if (rayidx >= numRays)
break;
if (tidx == 0)
{
localPoolNextRay += 32;
localPoolRayCount -= 32;
}
// Fetch ray.
float4 o = FETCH_GLOBAL(rays, rayidx * 2 + 0, float4);
float4 d = FETCH_GLOBAL(rays, rayidx * 2 + 1, float4);
origx = o.x, origy = o.y, origz = o.z;
aux->tmin = o.w;
valid = (o.w < d.w);
float ooeps = exp2f(-80.0f); // Avoid div by zero.
aux->idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
aux->idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
aux->idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
// Setup traversal.
stackPtr = -1; // Stack is empty.
nodeAddr = 0; // Start from the root.
terminated = false; // Not terminated yet.
STORE_RESULT(rayidx, -1, 0.0f, u, v); // No triangle intersected so far.
hitT = d.w; // tmax
}
// Traversal loop.
while (valid)
{
// Internal node => intersect children.
if (nodeAddr >= 0)
{
// Fetch AABBs of the two child nodes.
#ifdef NODES_ARRAY_OF_STRUCTURES
float4 n0xy = FETCH_TEXTURE(nodesA, nodeAddr*4+0, float4); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = FETCH_TEXTURE(nodesA, nodeAddr*4+1, float4); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = FETCH_TEXTURE(nodesA, nodeAddr*4+2, float4); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 cnodes=FETCH_TEXTURE(nodesA, nodeAddr*4+3, float4);
#else
float4 n0xy = FETCH_TEXTURE(nodesA, nodeAddr, float4); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = FETCH_TEXTURE(nodesB, nodeAddr, float4); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = FETCH_TEXTURE(nodesC, nodeAddr, float4); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 cnodes=FETCH_TEXTURE(nodesD, nodeAddr, float4);
#endif
// Intersect the ray against the child nodes.
float oodx = origx * aux->idirx;
float oody = origy * aux->idiry;
float oodz = origz * aux->idirz;
float c0lox = n0xy.x * aux->idirx - oodx;
float c0hix = n0xy.y * aux->idirx - oodx;
float c0loy = n0xy.z * aux->idiry - oody;
float c0hiy = n0xy.w * aux->idiry - oody;
float c0loz = nz.x * aux->idirz - oodz;
float c0hiz = nz.y * aux->idirz - oodz;
float c1loz = nz.z * aux->idirz - oodz;
float c1hiz = nz.w * aux->idirz - oodz;
float c0min = max4(fminf(c0lox, c0hix), fminf(c0loy, c0hiy), fminf(c0loz, c0hiz), aux->tmin);
float c0max = min4(fmaxf(c0lox, c0hix), fmaxf(c0loy, c0hiy), fmaxf(c0loz, c0hiz), hitT);
float c1lox = n1xy.x * aux->idirx - oodx;
float c1hix = n1xy.y * aux->idirx - oodx;
float c1loy = n1xy.z * aux->idiry - oody;
float c1hiy = n1xy.w * aux->idiry - oody;
float c1min = max4(fminf(c1lox, c1hix), fminf(c1loy, c1hiy), fminf(c1loz, c1hiz), aux->tmin);
float c1max = min4(fmaxf(c1lox, c1hix), fmaxf(c1loy, c1hiy), fmaxf(c1loz, c1hiz), hitT);
// Perform warp-wide vote to decide where to go.
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
bool anyc0 = __any(traverseChild0);
bool anyc1 = __any(traverseChild1);
int nodeAddrChild0 = __float_as_int(cnodes.x); // Stored as int.
int nodeAddrChild1 = __float_as_int(cnodes.y); // Stored as int.
// Both children were intersected => vote which one to visit first.
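// Each lane votes +1 to visit child1 first (c1min < c0min) or -1 for child0;
// reduceSum leaves the warp-wide total in every red[] entry, so all lanes agree
// on one traversal order and the other child is pushed on the shared stack.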
if (anyc0 && anyc1)
{
red[tidx] = (c1min < c0min) ? 1 : -1;
reduceSum((int*)red, tidx);
if (red[tidx] >= 0)
swap(nodeAddrChild0, nodeAddrChild1);
nodeAddr = nodeAddrChild0;
if (tidx == 0)
{
stackPtr++;
stack[stackPtr] = nodeAddrChild1; // Lane 0 writes.
}
}
// Only one child was intersected => go there.
else if (anyc0)
{
nodeAddr = nodeAddrChild0;
}
else if (anyc1)
{
nodeAddr = nodeAddrChild1;
}
// Neither child was intersected => pop.
else
{
if (stackPtr < 0)
break;
else
{
nodeAddr = stack[stackPtr]; // All lanes read.
if (tidx == 0)
stackPtr--; // Lane 0 decrements.
}
}
} // internal node
// Leaf node => intersect triangles.
if (nodeAddr < 0)
{
// Fetch the start and end of the triangle list.
nodeAddr = -nodeAddr-1;
#ifdef NODES_ARRAY_OF_STRUCTURES
float4 leaf = FETCH_TEXTURE(nodesA, nodeAddr*4+3, float4);
#else
float4 leaf = FETCH_TEXTURE(nodesD, nodeAddr, float4);
#endif
int triAddr = __float_as_int(leaf.x); // Stored as int.
int triAddr2 = __float_as_int(leaf.y); // Stored as int.
// Intersect the ray against each triangle using Sven Woop's algorithm.
for(; triAddr < triAddr2; triAddr++)
{
// Compute and check intersection t-value.
#ifdef TRIANGLES_ARRAY_OF_STRUCTURES
float4 v00 = FETCH_GLOBAL(trisA, triAddr*4+0, float4);
float4 v11 = FETCH_GLOBAL(trisA, triAddr*4+1, float4);
#else
float4 v00 = FETCH_GLOBAL(trisA, triAddr, float4);
float4 v11 = FETCH_GLOBAL(trisB, triAddr, float4);
#endif
float dirx = 1.0f / aux->idirx;
float diry = 1.0f / aux->idiry;
float dirz = 1.0f / aux->idirz;
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > aux->tmin && t < hitT)
{
// Compute and check barycentric u.
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
u = Ox + t*Dx;
if (u >= 0.0f)
{
// Compute and check barycentric v.
#ifdef TRIANGLES_ARRAY_OF_STRUCTURES
float4 v22 = FETCH_GLOBAL(trisA, triAddr*4+2, float4);
#else
float4 v22 = FETCH_GLOBAL(trisC, triAddr, float4);
#endif
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
STORE_RESULT(rayidx, FETCH_GLOBAL(triIndices, triAddr, int), t, u, v);
if (anyHit)
terminated = true; // NOTE: Cannot break because packet traversal!
}
}
}
} // triangle
// All lanes have terminated => traversal done.
if (__all(terminated))
break;
// Pop stack.
if (stackPtr < 0)
break;
else
{
nodeAddr = stack[stackPtr]; // Everyone reads.
if (tidx == 0)
stackPtr--; // Lane 0 decrements.
}
} // leaf node
} // traversal loop
} while(aux); // persistent threads (always true)
}
//------------------------------------------------------------------------
| 65c4135537b7179564dd1ff85a673c2d252f9464.cu | /*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
"Persistent packet traversal" kernel used in:
"Understanding the Efficiency of Ray Traversal on GPUs",
Timo Aila and Samuli Laine,
Proc. High-Performance Graphics 2009
Adds persistent threads to the algorithm explained in:
"Realtime ray tracing on GPU with BVH-based packet",
Johannes Guenther, Stefan Popov, Hans-Peter Seidel and Philipp Slusallek,
Proc. IEEE/Eurographics Symposium on Interactive Ray Tracing 2007, 113–118.
*/
#include "CudaTracerKernels.hpp"
//------------------------------------------------------------------------
#define NODES_ARRAY_OF_STRUCTURES // Define for AOS, comment out for SOA.
#define TRIANGLES_ARRAY_OF_STRUCTURES // Define for AOS, comment out for SOA.
#define LOAD_BALANCER_BATCH_SIZE 96 // Number of rays to fetch at a time. Must be a multiple of 32.
#define STACK_SIZE (23+32) // Size of the traversal stack in shared memory.
extern "C" {__device__ int g_warpCounter;} // Work counter for persistent threads.
//------------------------------------------------------------------------
__device__ void reduceSum(int* red, int tidx) // Warp-wide integer sum.
{
red[tidx] += red[tidx ^ 1];
red[tidx] += red[tidx ^ 2];
red[tidx] += red[tidx ^ 4];
red[tidx] += red[tidx ^ 8];
red[tidx] += red[tidx ^ 16];
}
//------------------------------------------------------------------------
extern "C" __global__ void queryConfig(void)
{
#if (defined(NODES_ARRAY_OF_STRUCTURES) && defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_AOS_AOS;
#elif (defined(NODES_ARRAY_OF_STRUCTURES) && !defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_AOS_SOA;
#elif (!defined(NODES_ARRAY_OF_STRUCTURES) && defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_SOA_AOS;
#elif (!defined(NODES_ARRAY_OF_STRUCTURES) && !defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_SOA_SOA;
#endif
g_config.blockWidth = 32; // One warp per row.
g_config.blockHeight = 6; // 6*32 = 192 threads, optimal for GTX285.
g_config.usePersistentThreads = 1;
}
//------------------------------------------------------------------------
TRACE_FUNC_BVH
{
// Shared memory arrays.
__shared__ RayStruct shared[32 * MaxBlockHeight + 1];
__shared__ volatile int s_stack[STACK_SIZE * MaxBlockHeight];
__shared__ volatile int s_stackPtr[MaxBlockHeight]; // NOTE: This could equally well be in a register.
__shared__ volatile int s_red[32 * MaxBlockHeight];
RayStruct* aux = shared + threadIdx.x + (blockDim.x * threadIdx.y);
volatile int* stack = s_stack + STACK_SIZE * threadIdx.y;
volatile int* red = s_red + 32 * threadIdx.y;
volatile int& stackPtr = s_stackPtr[threadIdx.y];
// Live state during traversal, stored in registers.
int tidx = threadIdx.x; // Lane index within warp.
int widx = threadIdx.y; // Warp index within block.
int rayidx; // Ray index.
float origx, origy, origz; // Ray origin.
bool valid; // False if the ray is degenerate.
int nodeAddr; // Current node, negative if leaf.
bool terminated; // Whether the traversal has been terminated.
float hitT; // t-value of the closest intersection.
float u, v; // UV barycentric coordinates
// Initialize persistent threads.
__shared__ volatile int nextRayArray[MaxBlockHeight]; // Current ray index in global buffer.
__shared__ volatile int rayCountArray[MaxBlockHeight]; // Number of rays in the local pool.
nextRayArray[threadIdx.y] = 0;
rayCountArray[threadIdx.y] = 0;
// Persistent threads: fetch and process rays in a loop.
do
{
volatile int& localPoolRayCount = rayCountArray[widx];
volatile int& localPoolNextRay = nextRayArray[widx];
// Local pool is empty => fetch new rays from the global pool using lane 0.
if (tidx == 0 && localPoolRayCount <= 0)
{
localPoolNextRay = atomicAdd(&g_warpCounter, LOAD_BALANCER_BATCH_SIZE);
localPoolRayCount = LOAD_BALANCER_BATCH_SIZE;
}
// Pick 32 rays from the local pool.
// Out of work => done.
{
rayidx = localPoolNextRay + tidx;
if (rayidx >= numRays)
break;
if (tidx == 0)
{
localPoolNextRay += 32;
localPoolRayCount -= 32;
}
// Fetch ray.
float4 o = FETCH_GLOBAL(rays, rayidx * 2 + 0, float4);
float4 d = FETCH_GLOBAL(rays, rayidx * 2 + 1, float4);
origx = o.x, origy = o.y, origz = o.z;
aux->tmin = o.w;
valid = (o.w < d.w);
float ooeps = exp2f(-80.0f); // Avoid div by zero.
aux->idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
aux->idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
aux->idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
// Setup traversal.
stackPtr = -1; // Stack is empty.
nodeAddr = 0; // Start from the root.
terminated = false; // Not terminated yet.
STORE_RESULT(rayidx, -1, 0.0f, u, v); // No triangle intersected so far.
hitT = d.w; // tmax
}
// Traversal loop.
while (valid)
{
// Internal node => intersect children.
if (nodeAddr >= 0)
{
// Fetch AABBs of the two child nodes.
#ifdef NODES_ARRAY_OF_STRUCTURES
float4 n0xy = FETCH_TEXTURE(nodesA, nodeAddr*4+0, float4); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = FETCH_TEXTURE(nodesA, nodeAddr*4+1, float4); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = FETCH_TEXTURE(nodesA, nodeAddr*4+2, float4); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 cnodes=FETCH_TEXTURE(nodesA, nodeAddr*4+3, float4);
#else
float4 n0xy = FETCH_TEXTURE(nodesA, nodeAddr, float4); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = FETCH_TEXTURE(nodesB, nodeAddr, float4); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = FETCH_TEXTURE(nodesC, nodeAddr, float4); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 cnodes=FETCH_TEXTURE(nodesD, nodeAddr, float4);
#endif
// Intersect the ray against the child nodes.
float oodx = origx * aux->idirx;
float oody = origy * aux->idiry;
float oodz = origz * aux->idirz;
float c0lox = n0xy.x * aux->idirx - oodx;
float c0hix = n0xy.y * aux->idirx - oodx;
float c0loy = n0xy.z * aux->idiry - oody;
float c0hiy = n0xy.w * aux->idiry - oody;
float c0loz = nz.x * aux->idirz - oodz;
float c0hiz = nz.y * aux->idirz - oodz;
float c1loz = nz.z * aux->idirz - oodz;
float c1hiz = nz.w * aux->idirz - oodz;
float c0min = max4(fminf(c0lox, c0hix), fminf(c0loy, c0hiy), fminf(c0loz, c0hiz), aux->tmin);
float c0max = min4(fmaxf(c0lox, c0hix), fmaxf(c0loy, c0hiy), fmaxf(c0loz, c0hiz), hitT);
float c1lox = n1xy.x * aux->idirx - oodx;
float c1hix = n1xy.y * aux->idirx - oodx;
float c1loy = n1xy.z * aux->idiry - oody;
float c1hiy = n1xy.w * aux->idiry - oody;
float c1min = max4(fminf(c1lox, c1hix), fminf(c1loy, c1hiy), fminf(c1loz, c1hiz), aux->tmin);
float c1max = min4(fmaxf(c1lox, c1hix), fmaxf(c1loy, c1hiy), fmaxf(c1loz, c1hiz), hitT);
// Perform warp-wide vote to decide where to go.
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
bool anyc0 = __any(traverseChild0);
bool anyc1 = __any(traverseChild1);
int nodeAddrChild0 = __float_as_int(cnodes.x); // Stored as int.
int nodeAddrChild1 = __float_as_int(cnodes.y); // Stored as int.
// Both children were intersected => vote which one to visit first.
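// Each lane votes +1 to visit child1 first (c1min < c0min) or -1 for child0;
// reduceSum leaves the warp-wide total in every red[] entry, so all lanes agree
// on one traversal order and the other child is pushed on the shared stack.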
if (anyc0 && anyc1)
{
red[tidx] = (c1min < c0min) ? 1 : -1;
reduceSum((int*)red, tidx);
if (red[tidx] >= 0)
swap(nodeAddrChild0, nodeAddrChild1);
nodeAddr = nodeAddrChild0;
if (tidx == 0)
{
stackPtr++;
stack[stackPtr] = nodeAddrChild1; // Lane 0 writes.
}
}
// Only one child was intersected => go there.
else if (anyc0)
{
nodeAddr = nodeAddrChild0;
}
else if (anyc1)
{
nodeAddr = nodeAddrChild1;
}
// Neither child was intersected => pop.
else
{
if (stackPtr < 0)
break;
else
{
nodeAddr = stack[stackPtr]; // All lanes read.
if (tidx == 0)
stackPtr--; // Lane 0 decrements.
}
}
} // internal node
// Leaf node => intersect triangles.
if (nodeAddr < 0)
{
// Fetch the start and end of the triangle list.
nodeAddr = -nodeAddr-1;
#ifdef NODES_ARRAY_OF_STRUCTURES
float4 leaf = FETCH_TEXTURE(nodesA, nodeAddr*4+3, float4);
#else
float4 leaf = FETCH_TEXTURE(nodesD, nodeAddr, float4);
#endif
int triAddr = __float_as_int(leaf.x); // Stored as int.
int triAddr2 = __float_as_int(leaf.y); // Stored as int.
// Intersect the ray against each triangle using Sven Woop's algorithm.
for(; triAddr < triAddr2; triAddr++)
{
// Compute and check intersection t-value.
#ifdef TRIANGLES_ARRAY_OF_STRUCTURES
float4 v00 = FETCH_GLOBAL(trisA, triAddr*4+0, float4);
float4 v11 = FETCH_GLOBAL(trisA, triAddr*4+1, float4);
#else
float4 v00 = FETCH_GLOBAL(trisA, triAddr, float4);
float4 v11 = FETCH_GLOBAL(trisB, triAddr, float4);
#endif
float dirx = 1.0f / aux->idirx;
float diry = 1.0f / aux->idiry;
float dirz = 1.0f / aux->idirz;
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > aux->tmin && t < hitT)
{
// Compute and check barycentric u.
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
u = Ox + t*Dx;
if (u >= 0.0f)
{
// Compute and check barycentric v.
#ifdef TRIANGLES_ARRAY_OF_STRUCTURES
float4 v22 = FETCH_GLOBAL(trisA, triAddr*4+2, float4);
#else
float4 v22 = FETCH_GLOBAL(trisC, triAddr, float4);
#endif
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
STORE_RESULT(rayidx, FETCH_GLOBAL(triIndices, triAddr, int), t, u, v);
if (anyHit)
terminated = true; // NOTE: Cannot break because packet traversal!
}
}
}
} // triangle
// All lanes have terminated => traversal done.
if (__all(terminated))
break;
// Pop stack.
if (stackPtr < 0)
break;
else
{
nodeAddr = stack[stackPtr]; // Everyone reads.
if (tidx == 0)
stackPtr--; // Lane 0 decrements.
}
} // leaf node
} // traversal loop
} while(aux); // persistent threads (always true)
}
//------------------------------------------------------------------------
|
7d8eca22c548765428ae84a250e5943275075392.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define BLOCKSIZE_X 16
#define BLOCKSIZE_Y 18
#define FILTER_LENGTH 9
#define FILTER_RADIUS 1
__constant__ unsigned char c_Filter[FILTER_LENGTH];
extern "C" void setFilter(unsigned char *h_Filter)
{
hipMemcpyToSymbol(c_Filter, h_Filter, FILTER_LENGTH * sizeof(unsigned char));
}
__device__ int dOffset(int x, int y,int imageW) {
return x*imageW + y;
}
__device__ int fOffset(int x, int y) {
return x*(2*FILTER_RADIUS + 1) + y;
}
__global__ void filter(unsigned char* d_data,unsigned char* d_results,int imageW,int imageH)
{
int k,l;
const int gi = blockIdx.y * blockDim.y + threadIdx.y;
const int gj = blockIdx.x * blockDim.x + threadIdx.x;
int outPixel = 0;
if(gi < imageH && gj < imageW)
{
for(k=-1;k<=1;k++)
{
for(l=-1;l<=1;l++)
{
if ( (gi+k)>=0 && (gi+k)<imageH && (gj+l)>=0 && (gj+l)<imageW )
{
outPixel += d_data[dOffset(gi+k,gj+l,imageW)] * c_Filter[fOffset(k+1,l+1)];
}
else
{
outPixel += d_data[dOffset(gi,gj,imageW)] * c_Filter[fOffset(k+1,l+1)];
}
}
}
d_results[dOffset(gi,gj,imageW)] = (unsigned char)(outPixel/16);
}
}
void swap(unsigned char **d_data,unsigned char **d_results)
{
unsigned char* temp = *d_data;
*d_data = *d_results;
*d_results = temp;
}
int main()
{
int size,i,imageW,imageH;
unsigned char *h_data;
unsigned char *h_results;
unsigned char *d_data;
unsigned char *d_results;
unsigned char h_filter[9];
h_filter[0] = 1;
h_filter[1] = 2;
h_filter[2] = 1;
h_filter[3] = 2;
h_filter[4] = 4;
h_filter[5] = 2;
h_filter[6] = 1;
h_filter[7] = 2;
h_filter[8] = 1;
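//3x3 binomial (Gaussian) blur: the weights sum to 16, which is why the kernel
//divides each accumulated pixel value by 16 before writing it out.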
imageW = 1920;
imageH = 2520;
size = imageW* imageH;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
h_data =(unsigned char*)malloc(size);
h_results =(unsigned char*)malloc(size);
FILE* inputImage;
inputImage = fopen("../image.raw","rb");
fread(h_data,size,1,inputImage);
fclose(inputImage);
dim3 blockSize(BLOCKSIZE_X, BLOCKSIZE_Y);
int numBlocks_X = imageW / BLOCKSIZE_X;
int numBlocks_Y = imageH / BLOCKSIZE_Y;
printf("blocks x %d blocks y %d\n",numBlocks_X,numBlocks_Y );
dim3 gridSize(numBlocks_X, numBlocks_Y);
hipEventRecord(start, 0);
hipMalloc(&d_data, size);
hipMemcpy(d_data, h_data, size, hipMemcpyHostToDevice);
hipMalloc(&d_results, size);
setFilter(h_filter);
for(i = 0; i < 100; i++ )
{
hipLaunchKernelGGL(( filter), dim3(gridSize),dim3(blockSize), 0, 0, d_data,d_results,imageW,imageH);
swap(&d_data,&d_results);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipMemcpy(h_results, d_data, size, hipMemcpyDeviceToHost); // after the final swap the newest result is in d_data
hipFree(d_results);
hipFree(d_data);
FILE* outputImage;
outputImage = fopen("out.raw","w+");
fwrite(h_results,size,1,outputImage);
fclose(outputImage);
hipEventElapsedTime(&time, start, stop);
printf ("Time for the kernel: %f ms\n", time);
return 0;
} | 7d8eca22c548765428ae84a250e5943275075392.cu | #include <stdio.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define BLOCKSIZE_X 16
#define BLOCKSIZE_Y 18
#define FILTER_LENGTH 9
#define FILTER_RADIUS 1
__constant__ unsigned char c_Filter[FILTER_LENGTH];
extern "C" void setFilter(unsigned char *h_Filter)
{
cudaMemcpyToSymbol(c_Filter, h_Filter, FILTER_LENGTH * sizeof(unsigned char));
}
__device__ int dOffset(int x, int y,int imageW) {
return x*imageW + y;
}
__device__ int fOffset(int x, int y) {
return x*(2*FILTER_RADIUS + 1) + y;
}
__global__ void filter(unsigned char* d_data,unsigned char* d_results,int imageW,int imageH)
{
int k,l;
const int gi = blockIdx.y * blockDim.y + threadIdx.y;
const int gj = blockIdx.x * blockDim.x + threadIdx.x;
int outPixel = 0;
if(gi < imageH && gj < imageW)
{
for(k=-1;k<=1;k++)
{
for(l=-1;l<=1;l++)
{
if ( (gi+k)>=0 && (gi+k)<imageH && (gj+l)>=0 && (gj+l)<imageW )
{
outPixel += d_data[dOffset(gi+k,gj+l,imageW)] * c_Filter[fOffset(k+1,l+1)];
}
else
{
outPixel += d_data[dOffset(gi,gj,imageW)] * c_Filter[fOffset(k+1,l+1)];
}
}
}
d_results[dOffset(gi,gj,imageW)] = (unsigned char)(outPixel/16);
}
}
void swap(unsigned char **d_data,unsigned char **d_results)
{
unsigned char* temp = *d_data;
*d_data = *d_results;
*d_results = temp;
}
int main()
{
int size,i,imageW,imageH;
unsigned char *h_data;
unsigned char *h_results;
unsigned char *d_data;
unsigned char *d_results;
unsigned char h_filter[9];
h_filter[0] = 1;
h_filter[1] = 2;
h_filter[2] = 1;
h_filter[3] = 2;
h_filter[4] = 4;
h_filter[5] = 2;
h_filter[6] = 1;
h_filter[7] = 2;
h_filter[8] = 1;
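//3x3 binomial (Gaussian) blur: the weights sum to 16, which is why the kernel
//divides each accumulated pixel value by 16 before writing it out.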
imageW = 1920;
imageH = 2520;
size = imageW* imageH;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
h_data =(unsigned char*)malloc(size);
h_results =(unsigned char*)malloc(size);
FILE* inputImage;
inputImage = fopen("../image.raw","rb");
fread(h_data,size,1,inputImage);
fclose(inputImage);
dim3 blockSize(BLOCKSIZE_X, BLOCKSIZE_Y);
int numBlocks_X = imageW / BLOCKSIZE_X;
int numBlocks_Y = imageH / BLOCKSIZE_Y;
printf("blocks x %d blocks y %d\n",numBlocks_X,numBlocks_Y );
dim3 gridSize(numBlocks_X, numBlocks_Y);
cudaEventRecord(start, 0);
cudaMalloc(&d_data, size);
cudaMemcpy(d_data, h_data, size, cudaMemcpyHostToDevice);
cudaMalloc(&d_results, size);
setFilter(h_filter);
for(i = 0; i < 100; i++ )
{
filter<<<gridSize,blockSize>>>(d_data,d_results,imageW,imageH);
swap(&d_data,&d_results);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaMemcpy(h_results, d_data, size, cudaMemcpyDeviceToHost); // after the final swap the newest result is in d_data
cudaFree(d_results);
cudaFree(d_data);
FILE* outputImage;
outputImage = fopen("out.raw","w+");
fwrite(h_results,size,1,outputImage);
fclose(outputImage);
cudaEventElapsedTime(&time, start, stop);
printf ("Time for the kernel: %f ms\n", time);
return 0;
} |
e469a7718cf97f988def9466f4e729d1197e7ef8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//Udacity HW 4
//Radix Sorting
__global__ void scatter(unsigned int *in,unsigned int *in_pos, unsigned int *out, unsigned int *out_pos, unsigned int n, unsigned int *d_histScan, unsigned int mask, unsigned int current_bits, unsigned int nBins)
{
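// Only lane 0 of each block runs: it walks the block's elements in order and
// uses d_histScan (scanned per-block histograms, laid out bin-major as
// blockIdx.x + bin*gridDim.x) as a running output cursor, incrementing the
// entry after each write so the scatter stays stable within a block.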
if (threadIdx.x == 0)
{
unsigned int start = blockIdx.x*blockDim.x;
for (int i = start; i < min(n, start + blockDim.x) ; i++)
{
unsigned int bin = (in[i] >> current_bits) & mask;
out[d_histScan[blockIdx.x + bin*gridDim.x]] = in[i];
out_pos[d_histScan[blockIdx.x + bin*gridDim.x]] = in_pos[i];
d_histScan[blockIdx.x + bin*gridDim.x]++;
}
}
} | e469a7718cf97f988def9466f4e729d1197e7ef8.cu | #include "includes.h"
//Udacity HW 4
//Radix Sorting
__global__ void scatter(unsigned int *in,unsigned int *in_pos, unsigned int *out, unsigned int *out_pos, unsigned int n, unsigned int *d_histScan, unsigned int mask, unsigned int current_bits, unsigned int nBins)
{
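// Only lane 0 of each block runs: it walks the block's elements in order and
// uses d_histScan (scanned per-block histograms, laid out bin-major as
// blockIdx.x + bin*gridDim.x) as a running output cursor, incrementing the
// entry after each write so the scatter stays stable within a block.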
if (threadIdx.x == 0)
{
unsigned int start = blockIdx.x*blockDim.x;
for (int i = start; i < min(n, start + blockDim.x) ; i++)
{
unsigned int bin = (in[i] >> current_bits) & mask;
out[d_histScan[blockIdx.x + bin*gridDim.x]] = in[i];
out_pos[d_histScan[blockIdx.x + bin*gridDim.x]] = in_pos[i];
d_histScan[blockIdx.x + bin*gridDim.x]++;
}
}
} |
d1e299bfcea60e01a9428ad0c069f79e262c0103.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Written by Vasily Volkov.
// Copyright (c) 2008-2009, The Regents of the University of California.
// All rights reserved.
#include "codelets.h"
__global__ void IFFT512_device( float2 *work );
#define rank 16
__global__ void IFFT16_device_( float2 *work )
{
int tid = threadIdx.x;
int bid = blockIdx.y * gridDim.x + blockIdx.x;
int lo = bid & (8192/rank/64-1);
int hi = bid &~(8192/rank/64-1);
int i = lo*64 + tid;
work += hi * (rank*64) + i;
float2 a[rank];
load<rank>( a, work, 512 );
itwiddle_straight<rank>( a, i, 8192 );
IFFT16( a );
store<rank>( a, work, 512 );
}
extern "C" void IFFT8192( float2 *work, int batch )
{
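// 8192-point inverse FFT decomposed as 512 * 16 (Cooley-Tukey): the first pass
// runs the 512-point sub-transforms, the second applies the size-8192 twiddle
// factors and the remaining 16-point transforms at a stride of 512.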
hipLaunchKernelGGL(( IFFT512_device), dim3(grid2D(batch*rank)), dim3(64) , 0, 0, work );
hipLaunchKernelGGL(( IFFT16_device_), dim3(grid2D(batch*(8192/rank)/64)), dim3(64) , 0, 0, work );
}
| d1e299bfcea60e01a9428ad0c069f79e262c0103.cu | // Written by Vasily Volkov.
// Copyright (c) 2008-2009, The Regents of the University of California.
// All rights reserved.
#include "codelets.h"
__global__ void IFFT512_device( float2 *work );
#define rank 16
__global__ void IFFT16_device_( float2 *work )
{
int tid = threadIdx.x;
int bid = blockIdx.y * gridDim.x + blockIdx.x;
int lo = bid & (8192/rank/64-1);
int hi = bid &~(8192/rank/64-1);
int i = lo*64 + tid;
work += hi * (rank*64) + i;
float2 a[rank];
load<rank>( a, work, 512 );
itwiddle_straight<rank>( a, i, 8192 );
IFFT16( a );
store<rank>( a, work, 512 );
}
extern "C" void IFFT8192( float2 *work, int batch )
{
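// 8192-point inverse FFT decomposed as 512 * 16 (Cooley-Tukey): the first pass
// runs the 512-point sub-transforms, the second applies the size-8192 twiddle
// factors and the remaining 16-point transforms at a stride of 512.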
IFFT512_device<<< grid2D(batch*rank), 64 >>>( work );
IFFT16_device_<<< grid2D(batch*(8192/rank)/64), 64 >>>( work );
}
|
da53823c5cfa9f8c7d48d34cccbf7a5f257855d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Verify that we can parse a simple CUDA file with or without -save-temps
// http://llvm.org/PR22936
// RUN: %clang --cuda-path=%S/Inputs/CUDA/usr/local/cuda \
// RUN: -nocudainc -nocudalib -Werror -fsyntax-only %s
//
// Verify that we pass -x cuda-cpp-output to compiler after
// preprocessing a CUDA file
// RUN: %clang --cuda-path=%S/Inputs/CUDA/usr/local/cuda \
// RUN: -Werror -### -save-temps -c %s 2>&1 | FileCheck %s
// CHECK-LABEL: "-cc1"
// CHECK: "-E"
// CHECK: "-x" "cuda"
// CHECK-LABEL: "-cc1"
// CHECK: "-x" "cuda-cpp-output"
//
// Verify that compiler accepts CUDA syntax with "-x cuda-cpp-output".
// RUN: %clang --cuda-path=%S/Inputs/CUDA/usr/local/cuda \
// RUN: -Werror -fsyntax-only -x cuda-cpp-output %s
extern "C" int hipConfigureCall(int, int);
extern "C" int __cudaPushCallConfiguration(int, int);
__attribute__((global)) void kernel() {}
void func() {
hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, );
}
| da53823c5cfa9f8c7d48d34cccbf7a5f257855d0.cu | // Verify that we can parse a simple CUDA file with or without -save-temps
// http://llvm.org/PR22936
// RUN: %clang --cuda-path=%S/Inputs/CUDA/usr/local/cuda \
// RUN: -nocudainc -nocudalib -Werror -fsyntax-only %s
//
// Verify that we pass -x cuda-cpp-output to compiler after
// preprocessing a CUDA file
// RUN: %clang --cuda-path=%S/Inputs/CUDA/usr/local/cuda \
// RUN: -Werror -### -save-temps -c %s 2>&1 | FileCheck %s
// CHECK-LABEL: "-cc1"
// CHECK: "-E"
// CHECK: "-x" "cuda"
// CHECK-LABEL: "-cc1"
// CHECK: "-x" "cuda-cpp-output"
//
// Verify that compiler accepts CUDA syntax with "-x cuda-cpp-output".
// RUN: %clang --cuda-path=%S/Inputs/CUDA/usr/local/cuda \
// RUN: -Werror -fsyntax-only -x cuda-cpp-output %s
extern "C" int cudaConfigureCall(int, int);
extern "C" int __cudaPushCallConfiguration(int, int);
__attribute__((global)) void kernel() {}
void func() {
kernel<<<1,1>>>();
}
|
5c0fdc6e1b3f5c7cb536d243ee67ae8021c6ca55.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "../stdafx.h"
#include "../structurs.h"
using namespace SN_Base;
__global__ void cropIn2Out(roi roi, snSize srcSz, snFloat* in, snFloat* out){
size_t srcStp = srcSz.w * srcSz.h,
dstStp = roi.w * roi.h;
in += roi.x + roi.y * srcSz.w + srcStp * blockIdx.x + srcStp * srcSz.d * blockIdx.y;
out += dstStp * blockIdx.x + dstStp * srcSz.d * blockIdx.y;
// gridDim.x - srcSz.d
// gridDim.y - srcSz.n
unsigned int i = threadIdx.x; // blockDim.x <= roi.h
while (i < roi.h){
snFloat* pIn = in + i * srcSz.w,
* pOut = out + i * roi.w;
unsigned int j = threadIdx.y; // blockDim.y <= roi.w
while (j < roi.w){
pOut[j] = pIn[j];
j += blockDim.y;
}
i += blockDim.x;
}
}
__global__ void cropOut2In(roi roi, snSize srcSz, snFloat* in, snFloat* out){
size_t srcStp = srcSz.w * srcSz.h,
dstStp = roi.w * roi.h;
in += roi.x + roi.y * srcSz.w + srcStp * blockIdx.x + srcStp * srcSz.d * blockIdx.y;
out += dstStp * blockIdx.x + dstStp * srcSz.d * blockIdx.y;
// gridDim.x - srcSz.d
// gridDim.y - srcSz.n
unsigned int i = threadIdx.x; // blockDim.x <= roi.h
while (i < roi.h){
snFloat* pIn = in + i * srcSz.w,
* pOut = out + i * roi.w;
unsigned int j = threadIdx.y; // blockDim.y <= roi.w
while (j < roi.w){
pIn[j] = pOut[j];
j += blockDim.y;
}
i += blockDim.x;
}
}
void crop(bool inToOut, const roi& roi, const snSize& sz, snFloat* in, snFloat* out){
dim3 dimBlock(16, 16);
dim3 dimGrid(int(sz.d), int(sz.n));
if (inToOut)
cropIn2Out << < dimGrid, dimBlock >> >(roi, sz, in, out);
else
cropOut2In << < dimGrid, dimBlock >> >(roi, sz, in, out);
}
| 5c0fdc6e1b3f5c7cb536d243ee67ae8021c6ca55.cu |
#include <cuda_runtime.h>
#include "../stdafx.h"
#include "../structurs.h"
using namespace SN_Base;
__global__ void cropIn2Out(roi roi, snSize srcSz, snFloat* in, snFloat* out){
size_t srcStp = srcSz.w * srcSz.h,
dstStp = roi.w * roi.h;
in += roi.x + roi.y * srcSz.w + srcStp * blockIdx.x + srcStp * srcSz.d * blockIdx.y;
out += dstStp * blockIdx.x + dstStp * srcSz.d * blockIdx.y;
// gridDim.x - srcSz.d
// gridDim.y - srcSz.n
unsigned int i = threadIdx.x; // blockDim.x <= roi.h
while (i < roi.h){
snFloat* pIn = in + i * srcSz.w,
* pOut = out + i * roi.w;
unsigned int j = threadIdx.y; // blockDim.y <= roi.w
while (j < roi.w){
pOut[j] = pIn[j];
j += blockDim.y;
}
i += blockDim.x;
}
}
__global__ void cropOut2In(roi roi, snSize srcSz, snFloat* in, snFloat* out){
size_t srcStp = srcSz.w * srcSz.h,
dstStp = roi.w * roi.h;
in += roi.x + roi.y * srcSz.w + srcStp * blockIdx.x + srcStp * srcSz.d * blockIdx.y;
out += dstStp * blockIdx.x + dstStp * srcSz.d * blockIdx.y;
// gridDim.x - srcSz.d
// gridDim.y - srcSz.n
unsigned int i = threadIdx.x; // blockDim.x <= roi.h
while (i < roi.h){
snFloat* pIn = in + i * srcSz.w,
* pOut = out + i * roi.w;
unsigned int j = threadIdx.y; // blockDim.y <= roi.w
while (j < roi.w){
pIn[j] = pOut[j];
j += blockDim.y;
}
i += blockDim.x;
}
}
void crop(bool inToOut, const roi& roi, const snSize& sz, snFloat* in, snFloat* out){
dim3 dimBlock(16, 16);
dim3 dimGrid(int(sz.d), int(sz.n));
if (inToOut)
cropIn2Out << < dimGrid, dimBlock >> >(roi, sz, in, out);
else
cropOut2In << < dimGrid, dimBlock >> >(roi, sz, in, out);
}
|
05c291a92697d0861ee6b6a80d4cd5d1cdf3c62d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Atomic.cuh>
__global__ void index_mul_2d_float_dim64(
float *out,
const float *in1,
const float *in2,
const int64_t *idx1,
const int64_t size)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
constexpr int fea_dim = 64;
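// With fea_dim == 64 a row is exactly 16 float4 vectors; assuming the launcher
// (not shown here) uses blockDim.x == 16, each thread loads, multiplies and
// stores one float4, i.e. 4 contiguous floats of the row selected by idx1.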
if (start_idx < size) {
int64_t vec_idx1 = (idx1[start_idx] * fea_dim) / 4 + tidx;
int64_t vec_idx2 = (start_idx * fea_dim) / 4 + tidx;
float4 res, src1, src2;
src1 = reinterpret_cast<const float4 *>(in1)[vec_idx1];
src2 = reinterpret_cast<const float4 *>(in2)[vec_idx2];
res.x = src1.x * src2.x;
res.y = src1.y * src2.y;
res.z = src1.z * src2.z;
res.w = src1.w * src2.w;
reinterpret_cast<float4 *>(out)[vec_idx2] = res;
}
}
__global__ void index_mul_2d_float(
float *out,
const float *in1,
const float *in2,
const int64_t *idx1,
const int64_t size,
const int64_t fea_dim)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
const int stride = blockDim.x;
if (start_idx < size) {
int64_t vec_idx1 = (idx1[start_idx] * fea_dim);
int64_t vec_idx2 = (start_idx * fea_dim);
for (int i = tidx; i < fea_dim; i += stride) {
out[vec_idx2 + i] = in1[vec_idx1 + i] * in2[vec_idx2 + i];
}
}
}
__global__ void index_mul_2d_half(
at::Half *out,
const at::Half *in1,
const at::Half *in2,
const int64_t *idx1,
const int64_t size,
const int64_t fea_dim)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
const int stride = blockDim.x;
if (start_idx < size) {
int64_t vec_idx1 = (idx1[start_idx] * fea_dim);
int64_t vec_idx2 = (start_idx * fea_dim);
for (int i = tidx; i < fea_dim; i += stride) {
out[vec_idx2 + i] = at::Half(static_cast<float>(in1[vec_idx1 + i]) * static_cast<float>(in2[vec_idx2 + i]));
}
}
}
__global__ void index_mul_2d_grad_float_dim64(
float *grad_in1,
float *grad_in2,
const float *grad_out,
const float *in1,
const float *in2,
const int64_t *idx1,
const int64_t size)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
constexpr int fea_dim = 64;
if (start_idx < size) {
int64_t vec_idx1 = (idx1[start_idx] * fea_dim) / 4 + tidx;
int64_t vec_idx2 = (start_idx * fea_dim) / 4 + tidx;
float4 src_in1, src_in2, src_grad_out, dst_grad_in2;
src_grad_out = reinterpret_cast<const float4 *>(grad_out)[vec_idx2];
src_in1 = reinterpret_cast<const float4 *>(in1)[vec_idx1];
src_in2 = reinterpret_cast<const float4 *>(in2)[vec_idx2];
int64_t grad_in1_base_idx = idx1[start_idx] * fea_dim + tidx * 4;
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 0, src_grad_out.x * src_in2.x);
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 1, src_grad_out.y * src_in2.y);
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 2, src_grad_out.z * src_in2.z);
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 3, src_grad_out.w * src_in2.w);
dst_grad_in2.x = src_grad_out.x * src_in1.x;
dst_grad_in2.y = src_grad_out.y * src_in1.y;
dst_grad_in2.z = src_grad_out.z * src_in1.z;
dst_grad_in2.w = src_grad_out.w * src_in1.w;
reinterpret_cast<float4 *>(grad_in2)[vec_idx2] = dst_grad_in2;
}
}
__global__ void index_mul_2d_grad_float(
float *grad_in1,
float *grad_in2,
const float *grad_out,
const float *in1,
const float *in2,
const int64_t *idx1,
const int64_t size,
const int64_t fea_dim)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
const int stride = blockDim.x;
if (start_idx < size) {
int64_t vec_idx1 = idx1[start_idx] * fea_dim;
int64_t vec_idx2 = start_idx * fea_dim;
for (int i = tidx; i < fea_dim; i += stride) {
float src_in1 = in1[vec_idx1 + i];
float src_in2 = in2[vec_idx2 + i];
float src_grad_out = grad_out[vec_idx2 + i];
grad_in2[vec_idx2 + i] = src_grad_out * src_in1;
gpuAtomicAdd(grad_in1 + vec_idx1 + i, src_grad_out * src_in2);
}
}
}
__global__ void index_mul_2d_grad_half(
at::Half *grad_in1,
at::Half *grad_in2,
const at::Half *grad_out,
const at::Half *in1,
const at::Half *in2,
const int64_t *idx1,
const int64_t size,
const int64_t fea_dim)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
const int stride = blockDim.x;
if (start_idx < size) {
int64_t vec_idx1 = idx1[start_idx] * fea_dim;
int64_t vec_idx2 = start_idx * fea_dim;
for (int i = tidx; i < fea_dim; i += stride) {
float src_in1 = static_cast<float>(in1[vec_idx1 + i]);
float src_in2 = static_cast<float>(in2[vec_idx2 + i]);
float src_grad_out = static_cast<float>(grad_out[vec_idx2 + i]);
grad_in2[vec_idx2 + i] = at::Half(src_grad_out * src_in1);
gpuAtomicAdd(grad_in1 + vec_idx1 + i, at::Half(src_grad_out * src_in2));
}
}
}
__global__ void index_mul_2d_grad_grad_float_dim64(
float *grad_grad_out,
float *grad_in1,
float *grad_in2,
const float *grad_out,
const float *grad_grad_in1,
const float *grad_grad_in2,
const float *in1,
const float *in2,
const int64_t *idx1,
const int64_t size)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
constexpr int fea_dim = 64;
if (start_idx < size) {
int64_t vec_idx1 = (idx1[start_idx] * fea_dim) / 4 + tidx;
int64_t vec_idx2 = (start_idx * fea_dim) / 4 + tidx;
float4 src_grad_grad_in1, src_in1, src_grad_grad_in2, src_in2, src_grad_out;
float4 dst_grad_grad_out, dst_grad_in2;
src_grad_grad_in1 = reinterpret_cast<const float4 *>(grad_grad_in1)[vec_idx1];
src_in1 = reinterpret_cast<const float4 *>(in1)[vec_idx1];
src_grad_grad_in2 = reinterpret_cast<const float4 *>(grad_grad_in2)[vec_idx2];
src_in2 = reinterpret_cast<const float4 *>(in2)[vec_idx2];
dst_grad_grad_out.x = src_grad_grad_in1.x * src_in2.x + src_grad_grad_in2.x * src_in1.x;
dst_grad_grad_out.y = src_grad_grad_in1.y * src_in2.y + src_grad_grad_in2.y * src_in1.y;
dst_grad_grad_out.z = src_grad_grad_in1.z * src_in2.z + src_grad_grad_in2.z * src_in1.z;
dst_grad_grad_out.w = src_grad_grad_in1.w * src_in2.w + src_grad_grad_in2.w * src_in1.w;
reinterpret_cast<float4 *>(grad_grad_out)[vec_idx2] = dst_grad_grad_out;
src_grad_out = reinterpret_cast<const float4 *>(grad_out)[vec_idx2];
int64_t grad_in1_base_idx = idx1[start_idx] * fea_dim + tidx * 4;
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 0, src_grad_grad_in2.x * src_grad_out.x);
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 1, src_grad_grad_in2.y * src_grad_out.y);
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 2, src_grad_grad_in2.z * src_grad_out.z);
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 3, src_grad_grad_in2.w * src_grad_out.w);
dst_grad_in2.x = src_grad_grad_in1.x * src_grad_out.x;
dst_grad_in2.y = src_grad_grad_in1.y * src_grad_out.y;
dst_grad_in2.z = src_grad_grad_in1.z * src_grad_out.z;
dst_grad_in2.w = src_grad_grad_in1.w * src_grad_out.w;
reinterpret_cast<float4 *>(grad_in2)[vec_idx2] = dst_grad_in2;
}
}
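// Generic double-backward kernel (float, arbitrary fea_dim): same math as the dim64 version,
// but strided over the feature dimension without vectorized loads.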
__global__ void index_mul_2d_grad_grad_float(
float *grad_grad_out,
float *grad_in1,
float *grad_in2,
const float *grad_out,
const float *grad_grad_in1,
const float *grad_grad_in2,
const float *in1,
const float *in2,
const int64_t *idx1,
const int64_t size,
const int64_t fea_dim)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
const int stride = blockDim.x;
if (start_idx < size) {
int64_t vec_idx1 = idx1[start_idx] * fea_dim;
int64_t vec_idx2 = start_idx * fea_dim;
for (int i = tidx; i < fea_dim; i += stride) {
float src_grad_grad_in1 = grad_grad_in1[vec_idx1 + i];
float src_grad_grad_in2 = grad_grad_in2[vec_idx2 + i];
float src_in1 = in1[vec_idx1 + i];
float src_in2 = in2[vec_idx2 + i];
float src_grad_out = grad_out[vec_idx2 + i];
grad_grad_out[vec_idx2 + i] = src_grad_grad_in1 * src_in2 + src_grad_grad_in2 * src_in1;
grad_in2[vec_idx2 + i] = src_grad_grad_in1 * src_grad_out;
gpuAtomicAdd(grad_in1 + vec_idx1 + i, src_grad_grad_in2 * src_grad_out);
}
}
}
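// Half-precision double-backward: accumulate in float, store results as at::Half.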
__global__ void index_mul_2d_grad_grad_half(
at::Half *grad_grad_out,
at::Half *grad_in1,
at::Half *grad_in2,
const at::Half *grad_out,
const at::Half *grad_grad_in1,
const at::Half *grad_grad_in2,
const at::Half *in1,
const at::Half *in2,
const int64_t *idx1,
const int64_t size,
const int64_t fea_dim)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
const int stride = blockDim.x;
if (start_idx < size) {
int64_t vec_idx1 = idx1[start_idx] * fea_dim;
int64_t vec_idx2 = start_idx * fea_dim;
for (int i = tidx; i < fea_dim; i += stride) {
float src_grad_grad_in1 = static_cast<float>(grad_grad_in1[vec_idx1 + i]);
float src_grad_grad_in2 = static_cast<float>(grad_grad_in2[vec_idx2 + i]);
float src_in1 = static_cast<float>(in1[vec_idx1 + i]);
float src_in2 = static_cast<float>(in2[vec_idx2 + i]);
float src_grad_out = static_cast<float>(grad_out[vec_idx2 + i]);
grad_grad_out[vec_idx2 + i] = at::Half(src_grad_grad_in1 * src_in2 + src_grad_grad_in2 * src_in1);
grad_in2[vec_idx2 + i] = at::Half(src_grad_grad_in1 * src_grad_out);
gpuAtomicAdd(grad_in1 + vec_idx1 + i, at::Half(src_grad_grad_in2 * src_grad_out));
}
}
}
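// Host launcher for the float forward pass: the fea_dim == 64 path uses a 16x16 block
// (one row per thread.y, float4 per thread.x), otherwise a 32x8 block strides over fea_dim.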
void index_mul_2d_float_foward_cuda(at::Tensor &out,
const at::Tensor &in1,
const at::Tensor &in2,
const at::Tensor &idx1) {
const int64_t size = in2.size(0);
const int64_t fea_dim = in2.size(1);
if (size < 0){
return;
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (fea_dim == 64) {
const int BLOCK_THREADS_DIMX = 16;
const int BLOCK_THREADS_DIMY = 16;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
hipLaunchKernelGGL(( index_mul_2d_float_dim64), dim3(BLOCK_NUMS), dim3(BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1), 0, stream,
out.data_ptr<float>(), in1.data_ptr<float>(), in2.data_ptr<float>(),
idx1.data_ptr<int64_t>(), size);
} else {
const int BLOCK_THREADS_DIMX = 32;
const int BLOCK_THREADS_DIMY = 8;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
hipLaunchKernelGGL(( index_mul_2d_float), dim3(BLOCK_NUMS), dim3(BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1), 0, stream,
out.data_ptr<float>(), in1.data_ptr<float>(), in2.data_ptr<float>(),
idx1.data_ptr<int64_t>(), size, fea_dim);
}
AT_CUDA_CHECK(hipGetLastError());
}
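// Host launcher for the float backward pass; same dim64 / generic dispatch as the forward.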
void index_mul_2d_float_backward_cuda(at::Tensor &grad_in1,
at::Tensor &grad_in2,
const at::Tensor &grad_out,
const at::Tensor &in1,
const at::Tensor &in2,
const at::Tensor &idx1) {
const int64_t size = in2.size(0);
const int64_t fea_dim = in2.size(1);
if (size < 0){
return;
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (fea_dim == 64) {
const int BLOCK_THREADS_DIMX = 16;
const int BLOCK_THREADS_DIMY = 16;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
hipLaunchKernelGGL(( index_mul_2d_grad_float_dim64), dim3(BLOCK_NUMS), dim3(BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1), 0, stream,
grad_in1.data_ptr<float>(), grad_in2.data_ptr<float>(), grad_out.data_ptr<float>(),
in1.data_ptr<float>(), in2.data_ptr<float>(), idx1.data_ptr<int64_t>(), size);
AT_CUDA_CHECK(hipGetLastError());
} else {
const int BLOCK_THREADS_DIMX = 32;
const int BLOCK_THREADS_DIMY = 8;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
hipLaunchKernelGGL(( index_mul_2d_grad_float), dim3(BLOCK_NUMS), dim3(BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1), 0, stream,
grad_in1.data_ptr<float>(), grad_in2.data_ptr<float>(), grad_out.data_ptr<float>(),
in1.data_ptr<float>(), in2.data_ptr<float>(), idx1.data_ptr<int64_t>(), size, fea_dim);
}
}
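// Host launcher for the float double-backward pass; same dim64 / generic dispatch.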
void index_mul_2d_float_backward_backward_cuda(at::Tensor &grad_grad_out,
at::Tensor &grad_in1,
at::Tensor &grad_in2,
const at::Tensor &grad_out,
const at::Tensor &grad_grad_in1,
const at::Tensor &grad_grad_in2,
const at::Tensor &in1,
const at::Tensor &in2,
const at::Tensor &idx1) {
const int64_t size = in2.size(0);
const int64_t fea_dim = in2.size(1);
if (size < 0){
return;
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (fea_dim == 64) {
const int BLOCK_THREADS_DIMX = 16;
const int BLOCK_THREADS_DIMY = 16;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
hipLaunchKernelGGL(( index_mul_2d_grad_grad_float_dim64), dim3(BLOCK_NUMS), dim3(BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1), 0, stream,
grad_grad_out.data_ptr<float>(), grad_in1.data_ptr<float>(), grad_in2.data_ptr<float>(),
grad_out.data_ptr<float>(), grad_grad_in1.data_ptr<float>(), grad_grad_in2.data_ptr<float>(),
in1.data_ptr<float>(), in2.data_ptr<float>(), idx1.data_ptr<int64_t>(), size);
} else {
const int BLOCK_THREADS_DIMX = 32;
const int BLOCK_THREADS_DIMY = 8;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
hipLaunchKernelGGL(( index_mul_2d_grad_grad_float), dim3(BLOCK_NUMS), dim3(BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1), 0, stream,
grad_grad_out.data_ptr<float>(), grad_in1.data_ptr<float>(), grad_in2.data_ptr<float>(),
grad_out.data_ptr<float>(), grad_grad_in1.data_ptr<float>(), grad_grad_in2.data_ptr<float>(),
in1.data_ptr<float>(), in2.data_ptr<float>(), idx1.data_ptr<int64_t>(), size, fea_dim);
}
AT_CUDA_CHECK(hipGetLastError());
}
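// Host launcher for the half forward pass; only the generic strided kernel is provided for half.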
void index_mul_2d_half_foward_cuda(at::Tensor &out,
const at::Tensor &in1,
const at::Tensor &in2,
const at::Tensor &idx1) {
const int64_t size = in2.size(0);
const int64_t fea_dim = in2.size(1);
if (size < 0){
return;
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int BLOCK_THREADS_DIMX = 32;
const int BLOCK_THREADS_DIMY = 8;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
hipLaunchKernelGGL(( index_mul_2d_half), dim3(BLOCK_NUMS), dim3(BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1), 0, stream,
out.data_ptr<at::Half>(), in1.data_ptr<at::Half>(), in2.data_ptr<at::Half>(),
idx1.data_ptr<int64_t>(), size, fea_dim);
AT_CUDA_CHECK(hipGetLastError());
}
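// Host launcher for the half backward pass.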
void index_mul_2d_half_backward_cuda(at::Tensor &grad_in1,
at::Tensor &grad_in2,
const at::Tensor &grad_out,
const at::Tensor &in1,
const at::Tensor &in2,
const at::Tensor &idx1) {
const int64_t size = in2.size(0);
const int64_t fea_dim = in2.size(1);
if (size < 0){
return;
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int BLOCK_THREADS_DIMX = 32;
const int BLOCK_THREADS_DIMY = 8;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
hipLaunchKernelGGL(( index_mul_2d_grad_half), dim3(BLOCK_NUMS), dim3(BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1), 0, stream,
grad_in1.data_ptr<at::Half>(), grad_in2.data_ptr<at::Half>(), grad_out.data_ptr<at::Half>(),
in1.data_ptr<at::Half>(), in2.data_ptr<at::Half>(), idx1.data_ptr<int64_t>(), size, fea_dim);
}
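// Host launcher for the half double-backward pass.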
void index_mul_2d_half_backward_backward_cuda(at::Tensor &grad_grad_out,
at::Tensor &grad_in1,
at::Tensor &grad_in2,
const at::Tensor &grad_out,
const at::Tensor &grad_grad_in1,
const at::Tensor &grad_grad_in2,
const at::Tensor &in1,
const at::Tensor &in2,
const at::Tensor &idx1) {
const int64_t size = in2.size(0);
const int64_t fea_dim = in2.size(1);
if (size < 0){
return;
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int BLOCK_THREADS_DIMX = 32;
const int BLOCK_THREADS_DIMY = 8;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
hipLaunchKernelGGL(( index_mul_2d_grad_grad_half), dim3(BLOCK_NUMS), dim3(BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1), 0, stream,
grad_grad_out.data_ptr<at::Half>(), grad_in1.data_ptr<at::Half>(), grad_in2.data_ptr<at::Half>(),
grad_out.data_ptr<at::Half>(), grad_grad_in1.data_ptr<at::Half>(), grad_grad_in2.data_ptr<at::Half>(),
in1.data_ptr<at::Half>(), in2.data_ptr<at::Half>(), idx1.data_ptr<int64_t>(), size, fea_dim);
AT_CUDA_CHECK(hipGetLastError());
} | 05c291a92697d0861ee6b6a80d4cd5d1cdf3c62d.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Atomic.cuh>
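// Forward kernel specialized for fea_dim == 64: out[i] = in1[idx1[i]] * in2[i], with each row
// handled by 16 threads using float4 loads/stores (4 features per thread).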
__global__ void index_mul_2d_float_dim64(
float *out,
const float *in1,
const float *in2,
const int64_t *idx1,
const int64_t size)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
constexpr int fea_dim = 64;
if (start_idx < size) {
int64_t vec_idx1 = (idx1[start_idx] * fea_dim) / 4 + tidx;
int64_t vec_idx2 = (start_idx * fea_dim) / 4 + tidx;
float4 res, src1, src2;
src1 = reinterpret_cast<const float4 *>(in1)[vec_idx1];
src2 = reinterpret_cast<const float4 *>(in2)[vec_idx2];
res.x = src1.x * src2.x;
res.y = src1.y * src2.y;
res.z = src1.z * src2.z;
res.w = src1.w * src2.w;
reinterpret_cast<float4 *>(out)[vec_idx2] = res;
}
}
__global__ void index_mul_2d_float(
float *out,
const float *in1,
const float *in2,
const int64_t *idx1,
const int64_t size,
const int64_t fea_dim)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
const int stride = blockDim.x;
if (start_idx < size) {
int64_t vec_idx1 = (idx1[start_idx] * fea_dim);
int64_t vec_idx2 = (start_idx * fea_dim);
for (int i = tidx; i < fea_dim; i += stride) {
out[vec_idx2 + i] = in1[vec_idx1 + i] * in2[vec_idx2 + i];
}
}
}
__global__ void index_mul_2d_half(
at::Half *out,
const at::Half *in1,
const at::Half *in2,
const int64_t *idx1,
const int64_t size,
const int64_t fea_dim)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
const int stride = blockDim.x;
if (start_idx < size) {
int64_t vec_idx1 = (idx1[start_idx] * fea_dim);
int64_t vec_idx2 = (start_idx * fea_dim);
for (int i = tidx; i < fea_dim; i += stride) {
out[vec_idx2 + i] = at::Half(static_cast<float>(in1[vec_idx1 + i]) * static_cast<float>(in2[vec_idx2 + i]));
}
}
}
__global__ void index_mul_2d_grad_float_dim64(
float *grad_in1,
float *grad_in2,
const float *grad_out,
const float *in1,
const float *in2,
const int64_t *idx1,
const int64_t size)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
constexpr int fea_dim = 64;
if (start_idx < size) {
int64_t vec_idx1 = (idx1[start_idx] * fea_dim) / 4 + tidx;
int64_t vec_idx2 = (start_idx * fea_dim) / 4 + tidx;
float4 src_in1, src_in2, src_grad_out, dst_grad_in2;
src_grad_out = reinterpret_cast<const float4 *>(grad_out)[vec_idx2];
src_in1 = reinterpret_cast<const float4 *>(in1)[vec_idx1];
src_in2 = reinterpret_cast<const float4 *>(in2)[vec_idx2];
int64_t grad_in1_base_idx = idx1[start_idx] * fea_dim + tidx * 4;
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 0, src_grad_out.x * src_in2.x);
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 1, src_grad_out.y * src_in2.y);
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 2, src_grad_out.z * src_in2.z);
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 3, src_grad_out.w * src_in2.w);
dst_grad_in2.x = src_grad_out.x * src_in1.x;
dst_grad_in2.y = src_grad_out.y * src_in1.y;
dst_grad_in2.z = src_grad_out.z * src_in1.z;
dst_grad_in2.w = src_grad_out.w * src_in1.w;
reinterpret_cast<float4 *>(grad_in2)[vec_idx2] = dst_grad_in2;
}
}
__global__ void index_mul_2d_grad_float(
float *grad_in1,
float *grad_in2,
const float *grad_out,
const float *in1,
const float *in2,
const int64_t *idx1,
const int64_t size,
const int64_t fea_dim)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
const int stride = blockDim.x;
if (start_idx < size) {
int64_t vec_idx1 = idx1[start_idx] * fea_dim;
int64_t vec_idx2 = start_idx * fea_dim;
for (int i = tidx; i < fea_dim; i += stride) {
float src_in1 = in1[vec_idx1 + i];
float src_in2 = in2[vec_idx2 + i];
float src_grad_out = grad_out[vec_idx2 + i];
grad_in2[vec_idx2 + i] = src_grad_out * src_in1;
gpuAtomicAdd(grad_in1 + vec_idx1 + i, src_grad_out * src_in2);
}
}
}
__global__ void index_mul_2d_grad_half(
at::Half *grad_in1,
at::Half *grad_in2,
const at::Half *grad_out,
const at::Half *in1,
const at::Half *in2,
const int64_t *idx1,
const int64_t size,
const int64_t fea_dim)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
const int stride = blockDim.x;
if (start_idx < size) {
int64_t vec_idx1 = idx1[start_idx] * fea_dim;
int64_t vec_idx2 = start_idx * fea_dim;
for (int i = tidx; i < fea_dim; i += stride) {
float src_in1 = static_cast<float>(in1[vec_idx1 + i]);
float src_in2 = static_cast<float>(in2[vec_idx2 + i]);
float src_grad_out = static_cast<float>(grad_out[vec_idx2 + i]);
grad_in2[vec_idx2 + i] = at::Half(src_grad_out * src_in1);
gpuAtomicAdd(grad_in1 + vec_idx1 + i, at::Half(src_grad_out * src_in2));
}
}
}
__global__ void index_mul_2d_grad_grad_float_dim64(
float *grad_grad_out,
float *grad_in1,
float *grad_in2,
const float *grad_out,
const float *grad_grad_in1,
const float *grad_grad_in2,
const float *in1,
const float *in2,
const int64_t *idx1,
const int64_t size)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
constexpr int fea_dim = 64;
if (start_idx < size) {
int64_t vec_idx1 = (idx1[start_idx] * fea_dim) / 4 + tidx;
int64_t vec_idx2 = (start_idx * fea_dim) / 4 + tidx;
float4 src_grad_grad_in1, src_in1, src_grad_grad_in2, src_in2, src_grad_out;
float4 dst_grad_grad_out, dst_grad_in2;
src_grad_grad_in1 = reinterpret_cast<const float4 *>(grad_grad_in1)[vec_idx1];
src_in1 = reinterpret_cast<const float4 *>(in1)[vec_idx1];
src_grad_grad_in2 = reinterpret_cast<const float4 *>(grad_grad_in2)[vec_idx2];
src_in2 = reinterpret_cast<const float4 *>(in2)[vec_idx2];
dst_grad_grad_out.x = src_grad_grad_in1.x * src_in2.x + src_grad_grad_in2.x * src_in1.x;
dst_grad_grad_out.y = src_grad_grad_in1.y * src_in2.y + src_grad_grad_in2.y * src_in1.y;
dst_grad_grad_out.z = src_grad_grad_in1.z * src_in2.z + src_grad_grad_in2.z * src_in1.z;
dst_grad_grad_out.w = src_grad_grad_in1.w * src_in2.w + src_grad_grad_in2.w * src_in1.w;
reinterpret_cast<float4 *>(grad_grad_out)[vec_idx2] = dst_grad_grad_out;
src_grad_out = reinterpret_cast<const float4 *>(grad_out)[vec_idx2];
int64_t grad_in1_base_idx = idx1[start_idx] * fea_dim + tidx * 4;
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 0, src_grad_grad_in2.x * src_grad_out.x);
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 1, src_grad_grad_in2.y * src_grad_out.y);
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 2, src_grad_grad_in2.z * src_grad_out.z);
gpuAtomicAdd(grad_in1 + grad_in1_base_idx + 3, src_grad_grad_in2.w * src_grad_out.w);
dst_grad_in2.x = src_grad_grad_in1.x * src_grad_out.x;
dst_grad_in2.y = src_grad_grad_in1.y * src_grad_out.y;
dst_grad_in2.z = src_grad_grad_in1.z * src_grad_out.z;
dst_grad_in2.w = src_grad_grad_in1.w * src_grad_out.w;
reinterpret_cast<float4 *>(grad_in2)[vec_idx2] = dst_grad_in2;
}
}
__global__ void index_mul_2d_grad_grad_float(
float *grad_grad_out,
float *grad_in1,
float *grad_in2,
const float *grad_out,
const float *grad_grad_in1,
const float *grad_grad_in2,
const float *in1,
const float *in2,
const int64_t *idx1,
const int64_t size,
const int64_t fea_dim)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
const int stride = blockDim.x;
if (start_idx < size) {
int64_t vec_idx1 = idx1[start_idx] * fea_dim;
int64_t vec_idx2 = start_idx * fea_dim;
for (int i = tidx; i < fea_dim; i += stride) {
float src_grad_grad_in1 = grad_grad_in1[vec_idx1 + i];
float src_grad_grad_in2 = grad_grad_in2[vec_idx2 + i];
float src_in1 = in1[vec_idx1 + i];
float src_in2 = in2[vec_idx2 + i];
float src_grad_out = grad_out[vec_idx2 + i];
grad_grad_out[vec_idx2 + i] = src_grad_grad_in1 * src_in2 + src_grad_grad_in2 * src_in1;
grad_in2[vec_idx2 + i] = src_grad_grad_in1 * src_grad_out;
gpuAtomicAdd(grad_in1 + vec_idx1 + i, src_grad_grad_in2 * src_grad_out);
}
}
}
__global__ void index_mul_2d_grad_grad_half(
at::Half *grad_grad_out,
at::Half *grad_in1,
at::Half *grad_in2,
const at::Half *grad_out,
const at::Half *grad_grad_in1,
const at::Half *grad_grad_in2,
const at::Half *in1,
const at::Half *in2,
const int64_t *idx1,
const int64_t size,
const int64_t fea_dim)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int start_idx = bidx * blockDim.y + tidy;
const int stride = blockDim.x;
if (start_idx < size) {
int64_t vec_idx1 = idx1[start_idx] * fea_dim;
int64_t vec_idx2 = start_idx * fea_dim;
for (int i = tidx; i < fea_dim; i += stride) {
float src_grad_grad_in1 = static_cast<float>(grad_grad_in1[vec_idx1 + i]);
float src_grad_grad_in2 = static_cast<float>(grad_grad_in2[vec_idx2 + i]);
float src_in1 = static_cast<float>(in1[vec_idx1 + i]);
float src_in2 = static_cast<float>(in2[vec_idx2 + i]);
float src_grad_out = static_cast<float>(grad_out[vec_idx2 + i]);
grad_grad_out[vec_idx2 + i] = at::Half(src_grad_grad_in1 * src_in2 + src_grad_grad_in2 * src_in1);
grad_in2[vec_idx2 + i] = at::Half(src_grad_grad_in1 * src_grad_out);
gpuAtomicAdd(grad_in1 + vec_idx1 + i, at::Half(src_grad_grad_in2 * src_grad_out));
}
}
}
void index_mul_2d_float_foward_cuda(at::Tensor &out,
const at::Tensor &in1,
const at::Tensor &in2,
const at::Tensor &idx1) {
const int64_t size = in2.size(0);
const int64_t fea_dim = in2.size(1);
if (size < 0){
return;
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (fea_dim == 64) {
const int BLOCK_THREADS_DIMX = 16;
const int BLOCK_THREADS_DIMY = 16;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
index_mul_2d_float_dim64<<<BLOCK_NUMS, {BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1}, 0, stream>>>(
out.data_ptr<float>(), in1.data_ptr<float>(), in2.data_ptr<float>(),
idx1.data_ptr<int64_t>(), size);
} else {
const int BLOCK_THREADS_DIMX = 32;
const int BLOCK_THREADS_DIMY = 8;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
index_mul_2d_float<<<BLOCK_NUMS, {BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1}, 0, stream>>>(
out.data_ptr<float>(), in1.data_ptr<float>(), in2.data_ptr<float>(),
idx1.data_ptr<int64_t>(), size, fea_dim);
}
AT_CUDA_CHECK(cudaGetLastError());
}
void index_mul_2d_float_backward_cuda(at::Tensor &grad_in1,
at::Tensor &grad_in2,
const at::Tensor &grad_out,
const at::Tensor &in1,
const at::Tensor &in2,
const at::Tensor &idx1) {
const int64_t size = in2.size(0);
const int64_t fea_dim = in2.size(1);
if (size < 0){
return;
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (fea_dim == 64) {
const int BLOCK_THREADS_DIMX = 16;
const int BLOCK_THREADS_DIMY = 16;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
index_mul_2d_grad_float_dim64<<<BLOCK_NUMS, {BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1}, 0, stream>>>(
grad_in1.data_ptr<float>(), grad_in2.data_ptr<float>(), grad_out.data_ptr<float>(),
in1.data_ptr<float>(), in2.data_ptr<float>(), idx1.data_ptr<int64_t>(), size);
AT_CUDA_CHECK(cudaGetLastError());
} else {
const int BLOCK_THREADS_DIMX = 32;
const int BLOCK_THREADS_DIMY = 8;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
index_mul_2d_grad_float<<<BLOCK_NUMS, {BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1}, 0, stream>>>(
grad_in1.data_ptr<float>(), grad_in2.data_ptr<float>(), grad_out.data_ptr<float>(),
in1.data_ptr<float>(), in2.data_ptr<float>(), idx1.data_ptr<int64_t>(), size, fea_dim);
}
}
void index_mul_2d_float_backward_backward_cuda(at::Tensor &grad_grad_out,
at::Tensor &grad_in1,
at::Tensor &grad_in2,
const at::Tensor &grad_out,
const at::Tensor &grad_grad_in1,
const at::Tensor &grad_grad_in2,
const at::Tensor &in1,
const at::Tensor &in2,
const at::Tensor &idx1) {
const int64_t size = in2.size(0);
const int64_t fea_dim = in2.size(1);
if (size < 0){
return;
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (fea_dim == 64) {
const int BLOCK_THREADS_DIMX = 16;
const int BLOCK_THREADS_DIMY = 16;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
index_mul_2d_grad_grad_float_dim64<<<BLOCK_NUMS, {BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1}, 0, stream>>>(
grad_grad_out.data_ptr<float>(), grad_in1.data_ptr<float>(), grad_in2.data_ptr<float>(),
grad_out.data_ptr<float>(), grad_grad_in1.data_ptr<float>(), grad_grad_in2.data_ptr<float>(),
in1.data_ptr<float>(), in2.data_ptr<float>(), idx1.data_ptr<int64_t>(), size);
} else {
const int BLOCK_THREADS_DIMX = 32;
const int BLOCK_THREADS_DIMY = 8;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
index_mul_2d_grad_grad_float<<<BLOCK_NUMS, {BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1}, 0, stream>>>(
grad_grad_out.data_ptr<float>(), grad_in1.data_ptr<float>(), grad_in2.data_ptr<float>(),
grad_out.data_ptr<float>(), grad_grad_in1.data_ptr<float>(), grad_grad_in2.data_ptr<float>(),
in1.data_ptr<float>(), in2.data_ptr<float>(), idx1.data_ptr<int64_t>(), size, fea_dim);
}
AT_CUDA_CHECK(cudaGetLastError());
}
void index_mul_2d_half_foward_cuda(at::Tensor &out,
const at::Tensor &in1,
const at::Tensor &in2,
const at::Tensor &idx1) {
const int64_t size = in2.size(0);
const int64_t fea_dim = in2.size(1);
if (size < 0){
return;
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int BLOCK_THREADS_DIMX = 32;
const int BLOCK_THREADS_DIMY = 8;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
index_mul_2d_half<<<BLOCK_NUMS, {BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1}, 0, stream>>>(
out.data_ptr<at::Half>(), in1.data_ptr<at::Half>(), in2.data_ptr<at::Half>(),
idx1.data_ptr<int64_t>(), size, fea_dim);
AT_CUDA_CHECK(cudaGetLastError());
}
void index_mul_2d_half_backward_cuda(at::Tensor &grad_in1,
at::Tensor &grad_in2,
const at::Tensor &grad_out,
const at::Tensor &in1,
const at::Tensor &in2,
const at::Tensor &idx1) {
const int64_t size = in2.size(0);
const int64_t fea_dim = in2.size(1);
if (size < 0){
return;
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int BLOCK_THREADS_DIMX = 32;
const int BLOCK_THREADS_DIMY = 8;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
index_mul_2d_grad_half<<<BLOCK_NUMS, {BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1}, 0, stream>>>(
grad_in1.data_ptr<at::Half>(), grad_in2.data_ptr<at::Half>(), grad_out.data_ptr<at::Half>(),
in1.data_ptr<at::Half>(), in2.data_ptr<at::Half>(), idx1.data_ptr<int64_t>(), size, fea_dim);
}
void index_mul_2d_half_backward_backward_cuda(at::Tensor &grad_grad_out,
at::Tensor &grad_in1,
at::Tensor &grad_in2,
const at::Tensor &grad_out,
const at::Tensor &grad_grad_in1,
const at::Tensor &grad_grad_in2,
const at::Tensor &in1,
const at::Tensor &in2,
const at::Tensor &idx1) {
const int64_t size = in2.size(0);
const int64_t fea_dim = in2.size(1);
if (size < 0){
return;
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int BLOCK_THREADS_DIMX = 32;
const int BLOCK_THREADS_DIMY = 8;
const int BLOCK_NUMS = (size + BLOCK_THREADS_DIMY - 1) / BLOCK_THREADS_DIMY;
index_mul_2d_grad_grad_half<<<BLOCK_NUMS, {BLOCK_THREADS_DIMX, BLOCK_THREADS_DIMY, 1}, 0, stream>>>(
grad_grad_out.data_ptr<at::Half>(), grad_in1.data_ptr<at::Half>(), grad_in2.data_ptr<at::Half>(),
grad_out.data_ptr<at::Half>(), grad_grad_in1.data_ptr<at::Half>(), grad_grad_in2.data_ptr<at::Half>(),
in1.data_ptr<at::Half>(), in2.data_ptr<at::Half>(), idx1.data_ptr<int64_t>(), size, fea_dim);
AT_CUDA_CHECK(cudaGetLastError());
} |
d4d70f93c4c738c4dbc288686da0a3af48585200.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "layer_updater_cuda.h"
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
namespace nnforge
{
namespace cuda
{
layer_updater_cuda::layer_updater_cuda()
{
}
layer_updater_cuda::~layer_updater_cuda()
{
}
void layer_updater_cuda::configure(
const layer_configuration_specific& input_configuration_specific,
const layer_configuration_specific& output_configuration_specific,
const_layer_smart_ptr layer_schema,
cuda_running_configuration_const_smart_ptr cuda_config,
bool backprop_required)
{
this->layer_schema = layer_schema;
this->input_configuration_specific = input_configuration_specific;
this->output_configuration_specific = output_configuration_specific;
this->cuda_config = cuda_config;
this->backprop_required = backprop_required;
input_elem_count_per_entry = input_configuration_specific.get_neuron_count();
output_elem_count_per_entry = output_configuration_specific.get_neuron_count();
input_elem_count_per_feature_map = input_configuration_specific.get_neuron_count_per_feature_map();
output_elem_count_per_feature_map = output_configuration_specific.get_neuron_count_per_feature_map();
updater_configured();
}
void layer_updater_cuda::updater_configured()
{
}
std::vector<size_t> layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const
{
return std::vector<size_t>();
}
std::vector<unsigned int> layer_updater_cuda::get_linear_addressing_through_texture_per_entry() const
{
return std::vector<unsigned int>();
}
void layer_updater_cuda::update_buffer_configuration(buffer_cuda_size_configuration& buffer_configuration) const
{
std::vector<size_t> per_entry_sizes = get_sizes_of_additional_buffers_per_entry();
for(std::vector<size_t>::const_iterator it = per_entry_sizes.begin(); it != per_entry_sizes.end(); ++it)
buffer_configuration.add_per_entry_buffer(*it);
std::vector<size_t> fixed_sized = get_sizes_of_additional_buffers_fixed();
for(std::vector<size_t>::const_iterator it = fixed_sized.begin(); it != fixed_sized.end(); ++it)
buffer_configuration.add_constant_buffer(*it);
buffer_configuration.add_per_entry_buffer(output_elem_count_per_entry * sizeof(float));
if (backprop_required && !is_in_place_backprop())
buffer_configuration.add_per_entry_buffer(input_elem_count_per_entry * sizeof(float));
std::vector<unsigned int> tex_per_entry = get_linear_addressing_through_texture_per_entry();
for(std::vector<unsigned int>::const_iterator it = tex_per_entry.begin(); it != tex_per_entry.end(); ++it)
buffer_configuration.add_per_entry_linear_addressing_through_texture(*it);
}
void layer_updater_cuda::update_buffer_configuration(
buffer_cuda_size_configuration& buffer_configuration,
unsigned int updater_entry_count) const
{
std::vector<size_t> per_entry_sizes = get_sizes_of_additional_buffers_per_entry();
for(std::vector<size_t>::const_iterator it = per_entry_sizes.begin(); it != per_entry_sizes.end(); ++it)
buffer_configuration.add_constant_buffer(*it * updater_entry_count);
std::vector<size_t> fixed_sizes = get_sizes_of_additional_buffers_fixed();
for(std::vector<size_t>::const_iterator it = fixed_sizes.begin(); it != fixed_sizes.end(); ++it)
buffer_configuration.add_constant_buffer(*it);
buffer_configuration.add_constant_buffer(output_elem_count_per_entry * sizeof(float) * updater_entry_count);
if (backprop_required && !is_in_place_backprop())
buffer_configuration.add_constant_buffer(input_elem_count_per_entry * sizeof(float) * updater_entry_count);
}
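// Allocates the additional per-entry and fixed-size buffers, the output-neurons buffer and,
// when backprop requires a separate error buffer, the input-errors buffer for max_entry_count entries.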
layer_updater_cuda::buffer_set layer_updater_cuda::allocate_all_buffers(unsigned int max_entry_count)
{
buffer_set res;
set_max_entry_count(max_entry_count);
std::vector<size_t> per_entry_sizes = get_sizes_of_additional_buffers_per_entry();
for(std::vector<size_t>::const_iterator it = per_entry_sizes.begin(); it != per_entry_sizes.end(); ++it)
res.additional_buffers.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(*it * max_entry_count)));
std::vector<size_t> fixed_sizes = get_sizes_of_additional_buffers_fixed();
for(std::vector<size_t>::const_iterator it = fixed_sizes.begin(); it != fixed_sizes.end(); ++it)
res.additional_buffers.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(*it)));
fill_additional_buffers(res.additional_buffers);
{
size_t sz = output_elem_count_per_entry * sizeof(float) * max_entry_count;
res.output_neurons_buffer = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(sz));
}
if (backprop_required && !is_in_place_backprop())
{
size_t sz = input_elem_count_per_entry * sizeof(float) * max_entry_count;
res.input_errors_buffer = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(sz));
}
res.dynamic_memobjects.resize(get_dynamic_memobject_count());
return res;
}
void layer_updater_cuda::enqueue_update_weights(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& gradient,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
}
void layer_updater_cuda::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
}
std::vector<size_t> layer_updater_cuda::get_sizes_of_additional_buffers_fixed() const
{
return std::vector<size_t>();
}
void layer_updater_cuda::set_max_entry_count(unsigned int max_entry_count)
{
}
int layer_updater_cuda::get_dynamic_memobject_count() const
{
return 0;
}
std::vector<cuda_linear_buffer_device_smart_ptr> layer_updater_cuda::get_data(const_layer_data_smart_ptr host_data) const
{
std::vector<cuda_linear_buffer_device_smart_ptr> res;
unsigned int part_id = 0;
for(layer_data::const_iterator it = host_data->begin(); it != host_data->end(); ++it, ++part_id)
{
unsigned int single_size = get_data_elem_count(part_id, it->size());
std::vector<float> pack(single_size);
fill_data_for_device(part_id, &(*it->begin()), &(*pack.begin()), single_size);
res.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(
&(*pack.begin()),
pack.size() * sizeof(float))));
}
return res;
}
std::vector<const_cuda_linear_buffer_device_smart_ptr> layer_updater_cuda::get_learning_rate(const_layer_data_smart_ptr host_learning_rate) const
{
std::vector<const_cuda_linear_buffer_device_smart_ptr> res;
unsigned int part_id = 0;
for(layer_data::const_iterator it = host_learning_rate->begin(); it != host_learning_rate->end(); ++it, ++part_id)
{
unsigned int single_size = get_data_elem_count(part_id, it->size());
std::vector<float> pack(single_size);
fill_data_for_device(part_id, &(*it->begin()), &(*pack.begin()), single_size);
res.push_back(const_cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(
&(*pack.begin()),
pack.size() * sizeof(float))));
}
return res;
}
void layer_updater_cuda::get_data_from_device(const std::vector<cuda_linear_buffer_device_smart_ptr>& device_data, layer_data_smart_ptr host_data) const
{
unsigned int part_id = 0;
for(layer_data::iterator it = host_data->begin(); it != host_data->end(); ++it, ++part_id)
{
unsigned int single_size = get_data_elem_count(part_id, it->size());
cuda_linear_buffer_device_smart_ptr src = device_data[part_id];
std::vector<float> pack(src->get_size() / sizeof(float));
cuda_safe_call(hipMemcpy(&(*pack.begin()), *src, pack.size() * sizeof(float), hipMemcpyDeviceToHost));
fill_data_for_host(part_id, &(*pack.begin()), &(*it->begin()), single_size);
}
}
unsigned int layer_updater_cuda::get_data_elem_count(unsigned int part_id, unsigned int source_elem_count) const
{
return source_elem_count;
}
void layer_updater_cuda::fill_data_for_device(
unsigned int part_id,
const float * src,
float * dst,
unsigned int count) const
{
std::copy(src, src + count, dst);
}
void layer_updater_cuda::fill_data_for_host(
unsigned int part_id,
const float * src,
float * dst,
unsigned int count) const
{
std::copy(src, src + count, dst);
}
}
}
| d4d70f93c4c738c4dbc288686da0a3af48585200.cu | /*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "layer_updater_cuda.h"
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
namespace nnforge
{
namespace cuda
{
layer_updater_cuda::layer_updater_cuda()
{
}
layer_updater_cuda::~layer_updater_cuda()
{
}
void layer_updater_cuda::configure(
const layer_configuration_specific& input_configuration_specific,
const layer_configuration_specific& output_configuration_specific,
const_layer_smart_ptr layer_schema,
cuda_running_configuration_const_smart_ptr cuda_config,
bool backprop_required)
{
this->layer_schema = layer_schema;
this->input_configuration_specific = input_configuration_specific;
this->output_configuration_specific = output_configuration_specific;
this->cuda_config = cuda_config;
this->backprop_required = backprop_required;
input_elem_count_per_entry = input_configuration_specific.get_neuron_count();
output_elem_count_per_entry = output_configuration_specific.get_neuron_count();
input_elem_count_per_feature_map = input_configuration_specific.get_neuron_count_per_feature_map();
output_elem_count_per_feature_map = output_configuration_specific.get_neuron_count_per_feature_map();
updater_configured();
}
void layer_updater_cuda::updater_configured()
{
}
std::vector<size_t> layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const
{
return std::vector<size_t>();
}
std::vector<unsigned int> layer_updater_cuda::get_linear_addressing_through_texture_per_entry() const
{
return std::vector<unsigned int>();
}
void layer_updater_cuda::update_buffer_configuration(buffer_cuda_size_configuration& buffer_configuration) const
{
std::vector<size_t> per_entry_sizes = get_sizes_of_additional_buffers_per_entry();
for(std::vector<size_t>::const_iterator it = per_entry_sizes.begin(); it != per_entry_sizes.end(); ++it)
buffer_configuration.add_per_entry_buffer(*it);
std::vector<size_t> fixed_sized = get_sizes_of_additional_buffers_fixed();
for(std::vector<size_t>::const_iterator it = fixed_sized.begin(); it != fixed_sized.end(); ++it)
buffer_configuration.add_constant_buffer(*it);
buffer_configuration.add_per_entry_buffer(output_elem_count_per_entry * sizeof(float));
if (backprop_required && !is_in_place_backprop())
buffer_configuration.add_per_entry_buffer(input_elem_count_per_entry * sizeof(float));
std::vector<unsigned int> tex_per_entry = get_linear_addressing_through_texture_per_entry();
for(std::vector<unsigned int>::const_iterator it = tex_per_entry.begin(); it != tex_per_entry.end(); ++it)
buffer_configuration.add_per_entry_linear_addressing_through_texture(*it);
}
void layer_updater_cuda::update_buffer_configuration(
buffer_cuda_size_configuration& buffer_configuration,
unsigned int updater_entry_count) const
{
std::vector<size_t> per_entry_sizes = get_sizes_of_additional_buffers_per_entry();
for(std::vector<size_t>::const_iterator it = per_entry_sizes.begin(); it != per_entry_sizes.end(); ++it)
buffer_configuration.add_constant_buffer(*it * updater_entry_count);
std::vector<size_t> fixed_sizes = get_sizes_of_additional_buffers_fixed();
for(std::vector<size_t>::const_iterator it = fixed_sizes.begin(); it != fixed_sizes.end(); ++it)
buffer_configuration.add_constant_buffer(*it);
buffer_configuration.add_constant_buffer(output_elem_count_per_entry * sizeof(float) * updater_entry_count);
if (backprop_required && !is_in_place_backprop())
buffer_configuration.add_constant_buffer(input_elem_count_per_entry * sizeof(float) * updater_entry_count);
}
layer_updater_cuda::buffer_set layer_updater_cuda::allocate_all_buffers(unsigned int max_entry_count)
{
buffer_set res;
set_max_entry_count(max_entry_count);
std::vector<size_t> per_entry_sizes = get_sizes_of_additional_buffers_per_entry();
for(std::vector<size_t>::const_iterator it = per_entry_sizes.begin(); it != per_entry_sizes.end(); ++it)
res.additional_buffers.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(*it * max_entry_count)));
std::vector<size_t> fixed_sizes = get_sizes_of_additional_buffers_fixed();
for(std::vector<size_t>::const_iterator it = fixed_sizes.begin(); it != fixed_sizes.end(); ++it)
res.additional_buffers.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(*it)));
fill_additional_buffers(res.additional_buffers);
{
size_t sz = output_elem_count_per_entry * sizeof(float) * max_entry_count;
res.output_neurons_buffer = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(sz));
}
if (backprop_required && !is_in_place_backprop())
{
size_t sz = input_elem_count_per_entry * sizeof(float) * max_entry_count;
res.input_errors_buffer = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(sz));
}
res.dynamic_memobjects.resize(get_dynamic_memobject_count());
return res;
}
void layer_updater_cuda::enqueue_update_weights(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& gradient,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
}
void layer_updater_cuda::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
}
std::vector<size_t> layer_updater_cuda::get_sizes_of_additional_buffers_fixed() const
{
return std::vector<size_t>();
}
void layer_updater_cuda::set_max_entry_count(unsigned int max_entry_count)
{
}
int layer_updater_cuda::get_dynamic_memobject_count() const
{
return 0;
}
std::vector<cuda_linear_buffer_device_smart_ptr> layer_updater_cuda::get_data(const_layer_data_smart_ptr host_data) const
{
std::vector<cuda_linear_buffer_device_smart_ptr> res;
unsigned int part_id = 0;
for(layer_data::const_iterator it = host_data->begin(); it != host_data->end(); ++it, ++part_id)
{
unsigned int single_size = get_data_elem_count(part_id, it->size());
std::vector<float> pack(single_size);
fill_data_for_device(part_id, &(*it->begin()), &(*pack.begin()), single_size);
res.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(
&(*pack.begin()),
pack.size() * sizeof(float))));
}
return res;
}
std::vector<const_cuda_linear_buffer_device_smart_ptr> layer_updater_cuda::get_learning_rate(const_layer_data_smart_ptr host_learning_rate) const
{
std::vector<const_cuda_linear_buffer_device_smart_ptr> res;
unsigned int part_id = 0;
for(layer_data::const_iterator it = host_learning_rate->begin(); it != host_learning_rate->end(); ++it, ++part_id)
{
unsigned int single_size = get_data_elem_count(part_id, it->size());
std::vector<float> pack(single_size);
fill_data_for_device(part_id, &(*it->begin()), &(*pack.begin()), single_size);
res.push_back(const_cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(
&(*pack.begin()),
pack.size() * sizeof(float))));
}
return res;
}
void layer_updater_cuda::get_data_from_device(const std::vector<cuda_linear_buffer_device_smart_ptr>& device_data, layer_data_smart_ptr host_data) const
{
unsigned int part_id = 0;
for(layer_data::iterator it = host_data->begin(); it != host_data->end(); ++it, ++part_id)
{
unsigned int single_size = get_data_elem_count(part_id, it->size());
cuda_linear_buffer_device_smart_ptr src = device_data[part_id];
std::vector<float> pack(src->get_size() / sizeof(float));
cuda_safe_call(cudaMemcpy(&(*pack.begin()), *src, pack.size() * sizeof(float), cudaMemcpyDeviceToHost));
fill_data_for_host(part_id, &(*pack.begin()), &(*it->begin()), single_size);
}
}
unsigned int layer_updater_cuda::get_data_elem_count(unsigned int part_id, unsigned int source_elem_count) const
{
return source_elem_count;
}
void layer_updater_cuda::fill_data_for_device(
unsigned int part_id,
const float * src,
float * dst,
unsigned int count) const
{
std::copy(src, src + count, dst);
}
void layer_updater_cuda::fill_data_for_host(
unsigned int part_id,
const float * src,
float * dst,
unsigned int count) const
{
std::copy(src, src + count, dst);
}
}
}
|
0ff94d8f6637009baebfa85ac3e487d21b1103fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../gpu_utils/runtime.h"
#include "GLIFCurrExpNeuronsKernel.h"
__global__ void find_lifcurrexp_neurons(GLIFCurrExpNeurons *d_neurons, int num, int start_id)
{
__shared__ int active_table_t[MAXBLOCKSIZE];
__shared__ volatile int active_cnt;
if (threadIdx.x == 0) {
active_cnt = 0;
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int idx = tid; idx < num; idx += blockDim.x * gridDim.x) {
int test_loc = 0;
bool actived = d_neurons->p_refrac_step[idx] <= 0;
if (actived) {
test_loc = atomicAdd((int*)&active_cnt, 1);
if (test_loc < MAXBLOCKSIZE) {
active_table_t[test_loc] = idx;
actived = false;
}
} else {
gNeuronInput[start_id + idx] = 0;
gNeuronInput_I[start_id + idx] = 0;
d_neurons->p_refrac_step[idx] = d_neurons->p_refrac_step[idx] - 1;
}
__syncthreads();
if (active_cnt >= MAXBLOCKSIZE) {
commit2globalTable(active_table_t, MAXBLOCKSIZE, gActiveTable, &gActiveTableSize, 0);
if (threadIdx.x == 0) {
active_cnt = 0;
}
}
__syncthreads();
if (actived) {
test_loc = atomicAdd((int*)&active_cnt, 1);
if (test_loc < MAXBLOCKSIZE) {
active_table_t[test_loc] = idx;
actived = false;
}
}
__syncthreads();
if (active_cnt >= MAXBLOCKSIZE) {
commit2globalTable(active_table_t, MAXBLOCKSIZE, gActiveTable, &gActiveTableSize, 0);
if (threadIdx.x == 0) {
active_cnt = 0;
}
}
__syncthreads();
if (active_cnt > 0) {
commit2globalTable(active_table_t, active_cnt, gActiveTable, &gActiveTableSize, 0);
if (threadIdx.x == 0) {
active_cnt = 0;
}
}
__syncthreads();
}
}
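// Integrates the LIF membrane equation with exponentially decaying current synapses for every active
// neuron, detects threshold crossings, resets fired neurons and records them in the global fired table
// (again staged through a shared-memory buffer of MAXBLOCKSIZE entries).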
__global__ void update_lifcurrexp_neurons(GLIFCurrExpNeurons *d_neurons, int num, int start_id)
{
__shared__ int fire_table_t[MAXBLOCKSIZE];
__shared__ volatile int fire_cnt;
if (threadIdx.x == 0) {
fire_cnt = 0;
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int idx = tid; idx < gActiveTableSize; idx +=blockDim.x*gridDim.x) {
bool fired = false;
int test_loc = 0;
int nid = gActiveTable[idx];
int gnid = start_id + nid;
d_neurons->p_vm[nid] = d_neurons->p_Cm[nid] * d_neurons->p_vm[nid] + d_neurons->p_v_tmp[nid] + d_neurons->p_i_E[nid] * d_neurons->p_C_E[nid] + d_neurons->p_i_I[nid] * d_neurons->p_C_I[nid];
d_neurons->p_i_E[nid] *= d_neurons->p_CE[nid];
d_neurons->p_i_I[nid] *= d_neurons->p_CI[nid];
fired = d_neurons->p_vm[nid] >= d_neurons->p_v_thresh[nid];
gFireCount[gnid] += fired;
if (fired) {
test_loc = atomicAdd((int*)&fire_cnt, 1);
if (test_loc < MAXBLOCKSIZE) {
fire_table_t[test_loc] = gnid;
fired = false;
}
d_neurons->p_refrac_step[nid] = d_neurons->p_refrac_time[nid] - 1;
d_neurons->p_vm[nid] = d_neurons->p_v_reset[nid];
} else {
gXInput[gnid] += gNeuronInput[gnid] + gNeuronInput_I[gnid];
d_neurons->p_i_E[nid] += gNeuronInput[gnid];
d_neurons->p_i_I[nid] += gNeuronInput_I[gnid];
}
gNeuronInput[gnid] = 0;
gNeuronInput_I[gnid] = 0;
__syncthreads();
if (fire_cnt >= MAXBLOCKSIZE) {
commit2globalTable(fire_table_t, MAXBLOCKSIZE, gFiredTable, &gFiredTableSizes[gCurrentIdx], gFiredTableCap*gCurrentIdx);
if (threadIdx.x == 0) {
fire_cnt = 0;
}
}
__syncthreads();
if (fired) {
test_loc = atomicAdd((int*)&fire_cnt, 1);
if (test_loc < MAXBLOCKSIZE) {
fire_table_t[test_loc] = gnid;
fired = false;
}
}
__syncthreads();
if (fire_cnt >= MAXBLOCKSIZE) {
commit2globalTable(fire_table_t, MAXBLOCKSIZE, gFiredTable, &gFiredTableSizes[gCurrentIdx], gFiredTableCap*gCurrentIdx);
if (threadIdx.x == 0) {
fire_cnt = 0;
}
}
__syncthreads();
if (fire_cnt > 0) {
commit2globalTable(fire_table_t, fire_cnt, gFiredTable, &gFiredTableSizes[gCurrentIdx], gFiredTableCap*gCurrentIdx);
if (threadIdx.x == 0) {
fire_cnt = 0;
}
}
}
}
| 0ff94d8f6637009baebfa85ac3e487d21b1103fa.cu |
#include "../../gpu_utils/runtime.h"
#include "GLIFCurrExpNeuronsKernel.h"
__global__ void find_lifcurrexp_neurons(GLIFCurrExpNeurons *d_neurons, int num, int start_id)
{
__shared__ int active_table_t[MAXBLOCKSIZE];
__shared__ volatile int active_cnt;
if (threadIdx.x == 0) {
active_cnt = 0;
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int idx = tid; idx < num; idx += blockDim.x * gridDim.x) {
int test_loc = 0;
bool actived = d_neurons->p_refrac_step[idx] <= 0;
if (actived) {
test_loc = atomicAdd((int*)&active_cnt, 1);
if (test_loc < MAXBLOCKSIZE) {
active_table_t[test_loc] = idx;
actived = false;
}
} else {
gNeuronInput[start_id + idx] = 0;
gNeuronInput_I[start_id + idx] = 0;
d_neurons->p_refrac_step[idx] = d_neurons->p_refrac_step[idx] - 1;
}
__syncthreads();
if (active_cnt >= MAXBLOCKSIZE) {
commit2globalTable(active_table_t, MAXBLOCKSIZE, gActiveTable, &gActiveTableSize, 0);
if (threadIdx.x == 0) {
active_cnt = 0;
}
}
__syncthreads();
if (actived) {
test_loc = atomicAdd((int*)&active_cnt, 1);
if (test_loc < MAXBLOCKSIZE) {
active_table_t[test_loc] = idx;
actived = false;
}
}
__syncthreads();
if (active_cnt >= MAXBLOCKSIZE) {
commit2globalTable(active_table_t, MAXBLOCKSIZE, gActiveTable, &gActiveTableSize, 0);
if (threadIdx.x == 0) {
active_cnt = 0;
}
}
__syncthreads();
if (active_cnt > 0) {
commit2globalTable(active_table_t, active_cnt, gActiveTable, &gActiveTableSize, 0);
if (threadIdx.x == 0) {
active_cnt = 0;
}
}
__syncthreads();
}
}
__global__ void update_lifcurrexp_neurons(GLIFCurrExpNeurons *d_neurons, int num, int start_id)
{
__shared__ int fire_table_t[MAXBLOCKSIZE];
__shared__ volatile int fire_cnt;
if (threadIdx.x == 0) {
fire_cnt = 0;
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int idx = tid; idx < gActiveTableSize; idx +=blockDim.x*gridDim.x) {
bool fired = false;
int test_loc = 0;
int nid = gActiveTable[idx];
int gnid = start_id + nid;
d_neurons->p_vm[nid] = d_neurons->p_Cm[nid] * d_neurons->p_vm[nid] + d_neurons->p_v_tmp[nid] + d_neurons->p_i_E[nid] * d_neurons->p_C_E[nid] + d_neurons->p_i_I[nid] * d_neurons->p_C_I[nid];
d_neurons->p_i_E[nid] *= d_neurons->p_CE[nid];
d_neurons->p_i_I[nid] *= d_neurons->p_CI[nid];
fired = d_neurons->p_vm[nid] >= d_neurons->p_v_thresh[nid];
gFireCount[gnid] += fired;
if (fired) {
test_loc = atomicAdd((int*)&fire_cnt, 1);
if (test_loc < MAXBLOCKSIZE) {
fire_table_t[test_loc] = gnid;
fired = false;
}
d_neurons->p_refrac_step[nid] = d_neurons->p_refrac_time[nid] - 1;
d_neurons->p_vm[nid] = d_neurons->p_v_reset[nid];
} else {
gXInput[gnid] += gNeuronInput[gnid] + gNeuronInput_I[gnid];
d_neurons->p_i_E[nid] += gNeuronInput[gnid];
d_neurons->p_i_I[nid] += gNeuronInput_I[gnid];
}
gNeuronInput[gnid] = 0;
gNeuronInput_I[gnid] = 0;
__syncthreads();
if (fire_cnt >= MAXBLOCKSIZE) {
commit2globalTable(fire_table_t, MAXBLOCKSIZE, gFiredTable, &gFiredTableSizes[gCurrentIdx], gFiredTableCap*gCurrentIdx);
if (threadIdx.x == 0) {
fire_cnt = 0;
}
}
__syncthreads();
if (fired) {
test_loc = atomicAdd((int*)&fire_cnt, 1);
if (test_loc < MAXBLOCKSIZE) {
fire_table_t[test_loc] = gnid;
fired = false;
}
}
__syncthreads();
if (fire_cnt >= MAXBLOCKSIZE) {
commit2globalTable(fire_table_t, MAXBLOCKSIZE, gFiredTable, &gFiredTableSizes[gCurrentIdx], gFiredTableCap*gCurrentIdx);
if (threadIdx.x == 0) {
fire_cnt = 0;
}
}
__syncthreads();
if (fire_cnt > 0) {
commit2globalTable(fire_table_t, fire_cnt, gFiredTable, &gFiredTableSizes[gCurrentIdx], gFiredTableCap*gCurrentIdx);
if (threadIdx.x == 0) {
fire_cnt = 0;
}
}
}
}
|
d1efd4de8bd78e73afe1d699786bb5511ad292b1.hip | // !!! This is a file automatically generated by hipify!!!
//
// by Jan Eric Kyprianidis <www.kyprianidis.com>
// Copyright (C) 2010-2012 Computer Graphics Systems Group at the
// Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
#include <oz/stgauss3.h>
#include <oz/generate.h>
#include <oz/gpu_sampler2.h>
#include <oz/stintrk2.h>
#include <oz/filter_gauss.h>
namespace oz {
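// Per-pixel generator: traces the streamline of the structure-tensor field through the pixel
// (st3_int) and accumulates a 1D Gaussian of the source samples along it (filter_gauss_1d),
// i.e. flow-guided Gaussian smoothing.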
template<typename T, int order, bool adaptive, class SRC, class ST> struct StGauss3Filter : generator<T> {
unsigned w_, h_;
const SRC src_;
const ST st_;
float sigma_;
float step_size_;
StGauss3Filter( unsigned w, unsigned h, const SRC& src, const ST& st, float sigma, float step_size )
: w_(w), h_(h), src_(src), st_(st), sigma_(sigma), step_size_(step_size) {}
inline __device__ T operator()( int ix, int iy ) const {
float2 p0 = make_float2(ix + 0.5f, iy + 0.5f);
filter_gauss_1d<T,SRC> f(src_, sigma_);
st3_int<ST,filter_gauss_1d<T,SRC>,order,adaptive>(p0, st_, f, w_, h_, step_size_);
return f.result();
}
};
template<typename T, int order>
gpu_image filterTO( const gpu_image& src, bool src_linear,
const gpu_image& st, bool st_linear,
float sigma, bool adaptive,
float step_size )
{
if (adaptive) {
return generate(src.size(), StGauss3Filter<T, order, true, gpu_sampler<T,0>, gpu_sampler<float3,1> >(
src.w(), src.h(),
gpu_sampler<T,0>(src, src_linear? hipFilterModeLinear : hipFilterModePoint),
gpu_sampler<float3,1>(st, st_linear? hipFilterModeLinear : hipFilterModePoint),
sigma, step_size));
} else {
return generate(src.size(), StGauss3Filter<T, order, false, gpu_sampler<T,0>, gpu_sampler<float3,1> >(
src.w(), src.h(),
gpu_sampler<T,0>(src, src_linear? hipFilterModeLinear : hipFilterModePoint),
gpu_sampler<float3,1>(st, st_linear? hipFilterModeLinear : hipFilterModePoint),
sigma, step_size));
}
}
template<typename T>
gpu_image filterT( const gpu_image& src, bool src_linear,
const gpu_image& st, bool st_linear,
float sigma, bool adaptive,
int order, float step_size )
{
switch (order) {
case 1: return filterTO<T,1>(src, src_linear, st, st_linear, sigma, adaptive, step_size);
case 2: return filterTO<T,2>(src, src_linear, st, st_linear, sigma, adaptive, step_size);
default:
OZ_X() << "Invalid order!";
}
}
gpu_image stgauss3_filter_( const gpu_image& src, const gpu_image& st,
float sigma, bool adaptive, bool src_linear, bool st_linear,
int order, float step_size )
{
if (sigma <= 0) return src;
switch (src.format()) {
case FMT_FLOAT3: return filterT<float3>(src, src_linear, st, st_linear, sigma, adaptive, order, step_size);
default:
OZ_INVALID_FORMAT();
}
}
}
| d1efd4de8bd78e73afe1d699786bb5511ad292b1.cu | //
// by Jan Eric Kyprianidis <www.kyprianidis.com>
// Copyright (C) 2010-2012 Computer Graphics Systems Group at the
// Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
#include <oz/stgauss3.h>
#include <oz/generate.h>
#include <oz/gpu_sampler2.h>
#include <oz/stintrk2.h>
#include <oz/filter_gauss.h>
namespace oz {
template<typename T, int order, bool adaptive, class SRC, class ST> struct StGauss3Filter : generator<T> {
unsigned w_, h_;
const SRC src_;
const ST st_;
float sigma_;
float step_size_;
StGauss3Filter( unsigned w, unsigned h, const SRC& src, const ST& st, float sigma, float step_size )
: w_(w), h_(h), src_(src), st_(st), sigma_(sigma), step_size_(step_size) {}
inline __device__ T operator()( int ix, int iy ) const {
float2 p0 = make_float2(ix + 0.5f, iy + 0.5f);
filter_gauss_1d<T,SRC> f(src_, sigma_);
st3_int<ST,filter_gauss_1d<T,SRC>,order,adaptive>(p0, st_, f, w_, h_, step_size_);
return f.result();
}
};
template<typename T, int order>
gpu_image filterTO( const gpu_image& src, bool src_linear,
const gpu_image& st, bool st_linear,
float sigma, bool adaptive,
float step_size )
{
if (adaptive) {
return generate(src.size(), StGauss3Filter<T, order, true, gpu_sampler<T,0>, gpu_sampler<float3,1> >(
src.w(), src.h(),
gpu_sampler<T,0>(src, src_linear? cudaFilterModeLinear : cudaFilterModePoint),
gpu_sampler<float3,1>(st, st_linear? cudaFilterModeLinear : cudaFilterModePoint),
sigma, step_size));
} else {
return generate(src.size(), StGauss3Filter<T, order, false, gpu_sampler<T,0>, gpu_sampler<float3,1> >(
src.w(), src.h(),
gpu_sampler<T,0>(src, src_linear? cudaFilterModeLinear : cudaFilterModePoint),
gpu_sampler<float3,1>(st, st_linear? cudaFilterModeLinear : cudaFilterModePoint),
sigma, step_size));
}
}
template<typename T>
gpu_image filterT( const gpu_image& src, bool src_linear,
const gpu_image& st, bool st_linear,
float sigma, bool adaptive,
int order, float step_size )
{
switch (order) {
case 1: return filterTO<T,1>(src, src_linear, st, st_linear, sigma, adaptive, step_size);
case 2: return filterTO<T,2>(src, src_linear, st, st_linear, sigma, adaptive, step_size);
default:
OZ_X() << "Invalid order!";
}
}
gpu_image stgauss3_filter_( const gpu_image& src, const gpu_image& st,
float sigma, bool adaptive, bool src_linear, bool st_linear,
int order, float step_size )
{
if (sigma <= 0) return src;
switch (src.format()) {
case FMT_FLOAT3: return filterT<float3>(src, src_linear, st, st_linear, sigma, adaptive, order, step_size);
default:
OZ_INVALID_FORMAT();
}
}
}
|
e4d68cfcc0737790eb43bfaa5dd38718d83a07a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/SpatialClassNLLCriterion.hip"
#else
void THNN_(SpatialClassNLLCriterion_shapeCheck)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *weights)
{
TORCH_CHECK(!target->is_empty() && target->dim() == 3, 1,
"only batches of spatial targets supported (non-empty 3D tensors)" \
" but got targets of size: : ", target->sizes());
TORCH_CHECK(!input->is_empty() && input->dim() == 4, 2,
"only batches of spatial inputs supported (non-empty 4D tensors), " \
"but got input of size: ", input->sizes());
if (THCTensor_(size)(state, input, 0) != THCIndexTensor_(size)(state, target, 0) ||
THCTensor_(size)(state, input, 2) != THCIndexTensor_(size)(state, target, 1) ||
THCTensor_(size)(state, input, 3) != THCIndexTensor_(size)(state, target, 2)) {
THCDescBuff input_size = THCTensor_(sizeDesc)(state, input);
THCDescBuff target_size = THCIndexTensor_(sizeDesc)(state, target);
THError("input and target batch or spatial sizes don't match: target %s, input %s",
target_size.str, input_size.str);
}
if (weights && THCTensor_(nElement)(state, weights) != THCTensor_(size)(state, input, 1)) {
THError("weight tensor should be defined either for all or no classes");
}
}
static void THNN_(SpatialClassNLLCriterion_gradOutput_no_reduce_shapeCheck)(
THCState *state,
THCTensor *gradOutput,
THCIndexTensor *target)
{
TORCH_CHECK(!gradOutput->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 3, 2,
"Expected non-empty dimension 3 but got gradOutput of size: ", gradOutput->sizes());
if (THCTensor_(size)(state, gradOutput, 0) != THCIndexTensor_(size)(state, target, 0) ||
THCTensor_(size)(state, gradOutput, 1) != THCIndexTensor_(size)(state, target, 1) ||
THCTensor_(size)(state, gradOutput, 2) != THCIndexTensor_(size)(state, target, 2)) {
THCDescBuff gradOutput_size = THCTensor_(sizeDesc)(state, gradOutput);
THCDescBuff target_size = THCIndexTensor_(sizeDesc)(state, target);
THError("gradOutput sizes don't match target sizes: target %s, gradOutput %s",
target_size.str, gradOutput_size.str);
}
}
void THNN_(SpatialClassNLLCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
int64_t reduction,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index)
{
THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights);
THCTensor_(resize1d)(state, output, 1);
THCTensor_(resize1d)(state, total_weight, 1);
if (weights)
THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight);
else
THCUNN_assertSameGPU(state, 4, input, target, output, total_weight);
if (reduction == Reduction::None) {
int64_t batch_size = THCTensor_(size)(state, input, 0);
int64_t H = THCTensor_(size)(state, input, 2);
int64_t W = THCTensor_(size)(state, input, 3);
THCTensor_(resize3d)(state, output, batch_size, H, W);
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
int64_t count = batch_size * H * W;
hipLaunchKernelGGL(( SpatialClassNLLCriterion_updateOutput_no_reduce_kernel<scalar_t>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
count,
toDeviceTensor<scalar_t, 4>(state, input),
toDeviceTensor<THCIndex_t, 3>(state, target),
toDeviceTensor<scalar_t, 3>(state, output),
weights ? THCTensor_(data)(state, weights) : NULL,
ignore_index);
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
scalar_t *input_data = THCTensor_(data)(state, input);
scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
scalar_t *output_data = THCTensor_(data)(state, output);
scalar_t *total_weight_data = THCTensor_(data)(state, total_weight);
THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0);
THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size;
int blocks_per_sample = GET_BLOCKS(map_nelem) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
THCTensor_(fill)(state, output, ScalarConvert<int, scalar_t>::to(0));
THCTensor_(fill)(state, total_weight, ScalarConvert<int, scalar_t>::to(0));
hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
output_data,
total_weight_data,
input_data,
target_data,
weights_data,
reduction == Reduction::Mean,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
THCTensor_(size)(state, input, 2) * THCTensor_(size)(state, input, 3),
blocks_per_sample,
ignore_index
);
// THCudaCheck(hipGetLastError());
if (reduction == Reduction::Mean) {
hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_sizeAverage_kernel), dim3(1), dim3(1), 0, THCState_getCurrentStream(state),
output_data, total_weight_data
);
// THCudaCheck(hipGetLastError());
}
if (weights)
THCTensor_(free)(state, weights);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
}
void THNN_(SpatialClassNLLCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
int64_t reduction,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index)
{
THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4,
"gradInput must be contiguous");
if (weights)
THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight);
else
THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight);
if (reduction == Reduction::None) {
THNN_(SpatialClassNLLCriterion_gradOutput_no_reduce_shapeCheck)(
state,
gradOutput,
target);
int64_t batch_size = THCTensor_(size)(state, input, 0);
int64_t H = THCTensor_(size)(state, input, 2);
int64_t W = THCTensor_(size)(state, input, 3);
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
int64_t count = batch_size * H * W;
hipLaunchKernelGGL(( SpatialClassNLLCriterion_updateGradInput_no_reduce_kernel<scalar_t>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
count,
toDeviceTensor<THCIndex_t, 3>(state, target),
toDeviceTensor<scalar_t, 3>(state, gradOutput),
toDeviceTensor<scalar_t, 4>(state, gradInput),
weights ? THCTensor_(data)(state, weights) : NULL,
ignore_index);
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput);
scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
scalar_t *gradInput_data = THCTensor_(data)(state, gradInput);
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
scalar_t *total_weight_data = THCTensor_(data)(state, total_weight);
THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0);
THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size;
int blocks_per_sample = GET_BLOCKS(map_nelem) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_updateGradInput_kernel)
, dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
gradInput_data,
gradOutput_data,
target_data,
weights_data,
total_weight_data,
reduction == Reduction::Mean,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
THCTensor_(size)(state, input, 2) *THCTensor_(size)(state, input, 3),
blocks_per_sample,
ignore_index
);
// THCudaCheck(hipGetLastError());
if (weights)
THCTensor_(free)(state, weights);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
}
#endif
| e4d68cfcc0737790eb43bfaa5dd38718d83a07a6.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/SpatialClassNLLCriterion.cu"
#else
void THNN_(SpatialClassNLLCriterion_shapeCheck)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *weights)
{
TORCH_CHECK(!target->is_empty() && target->dim() == 3, 1,
"only batches of spatial targets supported (non-empty 3D tensors)" \
" but got targets of size: : ", target->sizes());
TORCH_CHECK(!input->is_empty() && input->dim() == 4, 2,
"only batches of spatial inputs supported (non-empty 4D tensors), " \
"but got input of size: ", input->sizes());
if (THCTensor_(size)(state, input, 0) != THCIndexTensor_(size)(state, target, 0) ||
THCTensor_(size)(state, input, 2) != THCIndexTensor_(size)(state, target, 1) ||
THCTensor_(size)(state, input, 3) != THCIndexTensor_(size)(state, target, 2)) {
THCDescBuff input_size = THCTensor_(sizeDesc)(state, input);
THCDescBuff target_size = THCIndexTensor_(sizeDesc)(state, target);
THError("input and target batch or spatial sizes don't match: target %s, input %s",
target_size.str, input_size.str);
}
if (weights && THCTensor_(nElement)(state, weights) != THCTensor_(size)(state, input, 1)) {
THError("weight tensor should be defined either for all or no classes");
}
}
static void THNN_(SpatialClassNLLCriterion_gradOutput_no_reduce_shapeCheck)(
THCState *state,
THCTensor *gradOutput,
THCIndexTensor *target)
{
TORCH_CHECK(!gradOutput->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 3, 2,
"Expected non-empty dimension 3 but got gradOutput of size: ", gradOutput->sizes());
if (THCTensor_(size)(state, gradOutput, 0) != THCIndexTensor_(size)(state, target, 0) ||
THCTensor_(size)(state, gradOutput, 1) != THCIndexTensor_(size)(state, target, 1) ||
THCTensor_(size)(state, gradOutput, 2) != THCIndexTensor_(size)(state, target, 2)) {
THCDescBuff gradOutput_size = THCTensor_(sizeDesc)(state, gradOutput);
THCDescBuff target_size = THCIndexTensor_(sizeDesc)(state, target);
THError("gradOutput sizes don't match target sizes: target %s, gradOutput %s",
target_size.str, gradOutput_size.str);
}
}
void THNN_(SpatialClassNLLCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
int64_t reduction,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index)
{
THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights);
THCTensor_(resize1d)(state, output, 1);
THCTensor_(resize1d)(state, total_weight, 1);
if (weights)
THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight);
else
THCUNN_assertSameGPU(state, 4, input, target, output, total_weight);
if (reduction == Reduction::None) {
int64_t batch_size = THCTensor_(size)(state, input, 0);
int64_t H = THCTensor_(size)(state, input, 2);
int64_t W = THCTensor_(size)(state, input, 3);
THCTensor_(resize3d)(state, output, batch_size, H, W);
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
int64_t count = batch_size * H * W;
SpatialClassNLLCriterion_updateOutput_no_reduce_kernel<scalar_t>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
count,
toDeviceTensor<scalar_t, 4>(state, input),
toDeviceTensor<THCIndex_t, 3>(state, target),
toDeviceTensor<scalar_t, 3>(state, output),
weights ? THCTensor_(data)(state, weights) : NULL,
ignore_index);
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
scalar_t *input_data = THCTensor_(data)(state, input);
scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
scalar_t *output_data = THCTensor_(data)(state, output);
scalar_t *total_weight_data = THCTensor_(data)(state, total_weight);
THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0);
THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size;
int blocks_per_sample = GET_BLOCKS(map_nelem) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
THCTensor_(fill)(state, output, ScalarConvert<int, scalar_t>::to(0));
THCTensor_(fill)(state, total_weight, ScalarConvert<int, scalar_t>::to(0));
cunn_SpatialClassNLLCriterion_updateOutput_kernel<scalar_t, accreal>
<<<total_blocks, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
output_data,
total_weight_data,
input_data,
target_data,
weights_data,
reduction == Reduction::Mean,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
THCTensor_(size)(state, input, 2) * THCTensor_(size)(state, input, 3),
blocks_per_sample,
ignore_index
);
// THCudaCheck(cudaGetLastError());
if (reduction == Reduction::Mean) {
cunn_SpatialClassNLLCriterion_sizeAverage_kernel<<<1, 1, 0, THCState_getCurrentStream(state)>>>(
output_data, total_weight_data
);
// THCudaCheck(cudaGetLastError());
}
if (weights)
THCTensor_(free)(state, weights);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
}
void THNN_(SpatialClassNLLCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
int64_t reduction,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index)
{
THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4,
"gradInput must be contiguous");
if (weights)
THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight);
else
THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight);
if (reduction == Reduction::None) {
THNN_(SpatialClassNLLCriterion_gradOutput_no_reduce_shapeCheck)(
state,
gradOutput,
target);
int64_t batch_size = THCTensor_(size)(state, input, 0);
int64_t H = THCTensor_(size)(state, input, 2);
int64_t W = THCTensor_(size)(state, input, 3);
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
int64_t count = batch_size * H * W;
SpatialClassNLLCriterion_updateGradInput_no_reduce_kernel<scalar_t>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
count,
toDeviceTensor<THCIndex_t, 3>(state, target),
toDeviceTensor<scalar_t, 3>(state, gradOutput),
toDeviceTensor<scalar_t, 4>(state, gradInput),
weights ? THCTensor_(data)(state, weights) : NULL,
ignore_index);
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput);
scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
scalar_t *gradInput_data = THCTensor_(data)(state, gradInput);
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
scalar_t *total_weight_data = THCTensor_(data)(state, total_weight);
THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0);
THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size;
int blocks_per_sample = GET_BLOCKS(map_nelem) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
cunn_SpatialClassNLLCriterion_updateGradInput_kernel
<<<total_blocks, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
gradInput_data,
gradOutput_data,
target_data,
weights_data,
total_weight_data,
reduction == Reduction::Mean,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
THCTensor_(size)(state, input, 2) *THCTensor_(size)(state, input, 3),
blocks_per_sample,
ignore_index
);
// THCudaCheck(cudaGetLastError());
if (weights)
THCTensor_(free)(state, weights);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
}
#endif
|
c994a2efa62810ee923208dba609fe8024e105c0.hip | // !!! This is a file automatically generated by hipify!!!
//Do NOT MODIFY THIS FILE
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "gpuerrors.h"
#include "scan2.h"
//-----------------------------------------------------------------------------
void fill(int* data, int size) { // fill the array with random values in {0, 1, 2}
for (int i=0; i<size; ++i)
data[i] = (int) (rand() % 3);
}
double calc_mse (int* data1, int* data2, int size) { // compute the mean squared error between the two arrays
double mse = 0.0;
int i;
int e = 0;
for (i=0; i<size; i++) {
e = data1[i] - data2[i];
e = e * e;
mse += (double) e;
}
mse = mse / ((double)size);
return mse;
}
//-----------------------------------------------------------------------------
void cpuKernel (int* a, int* c, int n) { // calculate scan algorithm
int i = 0;
int sum = 0;
for (i = 0; i < n; i += 1){
c[i] = sum; // to have exclusive scan
sum += (a[i]);
//c[i] = sum; // to have inclusive scan
}
return;
}
//-----------------------------------------------------------------------------
int main ( int argc, char** argv) {
int* a;
int* c_serial;
int* c;
int m = 5;
int n = 32;
if (argc > 1){
m = atoi(argv[1]);
n = (1 << m);
}
a = (int*)malloc(n * sizeof(int));
c_serial = (int*)malloc(n * sizeof(int));
c = (int*)malloc(n * sizeof(int));
srand(0);
fill(a, n);
clock_t t0 = clock();
cpuKernel (a, c_serial, n);
clock_t t1 = clock();
clock_t t2 = clock();
gpuKernel (a, c, n);
clock_t t3 = clock();
float mse;
mse = calc_mse( c_serial, c, n );
printf("n=%d\t CPU=%06ld ms GPU=%06ld ms mse=%f\n",n, (t1-t0)/1000, (t3-t2)/1000, mse);
//printf("%d\t%d\t%d\t%d\t%d\t", c[0],c[1],c[2],c[3],c[4]);
free(a);
free(c_serial);
free(c);
return 0;
}
//Do NOT MODIFY THIS FILE
| c994a2efa62810ee923208dba609fe8024e105c0.cu | //Do NOT MODIFY THIS FILE
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "gpuerrors.h"
#include "scan2.h"
//-----------------------------------------------------------------------------
void fill(int* data, int size) { // fill the array with random values in {0, 1, 2}
for (int i=0; i<size; ++i)
data[i] = (int) (rand() % 3);
}
double calc_mse (int* data1, int* data2, int size) { // compute the mean squared error between the two arrays
double mse = 0.0;
int i;
int e = 0;
for (i=0; i<size; i++) {
e = data1[i] - data2[i];
e = e * e;
mse += (double) e;
}
mse = mse / ((double)size);
return mse;
}
//-----------------------------------------------------------------------------
void cpuKernel (int* a, int* c, int n) { // calculate scan algorithm
int i = 0;
int sum = 0;
for (i = 0; i < n; i += 1){
c[i] = sum; // to have exclusive scan
sum += (a[i]);
//c[i] = sum; // to have inclusive scan
}
return;
}
//-----------------------------------------------------------------------------
int main ( int argc, char** argv) {
int* a;
int* c_serial;
int* c;
int m = 5;
int n = 32;
if (argc > 1){
m = atoi(argv[1]);
n = (1 << m);
}
a = (int*)malloc(n * sizeof(int));
c_serial = (int*)malloc(n * sizeof(int));
c = (int*)malloc(n * sizeof(int));
srand(0);
fill(a, n);
clock_t t0 = clock();
cpuKernel (a, c_serial, n);
clock_t t1 = clock();
clock_t t2 = clock();
gpuKernel (a, c, n);
clock_t t3 = clock();
float mse;
mse = calc_mse( c_serial, c, n );
printf("n=%d\t CPU=%06ld ms GPU=%06ld ms mse=%f\n",n, (t1-t0)/1000, (t3-t2)/1000, mse);
//printf("%d\t%d\t%d\t%d\t%d\t", c[0],c[1],c[2],c[3],c[4]);
free(a);
free(c_serial);
free(c);
return 0;
}
//Do NOT MODIFY THIS FILE
|
f06505e31383b877690d62f5e757ac13f1606c2e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sharedmem.cuh"
#include "reduce.h"
#include <stdio.h>
#define square(x) ((x) * (x))
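// Grid-stride partial reduction: each thread sums a strided slice of d_idata and writes its
// partial sum to d_odata[tid]; reduce_gpu_ss does the same for squared values.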
__global__ void reduce_gpu(int size, float* d_idata, float* d_odata) {
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int tt = blockDim.x * gridDim.x;
int i;
float s = 0;
for (i = tid; i < size; i += tt) {
s += d_idata[i];
}
d_odata[tid] = s;
}
__global__ void reduce_gpu_ss(int size, float* d_idata, float* d_odata) {
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int tt = blockDim.x * gridDim.x;
int i;
float s = 0;
for (i = tid; i < size; i += tt) {
s += square(d_idata[i]);
}
d_odata[tid] = s;
}
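// Same grid-stride reduction for interleaved D-dimensional data: each thread keeps D running
// sums in dynamically sized shared memory and writes its D partial sums to d_odata.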
__global__ void reduce_gpu(int size, int D, float* d_idata, float* d_odata) {
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int tt = blockDim.x * gridDim.x;
int i;
int j;
SharedMemory<float> smem;
float* sdata = smem.getPointer();
const int tidib = threadIdx.x;
for (j = 0; j < D; j++) {
sdata[tidib * D + j] = 0;
}
for (i = tid; i < size; i += tt) {
for (j = 0; j < D; j++) {
sdata[tidib * D + j] += d_idata[i * D + j];
}
}
for (j = 0; j < D; j++) {
d_odata[tid * D + j] = sdata[tidib * D + j];
}
}
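// Host wrappers: allocate one partial-sum slot per GPU thread, launch the kernel,
// copy the nb*nt partial sums back, and finish the accumulation on the CPU.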
void reduce(int size, float* d_idata, float& h_sum, int nb, int nt) {
const int tt = nb * nt;
float* d_sum;
hipMalloc((void **) &d_sum, tt * sizeof(float));
float* tmp = (float*) malloc(tt * sizeof(float));
hipLaunchKernelGGL(( reduce_gpu), dim3(nb), dim3(nt) , 0, 0, size, d_idata, d_sum);
hipMemcpy(tmp, d_sum, tt * sizeof(float), hipMemcpyDeviceToHost);
double sum = 0;
for (int i = 0; i < tt; i++) {
sum += tmp[i];
}
h_sum = (float) sum;
hipFree(d_sum);
free(tmp);
}
void reduce_ss(int size, float* d_idata, float& h_sum, int nb, int nt) {
const int tt = nb * nt;
float* d_sum;
hipMalloc((void **) &d_sum, tt * sizeof(float));
float* tmp = (float*) malloc(tt * sizeof(float));
// float* tmp;
// hipHostMalloc((void **) &tmp, tt * sizeof(float));
hipLaunchKernelGGL(( reduce_gpu_ss), dim3(nb), dim3(nt) , 0, 0, size, d_idata, d_sum);
hipMemcpy(tmp, d_sum, tt * sizeof(float), hipMemcpyDeviceToHost);
double sum = 0;
for (int i = 0; i < tt; i++) {
sum += tmp[i];
}
h_sum = (float) sum;
hipFree(d_sum);
// hipHostFree(tmp);
free(tmp);
}
void reduce(int size, int D, float* d_idata, float* h_sum, int nb, int nt) {
const int tt = nb * nt;
float* d_sum;
hipMalloc((void **) &d_sum, tt * D * sizeof(float));
float* tmp = (float*) malloc(tt * D * sizeof(float));
// float* tmp;
// hipHostMalloc((void **) &tmp, tt * D * sizeof(float));
if (nt * D * sizeof(float) >= 16384) {
printf("not enough shared memory!\n");
}
hipLaunchKernelGGL(( reduce_gpu), dim3(nb), dim3(nt), nt * D * sizeof(float) , 0, size, D, d_idata, d_sum);
hipMemcpy(tmp, d_sum, tt * D * sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < D; i++) {
h_sum[i] = 0;
}
for (int i = 0; i < tt; i++) {
for (int j = 0; j < D; j++) {
h_sum[j] += tmp[i * D + j];
}
}
hipFree(d_sum);
// hipHostFree(tmp);
free(tmp);
}
//__global__ void reduce_gpu2(int size, float *d_idata, float *d_odata) {
//
// SharedMemory<float> smem;
// float *sdata = smem.getPointer();
//
// // perform first level of reduction,
// // reading from global memory, writing to shared memory
// int threads_per_block = blockDim.x;
// int blockId = blockIdx.x;
// int numBlocks = gridDim.x;
// unsigned int tid = threadIdx.x;
// unsigned int i = blockId * (threads_per_block * 2) + tid;
// unsigned int gridSize = threads_per_block * 2 * numBlocks;
// sdata[tid] = 0;
//
// // we reduce multiple elements per thread. The number is determined by the
// // number of active thread blocks (via gridSize). More blocks will result
// // in a larger gridSize and therefore fewer elements per thread
// while (i < size) {
// sdata[tid] += d_idata[i] + d_idata[i+threads_per_block];
// i += gridSize;
// }
// __syncthreads();
//
// if (threads_per_block >= 512) {
// if (tid < 256) {
// sdata[tid] += sdata[tid + 256];
// }
// __syncthreads();
// }
// if (threads_per_block >= 256) {
// if (tid < 128) {
// sdata[tid] += sdata[tid + 128];
// }
// __syncthreads();
// }
// if (threads_per_block >= 128) {
// if (tid < 64) {
// sdata[tid] += sdata[tid + 64];
// }
// __syncthreads();
// }
//
//#ifndef __DEVICE_EMULATION__
// // TODO: WHY!??
// if (tid < 32)
//#endif
// {
// if (threads_per_block >= 64) {
// sdata[tid] += sdata[tid + 32];
// __syncthreads();
// }
// if (threads_per_block >= 32) {
// sdata[tid] += sdata[tid + 16];
// __syncthreads();
// }
// if (threads_per_block >= 16) {
// sdata[tid] += sdata[tid + 8];
// __syncthreads();
// }
// if (threads_per_block >= 8) {
// sdata[tid] += sdata[tid + 4];
// __syncthreads();
// }
// if (threads_per_block >= 4) {
// sdata[tid] += sdata[tid + 2];
// __syncthreads();
// }
// if (threads_per_block >= 2) {
// sdata[tid] += sdata[tid + 1];
// __syncthreads();
// }
// }
//
// // write result for this block to global mem
// if (tid == 0) {
// d_odata[blockIdx.x] = sdata[0];
// }
//
//}
//
//void reduce(int size, float *d_idata, float& h_sum, int nb, int nt) {
// int smemSize = nt * sizeof(float);
// //
// // N_BLOCKS
// float* d_sum;
// hipMalloc((void **) &d_sum, nb * sizeof(float));
//
// float* tmp;
// hipHostMalloc((void **) &tmp, nb * sizeof(float));
//
// reduce_gpu2<<< nb, nt, smemSize >>>(size, d_idata, d_sum);
//
// hipMemcpy(tmp, d_sum, nb * sizeof(float), hipMemcpyDeviceToHost);
//
// h_sum = 0;
// for (int i = 0; i < nb; i++) {
// h_sum += tmp[i];
// }
//
// hipFree(d_sum);
// hipHostFree(tmp);
//
//}
| f06505e31383b877690d62f5e757ac13f1606c2e.cu | #include "sharedmem.cuh"
#include "reduce.h"
#include <stdio.h>
#define square(x) ((x) * (x))
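// Grid-stride partial reduction: each thread sums a strided slice of d_idata and writes its
// partial sum to d_odata[tid]; reduce_gpu_ss does the same for squared values.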
__global__ void reduce_gpu(int size, float* d_idata, float* d_odata) {
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int tt = blockDim.x * gridDim.x;
int i;
float s = 0;
for (i = tid; i < size; i += tt) {
s += d_idata[i];
}
d_odata[tid] = s;
}
__global__ void reduce_gpu_ss(int size, float* d_idata, float* d_odata) {
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int tt = blockDim.x * gridDim.x;
int i;
float s = 0;
for (i = tid; i < size; i += tt) {
s += square(d_idata[i]);
}
d_odata[tid] = s;
}
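// Same grid-stride reduction for interleaved D-dimensional data: each thread keeps D running
// sums in dynamically sized shared memory and writes its D partial sums to d_odata.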
__global__ void reduce_gpu(int size, int D, float* d_idata, float* d_odata) {
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int tt = blockDim.x * gridDim.x;
int i;
int j;
SharedMemory<float> smem;
float* sdata = smem.getPointer();
const int tidib = threadIdx.x;
for (j = 0; j < D; j++) {
sdata[tidib * D + j] = 0;
}
for (i = tid; i < size; i += tt) {
for (j = 0; j < D; j++) {
sdata[tidib * D + j] += d_idata[i * D + j];
}
}
for (j = 0; j < D; j++) {
d_odata[tid * D + j] = sdata[tidib * D + j];
}
}
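// Host wrappers: allocate one partial-sum slot per GPU thread, launch the kernel,
// copy the nb*nt partial sums back, and finish the accumulation on the CPU.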
void reduce(int size, float* d_idata, float& h_sum, int nb, int nt) {
const int tt = nb * nt;
float* d_sum;
cudaMalloc((void **) &d_sum, tt * sizeof(float));
float* tmp = (float*) malloc(tt * sizeof(float));
reduce_gpu<<< nb, nt >>>(size, d_idata, d_sum);
cudaMemcpy(tmp, d_sum, tt * sizeof(float), cudaMemcpyDeviceToHost);
double sum = 0;
for (int i = 0; i < tt; i++) {
sum += tmp[i];
}
h_sum = (float) sum;
cudaFree(d_sum);
free(tmp);
}
void reduce_ss(int size, float* d_idata, float& h_sum, int nb, int nt) {
const int tt = nb * nt;
float* d_sum;
cudaMalloc((void **) &d_sum, tt * sizeof(float));
float* tmp = (float*) malloc(tt * sizeof(float));
// float* tmp;
// cudaMallocHost((void **) &tmp, tt * sizeof(float));
reduce_gpu_ss<<< nb, nt >>>(size, d_idata, d_sum);
cudaMemcpy(tmp, d_sum, tt * sizeof(float), cudaMemcpyDeviceToHost);
double sum = 0;
for (int i = 0; i < tt; i++) {
sum += tmp[i];
}
h_sum = (float) sum;
cudaFree(d_sum);
// cudaFreeHost(tmp);
free(tmp);
}
void reduce(int size, int D, float* d_idata, float* h_sum, int nb, int nt) {
const int tt = nb * nt;
float* d_sum;
cudaMalloc((void **) &d_sum, tt * D * sizeof(float));
float* tmp = (float*) malloc(tt * D * sizeof(float));
// float* tmp;
// cudaMallocHost((void **) &tmp, tt * D * sizeof(float));
if (nt * D * sizeof(float) >= 16384) {
printf("not enough shared memory!\n");
}
reduce_gpu<<< nb, nt, nt * D * sizeof(float) >>>(size, D, d_idata, d_sum);
cudaMemcpy(tmp, d_sum, tt * D * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < D; i++) {
h_sum[i] = 0;
}
for (int i = 0; i < tt; i++) {
for (int j = 0; j < D; j++) {
h_sum[j] += tmp[i * D + j];
}
}
cudaFree(d_sum);
// cudaFreeHost(tmp);
free(tmp);
}
//__global__ void reduce_gpu2(int size, float *d_idata, float *d_odata) {
//
// SharedMemory<float> smem;
// float *sdata = smem.getPointer();
//
// // perform first level of reduction,
// // reading from global memory, writing to shared memory
// int threads_per_block = blockDim.x;
// int blockId = blockIdx.x;
// int numBlocks = gridDim.x;
// unsigned int tid = threadIdx.x;
// unsigned int i = blockId * (threads_per_block * 2) + tid;
// unsigned int gridSize = threads_per_block * 2 * numBlocks;
// sdata[tid] = 0;
//
// // we reduce multiple elements per thread. The number is determined by the
// // number of active thread blocks (via gridSize). More blocks will result
// // in a larger gridSize and therefore fewer elements per thread
// while (i < size) {
// sdata[tid] += d_idata[i] + d_idata[i+threads_per_block];
// i += gridSize;
// }
// __syncthreads();
//
// if (threads_per_block >= 512) {
// if (tid < 256) {
// sdata[tid] += sdata[tid + 256];
// }
// __syncthreads();
// }
// if (threads_per_block >= 256) {
// if (tid < 128) {
// sdata[tid] += sdata[tid + 128];
// }
// __syncthreads();
// }
// if (threads_per_block >= 128) {
// if (tid < 64) {
// sdata[tid] += sdata[tid + 64];
// }
// __syncthreads();
// }
//
//#ifndef __DEVICE_EMULATION__
// // TODO: WHY!??
// if (tid < 32)
//#endif
// {
// if (threads_per_block >= 64) {
// sdata[tid] += sdata[tid + 32];
// __syncthreads();
// }
// if (threads_per_block >= 32) {
// sdata[tid] += sdata[tid + 16];
// __syncthreads();
// }
// if (threads_per_block >= 16) {
// sdata[tid] += sdata[tid + 8];
// __syncthreads();
// }
// if (threads_per_block >= 8) {
// sdata[tid] += sdata[tid + 4];
// __syncthreads();
// }
// if (threads_per_block >= 4) {
// sdata[tid] += sdata[tid + 2];
// __syncthreads();
// }
// if (threads_per_block >= 2) {
// sdata[tid] += sdata[tid + 1];
// __syncthreads();
// }
// }
//
// // write result for this block to global mem
// if (tid == 0) {
// d_odata[blockIdx.x] = sdata[0];
// }
//
//}
//
//void reduce(int size, float *d_idata, float& h_sum, int nb, int nt) {
// int smemSize = nt * sizeof(float);
// //
// // N_BLOCKS
// float* d_sum;
// cudaMalloc((void **) &d_sum, nb * sizeof(float));
//
// float* tmp;
// cudaMallocHost((void **) &tmp, nb * sizeof(float));
//
// reduce_gpu2<<< nb, nt, smemSize >>>(size, d_idata, d_sum);
//
// cudaMemcpy(tmp, d_sum, nb * sizeof(float), cudaMemcpyDeviceToHost);
//
// h_sum = 0;
// for (int i = 0; i < nb; i++) {
// h_sum += tmp[i];
// }
//
// cudaFree(d_sum);
// cudaFreeHost(tmp);
//
//}
|
a041c12c642b1c407111d72b05216df623ed96e8.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#include "config.h"
#define K 1
using namespace std;
#define mm_BLOCK_SIZE BLOCK_SIZE
//#define mm_SUPER_BLOCKS_PER_SM 4
//int mm_SUPER_BLOCKS_PER_SM = 4;
#define WA MSIZE // Matrix A width
#define HA MSIZE // Matrix A height
#define WB MSIZE // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
#define mm_GRID_X (MSIZE/mm_BLOCK_SIZE)
#define mm_GRID_Y (MSIZE/mm_BLOCK_SIZE)
#define mm_NBLOCKS (mm_GRID_X*mm_GRID_Y)
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
texture<float,2,hipReadModeElementType> tex_A;
texture<float,2,hipReadModeElementType> tex_B;
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j) {
double sum = 0;
for (unsigned int k = 0; k < wA; ++k) {
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
__global__ void
mm_kernel(float *B, float* C, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * mm_BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = mm_BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = mm_BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = mm_BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
AS(ty, tx) = tex2D(tex_A,(a+wA*ty+tx)%wA,(a+wA*ty+tx)/wA);//A[a + wA * ty + tx];
BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < mm_BLOCK_SIZE; ++k)
Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * mm_BLOCK_SIZE * by + mm_BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
//if (threadIdx.x==0&&threadIdx.y==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
// hipSetDevice(1);
srand(2013);
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = WA;
uiHA = HA;
uiWB = WB;
uiHB = HB;
uiWC = WC;
uiHC = HC;
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = uiWB * uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
float* d_B;
hipMalloc((void **)&d_B,mem_size_B);
// hipMemcpy()
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A, *d_C;
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
float* h_CUBLAS = (float*) malloc(mem_size_C);
checkCudaErrors(hipMalloc((void**) &d_A, mem_size_A));
// checkCudaErrors(hipMalloc((void**) &d_B, mem_size_B));
hipChannelFormatDesc channelDescA = hipCreateChannelDesc<float>();
hipChannelFormatDesc channelDescB = hipCreateChannelDesc<float>();
hipArray* A_Array, *B_Array;
hipMallocArray(&A_Array, &channelDescA, uiWA, uiHA);
hipMallocArray(&B_Array, &channelDescB, uiWB, uiHB);
// Copy to device memory some data located at address h_data
// in host memory
hipMemcpyToArray(A_Array, 0, 0, h_A, uiWA * uiHA * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpyToArray(B_Array, 0, 0, h_B, uiWB * uiHB * sizeof(float),
hipMemcpyHostToDevice);
// Set texture reference parameters
tex_A.addressMode[0] = hipAddressModeWrap;
tex_A.addressMode[1] = hipAddressModeWrap;
tex_A.filterMode = hipFilterModePoint;
tex_B.addressMode[0] = hipAddressModeWrap;
tex_B.addressMode[1] = hipAddressModeWrap;
tex_B.filterMode = hipFilterModePoint;
// Bind the array to the texture reference
hipBindTextureToArray(tex_A, A_Array, channelDescA);
hipBindTextureToArray(tex_B, B_Array, channelDescB);
// copy host memory to device
//checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice) );
//checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice) );
checkCudaErrors(hipMalloc((void**) &d_C, mem_size_C));
hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
// setup execution parameters
dim3 mm_grid(mm_GRID_X, mm_GRID_Y);
dim3 mm_block(mm_BLOCK_SIZE, mm_BLOCK_SIZE);
// warm up the GPU
for (int rpt=0; rpt<5; rpt++)
{
hipLaunchKernelGGL(( mm_kernel), dim3(mm_grid), dim3(mm_block), 0, 0, d_B,d_C, uiWA, uiWB);
}
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
printf("dimGrid: %dx%d dimBlock: %dx%d mat size: %dx%d\n",mm_GRID_X,mm_GRID_Y,mm_BLOCK_SIZE,mm_BLOCK_SIZE,uiWA,uiHA);
hipEventRecord(kernel_start, 0);
for (int rpt=0; rpt<ITERATIONS; rpt++)
{
hipLaunchKernelGGL(( mm_kernel), dim3(mm_grid), dim3(mm_block), 0, 0, d_B,d_C, uiWA, uiWB);
}
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
// copy result from device to host
checkCudaErrors(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost) );
/*
*/
// compute reference solution
float* reference = (float*)malloc(mem_size_C);
//computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB);
// check result (matrixMul)
// bool resCUDA = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f);
// printf("CUDA matrixMul compares %s\n\n", (true == resCUDA) ? "passed" : "FAIL");
free(reference);
// ofstream f1("mm_correct.txt");
// for(int i=0; i<size_C; ++i)
// f1 << reference[i] << endl;
// f1.close();
//
// ofstream f2("mm_gpu.txt");
// for(int i=0; i<size_C; ++i)
// f2 << h_C[i] << endl;
// f2.close();
// clean up memory
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
return 0;
}
| a041c12c642b1c407111d72b05216df623ed96e8.cu | #include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#include "config.h"
#define K 1
using namespace std;
#define mm_BLOCK_SIZE BLOCK_SIZE
//#define mm_SUPER_BLOCKS_PER_SM 4
//int mm_SUPER_BLOCKS_PER_SM = 4;
#define WA MSIZE // Matrix A width
#define HA MSIZE // Matrix A height
#define WB MSIZE // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
#define mm_GRID_X (MSIZE/mm_BLOCK_SIZE)
#define mm_GRID_Y (MSIZE/mm_BLOCK_SIZE)
#define mm_NBLOCKS (mm_GRID_X*mm_GRID_Y)
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
texture<float,2,cudaReadModeElementType> tex_A;
texture<float,2,cudaReadModeElementType> tex_B;
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j) {
double sum = 0;
for (unsigned int k = 0; k < wA; ++k) {
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
__global__ void
mm_kernel(float *B, float* C, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * mm_BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = mm_BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = mm_BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = mm_BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
AS(ty, tx) = tex2D(tex_A,(a+wA*ty+tx)%wA,(a+wA*ty+tx)/wA);//A[a + wA * ty + tx];
BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < mm_BLOCK_SIZE; ++k)
Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * mm_BLOCK_SIZE * by + mm_BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
//if (threadIdx.x==0&&threadIdx.y==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
// cudaSetDevice(1);
srand(2013);
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = WA;
uiHA = HA;
uiWB = WB;
uiHB = HB;
uiWC = WC;
uiHC = HC;
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = uiWB * uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
float* d_B;
cudaMalloc((void **)&d_B,mem_size_B);
// cudaMemcpy()
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A, *d_C;
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
float* h_CUBLAS = (float*) malloc(mem_size_C);
checkCudaErrors(cudaMalloc((void**) &d_A, mem_size_A));
// checkCudaErrors(cudaMalloc((void**) &d_B, mem_size_B));
cudaChannelFormatDesc channelDescA = cudaCreateChannelDesc<float>();
cudaChannelFormatDesc channelDescB = cudaCreateChannelDesc<float>();
cudaArray* A_Array, *B_Array;
cudaMallocArray(&A_Array, &channelDescA, uiWA, uiHA);
cudaMallocArray(&B_Array, &channelDescB, uiWB, uiHB);
// Copy to device memory some data located at address h_data
// in host memory
cudaMemcpyToArray(A_Array, 0, 0, h_A, uiWA * uiHA * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpyToArray(B_Array, 0, 0, h_B, uiWB * uiHB * sizeof(float),
cudaMemcpyHostToDevice);
// Set texture reference parameters
tex_A.addressMode[0] = cudaAddressModeWrap;
tex_A.addressMode[1] = cudaAddressModeWrap;
tex_A.filterMode = cudaFilterModePoint;
tex_B.addressMode[0] = cudaAddressModeWrap;
tex_B.addressMode[1] = cudaAddressModeWrap;
tex_B.filterMode = cudaFilterModePoint;
// Bind the array to the texture reference
cudaBindTextureToArray(tex_A, A_Array, channelDescA);
cudaBindTextureToArray(tex_B, B_Array, channelDescB);
// copy host memory to device
//checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) );
//checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) );
checkCudaErrors(cudaMalloc((void**) &d_C, mem_size_C));
cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
// setup execution parameters
dim3 mm_grid(mm_GRID_X, mm_GRID_Y);
dim3 mm_block(mm_BLOCK_SIZE, mm_BLOCK_SIZE);
// warm up the GPU
for (int rpt=0; rpt<5; rpt++)
{
mm_kernel<<< mm_grid, mm_block>>>(d_B,d_C, uiWA, uiWB);
}
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
printf("dimGrid: %dx%d dimBlock: %dx%d mat size: %dx%d\n",mm_GRID_X,mm_GRID_Y,mm_BLOCK_SIZE,mm_BLOCK_SIZE,uiWA,uiHA);
cudaEventRecord(kernel_start, 0);
for (int rpt=0; rpt<ITERATIONS; rpt++)
{
mm_kernel<<< mm_grid, mm_block>>>(d_B,d_C, uiWA, uiWB);
}
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
// copy result from device to host
checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) );
/*
*/
// compute reference solution
float* reference = (float*)malloc(mem_size_C);
//computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB);
// check result (matrixMul)
// bool resCUDA = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f);
// printf("CUDA matrixMul compares %s\n\n", (true == resCUDA) ? "passed" : "FAIL");
free(reference);
// ofstream f1("mm_correct.txt");
// for(int i=0; i<size_C; ++i)
// f1 << reference[i] << endl;
// f1.close();
//
// ofstream f2("mm_gpu.txt");
// for(int i=0; i<size_C; ++i)
// f2 << h_C[i] << endl;
// f2.close();
// clean up memory
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
return 0;
}
|
2eee5b11e3eeb788d8adefda25e4d488605086de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void kernel(unsigned int* start, unsigned int* end, float* someData,float* moreData){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
doSomeWork(someData[i]);
for(unsigned int j = start[i];j<end[i];++j){
doMoreWork(moreData[j]);
}
}
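// Same computation restructured with device-side kernel launches (dynamic parallelism):
// each kernel_parent thread launches a child grid covering its [start[i], end[i]) range.
// doSomeWork()/doMoreWork() are placeholders assumed to be __device__ helpers defined elsewhere.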
__global__ void kernel_child(unsigned int start, unsigned int end, float* moreData); // forward declaration (defined below)
__global__ void kernel_parent(unsigned int* start, unsigned int* end, float* someData, float* moreData){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
doSomeWork(someData[i]);
    kernel_child<<<(unsigned int)ceil((end[i] - start[i])/256.0), 256>>>(start[i], end[i], moreData);
}
__global__ void kernel_child(unsigned int start, unsigned int end, float* moreData){
unsigned int j = start + blockIdx.x * blockDim.x + threadIdx.x;
if(j<end){
doMoreWork(moreData[j]);
}
}
| 2eee5b11e3eeb788d8adefda25e4d488605086de.cu | __global__ void kernel(unsigned int* start, unsigned int* end, float* someData,float* moreData){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
doSomeWork(someData[i]);
for(unsigned int j = start[i];j<end[i];++j){
doMoreWork(moreData[j]);
}
}
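// Same computation restructured with device-side kernel launches (dynamic parallelism):
// each kernel_parent thread launches a child grid covering its [start[i], end[i]) range.
// doSomeWork()/doMoreWork() are placeholders assumed to be __device__ helpers defined elsewhere.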
__global__ void kernel_child(unsigned int start, unsigned int end, float* moreData); // forward declaration (defined below)
__global__ void kernel_parent(unsigned int* start, unsigned int* end, float* someData, float* moreData){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
doSomeWork(someData[i]);
    kernel_child<<<(unsigned int)ceil((end[i] - start[i])/256.0), 256>>>(start[i], end[i], moreData);
}
__global__ void kernel_child(unsigned int start, unsigned int end, float* moreData){
unsigned int j = start + blockIdx.x * blockDim.x + threadIdx.x;
if(j<end){
doMoreWork(moreData[j]);
}
}
|
d613984f699b70bd97c3eabd2629236d9fd2ee71.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cudamat_conv_util.cuh"
hipTextureObject_t getTextureObject(cudamat* mat) {
if (mat->tex_obj == 0) {
size_t size = mat->size[0] * mat->size[1] * sizeof(float);
if (size <= TEXTURE_SIZE_MAX) {
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = mat->data_device;
resDesc.res.linear.sizeInBytes = size;
resDesc.res.linear.desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = hipReadModeElementType;
hipError_t err = hipCreateTextureObject(&(mat->tex_obj), &resDesc, &texDesc, NULL);
if (hipSuccess != err) {
fprintf(stderr, "Error creating texture object for matrix of shape %d %d : (%d) %s.\n",
mat->size[0], mat->size[1], (int)err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
assert(mat->tex_obj != 0); // If this assert is false, then we need to call a kernel which doesn't use textures.
}
return mat->tex_obj;
}
bool FitsAsTexture(cudamat* mat) {
return ((mat->size[0] * mat->size[1] * sizeof(float)) <= TEXTURE_SIZE_MAX);
}
| d613984f699b70bd97c3eabd2629236d9fd2ee71.cu | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cudamat_conv_util.cuh"
cudaTextureObject_t getTextureObject(cudamat* mat) {
if (mat->tex_obj == 0) {
size_t size = mat->size[0] * mat->size[1] * sizeof(float);
if (size <= TEXTURE_SIZE_MAX) {
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = mat->data_device;
resDesc.res.linear.sizeInBytes = size;
resDesc.res.linear.desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
cudaError_t err = cudaCreateTextureObject(&(mat->tex_obj), &resDesc, &texDesc, NULL);
if (cudaSuccess != err) {
fprintf(stderr, "Error creating texture object for matrix of shape %d %d : (%d) %s.\n",
mat->size[0], mat->size[1], (int)err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
assert(mat->tex_obj != 0); // If this assert is false, then we need to call a kernel which doesn't use textures.
}
return mat->tex_obj;
}
bool FitsAsTexture(cudamat* mat) {
return ((mat->size[0] * mat->size[1] * sizeof(float)) <= TEXTURE_SIZE_MAX);
}
|
7776cfdd66a6a2074f420fcd02baa0fbcf6b9166.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include "backend/kernel_compiler/gpu/cuda_impl/mirror_pad_impl.cuh"
// check for existence in current padded array on X and Y dims
__inline__ __device__ bool range_check(int x, int y, int padded_width, int padded_height) {
if (((x >= 0) && (x <= padded_width - 1)) && ((y >= 0) && (y <= padded_height - 1))) {
return true;
}
return false;
}
// extract paddings from correct positions given variable paddings_arg size
__inline__ __device__ void extract_paddings(const int64_t *paddings_arg, int padd_dim, int64_t *extracted_paddings) {
const int paddings_offset = MAX_PADDINGS - padd_dim;
for (int i = 0; i < padd_dim; i++) {
extracted_paddings[(paddings_offset + i) * PADDING_SIZE] = paddings_arg[i * PADDING_SIZE];
extracted_paddings[(paddings_offset + i) * PADDING_SIZE + 1] = paddings_arg[i * PADDING_SIZE + 1];
}
}
// For every output position, first compute the position it mirrors from inside the padded array,
// then map that position back to the original (unpadded) input dimensions and copy the value.
template <typename T>
__global__ void MirrorPad(const size_t size, const T *input, const int old_batch, const int old_channel,
const int old_height, const int old_width, const int padded_height, const int padded_width,
const int padd_dim, const int64_t *paddings_arg, int mode, T *output) {
int64_t paddings[MAX_PADDINGS * PADDING_SIZE]; // local and fixed size to keep in registers
for (int i = 0; i < MAX_PADDINGS * PADDING_SIZE; i++) {
paddings[i] = 0;
}
extract_paddings(paddings_arg, padd_dim, paddings);
// Create anchor points for non mirrored data inside new tensor
int ap1_x = paddings[WIDTH + LEFT];
int ap2_x = paddings[WIDTH + LEFT] + old_width - 1;
int ap1_y = paddings[HEIGHT + TOP];
int ap2_y = paddings[HEIGHT + TOP] + old_height - 1;
int ap1_channel = paddings[CHANNEL + LEFT];
int ap2_channel = paddings[CHANNEL + LEFT] + old_channel - 1;
int ap1_batch = paddings[BATCH + LEFT];
int ap2_batch = paddings[BATCH + LEFT] + old_batch - 1;
int channels_new = old_channel + paddings[CHANNEL + LEFT] + paddings[CHANNEL + RIGHT];
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
int block_num = (pos / padded_width) / padded_height;
// cur position
const int padded_x = pos % padded_width;
const int padded_y = (pos / padded_width) % padded_height;
const int padded_channel = block_num % channels_new;
const int padded_batch = block_num / channels_new;
// distance from anchor points
// can be +/- depending on position
int x_dist = 0;
int y_dist = 0;
int channel_dist = 0;
int batch_dist = 0;
// data to mirror from in new tensor dims
int matchval_x_index = padded_x;
int matchval_y_index = padded_y;
int matchval_channel_index = padded_channel;
int matchval_batch_index = padded_batch;
int equiv_block_num = 0;
// update matching index in original tensor across all 4 dims
if ((padded_x < ap1_x) || (padded_x > ap2_x)) {
x_dist = (padded_x < ap1_x) ? (ap1_x - padded_x) : (padded_x - ap2_x);
matchval_x_index = (padded_x < ap1_x) ? (ap1_x + x_dist - mode) : (ap2_x - x_dist + mode);
}
if ((padded_y < ap1_y) || (padded_y > ap2_y)) {
y_dist = (padded_y < ap1_y) ? (ap1_y - padded_y) : (padded_y - ap2_y);
matchval_y_index = (padded_y < ap1_y) ? (ap1_y + y_dist - mode) : (ap2_y - y_dist + mode);
}
if ((padded_channel < ap1_channel) || (padded_channel > ap2_channel)) {
channel_dist = (padded_channel < ap1_channel) ? (ap1_channel - padded_channel) : (padded_channel - ap2_channel);
matchval_channel_index =
(padded_channel < ap1_channel) ? (ap1_channel + channel_dist - mode) : (ap2_channel - channel_dist + mode);
}
if ((padded_batch < ap1_batch) || (padded_batch > ap2_batch)) {
batch_dist = (padded_batch < ap1_batch) ? (ap1_batch - padded_batch) : (padded_batch - ap2_batch);
matchval_batch_index =
(padded_batch < ap1_batch) ? (ap1_batch + batch_dist - mode) : (ap2_batch - batch_dist + mode);
}
// calculate equivalent block in input
equiv_block_num = ((matchval_batch_index - paddings[BATCH + LEFT]) * old_channel) +
(matchval_channel_index - paddings[CHANNEL + LEFT]);
// copy data from equiv block and adjusted x and y values in unpadded tensor
output[pos] = input[(equiv_block_num * old_height + matchval_y_index - paddings[HEIGHT + TOP]) * old_width +
matchval_x_index - paddings[WIDTH + LEFT]];
}
return;
}
// Accumulates mirrored values across batch and channels into an interim workspace array.
// One thread per output value; a sweeping-add scheme lets the kernel avoid
// slower lock-based atomic adds.
template <typename T>
__global__ void MirrorPadGradBatchChannel(const size_t size, T *dy, T *interim_dy, const int dx_batches,
const int dx_channels, const int dx_height, const int dx_width,
const int dy_height, const int dy_width, const int padd_dim,
const int64_t *paddings_arg, int mode, T *dx) {
int64_t paddings[MAX_PADDINGS * PADDING_SIZE]; // local and fixed size to keep in registers
for (int i = 0; i < MAX_PADDINGS * PADDING_SIZE; i++) {
paddings[i] = 0; // init all to 0
}
extract_paddings(paddings_arg, padd_dim, paddings);
// Create anchor points for non mirrored data inside new tensor
int ap1_channel = paddings[CHANNEL + LEFT];
int ap2_channel = paddings[CHANNEL + LEFT] + dx_channels - 1;
int ap1_batch = paddings[BATCH + LEFT];
int ap2_batch = paddings[BATCH + LEFT] + dx_batches - 1;
int dy_channels = dx_channels + paddings[CHANNEL + LEFT] + paddings[CHANNEL + RIGHT];
int dy_batches = dx_batches + paddings[BATCH + LEFT] + paddings[BATCH + RIGHT];
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
int block_num = (pos / dy_width) / dy_height;
// Select exact position inside the dy_interim array
const int interim_x = pos % dy_width;
const int interim_y = (pos / dy_width) % dy_height;
const int interim_channel = block_num % dx_channels;
const int interim_batch = block_num / dx_channels;
interim_dy[pos] = 0; // init
// map cur interim channel and batch to equivalent in padded dy array
const int equiv_dy_channel = interim_channel + paddings[CHANNEL + LEFT];
const int equiv_dy_batch = interim_batch + paddings[BATCH + LEFT];
int target_batch = 0;
int target_channel = 0;
int equiv_block_num = 0;
equiv_block_num = ((equiv_dy_batch * dy_channels) + equiv_dy_channel);
// generate values to sweep over all possible mirrored points
auto batch_offsets = {2 * (ap1_batch - equiv_dy_batch) - mode, 0, 2 * (ap2_batch - equiv_dy_batch) + mode};
auto channel_offsets = {2 * (ap1_channel - equiv_dy_channel) - mode, 0,
2 * (ap2_channel - equiv_dy_channel) + mode};
for (auto b_adjust : batch_offsets) {
for (auto c_adjust : channel_offsets) {
target_batch = equiv_dy_batch + b_adjust;
target_channel = equiv_dy_channel + c_adjust;
// bounds check - if within bounds, mirrored value exists - copy dy
if ((target_batch < 0) || (target_batch > (dy_batches - 1)) || (target_channel < 0) ||
(target_channel > (dy_channels - 1))) {
continue; // no mirrored value with these target values
}
equiv_block_num = ((target_batch * dy_channels) + target_channel);
// Copy data and set value at input to 0 to avoid duplicates in reflect mode
interim_dy[pos] = interim_dy[pos] + dy[(equiv_block_num * dy_height + interim_y) * dy_width + interim_x];
dy[(equiv_block_num * dy_height + interim_y) * dy_width + interim_x] = 0;
}
}
}
return;
}
// Accumulate mirrored values across width and height from the interim dy array into output array
// The same sweep logic again lets the kernel avoid lock-based atomic adds
template <typename T>
__global__ void MirrorPadGrad_Width_Height(const size_t size, const T *dy, T *interim_dy, const int dx_batches,
const int dx_channels, const int dx_height, const int dx_width,
const int dy_height, const int dy_width, const int padd_dim,
const int64_t *paddings_arg, int mode, T *dx) {
int64_t paddings[MAX_PADDINGS * PADDING_SIZE]; // local and fixed size to keep in registers
for (int i = 0; i < MAX_PADDINGS * PADDING_SIZE; i++) {
paddings[i] = 0; // init all to 0
}
extract_paddings(paddings_arg, padd_dim, paddings);
// Create required anchor points for non-mirrored data inside new tensor
int ap1_x = paddings[WIDTH + LEFT];
int ap2_x = paddings[WIDTH + LEFT] + dx_width - 1;
int ap1_y = paddings[HEIGHT + TOP];
int ap2_y = paddings[HEIGHT + TOP] + dx_height - 1;
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
int dx_block_num = (pos / dx_width) / dx_height;
const int grad_x = (pos % dx_width) + paddings[WIDTH + LEFT];
const int grad_y = ((pos / dx_width) % dx_height) + paddings[HEIGHT + TOP];
// copy position's own value into output
dx[pos] = interim_dy[(dx_block_num * dy_height + grad_y) * dy_width + grad_x];
int x_dist_1 = (ap1_x - grad_x - mode);
int y_dist_1 = (ap1_y - grad_y - mode);
int x_dist_2 = (ap2_x - grad_x + mode);
int y_dist_2 = (ap2_y - grad_y + mode);
int axis_dist[] = {x_dist_1, x_dist_2, y_dist_1, y_dist_2};
int anch_point[] = {ap1_x, ap2_x, ap1_y, ap2_y};
bool x_axis_check[] = {true, true, false, false}; // true - update X , false - update Y
int temp_x = 0;
int temp_y = 0;
// mirroring across the axis lines
for (int x = 0; x < 4; x++) {
if (axis_dist[x] != 0) {
if (x_axis_check[x]) {
temp_y = grad_y;
temp_x = anch_point[x] + axis_dist[x];
} else {
temp_x = grad_x;
temp_y = anch_point[x] + axis_dist[x];
}
if (range_check(temp_x, temp_y, dy_width, dy_height)) {
dx[pos] = dx[pos] + interim_dy[(dx_block_num * dy_height + temp_y) * dy_width + temp_x];
}
}
}
// mirroring at corners
for (int x = 0; x < 2; x++) {
for (int y = 2; y < 4; y++) {
if ((axis_dist[x] != 0) && (axis_dist[y] != 0)) {
temp_x = anch_point[x] + axis_dist[x];
temp_y = anch_point[y] + axis_dist[y];
if (range_check(temp_x, temp_y, dy_width, dy_height)) {
dx[pos] = dx[pos] + interim_dy[(dx_block_num * dy_height + temp_y) * dy_width + temp_x];
}
}
}
}
}
return;
}
template <typename T>
void CalMirrorPad(const size_t size, const T *input, const int old_batch, const int old_channel, const int old_height,
const int old_width, const int padded_height, const int padded_width, int padd_num,
const int64_t *paddings, const int mode, T *output, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( MirrorPad), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, input, old_batch, old_channel, old_height,
old_width, padded_height, padded_width, padd_num,
paddings, mode, output);
return;
}
template <typename T>
void CalMirrorPadGrad(const size_t dx_size, const size_t interim_dy_size, T *dy, T *interim_dy, const int dx_batches,
const int dx_channels, const int dx_height, const int dx_width, const int dy_height,
const int dy_width, const int padd_dim, const int64_t *paddings, int mode, T *dx,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( MirrorPadGradBatchChannel), dim3(GET_BLOCKS(interim_dy_size)), dim3(GET_THREADS), 0, cuda_stream,
interim_dy_size, dy, interim_dy, dx_batches, dx_channels, dx_height, dx_width, dy_height, dy_width, padd_dim,
paddings, mode, dx);
hipLaunchKernelGGL(( MirrorPadGrad_Width_Height), dim3(GET_BLOCKS(dx_size)), dim3(GET_THREADS), 0, cuda_stream,
dx_size, dy, interim_dy, dx_batches, dx_channels, dx_height, dx_width, dy_height, dy_width, padd_dim, paddings,
mode, dx);
return;
}
template void CalMirrorPad<float>(const size_t size, const float *input, const int old_batch, const int old_channel,
const int old_height, const int old_width, const int padded_height,
const int padded_width, int padd_num, const int64_t *paddings, int mode,
float *output, hipStream_t cuda_stream);
template void CalMirrorPad<half>(const size_t size, const half *input, const int old_batch, const int old_channel,
const int old_height, const int old_width, const int padded_height,
const int padded_width, int padd_num, const int64_t *paddings, int mode, half *output,
hipStream_t cuda_stream);
template void CalMirrorPad<int>(const size_t size, const int *input, const int old_batch, const int old_channel,
const int old_height, const int old_width, const int padded_height,
const int padded_width, int padd_num, const int64_t *paddings, int mode, int *output,
hipStream_t cuda_stream);
template void CalMirrorPadGrad<float>(const size_t dx_size, const size_t dy_size, float *dy, float *interim_dy,
const int dx_batches, const int dx_channels, const int dx_height,
const int dx_width, const int dy_height, const int dy_width, const int padd_dim,
const int64_t *paddings, int mode, float *dx, hipStream_t cuda_stream);
template void CalMirrorPadGrad<half>(const size_t dx_size, const size_t dy_size, half *dy, half *interim_dy,
const int dx_batches, const int dx_channels, const int dx_height,
const int dx_width, const int dy_height, const int dy_width, const int padd_dim,
const int64_t *paddings, int mode, half *dx, hipStream_t cuda_stream);
template void CalMirrorPadGrad<int>(const size_t dx_size, const size_t dy_size, int *dy, int *interim_dy,
const int dx_batches, const int dx_channels, const int dx_height,
const int dx_width, const int dy_height, const int dy_width, const int padd_dim,
const int64_t *paddings, int mode, int *dx, hipStream_t cuda_stream);
| 7776cfdd66a6a2074f420fcd02baa0fbcf6b9166.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include "backend/kernel_compiler/gpu/cuda_impl/mirror_pad_impl.cuh"
// check for existence in current padded array on X and Y dims
__inline__ __device__ bool range_check(int x, int y, int padded_width, int padded_height) {
if (((x >= 0) && (x <= padded_width - 1)) && ((y >= 0) && (y <= padded_height - 1))) {
return true;
}
return false;
}
// extract paddings from correct positions given variable paddings_arg size
__inline__ __device__ void extract_paddings(const int64_t *paddings_arg, int padd_dim, int64_t *extracted_paddings) {
const int paddings_offset = MAX_PADDINGS - padd_dim;
for (int i = 0; i < padd_dim; i++) {
extracted_paddings[(paddings_offset + i) * PADDING_SIZE] = paddings_arg[i * PADDING_SIZE];
extracted_paddings[(paddings_offset + i) * PADDING_SIZE + 1] = paddings_arg[i * PADDING_SIZE + 1];
}
}
// for every position, first calculate position it mirrors from in the new padded array
// adjust calculated position to origin dx array dimensions and copy value
template <typename T>
__global__ void MirrorPad(const size_t size, const T *input, const int old_batch, const int old_channel,
const int old_height, const int old_width, const int padded_height, const int padded_width,
const int padd_dim, const int64_t *paddings_arg, int mode, T *output) {
int64_t paddings[MAX_PADDINGS * PADDING_SIZE]; // local and fixed size to keep in registers
for (int i = 0; i < MAX_PADDINGS * PADDING_SIZE; i++) {
paddings[i] = 0;
}
extract_paddings(paddings_arg, padd_dim, paddings);
// Create anchor points for non mirrored data inside new tensor
int ap1_x = paddings[WIDTH + LEFT];
int ap2_x = paddings[WIDTH + LEFT] + old_width - 1;
int ap1_y = paddings[HEIGHT + TOP];
int ap2_y = paddings[HEIGHT + TOP] + old_height - 1;
int ap1_channel = paddings[CHANNEL + LEFT];
int ap2_channel = paddings[CHANNEL + LEFT] + old_channel - 1;
int ap1_batch = paddings[BATCH + LEFT];
int ap2_batch = paddings[BATCH + LEFT] + old_batch - 1;
int channels_new = old_channel + paddings[CHANNEL + LEFT] + paddings[CHANNEL + RIGHT];
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
int block_num = (pos / padded_width) / padded_height;
// cur position
const int padded_x = pos % padded_width;
const int padded_y = (pos / padded_width) % padded_height;
const int padded_channel = block_num % channels_new;
const int padded_batch = block_num / channels_new;
// distance from anchor points
// can be +/- depending on position
int x_dist = 0;
int y_dist = 0;
int channel_dist = 0;
int batch_dist = 0;
// data to mirror from in new tensor dims
int matchval_x_index = padded_x;
int matchval_y_index = padded_y;
int matchval_channel_index = padded_channel;
int matchval_batch_index = padded_batch;
int equiv_block_num = 0;
// update matching index in original tensor across all 4 dims
if ((padded_x < ap1_x) || (padded_x > ap2_x)) {
x_dist = (padded_x < ap1_x) ? (ap1_x - padded_x) : (padded_x - ap2_x);
matchval_x_index = (padded_x < ap1_x) ? (ap1_x + x_dist - mode) : (ap2_x - x_dist + mode);
}
if ((padded_y < ap1_y) || (padded_y > ap2_y)) {
y_dist = (padded_y < ap1_y) ? (ap1_y - padded_y) : (padded_y - ap2_y);
matchval_y_index = (padded_y < ap1_y) ? (ap1_y + y_dist - mode) : (ap2_y - y_dist + mode);
}
if ((padded_channel < ap1_channel) || (padded_channel > ap2_channel)) {
channel_dist = (padded_channel < ap1_channel) ? (ap1_channel - padded_channel) : (padded_channel - ap2_channel);
matchval_channel_index =
(padded_channel < ap1_channel) ? (ap1_channel + channel_dist - mode) : (ap2_channel - channel_dist + mode);
}
if ((padded_batch < ap1_batch) || (padded_batch > ap2_batch)) {
batch_dist = (padded_batch < ap1_batch) ? (ap1_batch - padded_batch) : (padded_batch - ap2_batch);
matchval_batch_index =
(padded_batch < ap1_batch) ? (ap1_batch + batch_dist - mode) : (ap2_batch - batch_dist + mode);
}
// calculate equivalent block in input
equiv_block_num = ((matchval_batch_index - paddings[BATCH + LEFT]) * old_channel) +
(matchval_channel_index - paddings[CHANNEL + LEFT]);
// copy data from equiv block and adjusted x and y values in unpadded tensor
output[pos] = input[(equiv_block_num * old_height + matchval_y_index - paddings[HEIGHT + TOP]) * old_width +
matchval_x_index - paddings[WIDTH + LEFT]];
}
return;
}
// Accumulates mirrored values across batch and channels into an interim workspace array.
// One thread per output value combined with a sweep over all possible mirror sources allows
// the kernel to avoid slower lock-based atomic adds.
template <typename T>
__global__ void MirrorPadGradBatchChannel(const size_t size, T *dy, T *interim_dy, const int dx_batches,
const int dx_channels, const int dx_height, const int dx_width,
const int dy_height, const int dy_width, const int padd_dim,
const int64_t *paddings_arg, int mode, T *dx) {
int64_t paddings[MAX_PADDINGS * PADDING_SIZE]; // local and fixed size to keep in registers
for (int i = 0; i < MAX_PADDINGS * PADDING_SIZE; i++) {
paddings[i] = 0; // init all to 0
}
extract_paddings(paddings_arg, padd_dim, paddings);
// Create anchor points for non mirrored data inside new tensor
int ap1_channel = paddings[CHANNEL + LEFT];
int ap2_channel = paddings[CHANNEL + LEFT] + dx_channels - 1;
int ap1_batch = paddings[BATCH + LEFT];
int ap2_batch = paddings[BATCH + LEFT] + dx_batches - 1;
int dy_channels = dx_channels + paddings[CHANNEL + LEFT] + paddings[CHANNEL + RIGHT];
int dy_batches = dx_batches + paddings[BATCH + LEFT] + paddings[BATCH + RIGHT];
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
int block_num = (pos / dy_width) / dy_height;
// Select exact position inside the dy_interim array
const int interim_x = pos % dy_width;
const int interim_y = (pos / dy_width) % dy_height;
const int interim_channel = block_num % dx_channels;
const int interim_batch = block_num / dx_channels;
interim_dy[pos] = 0; // init
// map cur interim channel and batch to equivalent in padded dy array
const int equiv_dy_channel = interim_channel + paddings[CHANNEL + LEFT];
const int equiv_dy_batch = interim_batch + paddings[BATCH + LEFT];
int target_batch = 0;
int target_channel = 0;
int equiv_block_num = 0;
equiv_block_num = ((equiv_dy_batch * dy_channels) + equiv_dy_channel);
// generate values to sweep over all possible mirrored points
auto batch_offsets = {2 * (ap1_batch - equiv_dy_batch) - mode, 0, 2 * (ap2_batch - equiv_dy_batch) + mode};
auto channel_offsets = {2 * (ap1_channel - equiv_dy_channel) - mode, 0,
2 * (ap2_channel - equiv_dy_channel) + mode};
for (auto b_adjust : batch_offsets) {
for (auto c_adjust : channel_offsets) {
target_batch = equiv_dy_batch + b_adjust;
target_channel = equiv_dy_channel + c_adjust;
// bounds check - if within bounds, mirrored value exists - copy dy
if ((target_batch < 0) || (target_batch > (dy_batches - 1)) || (target_channel < 0) ||
(target_channel > (dy_channels - 1))) {
continue; // no mirrored value with these target values
}
equiv_block_num = ((target_batch * dy_channels) + target_channel);
// Copy data and set value at input to 0 to avoid duplicates in reflect mode
interim_dy[pos] = interim_dy[pos] + dy[(equiv_block_num * dy_height + interim_y) * dy_width + interim_x];
dy[(equiv_block_num * dy_height + interim_y) * dy_width + interim_x] = 0;
}
}
}
return;
}
// Accumulate mirrored values across width and height from the interim dy array into output array
// The same sweep logic again lets the kernel avoid lock-based atomic adds
template <typename T>
__global__ void MirrorPadGrad_Width_Height(const size_t size, const T *dy, T *interim_dy, const int dx_batches,
const int dx_channels, const int dx_height, const int dx_width,
const int dy_height, const int dy_width, const int padd_dim,
const int64_t *paddings_arg, int mode, T *dx) {
int64_t paddings[MAX_PADDINGS * PADDING_SIZE]; // local and fixed size to keep in registers
for (int i = 0; i < MAX_PADDINGS * PADDING_SIZE; i++) {
paddings[i] = 0; // init all to 0
}
extract_paddings(paddings_arg, padd_dim, paddings);
// Create required anchor points for non-mirrored data inside new tensor
int ap1_x = paddings[WIDTH + LEFT];
int ap2_x = paddings[WIDTH + LEFT] + dx_width - 1;
int ap1_y = paddings[HEIGHT + TOP];
int ap2_y = paddings[HEIGHT + TOP] + dx_height - 1;
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
int dx_block_num = (pos / dx_width) / dx_height;
const int grad_x = (pos % dx_width) + paddings[WIDTH + LEFT];
const int grad_y = ((pos / dx_width) % dx_height) + paddings[HEIGHT + TOP];
// copy position's own value into output
dx[pos] = interim_dy[(dx_block_num * dy_height + grad_y) * dy_width + grad_x];
int x_dist_1 = (ap1_x - grad_x - mode);
int y_dist_1 = (ap1_y - grad_y - mode);
int x_dist_2 = (ap2_x - grad_x + mode);
int y_dist_2 = (ap2_y - grad_y + mode);
int axis_dist[] = {x_dist_1, x_dist_2, y_dist_1, y_dist_2};
int anch_point[] = {ap1_x, ap2_x, ap1_y, ap2_y};
bool x_axis_check[] = {true, true, false, false}; // true - update X , false - update Y
int temp_x = 0;
int temp_y = 0;
// mirroring across the axis lines
for (int x = 0; x < 4; x++) {
if (axis_dist[x] != 0) {
if (x_axis_check[x]) {
temp_y = grad_y;
temp_x = anch_point[x] + axis_dist[x];
} else {
temp_x = grad_x;
temp_y = anch_point[x] + axis_dist[x];
}
if (range_check(temp_x, temp_y, dy_width, dy_height)) {
dx[pos] = dx[pos] + interim_dy[(dx_block_num * dy_height + temp_y) * dy_width + temp_x];
}
}
}
// mirroring at corners
for (int x = 0; x < 2; x++) {
for (int y = 2; y < 4; y++) {
if ((axis_dist[x] != 0) && (axis_dist[y] != 0)) {
temp_x = anch_point[x] + axis_dist[x];
temp_y = anch_point[y] + axis_dist[y];
if (range_check(temp_x, temp_y, dy_width, dy_height)) {
dx[pos] = dx[pos] + interim_dy[(dx_block_num * dy_height + temp_y) * dy_width + temp_x];
}
}
}
}
}
return;
}
template <typename T>
void CalMirrorPad(const size_t size, const T *input, const int old_batch, const int old_channel, const int old_height,
const int old_width, const int padded_height, const int padded_width, int padd_num,
const int64_t *paddings, const int mode, T *output, cudaStream_t cuda_stream) {
MirrorPad<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input, old_batch, old_channel, old_height,
old_width, padded_height, padded_width, padd_num,
paddings, mode, output);
return;
}
template <typename T>
void CalMirrorPadGrad(const size_t dx_size, const size_t interim_dy_size, T *dy, T *interim_dy, const int dx_batches,
const int dx_channels, const int dx_height, const int dx_width, const int dy_height,
const int dy_width, const int padd_dim, const int64_t *paddings, int mode, T *dx,
cudaStream_t cuda_stream) {
MirrorPadGradBatchChannel<<<GET_BLOCKS(interim_dy_size), GET_THREADS, 0, cuda_stream>>>(
interim_dy_size, dy, interim_dy, dx_batches, dx_channels, dx_height, dx_width, dy_height, dy_width, padd_dim,
paddings, mode, dx);
MirrorPadGrad_Width_Height<<<GET_BLOCKS(dx_size), GET_THREADS, 0, cuda_stream>>>(
dx_size, dy, interim_dy, dx_batches, dx_channels, dx_height, dx_width, dy_height, dy_width, padd_dim, paddings,
mode, dx);
return;
}
template void CalMirrorPad<float>(const size_t size, const float *input, const int old_batch, const int old_channel,
const int old_height, const int old_width, const int padded_height,
const int padded_width, int padd_num, const int64_t *paddings, int mode,
float *output, cudaStream_t cuda_stream);
template void CalMirrorPad<half>(const size_t size, const half *input, const int old_batch, const int old_channel,
const int old_height, const int old_width, const int padded_height,
const int padded_width, int padd_num, const int64_t *paddings, int mode, half *output,
cudaStream_t cuda_stream);
template void CalMirrorPad<int>(const size_t size, const int *input, const int old_batch, const int old_channel,
const int old_height, const int old_width, const int padded_height,
const int padded_width, int padd_num, const int64_t *paddings, int mode, int *output,
cudaStream_t cuda_stream);
template void CalMirrorPadGrad<float>(const size_t dx_size, const size_t dy_size, float *dy, float *interim_dy,
const int dx_batches, const int dx_channels, const int dx_height,
const int dx_width, const int dy_height, const int dy_width, const int padd_dim,
const int64_t *paddings, int mode, float *dx, cudaStream_t cuda_stream);
template void CalMirrorPadGrad<half>(const size_t dx_size, const size_t dy_size, half *dy, half *interim_dy,
const int dx_batches, const int dx_channels, const int dx_height,
const int dx_width, const int dy_height, const int dy_width, const int padd_dim,
const int64_t *paddings, int mode, half *dx, cudaStream_t cuda_stream);
template void CalMirrorPadGrad<int>(const size_t dx_size, const size_t dy_size, int *dy, int *interim_dy,
const int dx_batches, const int dx_channels, const int dx_height,
const int dx_width, const int dy_height, const int dy_width, const int padd_dim,
const int64_t *paddings, int mode, int *dx, cudaStream_t cuda_stream);
|
4380a46b461b1f5b7fd651f00a56c96d89a1f5f9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <raft/cuda_utils.cuh>
#include <raft/random/rng.cuh>
#include <stats/weighted_mean.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Stats {
template <typename T>
struct WeightedMeanInputs {
T tolerance;
int M, N;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const WeightedMeanInputs<T>& I)
{
return os << "{ " << I.tolerance << ", " << I.M << ", " << I.N << ", " << I.seed << "}"
<< std::endl;
}
///// weighted row-wise mean test and support functions
template <typename T>
void naiveRowWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor)
{
int istr = rowMajor ? 1 : M;
int jstr = rowMajor ? N : 1;
// sum the weights
T WS = 0;
for (int i = 0; i < N; i++)
WS += W[i];
for (int j = 0; j < M; j++) {
R[j] = (T)0;
for (int i = 0; i < N; i++) {
// R[j] += (W[i]*D[i*istr + j*jstr] - R[j])/(T)(i+1);
R[j] += (W[i] * D[i * istr + j * jstr]) / WS;
}
}
}
template <typename T>
class RowWeightedMeanTest : public ::testing::TestWithParam<WeightedMeanInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int rows = params.M, cols = params.N, len = rows * cols;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
// device-side data
din.resize(len);
dweights.resize(cols);
dexp.resize(rows);
dact.resize(rows);
// create random matrix and weights
r.uniform(din.data().get(), len, T(-1.0), T(1.0), stream);
r.uniform(dweights.data().get(), cols, T(-1.0), T(1.0), stream);
// host-side data
thrust::host_vector<T> hin = din;
thrust::host_vector<T> hweights = dweights;
thrust::host_vector<T> hexp(rows);
// compute naive result & copy to GPU
naiveRowWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols, true);
dexp = hexp;
// compute ml-prims result
rowWeightedMean(dact.data().get(), din.data().get(), dweights.data().get(), cols, rows, stream);
// adjust tolerance to account for round-off accumulation
params.tolerance *= params.N;
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {}
protected:
WeightedMeanInputs<T> params;
thrust::host_vector<T> hin, hweights;
thrust::device_vector<T> din, dweights, dexp, dact;
};
///// weighted column-wise mean test and support functions
template <typename T>
void naiveColWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor)
{
int istr = rowMajor ? 1 : M;
int jstr = rowMajor ? N : 1;
// sum the weights
T WS = 0;
for (int j = 0; j < M; j++)
WS += W[j];
for (int i = 0; i < N; i++) {
R[i] = (T)0;
for (int j = 0; j < M; j++) {
// R[i] += (W[j]*D[i*istr + j*jstr] - R[i])/(T)(j+1);
R[i] += (W[j] * D[i * istr + j * jstr]) / WS;
}
}
}
template <typename T>
class ColWeightedMeanTest : public ::testing::TestWithParam<WeightedMeanInputs<T>> {
void SetUp() override
{
params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int rows = params.M, cols = params.N, len = rows * cols;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
// device-side data
din.resize(len);
dweights.resize(rows);
dexp.resize(cols);
dact.resize(cols);
// create random matrix and weights
r.uniform(din.data().get(), len, T(-1.0), T(1.0), stream);
r.uniform(dweights.data().get(), rows, T(-1.0), T(1.0), stream);
// host-side data
thrust::host_vector<T> hin = din;
thrust::host_vector<T> hweights = dweights;
thrust::host_vector<T> hexp(cols);
// compute naive result & copy to GPU
naiveColWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols, true);
dexp = hexp;
// compute ml-prims result
colWeightedMean(dact.data().get(), din.data().get(), dweights.data().get(), cols, rows, stream);
// adjust tolerance to account for round-off accumulation
params.tolerance *= params.M;
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {}
protected:
WeightedMeanInputs<T> params;
thrust::host_vector<T> hin, hweights;
thrust::device_vector<T> din, dweights, dexp, dact;
};
////// Parameter sets and test instantiation
static const float tolF = 128 * std::numeric_limits<float>::epsilon();
static const double tolD = 256 * std::numeric_limits<double>::epsilon();
const std::vector<WeightedMeanInputs<float>> inputsf = {{tolF, 4, 4, 1234},
{tolF, 1024, 32, 1234},
{tolF, 1024, 64, 1234},
{tolF, 1024, 128, 1234},
{tolF, 1024, 256, 1234},
{tolF, 1024, 32, 1234},
{tolF, 1024, 64, 1234},
{tolF, 1024, 128, 1234},
{tolF, 1024, 256, 1234}};
const std::vector<WeightedMeanInputs<double>> inputsd = {{tolD, 4, 4, 1234},
{tolD, 1024, 32, 1234},
{tolD, 1024, 64, 1234},
{tolD, 1024, 128, 1234},
{tolD, 1024, 256, 1234},
{tolD, 1024, 32, 1234},
{tolD, 1024, 64, 1234},
{tolD, 1024, 128, 1234},
{tolD, 1024, 256, 1234}};
using RowWeightedMeanTestF = RowWeightedMeanTest<float>;
TEST_P(RowWeightedMeanTestF, Result)
{
ASSERT_TRUE(devArrMatch(
dexp.data().get(), dact.data().get(), params.M, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestF, ::testing::ValuesIn(inputsf));
using RowWeightedMeanTestD = RowWeightedMeanTest<double>;
TEST_P(RowWeightedMeanTestD, Result)
{
ASSERT_TRUE(devArrMatch(
dexp.data().get(), dact.data().get(), params.M, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestD, ::testing::ValuesIn(inputsd));
using ColWeightedMeanTestF = ColWeightedMeanTest<float>;
TEST_P(ColWeightedMeanTestF, Result)
{
ASSERT_TRUE(devArrMatch(
dexp.data().get(), dact.data().get(), params.N, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestF, ::testing::ValuesIn(inputsf));
using ColWeightedMeanTestD = ColWeightedMeanTest<double>;
TEST_P(ColWeightedMeanTestD, Result)
{
ASSERT_TRUE(devArrMatch(
dexp.data().get(), dact.data().get(), params.N, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestD, ::testing::ValuesIn(inputsd));
}; // end namespace Stats
}; // end namespace MLCommon
| 4380a46b461b1f5b7fd651f00a56c96d89a1f5f9.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <raft/cuda_utils.cuh>
#include <raft/random/rng.cuh>
#include <stats/weighted_mean.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Stats {
template <typename T>
struct WeightedMeanInputs {
T tolerance;
int M, N;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const WeightedMeanInputs<T>& I)
{
return os << "{ " << I.tolerance << ", " << I.M << ", " << I.N << ", " << I.seed << "}"
<< std::endl;
}
///// weighted row-wise mean test and support functions
template <typename T>
void naiveRowWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor)
{
int istr = rowMajor ? 1 : M;
int jstr = rowMajor ? N : 1;
// sum the weights
T WS = 0;
for (int i = 0; i < N; i++)
WS += W[i];
for (int j = 0; j < M; j++) {
R[j] = (T)0;
for (int i = 0; i < N; i++) {
// R[j] += (W[i]*D[i*istr + j*jstr] - R[j])/(T)(i+1);
R[j] += (W[i] * D[i * istr + j * jstr]) / WS;
}
}
}
template <typename T>
class RowWeightedMeanTest : public ::testing::TestWithParam<WeightedMeanInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int rows = params.M, cols = params.N, len = rows * cols;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
// device-side data
din.resize(len);
dweights.resize(cols);
dexp.resize(rows);
dact.resize(rows);
// create random matrix and weights
r.uniform(din.data().get(), len, T(-1.0), T(1.0), stream);
r.uniform(dweights.data().get(), cols, T(-1.0), T(1.0), stream);
// host-side data
thrust::host_vector<T> hin = din;
thrust::host_vector<T> hweights = dweights;
thrust::host_vector<T> hexp(rows);
// compute naive result & copy to GPU
naiveRowWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols, true);
dexp = hexp;
// compute ml-prims result
rowWeightedMean(dact.data().get(), din.data().get(), dweights.data().get(), cols, rows, stream);
// adjust tolerance to account for round-off accumulation
params.tolerance *= params.N;
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {}
protected:
WeightedMeanInputs<T> params;
thrust::host_vector<T> hin, hweights;
thrust::device_vector<T> din, dweights, dexp, dact;
};
///// weighted column-wise mean test and support functions
template <typename T>
void naiveColWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor)
{
int istr = rowMajor ? 1 : M;
int jstr = rowMajor ? N : 1;
// sum the weights
T WS = 0;
for (int j = 0; j < M; j++)
WS += W[j];
for (int i = 0; i < N; i++) {
R[i] = (T)0;
for (int j = 0; j < M; j++) {
// R[i] += (W[j]*D[i*istr + j*jstr] - R[i])/(T)(j+1);
R[i] += (W[j] * D[i * istr + j * jstr]) / WS;
}
}
}
template <typename T>
class ColWeightedMeanTest : public ::testing::TestWithParam<WeightedMeanInputs<T>> {
void SetUp() override
{
params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int rows = params.M, cols = params.N, len = rows * cols;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
// device-side data
din.resize(len);
dweights.resize(rows);
dexp.resize(cols);
dact.resize(cols);
// create random matrix and weights
r.uniform(din.data().get(), len, T(-1.0), T(1.0), stream);
r.uniform(dweights.data().get(), rows, T(-1.0), T(1.0), stream);
// host-side data
thrust::host_vector<T> hin = din;
thrust::host_vector<T> hweights = dweights;
thrust::host_vector<T> hexp(cols);
// compute naive result & copy to GPU
naiveColWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols, true);
dexp = hexp;
// compute ml-prims result
colWeightedMean(dact.data().get(), din.data().get(), dweights.data().get(), cols, rows, stream);
// adjust tolerance to account for round-off accumulation
params.tolerance *= params.M;
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {}
protected:
WeightedMeanInputs<T> params;
thrust::host_vector<T> hin, hweights;
thrust::device_vector<T> din, dweights, dexp, dact;
};
////// Parameter sets and test instantiation
static const float tolF = 128 * std::numeric_limits<float>::epsilon();
static const double tolD = 256 * std::numeric_limits<double>::epsilon();
const std::vector<WeightedMeanInputs<float>> inputsf = {{tolF, 4, 4, 1234},
{tolF, 1024, 32, 1234},
{tolF, 1024, 64, 1234},
{tolF, 1024, 128, 1234},
{tolF, 1024, 256, 1234},
{tolF, 1024, 32, 1234},
{tolF, 1024, 64, 1234},
{tolF, 1024, 128, 1234},
{tolF, 1024, 256, 1234}};
const std::vector<WeightedMeanInputs<double>> inputsd = {{tolD, 4, 4, 1234},
{tolD, 1024, 32, 1234},
{tolD, 1024, 64, 1234},
{tolD, 1024, 128, 1234},
{tolD, 1024, 256, 1234},
{tolD, 1024, 32, 1234},
{tolD, 1024, 64, 1234},
{tolD, 1024, 128, 1234},
{tolD, 1024, 256, 1234}};
using RowWeightedMeanTestF = RowWeightedMeanTest<float>;
TEST_P(RowWeightedMeanTestF, Result)
{
ASSERT_TRUE(devArrMatch(
dexp.data().get(), dact.data().get(), params.M, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestF, ::testing::ValuesIn(inputsf));
using RowWeightedMeanTestD = RowWeightedMeanTest<double>;
TEST_P(RowWeightedMeanTestD, Result)
{
ASSERT_TRUE(devArrMatch(
dexp.data().get(), dact.data().get(), params.M, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestD, ::testing::ValuesIn(inputsd));
using ColWeightedMeanTestF = ColWeightedMeanTest<float>;
TEST_P(ColWeightedMeanTestF, Result)
{
ASSERT_TRUE(devArrMatch(
dexp.data().get(), dact.data().get(), params.N, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestF, ::testing::ValuesIn(inputsf));
using ColWeightedMeanTestD = ColWeightedMeanTest<double>;
TEST_P(ColWeightedMeanTestD, Result)
{
ASSERT_TRUE(devArrMatch(
dexp.data().get(), dact.data().get(), params.N, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestD, ::testing::ValuesIn(inputsd));
}; // end namespace Stats
}; // end namespace MLCommon
|
7e0c46af5141192d0f6532f3cb5ee43ea963648a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<addTest.h>
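// Interleaved-pair reduction with 4x unrolling: each block first folds four consecutive
// input blocks into its own segment, then tree-reduces the segment and writes one partial
// sum per block to dst.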
__global__ void reduceUnroll4(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 4 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 4;
if (id + 3 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
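// Same scheme as reduceUnroll4 but with 8x unrolling: eight input blocks are folded into
// one segment before the in-place tree reduction.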
__global__ void reduceUnroll8(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 8;
if (id + 7 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
src[id] += src[id + blockDim.x * 4];
src[id] += src[id + blockDim.x * 5];
src[id] += src[id + blockDim.x * 6];
src[id] += src[id + blockDim.x * 7];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
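// 8x unrolled reduction where the last six iterations (stride <= 32) are unrolled by hand
// through a volatile pointer, relying on warp-synchronous execution instead of __syncthreads().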
__global__ void reduceUnrollWarps8(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 8;
if (id + 7 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
src[id] += src[id + blockDim.x * 4];
src[id] += src[id + blockDim.x * 5];
src[id] += src[id + blockDim.x * 6];
src[id] += src[id + blockDim.x * 7];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid < 32) {
volatile int *vmen = data;
vmen[tid] += vmen[tid + 32];
vmen[tid] += vmen[tid + 16];
vmen[tid] += vmen[tid + 8];
vmen[tid] += vmen[tid + 4];
vmen[tid] += vmen[tid + 2];
vmen[tid] += vmen[tid + 1];
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
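// 8x unrolled reduction with the reduction tree fully unrolled for block sizes up to
// 1024 threads, finishing with warp-synchronous handling of the last 64 elements.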
__global__ void reduceCompleteUnrollWarps8(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 8;
if (id + 7 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
src[id] += src[id + blockDim.x * 4];
src[id] += src[id + blockDim.x * 5];
src[id] += src[id + blockDim.x * 6];
src[id] += src[id + blockDim.x * 7];
}
__syncthreads();
if (blockDim.x >= 1024 && tid < 512)
data[tid] += data[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256)
data[tid] += data[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128)
data[tid] += data[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64)
data[tid] += data[tid + 64];
__syncthreads();
if (tid < 32) {
volatile int *vmen = data;
vmen[tid] += vmen[tid + 32];
vmen[tid] += vmen[tid + 16];
vmen[tid] += vmen[tid + 8];
vmen[tid] += vmen[tid + 4];
vmen[tid] += vmen[tid + 2];
vmen[tid] += vmen[tid + 1];
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
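// 2x unrolled variant: each block folds two input blocks into one segment before the
// in-place tree reduction.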
__global__ void reduceUnroll2(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 2 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 2;
if (id + blockDim.x < num) {
src[id] += src[id + blockDim.x];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
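// Neighbored-pair reduction with a remapped index (idx = 2 * tid * stride) that keeps the
// active threads contiguous, reducing warp divergence compared to reduceNeighbored.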
__global__ void reduceNeighboredLess(int* src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
int* data = src + blockIdx.x * blockDim.x;
if (id >= num) return;
for (int stride = 1; stride < blockDim.x; stride *= 2) {
int idx = 2 * tid * stride;
if (idx < blockDim.x) {
data[idx] += data[idx + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
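// Interleaved-pair reduction: the stride starts at blockDim.x / 2 and halves each step,
// keeping active threads contiguous and global memory accesses coalesced.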
__global__ void reduceInterieaved(int* src, int *dst, int num) {
// set threadId
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= num) return;
int* data = src + blockIdx.x * blockDim.x;
for (int stride = blockDim.x / 2 ; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0) {
dst[blockIdx.x] = data[0];
}
}
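// Naive neighbored-pair reduction: only threads whose index is a multiple of 2 * stride stay
// active in each step, which causes heavy warp divergence.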
__global__ void reduceNeighbored(int* src, int *dst, int num) {
// set threadId
unsigned int id_thread = threadIdx.x;
if (id_thread >= num) return;
int* data = src + blockIdx.x * blockDim.x;
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((id_thread % (2 * stride)) == 0) {
data[id_thread] += data[stride + id_thread];
}
__syncthreads();
}
if (id_thread == 0) {
dst[blockIdx.x] = data[0];
}
}
// CPU reference: recursive interleaved-pair reduction on the host
int reduceNeighbored_cpu(int *data, int num) {
if (num == 1) return data[0];
int const stride = num / 2;
for (int i = 0; i < stride; i++) {
data[i] += data[i + stride];
}
if (num % 2 == 1) {
data[0] += data[num - 1];
}
return reduceNeighbored_cpu(data, stride);
}
int main(void) {
int dev = 0;
initDevice(dev);
int num = 1 << 20;
int* x_h = (int *)malloc(num * sizeof(int));
int* dst_cpu = (int *)malloc(num * sizeof(int));
int* dst_dev_cpu = (int *)malloc(num * sizeof(int));
for(int i = 0; i < num; i++) {
x_h[i] = i % 3;
}
int *x_d, *dst_d;
CHECK(hipMalloc((int**)&x_d, num * sizeof(int)));
CHECK(hipMalloc((int**)&dst_d, num * sizeof(int)));
CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
int block = 1024;
int grid = (num + block -1) / block;
printf("grid : %d , block : %d\n", grid, block);
hipLaunchKernelGGL(( reduceNeighbored), dim3(grid), dim3(block), 0, 0, x_d, dst_d, num);
CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
int sum_dev = 0;
for (int i = 0; i < grid; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceNeighboredLess), dim3(grid), dim3(block), 0, 0, x_d, dst_d, num);
CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error Less kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceInterieaved), dim3(grid), dim3(block), 0, 0, x_d, dst_d, num);
CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error interieaved kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceUnroll2), dim3(grid / 2), dim3(block), 0, 0, x_d, dst_d, num);
CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid / 2; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error interieaved kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceUnroll4), dim3(grid / 4), dim3(block), 0, 0, x_d, dst_d, num);
CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid / 4; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error unroll4 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceUnroll8), dim3(grid / 8), dim3(block), 0, 0, x_d, dst_d, num);
CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid / 8; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error unroll8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceUnrollWarps8), dim3(grid / 8), dim3(block), 0, 0, x_d, dst_d, num);
CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid / 8; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error warps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceCompleteUnrollWarps8), dim3(grid / 8), dim3(block), 0, 0, x_d, dst_d, num);
CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid / 8; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error Completewarps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
hipFree(x_d);
hipFree(dst_d);
free(x_h);
free(dst_cpu);
free(dst_dev_cpu);
return 0;
}
| 7e0c46af5141192d0f6532f3cb5ee43ea963648a.cu | #include<iostream>
#include<addTest.h>
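// Interleaved-pair reduction with 4x unrolling: each block first folds four consecutive
// input blocks into its own segment, then tree-reduces the segment and writes one partial
// sum per block to dst.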
__global__ void reduceUnroll4(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 4 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 4;
if (id + 3 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
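// Same scheme as reduceUnroll4 but with 8x unrolling: eight input blocks are folded into
// one segment before the in-place tree reduction.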
__global__ void reduceUnroll8(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 8;
if (id + 7 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
src[id] += src[id + blockDim.x * 4];
src[id] += src[id + blockDim.x * 5];
src[id] += src[id + blockDim.x * 6];
src[id] += src[id + blockDim.x * 7];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
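// 8x unrolled reduction where the last six iterations (stride <= 32) are unrolled by hand
// through a volatile pointer, relying on warp-synchronous execution instead of __syncthreads().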
__global__ void reduceUnrollWarps8(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 8;
if (id + 7 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
src[id] += src[id + blockDim.x * 4];
src[id] += src[id + blockDim.x * 5];
src[id] += src[id + blockDim.x * 6];
src[id] += src[id + blockDim.x * 7];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid < 32) {
volatile int *vmen = data;
vmen[tid] += vmen[tid + 32];
vmen[tid] += vmen[tid + 16];
vmen[tid] += vmen[tid + 8];
vmen[tid] += vmen[tid + 4];
vmen[tid] += vmen[tid + 2];
vmen[tid] += vmen[tid + 1];
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
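// 8x unrolled reduction with the reduction tree fully unrolled for block sizes up to
// 1024 threads, finishing with warp-synchronous handling of the last 64 elements.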
__global__ void reduceCompleteUnrollWarps8(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 8;
if (id + 7 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
src[id] += src[id + blockDim.x * 4];
src[id] += src[id + blockDim.x * 5];
src[id] += src[id + blockDim.x * 6];
src[id] += src[id + blockDim.x * 7];
}
__syncthreads();
if (blockDim.x >= 1024 && tid < 512)
data[tid] += data[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256)
data[tid] += data[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128)
data[tid] += data[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64)
data[tid] += data[tid + 64];
__syncthreads();
if (tid < 32) {
volatile int *vmen = data;
vmen[tid] += vmen[tid + 32];
vmen[tid] += vmen[tid + 16];
vmen[tid] += vmen[tid + 8];
vmen[tid] += vmen[tid + 4];
vmen[tid] += vmen[tid + 2];
vmen[tid] += vmen[tid + 1];
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
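// 2x unrolled variant: each block folds two input blocks into one segment before the
// in-place tree reduction.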
__global__ void reduceUnroll2(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 2 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 2;
if (id + blockDim.x < num) {
src[id] += src[id + blockDim.x];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
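// Neighbored-pair reduction with a remapped index (idx = 2 * tid * stride) that keeps the
// active threads contiguous, reducing warp divergence compared to reduceNeighbored.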
__global__ void reduceNeighboredLess(int* src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
int* data = src + blockIdx.x * blockDim.x;
if (id >= num) return;
for (int stride = 1; stride < blockDim.x; stride *= 2) {
int idx = 2 * tid * stride;
if (idx < blockDim.x) {
data[idx] += data[idx + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
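// Interleaved-pair reduction: the stride starts at blockDim.x / 2 and halves each step,
// keeping active threads contiguous and global memory accesses coalesced.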
__global__ void reduceInterieaved(int* src, int *dst, int num) {
// set threadId
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= num) return;
int* data = src + blockIdx.x * blockDim.x;
for (int stride = blockDim.x / 2 ; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0) {
dst[blockIdx.x] = data[0];
}
}
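// Naive neighbored-pair reduction: only threads whose index is a multiple of 2 * stride stay
// active in each step, which causes heavy warp divergence.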
__global__ void reduceNeighbored(int* src, int *dst, int num) {
// set threadId
unsigned int id_thread = threadIdx.x;
if (id_thread >= num) return;
int* data = src + blockIdx.x * blockDim.x;
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((id_thread % (2 * stride)) == 0) {
data[id_thread] += data[stride + id_thread];
}
__syncthreads();
}
if (id_thread == 0) {
dst[blockIdx.x] = data[0];
}
}
// CPU reference: recursive interleaved-pair reduction on the host
int reduceNeighbored_cpu(int *data, int num) {
if (num == 1) return data[0];
int const stride = num / 2;
for (int i = 0; i < stride; i++) {
data[i] += data[i + stride];
}
if (num % 2 == 1) {
data[0] += data[num - 1];
}
return reduceNeighbored_cpu(data, stride);
}
int main(void) {
int dev = 0;
initDevice(dev);
int num = 1 << 20;
int* x_h = (int *)malloc(num * sizeof(int));
int* dst_cpu = (int *)malloc(num * sizeof(int));
int* dst_dev_cpu = (int *)malloc(num * sizeof(int));
for(int i = 0; i < num; i++) {
x_h[i] = i % 3;
}
int *x_d, *dst_d;
CHECK(cudaMalloc((int**)&x_d, num * sizeof(int)));
CHECK(cudaMalloc((int**)&dst_d, num * sizeof(int)));
CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
int block = 1024;
int grid = (num + block -1) / block;
printf("grid : %d , block : %d\n", grid, block);
reduceNeighbored<<<grid, block>>>(x_d, dst_d, num);
CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
int sum_dev = 0;
for (int i = 0; i < grid; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
reduceNeighboredLess<<<grid, block>>>(x_d, dst_d, num);
CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error Less kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
reduceInterieaved<<<grid, block>>>(x_d, dst_d, num);
CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error interieaved kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
reduceUnroll2<<<grid / 2, block>>>(x_d, dst_d, num);
CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid / 2; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error interieaved kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
reduceUnroll4<<<grid / 4, block>>>(x_d, dst_d, num);
CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid / 4; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error unroll4 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
reduceUnroll8<<<grid / 8, block>>>(x_d, dst_d, num);
CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid / 8; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error unroll8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
reduceUnrollWarps8<<<grid / 8, block>>>(x_d, dst_d, num);
CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid / 8; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error warps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
reduceCompleteUnrollWarps8<<<grid / 8, block>>>(x_d, dst_d, num);
CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid / 8; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error Completewarps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
cudaFree(x_d);
cudaFree(dst_d);
free(x_h);
free(dst_cpu);
free(dst_dev_cpu);
return 0;
}
|
e2e8be71da91c5f488549779c1d36e30e571b6a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This code contains NVIDIA Confidential Information and is disclosed to you
// under a form of NVIDIA software license agreement provided separately to you.
//
// Notice
// NVIDIA Corporation and its licensors retain all intellectual property and
// proprietary rights in and to this software and related documentation and
// any modifications thereto. Any use, reproduction, disclosure, or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA Corporation is strictly prohibited.
//
// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
//
// Information and code furnished is believed to be accurate and reliable.
// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
#include "PxPhysics.h"
#include "PxVec4.h"
#include "PxVec3.h"
#include "PxVec2.h"
#include "PxMat33.h"
#include "PxStrideIterator.h"
namespace physx
{
template <typename T>
__device__ T* ptrOffset(T* p, PxU32 byteOffset)
{
return (T*)((unsigned char*)(p) + byteOffset);
}
#if __CUDA_ARCH__ < 200
__device__ PxU32 gOffset;
#else
__device__ __shared__ PxU32 gOffset;
#endif
// copies orientations and positions to the destination vertex
// buffer based on the validityBitmap state
extern "C" __global__ void updateInstancedVB(
PxVec3* destPositions,
PxVec3* destRotation0,
PxVec3* destRotation1,
PxVec3* destRotation2,
PxU32 destStride,
const PxVec4* srcPositions,
const PxMat33* srcRotations,
const PxU32* validParticleBitmap,
PxU32 validParticleRange)
{
if (!threadIdx.x)
gOffset = 0;
__syncthreads();
if (validParticleRange)
{
for (PxU32 w=threadIdx.x; w <= (validParticleRange) >> 5; w+=blockDim.x)
{
const PxU32 srcBaseIndex = w << 5;
// reserve space in the output vertex buffer based on
// population count of validity bitmap (avoids excess atomic ops)
PxU32 destIndex = atomicAdd(&gOffset, __popc(validParticleBitmap[w]));
for (PxU32 b=validParticleBitmap[w]; b; b &= b-1)
{
const PxU32 index = srcBaseIndex | (__ffs(b)-1);
const PxU32 offset = destIndex*destStride;
*ptrOffset(destRotation0, offset) = srcRotations[index].column0;
*ptrOffset(destRotation1, offset) = srcRotations[index].column1;
*ptrOffset(destRotation2, offset) = srcRotations[index].column2;
PxVec3* p = ptrOffset(destPositions, offset);
p->x = srcPositions[index].x;
p->y = srcPositions[index].y;
p->z = srcPositions[index].z;
++destIndex;
}
}
}
}
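// Illustrative note (added, not from the original source): the loop above visits one set
// bit of the 32-bit validity word per iteration. For b = 0b00101100, __popc(b) = 3 reserves
// three output slots up front; __ffs(b) returns the 1-based position of the lowest set bit
// (3 here, so __ffs(b)-1 = 2 picks particle srcBaseIndex | 2), and b &= b-1 clears that bit
// so the next iteration finds the next valid particle.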
// copies positions and alpha to the destination vertex buffer based on
// validity bitmap and particle life times
extern "C" __global__ void updateBillboardVB(
PxVec3* destPositions,
PxU8* destAlphas,
PxU32 destStride,
PxF32 fadingPeriod,
const PxVec4* srcPositions,
const PxReal* srcLifetimes,
const PxU32* validParticleBitmap,
PxU32 validParticleRange)
{
if (!threadIdx.x)
gOffset = 0;
__syncthreads();
if (validParticleRange)
{
for (PxU32 w=threadIdx.x; w <= (validParticleRange) >> 5; w+=blockDim.x)
{
const PxU32 srcBaseIndex = w << 5;
// reserve space in the output vertex buffer based on
// population count of validity bitmap (avoids excess atomic ops)
PxU32 destIndex = atomicAdd(&gOffset, __popc(validParticleBitmap[w]));
for (PxU32 b=validParticleBitmap[w]; b; b &= b-1)
{
PxU32 index = srcBaseIndex | (__ffs(b)-1);
const PxU32 offset = destIndex*destStride;
// copy position
PxVec3* p = ptrOffset(destPositions, offset);
p->x = srcPositions[index].x;
p->y = srcPositions[index].y;
p->z = srcPositions[index].z;
// update alpha
if (srcLifetimes)
{
PxU8 lifetime = 0;
if(srcLifetimes[index] >= fadingPeriod)
lifetime = 255;
else
{
if(srcLifetimes[index] <= 0.0f)
lifetime = 0;
else
lifetime = static_cast<PxU8>(srcLifetimes[index] * 255 / fadingPeriod);
}
destAlphas[destIndex*4] = lifetime;
}
++destIndex;
}
}
}
}
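// Illustrative note (added, not from the original source): with fadingPeriod = 2.0f, a
// particle whose remaining lifetime is 1.0f gets alpha = 1.0f * 255 / 2.0f = 127.5, which
// the PxU8 cast truncates to 127; lifetimes at or above fadingPeriod clamp to 255 and
// non-positive lifetimes to 0.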
} | e2e8be71da91c5f488549779c1d36e30e571b6a7.cu | // This code contains NVIDIA Confidential Information and is disclosed to you
// under a form of NVIDIA software license agreement provided separately to you.
//
// Notice
// NVIDIA Corporation and its licensors retain all intellectual property and
// proprietary rights in and to this software and related documentation and
// any modifications thereto. Any use, reproduction, disclosure, or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA Corporation is strictly prohibited.
//
// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
//
// Information and code furnished is believed to be accurate and reliable.
// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved.
#include "PxPhysics.h"
#include "PxVec4.h"
#include "PxVec3.h"
#include "PxVec2.h"
#include "PxMat33.h"
#include "PxStrideIterator.h"
namespace physx
{
template <typename T>
__device__ T* ptrOffset(T* p, PxU32 byteOffset)
{
return (T*)((unsigned char*)(p) + byteOffset);
}
#if __CUDA_ARCH__ < 200
__device__ PxU32 gOffset;
#else
__device__ __shared__ PxU32 gOffset;
#endif
// copies orientations and positions to the destination vertex
// buffer based on the validityBitmap state
extern "C" __global__ void updateInstancedVB(
PxVec3* destPositions,
PxVec3* destRotation0,
PxVec3* destRotation1,
PxVec3* destRotation2,
PxU32 destStride,
const PxVec4* srcPositions,
const PxMat33* srcRotations,
const PxU32* validParticleBitmap,
PxU32 validParticleRange)
{
if (!threadIdx.x)
gOffset = 0;
__syncthreads();
if (validParticleRange)
{
for (PxU32 w=threadIdx.x; w <= (validParticleRange) >> 5; w+=blockDim.x)
{
const PxU32 srcBaseIndex = w << 5;
// reserve space in the output vertex buffer based on
// population count of validity bitmap (avoids excess atomic ops)
PxU32 destIndex = atomicAdd(&gOffset, __popc(validParticleBitmap[w]));
for (PxU32 b=validParticleBitmap[w]; b; b &= b-1)
{
const PxU32 index = srcBaseIndex | (__ffs(b)-1);
const PxU32 offset = destIndex*destStride;
*ptrOffset(destRotation0, offset) = srcRotations[index].column0;
*ptrOffset(destRotation1, offset) = srcRotations[index].column1;
*ptrOffset(destRotation2, offset) = srcRotations[index].column2;
PxVec3* p = ptrOffset(destPositions, offset);
p->x = srcPositions[index].x;
p->y = srcPositions[index].y;
p->z = srcPositions[index].z;
++destIndex;
}
}
}
}
// copies positions and alpha to the destination vertex buffer based on
// validity bitmap and particle life times
extern "C" __global__ void updateBillboardVB(
PxVec3* destPositions,
PxU8* destAlphas,
PxU32 destStride,
PxF32 fadingPeriod,
const PxVec4* srcPositions,
const PxReal* srcLifetimes,
const PxU32* validParticleBitmap,
PxU32 validParticleRange)
{
if (!threadIdx.x)
gOffset = 0;
__syncthreads();
if (validParticleRange)
{
for (PxU32 w=threadIdx.x; w <= (validParticleRange) >> 5; w+=blockDim.x)
{
const PxU32 srcBaseIndex = w << 5;
// reserve space in the output vertex buffer based on
// population count of validity bitmap (avoids excess atomic ops)
PxU32 destIndex = atomicAdd(&gOffset, __popc(validParticleBitmap[w]));
for (PxU32 b=validParticleBitmap[w]; b; b &= b-1)
{
PxU32 index = srcBaseIndex | (__ffs(b)-1);
const PxU32 offset = destIndex*destStride;
// copy position
PxVec3* p = ptrOffset(destPositions, offset);
p->x = srcPositions[index].x;
p->y = srcPositions[index].y;
p->z = srcPositions[index].z;
// update alpha
if (srcLifetimes)
{
PxU8 lifetime = 0;
if(srcLifetimes[index] >= fadingPeriod)
lifetime = 255;
else
{
if(srcLifetimes[index] <= 0.0f)
lifetime = 0;
else
lifetime = static_cast<PxU8>(srcLifetimes[index] * 255 / fadingPeriod);
}
destAlphas[destIndex*4] = lifetime;
}
++destIndex;
}
}
}
}
} |
b483420ba602a17f87ae0037016e7c12145d7100.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
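//Worked example (added for illustration, not part of the original assignment text):
//a pixel with R = 200, G = 100, B = 50 maps to
//I = .299f*200 + .587f*100 + .114f*50 = 59.8 + 58.7 + 5.7 = 124.2,
//which is stored as roughly 124 in the one-byte greyscale output.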
#include "utils.h"
#include "device_launch_parameters.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for (int i = idx; i < numCols * numRows; i += step)
{
//index with the loop variable i so each grid-stride iteration converts its own pixel
greyImage[i] = .299f * rgbaImage[i].x + .587f * rgbaImage[i].y + .114f * rgbaImage[i].z;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const int MAX_DIM = 1024;
const dim3 blockSize(MAX_DIM, 1, 1); //TODO
const dim3 gridSize(((size_t)(numRows * numCols) + MAX_DIM - 1) / MAX_DIM, 1, 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
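// Launch-size note (added for illustration): with MAX_DIM = 1024, a 512x512 image has
// 262144 pixels, so the grid above is (262144 + 1023) / 1024 = 256 blocks of 1024 threads;
// for image sizes that are not a multiple of 1024, the surplus threads in the last block
// fail the i < numCols * numRows check and do no work.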
| b483420ba602a17f87ae0037016e7c12145d7100.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
#include "device_launch_parameters.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for (int i = idx; i < numCols * numRows; i += step)
{
//index with the loop variable i so each grid-stride iteration converts its own pixel
greyImage[i] = .299f * rgbaImage[i].x + .587f * rgbaImage[i].y + .114f * rgbaImage[i].z;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const int MAX_DIM = 1024;
const dim3 blockSize(MAX_DIM, 1, 1); //TODO
const dim3 gridSize(((size_t)(numRows * numCols) + MAX_DIM - 1) / MAX_DIM, 1, 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
kmeans.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <kernel.cu>
#include <kmeans.hh>
//using namespace std;
// input from python:
// data, nclusters, tol (threshold), maxiters
// output:
// labels (membership), iterations, centroids (clusters)
KmeansGPU::KmeansGPU(float threshold_,
int cluster_num_,
int npoints_,
int nfeatures_,
int maxiter,
float* data_in
)
{
// tic();
threshold = threshold_;
nclusters = cluster_num_;
npoints = npoints_;
nfeatures = nfeatures_;
nloops = maxiter;
membership_bytes = npoints * sizeof(int);
clusters_bytes = nclusters * nfeatures * sizeof(float);
data_bytes = npoints * nfeatures * sizeof(float);
clusters_members_bytes = nclusters * sizeof(float);
// read data to unified memory
if(data != NULL) hipFree(data);
hipMallocManaged((void **)&data, data_bytes);
hipMemcpy(data, data_in, data_bytes, hipMemcpyHostToDevice);
// allocate membership
if(membership != NULL) hipFree(membership);
if(new_membership != NULL) hipFree(new_membership);
hipMallocManaged((void**)&membership, membership_bytes);
hipMallocManaged((void**)&new_membership, membership_bytes);
// allocate delta
if(delta != NULL) hipFree(delta);
hipMallocManaged((void**)&delta, sizeof(float));
// clusters, new clusters
if(clusters != NULL) hipFree(clusters);
if(new_clusters != NULL) hipFree(new_clusters);
if(new_clusters_members != NULL) hipFree(new_clusters_members);
hipMallocManaged((void**)&clusters, clusters_bytes);
hipMallocManaged((void**)&new_clusters, clusters_bytes);
hipMallocManaged((void**)&new_clusters_members, clusters_members_bytes);
//--------------------------------------------------------------------//
// pick the first [nclusters] samples as the initial clusters
//--------------------------------------------------------------------//
for(int i=0; i<nclusters; i++) {
for(int j=0; j<nfeatures; j++) {
clusters[i * nfeatures + j] = data_in[i * nfeatures + j];
}
}
// gpu kernel configuration
blocksize = 128;
warps_per_blk = blocksize >> 5;
blkDim = dim3(blocksize, 1, 1);
grdDim = dim3(BLK(npoints, blocksize), 1, 1);
/*
printf("\ndata_in\n");
print_array(data_in, npoints, nfeatures);
printf("\nclusters\n");
print_array(clusters, nclusters, nfeatures);
printf("threshold : %f\n", threshold);
printf("nclusters: %d\n", nclusters);
printf("nfeatures: %d\n", nfeatures);
printf("npoints: %d\n", npoints);
printf("maxiter: %d\n", nloops);
printf("warps per blk : %d\n", warps_per_blk);
printf("grd : %d, blk : %d\n", grdDim.x, blkDim.x);
*/
// toc();
}
KmeansGPU::~KmeansGPU() {
Cleanup();
}
void KmeansGPU::print_array(float *array, int row, int col) {
for(int i=0; i<row; i++) {
int startpos = i * col;
for(int j=0; j<col; j++) {
printf("%f ", array[startpos + j]);
}
printf("\n");
}
printf("\n");
}
void KmeansGPU::print_array(int *array, int row, int col) {
for(int i=0; i<row; i++) {
int startpos = i * col;
for(int j=0; j<col; j++) {
printf("%d ", array[startpos + j]);
}
printf("\n");
}
printf("\n");
}
void KmeansGPU::Cleanup() {
if(data != NULL) hipFree(data);
if(membership != NULL) hipFree(membership);
if(new_membership != NULL) hipFree(new_membership);
if(delta != NULL) hipFree(delta);
if(clusters != NULL) hipFree(clusters);
if(new_clusters != NULL) hipFree(new_clusters);
if(new_clusters_members != NULL) hipFree(new_clusters_members);
}
//----------------------------------------------------------------------------//
// Run Kmeans
//----------------------------------------------------------------------------//
void KmeansGPU::Run()
{
if (nclusters > npoints) {
fprintf(stderr, "Can't have more clusters (%d) than the points (%d)!\n",
nclusters, npoints);
Cleanup();
exit(1);
}
//----------------------//
// copy clusters to constant memory
//----------------------//
hipMemcpyToSymbol(clusters_cnst, clusters, clusters_bytes, 0, hipMemcpyHostToDevice);
// the membership is initialized with 0
hipMemset(membership, 0, membership_bytes);
loop_count = 0;
int cnt = 1;
for(int i=0; i<nloops; i++)
{
cnt = Kmeans_gpu();
if(cnt == 0) break;
}
//printf("loop count : %d\n", loop_count);
}
//----------------------------------------------------------------------------//
// Run Kmeans : GPU Kernels
//----------------------------------------------------------------------------//
int KmeansGPU::Kmeans_gpu()
{
loop_count++;
// change to 0 for each iteration
delta[0] = 0.f;
// start from zero for each iteration
hipMemset(new_clusters, 0, clusters_bytes);
hipMemset(new_clusters_members, 0, clusters_members_bytes);
/*
printf("\nnew clusters\n");
print_array(clusters, nclusters, nfeatures);
printf("\nnew cluster member\n");
print_array(new_clusters_members, nclusters, 1);
*/
//hipDeviceSynchronize();
// run gpu kernel
hipLaunchKernelGGL(( kernel_kmeans) , dim3(grdDim), dim3(blkDim) , 0, 0, data,
membership,
npoints,
nfeatures,
nclusters,
warps_per_blk,
delta,
new_membership,
new_clusters,
new_clusters_members);
hipDeviceSynchronize();
/*
printf("\nnew clusters\n");
print_array(clusters, nclusters, nfeatures);
printf("\nnew cluster member\n");
print_array(new_clusters_members, nclusters, 1);
//printf("\nnew membership \n");
//print_array(new_membership, npoints, 1);
printf("\ndelta\n");
print_array(delta, 1, 1);
*/
// update the clusters on the host/cpu
for(int k=0; k<nclusters; k++) {
int startpos = k * nfeatures;
for(int f=0; f<nfeatures; f++) {
clusters[startpos + f] = new_clusters[startpos + f] / new_clusters_members[k];
}
}
/*
printf("\nupdated clusters\n");
print_array(clusters, nclusters, nfeatures);
*/
// check the termination condition
if(delta[0] < threshold) {
return 0;
}
// update clusters in the constant memory
hipMemcpyToSymbol(clusters_cnst, clusters, clusters_bytes, 0, hipMemcpyHostToDevice);
// update membership
hipMemcpy(membership, new_membership, membership_bytes, hipMemcpyDeviceToDevice);
return 1;
}
void KmeansGPU::getData_extern(int *membership_out, int &iterations_out, float *centroids_out)
{
//printf("loop count : %d\n", loop_count);
//printf("\noutput clusters\n");
//print_array(clusters, nclusters, nfeatures);
// copy new_membership (on the cpu) to the output
memcpy(membership_out, new_membership, membership_bytes);
// update iterations
iterations_out = loop_count;
// copy clusters (on the cpu) to the output
memcpy(centroids_out, clusters, clusters_bytes);
}
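// Minimal usage sketch (hypothetical driver code, not from the original source; assumes
// kmeans.hh declares the members used above and kernel.cu provides kernel_kmeans):
// std::vector<float> samples(npoints * nfeatures); // filled by the caller
// KmeansGPU km(1e-4f, /*nclusters=*/8, npoints, nfeatures, /*maxiter=*/300, samples.data());
// km.Run();
// km.getData_extern(labels, iterations, centroids); // int labels[npoints], float centroids[nclusters*nfeatures]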
| kmeans.cuh | /**
* @file kmeans.cuh
* @author Yibo Lin
* @date Mar 2019
*/
#ifndef _DREAMPLACE_INDEPENDENT_SET_MATCHING_KMEANS_CUH
#define _DREAMPLACE_INDEPENDENT_SET_MATCHING_KMEANS_CUH
#include "utility/src/utils.cuh"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
struct KMeansState
{
#ifdef DETERMINISTIC
typedef long long int coordinate_type;
#else
typedef T coordinate_type;
#endif
coordinate_type* centers_x; // To ensure determinism, use fixed point numbers
coordinate_type* centers_y;
T* weights;
int* partition_sizes;
int* node2centers_map;
int num_seeds;
#ifdef DETERMINISTIC
static constexpr T scale = 1024;
#else
static constexpr T scale = 1;
#endif
};
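// Illustrative note (added, not from the original source): with DETERMINISTIC defined the
// center coordinates are accumulated as long long fixed-point values scaled by 1024, e.g.
// an x of 3.25 is stored as 3.25 * 1024 = 3328; the kernels divide by scale when reading
// the centers back, and integer atomics make the accumulation independent of thread order.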
/// @brief A wrapper for atomicAdd
/// As CUDA atomicAdd does not support for long long int, using unsigned long long int is equivalent.
template <typename T>
inline __device__ T atomicAddWrapper(T* address, T value)
{
return atomicAdd(address, value);
}
/// @brief Template specialization for long long int
template <>
inline __device__ long long int atomicAddWrapper<long long int>(long long int* address, long long int value)
{
return atomicAdd((unsigned long long int*)address, (unsigned long long int)value);
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
void init_kmeans(const DetailedPlaceDBType& db, const IndependentSetMatchingStateType& state, KMeansState<typename DetailedPlaceDBType::type>& kmeans_state)
{
typedef typename DetailedPlaceDBType::type T;
allocateCUDA(kmeans_state.centers_x, state.batch_size, typename KMeansState<typename DetailedPlaceDBType::type>::coordinate_type);
allocateCUDA(kmeans_state.centers_y, state.batch_size, typename KMeansState<typename DetailedPlaceDBType::type>::coordinate_type);
allocateCUDA(kmeans_state.weights, state.batch_size, T);
allocateCUDA(kmeans_state.partition_sizes, state.batch_size, int);
allocateCUDA(kmeans_state.node2centers_map, db.num_movable_nodes, int);
}
template <typename T>
void destroy_kmeans(KMeansState<T>& kmeans_state)
{
destroyCUDA(kmeans_state.centers_x);
destroyCUDA(kmeans_state.centers_y);
destroyCUDA(kmeans_state.weights);
destroyCUDA(kmeans_state.partition_sizes);
destroyCUDA(kmeans_state.node2centers_map);
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
void prepare_kmeans(const DetailedPlaceDBType& db, const IndependentSetMatchingStateType& state, KMeansState<typename DetailedPlaceDBType::type>& kmeans_state)
{
kmeans_state.num_seeds = min(state.num_selected / state.set_size, state.batch_size);
// set weights to 1.0
fill_array(kmeans_state.weights, kmeans_state.num_seeds, (typename DetailedPlaceDBType::type)1.0);
}
template <typename T>
__inline__ __device__ T kmeans_distance(T node_x, T node_y, T center_x, T center_y)
{
T distance = fabs(node_x-center_x) + fabs(node_y-center_y);
return distance;
}
template <typename T>
struct ItemWithIndex
{
T value;
int index;
};
template <typename T>
struct ReduceMinOP
{
__host__ __device__ ItemWithIndex<T> operator()(const ItemWithIndex<T>& a, const ItemWithIndex<T>& b) const
{
return (a.value < b.value)? a : b;
}
};
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType, int ThreadsPerBlock=128>
__global__ void kmeans_find_centers_kernel(DetailedPlaceDBType db, IndependentSetMatchingStateType state, KMeansState<typename DetailedPlaceDBType::type> kmeans_state)
{
#ifdef DEBUG
assert(ThreadsPerBlock == blockDim.x);
#endif
assert(blockIdx.x < state.num_selected);
int node_id = state.selected_maximal_independent_set[blockIdx.x];
assert(node_id < db.num_movable_nodes);
auto node_x = db.x[node_id];
auto node_y = db.y[node_id];
typedef cub::BlockReduce<ItemWithIndex<typename DetailedPlaceDBType::type>, ThreadsPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
ItemWithIndex<typename DetailedPlaceDBType::type> thread_data;
thread_data.value = cuda::numeric_limits<typename DetailedPlaceDBType::type>::max();
thread_data.index = cuda::numeric_limits<int>::max();
for (int center_id = threadIdx.x; center_id < kmeans_state.num_seeds; center_id += ThreadsPerBlock)
{
assert(center_id < kmeans_state.num_seeds);
// scale back to floating point numbers
typename DetailedPlaceDBType::type center_x = kmeans_state.centers_x[center_id] / KMeansState<typename DetailedPlaceDBType::type>::scale;
typename DetailedPlaceDBType::type center_y = kmeans_state.centers_y[center_id] / KMeansState<typename DetailedPlaceDBType::type>::scale;
typename DetailedPlaceDBType::type weight = kmeans_state.weights[center_id];
typename DetailedPlaceDBType::type distance = kmeans_distance(node_x, node_y, center_x, center_y)*weight;
if (distance < thread_data.value)
{
thread_data.value = distance;
thread_data.index = center_id;
}
}
assert(thread_data.index < kmeans_state.num_seeds);
__syncthreads();
// Compute the block-wide min for thread 0
ItemWithIndex<typename DetailedPlaceDBType::type> aggregate = BlockReduce(temp_storage).Reduce(thread_data, ReduceMinOP<typename DetailedPlaceDBType::type>(), kmeans_state.num_seeds);
__syncthreads();
if (threadIdx.x == 0)
{
assert(blockIdx.x < state.num_selected);
kmeans_state.node2centers_map[blockIdx.x] = aggregate.index;
}
}
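// Illustrative note (added, not from the original source): the kernel above runs one block
// per selected node; each thread scans a strided subset of the centers, keeps its best
// (distance, index) pair, and cub::BlockReduce with ReduceMinOP collapses the per-thread
// candidates so that thread 0 can record the winning center for its node.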
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
__global__ void init_kmeans_seeds_kernel(DetailedPlaceDBType db, IndependentSetMatchingStateType state, KMeansState<typename DetailedPlaceDBType::type> kmeans_state)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < kmeans_state.num_seeds)
{
assert(db.num_movable_nodes-i-1 < db.num_movable_nodes && db.num_movable_nodes-i-1 >= 0);
int random_number = state.ordered_nodes[db.num_movable_nodes-i-1];
random_number = random_number % state.num_selected;
int node_id = state.selected_maximal_independent_set[random_number];
assert(node_id < db.num_movable_nodes);
// scale up for fixed point numbers
kmeans_state.centers_x[i] = db.x[node_id] * KMeansState<typename DetailedPlaceDBType::type>::scale;
kmeans_state.centers_y[i] = db.y[node_id] * KMeansState<typename DetailedPlaceDBType::type>::scale;
}
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
void init_kmeans_seeds(const DetailedPlaceDBType& db, IndependentSetMatchingStateType& state, KMeansState<typename DetailedPlaceDBType::type>& kmeans_state)
{
init_kmeans_seeds_kernel<<<CPUCeilDiv(kmeans_state.num_seeds, 256), 256>>>(db, state, kmeans_state);
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
__global__ void reset_kmeans_partition_sizes_kernel(DetailedPlaceDBType db, IndependentSetMatchingStateType state, KMeansState<typename DetailedPlaceDBType::type> kmeans_state)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < kmeans_state.num_seeds)
{
kmeans_state.partition_sizes[i] = 0;
}
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
__global__ void compute_kmeans_partition_sizes_kernel(DetailedPlaceDBType db, IndependentSetMatchingStateType state, KMeansState<typename DetailedPlaceDBType::type> kmeans_state)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < state.num_selected)
{
int center_id = kmeans_state.node2centers_map[i];
assert(center_id < kmeans_state.num_seeds);
atomicAdd(kmeans_state.partition_sizes+center_id, 1);
}
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
__global__ void reset_kmeans_centers_kernel(DetailedPlaceDBType db, IndependentSetMatchingStateType state, KMeansState<typename DetailedPlaceDBType::type> kmeans_state)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < kmeans_state.num_seeds)
{
if (kmeans_state.partition_sizes[i])
{
kmeans_state.centers_x[i] = 0;
kmeans_state.centers_y[i] = 0;
}
}
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
__global__ void compute_kmeans_centers_sum_kernel(DetailedPlaceDBType db, IndependentSetMatchingStateType state, KMeansState<typename DetailedPlaceDBType::type> kmeans_state)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < state.num_selected)
{
int node_id = state.selected_maximal_independent_set[i];
int center_id = kmeans_state.node2centers_map[i];
assert(center_id < kmeans_state.num_seeds);
assert(node_id < db.num_movable_nodes);
// scale up for fixed point numbers
atomicAddWrapper<typename KMeansState<typename DetailedPlaceDBType::type>::coordinate_type>(kmeans_state.centers_x+center_id, db.x[node_id] * KMeansState<typename DetailedPlaceDBType::type>::scale);
atomicAddWrapper<typename KMeansState<typename DetailedPlaceDBType::type>::coordinate_type>(kmeans_state.centers_y+center_id, db.y[node_id] * KMeansState<typename DetailedPlaceDBType::type>::scale);
}
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
__global__ void compute_kmeans_centers_div_kernel(DetailedPlaceDBType db, IndependentSetMatchingStateType state, KMeansState<typename DetailedPlaceDBType::type> kmeans_state)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < kmeans_state.num_seeds)
{
int s = kmeans_state.partition_sizes[i];
if (s)
{
kmeans_state.centers_x[i] /= s;
kmeans_state.centers_y[i] /= s;
}
}
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
void kmeans_update_centers(const DetailedPlaceDBType& db, IndependentSetMatchingStateType& state, KMeansState<typename DetailedPlaceDBType::type>& kmeans_state)
{
// reset partition_sizes to 0
reset_kmeans_partition_sizes_kernel<<<CPUCeilDiv(kmeans_state.num_seeds, 256), 256>>>(db, state, kmeans_state);
// compute partition sizes
compute_kmeans_partition_sizes_kernel<<<CPUCeilDiv(state.num_selected, 256), 256>>>(db, state, kmeans_state);
// reset kmeans centers to 0
reset_kmeans_centers_kernel<<<CPUCeilDiv(kmeans_state.num_seeds, 256), 256>>>(db, state, kmeans_state);
// compute kmeans centers sum
compute_kmeans_centers_sum_kernel<<<CPUCeilDiv(state.num_selected, 256), 256>>>(db, state, kmeans_state);
// compute kmeans centers div
compute_kmeans_centers_div_kernel<<<CPUCeilDiv(kmeans_state.num_seeds, 256), 256>>>(db, state, kmeans_state);
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
__global__ void compute_kmeans_weights_kernel(DetailedPlaceDBType db, IndependentSetMatchingStateType state, KMeansState<typename DetailedPlaceDBType::type> kmeans_state)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < kmeans_state.num_seeds)
{
int s = kmeans_state.partition_sizes[i];
auto& w = kmeans_state.weights[i];
if (s > state.set_size)
{
auto ratio = s / (typename DetailedPlaceDBType::type)state.set_size;
ratio = 1.0 + 0.5*log(ratio);
#ifdef DEBUG
printf("partition[%d] weight ratio %g, %d nodes\n", i, ratio, s);
#endif
w *= ratio;
}
}
}
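// Illustrative note (added, not from the original source): a partition holding twice the
// target set_size gives ratio = 2, so its weight is multiplied by 1 + 0.5*log(2) ~ 1.35;
// since the assignment kernel scales distances by this weight, crowded centers look farther
// away in the next pass and nodes drift toward emptier partitions.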
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
void kmeans_update_weights(const DetailedPlaceDBType& db, IndependentSetMatchingStateType& state, KMeansState<typename DetailedPlaceDBType::type>& kmeans_state)
{
compute_kmeans_weights_kernel<<<CPUCeilDiv(kmeans_state.num_seeds, 256), 256>>>(db, state, kmeans_state);
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
void kmeans_collect_sets(const DetailedPlaceDBType& db, IndependentSetMatchingStateType& state, KMeansState<typename DetailedPlaceDBType::type>& kmeans_state)
{
gather(state.num_selected, kmeans_state.num_seeds, state.set_size,
state.selected_maximal_independent_set, kmeans_state.node2centers_map,
state.independent_sets, state.independent_set_sizes);
// statistics
{
std::vector<int> independent_set_sizes (state.batch_size);
checkCUDA(cudaMemcpy(independent_set_sizes.data(), state.independent_set_sizes, sizeof(int)*state.batch_size, cudaMemcpyDeviceToHost));
dreamplacePrint(kDEBUG, "from %d nodes, collect %d sets, avg %d nodes, min/max %d/%d nodes\n",
state.num_selected, state.num_independent_sets,
std::accumulate(independent_set_sizes.begin(), independent_set_sizes.begin()+state.num_independent_sets, 0) / state.num_independent_sets,
*std::min_element(independent_set_sizes.begin(), independent_set_sizes.begin()+state.num_independent_sets),
*std::max_element(independent_set_sizes.begin(), independent_set_sizes.begin()+state.num_independent_sets)
);
}
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
void kmeans_collect_sets_cuda2cpu(const DetailedPlaceDBType& db, IndependentSetMatchingStateType& state, KMeansState<typename DetailedPlaceDBType::type>& kmeans_state)
{
std::vector<int> selected_nodes (state.num_selected);
checkCUDA(cudaMemcpy(selected_nodes.data(), state.selected_maximal_independent_set, sizeof(int)*state.num_selected, cudaMemcpyDeviceToHost));
std::vector<int> node2centers_map (state.num_selected);
checkCUDA(cudaMemcpy(node2centers_map.data(), kmeans_state.node2centers_map, sizeof(int)*state.num_selected, cudaMemcpyDeviceToHost));
std::vector<int> flat_independent_sets (state.batch_size*state.set_size, std::numeric_limits<int>::max());
std::vector<int> independent_set_sizes (state.batch_size, 0);
#if 0
// use nested arrays
std::vector<std::vector<int> > independent_sets (kmeans_state.num_seeds);
for (auto& independent_set : independent_sets)
{
independent_set.reserve(state.set_size);
}
//std::sort(selected_nodes.begin(), selected_nodes.end());
//selected_nodes.resize(std::distance(selected_nodes.begin(), std::unique(selected_nodes.begin(), selected_nodes.end())));
//dreamplaceAssert(selected_nodes.size() == (unsigned int)state.num_selected);
for (int i = 0; i < state.num_selected; ++i)
{
int node_id = selected_nodes.at(i);
int center_id = node2centers_map.at(i);
auto& independent_set = independent_sets.at(center_id);
if (independent_set.size() < (unsigned int)state.set_size)
{
independent_set.push_back(node_id);
}
}
// sort sets according to large to small
std::sort(independent_sets.begin(), independent_sets.end(),
[&](const std::vector<int>& s1, const std::vector<int>& s2){
return s1.size() > s2.size();
});
// prepare flat
for (int i = 0; i < state.num_independent_sets; ++i)
{
independent_set_sizes.at(i) = independent_sets.at(i).size();
dreamplaceAssert(independent_set_sizes.at(i) <= state.set_size);
for (unsigned int j = 0; j < independent_sets.at(i).size(); ++j)
{
flat_independent_sets.at(i*state.set_size + j) = independent_sets.at(i).at(j);
}
}
#endif
// directly use flat array
for (int i = 0; i < state.num_selected; ++i)
{
int node_id = selected_nodes.at(i);
int center_id = node2centers_map.at(i);
int& size = independent_set_sizes.at(center_id);
if (size < state.set_size)
{
flat_independent_sets.at(center_id*state.set_size + size) = node_id;
++size;
}
}
checkCUDA(cudaMemcpy(state.independent_sets, flat_independent_sets.data(), sizeof(int)*state.batch_size*state.set_size, cudaMemcpyHostToDevice));
checkCUDA(cudaMemcpy(state.independent_set_sizes, independent_set_sizes.data(), sizeof(int)*state.batch_size, cudaMemcpyHostToDevice));
// statistics
dreamplacePrint(kDEBUG, "from %d nodes, collect %d sets, avg %d nodes, min/max %d/%d nodes\n",
state.num_selected, state.num_independent_sets,
std::accumulate(independent_set_sizes.begin(), independent_set_sizes.begin()+state.num_independent_sets, 0) / state.num_independent_sets,
*std::min_element(independent_set_sizes.begin(), independent_set_sizes.begin()+state.num_independent_sets),
*std::max_element(independent_set_sizes.begin(), independent_set_sizes.begin()+state.num_independent_sets)
);
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
void partition_kmeans(const DetailedPlaceDBType& db, IndependentSetMatchingStateType& state, KMeansState<typename DetailedPlaceDBType::type>& kmeans_state)
{
prepare_kmeans(db, state, kmeans_state);
init_kmeans_seeds(db, state, kmeans_state);
for (int iter = 0; iter < 2; ++iter)
{
#ifdef DEBUG
std::vector<typename KMeansState<typename DetailedPlaceDBType::type>::coordinate_type> centers_x (kmeans_state.num_seeds);
std::vector<typename KMeansState<typename DetailedPlaceDBType::type>::coordinate_type> centers_y (kmeans_state.num_seeds);
checkCUDA(cudaMemcpy(centers_x.data(), kmeans_state.centers_x, sizeof(typename KMeansState<typename DetailedPlaceDBType::type>::coordinate_type)*kmeans_state.num_seeds, cudaMemcpyDeviceToHost));
checkCUDA(cudaMemcpy(centers_y.data(), kmeans_state.centers_y, sizeof(typename KMeansState<typename DetailedPlaceDBType::type>::coordinate_type)*kmeans_state.num_seeds, cudaMemcpyDeviceToHost));
dreamplacePrint(kDEBUG, "centers[%d] = ", kmeans_state.num_seeds);
for (int i = 0; i < kmeans_state.num_seeds; ++i)
{
dreamplacePrint(kNONE, "(%g, %g) ", (double)centers_x.at(i), (double)centers_y.at(i));
}
dreamplacePrint(kNONE, "\n");
#endif
// for each node, find centers
kmeans_find_centers_kernel<DetailedPlaceDBType, IndependentSetMatchingStateType, 256><<<state.num_selected, 256>>>(db, state, kmeans_state);
// for each center, adjust itself
kmeans_update_centers(db, state, kmeans_state);
// for each partition, update weight
kmeans_update_weights(db, state, kmeans_state);
#ifdef DEBUG
std::vector<int> node2centers_map (state.num_selected);
checkCUDA(cudaMemcpy(node2centers_map.data(), kmeans_state.node2centers_map, sizeof(int)*state.num_selected, cudaMemcpyDeviceToHost));
std::vector<int> independent_set_sizes (kmeans_state.num_seeds, 0);
for (int i = 0; i < state.num_selected; ++i)
{
int center_id = node2centers_map.at(i);
independent_set_sizes.at(center_id) += 1;
}
for (int i = 0; i < kmeans_state.num_seeds; ++i)
{
dreamplacePrint(kNONE, "%d ", independent_set_sizes.at(i));
}
dreamplacePrint(kNONE, "\n");
dreamplacePrint(kDEBUG, "from %d nodes, collect %d sets, avg %d nodes, min/max %d/%d nodes\n",
state.num_selected, kmeans_state.num_seeds,
std::accumulate(independent_set_sizes.begin(), independent_set_sizes.begin()+kmeans_state.num_seeds, 0) / kmeans_state.num_seeds,
*std::min_element(independent_set_sizes.begin(), independent_set_sizes.begin()+kmeans_state.num_seeds),
*std::max_element(independent_set_sizes.begin(), independent_set_sizes.begin()+kmeans_state.num_seeds)
);
#endif
}
state.num_independent_sets = kmeans_state.num_seeds;
//kmeans_collect_sets(db, state, kmeans_state);
kmeans_collect_sets_cuda2cpu(db, state, kmeans_state);
}
DREAMPLACE_END_NAMESPACE
#endif
|
kmeans.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <kernel.cu>
#include <kmeans.hh>
//using namespace std;
// input from python:
// data, nclusters, tol (threshold), maxiters
// output:
// labels (membership), iterations, centroids (clusters)
KmeansGPU::KmeansGPU(float threshold_,
int cluster_num_,
int npoints_,
int nfeatures_,
int maxiter,
float* data_in
)
{
// tic();
threshold = threshold_;
nclusters = cluster_num_;
npoints = npoints_;
nfeatures = nfeatures_;
nloops = maxiter;
membership_bytes = npoints * sizeof(int);
clusters_bytes = nclusters * nfeatures * sizeof(float);
data_bytes = npoints * nfeatures * sizeof(float);
clusters_members_bytes = nclusters * sizeof(float);
// read data to unified memory
if(data != NULL) hipFree(data);
hipMallocManaged((void **)&data, data_bytes);
hipMemcpy(data, data_in, data_bytes, hipMemcpyHostToDevice);
// allocate membership
if(membership != NULL) hipFree(membership);
if(new_membership != NULL) hipFree(new_membership);
hipMallocManaged((void**)&membership, membership_bytes);
hipMallocManaged((void**)&new_membership, membership_bytes);
// allocate delta
if(delta != NULL) hipFree(delta);
hipMallocManaged((void**)&delta, sizeof(float));
// clusters, new clusters
if(clusters != NULL) hipFree(clusters);
if(new_clusters != NULL) hipFree(new_clusters);
if(new_clusters_members != NULL) hipFree(new_clusters_members);
hipMallocManaged((void**)&clusters, clusters_bytes);
hipMallocManaged((void**)&new_clusters, clusters_bytes);
hipMallocManaged((void**)&new_clusters_members, clusters_members_bytes);
//--------------------------------------------------------------------//
// pick the first [nclusters] samples as the initial clusters
//--------------------------------------------------------------------//
for(int i=0; i<nclusters; i++) {
for(int j=0; j<nfeatures; j++) {
clusters[i * nfeatures + j] = data_in[i * nfeatures + j];
}
}
// gpu kernel configuration
blocksize = 128;
warps_per_blk = blocksize >> 5;
blkDim = dim3(blocksize, 1, 1);
grdDim = dim3(BLK(npoints, blocksize), 1, 1);
/*
printf("\ndata_in\n");
print_array(data_in, npoints, nfeatures);
printf("\nclusters\n");
print_array(clusters, nclusters, nfeatures);
printf("threshold : %f\n", threshold);
printf("nclusters: %d\n", nclusters);
printf("nfeatures: %d\n", nfeatures);
printf("npoints: %d\n", npoints);
printf("maxiter: %d\n", nloops);
printf("warps per blk : %d\n", warps_per_blk);
printf("grd : %d, blk : %d\n", grdDim.x, blkDim.x);
*/
// toc();
}
KmeansGPU::~KmeansGPU() {
Cleanup();
}
void KmeansGPU::print_array(float *array, int row, int col) {
for(int i=0; i<row; i++) {
int startpos = i * col;
for(int j=0; j<col; j++) {
printf("%f ", array[startpos + j]);
}
printf("\n");
}
printf("\n");
}
void KmeansGPU::print_array(int *array, int row, int col) {
for(int i=0; i<row; i++) {
int startpos = i * col;
for(int j=0; j<col; j++) {
printf("%d ", array[startpos + j]);
}
printf("\n");
}
printf("\n");
}
void KmeansGPU::Cleanup() {
if(data != NULL) hipFree(data);
if(membership != NULL) hipFree(membership);
if(new_membership != NULL) hipFree(new_membership);
if(delta != NULL) hipFree(delta);
if(clusters != NULL) hipFree(clusters);
if(new_clusters != NULL) hipFree(new_clusters);
if(new_clusters_members != NULL) hipFree(new_clusters_members);
}
//----------------------------------------------------------------------------//
// Run Kmeans
//----------------------------------------------------------------------------//
void KmeansGPU::Run()
{
if (nclusters > npoints) {
fprintf(stderr, "Can't have more clusters (%d) than the points (%d)!\n",
nclusters, npoints);
Cleanup();
exit(1);
}
//----------------------//
// copy clusters to constant memory
//----------------------//
hipMemcpyToSymbol(clusters_cnst, clusters, clusters_bytes, 0, hipMemcpyHostToDevice);
// the membership is initialized with 0
hipMemset(membership, 0, membership_bytes);
loop_count = 0;
int cnt = 1;
for(int i=0; i<nloops; i++)
{
cnt = Kmeans_gpu();
if(cnt == 0) break;
}
//printf("loop count : %d\n", loop_count);
}
//----------------------------------------------------------------------------//
// Run Kmeans : GPU Kernels
//----------------------------------------------------------------------------//
int KmeansGPU::Kmeans_gpu()
{
loop_count++;
// change to 0 for each iteration
delta[0] = 0.f;
// start from zero for each iteration
hipMemset(new_clusters, 0, clusters_bytes);
hipMemset(new_clusters_members, 0, clusters_members_bytes);
/*
printf("\nnew clusters\n");
print_array(clusters, nclusters, nfeatures);
printf("\nnew cluster member\n");
print_array(new_clusters_members, nclusters, 1);
*/
//hipDeviceSynchronize();
// run gpu kernel
hipLaunchKernelGGL(( kernel_kmeans) , dim3(grdDim), dim3(blkDim) , 0, 0, data,
membership,
npoints,
nfeatures,
nclusters,
warps_per_blk,
delta,
new_membership,
new_clusters,
new_clusters_members);
hipDeviceSynchronize();
/*
printf("\nnew clusters\n");
print_array(clusters, nclusters, nfeatures);
printf("\nnew cluster member\n");
print_array(new_clusters_members, nclusters, 1);
//printf("\nnew membership \n");
//print_array(new_membership, npoints, 1);
printf("\ndelta\n");
print_array(delta, 1, 1);
*/
// update the clusters on the host/cpu
for(int k=0; k<nclusters; k++) {
int startpos = k * nfeatures;
for(int f=0; f<nfeatures; f++) {
clusters[startpos + f] = new_clusters[startpos + f] / new_clusters_members[k];
}
}
/*
printf("\nupdated clusters\n");
print_array(clusters, nclusters, nfeatures);
*/
// check the termination condition
if(delta[0] < threshold) {
return 0;
}
// update clusters in the constant memory
hipMemcpyToSymbol(clusters_cnst, clusters, clusters_bytes, 0, hipMemcpyHostToDevice);
// update membership
hipMemcpy(membership, new_membership, membership_bytes, hipMemcpyDeviceToDevice);
return 1;
}
void KmeansGPU::getData_extern(int *membership_out, int &iterations_out, float *centroids_out)
{
//printf("loop count : %d\n", loop_count);
//printf("\noutput clusters\n");
//print_array(clusters, nclusters, nfeatures);
// copy new_membership (on the cpu) to the output
memcpy(membership_out, new_membership, membership_bytes);
// update iterations
iterations_out = loop_count;
// copy clusters (on the cpu) to the output
memcpy(centroids_out, clusters, clusters_bytes);
}
| kmeans.cu | #include <assert.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <kernel.cu>
#include <kmeans.hh>
//using namespace std;
// input from python:
// data, nclusters, tol (threshold), maxiters
// output:
// labels (membership), iterations, centroids (clusters)
KmeansGPU::KmeansGPU(float threshold_,
int cluster_num_,
int npoints_,
int nfeatures_,
int maxiter,
float* data_in
)
{
// tic();
threshold = threshold_;
nclusters = cluster_num_;
npoints = npoints_;
nfeatures = nfeatures_;
nloops = maxiter;
membership_bytes = npoints * sizeof(int);
clusters_bytes = nclusters * nfeatures * sizeof(float);
data_bytes = npoints * nfeatures * sizeof(float);
clusters_members_bytes = nclusters * sizeof(float);
// read data to unified memory
if(data != NULL) cudaFree(data);
cudaMallocManaged((void **)&data, data_bytes);
cudaMemcpy(data, data_in, data_bytes, cudaMemcpyHostToDevice);
// allocate membership
if(membership != NULL) cudaFree(membership);
if(new_membership != NULL) cudaFree(new_membership);
cudaMallocManaged((void**)&membership, membership_bytes);
cudaMallocManaged((void**)&new_membership, membership_bytes);
// allocate delta
if(delta != NULL) cudaFree(delta);
cudaMallocManaged((void**)&delta, sizeof(float));
// clusters, new clusters
if(clusters != NULL) cudaFree(clusters);
if(new_clusters != NULL) cudaFree(new_clusters);
if(new_clusters_members != NULL) cudaFree(new_clusters_members);
cudaMallocManaged((void**)&clusters, clusters_bytes);
cudaMallocManaged((void**)&new_clusters, clusters_bytes);
cudaMallocManaged((void**)&new_clusters_members, clusters_members_bytes);
//--------------------------------------------------------------------//
// pick the first [nclusters] samples as the initial clusters
//--------------------------------------------------------------------//
for(int i=0; i<nclusters; i++) {
for(int j=0; j<nfeatures; j++) {
clusters[i * nfeatures + j] = data_in[i * nfeatures + j];
}
}
// gpu kernel configuration
blocksize = 128;
warps_per_blk = blocksize >> 5;
blkDim = dim3(blocksize, 1, 1);
grdDim = dim3(BLK(npoints, blocksize), 1, 1);
/*
printf("\ndata_in\n");
print_array(data_in, npoints, nfeatures);
printf("\nclusters\n");
print_array(clusters, nclusters, nfeatures);
printf("threshold : %f\n", threshold);
printf("nclusters: %d\n", nclusters);
printf("nfeatures: %d\n", nfeatures);
printf("npoints: %d\n", npoints);
printf("maxiter: %d\n", nloops);
printf("warps per blk : %d\n", warps_per_blk);
printf("grd : %d, blk : %d\n", grdDim.x, blkDim.x);
*/
// toc();
}
KmeansGPU::~KmeansGPU() {
Cleanup();
}
void KmeansGPU::print_array(float *array, int row, int col) {
for(int i=0; i<row; i++) {
int startpos = i * col;
for(int j=0; j<col; j++) {
printf("%f ", array[startpos + j]);
}
printf("\n");
}
printf("\n");
}
void KmeansGPU::print_array(int *array, int row, int col) {
for(int i=0; i<row; i++) {
int startpos = i * col;
for(int j=0; j<col; j++) {
printf("%d ", array[startpos + j]);
}
printf("\n");
}
printf("\n");
}
void KmeansGPU::Cleanup() {
if(data != NULL) cudaFree(data);
if(membership != NULL) cudaFree(membership);
if(new_membership != NULL) cudaFree(new_membership);
if(delta != NULL) cudaFree(delta);
if(clusters != NULL) cudaFree(clusters);
if(new_clusters != NULL) cudaFree(new_clusters);
if(new_clusters_members != NULL) cudaFree(new_clusters_members);
}
//----------------------------------------------------------------------------//
// Run Kmeans
//----------------------------------------------------------------------------//
void KmeansGPU::Run()
{
if (nclusters > npoints) {
fprintf(stderr, "Can't have more clusters (%d) than the points (%d)!\n",
nclusters, npoints);
Cleanup();
exit(1);
}
//----------------------//
// copy clusters to constant memory
//----------------------//
cudaMemcpyToSymbol(clusters_cnst, clusters, clusters_bytes, 0, cudaMemcpyHostToDevice);
// the membership is initialized with 0
cudaMemset(membership, 0, membership_bytes);
loop_count = 0;
int cnt = 1;
for(int i=0; i<nloops; i++)
{
cnt = Kmeans_gpu();
if(cnt == 0) break;
}
//printf("loop count : %d\n", loop_count);
}
//----------------------------------------------------------------------------//
// Run Kmeans : GPU Kernels
//----------------------------------------------------------------------------//
int KmeansGPU::Kmeans_gpu()
{
loop_count++;
// change to 0 for each iteration
delta[0] = 0.f;
// start from zero for each iteration
cudaMemset(new_clusters, 0, clusters_bytes);
cudaMemset(new_clusters_members, 0, clusters_members_bytes);
/*
printf("\nnew clusters\n");
print_array(clusters, nclusters, nfeatures);
printf("\nnew cluster member\n");
print_array(new_clusters_members, nclusters, 1);
*/
//cudaDeviceSynchronize();
// run gpu kernel
kernel_kmeans <<< grdDim, blkDim >>> (data,
membership,
npoints,
nfeatures,
nclusters,
warps_per_blk,
delta,
new_membership,
new_clusters,
new_clusters_members);
cudaDeviceSynchronize();
/*
printf("\nnew clusters\n");
print_array(clusters, nclusters, nfeatures);
printf("\nnew cluster member\n");
print_array(new_clusters_members, nclusters, 1);
//printf("\nnew membership \n");
//print_array(new_membership, npoints, 1);
printf("\ndelta\n");
print_array(delta, 1, 1);
*/
// update the clusters on the host/cpu
for(int k=0; k<nclusters; k++) {
int startpos = k * nfeatures;
for(int f=0; f<nfeatures; f++) {
clusters[startpos + f] = new_clusters[startpos + f] / new_clusters_members[k];
}
}
/*
printf("\nupdated clusters\n");
print_array(clusters, nclusters, nfeatures);
*/
// check the termination condition
if(delta[0] < threshold) {
return 0;
}
// update clusters in the constant memory
cudaMemcpyToSymbol(clusters_cnst, clusters, clusters_bytes, 0, cudaMemcpyHostToDevice);
// update membership
cudaMemcpy(membership, new_membership, membership_bytes, cudaMemcpyDeviceToDevice);
return 1;
}
void KmeansGPU::getData_extern(int *membership_out, int &iterations_out, float *centroids_out)
{
//printf("loop count : %d\n", loop_count);
//printf("\noutput clusters\n");
//print_array(clusters, nclusters, nfeatures);
// copy new_membership (on the cpu) to the output
memcpy(membership_out, new_membership, membership_bytes);
// update iterations
iterations_out = loop_count;
// copy clusters (on the cpu) to the output
memcpy(centroids_out, clusters, clusters_bytes);
}
|
31305c956556a332aab7783a324ae3e157702925.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../inc/RayTracer_Surface.cuh"
// Define
// ...
// Static Data
// ...
// Static Function Prototype
// TODO: rename
__global__ static void setTextureImage_data (Texture_Image *texture, Vec3f *data, int32_t w, int32_t h);
// Operation Handling
__host__ bool Surface::convertToTexture(Texture_Image *texture) {
// check if data and texture exist or not
if (data == nullptr) return false;
if (texture == nullptr) return false;
// TODO: currently can only handle RGB_888
if (bit_pixel != 24) return false;
// malloc for temp space
Vec3f *color_host;
Vec3f *color_device;
color_host = new Vec3f[width * height];
hipMalloc(&color_device, width * height * sizeof(Vec3f));
// texture use RGB_888
fp_t color_r, color_g, color_b;
const int32_t offset_pixel = (bit_pixel + 7) / 8;
const int32_t offset_r = 0;
const int32_t offset_g = 1;
const int32_t offset_b = 2;
for (int i = 0; i < width * height; i++) {
// get RGB and convert it to double
// where the range is [0, 1] (0, 1 is inclusive)
// TODO: currently assume the range of a single channel is [0, 255]
color_r = (fp_t)(data[i * offset_pixel + offset_r]) / 255.0;
color_g = (fp_t)(data[i * offset_pixel + offset_g]) / 255.0;
color_b = (fp_t)(data[i * offset_pixel + offset_b]) / 255.0;
color_host[i] = Vec3f(color_r, color_g, color_b);
}
// memcpy to device and call kernel
hipMemcpy(color_device, color_host, width * height * sizeof(Vec3f), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( setTextureImage_data) , dim3(1), dim3(1) , 0, 0, texture, color_device, width, height);
// free for temp space
delete[] color_host;
return true;
}
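// Illustrative note (added, not from the original source): for the supported 24-bit RGB
// surface, offset_pixel = (24 + 7) / 8 = 3 bytes per pixel, so pixel i reads its channels
// at data[i*3 + 0], data[i*3 + 1] and data[i*3 + 2], each normalized from [0, 255] to [0, 1].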
// Static Function Implementation
__global__ static void setTextureImage_data(Texture_Image *texture, Vec3f *data, int32_t w, int32_t h) {
texture->color = data;
texture->width = w;
texture->height = h;
}
| 31305c956556a332aab7783a324ae3e157702925.cu | #include "../inc/RayTracer_Surface.cuh"
// Define
// ...
// Static Data
// ...
// Static Function Prototype
// TODO: rename
__global__ static void setTextureImage_data (Texture_Image *texture, Vec3f *data, int32_t w, int32_t h);
// Operation Handling
__host__ bool Surface::convertToTexture(Texture_Image *texture) {
// check if data and texture exist or not
if (data == nullptr) return false;
if (texture == nullptr) return false;
// TODO: currently can only handle RGB_888
if (bit_pixel != 24) return false;
// malloc for temp space
Vec3f *color_host;
Vec3f *color_device;
color_host = new Vec3f[width * height];
cudaMalloc(&color_device, width * height * sizeof(Vec3f));
// texture use RGB_888
fp_t color_r, color_g, color_b;
const int32_t offset_pixel = (bit_pixel + 7) / 8;
const int32_t offset_r = 0;
const int32_t offset_g = 1;
const int32_t offset_b = 2;
for (int i = 0; i < width * height; i++) {
// get RGB and convert it to double
// where the range is [0, 1] (0, 1 is inclusive)
// TODO: currently assume the range of a single channel is [0, 255]
color_r = (fp_t)(data[i * offset_pixel + offset_r]) / 255.0;
color_g = (fp_t)(data[i * offset_pixel + offset_g]) / 255.0;
color_b = (fp_t)(data[i * offset_pixel + offset_b]) / 255.0;
color_host[i] = Vec3f(color_r, color_g, color_b);
}
// memcpy to device and call kernel
cudaMemcpy(color_device, color_host, width * height * sizeof(Vec3f), cudaMemcpyHostToDevice);
setTextureImage_data <<< 1, 1 >>> (texture, color_device, width, height);
// free for temp space
delete[] color_host;
return true;
}
// Static Function Implementation
__global__ static void setTextureImage_data(Texture_Image *texture, Vec3f *data, int32_t w, int32_t h) {
texture->color = data;
texture->width = w;
texture->height = h;
}
|
e9a4190d93b8349a56b1990e9dcaa2258bcf60e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: P = M * N.
* Device code.
*/
#ifndef _MUL_KERNEL
#define _MUL_KERNEL
#include <stdio.h>
#include "mul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param M first input matrix in global memory
//! @param N second input matrix in global memory
//! @param P output matrix P = M * N in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernelA1(Matrix M, Matrix N, Matrix P)
{
//Multiply the two matrices
__shared__ float Mds[BLOCKSIZE][BLOCKSIZE];
__shared__ float Nds[BLOCKSIZE][BLOCKSIZE];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float p_sum=0.0f;
for(int m=0; m<(M.width-1)/BLOCKSIZE+1; m++)
{
if((m*BLOCKSIZE+threadIdx.x)<M.width&&row<M.height)
Mds[threadIdx.y][threadIdx.x] = M.elements[row*M.width+(m*BLOCKSIZE+threadIdx.x)];
else
Mds[threadIdx.y][threadIdx.x] = 0.0;
if((m*BLOCKSIZE+threadIdx.y)<N.height&&col<N.width)
Nds[threadIdx.y][threadIdx.x] = N.elements[(m*BLOCKSIZE+threadIdx.y)*N.width+col];
else
Nds[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for(int n=0; n<BLOCKSIZE; n++)
{
p_sum += Mds[threadIdx.y][n]*Nds[n][threadIdx.x];
}
__syncthreads();
}
if(row<P.height&&col<P.width)
P.elements[ row*P.width+col] = p_sum;
}
__global__ void MatrixMulKernelA2(Matrix M, Matrix N, Matrix P)
{
//Multiply the two matrices
__shared__ float Mds[BLOCKSIZE][BLOCKSIZE];
__shared__ float Nds[BLOCKSIZE][BLOCKSIZE];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float p_sum=0.0f;
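// Simplified variant of kernel A1: no boundary padding or guards, so it assumes the matrix
// dimensions are exact multiples of BLOCKSIZE.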
for( int m=0; m<M.width/BLOCKSIZE; m++ )
{
Mds[threadIdx.y][threadIdx.x] = M.elements[row*M.width+(m*BLOCKSIZE+threadIdx.x)];
Nds[threadIdx.y][threadIdx.x] = N.elements[(m*BLOCKSIZE+threadIdx.y)*N.width+col];
__syncthreads();
for(int n=0; n<BLOCKSIZE; n++)
p_sum += Mds[threadIdx.y][n]*Nds[n][threadIdx.x];
__syncthreads();
}
P.elements[ row*P.width+col ] = p_sum;
}
__global__ void MatrixMulKernelB(Matrix M, Matrix N, Matrix P)
{
__shared__ float Mds[bm][bk];
__shared__ float Nds[bk][bn];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float p_sum=0.0f;
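// Rectangular-tile variant: M is staged in bm x bk tiles and N in bk x bn tiles. Note that
// the K-dimension offsets below step by bm, which is only consistent if bm == bk in mul.h
// (assumed here).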
for(int m=0; m<(M.width-1)/bm+1; m++)
{
if( threadIdx.x<bk )
{
if((m*bm+threadIdx.x)<M.width&&row<M.height&&threadIdx.x<bk)
Mds[threadIdx.y][threadIdx.x] = M.elements[row*M.width+(m*bm+threadIdx.x)];
else
Mds[threadIdx.y][threadIdx.x] = 0.0;
}
if((m*bm+threadIdx.y)<N.height&&col<N.width)
Nds[threadIdx.y][threadIdx.x] = N.elements[(m*bm+threadIdx.y)*N.width+col];
else
Nds[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for(int n=0; n<bk; n++)
{
p_sum += Mds[threadIdx.y][n]*Nds[n][threadIdx.x];
}
__syncthreads();
}
if(row<P.height&&col<P.width)
P.elements[ row*P.width+col] = p_sum;
}
#endif
| e9a4190d93b8349a56b1990e9dcaa2258bcf60e3.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: P = M * N.
* Device code.
*/
#ifndef _MUL_KERNEL
#define _MUL_KERNEL
#include <stdio.h>
#include "mul.h"
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication kernels: compute P = M * N on the device
//! @param M, N input matrices in global memory
//! @param P output matrix in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernelA1(Matrix M, Matrix N, Matrix P)
{
//Multiply the two matrices
__shared__ float Mds[BLOCKSIZE][BLOCKSIZE];
__shared__ float Nds[BLOCKSIZE][BLOCKSIZE];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float p_sum=0.0f;
for(int m=0; m<(M.width-1)/BLOCKSIZE+1; m++)
{
if((m*BLOCKSIZE+threadIdx.x)<M.width&&row<M.height)
Mds[threadIdx.y][threadIdx.x] = M.elements[row*M.width+(m*BLOCKSIZE+threadIdx.x)];
else
Mds[threadIdx.y][threadIdx.x] = 0.0;
if((m*BLOCKSIZE+threadIdx.y)<N.height&&col<N.width)
Nds[threadIdx.y][threadIdx.x] = N.elements[(m*BLOCKSIZE+threadIdx.y)*N.width+col];
else
Nds[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for(int n=0; n<BLOCKSIZE; n++)
{
p_sum += Mds[threadIdx.y][n]*Nds[n][threadIdx.x];
}
__syncthreads();
}
if(row<P.height&&col<P.width)
P.elements[ row*P.width+col] = p_sum;
}
__global__ void MatrixMulKernelA2(Matrix M, Matrix N, Matrix P)
{
//Multiply the two matrices
__shared__ float Mds[BLOCKSIZE][BLOCKSIZE];
__shared__ float Nds[BLOCKSIZE][BLOCKSIZE];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float p_sum=0.0f;
for( int m=0; m<M.width/BLOCKSIZE; m++ )
{
Mds[threadIdx.y][threadIdx.x] = M.elements[row*M.width+(m*BLOCKSIZE+threadIdx.x)];
Nds[threadIdx.y][threadIdx.x] = N.elements[(m*BLOCKSIZE+threadIdx.y)*N.width+col];
__syncthreads();
for(int n=0; n<BLOCKSIZE; n++)
p_sum += Mds[threadIdx.y][n]*Nds[n][threadIdx.x];
__syncthreads();
}
P.elements[ row*P.width+col ] = p_sum;
}
__global__ void MatrixMulKernelB(Matrix M, Matrix N, Matrix P)
{
__shared__ float Mds[bm][bk];
__shared__ float Nds[bk][bn];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float p_sum=0.0f;
for(int m=0; m<(M.width-1)/bm+1; m++)
{
if( threadIdx.x<bk )
{
if((m*bm+threadIdx.x)<M.width&&row<M.height&&threadIdx.x<bk)
Mds[threadIdx.y][threadIdx.x] = M.elements[row*M.width+(m*bm+threadIdx.x)];
else
Mds[threadIdx.y][threadIdx.x] = 0.0;
}
if((m*bm+threadIdx.y)<N.height&&col<N.width)
Nds[threadIdx.y][threadIdx.x] = N.elements[(m*bm+threadIdx.y)*N.width+col];
else
Nds[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for(int n=0; n<bk; n++)
{
p_sum += Mds[threadIdx.y][n]*Nds[n][threadIdx.x];
}
__syncthreads();
}
if(row<P.height&&col<P.width)
P.elements[ row*P.width+col] = p_sum;
}
#endif
|
24eee5fe001bd5affd4c09900b2febc1446e1d48.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#define KB 1024
#define MB 1048576
#define ASYNC_V1 1
#define ASYNC_V2 2
#define ASYNC_V3 3
static char *sAsyncMethod[] =
{
"0 (None, Sequential)",
"1 (Async V1)",
"2 (Async V2)",
"3 (Async V3)",
NULL
};
/*************************************************
timeBurningKernel
*************************************************/
#define BURNING 1050
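// Artificial compute load: each thread rewrites its output element BURNING times, keeping
// the GPU busy long enough for copy/compute overlap between streams to be observable.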
__global__ void timeBurningKernel(float *d_a, float *d_r, float factor, int N)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if( gid < N ) {
for( int i = 0 ; i < BURNING ; ++i )
d_r[gid] = d_a[gid] + factor * factor;
}
}
/*************************************************
main
*************************************************/
int main(int argc, char* argv[])
{
if( argc < 2 ) {
puts("usage: ./a.out [Async Mode]");
return 0;
}
const int niterations = 2*3*4; // number of iterations for the loop inside the kernel
int nstreams = 3;
int async_mode = ASYNC_V1; //default Async_V1
float factor = 1.1;
int N = 4*MB;
if( argc > 1 ) async_mode = atoi(argv[1]);
if( async_mode == 0 ) nstreams = 1;
printf("N: %d\n", N );
printf("# BURNING: %d\n", BURNING );
printf("# iterations: %d\n", niterations );
printf("# streams: %d\n", nstreams );
printf("ASync method: %s\n", sAsyncMethod[async_mode]);
//Total size
size_t sz = 128 * MB;
if( (sz/sizeof(float)) < BURNING ) {
printf("error: 'sz' must be larger than BURNING\n");
exit(-1);
}
//Struct for time measure
struct timeval start, end, timer;
// TODO: allocate and initialize an array of stream handles
hipStream_t *streams = (hipStream_t*) malloc(nstreams * sizeof(hipStream_t));
for(int i = 0; i < nstreams; i++) {
hipStreamCreate(&(streams[i]));
}
// Memory allocation for cpu (host)
// Pinned memory (page-locked)
float *h_a[niterations];
float *h_r[niterations];
for( int i = 0 ; i < niterations ; ++i ) {
hipHostMalloc((void**)&h_a[i], sz);
hipHostMalloc((void**)&h_r[i], sz);
}
srand(time(NULL));
for( int j = 0 ; j < niterations ; ++j ) {
for(int i = 0 ; i < (int)(sz/sizeof(float)); i++ ) { // initialize the whole sz-byte buffer
h_a[j][i] = (float)(rand()%100);
h_r[j][i] = 0.;
}
}
//Memory allocation for gpu(device)
float *d_a[nstreams], *d_r[nstreams];
for( int j = 0 ; j < nstreams ; ++j ) {
hipMalloc((void **) &d_a[j], sz );
hipMalloc((void **) &d_r[j], sz );
}
/*************************************************
Launching timeBurningKernel
*************************************************/
size_t dim_threads = 256;
size_t dim_grid = ((N%dim_threads)? N/dim_threads+1 : N/dim_threads);
hipDeviceSynchronize();
gettimeofday(&start, NULL);
if(nstreams == 1 ) {
for( int i =0 ; i < niterations ; i ++ ) {
}
}
else {
if(async_mode == ASYNC_V1 )
{
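// Async V1 (depth-first): each stream gets its H2D copy, kernel launch and D2H copy issued
// back-to-back, so work queued in different streams can overlap.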
for(int i = 0; i < niterations; i += nstreams) {
for(int j = 0; j < nstreams; ++j) {
hipMemcpyAsync(d_a[j], h_a[i+j], sz, hipMemcpyHostToDevice, streams[j]);
hipLaunchKernelGGL(( timeBurningKernel), dim3(dim_grid), dim3(dim_threads), 0, streams[j] , d_a[j], d_r[j], factor, N);
hipMemcpyAsync(h_r[i+j], d_r[j], sz, hipMemcpyDeviceToHost, streams[j]);
}
}
}
else if(async_mode == ASYNC_V2)
{
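// Breadth-first sketch (an assumption, not part of the original skeleton): for each batch of
// nstreams iterations, first issue all hipMemcpyAsync host-to-device copies (one per stream),
// then launch all kernels, then issue all device-to-host copies, grouping the work by
// operation type instead of by stream.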
}
else
// Async V3
{
}
}
hipDeviceSynchronize();
gettimeofday(&end, NULL);
timersub(&end,&start,&timer);
printf("%d, elapsed time: %lf\n", niterations, (timer.tv_usec / 1000.0 + timer.tv_sec *1000.0) );
for(int i=0; i<niterations; i++) {
hipHostFree(h_r[i]);
hipHostFree(h_a[i]);
}
for(int i=0; i<nstreams; i++) {
hipFree(d_r[i]);
hipFree(d_a[i]);
}
return 0;
}
| 24eee5fe001bd5affd4c09900b2febc1446e1d48.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#define KB 1024
#define MB 1048576
#define ASYNC_V1 1
#define ASYNC_V2 2
#define ASYNC_V3 3
static char *sAsyncMethod[] =
{
"0 (None, Sequential)",
"1 (Async V1)",
"2 (Async V2)",
"3 (Async V3)",
NULL
};
/*************************************************
timeBurningKernel
*************************************************/
#define BURNING 1050
__global__ void timeBurningKernel(float *d_a, float *d_r, float factor, int N)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if( gid < N ) {
for( int i = 0 ; i < BURNING ; ++i )
d_r[gid] = d_a[gid] + factor * factor;
}
}
/*************************************************
main
*************************************************/
int main(int argc, char* argv[])
{
if( argc < 2 ) {
puts("usage: ./a.out [Async Mode]");
return 0;
}
const int niterations = 2*3*4; // number of iterations for the loop inside the kernel
int nstreams = 3;
int async_mode = ASYNC_V1; //default Async_V1
float factor = 1.1;
int N = 4*MB;
if( argc > 1 ) async_mode = atoi(argv[1]);
if( async_mode == 0 ) nstreams = 1;
printf("N: %d\n", N );
printf("# BURNING: %d\n", BURNING );
printf("# iterations: %d\n", niterations );
printf("# streams: %d\n", nstreams );
printf("ASync method: %s\n", sAsyncMethod[async_mode]);
//Total size
size_t sz = 128 * MB;
if( (sz/sizeof(float)) < BURNING ) {
printf("error: 'sz' must be larger than BURNING\n");
exit(-1);
}
//Struct for time measure
struct timeval start, end, timer;
// TODO: allocate and initialize an array of stream handles
cudaStream_t *streams = (cudaStream_t*) malloc(nstreams * sizeof(cudaStream_t));
for(int i = 0; i < nstreams; i++) {
cudaStreamCreate(&(streams[i]));
}
// Memory allocation for cpu (host)
// Pinned memory (page-locked)
float *h_a[niterations];
float *h_r[niterations];
for( int i = 0 ; i < niterations ; ++i ) {
cudaMallocHost((void**)&h_a[i], sz);
cudaMallocHost((void**)&h_r[i], sz);
}
srand(time(NULL));
for( int j = 0 ; j < niterations ; ++j ) {
for(int i = 0 ; i < (int)(sz/sizeof(float)); i++ ) { // initialize the whole sz-byte buffer
h_a[j][i] = (float)(rand()%100);
h_r[j][i] = 0.;
}
}
//Memory allocation for gpu(device)
float *d_a[nstreams], *d_r[nstreams];
for( int j = 0 ; j < nstreams ; ++j ) {
cudaMalloc((void **) &d_a[j], sz );
cudaMalloc((void **) &d_r[j], sz );
}
/*************************************************
Launching timeBurningKernel
*************************************************/
size_t dim_threads = 256;
size_t dim_grid = ((N%dim_threads)? N/dim_threads+1 : N/dim_threads);
cudaDeviceSynchronize();
gettimeofday(&start, NULL);
if(nstreams == 1 ) {
for( int i =0 ; i < niterations ; i ++ ) {
}
}
else {
if(async_mode == ASYNC_V1 )
{
for(int i = 0; i < niterations; i += nstreams) {
for(int j = 0; j < nstreams; ++j) {
cudaMemcpyAsync(d_a[j], h_a[i+j], sz, cudaMemcpyHostToDevice, streams[j]);
timeBurningKernel<<< dim_grid, dim_threads, 0, streams[j] >>>(d_a[j], d_r[j], factor, N);
cudaMemcpyAsync(h_r[i+j], d_r[j], sz, cudaMemcpyDeviceToHost, streams[j]);
}
}
}
else if(async_mode == ASYNC_V2)
{
}
else
// Async V3
{
}
}
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
timersub(&end,&start,&timer);
printf("%d, elapsed time: %lf\n", niterations, (timer.tv_usec / 1000.0 + timer.tv_sec *1000.0) );
for(int i=0; i<niterations; i++) {
cudaFreeHost(h_r[i]);
cudaFreeHost(h_a[i]);
}
for(int i=0; i<nstreams; i++) {
cudaFree(d_r[i]);
cudaFree(d_a[i]);
}
return 0;
}
|
311a795d66718569c4244d4e134209641a187170.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_copyPredictorTo20.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int32_t *in = NULL;
hipMalloc(&in, XSIZE*YSIZE);
uint8_t *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
uint32_t stride = 2;
uint32_t *numSamples = NULL;
hipMalloc(&numSamples, XSIZE*YSIZE);
int32_t theOutputPacketBytes = 1;
uint32_t frameLength = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((gpu_copyPredictorTo20), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, stride, numSamples, theOutputPacketBytes, frameLength);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((gpu_copyPredictorTo20), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, stride, numSamples, theOutputPacketBytes, frameLength);
}
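// Timed loop below: the launches are asynchronous and no hipDeviceSynchronize() precedes 'end',
// so the measurement captures enqueue/launch time rather than guaranteed kernel completion.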
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((gpu_copyPredictorTo20), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, stride, numSamples, theOutputPacketBytes, frameLength);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 311a795d66718569c4244d4e134209641a187170.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_copyPredictorTo20.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int32_t *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE);
uint8_t *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
uint32_t stride = 2;
uint32_t *numSamples = NULL;
cudaMalloc(&numSamples, XSIZE*YSIZE);
int32_t theOutputPacketBytes = 1;
uint32_t frameLength = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpu_copyPredictorTo20<<<gridBlock,threadBlock>>>(in,out,stride,numSamples,theOutputPacketBytes,frameLength);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpu_copyPredictorTo20<<<gridBlock,threadBlock>>>(in,out,stride,numSamples,theOutputPacketBytes,frameLength);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpu_copyPredictorTo20<<<gridBlock,threadBlock>>>(in,out,stride,numSamples,theOutputPacketBytes,frameLength);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d05d0c429545327d26afc32aed253c3b57e56b13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "global.h"
#include "util.h"
#include "constants.h"
#include "tree_struct.h"
#include "treecode_kernel.h"
#define PIXEL_SIZE (512)
__global__ void glensing(const float4 * lenses, const size_t nobjects, unsigned int* results, const vars* v) {
const unsigned int lens_idx = blockIdx.y*blockDim.y + blockIdx.x*nobjects; //+ blockIdx.y*blockDim.y;
const unsigned int row = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y*blockDim.y + threadIdx.y;
const float initial_x = (-v->image_scale_x) + row*v->increment_x;
const float initial_y = (-v->image_scale_y) + col*v->increment_y;
const unsigned int uniform_box = sqrtf((float)v->rpp);
float start_x, start_y, dx, dy;
unsigned int it, noise_x, noise_y;
size_t k;
float dist;
// TODO: Perform multiple ray calculations simultaneously
// BUG: A larger value (> 100) of rpp results in a completely blank image
for(it = 0; it < v->rpp; ++it) {
noise_x = it % uniform_box;
noise_y = it / uniform_box;
start_x = initial_x + noise_x * v->increment_x / uniform_box;
start_y = initial_y + noise_y * v->increment_y / uniform_box;
dx = (1-v->gamma_)*start_x - v->kappa_c*start_x;
dy = (1+v->gamma_)*start_y - v->kappa_c*start_y;
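// Accumulate the deflection from every point-mass lens: alpha ~ w * (x - x_lens) / |x - x_lens|^2,
// where 'dist' below holds the squared distance to the lens.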
for(k = 0; k < nobjects; ++k) {
dist = pow(start_x - lenses[lens_idx + k].x, 2) + pow(start_y - lenses[lens_idx + k].y, 2);
dx -= lenses[lens_idx + k].w * (start_x - lenses[lens_idx + k].x) / dist;
dy -= lenses[lens_idx + k].w * (start_y - lenses[lens_idx + k].y) / dist;
}
const float source_scale = v->source_scale;
if ((dx >= -source_scale/2) && (dx <= source_scale/2) &&
(dy >= -source_scale/2) && (dy <= source_scale/2)) {
int px = (dx + source_scale/2) / (source_scale/PIXEL_SIZE);
int py = PIXEL_SIZE - (dy + source_scale/2) / (source_scale/PIXEL_SIZE);
atomicAdd(&results[py * PIXEL_SIZE + px], 1);
//results[py * PIXEL_SIZE + px] += 1;
}
}
}
| d05d0c429545327d26afc32aed253c3b57e56b13.cu | #include "global.h"
#include "util.h"
#include "constants.h"
#include "tree_struct.h"
#include "treecode_kernel.h"
#define PIXEL_SIZE (512)
__global__ void glensing(const float4 * lenses, const size_t nobjects, unsigned int* results, const vars* v) {
const unsigned int lens_idx = blockIdx.y*blockDim.y + blockIdx.x*nobjects; //+ blockIdx.y*blockDim.y;
const unsigned int row = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y*blockDim.y + threadIdx.y;
const float initial_x = (-v->image_scale_x) + row*v->increment_x;
const float initial_y = (-v->image_scale_y) + col*v->increment_y;
const unsigned int uniform_box = sqrtf((float)v->rpp);
float start_x, start_y, dx, dy;
unsigned int it, noise_x, noise_y;
size_t k;
float dist;
// TODO: Perform multiple ray calculations simultaneously
// BUG: A larger value (> 100) of rpp results in a completely blank image
for(it = 0; it < v->rpp; ++it) {
noise_x = it % uniform_box;
noise_y = it / uniform_box;
start_x = initial_x + noise_x * v->increment_x / uniform_box;
start_y = initial_y + noise_y * v->increment_y / uniform_box;
dx = (1-v->gamma_)*start_x - v->kappa_c*start_x;
dy = (1+v->gamma_)*start_y - v->kappa_c*start_y;
for(k = 0; k < nobjects; ++k) {
dist = pow(start_x - lenses[lens_idx + k].x, 2) + pow(start_y - lenses[lens_idx + k].y, 2);
dx -= lenses[lens_idx + k].w * (start_x - lenses[lens_idx + k].x) / dist;
dy -= lenses[lens_idx + k].w * (start_y - lenses[lens_idx + k].y) / dist;
}
const float source_scale = v->source_scale;
if ((dx >= -source_scale/2) && (dx <= source_scale/2) &&
(dy >= -source_scale/2) && (dy <= source_scale/2)) {
int px = (dx + source_scale/2) / (source_scale/PIXEL_SIZE);
int py = PIXEL_SIZE - (dy + source_scale/2) / (source_scale/PIXEL_SIZE);
atomicAdd(&results[py * PIXEL_SIZE + px], 1);
//results[py * PIXEL_SIZE + px] += 1;
}
}
}
|
cf7afaee53e488116ca5cec1ef02643edc7c8dfa.hip | // !!! This is a file automatically generated by hipify!!!
//fd_grid.cpp
/*
* Created by: Min Basnet
* 2020.April.13
* Kathmandu, Nepal
*/
#include <iostream>
#include <cmath>
#include "fd_medium.cuh"
#include "INIReader.cuh"
//#include "../ext/inih/INIReader.cuh"
#include "util.hip"
void FDMedium::parse_configuration(const char* configuration_file_relative_path) {
//
std::cout << "parsing FD MEDIUM from file '"
<< configuration_file_relative_path << "'." << std::endl;
INIReader reader(configuration_file_relative_path);
if (reader.ParseError() < 0) {
std::cout << "Can't load .ini file\n";
exit(1);
}
// Default medium Static Material Parameters
scalar_rho = reader.GetReal("medium", "scalar_rho", 0.0);
scalar_vp = reader.GetReal("medium", "scalar_vp", 0.0);
scalar_vs = reader.GetReal("medium", "scalar_vs", 0.0);
velocity_to_lame(); // compute scalar Lame parameters
std::cout << std::endl << "Medium:" << std::endl;
std::cout << "density = " << scalar_rho << std::endl;
std::cout << "Vp = " << scalar_vp << ", Vs = " << scalar_vs << std::endl;
std::cout << "Lamé's parameters: lambda = " << scalar_lam << ", mu = " << scalar_mu << std::endl;
std::cout << std::endl;
}
void FDMedium::velocity_to_lame() {
// Computes the scalar Lame parameters (lambda, mu) from the scalar velocities and density
scalar_mu = real_sim(pow(scalar_vs, 2)) * scalar_rho;
scalar_lam = real_sim(pow(scalar_vp, 2)) * scalar_rho - 2.0 * scalar_mu;
}
void FDMedium::stagger_over_grid(int dimz, int dimx) {
// Prepares Lame parameters and density over staggered grid
// dimx and dimz should include grids required for pml and fd order
for (int ix = 0; ix < dimx; ix++) {
for (int iz = 0; iz < dimz; iz++) {
rho[iz][ix] = scalar_rho;
lam[iz][ix] = scalar_lam;
mu[iz][ix] = scalar_mu;
}
}
}
void FDMedium::average_parameters(int dimz, int dimx) {
// Harmonic 2D average of mu and
// arithmetic 1D average of rho
for (int ix = 0; ix < dimx; ix++) {
for (int iz = 0; iz < dimz; iz++) {
// Harmonic average for mu
mu_zx[iz][ix] = 4.0 / ((1.0 / mu[iz][ix]) +
(1.0 / mu[iz][ix + 1]) + (1.0 / mu[iz + 1][ix]) + (1.0 / mu[iz + 1][ix + 1]));
if ((mu[iz][ix] == 0.0) || (mu[iz][ix + 1] == 0.0) || (mu[iz + 1][ix] == 0.0) || (mu[iz + 1][ix + 1] == 0.0)) {
mu_zx[iz][ix] = 0.0;
}
// Arithmetic average of rho;
// the averages are stored inverted (as reciprocals) for computational efficiency
rho_zp[iz][ix] = 1.0 / (0.5 * (rho[iz][ix] + rho[iz + 1][ix]));
rho_xp[iz][ix] = 1.0 / (0.5 * (rho[iz][ix] + rho[iz][ix + 1]));
if ((rho[iz][ix] < 1e-4) && (rho[iz + 1][ix] < 1e-4)) {
rho_zp[iz][ix] = 0.0;
}
if ((rho[iz][ix] < 1e-4) && (rho[iz][ix + 1] < 1e-4)) {
rho_xp[iz][ix] = 0.0;
}
}
}
}
void FDMedium::allocate_medium(int dimz, int dimx) {
// Allocates the basic material parameters (Lame parameters)
allocate_array_2d(rho, dimz, dimx);
allocate_array_2d(lam, dimz, dimx);
allocate_array_2d(mu, dimz, dimx);
}
void FDMedium::allocate_medium_av(int dimz, int dimx) {
// Allocates the averaged material parameters
allocate_array_2d(rho_zp, dimz, dimx);
allocate_array_2d(rho_xp, dimz, dimx);
allocate_array_2d(mu_zx, dimz, dimx);
}
void FDMedium::deallocate_medium(int dimz) {
// Deallocates the basic material parameters (Lame parameters)
deallocate_array_2d(rho, dimz);
deallocate_array_2d(lam, dimz);
deallocate_array_2d(mu, dimz);
}
void FDMedium::deallocate_medium_av(int dimz) {
// Deallocates the averaged material parameters
deallocate_array_2d(rho_zp, dimz);
deallocate_array_2d(rho_xp, dimz);
deallocate_array_2d(mu_zx, dimz);
} | cf7afaee53e488116ca5cec1ef02643edc7c8dfa.cu | //fd_grid.cpp
/*
* Created by: Min Basnet
* 2020.April.13
* Kathmandu, Nepal
*/
#include <iostream>
#include <cmath>
#include "fd_medium.cuh"
#include "INIReader.cuh"
//#include "../ext/inih/INIReader.cuh"
#include "util.cu"
void FDMedium::parse_configuration(const char* configuration_file_relative_path) {
//
std::cout << "parsing FD MEDIUM from file '"
<< configuration_file_relative_path << "'." << std::endl;
INIReader reader(configuration_file_relative_path);
if (reader.ParseError() < 0) {
std::cout << "Can't load .ini file\n";
exit(1);
}
// Default medium Static Material Parameters
scalar_rho = reader.GetReal("medium", "scalar_rho", 0.0);
scalar_vp = reader.GetReal("medium", "scalar_vp", 0.0);
scalar_vs = reader.GetReal("medium", "scalar_vs", 0.0);
velocity_to_lame(); // compute scalar Lame parameters
std::cout << std::endl << "Medium:" << std::endl;
std::cout << "density = " << scalar_rho << std::endl;
std::cout << "Vp = " << scalar_vp << ", Vs = " << scalar_vs << std::endl;
std::cout << "Lamé's parameters: lambda = " << scalar_lam << ", mu = " << scalar_mu << std::endl;
std::cout << std::endl;
}
void FDMedium::velocity_to_lame() {
// Computes the scalar Lame parameters (lambda, mu) from the scalar velocities and density
scalar_mu = real_sim(pow(scalar_vs, 2)) * scalar_rho;
scalar_lam = real_sim(pow(scalar_vp, 2)) * scalar_rho - 2.0 * scalar_mu;
}
void FDMedium::stagger_over_grid(int dimz, int dimx) {
// Prepares Lame parameters and density over staggered grid
// dimx and dimz should include grids required for pml and fd order
for (int ix = 0; ix < dimx; ix++) {
for (int iz = 0; iz < dimz; iz++) {
rho[iz][ix] = scalar_rho;
lam[iz][ix] = scalar_lam;
mu[iz][ix] = scalar_mu;
}
}
}
void FDMedium::average_parameters(int dimz, int dimx) {
// Harmonic 2D average of mu and
// arithmetic 1D average of rho
for (int ix = 0; ix < dimx; ix++) {
for (int iz = 0; iz < dimz; iz++) {
// Harmonic average for mu
mu_zx[iz][ix] = 4.0 / ((1.0 / mu[iz][ix]) +
(1.0 / mu[iz][ix + 1]) + (1.0 / mu[iz + 1][ix]) + (1.0 / mu[iz + 1][ix + 1]));
if ((mu[iz][ix] == 0.0) || (mu[iz][ix + 1] == 0.0) || (mu[iz + 1][ix] == 0.0) || (mu[iz + 1][ix + 1] == 0.0)) {
mu_zx[iz][ix] = 0.0;
}
// Arithmetic average of rho;
// the averages are stored inverted (as reciprocals) for computational efficiency
rho_zp[iz][ix] = 1.0 / (0.5 * (rho[iz][ix] + rho[iz + 1][ix]));
rho_xp[iz][ix] = 1.0 / (0.5 * (rho[iz][ix] + rho[iz][ix + 1]));
if ((rho[iz][ix] < 1e-4) && (rho[iz + 1][ix] < 1e-4)) {
rho_zp[iz][ix] = 0.0;
}
if ((rho[iz][ix] < 1e-4) && (rho[iz][ix + 1] < 1e-4)) {
rho_xp[iz][ix] = 0.0;
}
}
}
}
void FDMedium::allocate_medium(int dimz, int dimx) {
// Allocates the basic material parameters (Lame parameters)
allocate_array_2d(rho, dimz, dimx);
allocate_array_2d(lam, dimz, dimx);
allocate_array_2d(mu, dimz, dimx);
}
void FDMedium::allocate_medium_av(int dimz, int dimx) {
// Allocates the averaged material parameters
allocate_array_2d(rho_zp, dimz, dimx);
allocate_array_2d(rho_xp, dimz, dimx);
allocate_array_2d(mu_zx, dimz, dimx);
}
void FDMedium::deallocate_medium(int dimz) {
// Deallocates the basic material parameters (Lame parameters)
deallocate_array_2d(rho, dimz);
deallocate_array_2d(lam, dimz);
deallocate_array_2d(mu, dimz);
}
void FDMedium::deallocate_medium_av(int dimz) {
// Deallocates the averaged material parameters
deallocate_array_2d(rho_zp, dimz);
deallocate_array_2d(rho_xp, dimz);
deallocate_array_2d(mu_zx, dimz);
} |
b8ea4ba4553b9a92c7f4a3be79c4ec766b00db92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2019, NVIDIA Corporation.
* Copyright 2019, Blender Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// clang-format off
#include "kernel/kernel_compat_optix.h"
#include "util/util_atomic.h"
#include "kernel/kernel_types.h"
#include "kernel/kernel_globals.h"
#include "../cuda/kernel_cuda_image.h" // Texture lookup uses normal CUDA intrinsics
#include "kernel/kernel_path.h"
#include "kernel/kernel_bake.h"
// clang-format on
template<typename T> ccl_device_forceinline T *get_payload_ptr_0()
{
return (T *)(((uint64_t)optixGetPayload_1() << 32) | optixGetPayload_0());
}
template<typename T> ccl_device_forceinline T *get_payload_ptr_2()
{
return (T *)(((uint64_t)optixGetPayload_3() << 32) | optixGetPayload_2());
}
template<bool always = false> ccl_device_forceinline uint get_object_id()
{
#ifdef __OBJECT_MOTION__
// Always get the the instance ID from the TLAS
// There might be a motion transform node between TLAS and BLAS which does not have one
uint object = optixGetInstanceIdFromHandle(optixGetTransformListHandle(0));
#else
uint object = optixGetInstanceId();
#endif
// Choose between always returning object ID or only for instances
if (always)
// Can just remove the high bit since instance always contains object ID
return object & 0x7FFFFF;
// Set to OBJECT_NONE if this is not an instanced object
else if (object & 0x800000)
object = OBJECT_NONE;
return object;
}
extern "C" __global__ void __raygen__kernel_optix_path_trace()
{
KernelGlobals kg; // Allocate stack storage for common data
const uint3 launch_index = optixGetLaunchIndex();
// Keep threads for same pixel together to improve occupancy of warps
uint pixel_offset = launch_index.x / __params.tile.num_samples;
uint sample_offset = launch_index.x % __params.tile.num_samples;
kernel_path_trace(&kg,
__params.tile.buffer,
__params.tile.start_sample + sample_offset,
__params.tile.x + pixel_offset,
__params.tile.y + launch_index.y,
__params.tile.offset,
__params.tile.stride);
}
#ifdef __BAKING__
extern "C" __global__ void __raygen__kernel_optix_bake()
{
KernelGlobals kg;
const ShaderParams &p = __params.shader;
kernel_bake_evaluate(&kg,
p.input,
p.output,
(ShaderEvalType)p.type,
p.filter,
p.sx + optixGetLaunchIndex().x,
p.offset,
p.sample);
}
#endif
extern "C" __global__ void __raygen__kernel_optix_displace()
{
KernelGlobals kg;
const ShaderParams &p = __params.shader;
kernel_displace_evaluate(&kg, p.input, p.output, p.sx + optixGetLaunchIndex().x);
}
extern "C" __global__ void __raygen__kernel_optix_background()
{
KernelGlobals kg;
const ShaderParams &p = __params.shader;
kernel_background_evaluate(&kg, p.input, p.output, p.sx + optixGetLaunchIndex().x);
}
extern "C" __global__ void __miss__kernel_optix_miss()
{
// 'kernel_path_lamp_emission' checks intersection distance, so need to set it even on a miss
optixSetPayload_0(__float_as_uint(optixGetRayTmax()));
optixSetPayload_5(PRIMITIVE_NONE);
}
extern "C" __global__ void __anyhit__kernel_optix_local_hit()
{
#ifdef __BVH_LOCAL__
const uint object = get_object_id<true>();
if (object != optixGetPayload_4() /* local_object */) {
// Only intersect with matching object
return optixIgnoreIntersection();
}
int hit = 0;
uint *const lcg_state = get_payload_ptr_0<uint>();
LocalIntersection *const local_isect = get_payload_ptr_2<LocalIntersection>();
if (lcg_state) {
const uint max_hits = optixGetPayload_5();
for (int i = min(max_hits, local_isect->num_hits) - 1; i >= 0; --i) {
if (optixGetRayTmax() == local_isect->hits[i].t) {
return optixIgnoreIntersection();
}
}
hit = local_isect->num_hits++;
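// Reservoir-style selection: once more than max_hits hits have been seen, overwrite a
// uniformly chosen slot so every hit has an equal chance of being kept.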
if (local_isect->num_hits > max_hits) {
hit = lcg_step_uint(lcg_state) % local_isect->num_hits;
if (hit >= max_hits) {
return optixIgnoreIntersection();
}
}
}
else {
if (local_isect->num_hits && optixGetRayTmax() > local_isect->hits[0].t) {
// Record closest intersection only
// Do not terminate ray here, since there is no guarantee about distance ordering in any-hit
return optixIgnoreIntersection();
}
local_isect->num_hits = 1;
}
Intersection *isect = &local_isect->hits[hit];
isect->t = optixGetRayTmax();
isect->prim = optixGetPrimitiveIndex();
isect->object = get_object_id();
isect->type = kernel_tex_fetch(__prim_type, isect->prim);
const float2 barycentrics = optixGetTriangleBarycentrics();
isect->u = 1.0f - barycentrics.y - barycentrics.x;
isect->v = barycentrics.x;
// Record geometric normal
const uint tri_vindex = kernel_tex_fetch(__prim_tri_index, isect->prim);
const float3 tri_a = float4_to_float3(kernel_tex_fetch(__prim_tri_verts, tri_vindex + 0));
const float3 tri_b = float4_to_float3(kernel_tex_fetch(__prim_tri_verts, tri_vindex + 1));
const float3 tri_c = float4_to_float3(kernel_tex_fetch(__prim_tri_verts, tri_vindex + 2));
local_isect->Ng[hit] = normalize(cross(tri_b - tri_a, tri_c - tri_a));
// Continue tracing (without this the trace call would return after the first hit)
optixIgnoreIntersection();
#endif
}
extern "C" __global__ void __anyhit__kernel_optix_shadow_all_hit()
{
#ifdef __SHADOW_RECORD_ALL__
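// Payload layout used here: payloads 0/1 hold the 64-bit pointer to the Intersection array,
// payload 2 is the running hit count, payload 3 the transparent-hit limit, payload 4 the
// visibility mask, and payload 5 is set when traversal stops on an opaque hit or the limit.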
const uint prim = optixGetPrimitiveIndex();
# ifdef __VISIBILITY_FLAG__
const uint visibility = optixGetPayload_4();
if ((kernel_tex_fetch(__prim_visibility, prim) & visibility) == 0) {
return optixIgnoreIntersection();
}
# endif
// Offset into array with num_hits
Intersection *const isect = get_payload_ptr_0<Intersection>() + optixGetPayload_2();
isect->t = optixGetRayTmax();
isect->prim = prim;
isect->object = get_object_id();
isect->type = kernel_tex_fetch(__prim_type, prim);
if (optixIsTriangleHit()) {
const float2 barycentrics = optixGetTriangleBarycentrics();
isect->u = 1.0f - barycentrics.y - barycentrics.x;
isect->v = barycentrics.x;
}
# ifdef __HAIR__
else {
const float u = __uint_as_float(optixGetAttribute_0());
isect->u = u;
isect->v = __uint_as_float(optixGetAttribute_1());
// Filter out curve endcaps
if (u == 0.0f || u == 1.0f) {
return optixIgnoreIntersection();
}
}
# endif
# ifdef __TRANSPARENT_SHADOWS__
// Detect if this surface has a shader with transparent shadows
if (!shader_transparent_shadow(NULL, isect) || optixGetPayload_2() >= optixGetPayload_3()) {
# endif
// This is an opaque hit or the hit limit has been reached, abort traversal
optixSetPayload_5(true);
return optixTerminateRay();
# ifdef __TRANSPARENT_SHADOWS__
}
optixSetPayload_2(optixGetPayload_2() + 1); // num_hits++
// Continue tracing
optixIgnoreIntersection();
# endif
#endif
}
extern "C" __global__ void __anyhit__kernel_optix_visibility_test()
{
uint visibility = optixGetPayload_4();
#ifdef __VISIBILITY_FLAG__
const uint prim = optixGetPrimitiveIndex();
if ((kernel_tex_fetch(__prim_visibility, prim) & visibility) == 0) {
return optixIgnoreIntersection();
}
#endif
#ifdef __HAIR__
if (!optixIsTriangleHit()) {
// Filter out curve endcaps
const float u = __uint_as_float(optixGetAttribute_0());
if (u == 0.0f || u == 1.0f) {
return optixIgnoreIntersection();
}
}
#endif
// Shadow ray early termination
if (visibility & PATH_RAY_SHADOW_OPAQUE) {
return optixTerminateRay();
}
}
extern "C" __global__ void __closesthit__kernel_optix_hit()
{
optixSetPayload_0(__float_as_uint(optixGetRayTmax())); // Intersection distance
optixSetPayload_3(optixGetPrimitiveIndex());
optixSetPayload_4(get_object_id());
// Can be PRIMITIVE_TRIANGLE and PRIMITIVE_MOTION_TRIANGLE or curve type and segment index
optixSetPayload_5(kernel_tex_fetch(__prim_type, optixGetPrimitiveIndex()));
if (optixIsTriangleHit()) {
const float2 barycentrics = optixGetTriangleBarycentrics();
optixSetPayload_1(__float_as_uint(1.0f - barycentrics.y - barycentrics.x));
optixSetPayload_2(__float_as_uint(barycentrics.x));
}
else {
optixSetPayload_1(optixGetAttribute_0()); // Same as 'optixGetCurveParameter()'
optixSetPayload_2(optixGetAttribute_1());
}
}
#ifdef __HAIR__
ccl_device_inline void optix_intersection_curve(const uint prim, const uint type)
{
const uint object = get_object_id<true>();
const uint visibility = optixGetPayload_4();
float3 P = optixGetObjectRayOrigin();
float3 dir = optixGetObjectRayDirection();
// The direction is not normalized by default, but the curve intersection routine expects that
float len;
dir = normalize_len(dir, &len);
# ifdef __OBJECT_MOTION__
const float time = optixGetRayTime();
# else
const float time = 0.0f;
# endif
Intersection isect;
isect.t = optixGetRayTmax();
// Transform maximum distance into object space
if (isect.t != FLT_MAX)
isect.t *= len;
if (curve_intersect(NULL, &isect, P, dir, visibility, object, prim, time, type)) {
optixReportIntersection(isect.t / len,
type & PRIMITIVE_ALL,
__float_as_int(isect.u), // Attribute_0
__float_as_int(isect.v)); // Attribute_1
}
}
extern "C" __global__ void __intersection__curve_ribbon()
{
const uint prim = optixGetPrimitiveIndex();
const uint type = kernel_tex_fetch(__prim_type, prim);
if (type & (PRIMITIVE_CURVE_RIBBON | PRIMITIVE_MOTION_CURVE_RIBBON)) {
optix_intersection_curve(prim, type);
}
}
extern "C" __global__ void __intersection__curve_all()
{
const uint prim = optixGetPrimitiveIndex();
const uint type = kernel_tex_fetch(__prim_type, prim);
optix_intersection_curve(prim, type);
}
#endif
#ifdef __KERNEL_DEBUG__
extern "C" __global__ void __exception__kernel_optix_exception()
{
printf("Unhandled exception occurred: code %d!\n", optixGetExceptionCode());
}
#endif
| b8ea4ba4553b9a92c7f4a3be79c4ec766b00db92.cu | /*
* Copyright 2019, NVIDIA Corporation.
* Copyright 2019, Blender Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// clang-format off
#include "kernel/kernel_compat_optix.h"
#include "util/util_atomic.h"
#include "kernel/kernel_types.h"
#include "kernel/kernel_globals.h"
#include "../cuda/kernel_cuda_image.h" // Texture lookup uses normal CUDA intrinsics
#include "kernel/kernel_path.h"
#include "kernel/kernel_bake.h"
// clang-format on
template<typename T> ccl_device_forceinline T *get_payload_ptr_0()
{
return (T *)(((uint64_t)optixGetPayload_1() << 32) | optixGetPayload_0());
}
template<typename T> ccl_device_forceinline T *get_payload_ptr_2()
{
return (T *)(((uint64_t)optixGetPayload_3() << 32) | optixGetPayload_2());
}
template<bool always = false> ccl_device_forceinline uint get_object_id()
{
#ifdef __OBJECT_MOTION__
// Always get the instance ID from the TLAS
// There might be a motion transform node between TLAS and BLAS which does not have one
uint object = optixGetInstanceIdFromHandle(optixGetTransformListHandle(0));
#else
uint object = optixGetInstanceId();
#endif
// Choose between always returning object ID or only for instances
if (always)
// Can just remove the high bit since instance always contains object ID
return object & 0x7FFFFF;
// Set to OBJECT_NONE if this is not an instanced object
else if (object & 0x800000)
object = OBJECT_NONE;
return object;
}
extern "C" __global__ void __raygen__kernel_optix_path_trace()
{
KernelGlobals kg; // Allocate stack storage for common data
const uint3 launch_index = optixGetLaunchIndex();
// Keep threads for same pixel together to improve occupancy of warps
uint pixel_offset = launch_index.x / __params.tile.num_samples;
uint sample_offset = launch_index.x % __params.tile.num_samples;
kernel_path_trace(&kg,
__params.tile.buffer,
__params.tile.start_sample + sample_offset,
__params.tile.x + pixel_offset,
__params.tile.y + launch_index.y,
__params.tile.offset,
__params.tile.stride);
}
#ifdef __BAKING__
extern "C" __global__ void __raygen__kernel_optix_bake()
{
KernelGlobals kg;
const ShaderParams &p = __params.shader;
kernel_bake_evaluate(&kg,
p.input,
p.output,
(ShaderEvalType)p.type,
p.filter,
p.sx + optixGetLaunchIndex().x,
p.offset,
p.sample);
}
#endif
extern "C" __global__ void __raygen__kernel_optix_displace()
{
KernelGlobals kg;
const ShaderParams &p = __params.shader;
kernel_displace_evaluate(&kg, p.input, p.output, p.sx + optixGetLaunchIndex().x);
}
extern "C" __global__ void __raygen__kernel_optix_background()
{
KernelGlobals kg;
const ShaderParams &p = __params.shader;
kernel_background_evaluate(&kg, p.input, p.output, p.sx + optixGetLaunchIndex().x);
}
extern "C" __global__ void __miss__kernel_optix_miss()
{
// 'kernel_path_lamp_emission' checks intersection distance, so need to set it even on a miss
optixSetPayload_0(__float_as_uint(optixGetRayTmax()));
optixSetPayload_5(PRIMITIVE_NONE);
}
extern "C" __global__ void __anyhit__kernel_optix_local_hit()
{
#ifdef __BVH_LOCAL__
const uint object = get_object_id<true>();
if (object != optixGetPayload_4() /* local_object */) {
// Only intersect with matching object
return optixIgnoreIntersection();
}
int hit = 0;
uint *const lcg_state = get_payload_ptr_0<uint>();
LocalIntersection *const local_isect = get_payload_ptr_2<LocalIntersection>();
if (lcg_state) {
const uint max_hits = optixGetPayload_5();
for (int i = min(max_hits, local_isect->num_hits) - 1; i >= 0; --i) {
if (optixGetRayTmax() == local_isect->hits[i].t) {
return optixIgnoreIntersection();
}
}
hit = local_isect->num_hits++;
if (local_isect->num_hits > max_hits) {
hit = lcg_step_uint(lcg_state) % local_isect->num_hits;
if (hit >= max_hits) {
return optixIgnoreIntersection();
}
}
}
else {
if (local_isect->num_hits && optixGetRayTmax() > local_isect->hits[0].t) {
// Record closest intersection only
// Do not terminate ray here, since there is no guarantee about distance ordering in any-hit
return optixIgnoreIntersection();
}
local_isect->num_hits = 1;
}
Intersection *isect = &local_isect->hits[hit];
isect->t = optixGetRayTmax();
isect->prim = optixGetPrimitiveIndex();
isect->object = get_object_id();
isect->type = kernel_tex_fetch(__prim_type, isect->prim);
const float2 barycentrics = optixGetTriangleBarycentrics();
isect->u = 1.0f - barycentrics.y - barycentrics.x;
isect->v = barycentrics.x;
// Record geometric normal
const uint tri_vindex = kernel_tex_fetch(__prim_tri_index, isect->prim);
const float3 tri_a = float4_to_float3(kernel_tex_fetch(__prim_tri_verts, tri_vindex + 0));
const float3 tri_b = float4_to_float3(kernel_tex_fetch(__prim_tri_verts, tri_vindex + 1));
const float3 tri_c = float4_to_float3(kernel_tex_fetch(__prim_tri_verts, tri_vindex + 2));
local_isect->Ng[hit] = normalize(cross(tri_b - tri_a, tri_c - tri_a));
// Continue tracing (without this the trace call would return after the first hit)
optixIgnoreIntersection();
#endif
}
extern "C" __global__ void __anyhit__kernel_optix_shadow_all_hit()
{
#ifdef __SHADOW_RECORD_ALL__
const uint prim = optixGetPrimitiveIndex();
# ifdef __VISIBILITY_FLAG__
const uint visibility = optixGetPayload_4();
if ((kernel_tex_fetch(__prim_visibility, prim) & visibility) == 0) {
return optixIgnoreIntersection();
}
# endif
// Offset into array with num_hits
Intersection *const isect = get_payload_ptr_0<Intersection>() + optixGetPayload_2();
isect->t = optixGetRayTmax();
isect->prim = prim;
isect->object = get_object_id();
isect->type = kernel_tex_fetch(__prim_type, prim);
if (optixIsTriangleHit()) {
const float2 barycentrics = optixGetTriangleBarycentrics();
isect->u = 1.0f - barycentrics.y - barycentrics.x;
isect->v = barycentrics.x;
}
# ifdef __HAIR__
else {
const float u = __uint_as_float(optixGetAttribute_0());
isect->u = u;
isect->v = __uint_as_float(optixGetAttribute_1());
// Filter out curve endcaps
if (u == 0.0f || u == 1.0f) {
return optixIgnoreIntersection();
}
}
# endif
# ifdef __TRANSPARENT_SHADOWS__
// Detect if this surface has a shader with transparent shadows
if (!shader_transparent_shadow(NULL, isect) || optixGetPayload_2() >= optixGetPayload_3()) {
# endif
// This is an opaque hit or the hit limit has been reached, abort traversal
optixSetPayload_5(true);
return optixTerminateRay();
# ifdef __TRANSPARENT_SHADOWS__
}
optixSetPayload_2(optixGetPayload_2() + 1); // num_hits++
// Continue tracing
optixIgnoreIntersection();
# endif
#endif
}
extern "C" __global__ void __anyhit__kernel_optix_visibility_test()
{
uint visibility = optixGetPayload_4();
#ifdef __VISIBILITY_FLAG__
const uint prim = optixGetPrimitiveIndex();
if ((kernel_tex_fetch(__prim_visibility, prim) & visibility) == 0) {
return optixIgnoreIntersection();
}
#endif
#ifdef __HAIR__
if (!optixIsTriangleHit()) {
// Filter out curve endcaps
const float u = __uint_as_float(optixGetAttribute_0());
if (u == 0.0f || u == 1.0f) {
return optixIgnoreIntersection();
}
}
#endif
// Shadow ray early termination
if (visibility & PATH_RAY_SHADOW_OPAQUE) {
return optixTerminateRay();
}
}
extern "C" __global__ void __closesthit__kernel_optix_hit()
{
optixSetPayload_0(__float_as_uint(optixGetRayTmax())); // Intersection distance
optixSetPayload_3(optixGetPrimitiveIndex());
optixSetPayload_4(get_object_id());
// Can be PRIMITIVE_TRIANGLE and PRIMITIVE_MOTION_TRIANGLE or curve type and segment index
optixSetPayload_5(kernel_tex_fetch(__prim_type, optixGetPrimitiveIndex()));
if (optixIsTriangleHit()) {
const float2 barycentrics = optixGetTriangleBarycentrics();
optixSetPayload_1(__float_as_uint(1.0f - barycentrics.y - barycentrics.x));
optixSetPayload_2(__float_as_uint(barycentrics.x));
}
else {
optixSetPayload_1(optixGetAttribute_0()); // Same as 'optixGetCurveParameter()'
optixSetPayload_2(optixGetAttribute_1());
}
}
#ifdef __HAIR__
ccl_device_inline void optix_intersection_curve(const uint prim, const uint type)
{
const uint object = get_object_id<true>();
const uint visibility = optixGetPayload_4();
float3 P = optixGetObjectRayOrigin();
float3 dir = optixGetObjectRayDirection();
// The direction is not normalized by default, but the curve intersection routine expects that
float len;
dir = normalize_len(dir, &len);
# ifdef __OBJECT_MOTION__
const float time = optixGetRayTime();
# else
const float time = 0.0f;
# endif
Intersection isect;
isect.t = optixGetRayTmax();
// Transform maximum distance into object space
if (isect.t != FLT_MAX)
isect.t *= len;
if (curve_intersect(NULL, &isect, P, dir, visibility, object, prim, time, type)) {
optixReportIntersection(isect.t / len,
type & PRIMITIVE_ALL,
__float_as_int(isect.u), // Attribute_0
__float_as_int(isect.v)); // Attribute_1
}
}
extern "C" __global__ void __intersection__curve_ribbon()
{
const uint prim = optixGetPrimitiveIndex();
const uint type = kernel_tex_fetch(__prim_type, prim);
if (type & (PRIMITIVE_CURVE_RIBBON | PRIMITIVE_MOTION_CURVE_RIBBON)) {
optix_intersection_curve(prim, type);
}
}
extern "C" __global__ void __intersection__curve_all()
{
const uint prim = optixGetPrimitiveIndex();
const uint type = kernel_tex_fetch(__prim_type, prim);
optix_intersection_curve(prim, type);
}
#endif
#ifdef __KERNEL_DEBUG__
extern "C" __global__ void __exception__kernel_optix_exception()
{
printf("Unhandled exception occurred: code %d!\n", optixGetExceptionCode());
}
#endif
|
68042e86491507c9d6e423d4378b912de759814e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* cuBLASTP - Fine-Grained Parallelization of Protein Sequence Search on CPU+GPU
* Version 0.1 (beta)
*
* (c) 2015 Virginia Polytechnic Institute & State University (Virginia Tech)
*
* This version of cuBLASTP is licensed for non-commercial use only,
* as specified in LICENSE files in licensing directory. For all other use
* contact [email protected]
*
* Developer: Shucai Xiao
*
*/
//#define maximum(a,b) (((a) > (b)) ? (a) : (b))
#define TARGET 0
__device__ __constant__ int2 scoreMatrixC[1640];
__device__ __constant__ unsigned char querySequenceC[40000];
struct gappedExtensionParameters {
int2 semiGappedOpenGap;
int2 semiGappedExtendGap;
int4 semiGappedExtensionN;
int4 semiGappedDropoffIncrease;
unsigned char encoding_numCodes;
};
__device__ struct dpResults semiGappedScoring_dpBeforeSeedGPU(unsigned char* subject, struct PSSMatrixFP PSSMatrixFP,
struct coordinate seed, int4 dropoff, int4 *bestRow, int4 *insertQrow,
unsigned char encoding_numCodes, int4 openGapPenalty, int4 extensionGapPenalty,
int4 semiGappedExtensionN);
__device__ struct dpResults semiGappedScoring_dpAfterSeedGPU(unsigned char* subject, struct PSSMatrixFP PSSMatrixFP,
int4 dropoff, int4 subjectLength, int4 *bestRow, int4 *insertQrow,
unsigned char encoding_numCodes, int4 openGapPenalty, int4 extensionGapPenalty,
int4 semiGappedExtensionN, int4 queryOffset);
__device__ int4 semiGappedScoring_scoreGPU(struct ungappedExtension *ungappedExtension,
struct PSSMatrixFP PSSMatrix,
int4 subjectSize,
unsigned char *subject,
int4 dropoff,
int4 semiGappedDropoffIncrease,
int4 *bestRow,
int4 *insertQRow,
unsigned char encoding_numCodes,
int4 openGapPenalty,
int4 extensionGapPenalty,
int4 semiGappedExtensionN);
__device__ struct PSSMatrixFP PSSMatrixFP_chop(struct PSSMatrixFP PSSMatrixFP, int4 amount, unsigned char encoding_numCodes)
{
struct PSSMatrixFP chopped;
chopped.matrix = PSSMatrixFP.matrix + amount * encoding_numCodes;
chopped.queryCodes = PSSMatrixFP.queryCodes + amount;
chopped.bytePackedCodes = PSSMatrixFP.bytePackedCodes + amount;
chopped.xorCodes = PSSMatrixFP.xorCodes + amount;
chopped.length = PSSMatrixFP.length - amount;
chopped.highestValue = PSSMatrixFP.highestValue;
chopped.lowestValue = PSSMatrixFP.lowestValue;
chopped.strandLength = PSSMatrixFP.strandLength - amount;
if (chopped.strandLength < 0)
chopped.strandLength = 0;
return chopped;
}
__device__ struct coordinate ungappedExtension_findProteinSeed1(
struct ungappedExtension* ungappedExtension,
struct PSSMatrixFP PSSMatrixFP,
unsigned char* subject,
unsigned char encoding_numCodes)
{
char *queryWindowStart, *queryWindowEnd;
unsigned char *subjectWindowStart, *subjectWindowEnd;
char* bestQueryPosition;
unsigned char* bestSubjectPosition;
int4 bestSegmentScore;
int4 nominalScore, count;
struct coordinate seed;
if (ungappedExtension->end.queryOffset - ungappedExtension->start.queryOffset < 11)
{
// The seed point is the middle of the extension
seed.queryOffset = (ungappedExtension->end.queryOffset +
ungappedExtension->start.queryOffset) / 2;
seed.subjectOffset = (ungappedExtension->end.subjectOffset +
ungappedExtension->start.subjectOffset) / 2;
}
else
{
// Else find the highest scoring length-11 segment of the ungapped extension
queryWindowStart = queryWindowEnd = PSSMatrixFP.matrix + ungappedExtension->start.queryOffset * encoding_numCodes;
subjectWindowStart = subjectWindowEnd = subject + ungappedExtension->start.subjectOffset;
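// The PSSM stores encoding_numCodes scores per query position (row-major), so advancing one
// query position moves the pointer by encoding_numCodes; each window position is scored by
// indexing the query row with the subject residue code.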
// Find initial score for first 11 positions
nominalScore = 0;
count = 0;
while (count < 11)
{
nominalScore += queryWindowEnd[*subjectWindowEnd];
queryWindowEnd += encoding_numCodes;
subjectWindowEnd++;
count++;
}
queryWindowEnd -= encoding_numCodes;
subjectWindowEnd--;
// By default first-11 positions gives best position and score
bestQueryPosition = queryWindowStart;
bestSubjectPosition = subjectWindowStart;
bestSegmentScore = nominalScore;
// Now slide the window across and record the better scores/positions
while (queryWindowEnd < PSSMatrixFP.matrix + ungappedExtension->end.queryOffset * encoding_numCodes)
{
// Advance window end, add new position value
queryWindowEnd += encoding_numCodes;
subjectWindowEnd++;
nominalScore += queryWindowEnd[*subjectWindowEnd];
// Remove position that we will leave behind
nominalScore -= queryWindowStart[*subjectWindowStart];
queryWindowStart += encoding_numCodes;
subjectWindowStart++;
// Check if best window position yet
if (nominalScore > bestSegmentScore)
{
bestSegmentScore = nominalScore;
bestQueryPosition = queryWindowStart;
bestSubjectPosition = subjectWindowStart;
}
}
// Middle of the best window is the seed position
seed.queryOffset = (bestQueryPosition - PSSMatrixFP.matrix) / encoding_numCodes + 5;
seed.subjectOffset = bestSubjectPosition + 5 - subject;
}
return seed;
}
__device__ void alignments_pruneRegion(struct ungappedExtension *ungappedExtension,
struct ungappedExtension *curExtension,
int ungappedExtensionNum)
{
int i;
for (i = 0; i < ungappedExtensionNum; i++)
{
if (ungappedExtension[i].status != ungappedExtension_DELETED)
{
if (ungappedExtension[i].start.queryOffset >= curExtension->start.queryOffset &&
ungappedExtension[i].end.queryOffset <= curExtension->end.queryOffset &&
ungappedExtension[i].start.subjectOffset >= curExtension->start.subjectOffset &&
ungappedExtension[i].end.subjectOffset <= curExtension->end.subjectOffset &&
ungappedExtension[i].nominalScore <= curExtension->nominalScore &&
ungappedExtension + i != curExtension)
{
ungappedExtension[i].status = ungappedExtension_DELETED;
}
}
}
return;
}
// Perform semi-gapped alignment with restricted insertion
__global__ void semiGappedScoring_kernel(struct sequenceDataFP *sequenceDataFP,
unsigned char *sequences,
struct PSSMatrixFP *PSSMatrix,
char *PSSMatrixBody,
struct gappedExtensionParameters *parameters,
int *startLocArray,
int *ungappedExtensionNumArray,
int alignmentNum,
struct ungappedExtension *ungappedExtensions,
int4 *bestScores,
int4 *numGoodExtensions,
int4 *numSemiGapping,
int4 *orderArray,
int4 *bestRowAll,
int4 *insertQRowAll,
int4 dropoff,
int4 nominalR1cutoff)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= alignmentNum)
{
return;
}
int2 openGapPenalty = parameters->semiGappedOpenGap;
int2 extensionGapPenalty = parameters->semiGappedExtendGap;
int4 semiGappedExtensionN = parameters->semiGappedExtensionN;
int4 semiGappedDropoffIncrease = parameters->semiGappedDropoffIncrease;
unsigned char encoding_numCodes = parameters->encoding_numCodes;
PSSMatrix->matrix = PSSMatrixBody + encoding_numCodes;
//int4 ungappedStartLoc = startLocArray[orderArray[tid]];
int4 ungappedStartLoc = startLocArray[tid];
int4 sequenceCount;
struct ungappedExtension *ungappedExtension;
sequenceCount = ungappedExtensions[ungappedStartLoc].sequenceCount;
int4 subjectOffset = sequenceDataFP[sequenceCount].offset;
unsigned char *subject = sequences + subjectOffset;
int4 *bestRow = bestRowAll + subjectOffset;
int4 *insertQRow = insertQRowAll + subjectOffset;
uint4 subjectLength = sequenceDataFP[sequenceCount].sequenceLength;
int4 bestScore = 0;
int numSemiGappingDevice = 0;
uint4 goodExtensionNo, ungappedExtensionNo;
goodExtensionNo = 0;
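// Each thread handles one alignment: for every surviving ungapped extension it locates a seed
// if necessary, runs restricted-insertion semi-gapped DP on both sides of the seed, keeps
// extensions scoring at least nominalR1cutoff, and prunes extensions contained in a
// higher-scoring one.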
//for (ungappedExtensionNo = 0; ungappedExtensionNo < ungappedExtensionNumArray[orderArray[tid]]; ungappedExtensionNo++)
for (ungappedExtensionNo = 0; ungappedExtensionNo < ungappedExtensionNumArray[tid]; ungappedExtensionNo++)
{
ungappedExtension = &ungappedExtensions[ungappedStartLoc + ungappedExtensionNo];
if (ungappedExtension->status != ungappedExtension_DELETED)
{
if (ungappedExtension->seed.queryOffset == -1 &&
ungappedExtension->seed.subjectOffset == -1)
{
ungappedExtension->seed = ungappedExtension_findProteinSeed1(ungappedExtension, *PSSMatrix, subject, encoding_numCodes);
}
numSemiGappingDevice++;
ungappedExtension->nominalScore = semiGappedScoring_scoreGPU(ungappedExtension, *PSSMatrix,
subjectLength, subject, dropoff, semiGappedDropoffIncrease,
bestRow, insertQRow, encoding_numCodes, openGapPenalty,
extensionGapPenalty, semiGappedExtensionN);
ungappedExtension->status = ungappedExtension_SEMIGAPPED;
if (ungappedExtension->nominalScore >= nominalR1cutoff)
{
if (ungappedExtension->nominalScore > bestScore)
{
bestScore = ungappedExtension->nominalScore;
}
}
else
{
ungappedExtension->status = ungappedExtension_DELETED;
}
goodExtensionNo++;
//alignments_pruneRegion(&ungappedExtensions[ungappedStartLoc], ungappedExtension, ungappedExtensionNumArray[orderArray[tid]]);
alignments_pruneRegion(&ungappedExtensions[ungappedStartLoc], ungappedExtension, ungappedExtensionNumArray[tid]);
}
}
numSemiGapping[tid] = numSemiGappingDevice;
//bestScores[orderArray[tid]] = bestScore;
bestScores[tid] = bestScore;
if (bestScore >= nominalR1cutoff)
{
numGoodExtensions[tid] += goodExtensionNo;
}
return;
}
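// Note on scratch-space layout for the kernel above: each thread handles one
// alignment (indexed by tid) and takes its dynamic-programming rows from the shared
// pools bestRowAll and insertQRowAll at the subject's global offset
// (bestRowAll + subjectOffset). Assuming each alignment refers to a distinct subject
// sequence, and subject sequences occupy disjoint offset ranges in the packed
// sequence buffer, threads therefore work on non-overlapping scratch regions and
// need no synchronisation with each other.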
__device__ int4 semiGappedScoring_scoreGPU(struct ungappedExtension *ungappedExtension,
struct PSSMatrixFP PSSMatrix,
int4 subjectSize,
unsigned char *subject,
int4 dropoff,
int4 semiGappedDropoffIncrease,
int4 *bestRow,
int4 *insertQRow,
unsigned char encoding_numCodes,
int4 openGapPenalty,
int4 extensionGapPenalty,
int4 semiGappedExtensionN)
{
struct coordinate seed;
unsigned char *choppedSubject;
struct PSSMatrixFP choppedPSSMatrix;
int4 choppedSubjectSize;
struct dpResults beforeDpResults, afterDpResults;
int4 strandOffset = 0;
// Perform dynamic programming for points before the seed
seed = ungappedExtension->seed;
if (seed.queryOffset > PSSMatrix.strandLength)
{
// If query position is in the second strand, remove first strand from PSSM
strandOffset = PSSMatrix.strandLength;
seed.queryOffset -= PSSMatrix.strandLength;
PSSMatrix = PSSMatrixFP_chop(PSSMatrix, PSSMatrix.strandLength, encoding_numCodes);
}
else
{
// Otherwise remove second strand
PSSMatrix.length = PSSMatrix.strandLength;
}
beforeDpResults = semiGappedScoring_dpBeforeSeedGPU(subject, PSSMatrix,
seed, dropoff + semiGappedDropoffIncrease, bestRow,
insertQRow, encoding_numCodes, openGapPenalty,
extensionGapPenalty, semiGappedExtensionN);
// Chop the start off the query and subject so they begin at the seed
choppedPSSMatrix = PSSMatrixFP_chop(PSSMatrix, seed.queryOffset, encoding_numCodes);
choppedSubject = subject + seed.subjectOffset;
choppedSubjectSize = subjectSize - seed.subjectOffset;
// Perform dynamic programming for points after the seed
afterDpResults = semiGappedScoring_dpAfterSeedGPU(choppedSubject, choppedPSSMatrix,
dropoff + semiGappedDropoffIncrease, choppedSubjectSize, bestRow,
insertQRow, encoding_numCodes, openGapPenalty,
extensionGapPenalty, semiGappedExtensionN, seed.queryOffset);
// Re-adjust result change due to chopping subject/query and strand adjustment
afterDpResults.best.queryOffset += seed.queryOffset + strandOffset;
afterDpResults.best.subjectOffset += seed.subjectOffset;
beforeDpResults.best.queryOffset += strandOffset;
// Associate best scoring start and end points with the ungapped extension
ungappedExtension->start = beforeDpResults.best;
ungappedExtension->end = afterDpResults.best;
// Determine score by combining score from the two traces, and the match score at
// the seed position
return beforeDpResults.bestScore + afterDpResults.bestScore +
choppedPSSMatrix.matrix[choppedSubject[0]];
}
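// The semi-gapped score returned above is assembled from three parts:
//
//   score = bestScore(DP before the seed) + bestScore(DP after the seed)
//         + PSSM[seed.queryOffset][subject[seed.subjectOffset]]
//
// where the final term is the match score of the seed cell itself, read here as
// choppedPSSMatrix.matrix[choppedSubject[0]]. The two DP halves are scored
// independently, each with an X-drop cutoff of dropoff + semiGappedDropoffIncrease.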
// Perform dynamic programming to explore possible start points and alignments that end at
// the given seed and find the best score
__device__ struct dpResults semiGappedScoring_dpBeforeSeedGPU(unsigned char* subject, struct PSSMatrixFP PSSMatrixFP,
struct coordinate seed, int4 dropoff, int4 *bestRowCur, int4 *insertQrowCur,
unsigned char encoding_numCodes, int4 openGapPenalty, int4 extensionGapPenalty,
int4 semiGappedExtensionN)
{
int4 queryPosition, bestQueryPosition;
int4 matrixColumn;
unsigned char *rowDropoff, *columnDropoff;
int4 *bestRow, *insertQrow;
unsigned char *subjectPosition, *bestSubjectPosition, *startSubjectPosition;
int4 bestScore = 0;
int4 insertS, rowOffset;
int4 subjectDistance;
int4 oldBest, match, previousOldBest;
unsigned char rightOfDropoff;
int4 queryCount, subjectCount;
struct dpResults dpResults;
bestSubjectPosition = subjectPosition = startSubjectPosition = subject + seed.subjectOffset - 1;
bestQueryPosition = queryPosition = seed.queryOffset - 1;
// Initialize row pointers
rowOffset = (subjectPosition - subject);
bestRow = bestRowCur + rowOffset;
insertQrow = insertQrowCur + rowOffset;
// Set initial row dropoff and column dropoff
rowDropoff = subject;
columnDropoff = subject + seed.subjectOffset;
// Using first column of query matrix
matrixColumn = queryPosition + 1;
// -----FIRST ROW-----
// -----FIRST CELL-----
// Set M value for bottom-right cell
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)];
// M must be the best
*bestRow = match;
// Only gap opens possible
//*insertQrow = insertS = match - parameters_semiGappedOpenGap;
*insertQrow = insertS = match - openGapPenalty;
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
subjectDistance = 0;
subjectPosition--; bestRow--; insertQrow--;
// ----- REMAINING CELLS -----
// For each remaining column in the bottom row, scanning from right-to-left
while (subjectPosition >= subject)
{
// Set value for M
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)]
- openGapPenalty - subjectDistance * extensionGapPenalty;
// Determine the best of M and Iy
if (match > insertS)
{
*bestRow = match;
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
}
else
{
*bestRow = insertS;
// Since M <= Iy, new Iy must derive from Iy
insertS -= extensionGapPenalty;
}
// Set DUMMY Ix value, which should never be used
*insertQrow = constants_gappedExtensionDummyValue;
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
rowDropoff = subjectPosition;
// And stop processing row
break;
}
subjectPosition--; bestRow--; insertQrow--;
subjectDistance++;
}
// Start queryCount at N. Only allow insertS for every Nth row when queryCount
// reaches 0
queryCount = semiGappedExtensionN;
// -----REMAINING ROWS-----
//while (queryPosition > PSSMatrixFP.matrix && rowDropoff < columnDropoff)
while (queryPosition > 0 && rowDropoff < columnDropoff)
{
queryPosition--;
queryCount--;
subjectPosition = columnDropoff - 1;
// Determine subjectCount for the initial subjectPosition. It is used to only allow
// insertQ when (subjectOffset % semiGappedExtensionN) == 0
subjectCount = (int4)(startSubjectPosition - subjectPosition) % semiGappedExtensionN;
if (subjectCount)
subjectCount = semiGappedExtensionN - subjectCount;
// Reset row pointers to start of rows
rowOffset = (subjectPosition - subject);
bestRow = bestRowCur + rowOffset;
insertQrow = insertQrowCur + rowOffset;
// Using next column of query matrix
matrixColumn = queryPosition + 1;
// ************ All rows we are not allowing insertS
if (queryCount)
{
// ** No insertQ allowed this column, this cell will only get a DUMMY score
if (subjectCount)
{
previousOldBest = *bestRow;
*bestRow = constants_gappedExtensionDummyValue;
// Score at this cell is below dropoff
columnDropoff = subjectPosition;
rightOfDropoff = 1;
}
// ** We are allowing insertQ this column
else
{
// -----FAR RIGHT CELL-----
// Record some old values
previousOldBest = *bestRow;
// Set Ix value
*bestRow = *insertQrow;
*insertQrow -= extensionGapPenalty;
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
rightOfDropoff = 1;
}
else
{
// We are left of the column dropoff for this row
rightOfDropoff = 0;
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition--; bestRow--; insertQrow--; subjectCount--;
// -----CELLS RIGHT OF ROW DROPOFF-----
while (subjectPosition >= rowDropoff)
{
// ** We are not allowing insertQ this column
if (subjectCount)
{
// Calculate new M value, which is also the best
oldBest = *bestRow;
match = *bestRow = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
}
// We are allowing insertQ this column
else
{
// Calculate new M value
oldBest = *bestRow;
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// Determine the best of M and Ix
if (match > *insertQrow)
{
*bestRow = match;
// Calculate new Ix
*insertQrow = maximum(match - openGapPenalty,
*insertQrow - extensionGapPenalty);
}
else
{
*bestRow = *insertQrow;
// Since M <= Ix, new Ix must derive from Ix
*insertQrow -= extensionGapPenalty;
}
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition--; bestRow--; insertQrow--; subjectCount--;
}
// -----SINGLE CELL LEFT OF ROW DROPOFF -----
if (!(bestScore > previousOldBest + dropoff) && (subjectPosition >= subject))
{
// Set value for best
*bestRow = match = previousOldBest + scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)];
// Set DUMMY values for Ix
*insertQrow = constants_gappedExtensionDummyValue;
if (match + dropoff >= bestScore)
{
// Record dropoff position
rowDropoff = subjectPosition;
}
}
}
// ************ Every Nth row we allow insertS
else
{
// -----FAR RIGHT CELL-----
// ** No insertQ allowed this column, this cell will only get a DUMMY score
if (subjectCount)
{
previousOldBest = *bestRow;
*bestRow = constants_gappedExtensionDummyValue;
// Score at this cell is below dropoff
columnDropoff = subjectPosition;
rightOfDropoff = 1;
}
// ** We are allowing insertQ this column
else
{
// Record some old values
previousOldBest = *bestRow;
// Set Ix value
*bestRow = *insertQrow;
*insertQrow -= extensionGapPenalty;
// Set DUMMY value for Iy, which should never be used
insertS = constants_gappedExtensionDummyValue;
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
rightOfDropoff = 1;
}
else
{
// We are left of the column dropoff for this row
rightOfDropoff = 0;
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition--; bestRow--; insertQrow--; subjectCount--;
// -----CELLS RIGHT OF ROW DROPOFF-----
while (subjectPosition >= rowDropoff)
{
// ** We are not allowing insertQ this column
if (subjectCount)
{
// Remember old M value (for cell below this one)
oldBest = *bestRow;
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// Determine the best of M and Iy
if (match > insertS)
{
*bestRow = match;
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
}
else
{
*bestRow = insertS;
// Since M <= Iy, new Iy must derive from Iy
insertS -= extensionGapPenalty;
}
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
// If the scores at the current cell (and cells to its right) are below dropoff
if (rightOfDropoff)
{
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
}
else
{
// We are left of the column dropoff for this row
rightOfDropoff = 0;
}
}
}
// ** We are allowing insertQ this column
else
{
// Remember old M value (for cell below this one)
oldBest = *bestRow;
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// Determine the best of M, Ix and Iy
if (match > insertS)
{
if (match > *insertQrow)
{
// Match is largest
*bestRow = match;
// Calculate new Ix
*insertQrow = maximum(match - openGapPenalty,
*insertQrow - extensionGapPenalty);
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
}
else
{
// insertQ is largest
*bestRow = *insertQrow;
// Calculate new Ix
*insertQrow -= extensionGapPenalty;
// Dummy Iy
insertS = constants_gappedExtensionDummyValue;
}
}
else
{
if (insertS > *insertQrow)
{
// insertS is largest
*bestRow = insertS;
// Dummy Ix
*insertQrow = constants_gappedExtensionDummyValue;
// Calculate new Iy
insertS -= extensionGapPenalty;
}
else
{
// insertQ is largest
*bestRow = *insertQrow;
// Calculate new Ix
*insertQrow -= extensionGapPenalty;
// Dummy Iy
insertS = constants_gappedExtensionDummyValue;
}
}
// If the scores at the current cell (and cells to its right) are below dropoff
if (rightOfDropoff)
{
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
}
else
{
// We are left of the column dropoff for this row
rightOfDropoff = 0;
}
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition--; bestRow--; insertQrow--; subjectCount--;
}
// -----SINGLE CELL LEFT OF ROW DROPOFF -----
if (!(bestScore > previousOldBest + dropoff) && (subjectPosition >= subject))
{
// Calculate match value
match = previousOldBest + scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)];
// Set value for best
*bestRow = maximum(match, insertS);
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
// Set DUMMY values for Ix
*insertQrow = constants_gappedExtensionDummyValue;
subjectPosition--; bestRow--; insertQrow--;
}
// -----CELLS LEFT OF ROW DROPOFF -----
if (!(bestScore > *(bestRow + 1) + dropoff))
{
while (subjectPosition >= subject)
{
// Set value for Iy and best
*bestRow = insertS;
insertS = insertS - extensionGapPenalty;
// Set DUMMY values for Ix
*insertQrow = constants_gappedExtensionDummyValue;
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Stop processing row
subjectPosition--;
break;
}
subjectPosition--; bestRow--; insertQrow--;
}
}
// Record dropoff position
rowDropoff = subjectPosition + 1;
// Clear insertS for next row
insertS = constants_gappedExtensionDummyValue;
// Reset queryCount
queryCount = semiGappedExtensionN;
}
}
dpResults.best.queryOffset = bestQueryPosition;
dpResults.best.subjectOffset = bestSubjectPosition - subject;
dpResults.bestScore = bestScore;
dpResults.traceback = NULL;
return dpResults;
}
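// Summary of the recurrence used above (and mirrored in the "after seed" pass):
// each cell tracks the usual match state (match / *bestRow) plus two insertion
// states, insertQ (kept per subject position in insertQrow) and insertS (kept as a
// running value along the current row). Insertions are restricted: insertS may only
// be opened or extended on every semiGappedExtensionN-th query row (queryCount == 0)
// and insertQ only on every semiGappedExtensionN-th subject column
// (subjectCount == 0); elsewhere the corresponding state is parked at
// constants_gappedExtensionDummyValue. Cells scoring more than dropoff below the
// running bestScore are pruned via rowDropoff/columnDropoff, which narrow the band
// of cells visited on later rows.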
// Perform dynamic programming to explore possible END points and alignments that start at
// the given seed and find the best score
__device__ struct dpResults semiGappedScoring_dpAfterSeedGPU(unsigned char* subject, struct PSSMatrixFP PSSMatrixFP,
int4 dropoff, int4 subjectLength, int4 *bestRowCur, int4 *insertQrowCur,
unsigned char encoding_numCodes, int4 openGapPenalty, int4 extensionGapPenalty,
int4 semiGappedExtensionN, int4 queryOffset)
{
int4 queryPosition, bestQueryPosition, queryEnd;
int4 matrixColumn;
unsigned char *rowDropoff, *columnDropoff;
unsigned char *subjectPosition, *bestSubjectPosition, *subjectEnd, *startSubjectPosition;
int4 *bestRow, *insertQrow;
int4 bestScore = 0;
int4 insertS, rowOffset;
int4 subjectDistance;
int4 oldBest, match, previousOldBest;
unsigned char leftOfDropoff;
int4 queryLength;
int4 queryCount, subjectCount;
struct dpResults dpResults;
queryLength = PSSMatrixFP.length;
subjectEnd = subject + subjectLength;
queryEnd = queryLength;
bestSubjectPosition = subjectPosition = startSubjectPosition = subject + 1;
bestQueryPosition = queryPosition = 1;
// Initialize rows
bestRow = bestRowCur + 1;
insertQrow = insertQrowCur + 1;
// Set initial row dropoff and column dropoff
rowDropoff = subject + subjectLength - 1;
columnDropoff = subject;
// -----FIRST ROW-----
// Using first column of query matrix
matrixColumn = queryPosition + 1 + queryOffset;
// -----FIRST CELL-----
// Set M value for top-left cell
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)];
// M must be the best
*bestRow = match;
// Only gap opens possible
*insertQrow = insertS = match - openGapPenalty;
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
subjectDistance = 0;
subjectPosition++; bestRow++; insertQrow++;
// ----- REMAINING CELLS -----
// For each remaining column in the top row, scanning from left-to-right
while (subjectPosition < subjectEnd)
{
// Set value for M
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)]
- openGapPenalty - subjectDistance * extensionGapPenalty;
// Determine the best of M and Iy
if (match > insertS)
{
*bestRow = match;
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
}
else
{
*bestRow = insertS;
// Since M <= Iy, new Iy must derive from Iy
insertS -= extensionGapPenalty;
}
// Set DUMMY Ix value, which should never be used
*insertQrow = constants_gappedExtensionDummyValue;
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
rowDropoff = subjectPosition;
// And stop processing row
break;
}
subjectPosition++; bestRow++; insertQrow++;
subjectDistance++;
}
// Start queryCount at N. Only allow insertS for every Nth row when queryCount
// reaches 0
queryCount = semiGappedExtensionN;
queryPosition += 1;
queryCount--;
// -----REMAINING ROWS-----
while (queryPosition < queryEnd && rowDropoff > columnDropoff)
{
subjectPosition = columnDropoff + 1;
// Determine subjectCount for the initial subjectPosition. It is used to only allow
// insertQ when (subjectOffset % semiGappedExtensionN) == 0
subjectCount = ((int4)(subjectPosition - startSubjectPosition) % semiGappedExtensionN);
if (subjectCount)
subjectCount = semiGappedExtensionN - subjectCount;
// Reset rows
rowOffset = (subjectPosition - subject);
bestRow = bestRowCur + rowOffset;
insertQrow = insertQrowCur + rowOffset;
// Using next column of query matrix
matrixColumn = queryPosition + 1 + queryOffset;
// ************ All rows we are not allowing insertS
if (queryCount)
{
// ** No insertQ allowed this column, this cell will only get a DUMMY score
if (subjectCount)
{
previousOldBest = *bestRow;
*bestRow = constants_gappedExtensionDummyValue;
// Score at this cell is below dropoff
columnDropoff = subjectPosition;
leftOfDropoff = 1;
}
// ** We are allowing insertQ this column
else
{
// -----FAR LEFT CELL-----
// Record some old values
previousOldBest = *bestRow;
// Set Ix value
*bestRow = *insertQrow;
*insertQrow -= extensionGapPenalty;
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
leftOfDropoff = 1;
}
else
{
// We are right of the column dropoff for this row
leftOfDropoff = 0;
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition++; bestRow++; insertQrow++; subjectCount--;
// -----CELLS LEFT OF ROW DROPOFF-----
while (subjectPosition <= rowDropoff)
{
// ** We are not allowing insertQ this column
if (subjectCount)
{
// Calculate new M value, which is also the best
oldBest = *bestRow;
match = *bestRow = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
}
// We are allowing insertQ this column
else
{
// Calculate new M value
oldBest = *bestRow;
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// Determine the best of M and Ix
if (match > *insertQrow)
{
*bestRow = match;
// Calculate new Ix
*insertQrow = maximum(match - openGapPenalty,
*insertQrow - extensionGapPenalty);
}
else
{
*bestRow = *insertQrow;
// Since M <= Ix, new Ix must derive from Ix
*insertQrow -= extensionGapPenalty;
}
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition++; bestRow++; insertQrow++; subjectCount--;
}
// -----SINGLE CELL RIGHT OF ROW DROPOFF -----
if (!(bestScore > previousOldBest + dropoff) && (subjectPosition < subjectEnd))
{
// Set value for best
*bestRow = match = previousOldBest + scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)];
// Set DUMMY values for Ix
*insertQrow = constants_gappedExtensionDummyValue;
if (match + dropoff >= bestScore)
{
// Record dropoff position
rowDropoff = subjectPosition;
}
}
}
// ************ Every Nth row we allow insertS
else
{
// -----FAR LEFT CELL-----
// ** No insertQ allowed this column, this cell will only get a DUMMY score
if (subjectCount)
{
previousOldBest = *bestRow;
*bestRow = constants_gappedExtensionDummyValue;
// Score at this cell is below dropoff
columnDropoff = subjectPosition;
leftOfDropoff = 1;
}
// ** We are allowing insertQ this column
else
{
// Record some old values
previousOldBest = *bestRow;
// Set Ix value
*bestRow = *insertQrow;
*insertQrow -= extensionGapPenalty;
// Set DUMMY value for Iy, which should never be used
insertS = constants_gappedExtensionDummyValue;
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
leftOfDropoff = 1;
}
else
{
// We are right of the column dropoff for this row
leftOfDropoff = 0;
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition++; bestRow++; insertQrow++; subjectCount--;
// -----CELLS LEFT OF ROW DROPOFF-----
while (subjectPosition <= rowDropoff)
{
// ** We are not allowing insertQ this column
if (subjectCount)
{
// Remember old M value (for cell below this one)
oldBest = *bestRow;
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// Determine the best of M and Iy
if (match > insertS)
{
*bestRow = match;
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
}
else
{
*bestRow = insertS;
// Since M <= Iy, new Iy must derive from Iy
insertS -= extensionGapPenalty;
}
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
// If the scores at the current cell (and cells to its left) are below dropoff
if (leftOfDropoff)
{
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
}
else
{
// We are right of the column dropoff for this row
leftOfDropoff = 0;
}
}
}
// ** We are allowing insertQ this column
else
{
// Remember old M value (for cell below this one)
oldBest = *bestRow;
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// Determine the best of M, Ix and Iy
if (match > insertS)
{
if (match > *insertQrow)
{
// Match is largest
*bestRow = match;
// Calculate new Ix
*insertQrow = maximum(match - openGapPenalty,
*insertQrow - extensionGapPenalty);
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
}
else
{
// insertQ is largest
*bestRow = *insertQrow;
// Calculate new Ix
*insertQrow -= extensionGapPenalty;
// Dummy Iy
insertS = constants_gappedExtensionDummyValue;
}
}
else
{
if (insertS > *insertQrow)
{
// insertS is largest
*bestRow = insertS;
// Dummy Ix
*insertQrow = constants_gappedExtensionDummyValue;
// Calculate new Iy
insertS -= extensionGapPenalty;
}
else
{
// insertQ is largest
*bestRow = *insertQrow;
// Calculate new Ix
*insertQrow -= extensionGapPenalty;
// Dummy Iy
insertS = constants_gappedExtensionDummyValue;
}
}
// If the scores at the current cell (and cells to its left) are below dropoff
if (leftOfDropoff)
{
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
}
else
{
// We are right of the column dropoff for this row
leftOfDropoff = 0;
}
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition++; bestRow++; insertQrow++; subjectCount--;
}
// -----SINGLE CELL RIGHT OF ROW DROPOFF -----
if (!(bestScore > previousOldBest + dropoff) && (subjectPosition < subjectEnd))
{
// Calculate match value
match = previousOldBest + scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)];
// Set value for best
*bestRow = maximum(match, insertS);
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
// Set DUMMY values for Ix
*insertQrow = constants_gappedExtensionDummyValue;
subjectPosition++; bestRow++; insertQrow++;
}
// -----CELLS RIGHT OF ROW DROPOFF -----
if (!(bestScore > *(bestRow - 1) + dropoff))
{
while (subjectPosition < subjectEnd)
{
// Set value for Iy and best
*bestRow = insertS;
insertS = insertS - extensionGapPenalty;
// Set DUMMY values for Ix
*insertQrow = constants_gappedExtensionDummyValue;
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Stop processing row
subjectPosition++;
break;
}
subjectPosition++; bestRow++; insertQrow++;
}
}
// Record dropoff position
rowDropoff = subjectPosition - 1;
// Clear insertS for next row
insertS = constants_gappedExtensionDummyValue;
// Reset queryCount
queryCount = semiGappedExtensionN;
}
//queryPosition += encoding_numCodes; queryCount--;
queryPosition += 1; queryCount--;
}
dpResults.best.queryOffset = bestQueryPosition;
dpResults.best.subjectOffset = bestSubjectPosition - subject;
dpResults.bestScore = bestScore;
dpResults.traceback = NULL;
return dpResults;
}
| 68042e86491507c9d6e423d4378b912de759814e.cu | /*
* cuBLASTP - Fine-Grained Parallelization of Protein Sequence Search on CPU+GPU
* Version 0.1 (beta)
*
* (c) 2015 Virginia Polytechnic Institute & State University (Virginia Tech)
*
* This version of cuBLASTP is licensed for non-commercial use only,
* as specified in LICENSE files in licensing directory. For all other use
* contact [email protected]
*
* Developer: Shucai Xiao
*
*/
//#define maximum(a,b) (((a) > (b)) ? (a) : (b))
#define TARGET 0
__device__ __constant__ int2 scoreMatrixC[1640];
__device__ __constant__ unsigned char querySequenceC[40000];
struct gappedExtensionParameters {
int2 semiGappedOpenGap;
int2 semiGappedExtendGap;
int4 semiGappedExtensionN;
int4 semiGappedDropoffIncrease;
unsigned char encoding_numCodes;
};
__device__ struct dpResults semiGappedScoring_dpBeforeSeedGPU(unsigned char* subject, struct PSSMatrixFP PSSMatrixFP,
struct coordinate seed, int4 dropoff, int4 *bestRow, int4 *insertQrow,
unsigned char encoding_numCodes, int4 openGapPenalty, int4 extensionGapPenalty,
int4 semiGappedExtensionN);
__device__ struct dpResults semiGappedScoring_dpAfterSeedGPU(unsigned char* subject, struct PSSMatrixFP PSSMatrixFP,
int4 dropoff, int4 subjectLength, int4 *bestRow, int4 *insertQrow,
unsigned char encoding_numCodes, int4 openGapPenalty, int4 extensionGapPenalty,
int4 semiGappedExtensionN, int4 queryOffset);
__device__ int4 semiGappedScoring_scoreGPU(struct ungappedExtension *ungappedExtension,
struct PSSMatrixFP PSSMatrix,
int4 subjectSize,
unsigned char *subject,
int4 dropoff,
int4 semiGappedDropoffIncrease,
int4 *bestRow,
int4 *insertQRow,
unsigned char encoding_numCodes,
int4 openGapPenalty,
int4 extensionGapPenalty,
int4 semiGappedExtensionN);
__device__ struct PSSMatrixFP PSSMatrixFP_chop(struct PSSMatrixFP PSSMatrixFP, int4 amount, unsigned char encoding_numCodes)
{
struct PSSMatrixFP chopped;
chopped.matrix = PSSMatrixFP.matrix + amount * encoding_numCodes;
chopped.queryCodes = PSSMatrixFP.queryCodes + amount;
chopped.bytePackedCodes = PSSMatrixFP.bytePackedCodes + amount;
chopped.xorCodes = PSSMatrixFP.xorCodes + amount;
chopped.length = PSSMatrixFP.length - amount;
chopped.highestValue = PSSMatrixFP.highestValue;
chopped.lowestValue = PSSMatrixFP.lowestValue;
chopped.strandLength = PSSMatrixFP.strandLength - amount;
if (chopped.strandLength < 0)
chopped.strandLength = 0;
return chopped;
}
__device__ struct coordinate ungappedExtension_findProteinSeed1(
struct ungappedExtension* ungappedExtension,
struct PSSMatrixFP PSSMatrixFP,
unsigned char* subject,
unsigned char encoding_numCodes)
{
char *queryWindowStart, *queryWindowEnd;
unsigned char *subjectWindowStart, *subjectWindowEnd;
char* bestQueryPosition;
unsigned char* bestSubjectPosition;
int4 bestSegmentScore;
int4 nominalScore, count;
struct coordinate seed;
if (ungappedExtension->end.queryOffset - ungappedExtension->start.queryOffset < 11)
{
// The seed point is the middle of the extension
seed.queryOffset = (ungappedExtension->end.queryOffset +
ungappedExtension->start.queryOffset) / 2;
seed.subjectOffset = (ungappedExtension->end.subjectOffset +
ungappedExtension->start.subjectOffset) / 2;
}
else
{
// Else find the highest scoring length-11 segment of the ungapped extension
queryWindowStart = queryWindowEnd = PSSMatrixFP.matrix + ungappedExtension->start.queryOffset * encoding_numCodes;
subjectWindowStart = subjectWindowEnd = subject + ungappedExtension->start.subjectOffset;
// Find initial score for first 11 positions
nominalScore = 0;
count = 0;
while (count < 11)
{
nominalScore += queryWindowEnd[*subjectWindowEnd];
queryWindowEnd += encoding_numCodes;
subjectWindowEnd++;
count++;
}
queryWindowEnd -= encoding_numCodes;
subjectWindowEnd--;
// By default first-11 positions gives best position and score
bestQueryPosition = queryWindowStart;
bestSubjectPosition = subjectWindowStart;
bestSegmentScore = nominalScore;
// Now slide the window across and record the better scores/positions
while (queryWindowEnd < PSSMatrixFP.matrix + ungappedExtension->end.queryOffset * encoding_numCodes)
{
// Advance window end, add new position value
queryWindowEnd += encoding_numCodes;
subjectWindowEnd++;
nominalScore += queryWindowEnd[*subjectWindowEnd];
// Remove position that we will leave behind
nominalScore -= queryWindowStart[*subjectWindowStart];
queryWindowStart += encoding_numCodes;
subjectWindowStart++;
// Check if best window position yet
if (nominalScore > bestSegmentScore)
{
bestSegmentScore = nominalScore;
bestQueryPosition = queryWindowStart;
bestSubjectPosition = subjectWindowStart;
}
}
// Middle of the best window is the seed position
seed.queryOffset = (bestQueryPosition - PSSMatrixFP.matrix) / encoding_numCodes + 5;
seed.subjectOffset = bestSubjectPosition + 5 - subject;
}
return seed;
}
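// The window scan above is an incremental (sliding-window) evaluation: the score of
// each 11-residue window is obtained from the previous one as
//
//   score(i+1 .. i+11) = score(i .. i+10)
//                      + PSSM[query[i+11]][subject[i+11]]
//                      - PSSM[query[i]][subject[i]]
//
// and the seed is placed at the middle (offset +5) of the best-scoring window.
// Extensions shorter than 11 residues simply use the midpoint of the whole extension.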
__device__ void alignments_pruneRegion(struct ungappedExtension *ungappedExtension,
struct ungappedExtension *curExtension,
int ungappedExtensionNum)
{
int i;
for (i = 0; i < ungappedExtensionNum; i++)
{
if (ungappedExtension[i].status != ungappedExtension_DELETED)
{
if (ungappedExtension[i].start.queryOffset >= curExtension->start.queryOffset &&
ungappedExtension[i].end.queryOffset <= curExtension->end.queryOffset &&
ungappedExtension[i].start.subjectOffset >= curExtension->start.subjectOffset &&
ungappedExtension[i].end.subjectOffset <= curExtension->end.subjectOffset &&
ungappedExtension[i].nominalScore <= curExtension->nominalScore &&
ungappedExtension + i != curExtension)
{
ungappedExtension[i].status = ungappedExtension_DELETED;
}
}
}
return;
}
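// Pruning rule used above: an ungapped extension is marked DELETED when both its
// query span and its subject span lie entirely inside those of curExtension and its
// nominal score is no higher, i.e. it is wholly contained in a region already
// covered by an equal-or-higher-scoring extension.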
// Perform semi-gapped alignment with restricted insertion
__global__ void semiGappedScoring_kernel(struct sequenceDataFP *sequenceDataFP,
unsigned char *sequences,
struct PSSMatrixFP *PSSMatrix,
char *PSSMatrixBody,
struct gappedExtensionParameters *parameters,
int *startLocArray,
int *ungappedExtensionNumArray,
int alignmentNum,
struct ungappedExtension *ungappedExtensions,
int4 *bestScores,
int4 *numGoodExtensions,
int4 *numSemiGapping,
int4 *orderArray,
int4 *bestRowAll,
int4 *insertQRowAll,
int4 dropoff,
int4 nominalR1cutoff)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= alignmentNum)
{
return;
}
int2 openGapPenalty = parameters->semiGappedOpenGap;
int2 extensionGapPenalty = parameters->semiGappedExtendGap;
int4 semiGappedExtensionN = parameters->semiGappedExtensionN;
int4 semiGappedDropoffIncrease = parameters->semiGappedDropoffIncrease;
unsigned char encoding_numCodes = parameters->encoding_numCodes;
PSSMatrix->matrix = PSSMatrixBody + encoding_numCodes;
//int4 ungappedStartLoc = startLocArray[orderArray[tid]];
int4 ungappedStartLoc = startLocArray[tid];
int4 sequenceCount;
struct ungappedExtension *ungappedExtension;
sequenceCount = ungappedExtensions[ungappedStartLoc].sequenceCount;
int4 subjectOffset = sequenceDataFP[sequenceCount].offset;
unsigned char *subject = sequences + subjectOffset;
int4 *bestRow = bestRowAll + subjectOffset;
int4 *insertQRow = insertQRowAll + subjectOffset;
uint4 subjectLength = sequenceDataFP[sequenceCount].sequenceLength;
int4 bestScore = 0;
int numSemiGappingDevice = 0;
uint4 goodExtensionNo, ungappedExtensionNo;
goodExtensionNo = 0;
//for (ungappedExtensionNo = 0; ungappedExtensionNo < ungappedExtensionNumArray[orderArray[tid]]; ungappedExtensionNo++)
for (ungappedExtensionNo = 0; ungappedExtensionNo < ungappedExtensionNumArray[tid]; ungappedExtensionNo++)
{
ungappedExtension = &ungappedExtensions[ungappedStartLoc + ungappedExtensionNo];
if (ungappedExtension->status != ungappedExtension_DELETED)
{
if (ungappedExtension->seed.queryOffset == -1 &&
ungappedExtension->seed.subjectOffset == -1)
{
ungappedExtension->seed = ungappedExtension_findProteinSeed1(ungappedExtension, *PSSMatrix, subject, encoding_numCodes);
}
numSemiGappingDevice++;
ungappedExtension->nominalScore = semiGappedScoring_scoreGPU(ungappedExtension, *PSSMatrix,
subjectLength, subject, dropoff, semiGappedDropoffIncrease,
bestRow, insertQRow, encoding_numCodes, openGapPenalty,
extensionGapPenalty, semiGappedExtensionN);
ungappedExtension->status = ungappedExtension_SEMIGAPPED;
if (ungappedExtension->nominalScore >= nominalR1cutoff)
{
if (ungappedExtension->nominalScore > bestScore)
{
bestScore = ungappedExtension->nominalScore;
}
}
else
{
ungappedExtension->status = ungappedExtension_DELETED;
}
goodExtensionNo++;
//alignments_pruneRegion(&ungappedExtensions[ungappedStartLoc], ungappedExtension, ungappedExtensionNumArray[orderArray[tid]]);
alignments_pruneRegion(&ungappedExtensions[ungappedStartLoc], ungappedExtension, ungappedExtensionNumArray[tid]);
}
}
numSemiGapping[tid] = numSemiGappingDevice;
//bestScores[orderArray[tid]] = bestScore;
bestScores[tid] = bestScore;
if (bestScore >= nominalR1cutoff)
{
numGoodExtensions[tid] += goodExtensionNo;
}
return;
}
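// A minimal host-side launch sketch for semiGappedScoring_kernel. This wrapper is a
// sketch only: the function name, the 128-thread block size and the trailing
// cudaDeviceSynchronize() are assumptions rather than part of the original code
// path, and every pointer argument is assumed to already reside in device memory.
static void semiGappedScoring_launchSketch(struct sequenceDataFP *sequenceDataFP,
	unsigned char *sequences, struct PSSMatrixFP *PSSMatrix, char *PSSMatrixBody,
	struct gappedExtensionParameters *parameters, int *startLocArray,
	int *ungappedExtensionNumArray, int alignmentNum,
	struct ungappedExtension *ungappedExtensions, int4 *bestScores,
	int4 *numGoodExtensions, int4 *numSemiGapping, int4 *orderArray,
	int4 *bestRowAll, int4 *insertQRowAll, int4 dropoff, int4 nominalR1cutoff)
{
	// One thread per alignment; round the grid size up so every alignment is covered.
	int threadsPerBlock = 128;
	int numBlocks = (alignmentNum + threadsPerBlock - 1) / threadsPerBlock;
	semiGappedScoring_kernel<<<numBlocks, threadsPerBlock>>>(sequenceDataFP, sequences,
		PSSMatrix, PSSMatrixBody, parameters, startLocArray,
		ungappedExtensionNumArray, alignmentNum, ungappedExtensions, bestScores,
		numGoodExtensions, numSemiGapping, orderArray, bestRowAll, insertQRowAll,
		dropoff, nominalR1cutoff);
	// Block until the kernel finishes so results (bestScores etc.) can be copied back.
	cudaDeviceSynchronize();
}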
__device__ int4 semiGappedScoring_scoreGPU(struct ungappedExtension *ungappedExtension,
struct PSSMatrixFP PSSMatrix,
int4 subjectSize,
unsigned char *subject,
int4 dropoff,
int4 semiGappedDropoffIncrease,
int4 *bestRow,
int4 *insertQRow,
unsigned char encoding_numCodes,
int4 openGapPenalty,
int4 extensionGapPenalty,
int4 semiGappedExtensionN)
{
struct coordinate seed;
unsigned char *choppedSubject;
struct PSSMatrixFP choppedPSSMatrix;
int4 choppedSubjectSize;
struct dpResults beforeDpResults, afterDpResults;
int4 strandOffset = 0;
// Perform dynamic programming for points before the seed
seed = ungappedExtension->seed;
if (seed.queryOffset > PSSMatrix.strandLength)
{
// If query position is in the second strand, remove first strand from PSSM
strandOffset = PSSMatrix.strandLength;
seed.queryOffset -= PSSMatrix.strandLength;
PSSMatrix = PSSMatrixFP_chop(PSSMatrix, PSSMatrix.strandLength, encoding_numCodes);
}
else
{
// Otherwise remove second strand
PSSMatrix.length = PSSMatrix.strandLength;
}
beforeDpResults = semiGappedScoring_dpBeforeSeedGPU(subject, PSSMatrix,
seed, dropoff + semiGappedDropoffIncrease, bestRow,
insertQRow, encoding_numCodes, openGapPenalty,
extensionGapPenalty, semiGappedExtensionN);
// Chop the start off the query and subject so they begin at the seed
choppedPSSMatrix = PSSMatrixFP_chop(PSSMatrix, seed.queryOffset, encoding_numCodes);
choppedSubject = subject + seed.subjectOffset;
choppedSubjectSize = subjectSize - seed.subjectOffset;
// Perform dynamic programming for points after the seed
afterDpResults = semiGappedScoring_dpAfterSeedGPU(choppedSubject, choppedPSSMatrix,
dropoff + semiGappedDropoffIncrease, choppedSubjectSize, bestRow,
insertQRow, encoding_numCodes, openGapPenalty,
extensionGapPenalty, semiGappedExtensionN, seed.queryOffset);
// Re-adjust result change due to chopping subject/query and strand adjustment
afterDpResults.best.queryOffset += seed.queryOffset + strandOffset;
afterDpResults.best.subjectOffset += seed.subjectOffset;
beforeDpResults.best.queryOffset += strandOffset;
// Associate best scoring start and end points with the ungapped extension
ungappedExtension->start = beforeDpResults.best;
ungappedExtension->end = afterDpResults.best;
// Determine score by combining score from the two traces, and the match score at
// the seed position
return beforeDpResults.bestScore + afterDpResults.bestScore +
choppedPSSMatrix.matrix[choppedSubject[0]];
}
// Perform dynamic programming to explore possible start points and alignments that end at
// the given seed and find the best score
__device__ struct dpResults semiGappedScoring_dpBeforeSeedGPU(unsigned char* subject, struct PSSMatrixFP PSSMatrixFP,
struct coordinate seed, int4 dropoff, int4 *bestRowCur, int4 *insertQrowCur,
unsigned char encoding_numCodes, int4 openGapPenalty, int4 extensionGapPenalty,
int4 semiGappedExtensionN)
{
int4 queryPosition, bestQueryPosition;
int4 matrixColumn;
unsigned char *rowDropoff, *columnDropoff;
int4 *bestRow, *insertQrow;
unsigned char *subjectPosition, *bestSubjectPosition, *startSubjectPosition;
int4 bestScore = 0;
int4 insertS, rowOffset;
int4 subjectDistance;
int4 oldBest, match, previousOldBest;
unsigned char rightOfDropoff;
int4 queryCount, subjectCount;
struct dpResults dpResults;
bestSubjectPosition = subjectPosition = startSubjectPosition = subject + seed.subjectOffset - 1;
bestQueryPosition = queryPosition = seed.queryOffset - 1;
// Initialize row pointers
rowOffset = (subjectPosition - subject);
bestRow = bestRowCur + rowOffset;
insertQrow = insertQrowCur + rowOffset;
// Set initial row dropoff and column dropoff
rowDropoff = subject;
columnDropoff = subject + seed.subjectOffset;
// Using first column of query matrix
matrixColumn = queryPosition + 1;
// -----FIRST ROW-----
// -----FIRST CELL-----
// Set M value for bottom-right cell
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)];
// M must be the best
*bestRow = match;
// Only gap opens possible
//*insertQrow = insertS = match - parameters_semiGappedOpenGap;
*insertQrow = insertS = match - openGapPenalty;
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
subjectDistance = 0;
subjectPosition--; bestRow--; insertQrow--;
// ----- REMAINING CELLS -----
// For each remaining column in the bottom row, scanning from right-to-left
while (subjectPosition >= subject)
{
// Set value for M
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)]
- openGapPenalty - subjectDistance * extensionGapPenalty;
// Determine the best of M and Iy
if (match > insertS)
{
*bestRow = match;
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
}
else
{
*bestRow = insertS;
// Since M <= Iy, new Iy must derive from Iy
insertS -= extensionGapPenalty;
}
// Set DUMMY Ix value, which should never be used
*insertQrow = constants_gappedExtensionDummyValue;
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
rowDropoff = subjectPosition;
// And stop processing row
break;
}
subjectPosition--; bestRow--; insertQrow--;
subjectDistance++;
}
// Start queryCount at N. Only allow insertS for every Nth row when queryCount
// reaches 0
queryCount = semiGappedExtensionN;
// -----REMAINING ROWS-----
//while (queryPosition > PSSMatrixFP.matrix && rowDropoff < columnDropoff)
while (queryPosition > 0 && rowDropoff < columnDropoff)
{
queryPosition--;
queryCount--;
subjectPosition = columnDropoff - 1;
// Determine subjectCount for the initial subjectPosition. It is used to only allow
// insertQ when (subjectOffset % semiGappedExtensionN) == 0
subjectCount = (int4)(startSubjectPosition - subjectPosition) % semiGappedExtensionN;
if (subjectCount)
subjectCount = semiGappedExtensionN - subjectCount;
// Reset row pointers to start of rows
rowOffset = (subjectPosition - subject);
bestRow = bestRowCur + rowOffset;
insertQrow = insertQrowCur + rowOffset;
// Using next column of query matrix
matrixColumn = queryPosition + 1;
// ************ All rows we are not allowing insertS
if (queryCount)
{
// ** No insertQ allowed this column, this cell will only get a DUMMY score
if (subjectCount)
{
previousOldBest = *bestRow;
*bestRow = constants_gappedExtensionDummyValue;
// Score at this cell is below dropoff
columnDropoff = subjectPosition;
rightOfDropoff = 1;
}
// ** We are allowing insertQ this column
else
{
// -----FAR RIGHT CELL-----
// Record some old values
previousOldBest = *bestRow;
// Set Ix value
*bestRow = *insertQrow;
*insertQrow -= extensionGapPenalty;
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
rightOfDropoff = 1;
}
else
{
// We are left of the column dropoff for this row
rightOfDropoff = 0;
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition--; bestRow--; insertQrow--; subjectCount--;
// -----CELLS RIGHT OF ROW DROPOFF-----
while (subjectPosition >= rowDropoff)
{
// ** We are not allowing insertQ this column
if (subjectCount)
{
// Calculate new M value, which is also the best
oldBest = *bestRow;
match = *bestRow = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
}
// We are allowing insertQ this column
else
{
// Calculate new M value
oldBest = *bestRow;
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// Determine the best of M and Ix
if (match > *insertQrow)
{
*bestRow = match;
// Calculate new Ix
*insertQrow = maximum(match - openGapPenalty,
*insertQrow - extensionGapPenalty);
}
else
{
*bestRow = *insertQrow;
// Since M <= Ix, new Ix must derive from Ix
*insertQrow -= extensionGapPenalty;
}
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition--; bestRow--; insertQrow--; subjectCount--;
}
// -----SINGLE CELL LEFT OF ROW DROPOFF -----
if (!(bestScore > previousOldBest + dropoff) && (subjectPosition >= subject))
{
// Set value for best
*bestRow = match = previousOldBest + scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)];
// Set DUMMY values for Ix
*insertQrow = constants_gappedExtensionDummyValue;
if (match + dropoff >= bestScore)
{
// Record dropoff position
rowDropoff = subjectPosition;
}
}
}
// ************ Every Nth row we allow insertS
else
{
// -----FAR RIGHT CELL-----
// ** No insertQ allowed this column, this cell will only get a DUMMY score
if (subjectCount)
{
previousOldBest = *bestRow;
*bestRow = constants_gappedExtensionDummyValue;
// Score at this cell is below dropoff
columnDropoff = subjectPosition;
rightOfDropoff = 1;
}
// ** We are allowing insertQ this column
else
{
// Record some old values
previousOldBest = *bestRow;
// Set Ix value
*bestRow = *insertQrow;
*insertQrow -= extensionGapPenalty;
// Set DUMMY value for Iy, which should never be used
insertS = constants_gappedExtensionDummyValue;
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
rightOfDropoff = 1;
}
else
{
// We are left of the column dropoff for this row
rightOfDropoff = 0;
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition--; bestRow--; insertQrow--; subjectCount--;
// -----CELLS RIGHT OF ROW DROPOFF-----
while (subjectPosition >= rowDropoff)
{
// ** We are not allowing insertQ this column
if (subjectCount)
{
// Remember old M value (for cell below this one)
oldBest = *bestRow;
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// Determine the best of M and Iy
if (match > insertS)
{
*bestRow = match;
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
}
else
{
*bestRow = insertS;
// Since M <= Iy, new Iy must derive from Iy
insertS -= extensionGapPenalty;
}
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
// If the scores at the current cell (and cells to its right) are below dropoff
if (rightOfDropoff)
{
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
}
else
{
// We are left of the column dropoff for this row
rightOfDropoff = 0;
}
}
}
// ** We are allowing insertQ this column
else
{
// Remember old M value (for cell below this one)
oldBest = *bestRow;
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// Determine the best of M, Ix and Iy
if (match > insertS)
{
if (match > *insertQrow)
{
// Match is largest
*bestRow = match;
// Calculate new Ix
*insertQrow = maximum(match - openGapPenalty,
*insertQrow - extensionGapPenalty);
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
}
else
{
// insertQ is largest
*bestRow = *insertQrow;
// Calculate new Ix
*insertQrow -= extensionGapPenalty;
// Dummy Iy
insertS = constants_gappedExtensionDummyValue;
}
}
else
{
if (insertS > *insertQrow)
{
// insertS is largest
*bestRow = insertS;
// Dummy Ix
*insertQrow = constants_gappedExtensionDummyValue;
// Calculate new Iy
insertS -= extensionGapPenalty;
}
else
{
// insertQ is largest
*bestRow = *insertQrow;
// Calculate new Ix
*insertQrow -= extensionGapPenalty;
// Dummy Iy
insertS = constants_gappedExtensionDummyValue;
}
}
// If the scores at the current cell (and cells to its right) are below dropoff
if (rightOfDropoff)
{
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
}
else
{
// We are left of the column dropoff for this row
rightOfDropoff = 0;
}
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition--; bestRow--; insertQrow--; subjectCount--;
}
// -----SINGLE CELL LEFT OF ROW DROPOFF -----
if (!(bestScore > previousOldBest + dropoff) && (subjectPosition >= subject))
{
// Calculate match value
match = previousOldBest + scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)];
// Set value for best
*bestRow = maximum(match, insertS);
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
// Set DUMMY values for Ix
*insertQrow = constants_gappedExtensionDummyValue;
subjectPosition--; bestRow--; insertQrow--;
}
// -----CELLS LEFT OF ROW DROPOFF -----
if (!(bestScore > *(bestRow + 1) + dropoff))
{
while (subjectPosition >= subject)
{
// Set value for Iy and best
*bestRow = insertS;
insertS = insertS - extensionGapPenalty;
// Set DUMMY values for Ix
*insertQrow = constants_gappedExtensionDummyValue;
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Stop processing row
subjectPosition--;
break;
}
subjectPosition--; bestRow--; insertQrow--;
}
}
// Record dropoff position
rowDropoff = subjectPosition + 1;
// Clear insertS for next row
insertS = constants_gappedExtensionDummyValue;
// Reset queryCount
queryCount = semiGappedExtensionN;
}
}
dpResults.best.queryOffset = bestQueryPosition;
dpResults.best.subjectOffset = bestSubjectPosition - subject;
dpResults.bestScore = bestScore;
dpResults.traceback = NULL;
return dpResults;
}
// Perform dynamic programming to explore possible END points and alignments that start at
// the given seed and find the best score
__device__ struct dpResults semiGappedScoring_dpAfterSeedGPU(unsigned char* subject, struct PSSMatrixFP PSSMatrixFP,
int4 dropoff, int4 subjectLength, int4 *bestRowCur, int4 *insertQrowCur,
unsigned char encoding_numCodes, int4 openGapPenalty, int4 extensionGapPenalty,
int4 semiGappedExtensionN, int4 queryOffset)
{
int4 queryPosition, bestQueryPosition, queryEnd;
int4 matrixColumn;
unsigned char *rowDropoff, *columnDropoff;
unsigned char *subjectPosition, *bestSubjectPosition, *subjectEnd, *startSubjectPosition;
int4 *bestRow, *insertQrow;
int4 bestScore = 0;
int4 insertS, rowOffset;
int4 subjectDistance;
int4 oldBest, match, previousOldBest;
unsigned char leftOfDropoff;
int4 queryLength;
int4 queryCount, subjectCount;
struct dpResults dpResults;
queryLength = PSSMatrixFP.length;
subjectEnd = subject + subjectLength;
queryEnd = queryLength;
bestSubjectPosition = subjectPosition = startSubjectPosition = subject + 1;
bestQueryPosition = queryPosition = 1;
// Initialize rows
bestRow = bestRowCur + 1;
insertQrow = insertQrowCur + 1;
// Set initial row dropoff and column dropoff
rowDropoff = subject + subjectLength - 1;
columnDropoff = subject;
// -----FIRST ROW-----
// Using first column of query matrix
matrixColumn = queryPosition + 1 + queryOffset;
// -----FIRST CELL-----
// Set M value for top-left cell
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)];
// M must be the best
*bestRow = match;
// Only gap opens possible
*insertQrow = insertS = match - openGapPenalty;
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
subjectDistance = 0;
subjectPosition++; bestRow++; insertQrow++;
// ----- REMAINING CELLS -----
// For each remaining columns in the top row, scanning from left-to-right
while (subjectPosition < subjectEnd)
{
// Set value for M
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)]
- openGapPenalty - subjectDistance * extensionGapPenalty;
// Determine the best of M and Iy
if (match > insertS)
{
*bestRow = match;
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
}
else
{
*bestRow = insertS;
// Since M <= Iy, new Iy must derive from Iy
insertS -= extensionGapPenalty;
}
// Set DUMMY Ix value, which should never be used
*insertQrow = constants_gappedExtensionDummyValue;
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
rowDropoff = subjectPosition;
// And stop processing row
break;
}
subjectPosition++; bestRow++; insertQrow++;
subjectDistance++;
}
// Start queryCount at N. Only allow insertS for every Nth row when queryCount
// reaches 0
queryCount = semiGappedExtensionN;
queryPosition += 1;
queryCount--;
// -----REMAINING ROWS-----
while (queryPosition < queryEnd && rowDropoff > columnDropoff)
{
subjectPosition = columnDropoff + 1;
// Determine subjectCount for the initial subjectPosition. It is used to only allow
// insertQ when (subjectOffset % semiGappedExtensionN) == 0
subjectCount = ((int4)(subjectPosition - startSubjectPosition) % semiGappedExtensionN);
if (subjectCount)
subjectCount = semiGappedExtensionN - subjectCount;
// Reset rows
rowOffset = (subjectPosition - subject);
bestRow = bestRowCur + rowOffset;
insertQrow = insertQrowCur + rowOffset;
// Using next column of query matrix
matrixColumn = queryPosition + 1 + queryOffset;
// ************ All rows we are not allowing insertS
if (queryCount)
{
// ** No insertQ allowed this column, this cell will only get a DUMMY score
if (subjectCount)
{
previousOldBest = *bestRow;
*bestRow = constants_gappedExtensionDummyValue;
// Score at this cell is below dropoff
columnDropoff = subjectPosition;
leftOfDropoff = 1;
}
// ** We are allowing insertQ this column
else
{
// -----FAR LEFT CELL-----
// Record some old values
previousOldBest = *bestRow;
// Set Ix value
*bestRow = *insertQrow;
*insertQrow -= extensionGapPenalty;
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
leftOfDropoff = 1;
}
else
{
// We are right of the column dropoff for this row
leftOfDropoff = 0;
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition++; bestRow++; insertQrow++; subjectCount--;
// -----CELLS LEFT OF ROW DROPOFF-----
while (subjectPosition <= rowDropoff)
{
// ** We are not allowing insertQ this column
if (subjectCount)
{
// Calculate new M value, which is also the best
oldBest = *bestRow;
match = *bestRow = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
}
// We are allowing insertQ this column
else
{
// Calculate new M value
oldBest = *bestRow;
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// Determine the best of M and Ix
if (match > *insertQrow)
{
*bestRow = match;
// Calculate new Ix
*insertQrow = maximum(match - openGapPenalty,
*insertQrow - extensionGapPenalty);
}
else
{
*bestRow = *insertQrow;
// Since M <= Ix, new Ix must derive from Ix
*insertQrow -= extensionGapPenalty;
}
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition++; bestRow++; insertQrow++; subjectCount--;
}
// -----SINGLE CELL RIGHT OF ROW DROPOFF -----
if (!(bestScore > previousOldBest + dropoff) && (subjectPosition < subjectEnd))
{
// Set value for best
*bestRow = match = previousOldBest + scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)];
// Set DUMMY values for Ix
*insertQrow = constants_gappedExtensionDummyValue;
if (match + dropoff >= bestScore)
{
// Record dropoff position
rowDropoff = subjectPosition;
}
}
}
// ************ Every Nth row we allow insertS
else
{
// -----FAR LEFT CELL-----
// ** No insertQ allowed this column, this cell will only get a DUMMY score
if (subjectCount)
{
previousOldBest = *bestRow;
*bestRow = constants_gappedExtensionDummyValue;
// Score at this cell is below dropoff
columnDropoff = subjectPosition;
leftOfDropoff = 1;
}
// ** We are allowing insertQ this column
else
{
// Record some old values
previousOldBest = *bestRow;
// Set Ix value
*bestRow = *insertQrow;
*insertQrow -= extensionGapPenalty;
// Set DUMMY value for Iy, which should never be used
insertS = constants_gappedExtensionDummyValue;
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
leftOfDropoff = 1;
}
else
{
// We are right of the column dropoff for this row
leftOfDropoff = 0;
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition++; bestRow++; insertQrow++; subjectCount--;
// -----CELLS LEFT OF ROW DROPOFF-----
while (subjectPosition <= rowDropoff)
{
// ** We are not allowing insertQ this column
if (subjectCount)
{
// Remember old M value (for cell below this one)
oldBest = *bestRow;
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// Determine the best of M and Iy
if (match > insertS)
{
*bestRow = match;
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
}
else
{
*bestRow = insertS;
// Since M <= Iy, new Iy must derive from Iy
insertS -= extensionGapPenalty;
}
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
// If the score at the current cell (and at the cells to its left) is below dropoff
if (leftOfDropoff)
{
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
}
else
{
// We are right of the column dropoff for this row
leftOfDropoff = 0;
}
}
}
// ** We are allowing insertQ this column
else
{
// Remember old M value (for cell below this one)
oldBest = *bestRow;
match = scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)] + previousOldBest;
previousOldBest = oldBest;
// Determine the best of M, Ix and Iy
if (match > insertS)
{
if (match > *insertQrow)
{
// Match is largest
*bestRow = match;
// Calculate new Ix
*insertQrow = maximum(match - openGapPenalty,
*insertQrow - extensionGapPenalty);
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
// If this is the best-yet scoring cell
if (match > bestScore)
{
// Update best start cell data
bestScore = match;
bestQueryPosition = queryPosition;
bestSubjectPosition = subjectPosition;
}
}
else
{
// insertQ is largest
*bestRow = *insertQrow;
// Calculate new Ix
*insertQrow -= extensionGapPenalty;
// Dummy Iy
insertS = constants_gappedExtensionDummyValue;
}
}
else
{
if (insertS > *insertQrow)
{
// insertS is largest
*bestRow = insertS;
// Dummy Ix
*insertQrow = constants_gappedExtensionDummyValue;
// Calculate new Iy
insertS -= extensionGapPenalty;
}
else
{
// insertQ is largest
*bestRow = *insertQrow;
// Calculate new Ix
*insertQrow -= extensionGapPenalty;
// Dummy Iy
insertS = constants_gappedExtensionDummyValue;
}
}
// If the score at the current cell (and at the cells to its left) is below dropoff
if (leftOfDropoff)
{
if (bestScore > *bestRow + dropoff)
{
// Record dropoff position
columnDropoff = subjectPosition;
}
else
{
// We are right of the column dropoff for this row
leftOfDropoff = 0;
}
}
// Reset subjectCount
subjectCount = semiGappedExtensionN;
}
subjectPosition++; bestRow++; insertQrow++; subjectCount--;
}
// -----SINGLE CELL RIGHT OF ROW DROPOFF -----
if (!(bestScore > previousOldBest + dropoff) && (subjectPosition < subjectEnd))
{
// Calculate match value
match = previousOldBest + scoreMatrixC[querySequenceC[matrixColumn] * encoding_numCodes + (*subjectPosition)];
// Set value for best
*bestRow = maximum(match, insertS);
// Calculate new Iy
insertS = maximum(match - openGapPenalty,
insertS - extensionGapPenalty);
// Set DUMMY values for Ix
*insertQrow = constants_gappedExtensionDummyValue;
subjectPosition++; bestRow++; insertQrow++;
}
// -----CELLS RIGHT OF ROW DROPOFF -----
if (!(bestScore > *(bestRow - 1) + dropoff))
{
while (subjectPosition < subjectEnd)
{
// Set value for Iy and best
*bestRow = insertS;
insertS = insertS - extensionGapPenalty;
// Set DUMMY values for Ix
*insertQrow = constants_gappedExtensionDummyValue;
// If score at current cell is below dropoff
if (bestScore > *bestRow + dropoff)
{
// Stop processing row
subjectPosition++;
break;
}
subjectPosition++; bestRow++; insertQrow++;
}
}
// Record dropoff position
rowDropoff = subjectPosition - 1;
// Clear insertS for next row
insertS = constants_gappedExtensionDummyValue;
// Reset queryCount
queryCount = semiGappedExtensionN;
}
//queryPosition += encoding_numCodes; queryCount--;
queryPosition += 1; queryCount--;
}
dpResults.best.queryOffset = bestQueryPosition;
dpResults.best.subjectOffset = bestSubjectPosition - subject;
dpResults.bestScore = bestScore;
dpResults.traceback = NULL;
return dpResults;
}
|
5d4f688e8d08a0376a2a8f09a8f63bdaf522d990.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
*
* See LICENCE.txt for license information
************************************************************************/
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>
#include "nccl.h"
#include "test_utilities.h"
#include <roctracer/roctx.h>
int csv = false;
int errors = 0;
double avg_bw = 0.0;
int avg_count = 0;
bool is_reduction = true;
template<typename T>
void RunTest(T** sendbuff, T** recvbuff, const int N, const ncclDataType_t type,
const ncclRedOp_t op, ncclComm_t* comms, const std::vector<int>& dList) {
// initialize data
T* buffer = (T*)malloc(N * sizeof(T));
T* result = (T*)malloc(N * sizeof(T));
memset(buffer, 0, N * sizeof(T));
memset(result, 0, N * sizeof(T));
int nDev = 0;
NCCLCHECK(ncclCommCount(comms[0], &nDev));
hipStream_t* s = (hipStream_t*)malloc(sizeof(hipStream_t)*nDev);
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipStreamCreate(s+i));
CUDACHECK(hipMemset(recvbuff[i], 0, N * sizeof(T)));
Randomize(sendbuff[i], N, i);
if(i == 0) {
CUDACHECK(hipMemcpy(result, sendbuff[i], N*sizeof(T), hipMemcpyDeviceToHost));
} else {
Accumulate<T>(result, sendbuff[i], N, op);
}
}
// warm up GPU
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
NCCLCHECK(ncclAllReduce((const void*)sendbuff[i], (void*)recvbuff[i], ::min(N, 1024 * 1024), type, op, comms[i], s[i]));
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipStreamSynchronize(s[i]));
}
// for (int n = 0; n <= N; n = (n > 0) ? n << 1 : 1)
{
int n = N;
printf((csv) ? "%i,%i,%s,%s," : "%12i %12i %6s %6s",
(int) (n * sizeof(T)), n, TypeName(type).c_str(),
OperationName(op).c_str());
// do out-of-place reduction first
roctxRangePushA("out of place");
auto start = std::chrono::high_resolution_clock::now();
//for (int i=0; i<100; i++) {
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
NCCLCHECK(ncclAllReduce((const void*)sendbuff[i], (void*)recvbuff[i], n, type, op,
comms[i], s[i]));
}
//}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipStreamSynchronize(s[i]));
}
auto stop = std::chrono::high_resolution_clock::now();
roctxRangePop();
roctxRangePushA("out of place bookkeeping");
double elapsedSec =
std::chrono::duration_cast<std::chrono::duration<double>>(
stop - start).count(); // / 100.0;
double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec;
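// busbw scales algbw by 2*(nDev-1)/nDev, the standard allreduce bus-bandwidth factor (roughly how many times each byte crosses a link in a ring allreduce).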
double busbw = algbw * (double)(2 * nDev - 2) / (double)nDev;
double maxDelta = 0.0;
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
double tmpDelta = CheckDelta<T>(recvbuff[i], result, N);
maxDelta = ::max(tmpDelta, maxDelta);
}
printf((csv)?"%f,%f,%f,%le,":" %7.3f %5.2f %5.2f %7.0le",
elapsedSec * 1.0E3, algbw, busbw, maxDelta);
if (maxDelta > deltaMaxValue(type, is_reduction)) errors++;
avg_bw += busbw;
avg_count++;
roctxRangePop();
}
// for (int n = 0; n <= N; n = (n > 0) ? n << 1 : 1)
{
int n = N;
// now do in-place reduction
roctxRangePushA("in place");
auto start = std::chrono::high_resolution_clock::now();
//for (int i=0; i<100; i++) {
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
NCCLCHECK(ncclAllReduce((const void*)sendbuff[i], (void*)sendbuff[i], n, type, op,
comms[i], s[i]));
}
//}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipStreamSynchronize(s[i]));
}
auto stop = std::chrono::high_resolution_clock::now();
roctxRangePop();
roctxRangePushA("in place bookkeeping");
double elapsedSec =
std::chrono::duration_cast<std::chrono::duration<double>>(
stop - start).count(); // / 100.0;
double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec;
double busbw = algbw * (double)(2 * nDev - 2) / (double)nDev;
double maxDelta = 0.0;
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
double tmpDelta = CheckDelta<T>(sendbuff[i], result, N);
maxDelta = ::max(tmpDelta, maxDelta);
}
printf((csv)?"%f,%f,%f,%le,":" %7.3f %5.2f %5.2f %7.0le\n",
elapsedSec * 1.0E3, algbw, busbw, maxDelta);
if (maxDelta > deltaMaxValue(type, is_reduction)) errors++;
avg_bw += busbw;
avg_count++;
roctxRangePop();
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipStreamDestroy(s[i]));
}
free(s);
free(buffer);
free(result);
}
template<typename T>
void RunTests(const int N, const ncclDataType_t type, ncclComm_t* comms,
const std::vector<int>& dList) {
int nDev = 0;
NCCLCHECK(ncclCommCount(comms[0], &nDev));
T** sendbuff = (T**)malloc(nDev * sizeof(T*));
T** recvbuff = (T**)malloc(nDev * sizeof(T*));
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipMalloc(sendbuff + i, N * sizeof(T)));
CUDACHECK(hipMalloc(recvbuff + i, N * sizeof(T)));
}
for (ncclRedOp_t op : { ncclSum, ncclProd, ncclMax, ncclMin }) {
// for (ncclRedOp_t op : { ncclSum }) {
RunTest<T>(sendbuff, recvbuff, N, type, op, comms, dList);
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipFree(sendbuff[i]));
CUDACHECK(hipFree(recvbuff[i]));
}
free(sendbuff);
free(recvbuff);
}
void usage() {
printf("Tests nccl AllReduce with user supplied arguments.\n"
" Usage: all_reduce_test <data size in bytes> [number of GPUs] "
"[GPU 0] [GPU 1] ...\n\n");
}
int main(int argc, char* argv[]) {
int nVis = 0;
CUDACHECK(hipGetDeviceCount(&nVis));
int N = 0;
if (argc > 1) {
int t = sscanf(argv[1], "%d", &N);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[1]);
usage();
exit(EXIT_FAILURE);
}
} else {
printf("Error: must specify at least data size in bytes!\n\n");
usage();
exit(EXIT_FAILURE);
}
int nDev = nVis;
if (argc > 2) {
int t = sscanf(argv[2], "%d", &nDev);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[1]);
usage();
exit(EXIT_FAILURE);
}
}
std::vector<int> dList(nDev);
for (int i = 0; i < nDev; ++i)
dList[i] = i % nVis;
if (argc > 3) {
if (argc - 3 != nDev) {
printf("Error: insufficient number of GPUs in list\n\n");
usage();
exit(EXIT_FAILURE);
}
for (int i = 0; i < nDev; ++i) {
int t = sscanf(argv[3 + i], "%d", dList.data() + i);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[2 + i]);
usage();
exit(EXIT_FAILURE);
}
}
}
ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nDev);
NCCLCHECK(ncclCommInitAll(comms, nDev, dList.data()));
if (!csv) {
printf("# Using devices\n");
for (int g = 0; g < nDev; ++g) {
int cudaDev;
int rank;
hipDeviceProp_t prop;
NCCLCHECK(ncclCommCuDevice(comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(comms[g], &rank));
CUDACHECK(hipGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name);
}
printf("\n");
printf("# %10s %12s %6s %6s out-of-place in-place\n", "", "", "", "");
printf("# %10s %12s %6s %6s %7s %5s %5s %7s %7s %5s %5s %7s\n", "bytes", "N", "type", "op",
"time", "algbw", "busbw", "res", "time", "algbw", "busbw", "res");
}
else {
printf("B,N,type,op,oop_time,oop_algbw,oop_busbw,oop_res,ip_time,ip_algbw,ip_busbw,ip_res\n");
}
RunTests<char>(N / sizeof(char), ncclChar, comms, dList);
RunTests<int>(N / sizeof(int), ncclInt, comms, dList);
#ifdef CUDA_HAS_HALF
RunTests<half>(N / sizeof(half), ncclHalf, comms, dList);
#endif
RunTests<float>(N / sizeof(float), ncclFloat, comms, dList);
RunTests<double>(N / sizeof(double), ncclDouble, comms, dList);
RunTests<long long>(N / sizeof(long long), ncclInt64, comms, dList);
RunTests<unsigned long long>(N / sizeof(unsigned long long), ncclUint64, comms, dList);
printf("\n");
for(int i=0; i<nDev; ++i)
ncclCommDestroy(comms[i]);
free(comms);
char* str = getenv("NCCL_TESTS_MIN_BW");
double check_avg_bw = str ? atof(str) : -1;
avg_bw /= avg_count;
printf(" Out of bounds values : %d %s\n", errors, errors ? "FAILED" : "OK");
printf(" Avg bus bandwidth : %g %s\n", avg_bw, check_avg_bw == -1 ? "" : (avg_bw < check_avg_bw ? "FAILED" : "OK"));
printf("\n");
if (errors || avg_bw < check_avg_bw)
exit(EXIT_FAILURE);
else
exit(EXIT_SUCCESS);
}
| 5d4f688e8d08a0376a2a8f09a8f63bdaf522d990.cu | /*************************************************************************
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
*
* See LICENCE.txt for license information
************************************************************************/
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>
#include "nccl.h"
#include "test_utilities.h"
#include <nvToolsExt.h>
int csv = false;
int errors = 0;
double avg_bw = 0.0;
int avg_count = 0;
bool is_reduction = true;
template<typename T>
void RunTest(T** sendbuff, T** recvbuff, const int N, const ncclDataType_t type,
const ncclRedOp_t op, ncclComm_t* comms, const std::vector<int>& dList) {
// initialize data
T* buffer = (T*)malloc(N * sizeof(T));
T* result = (T*)malloc(N * sizeof(T));
memset(buffer, 0, N * sizeof(T));
memset(result, 0, N * sizeof(T));
int nDev = 0;
NCCLCHECK(ncclCommCount(comms[0], &nDev));
cudaStream_t* s = (cudaStream_t*)malloc(sizeof(cudaStream_t)*nDev);
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaStreamCreate(s+i));
CUDACHECK(cudaMemset(recvbuff[i], 0, N * sizeof(T)));
Randomize(sendbuff[i], N, i);
if(i == 0) {
CUDACHECK(cudaMemcpy(result, sendbuff[i], N*sizeof(T), cudaMemcpyDeviceToHost));
} else {
Accumulate<T>(result, sendbuff[i], N, op);
}
}
// warm up GPU
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
NCCLCHECK(ncclAllReduce((const void*)sendbuff[i], (void*)recvbuff[i], std::min(N, 1024 * 1024), type, op, comms[i], s[i]));
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaStreamSynchronize(s[i]));
}
// for (int n = 0; n <= N; n = (n > 0) ? n << 1 : 1)
{
int n = N;
printf((csv) ? "%i,%i,%s,%s," : "%12i %12i %6s %6s",
(int) (n * sizeof(T)), n, TypeName(type).c_str(),
OperationName(op).c_str());
// do out-of-place reduction first
nvtxRangePushA("out of place");
auto start = std::chrono::high_resolution_clock::now();
//for (int i=0; i<100; i++) {
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
NCCLCHECK(ncclAllReduce((const void*)sendbuff[i], (void*)recvbuff[i], n, type, op,
comms[i], s[i]));
}
//}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaStreamSynchronize(s[i]));
}
auto stop = std::chrono::high_resolution_clock::now();
nvtxRangePop();
nvtxRangePushA("out of place bookkeeping");
double elapsedSec =
std::chrono::duration_cast<std::chrono::duration<double>>(
stop - start).count(); // / 100.0;
double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec;
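// busbw scales algbw by 2*(nDev-1)/nDev, the standard allreduce bus-bandwidth factor (roughly how many times each byte crosses a link in a ring allreduce).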
double busbw = algbw * (double)(2 * nDev - 2) / (double)nDev;
double maxDelta = 0.0;
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
double tmpDelta = CheckDelta<T>(recvbuff[i], result, N);
maxDelta = std::max(tmpDelta, maxDelta);
}
printf((csv)?"%f,%f,%f,%le,":" %7.3f %5.2f %5.2f %7.0le",
elapsedSec * 1.0E3, algbw, busbw, maxDelta);
if (maxDelta > deltaMaxValue(type, is_reduction)) errors++;
avg_bw += busbw;
avg_count++;
nvtxRangePop();
}
// for (int n = 0; n <= N; n = (n > 0) ? n << 1 : 1)
{
int n = N;
// now do in-place reduction
nvtxRangePushA("in place");
auto start = std::chrono::high_resolution_clock::now();
//for (int i=0; i<100; i++) {
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
NCCLCHECK(ncclAllReduce((const void*)sendbuff[i], (void*)sendbuff[i], n, type, op,
comms[i], s[i]));
}
//}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaStreamSynchronize(s[i]));
}
auto stop = std::chrono::high_resolution_clock::now();
nvtxRangePop();
nvtxRangePushA("in place bookkeeping");
double elapsedSec =
std::chrono::duration_cast<std::chrono::duration<double>>(
stop - start).count(); // / 100.0;
double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec;
double busbw = algbw * (double)(2 * nDev - 2) / (double)nDev;
double maxDelta = 0.0;
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
double tmpDelta = CheckDelta<T>(sendbuff[i], result, N);
maxDelta = std::max(tmpDelta, maxDelta);
}
printf((csv)?"%f,%f,%f,%le,":" %7.3f %5.2f %5.2f %7.0le\n",
elapsedSec * 1.0E3, algbw, busbw, maxDelta);
if (maxDelta > deltaMaxValue(type, is_reduction)) errors++;
avg_bw += busbw;
avg_count++;
nvtxRangePop();
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaStreamDestroy(s[i]));
}
free(s);
free(buffer);
free(result);
}
template<typename T>
void RunTests(const int N, const ncclDataType_t type, ncclComm_t* comms,
const std::vector<int>& dList) {
int nDev = 0;
NCCLCHECK(ncclCommCount(comms[0], &nDev));
T** sendbuff = (T**)malloc(nDev * sizeof(T*));
T** recvbuff = (T**)malloc(nDev * sizeof(T*));
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaMalloc(sendbuff + i, N * sizeof(T)));
CUDACHECK(cudaMalloc(recvbuff + i, N * sizeof(T)));
}
for (ncclRedOp_t op : { ncclSum, ncclProd, ncclMax, ncclMin }) {
// for (ncclRedOp_t op : { ncclSum }) {
RunTest<T>(sendbuff, recvbuff, N, type, op, comms, dList);
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaFree(sendbuff[i]));
CUDACHECK(cudaFree(recvbuff[i]));
}
free(sendbuff);
free(recvbuff);
}
void usage() {
printf("Tests nccl AllReduce with user supplied arguments.\n"
" Usage: all_reduce_test <data size in bytes> [number of GPUs] "
"[GPU 0] [GPU 1] ...\n\n");
}
int main(int argc, char* argv[]) {
int nVis = 0;
CUDACHECK(cudaGetDeviceCount(&nVis));
int N = 0;
if (argc > 1) {
int t = sscanf(argv[1], "%d", &N);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[1]);
usage();
exit(EXIT_FAILURE);
}
} else {
printf("Error: must specify at least data size in bytes!\n\n");
usage();
exit(EXIT_FAILURE);
}
int nDev = nVis;
if (argc > 2) {
int t = sscanf(argv[2], "%d", &nDev);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[1]);
usage();
exit(EXIT_FAILURE);
}
}
std::vector<int> dList(nDev);
for (int i = 0; i < nDev; ++i)
dList[i] = i % nVis;
if (argc > 3) {
if (argc - 3 != nDev) {
printf("Error: insufficient number of GPUs in list\n\n");
usage();
exit(EXIT_FAILURE);
}
for (int i = 0; i < nDev; ++i) {
int t = sscanf(argv[3 + i], "%d", dList.data() + i);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[2 + i]);
usage();
exit(EXIT_FAILURE);
}
}
}
ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nDev);
NCCLCHECK(ncclCommInitAll(comms, nDev, dList.data()));
if (!csv) {
printf("# Using devices\n");
for (int g = 0; g < nDev; ++g) {
int cudaDev;
int rank;
cudaDeviceProp prop;
NCCLCHECK(ncclCommCuDevice(comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(comms[g], &rank));
CUDACHECK(cudaGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name);
}
printf("\n");
printf("# %10s %12s %6s %6s out-of-place in-place\n", "", "", "", "");
printf("# %10s %12s %6s %6s %7s %5s %5s %7s %7s %5s %5s %7s\n", "bytes", "N", "type", "op",
"time", "algbw", "busbw", "res", "time", "algbw", "busbw", "res");
}
else {
printf("B,N,type,op,oop_time,oop_algbw,oop_busbw,oop_res,ip_time,ip_algbw,ip_busbw,ip_res\n");
}
RunTests<char>(N / sizeof(char), ncclChar, comms, dList);
RunTests<int>(N / sizeof(int), ncclInt, comms, dList);
#ifdef CUDA_HAS_HALF
RunTests<half>(N / sizeof(half), ncclHalf, comms, dList);
#endif
RunTests<float>(N / sizeof(float), ncclFloat, comms, dList);
RunTests<double>(N / sizeof(double), ncclDouble, comms, dList);
RunTests<long long>(N / sizeof(long long), ncclInt64, comms, dList);
RunTests<unsigned long long>(N / sizeof(unsigned long long), ncclUint64, comms, dList);
printf("\n");
for(int i=0; i<nDev; ++i)
ncclCommDestroy(comms[i]);
free(comms);
char* str = getenv("NCCL_TESTS_MIN_BW");
double check_avg_bw = str ? atof(str) : -1;
avg_bw /= avg_count;
printf(" Out of bounds values : %d %s\n", errors, errors ? "FAILED" : "OK");
printf(" Avg bus bandwidth : %g %s\n", avg_bw, check_avg_bw == -1 ? "" : (avg_bw < check_avg_bw ? "FAILED" : "OK"));
printf("\n");
if (errors || avg_bw < check_avg_bw)
exit(EXIT_FAILURE);
else
exit(EXIT_SUCCESS);
}
|
6050ea79db2ff818ecf50bca051749319b487988.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void add_kernel(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
void handle(int gpu_number)
{
int N_GPU;
hipGetDeviceCount(&N_GPU);
//printf("gpu count : %d\n",N_GPU);
// Split the 2^30-element workload evenly across the GPUs (ceiling division)
int N = ((1<<30)+N_GPU - 1)/N_GPU;
hipSetDevice(gpu_number);
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
float time_elapsed=0;
hipEvent_t start,stop;
hipEventCreate(&start); // Create event
hipEventCreate(&stop);
hipEventRecord( start,0); // Record current time
// Perform SAXPY on 1M elements
hipLaunchKernelGGL(( add_kernel), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipEventRecord(stop,0); // Record current time
hipEventSynchronize(start); //Waits for an event to complete.
hipEventSynchronize(stop); //Waits for an event to complete (all work recorded before stop has finished)
hipEventElapsedTime(&time_elapsed,start,stop); // Compute elapsed time
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
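// x was initialized to 1.0f and y to 2.0f, so after y = a*x + y with a = 2.0f every element should equal 4.0f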
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
hipEventDestroy(start); //destory the event
hipEventDestroy(stop);
printf("card%d %f(ms)\n",gpu_number,time_elapsed);
} | 6050ea79db2ff818ecf50bca051749319b487988.cu | #include <stdio.h>
__global__ void add_kernel(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
void handle(int gpu_number)
{
int N_GPU;
cudaGetDeviceCount(&N_GPU);
//printf("gpu count : %d\n",N_GPU);
// Split the 2^30-element workload evenly across the GPUs (ceiling division)
int N = ((1<<30)+N_GPU - 1)/N_GPU;
cudaSetDevice(gpu_number);
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
float time_elapsed=0;
cudaEvent_t start,stop;
cudaEventCreate(&start); // Create event
cudaEventCreate(&stop);
cudaEventRecord( start,0); // Record current time
// Perform SAXPY on 1M elements
add_kernel<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
cudaEventRecord(stop,0); // Record current time
cudaEventSynchronize(start); //Waits for an event to complete.
cudaEventSynchronize(stop); //Waits for an event to complete (all work recorded before stop has finished)
cudaEventElapsedTime(&time_elapsed,start,stop); // Compute elapsed time
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
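// x was initialized to 1.0f and y to 2.0f, so after y = a*x + y with a = 2.0f every element should equal 4.0f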
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
cudaEventDestroy(start); //destory the event
cudaEventDestroy(stop);
printf("card%d 执行时间:%f(ms)\n",gpu_number,time_elapsed);
} |
bef9da2dccda6d81e280fc0e1c6ee590257a7cbc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions mixed zc -> ds
*/
#include "common_magma.h"
#define PRECISION_z
#define BLOCK_SIZE 32
//#define num_threads 64
#define dgemv_bs 32
#if (!defined(PRECISION_z)) || (GPUSHMEM >= 200)
/*------------------------------------------ UPLO = 'L' ----------------------------------*/
__device__ int flag = 0;
__global__ void
l_zlat2c_special(
int n,
const hipDoubleComplex *A, int lda,
cuFloatComplex *SA,
magma_int_t *info, double RMAX, int ldsa )
{
double mRMAX = - RMAX;
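// Elements whose real (or imaginary) part falls outside [-RMAX, RMAX] would overflow in single precision; they raise the global flag.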
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
__shared__ hipDoubleComplex la[dgemv_bs][dgemv_bs+1];
A += ind;
SA += ind;
A += ty * lda;
SA += ty * ldsa;
int break_d = blockIdx.x* dgemv_bs ;
hipDoubleComplex temp ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs; j+=4){
temp = A[j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = cuComplexDoubleToFloat(temp);
}
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4)
la[ty+j][tx] = A[j*lda];
__syncthreads();
A += dgemv_bs ;
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i < tx ) {
la[tx][i] = la[i][tx] ;
}
else
la[tx][i] = la[tx][i] ;
}
__syncthreads();
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[ty+j][tx] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = cuComplexDoubleToFloat(temp);
}
__syncthreads();
//la[tx][ty] = flag ;
//__syncthreads();
//if( ty == 0 ) {
// // info[0] = flag+ la[tx] [1] + la[tx] [2] + la[tx] [3] ;
//}
}
__global__ void
l_zlat2c_generic(
int n,
const hipDoubleComplex *A, int lda,
cuFloatComplex *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
double mRMAX = - RMAX;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
__shared__ hipDoubleComplex la [dgemv_bs][dgemv_bs+1];
hipDoubleComplex temp ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
if ( tx < m_mod_32 ){
A+= ( blockIdx.x * dgemv_bs + tx ) ;
SA+= ( blockIdx.x * dgemv_bs + tx ) ;
}
else{
A+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
SA+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
}
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = blockIdx.x* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = cuComplexDoubleToFloat(temp);
}
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
/*
we don't need to zero these entries, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
this thread computes the triangular part;
the other threads wait with their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count = tx ;
else
count = m_mod_32 ;
for(j =0; j<=count; j++){
temp = A[j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = cuComplexDoubleToFloat(temp);
}
A += (tx)*lda;
SA += (tx)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
temp= A[count] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[count] = cuComplexDoubleToFloat(temp);
count++;
}
}
else{
}
//la[tx][ty] = flag ;
__syncthreads();
/*--------------------------------------------------------
The leader accumulates all the results from his peer.
----------------------------------------------------------*/
//if( ty == 0 ) {
// info[ind] = ld[tx][0] + ld[tx][1] + ld[tx][2] + ld[tx][3] ;
//}
}
else{
/***************************************
-----------------------------------
-- All the blocks but the last one --
****************************************
-------------------------------------*/
A += ind;
SA += ind;
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = blockIdx.x* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = cuComplexDoubleToFloat(temp);
}
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
/*------------------------------------
Diagonal
Copy + Transpose lower triangle
--------------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4)
la[ty+j][tx] = A[ j * lda];
A+= dgemv_bs ;
__syncthreads();
/*--------------------------------------------
Mirror Upper Triangle to Lower triangle
---------------------------------------------*/
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i < tx ) {
la[tx][i] = la[i][tx] ;
}
else
la[tx][i] = la[tx][i] ;
}
__syncthreads();
/*--------------------------------
Do diagonal Computation
-----------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[ty+j][tx] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = cuComplexDoubleToFloat(temp);
}
__syncthreads();
//la[tx] [ty ] = flag ;
//__syncthreads();
/*--------------------------------------------------------
The leader accumulates all the results from his peer.
----------------------------------------------------------*/
//if( ty == 0 )
// {
// info [ind] = flag + la[tx][1]+ la[tx][2]+ la[tx][3] ;
// }
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/* Generic Case*/
__global__ void
u_zlat2c_generic(
int n,
const hipDoubleComplex *A, int lda,
cuFloatComplex *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
double mRMAX = - RMAX;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
__shared__ hipDoubleComplex la [dgemv_bs][dgemv_bs+1];
int blockIdxx = blockIdx.x ;
hipDoubleComplex temp ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
ind = tx ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
if ( tx < m_mod_32 ){
A+= ( tx ) ;
SA+= ( tx ) ;
}
else{
A+= ( m_mod_32 -1) ;
SA+= ( m_mod_32 -1) ;
}
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (blockIdx.x)* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[-j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = cuComplexDoubleToFloat(temp);
}
A -= lda *dgemv_bs ;
SA -= ldsa*dgemv_bs ;
}
/*
we don't need to zero these entries, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
this thread computes the triangular part;
the other threads wait with their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count =m_mod_32- tx ;
else
count = m_mod_32 ;
for(j =0; j<count; j++){
temp = A[-j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = cuComplexDoubleToFloat(temp);
}
A-=(count-1)*lda;
SA-=(count-1)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
temp = A[-count] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-count] = cuComplexDoubleToFloat(temp);
count++;
}
}
else{
}
/*--------------------------------------------------------
The leader accumulates all the results from his peer.
----------------------------------------------------------*/
//la[tx][ty] = flag ;
//__syncthreads();
//if( ty == 0 ) {
// // info [ind] = flag + la[tx][1] + la[tx][2] + la[tx][3] ;
//}
}
else{
/***************************************
-----------------------------------
-- All the blocks but the last one --
-- By the way this code can be optimized more.
****************************************
-------------------------------------*/
ind = blockIdx.x * dgemv_bs + tx + m_mod_32 ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (n / dgemv_bs - blockIdxx-1 )* dgemv_bs ;
/*----------------------------
Go Left
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[-j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = cuComplexDoubleToFloat(temp);
}
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
/*------------------------------------
Diagonal
Copy + Transpose lower triangle
--------------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
la[tx][31-ty-j] = A[ -j * lda];
}
A-= dgemv_bs ;
__syncthreads();
/*--------------------------------------------
Mirror Upper Triangle to Lower triangle
---------------------------------------------*/
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i <tx ){
la[tx][i] = la[i][tx];
}
else{
la[tx][i] = la[tx][i] ;
}
}
__syncthreads();
/*--------------------------------
Do diagonal Computation
-----------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[tx][31-ty-j];
// temp = la[ty+j][tx] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[- j*ldsa] = cuComplexDoubleToFloat(temp);
}
/*--------------------------------------------------------
The leader accumulates all the results from his peer.
----------------------------------------------------------*/
//la[tx] [ty] = flag ;
//__syncthreads();
//
//if( ty == 0 ) {
// // info[ind] = flag + la[tx] [1] + la[tx] [2] + la[tx] [3] ;
//}
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/*Good Dimension*/
__global__ void
u_zlat2c_special (
int n,
const hipDoubleComplex *A, int lda,
cuFloatComplex *SA,
magma_int_t *info, double RMAX, int ldsa )
{
double mRMAX = - RMAX;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
/*
Reverse Computation ...
- Left
- Triangle
- Up
*/
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
__shared__ hipDoubleComplex la [dgemv_bs][dgemv_bs+1];
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
hipDoubleComplex temp ;
int break_d = (n / dgemv_bs - blockIdx.x-1 )* dgemv_bs ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
// la[tx][ty+j] = A[-j*lda] ;
temp = A[-j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = cuComplexDoubleToFloat(temp);
}
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4)
la[tx][31-ty-j] = A[ -j * lda];
/*
Look at the indexing changes
*/
A-= dgemv_bs ;
__syncthreads();
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i <tx ){
la[tx][i] = la[i][tx];
}
else{
la[tx][i] = la[tx][i] ;
}
}
__syncthreads();
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[tx][31-ty-j];
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[- j*ldsa] = cuComplexDoubleToFloat(temp);
}
//la[tx][ty] = flag ;
//
//__syncthreads();
//
//if( ty == 0 ) {
// // info[0] = flag + la[tx][1] + la[tx][2] + la[tx][3] ;
//}
}
extern "C" void
mzlat2c(
char uplo, magma_int_t m,
const hipDoubleComplex *A, magma_int_t lda,
cuFloatComplex *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
Note:
The UPLO = 'U' Version can be optimized more.
*/
double RMAX = (double)lapackf77_slamch("O");
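// RMAX is the single-precision overflow threshold from SLAMCH('O'); the kernels flag any entry that exceeds it.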
int blocks;
if (m % dgemv_bs==0)
blocks = m/ dgemv_bs;
else
blocks = m/ dgemv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(32, 4, 1);
if( m % dgemv_bs == 0 ) {
if( uplo == 'L' || uplo == 'l'){
hipLaunchKernelGGL(( l_zlat2c_special) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, info, RMAX, ldsa );
}
else{
hipLaunchKernelGGL(( u_zlat2c_special) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, info, RMAX, ldsa );
}
}
else{
int m_full_block = (m - m % 32 ) /32 ;
int m_mod_32 = m%32 ;
if( uplo == 'L' || uplo == 'l'){
hipLaunchKernelGGL(( l_zlat2c_generic) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
else{
hipLaunchKernelGGL(( u_zlat2c_generic) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
}
}
/*
Interface ..................................
Reproduced from dlansy routines...
How to deliver the info.
*/
extern "C" void
magmablas_zlat2c(
char uplo, magma_int_t n,
const hipDoubleComplex *A, magma_int_t lda,
cuFloatComplex *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
The routine converts a COMPLEX_16 triangular
matrix A to COMPLEX triangular matrix SA.
*/
*info = 0;
mzlat2c( uplo, n, A, lda, SA, ldsa, info );
}
///////////////////////////////////////////////////////////////////////////////////////////
#else
///////////////////////////////////////////////////////////////////////////////////////////
/*------------------------------------------ UPLO = 'L' ----------------------------------*/
__global__ void
l_zlat2c_special (
int n,
const hipDoubleComplex *A, int lda,
cuFloatComplex *SA,
magma_int_t *info, double RMAX, int ldsa )
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
A += ind;
SA += ind;
A += ty * lda;
SA += ty * ldsa;
int break_d = (blockIdx.x+1)* dgemv_bs ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[j*ldsa] = cuComplexDoubleToFloat(A[j*lda]);
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
}
__global__ void
l_zlat2c_generic(
int n,
const hipDoubleComplex *A, int lda,
cuFloatComplex *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
if ( tx < m_mod_32 ){
A+= ( blockIdx.x * dgemv_bs + tx ) ;
SA+= ( blockIdx.x * dgemv_bs + tx ) ;
}
else{
A+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
SA+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
}
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = blockIdx.x* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[j*ldsa] = cuComplexDoubleToFloat(A[j*lda]);
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
/*
we don't need to zero these entries, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
this thread computes the triangular part;
the other threads wait with their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count = tx ;
else
count = m_mod_32 ;
for(j =0; j<=count; j++)
SA[j*ldsa] = cuComplexDoubleToFloat(A[j*lda]);
A += (tx)*lda;
SA += (tx)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
SA[count] = cuComplexDoubleToFloat(A[count]);
count++;
}
}
else{
}
__syncthreads();
}
else{
/* **************************************
-- All the blocks but the last one --
************************************** */
A += ind;
SA += ind;
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = (blockIdx.x+1)* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[j*ldsa] = cuComplexDoubleToFloat(A[j*lda]);
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/* Generic Case*/
__global__ void
u_zlat2c_generic(
int n,
const hipDoubleComplex *A, int lda,
cuFloatComplex *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
int blockIdxx = blockIdx.x ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
ind = tx ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
if ( tx < m_mod_32 ){
A+= ( tx ) ;
SA+= ( tx ) ;
}
else{
A+= ( m_mod_32 -1) ;
SA+= ( m_mod_32 -1) ;
}
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (blockIdx.x)* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[-j*ldsa] = cuComplexDoubleToFloat(A[-j*lda]);
A -= lda *dgemv_bs ;
SA -= ldsa*dgemv_bs ;
}
/*
we don't need to zero these entries, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
this thread computes the triangular part;
the other threads wait with their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count =m_mod_32- tx ;
else
count = m_mod_32 ;
for(j =0; j<count; j++)
SA[-j*ldsa] = cuComplexDoubleToFloat(A[-j*lda]);
A-=(count-1)*lda;
SA-=(count-1)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
SA[-count] = cuComplexDoubleToFloat(A[-count]);
count++;
}
}
else{
}
}
else{
/* **************************************
-- All the blocks but the last one --
-- By the way this code can be optimized more.
************************************** */
ind = blockIdx.x * dgemv_bs + tx + m_mod_32 ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (n / dgemv_bs - blockIdxx )* dgemv_bs ;
/*----------------------------
Go Left
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[-j*ldsa] = cuComplexDoubleToFloat(A[-j*lda]);
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/*Good Dimension*/
__global__ void
u_zlat2c_special (
int n,
const hipDoubleComplex *A, int lda,
cuFloatComplex *SA,
magma_int_t *info, double RMAX, int ldsa )
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
/*
Reverse Computation ...
- Left
- Triangle
- Up
*/
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (n / dgemv_bs - blockIdx.x )* dgemv_bs ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[-j*ldsa] = cuComplexDoubleToFloat(A[-j*lda]);
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
}
extern "C" void
mzlat2c(
char uplo, magma_int_t m,
const hipDoubleComplex *A, magma_int_t lda,
cuFloatComplex *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
Note:
The UPLO = 'U' Version can be optimized more.
*/
double RMAX = (double)lapackf77_slamch("O");
int blocks;
if (m % dgemv_bs==0)
blocks = m/ dgemv_bs;
else
blocks = m/ dgemv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(32, 4, 1);
if( m % dgemv_bs == 0 ) {
if( uplo == 'L' || uplo == 'l'){
hipLaunchKernelGGL(( l_zlat2c_special) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, info, RMAX, ldsa );
}
else{
hipLaunchKernelGGL(( u_zlat2c_special) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, info, RMAX, ldsa );
}
}
else{
int m_full_block = (m - m % 32 ) /32 ;
int m_mod_32 = m%32 ;
if( uplo == 'L' || uplo == 'l'){
hipLaunchKernelGGL(( l_zlat2c_generic) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
else{
hipLaunchKernelGGL(( u_zlat2c_generic) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
}
}
/*
Interface ..................................
Reproduced from dlansy routines...
How to deliver the info.
*/
extern "C" void
magmablas_zlat2c(
char uplo, magma_int_t n,
const hipDoubleComplex *A, magma_int_t lda,
cuFloatComplex *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
The routine converts a COMPLEX_16 triangular
matrix A (along with its block diagonal entries) to a COMPLEX
triangular matrix SA (along with its block diagonal entries).
*/
*info = 0;
mzlat2c( uplo, n, A, lda, SA, ldsa, info );
/*
int val = hipblasIdamax(n, WORK, 1);
double retVal[1];
hipblasGetMatrix( 1, 1, sizeof( double ), WORK+val-1, 1, retVal, 1 ) ;
return retVal[0];
*/
}
#endif /* (!defined(PRECISION_z)) || (GPUSHMEM >= 200) */
| bef9da2dccda6d81e280fc0e1c6ee590257a7cbc.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions mixed zc -> ds
*/
#include "common_magma.h"
#define PRECISION_z
#define BLOCK_SIZE 32
//#define num_threads 64
#define dgemv_bs 32
#if (!defined(PRECISION_z)) || (GPUSHMEM >= 200)
/*------------------------------------------ UPLO = 'L' ----------------------------------*/
__device__ int flag = 0;
__global__ void
l_zlat2c_special(
int n,
const cuDoubleComplex *A, int lda,
cuFloatComplex *SA,
magma_int_t *info, double RMAX, int ldsa )
{
double mRMAX = - RMAX;
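// Elements whose real (or imaginary) part falls outside [-RMAX, RMAX] would overflow in single precision; they raise the global flag.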
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
__shared__ cuDoubleComplex la[dgemv_bs][dgemv_bs+1];
A += ind;
SA += ind;
A += ty * lda;
SA += ty * ldsa;
int break_d = blockIdx.x* dgemv_bs ;
cuDoubleComplex temp ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs; j+=4){
temp = A[j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = cuComplexDoubleToFloat(temp);
}
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4)
la[ty+j][tx] = A[j*lda];
__syncthreads();
A += dgemv_bs ;
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i < tx ) {
la[tx][i] = la[i][tx] ;
}
else
la[tx][i] = la[tx][i] ;
}
__syncthreads();
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[ty+j][tx] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = cuComplexDoubleToFloat(temp);
}
__syncthreads();
//la[tx][ty] = flag ;
//__syncthreads();
//if( ty == 0 ) {
// // info[0] = flag+ la[tx] [1] + la[tx] [2] + la[tx] [3] ;
//}
}
__global__ void
l_zlat2c_generic(
int n,
const cuDoubleComplex *A, int lda,
cuFloatComplex *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
double mRMAX = - RMAX;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
__shared__ cuDoubleComplex la [dgemv_bs][dgemv_bs+1];
cuDoubleComplex temp ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
if ( tx < m_mod_32 ){
A+= ( blockIdx.x * dgemv_bs + tx ) ;
SA+= ( blockIdx.x * dgemv_bs + tx ) ;
}
else{
A+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
SA+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
}
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = blockIdx.x* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = cuComplexDoubleToFloat(temp);
}
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
/*
we don't need to zero these entries, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
this thread computes the triangular part;
the other threads wait with their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count = tx ;
else
count = m_mod_32 ;
for(j =0; j<=count; j++){
temp = A[j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = cuComplexDoubleToFloat(temp);
}
A += (tx)*lda;
SA += (tx)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
temp= A[count] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[count] = cuComplexDoubleToFloat(temp);
count++;
}
}
else{
}
//la[tx][ty] = flag ;
__syncthreads();
/*--------------------------------------------------------
The leader accumulates all the results from his peer.
----------------------------------------------------------*/
//if( ty == 0 ) {
// info[ind] = ld[tx][0] + ld[tx][1] + ld[tx][2] + ld[tx][3] ;
//}
}
else{
/***************************************
-----------------------------------
-- All the blocks but the last one --
****************************************
-------------------------------------*/
A += ind;
SA += ind;
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = blockIdx.x* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = cuComplexDoubleToFloat(temp);
}
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
/*------------------------------------
Diagonal
Copy + Transpose lower triangle
--------------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4)
la[ty+j][tx] = A[ j * lda];
A+= dgemv_bs ;
__syncthreads();
/*--------------------------------------------
Mirror Upper Triangle to Lower triangle
---------------------------------------------*/
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i < tx ) {
la[tx][i] = la[i][tx] ;
}
else
la[tx][i] = la[tx][i] ;
}
__syncthreads();
/*--------------------------------
Do diagonal Computation
-----------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[ty+j][tx] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = cuComplexDoubleToFloat(temp);
}
__syncthreads();
//la[tx] [ty ] = flag ;
//__syncthreads();
/*--------------------------------------------------------
The leader accumulates all the results from his peer.
----------------------------------------------------------*/
//if( ty == 0 )
// {
// info [ind] = flag + la[tx][1]+ la[tx][2]+ la[tx][3] ;
// }
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/* Generic Case*/
__global__ void
u_zlat2c_generic(
int n,
const cuDoubleComplex *A, int lda,
cuFloatComplex *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
double mRMAX = - RMAX;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
__shared__ cuDoubleComplex la [dgemv_bs][dgemv_bs+1];
int blockIdxx = blockIdx.x ;
cuDoubleComplex temp ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
ind = tx ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
if ( tx < m_mod_32 ){
A+= ( tx ) ;
SA+= ( tx ) ;
}
else{
A+= ( m_mod_32 -1) ;
SA+= ( m_mod_32 -1) ;
}
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (blockIdx.x)* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[-j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = cuComplexDoubleToFloat(temp);
}
A -= lda *dgemv_bs ;
SA -= ldsa*dgemv_bs ;
}
/*
we don't need to zero these entries, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
this thread computes the triangular part;
the other threads wait with their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count =m_mod_32- tx ;
else
count = m_mod_32 ;
for(j =0; j<count; j++){
temp = A[-j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = cuComplexDoubleToFloat(temp);
}
A-=(count-1)*lda;
SA-=(count-1)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
temp = A[-count] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-count] = cuComplexDoubleToFloat(temp);
count++;
}
}
else{
}
/*--------------------------------------------------------
The leader accumulates all the results from his peer.
----------------------------------------------------------*/
//la[tx][ty] = flag ;
//__syncthreads();
//if( ty == 0 ) {
// // info [ind] = flag + la[tx][1] + la[tx][2] + la[tx][3] ;
//}
}
else{
/***************************************
-----------------------------------
-- All the blocks but the last one --
-- By the way this code can be optimized more.
****************************************
-------------------------------------*/
ind = blockIdx.x * dgemv_bs + tx + m_mod_32 ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (n / dgemv_bs - blockIdxx-1 )* dgemv_bs ;
/*----------------------------
Go Left
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
temp = A[-j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = cuComplexDoubleToFloat(temp);
}
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
/*------------------------------------
Diagonal
Copy + Transpose lower triangle
--------------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
la[tx][31-ty-j] = A[ -j * lda];
}
A-= dgemv_bs ;
__syncthreads();
/*--------------------------------------------
Mirror Upper Triangle to Lower triangle
---------------------------------------------*/
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i <tx ){
la[tx][i] = la[i][tx];
}
else{
la[tx][i] = la[tx][i] ;
}
}
__syncthreads();
/*--------------------------------
Do diagonal Computation
-----------------------------------*/
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[tx][31-ty-j];
// temp = la[ty+j][tx] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[- j*ldsa] = cuComplexDoubleToFloat(temp);
}
/*--------------------------------------------------------
The leader accumulates all the results from his peer.
----------------------------------------------------------*/
//la[tx] [ty] = flag ;
//__syncthreads();
//
//if( ty == 0 ) {
// // info[ind] = flag + la[tx] [1] + la[tx] [2] + la[tx] [3] ;
//}
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/*Good Dimension*/
__global__ void
u_zlat2c_special (
int n,
const cuDoubleComplex *A, int lda,
cuFloatComplex *SA,
magma_int_t *info, double RMAX, int ldsa )
{
double mRMAX = - RMAX;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
/*
Reverse Computation ...
- Left
- Triangle
- Up
*/
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
__shared__ cuDoubleComplex la [dgemv_bs][dgemv_bs+1];
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
cuDoubleComplex temp ;
int flag = 0 ;
int break_d = (n / dgemv_bs - blockIdx.x-1 )* dgemv_bs ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4){
// la[tx][ty+j] = A[-j*lda] ;
temp = A[-j*lda] ;
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[-j*ldsa] = cuComplexDoubleToFloat(temp);
}
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4)
la[tx][31-ty-j] = A[ -j * lda];
/*
Look at the indexing changes
*/
A-= dgemv_bs ;
__syncthreads();
#pragma unroll 8
for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){
if ( i <tx ){
la[tx][i] = la[i][tx];
}
else{
la[tx][i] = la[tx][i] ;
}
}
__syncthreads();
#pragma unroll 8
for(int j =0; j<dgemv_bs; j+=4){
temp = la[tx][31-ty-j];
if( (cuCreal(temp) < mRMAX) || (cuCreal(temp) > RMAX)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (cuCimag(temp) < mRMAX) || (cuCimag(temp) > RMAX)
#endif
)
{
flag = 1;
}
SA[- j*ldsa] = cuComplexDoubleToFloat(temp);
}
//la[tx][ty] = flag ;
//
//__syncthreads();
//
//if( ty == 0 ) {
// // info[0] = flag + la[tx][1] + la[tx][2] + la[tx][3] ;
//}
}
extern "C" void
mzlat2c(
char uplo, magma_int_t m,
const cuDoubleComplex *A, magma_int_t lda,
cuFloatComplex *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
Note:
The UPLO = 'U' Version can be optimized more.
*/
double RMAX = (double)lapackf77_slamch("O");
int blocks;
if (m % dgemv_bs==0)
blocks = m/ dgemv_bs;
else
blocks = m/ dgemv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(32, 4, 1);
if( m % dgemv_bs == 0 ) {
if( uplo == 'L' || uplo == 'l'){
l_zlat2c_special <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, info, RMAX, ldsa );
}
else{
u_zlat2c_special <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, info, RMAX, ldsa );
}
}
else{
int m_full_block = (m - m % 32 ) /32 ;
int m_mod_32 = m%32 ;
if( uplo == 'L' || uplo == 'l'){
l_zlat2c_generic <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
else{
u_zlat2c_generic <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
}
}
/*
Interface ..................................
Reproduced from dlansy routines...
How to deliver the info.
*/
extern "C" void
magmablas_zlat2c(
char uplo, magma_int_t n,
const cuDoubleComplex *A, magma_int_t lda,
cuFloatComplex *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
The routine converts a COMPLEX_16 triangular
matrix A to COMPLEX triangular matrix SA.
*/
*info = 0;
mzlat2c( uplo, n, A, lda, SA, ldsa, info );
}
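/*
Illustrative usage sketch (not part of the original source; the device
pointers dA/dSA and the leading dimensions below are hypothetical):

    magma_int_t info;
    // dA  : n x n COMPLEX_16 matrix already resident on the GPU
    // dSA : n x n COMPLEX    matrix allocated on the GPU
    magmablas_zlat2c( 'L', n, dA, ldda, dSA, lddsa, &info );

Note that info is currently always returned as 0: the overflow test against
RMAX is performed inside the kernels, but the per-thread flag accumulation
that would report it is commented out above.
*/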
///////////////////////////////////////////////////////////////////////////////////////////
#else
///////////////////////////////////////////////////////////////////////////////////////////
/*------------------------------------------ UPLO = 'L' ----------------------------------*/
__global__ void
l_zlat2c_special (
int n,
const cuDoubleComplex *A, int lda,
cuFloatComplex *SA,
magma_int_t *info, double RMAX, int ldsa )
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
A += ind;
SA += ind;
A += ty * lda;
SA += ty * ldsa;
int break_d = (blockIdx.x+1)* dgemv_bs ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[j*ldsa] = cuComplexDoubleToFloat(A[j*lda]);
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
}
__global__ void
l_zlat2c_generic(
int n,
const cuDoubleComplex *A, int lda,
cuFloatComplex *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
if ( tx < m_mod_32 ){
A+= ( blockIdx.x * dgemv_bs + tx ) ;
SA+= ( blockIdx.x * dgemv_bs + tx ) ;
}
else{
A+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
SA+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ;
}
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = blockIdx.x* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[j*ldsa] = cuComplexDoubleToFloat(A[j*lda]);
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
/*
we don't need to zero these out, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
the ty==0 thread computes the triangular part;
the others wait with their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count = tx ;
else
count = m_mod_32 ;
for(j =0; j<=count; j++)
SA[j*ldsa] = cuComplexDoubleToFloat(A[j*lda]);
A += (tx)*lda;
SA += (tx)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
SA[count] = cuComplexDoubleToFloat(A[count]);
count++;
}
}
else{
}
__syncthreads();
}
else{
/* **************************************
-- All the blocks but the last one --
************************************** */
A += ind;
SA += ind;
A+= ty * lda ;
SA+= ty * ldsa ;
int break_d = (blockIdx.x+1)* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[j*ldsa] = cuComplexDoubleToFloat(A[j*lda]);
A += lda *dgemv_bs ;
SA += ldsa*dgemv_bs ;
}
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/* Generic Case*/
__global__ void
u_zlat2c_generic(
int n,
const cuDoubleComplex *A, int lda,
cuFloatComplex *SA,
int m_full_block, int m_mod_32, magma_int_t *info, double RMAX, int ldsa)
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
int blockIdxx = blockIdx.x ;
if( blockIdx.x == m_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
ind = tx ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
if ( tx < m_mod_32 ){
A+= ( tx ) ;
SA+= ( tx ) ;
}
else{
A+= ( m_mod_32 -1) ;
SA+= ( m_mod_32 -1) ;
}
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (blockIdx.x)* dgemv_bs ;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[-j*ldsa] = cuComplexDoubleToFloat(A[-j*lda]);
A -= lda *dgemv_bs ;
SA -= ldsa*dgemv_bs ;
}
/*
we don't need to zero these out, as those computations will be discarded.
*/
if( ty==0 ) {
/*--------------------------------------------
the ty==0 thread computes the triangular part;
the others wait with their values.
-----------------------------------------------*/
int j ;
int count = 1 ;
if( tx < m_mod_32 )
count =m_mod_32- tx ;
else
count = m_mod_32 ;
for(j =0; j<count; j++)
SA[-j*ldsa] = cuComplexDoubleToFloat(A[-j*lda]);
A-=(count-1)*lda;
SA-=(count-1)*ldsa;
count = 1 ;
for(; j<m_mod_32; j++){
SA[-count] = cuComplexDoubleToFloat(A[-count]);
count++;
}
}
else{
}
}
else{
/* **************************************
-- All the blocks but the last one --
-- By the way this code can be optimized more.
************************************** */
ind = blockIdx.x * dgemv_bs + tx + m_mod_32 ;
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (n / dgemv_bs - blockIdxx )* dgemv_bs ;
/*----------------------------
Go Left
-------------------------------*/
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[-j*ldsa] = cuComplexDoubleToFloat(A[-j*lda]);
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
}
}
/*- ---------------------------------------------- UPLO = 'U' ----------------------------------*/
/*Good Dimension*/
__global__ void
u_zlat2c_special (
int n,
const cuDoubleComplex *A, int lda,
cuFloatComplex *SA,
magma_int_t *info, double RMAX, int ldsa )
{
int tx = threadIdx.x ;
int ty = threadIdx.y ;
int ind = blockIdx.x* dgemv_bs + tx ;
/*
Reverse Computation ...
- Left
- Triangle
- Up
*/
A+= lda*(n-1) ;
SA+= ldsa*(n-1) ;
A += ind;
SA += ind;
A-= ty * lda ;
SA-= ty * ldsa ;
int break_d = (n / dgemv_bs - blockIdx.x )* dgemv_bs ;
for(int i=0; i<break_d; i += dgemv_bs ){
#pragma unroll 8
for(int j=0; j < dgemv_bs ; j+=4)
SA[-j*ldsa] = cuComplexDoubleToFloat(A[-j*lda]);
A-=lda* dgemv_bs ;
SA-=ldsa* dgemv_bs ;
}
}
extern "C" void
mzlat2c(
char uplo, magma_int_t m,
const cuDoubleComplex *A, magma_int_t lda,
cuFloatComplex *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
Note:
The UPLO = 'U' Version can be optimized more.
*/
double RMAX = (double)lapackf77_slamch("O");
int blocks;
if (m % dgemv_bs==0)
blocks = m/ dgemv_bs;
else
blocks = m/ dgemv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(32, 4, 1);
if( m % dgemv_bs == 0 ) {
if( uplo == 'L' || uplo == 'l'){
l_zlat2c_special <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, info, RMAX, ldsa );
}
else{
u_zlat2c_special <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, info, RMAX, ldsa );
}
}
else{
int m_full_block = (m - m % 32 ) /32 ;
int m_mod_32 = m%32 ;
if( uplo == 'L' || uplo == 'l'){
l_zlat2c_generic <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
else{
u_zlat2c_generic <<< grid, threads, 0, magma_stream >>> (m, A, lda, SA, m_full_block, m_mod_32, info, RMAX, ldsa );
}
}
}
/*
Interface ..................................
Reproduced from dlansy routines...
How to deliver the info.
*/
extern "C" void
magmablas_zlat2c(
char uplo, magma_int_t n,
const cuDoubleComplex *A, magma_int_t lda,
cuFloatComplex *SA, magma_int_t ldsa,
magma_int_t *info)
{
/*
The routine converts a COMPLEX_16 triangular
matrix A (along with its block diagonal entries) to a COMPLEX
triangular matrix SA (along with its block diagonal entries).
*/
*info = 0;
mzlat2c( uplo, n, A, lda, SA, ldsa, info );
/*
int val = cublasIdamax(n, WORK, 1);
double retVal[1];
cublasGetMatrix( 1, 1, sizeof( double ), WORK+val-1, 1, retVal, 1 ) ;
return retVal[0];
*/
}
#endif /* (!defined(PRECISION_z)) || (GPUSHMEM >= 200) */
|
70017f837b512b9e3c4291214b501f51d234b9e6.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <stdio.h>
#include <cassert>
#include <iostream>
#include "chunk.h"
#include <hip/hip_runtime.h>
#define NV_CUDA_CHECK(status) \
{ \
if (status != 0) \
{ \
std::cout << "Cuda failure: " << hipGetErrorString(status) << " in file " << __FILE__ \
<< " at line " << __LINE__ << std::endl; \
abort(); \
} \
}
namespace nvinfer1
{
Chunk::Chunk()
{
}
Chunk::Chunk(const void* buffer, size_t size)
{
assert(size == sizeof(_n_size_split));
_n_size_split = *reinterpret_cast<const int*>(buffer);
}
Chunk::~Chunk()
{
}
int Chunk::getNbOutputs() const
{
return 2;
}
Dims Chunk::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
assert(nbInputDims == 1);
assert(index == 0 || index == 1);
return Dims3(inputs[0].d[0] / 2, inputs[0].d[1], inputs[0].d[2]);
}
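// Example (hypothetical shapes): an input of dimensions (2*C, H, W) yields
// two outputs of dimensions (C, H, W) each, i.e. the plugin splits the
// channel dimension of the CHW tensor in half.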
int Chunk::initialize()
{
return 0;
}
void Chunk::terminate()
{
}
size_t Chunk::getWorkspaceSize(int maxBatchSize) const
{
return 0;
}
int Chunk::enqueue(int batchSize,
const void* const* inputs,
void** outputs,
void* workspace,
hipStream_t stream)
{
// For each batch element, copy the first and second half of the channel
// dimension into outputs[0] and outputs[1] respectively.
for (int b = 0; b < batchSize; ++b)
{
NV_CUDA_CHECK(hipMemcpy((char*)outputs[0] + b * _n_size_split, (char*)inputs[0] + b * 2 * _n_size_split, _n_size_split, hipMemcpyDeviceToDevice));
NV_CUDA_CHECK(hipMemcpy((char*)outputs[1] + b * _n_size_split, (char*)inputs[0] + b * 2 * _n_size_split + _n_size_split, _n_size_split, hipMemcpyDeviceToDevice));
}
// NV_CUDA_CHECK(hipMemcpy(outputs[0], inputs[0], _n_size_split, hipMemcpyDeviceToDevice));
// NV_CUDA_CHECK(hipMemcpy(outputs[1], (void*)((char*)inputs[0] + _n_size_split), _n_size_split, hipMemcpyDeviceToDevice));
return 0;
}
size_t Chunk::getSerializationSize() const
{
return sizeof(_n_size_split);
}
void Chunk::serialize(void *buffer)const
{
*reinterpret_cast<int*>(buffer) = _n_size_split;
}
const char* Chunk::getPluginType()const
{
return "CHUNK_TRT";
}
const char* Chunk::getPluginVersion() const
{
return "1.0";
}
void Chunk::destroy()
{
delete this;
}
void Chunk::setPluginNamespace(const char* pluginNamespace)
{
_s_plugin_namespace = pluginNamespace;
}
const char* Chunk::getPluginNamespace() const
{
return _s_plugin_namespace.c_str();
}
DataType Chunk::getOutputDataType(int index,
const nvinfer1::DataType* inputTypes,
int nbInputs) const
{
assert(index == 0 || index == 1);
return DataType::kFLOAT;
}
bool Chunk::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
bool Chunk::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void Chunk::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) {}
void Chunk::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
_n_size_split = in->dims.d[0] / 2 * in->dims.d[1] * in->dims.d[2] *sizeof(float);
}
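// Example (hypothetical numbers): for an input of 256 x 19 x 19 float
// elements, _n_size_split = 128 * 19 * 19 * sizeof(float) = 184832 bytes,
// which is the byte count copied per output and per batch element in
// enqueue() above.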
void Chunk::detachFromContext() {}
// Clone the plugin
IPluginV2IOExt* Chunk::clone() const
{
Chunk *p = new Chunk();
p->_n_size_split = _n_size_split;
p->setPluginNamespace(_s_plugin_namespace.c_str());
return p;
}
//----------------------------
PluginFieldCollection ChunkPluginCreator::_fc{};
std::vector<PluginField> ChunkPluginCreator::_vec_plugin_attributes;
ChunkPluginCreator::ChunkPluginCreator()
{
_vec_plugin_attributes.clear();
_fc.nbFields = _vec_plugin_attributes.size();
_fc.fields = _vec_plugin_attributes.data();
}
const char* ChunkPluginCreator::getPluginName() const
{
return "CHUNK_TRT";
}
const char* ChunkPluginCreator::getPluginVersion() const
{
return "1.0";
}
const PluginFieldCollection* ChunkPluginCreator::getFieldNames()
{
return &_fc;
}
IPluginV2IOExt* ChunkPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
Chunk* obj = new Chunk();
obj->setPluginNamespace(_s_name_space.c_str());
return obj;
}
IPluginV2IOExt* ChunkPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
Chunk* obj = new Chunk(serialData,serialLength);
obj->setPluginNamespace(_s_name_space.c_str());
return obj;
}
void ChunkPluginCreator::setPluginNamespace(const char* libNamespace)
{
_s_name_space = libNamespace;
}
const char* ChunkPluginCreator::getPluginNamespace() const
{
return _s_name_space.c_str();
}
}//namespace nvinfer1
| 70017f837b512b9e3c4291214b501f51d234b9e6.cu | #include <cmath>
#include <stdio.h>
#include <cassert>
#include <iostream>
#include "chunk.h"
#include <cuda_runtime.h>
#define NV_CUDA_CHECK(status) \
{ \
if (status != 0) \
{ \
std::cout << "Cuda failure: " << cudaGetErrorString(status) << " in file " << __FILE__ \
<< " at line " << __LINE__ << std::endl; \
abort(); \
} \
}
namespace nvinfer1
{
Chunk::Chunk()
{
}
Chunk::Chunk(const void* buffer, size_t size)
{
assert(size == sizeof(_n_size_split));
_n_size_split = *reinterpret_cast<const int*>(buffer);
}
Chunk::~Chunk()
{
}
int Chunk::getNbOutputs() const
{
return 2;
}
Dims Chunk::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
assert(nbInputDims == 1);
assert(index == 0 || index == 1);
return Dims3(inputs[0].d[0] / 2, inputs[0].d[1], inputs[0].d[2]);
}
int Chunk::initialize()
{
return 0;
}
void Chunk::terminate()
{
}
size_t Chunk::getWorkspaceSize(int maxBatchSize) const
{
return 0;
}
int Chunk::enqueue(int batchSize,
const void* const* inputs,
void** outputs,
void* workspace,
cudaStream_t stream)
{
// For each batch element, copy the first and second half of the channel
// dimension into outputs[0] and outputs[1] respectively.
for (int b = 0; b < batchSize; ++b)
{
NV_CUDA_CHECK(cudaMemcpy((char*)outputs[0] + b * _n_size_split, (char*)inputs[0] + b * 2 * _n_size_split, _n_size_split, cudaMemcpyDeviceToDevice));
NV_CUDA_CHECK(cudaMemcpy((char*)outputs[1] + b * _n_size_split, (char*)inputs[0] + b * 2 * _n_size_split + _n_size_split, _n_size_split, cudaMemcpyDeviceToDevice));
}
// NV_CUDA_CHECK(cudaMemcpy(outputs[0], inputs[0], _n_size_split, cudaMemcpyDeviceToDevice));
// NV_CUDA_CHECK(cudaMemcpy(outputs[1], (void*)((char*)inputs[0] + _n_size_split), _n_size_split, cudaMemcpyDeviceToDevice));
return 0;
}
size_t Chunk::getSerializationSize() const
{
return sizeof(_n_size_split);
}
void Chunk::serialize(void *buffer)const
{
*reinterpret_cast<int*>(buffer) = _n_size_split;
}
const char* Chunk::getPluginType()const
{
return "CHUNK_TRT";
}
const char* Chunk::getPluginVersion() const
{
return "1.0";
}
void Chunk::destroy()
{
delete this;
}
void Chunk::setPluginNamespace(const char* pluginNamespace)
{
_s_plugin_namespace = pluginNamespace;
}
const char* Chunk::getPluginNamespace() const
{
return _s_plugin_namespace.c_str();
}
DataType Chunk::getOutputDataType(int index,
const nvinfer1::DataType* inputTypes,
int nbInputs) const
{
assert(index == 0 || index == 1);
return DataType::kFLOAT;
}
bool Chunk::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
bool Chunk::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void Chunk::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) {}
void Chunk::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
_n_size_split = in->dims.d[0] / 2 * in->dims.d[1] * in->dims.d[2] *sizeof(float);
}
void Chunk::detachFromContext() {}
// Clone the plugin
IPluginV2IOExt* Chunk::clone() const
{
Chunk *p = new Chunk();
p->_n_size_split = _n_size_split;
p->setPluginNamespace(_s_plugin_namespace.c_str());
return p;
}
//----------------------------
PluginFieldCollection ChunkPluginCreator::_fc{};
std::vector<PluginField> ChunkPluginCreator::_vec_plugin_attributes;
ChunkPluginCreator::ChunkPluginCreator()
{
_vec_plugin_attributes.clear();
_fc.nbFields = _vec_plugin_attributes.size();
_fc.fields = _vec_plugin_attributes.data();
}
const char* ChunkPluginCreator::getPluginName() const
{
return "CHUNK_TRT";
}
const char* ChunkPluginCreator::getPluginVersion() const
{
return "1.0";
}
const PluginFieldCollection* ChunkPluginCreator::getFieldNames()
{
return &_fc;
}
IPluginV2IOExt* ChunkPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
Chunk* obj = new Chunk();
obj->setPluginNamespace(_s_name_space.c_str());
return obj;
}
IPluginV2IOExt* ChunkPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
Chunk* obj = new Chunk(serialData,serialLength);
obj->setPluginNamespace(_s_name_space.c_str());
return obj;
}
void ChunkPluginCreator::setPluginNamespace(const char* libNamespace)
{
_s_name_space = libNamespace;
}
const char* ChunkPluginCreator::getPluginNamespace() const
{
return _s_name_space.c_str();
}
}//namespace nvinfer1
|
c5eb0ecb88888a4d076fde838fbca836078f7ee2.hip | // !!! This is a file automatically generated by hipify!!!
// Matrix Multiplication in CUDA
#include <stdio.h>
//#include <string.h>
//#include <assert.h>
//#include <stdlib.h>
#include <hip/hip_runtime.h>
// includes, project
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
#define WIDTH 32
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
// FILL HERE: define constant variable
// MatrixMul kernel
/**
* CUDA Kernel Device code
*
* Computes the matrix multiplication of A and B into C. The 3 matrices have the same
* number of elements WIDTH*WIDTH.
*/
// FILL HERE: translate C-version matrixMul to CUDA-version kernel code
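// For reference, a plain C version of the computation the kernel below
// performs might look like this (sketch only; computeGold() at the end of
// this file does the same thing, accumulating in double):
//
//     for (int row = 0; row < WIDTH; ++row)
//         for (int col = 0; col < WIDTH; ++col) {
//             float acc = 0.0f;
//             for (int k = 0; k < WIDTH; ++k)
//                 acc += A[row*WIDTH + k] * B[k*WIDTH + col];
//             C[row*WIDTH + col] = acc;
//         }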
__global__ void
MatrixMul(float* A, float* B, float* C, unsigned long long* runtime)
{
// TODO : Kernel Function
// C = A * B
// -->
unsigned long long start_time = clock64();
int tid = threadIdx.x;
int row = tid/WIDTH;
int col = tid%WIDTH;
float lC= 0.0;
for(int k = 0; k < WIDTH; k++)
{
lC += A[row*WIDTH+k] * B[k*WIDTH + col];
// C[row*WIDTH + col] += A[row*WIDTH+k] * B[k*WIDTH + col];
}
C[row*WIDTH + col] = lC;
// <--
unsigned long long stop_time = clock64();
runtime[tid] = (unsigned long long)(stop_time - start_time);
}
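// Note on the launch configuration chosen later in main(): with WIDTH = 32
// this kernel is run as a single block of WIDTH*WIDTH = 1024 threads, which
// is the per-block thread limit on most CUDA-capable GPUs. A hypothetical
// mapping for larger matrices (not used by this program, and it would also
// require changing the index computation inside the kernel) could be:
//
//     dim3 block(16, 16);
//     dim3 grid(WIDTH/16, WIDTH/16);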
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the matrix size to be used, and compute its size
int size = WIDTH*WIDTH*sizeof(float);
printf("[MatrixMul of %d x %d elements]\n", WIDTH, WIDTH);
// Allocate the host input matrix h_A
float *h_A = (float *)malloc(size);
// Allocate the host input matrix h_B
float *h_B = (float *)malloc(size);
// Allocate the host input matrix h_C
float *h_C = (float *)malloc(size);
// Allocate the host matrix for compute check
float *reference = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL || reference == NULL)
{
fprintf(stderr, "Failed to allocate host matrices!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input matrices
for (int i = 0; i < WIDTH; ++i)
{
for (int j = 0; j < WIDTH; ++j)
{
h_A[i*WIDTH + j] = 0.01f;
h_B[i*WIDTH + j] = 1.0f;
}
}
memset(h_C, 0, size);
memset(reference, 0, size);
// compute the matrix multiplication on the CPU for comparison
computeGold(reference, h_A, h_B, WIDTH, WIDTH, WIDTH);
// Allocate device input matrices
// TODO : Leave/Remove the given hipMalloc code properly
// -->
float* d_A = NULL;
err = hipMalloc((void**)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device matrix A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float* d_B = NULL;
err = hipMalloc((void**)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device matrix B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// <--
// Allocate the device output matrix
float* d_C = NULL;
err = hipMalloc((void**)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device matrix C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input matrix A and B in host memory to the device input matrices in
// device memory
// TODO : Add proper mem copy APIs according to the memory that matrix A and B will be stored
// -->
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);// FILL HERE
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy matrix A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);// FILL HERE
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy matrix B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// <--
// TODO : Clock Measurements
// Add code to return clock cycles from kernel
// -->
// d_runtime must exist even when TM is not defined, because the kernel
// launch below always passes it and the kernel always writes to it.
unsigned long long* d_runtime;
int r_size = WIDTH*WIDTH*sizeof(unsigned long long);
hipMalloc((void**)&d_runtime, r_size);
#ifdef TM
unsigned long long* runtime = (unsigned long long*)malloc(r_size);
memset(runtime, 0, r_size);
#endif
// <--
// TODO : Kernel Invocation
// Assign as many threads as the size of matrix in a thread block and
// invoke the kernel function.
// -->
int blocksPerGrid = 1;// FILL HERE
int threadsPerBlock = WIDTH*WIDTH;// FILL HERE
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
// MatrixMul(d_A, d_B, d_C);
hipLaunchKernelGGL(( MatrixMul) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, d_runtime);
// <--
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch matrixMul kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// Copy the device result matrix in device memory to the host result matrix
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// Verify that the result matrix is correct
bool res = 1;
for (int i = 0; i < WIDTH*WIDTH; i++)
{
float diff = fabs(reference[i] - h_C[i]);
if(diff > 0.001f)
{
res = 0;
break;
}
}
printf("Test %s\n", (res == 1) ? "PASSED" : "FAILED");
// TODO : Get elapsed clock cycles from device to host
// Take the longest time as kernel execution time
// -->
#ifdef TM
hipMemcpy(runtime, d_runtime, r_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
unsigned long long elapsed_time = 0;
for(int i = 0; i < WIDTH*WIDTH; i++)
if(elapsed_time < runtime[i])
elapsed_time = runtime[i];
printf("Kernel Execution Time: %llu cycles\n", elapsed_time);
#endif
// <--
// TODO : Free device global memory
// Leave/Remove the given hipFree statement according to your data allocation
// -->
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipFree(d_runtime);
// <--
// Free host memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
#ifdef TM
free(runtime);
#endif
return 0;
}
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j) {
double sum = 0;
for (unsigned int k = 0; k < wA; ++k) {
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
| c5eb0ecb88888a4d076fde838fbca836078f7ee2.cu | // Matrix Multiplication in CUDA
#include <stdio.h>
//#include <string.h>
//#include <assert.h>
//#include <stdlib.h>
#include <cuda_runtime.h>
// includes, project
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
#define WIDTH 32
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
// FILL HERE: define constant variable
// MatrixMul kernel
/**
* CUDA Kernel Device code
*
* Computes the matrix multiplication of A and B into C. The 3 matrices have the same
* number of elements WIDTH*WIDTH.
*/
// FILL HERE: translate C-version matrixMul to CUDA-version kernel code
__global__ void
MatrixMul(float* A, float* B, float* C, unsigned long long* runtime)
{
// TODO : Kernel Function
// C = A * B
// -->
unsigned long long start_time = clock64();
int tid = threadIdx.x;
int row = tid/WIDTH;
int col = tid%WIDTH;
float lC= 0.0;
for(int k = 0; k < WIDTH; k++)
{
lC += A[row*WIDTH+k] * B[k*WIDTH + col];
// C[row*WIDTH + col] += A[row*WIDTH+k] * B[k*WIDTH + col];
}
C[row*WIDTH + col] = lC;
// <--
unsigned long long stop_time = clock64();
runtime[tid] = (unsigned long long)(stop_time - start_time);
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the matrix size to be used, and compute its size
int size = WIDTH*WIDTH*sizeof(float);
printf("[MatrixMul of %d x %d elements]\n", WIDTH, WIDTH);
// Allocate the host input matrix h_A
float *h_A = (float *)malloc(size);
// Allocate the host input matrix h_B
float *h_B = (float *)malloc(size);
// Allocate the host input matrix h_C
float *h_C = (float *)malloc(size);
// Allocate the host matrix for compute check
float *reference = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL || reference == NULL)
{
fprintf(stderr, "Failed to allocate host matrices!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input matrices
for (int i = 0; i < WIDTH; ++i)
{
for (int j = 0; j < WIDTH; ++j)
{
h_A[i*WIDTH + j] = 0.01f;
h_B[i*WIDTH + j] = 1.0f;
}
}
memset(h_C, 0, size);
memset(reference, 0, size);
// compute the matrix multiplication on the CPU for comparison
computeGold(reference, h_A, h_B, WIDTH, WIDTH, WIDTH);
// Allocate device input matrices
// TODO : Leave/Remove the given cudaMalloc code properly
// -->
float* d_A = NULL;
err = cudaMalloc((void**)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device matrix A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float* d_B = NULL;
err = cudaMalloc((void**)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device matrix B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// <--
// Allocate the device output matrix
float* d_C = NULL;
err = cudaMalloc((void**)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device matrix C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input matrix A and B in host memory to the device input matrices in
// device memory
// TODO : Add proper mem copy APIs according to the memory that matrix A and B will be stored
// -->
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);// FILL HERE
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy matrix A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);// FILL HERE
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy matrix B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// <--
// TODO : Clock Measurements
// Add code to return clock cycles from kernel
// -->
// d_runtime must exist even when TM is not defined, because the kernel
// launch below always passes it and the kernel always writes to it.
unsigned long long* d_runtime;
int r_size = WIDTH*WIDTH*sizeof(unsigned long long);
cudaMalloc((void**)&d_runtime, r_size);
#ifdef TM
unsigned long long* runtime = (unsigned long long*)malloc(r_size);
memset(runtime, 0, r_size);
#endif
// <--
// TODO : Kernel Invocation
// Assign as many threads as the size of matrix in a thread block and
// invoke the kernel function.
// -->
int blocksPerGrid = 1;// FILL HERE
int threadsPerBlock = WIDTH*WIDTH;// FILL HERE
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
// MatrixMul(d_A, d_B, d_C);
MatrixMul <<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_runtime);
// <--
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch matrixMul kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaThreadSynchronize();
// Copy the device result matrix in device memory to the host result matrix
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaThreadSynchronize();
// Verify that the result matrix is correct
bool res = 1;
for (int i = 0; i < WIDTH*WIDTH; i++)
{
float diff = fabs(reference[i] - h_C[i]);
if(diff > 0.001f)
{
res = 0;
break;
}
}
printf("Test %s\n", (res == 1) ? "PASSED" : "FAILED");
// TODO : Get elapsed clock cycles from device to host
// Take the longest time as kernel execution time
// -->
#ifdef TM
cudaMemcpy(runtime, d_runtime, r_size, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
unsigned long long elapsed_time = 0;
for(int i = 0; i < WIDTH*WIDTH; i++)
if(elapsed_time < runtime[i])
elapsed_time = runtime[i];
printf("Kernel Execution Time: %llu cycles\n", elapsed_time);
#endif
// <--
// TODO : Free device global memory
// Leave/Remove the given cudaFree statement according to your data allocation
// -->
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFree(d_runtime);
// <--
// Free host memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
#ifdef TM
free(runtime);
#endif
return 0;
}
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j) {
double sum = 0;
for (unsigned int k = 0; k < wA; ++k) {
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
|
2efabab2276cdf29a878d19d5cb1e303117d1218.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/local_response_normalization_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void LRNFillScaleNCHW(const int nthreads, const T* in,
const int num, const int channels, const int height,
const int width, const int size, const T alpha_over_size,
const T bias, T* scale) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int offset = (n * channels * height + h) * width + w;
int step = height * width;
in += offset;
scale += offset;
int head = 0;
int pre_pad = (size - 1) / 2;
int post_pad = size - pre_pad - 1;
T accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad) {
accum_scale += in[head * step] * in[head * step];
++head;
}
// until we reach size, nothing needs to be subtracted
while (head < size) {
accum_scale += in[head * step] * in[head * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in[head * step] * in[head * step];
accum_scale -= in[(head - size) * step] * in[(head - size) * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
accum_scale -= in[(head - size) * step] * in[(head - size) * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// recover the pointers for the next loop.
in -= offset;
scale -= offset;
}
}
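// Worked example (illustrative values chosen for this comment only): with
// size = 5 the window spans pre_pad = 2 channels before and post_pad = 2
// channels after the current channel c, so the kernel effectively computes
//     scale[c] = bias + (alpha/size) * sum of in[k]^2 over k in [c-2, c+2],
//                                      clipped to 0 <= k < C
// using a running sum (add the channel entering the window, subtract the one
// leaving it) instead of re-summing all five terms for every channel.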
template <typename T>
__global__ void LRNFillScaleNHWC(const int nthreads, const T* in,
const int num, const int height, const int width,
const int channels, const int size, const T alpha_over_size,
const T bias, T* scale) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int c = index % channels;
int pre_pad = (size - 1) / 2;
scale[index] = 0;
for (int i = 0; i < size; ++i) {
int raw_idx = c + i - pre_pad;
if (raw_idx >= 0 && raw_idx < channels) {
scale[index] += in[index + i - pre_pad] * in[index + i - pre_pad];
}
}
scale[index] = bias + scale[index] * alpha_over_size;
}
}
// TODO(Yangqing): check if it would be faster to just put it into the previous
// kernel.
template <typename T>
__global__ void LRNComputeOutput(const int nthreads, const T* in,
const T* scale, const T negative_beta, T* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
template <typename T>
__global__ void LRNComputeDiffNCHW(const int nthreads, const T* bottom_data,
const T* top_data, const T* scale, const T* top_diff,
const int num, const int channels, const int height,
const int width, const int size, const T negative_beta,
const T cache_ratio,
T* bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int offset = (n * channels * height + h) * width + w;
int step = height * width;
bottom_data += offset;
top_data += offset;
scale += offset;
top_diff += offset;
bottom_diff += offset;
int head = 0;
int pre_pad = size - (size + 1) / 2;
int post_pad = size - pre_pad - 1;
T accum_ratio = 0;
// accumulate values
while (head < post_pad) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
++head;
}
// until we reach size, nothing needs to be subtracted
while (head < size) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
accum_ratio -= top_diff[(head - size) * step] *
top_data[(head - size) * step] / scale[(head - size) * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
accum_ratio -= top_diff[(head - size) * step] *
top_data[(head - size) * step] / scale[(head - size) * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// recover pointer for next iteration.
bottom_data -= offset;
top_data -= offset;
scale -= offset;
top_diff -= offset;
bottom_diff -= offset;
}
}
// This local response normalization gradient does one sum per output location
// and does not use the running trick for 1-d convolution: thus it might not be
// the fastest implementation.
template <typename T>
__global__ void LRNComputeDiffNHWC(const int nthreads, const T* bottom_data,
const T* top_data, const T* scale, const T* top_diff,
const int num, const int height, const int width, const int channels,
const int size, const T negative_beta, const T cache_ratio,
T* bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local channel offset
int c = index % channels;
int pre_pad = size / 2;
T accum_ratio = 0;
for (int i = -pre_pad; i < size - pre_pad; ++i) {
if (c + i >= 0 && c + i < channels) {
accum_ratio += top_diff[index + i] * top_data[index + i] /
scale[index + i];
}
}
bottom_diff[index] = top_diff[index] * pow(scale[index], negative_beta) -
cache_ratio * bottom_data[index] * accum_ratio;
}
}
} // namespace
template<>
bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
DCHECK_EQ(X.dim(), 4);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
const float* Xdata = X.data<float>();
auto* Y = Output(0, X.sizes(), at::dtype<float>());
float* Ydata = Y->template mutable_data<float>();
if (OutputSize() > 1) {
scale_ = Output(1);
} else {
if (!scale_) {
scale_ = &local_scale_tensor_;
}
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = N * H * W;
hipLaunchKernelGGL(( LRNFillScaleNCHW<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
n_threads, Xdata, N, C, H, W, size_, alpha_ / size_, bias_, scale_data);
n_threads = X.numel();
hipLaunchKernelGGL(( LRNComputeOutput<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
n_threads, Xdata, scale_data, -beta_, Ydata);
return true;
}
template<>
bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
DCHECK_EQ(X.dim(), 4);
const int N = X.dim32(0);
const int H = X.dim32(1);
const int W = X.dim32(2);
const int C = X.dim32(3);
const float* Xdata = X.data<float>();
auto* Y = Output(0, X.sizes(), at::dtype<float>());
float* Ydata = Y->template mutable_data<float>();
if (OutputSize() > 1) {
scale_ = Output(1);
} else {
if (!scale_) {
scale_ = &local_scale_tensor_;
}
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = X.numel();
hipLaunchKernelGGL(( LRNFillScaleNHWC<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
n_threads, Xdata, N, H, W, C, size_, alpha_ / size_, bias_, scale_data);
hipLaunchKernelGGL(( LRNComputeOutput<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
n_threads, Xdata, scale_data, -beta_, Ydata);
return true;
}
template <>
bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
DCHECK_EQ(X.dim(), 4);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
// Loosely checking the size, assuming that the shapes will be the same as
// long as the sizes check out.
DCHECK_EQ(X.numel(), Y.numel());
DCHECK_EQ(X.numel(), dY.numel());
auto* dX = Output(0, X.sizes(), at::dtype<float>());
const float* Xdata = X.data<float>();
const float* Ydata = Y.data<float>();
if (!scale_) {
scale_ = &local_scale_tensor_;
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = N * H * W;
hipLaunchKernelGGL(( LRNFillScaleNCHW<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
n_threads, Xdata, N, C, H, W, size_, alpha_ / size_, bias_, scale_data);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
hipLaunchKernelGGL(( LRNComputeDiffNCHW<float>), dim3(CAFFE_GET_BLOCKS(n_threads)),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
n_threads, Xdata, Ydata, scale_data, dYdata, N, C, H, W, size_, -beta_,
2.f * alpha_ * beta_ / size_, dXdata);
return true;
}
template <>
bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
DCHECK_EQ(X.dim(), 4);
const int N = X.dim32(0);
const int H = X.dim32(1);
const int W = X.dim32(2);
const int C = X.dim32(3);
const float* Xdata = X.data<float>();
// Loosely checking the size, assuming that the shapes will be the same as
// long as the sizes check out.
DCHECK_EQ(X.numel(), Y.numel());
DCHECK_EQ(X.numel(), dY.numel());
auto* dX = Output(0, X.sizes(), at::dtype<float>());
if (!scale_) {
scale_ = &local_scale_tensor_;
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = X.numel();
hipLaunchKernelGGL(( LRNFillScaleNHWC<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
n_threads, Xdata, N, H, W, C, size_, alpha_ / size_, bias_, scale_data);
hipLaunchKernelGGL(( LRNComputeDiffNHWC<float>)
, dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X.numel(),
X.data<float>(),
Y.data<float>(),
scale_data,
dY.data<float>(),
X.dim32(0),
X.dim32(1),
X.dim32(2),
X.dim32(3),
size_,
-beta_,
2.f * alpha_ * beta_ / size_,
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(LRN, LRNOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(LRNGradient, LRNGradientOp<float, CUDAContext>);
} // namespace caffe2
| 2efabab2276cdf29a878d19d5cb1e303117d1218.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/local_response_normalization_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void LRNFillScaleNCHW(const int nthreads, const T* in,
const int num, const int channels, const int height,
const int width, const int size, const T alpha_over_size,
const T bias, T* scale) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int offset = (n * channels * height + h) * width + w;
int step = height * width;
in += offset;
scale += offset;
int head = 0;
int pre_pad = (size - 1) / 2;
int post_pad = size - pre_pad - 1;
T accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad) {
accum_scale += in[head * step] * in[head * step];
++head;
}
// until we reach size, nothing needs to be subtracted
while (head < size) {
accum_scale += in[head * step] * in[head * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in[head * step] * in[head * step];
accum_scale -= in[(head - size) * step] * in[(head - size) * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
accum_scale -= in[(head - size) * step] * in[(head - size) * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// recover the pointers for the next loop.
in -= offset;
scale -= offset;
}
}
template <typename T>
__global__ void LRNFillScaleNHWC(const int nthreads, const T* in,
const int num, const int height, const int width,
const int channels, const int size, const T alpha_over_size,
const T bias, T* scale) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int c = index % channels;
int pre_pad = (size - 1) / 2;
scale[index] = 0;
for (int i = 0; i < size; ++i) {
int raw_idx = c + i - pre_pad;
if (raw_idx >= 0 && raw_idx < channels) {
scale[index] += in[index + i - pre_pad] * in[index + i - pre_pad];
}
}
scale[index] = bias + scale[index] * alpha_over_size;
}
}
// TODO(Yangqing): check if it would be faster to just put it into the previous
// kernel.
template <typename T>
__global__ void LRNComputeOutput(const int nthreads, const T* in,
const T* scale, const T negative_beta, T* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
template <typename T>
__global__ void LRNComputeDiffNCHW(const int nthreads, const T* bottom_data,
const T* top_data, const T* scale, const T* top_diff,
const int num, const int channels, const int height,
const int width, const int size, const T negative_beta,
const T cache_ratio,
T* bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int offset = (n * channels * height + h) * width + w;
int step = height * width;
bottom_data += offset;
top_data += offset;
scale += offset;
top_diff += offset;
bottom_diff += offset;
int head = 0;
int pre_pad = size - (size + 1) / 2;
int post_pad = size - pre_pad - 1;
T accum_ratio = 0;
// accumulate values
while (head < post_pad) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
++head;
}
// until we reach size, nothing needs to be subtracted
while (head < size) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
accum_ratio -= top_diff[(head - size) * step] *
top_data[(head - size) * step] / scale[(head - size) * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
accum_ratio -= top_diff[(head - size) * step] *
top_data[(head - size) * step] / scale[(head - size) * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// recover pointer for next iteration.
bottom_data -= offset;
top_data -= offset;
scale -= offset;
top_diff -= offset;
bottom_diff -= offset;
}
}
// This local response normalization gradient does one sum per output location
// and does not use the running trick for 1-d convolution: thus it might not be
// the fastest implementation.
template <typename T>
__global__ void LRNComputeDiffNHWC(const int nthreads, const T* bottom_data,
const T* top_data, const T* scale, const T* top_diff,
const int num, const int height, const int width, const int channels,
const int size, const T negative_beta, const T cache_ratio,
T* bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local channel offset
int c = index % channels;
int pre_pad = size / 2;
T accum_ratio = 0;
for (int i = -pre_pad; i < size - pre_pad; ++i) {
if (c + i >= 0 && c + i < channels) {
accum_ratio += top_diff[index + i] * top_data[index + i] /
scale[index + i];
}
}
bottom_diff[index] = top_diff[index] * pow(scale[index], negative_beta) -
cache_ratio * bottom_data[index] * accum_ratio;
}
}
} // namespace
template<>
bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
DCHECK_EQ(X.dim(), 4);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
const float* Xdata = X.data<float>();
auto* Y = Output(0, X.sizes(), at::dtype<float>());
float* Ydata = Y->template mutable_data<float>();
if (OutputSize() > 1) {
scale_ = Output(1);
} else {
if (!scale_) {
scale_ = &local_scale_tensor_;
}
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = N * H * W;
LRNFillScaleNCHW<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, N, C, H, W, size_, alpha_ / size_, bias_, scale_data);
n_threads = X.numel();
LRNComputeOutput<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, scale_data, -beta_, Ydata);
return true;
}
template<>
bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
DCHECK_EQ(X.dim(), 4);
const int N = X.dim32(0);
const int H = X.dim32(1);
const int W = X.dim32(2);
const int C = X.dim32(3);
const float* Xdata = X.data<float>();
auto* Y = Output(0, X.sizes(), at::dtype<float>());
float* Ydata = Y->template mutable_data<float>();
if (OutputSize() > 1) {
scale_ = Output(1);
} else {
if (!scale_) {
scale_ = &local_scale_tensor_;
}
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = X.numel();
LRNFillScaleNHWC<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, N, H, W, C, size_, alpha_ / size_, bias_, scale_data);
LRNComputeOutput<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, scale_data, -beta_, Ydata);
return true;
}
template <>
bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
DCHECK_EQ(X.dim(), 4);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
// Loosely checking the size, assuming that the shapes will be the same as
// long as the sizes check out.
DCHECK_EQ(X.numel(), Y.numel());
DCHECK_EQ(X.numel(), dY.numel());
auto* dX = Output(0, X.sizes(), at::dtype<float>());
const float* Xdata = X.data<float>();
const float* Ydata = Y.data<float>();
if (!scale_) {
scale_ = &local_scale_tensor_;
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = N * H * W;
LRNFillScaleNCHW<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, N, C, H, W, size_, alpha_ / size_, bias_, scale_data);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
LRNComputeDiffNCHW<float><<<CAFFE_GET_BLOCKS(n_threads),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, Ydata, scale_data, dYdata, N, C, H, W, size_, -beta_,
2.f * alpha_ * beta_ / size_, dXdata);
return true;
}
template <>
bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
DCHECK_EQ(X.dim(), 4);
const int N = X.dim32(0);
const int H = X.dim32(1);
const int W = X.dim32(2);
const int C = X.dim32(3);
const float* Xdata = X.data<float>();
// Loosely checking the size, assuming that the shapes will be the same as
// long as the sizes check out.
DCHECK_EQ(X.numel(), Y.numel());
DCHECK_EQ(X.numel(), dY.numel());
auto* dX = Output(0, X.sizes(), at::dtype<float>());
if (!scale_) {
scale_ = &local_scale_tensor_;
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = X.numel();
LRNFillScaleNHWC<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, N, H, W, C, size_, alpha_ / size_, bias_, scale_data);
LRNComputeDiffNHWC<float>
<<<CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.numel(),
X.data<float>(),
Y.data<float>(),
scale_data,
dY.data<float>(),
X.dim32(0),
X.dim32(1),
X.dim32(2),
X.dim32(3),
size_,
-beta_,
2.f * alpha_ * beta_ / size_,
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(LRN, LRNOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(LRNGradient, LRNGradientOp<float, CUDAContext>);
} // namespace caffe2
|
42b7efb8d2d68eee00c4a34fe004ddd7fd1176cc.hip | // !!! This is a file automatically generated by hipify!!!
//=============================================================================
// FILE: mytoy.cu
// AUTHORS: Raul Segura & Manuel Ujaldon (copyright 2014)
// Look for the string "MU" whenever Manuel suggests you to introduce changes
// Feel free to change some other parts of the code too (at your own risk)
//=============================================================================
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "io.h"
//=============================================================================
// CUDA functions.
//=============================================================================
//Error handler for CUDA functions.
void cudaErrorHandler(hipError_t error, const int LINE)
{
if (error != hipSuccess) {
fprintf(stdout, "ERROR(%d): %s\n", LINE, hipGetErrorString(error));
exit(EXIT_FAILURE);
}
}
//-----------------------------------------------------------------------------
// Set the GPU device and get its properties.
// Gives us the properties of the graphics card. The hipDeviceProp_t struct has many fields that can be useful to us.
void getDeviceProperties(const int devID, hipDeviceProp_t *deviceProp)
{
// Set device.
cudaErrorHandler(hipSetDevice(devID), __LINE__);
// Get device properties.
fprintf(stdout, "Leyendo propiedades del dispositivo %d...\n", devID);
cudaErrorHandler(hipGetDeviceProperties(deviceProp, devID), __LINE__);
fprintf(stdout, "GPU Device %d: \"%s\": capacidad de cmputo %d.%d.\n\n",
devID, deviceProp->name, deviceProp->major, deviceProp->minor);
}
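// Minimal usage sketch (hypothetical; main() below declares the
// hipDeviceProp_t that is presumably passed to this function):
//
//     hipDeviceProp_t prop;
//     getDeviceProperties(0, &prop);
//     // fields such as prop.multiProcessorCount or prop.clockRate can then
//     // help when interpreting the GFLOPS figures computed in main().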
//=============================================================================
// IOHB functions (Input/Output Harwell-Boeing) adapted from the HB library
//=============================================================================
// Read the input matrix.
void readInputMatrix(const char *matrixFile, int *nrow, int *ncol, int *nnzero,
int **colptr, int **rowind, double **values)
{
// Read the Harwell-Boeing format matrix file.
fprintf(stdout, "Reading input matrix from %s...\n", matrixFile);
readHB_newmat_double(matrixFile, nrow, ncol, nnzero,
colptr, rowind, values);
fprintf(stdout, "Matrix in file %s is %d x %d ", matrixFile, *nrow, *ncol);
fprintf(stdout, "with %d nonzero elements.\n\n", *nnzero);
}
//-----------------------------------------------------------------------------
// Write the output matrix.
void writeOutputMatrix(const char *matrixFile, int nrow, int ncol, int nnzero,
int *colptr, int *rowind, double *values)
{
double *rhs = 0, *guess = 0, *exact = 0;
char mxtype[] = "RUA";
char ptrfmt[] = "(10I8)";
char indfmt[] = "(10I8)";
char valfmt[] = "(5E16.8)";
char rhsfmt[] = "(5E16.8)";
// Write the results of your computation into a file named "eureka",
// which follows the Harwell-Boeing format.
    // POINT 1: Puedes cambiar el nombre "Eureka" si quieres comparar dos versiones de código diferentes.
    // O en caso de que quieras estar seguro de que algunas ejecuciones del mismo código producen exactamente el mismo resultado (no race conditions occur when your
    // parallel strategy is deployed).
    //
    // Incluso podrías evitar llamar a esta función si la operación de salida es demasiado larga.
fprintf(stdout, "Writing output matrix in %s...\n", matrixFile);
writeHB_mat_double(matrixFile, nrow, ncol, nnzero, colptr, rowind, values,
0, rhs, guess, exact, matrixFile, "eureka", mxtype,
ptrfmt, indfmt, valfmt, rhsfmt, "FGN");
fprintf(stdout, "Generated file %s successfully.\n\n", matrixFile);
}
//=============================================================================
// The CUDA Kernel.
//=============================================================================
// Cada hebra añade el elemento que le ha sido asignado a la matriz dispersa
// POINT 2: Cambia el tipo de dato a int, float or double
// You may want to change "float *dvalues" by "double *dvalues" in case
// you are curious to see how much GFLOPS drop when using double precision.
// Or even use "int dvalues" if you want to measure performance in integer ALUs.
// (see also hint MU4 below)
__global__ void kernelAdd(float *dvalues, int numOperations,
                          int firstInd, int nextColInd) //Qué trozo de la matriz tiene que tocar
{
int vi = firstInd + blockIdx.x * blockDim.x + threadIdx.x;
// "numOperations" is the 2nd input parameter to our executable
if (vi < nextColInd) {
        for (int j=0; j<numOperations; ++j) { //Esto también lo podemos cambiar
            // The operation performed on each nonzero of our sparse matrix:
            dvalues[vi] *=dvalues[vi]+dvalues[vi]*dvalues[vi]; // POINT 3: Choices you may try here: Podemos probar divisiones, o cualquier tipo de operación. Según qué operación hagamos, influye en los GFLOPS.
} // *= (for multiply), /= (for division),
} // or you may investigate some other :-)
}
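// Sketch only (not used by this program): if POINT 2/4/5 were switched to double,
// the kernel would look roughly like the commented variant below. The name
// kernelAddDouble is illustrative, not part of the original exercise.
// __global__ void kernelAddDouble(double *dvalues, int numOperations,
//                                 int firstInd, int nextColInd)
// {
//     int vi = firstInd + blockIdx.x * blockDim.x + threadIdx.x;
//     if (vi < nextColInd) {
//         for (int j = 0; j < numOperations; ++j) {
//             dvalues[vi] *= dvalues[vi] + dvalues[vi] * dvalues[vi];
//         }
//     }
// }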
//=============================================================================
// Main.
//=============================================================================
int main(int argc, char **argv)
{
    // ======================= Declaración de variables ==================
//=========================================================================
// Variables.
// CUDA.
hipDeviceProp_t deviceProp;
hipStream_t *stream;
hipEvent_t start, stop;
// Matrix.
// Harwell-Boeing format.
int nrow, ncol, nnzero;
// Compressed Sparse Column format.
int *colptr, *rowind;
    float *values; // POINT 4: Puedes usar int para medir el rendimiento en operaciones en punto fijo
                   // o double para doble precisión
double *values64; //
// To measure time elapsed and performance achieved
float msecMemHst, msecMemDvc, msecCompStr, msecCompKrn;
float numOperationsPerValue, numFloatingPointOperations, opIntensity;
double flops, gigaFlops;
// Misc.
int devID;
int *blocks;
int *threads;
float *dvalues; // POINT 5: This declaration is binded to hints MU2 and MU4
    // ======================= Comprobación de parámetros de entrada ==================
//=========================================================================
// Check command line arguments.
if (argc < 5) {
        fprintf(stderr, "ERROR: Número equivocado de argumentos: %d\n", argc - 1);
fprintf(stderr, "Use: ./mytoy <deviceID> <numOperationsPer");
fprintf(stderr, "Value> <inputMatrixFile> <outputMatrixFile>\n");
exit(EXIT_FAILURE);
}
//-------------------------------------------------------------------------
// This part is just to restrict the execution to device (GPU) 0 or 1
devID = atoi(argv[1]);
if ((devID != 0) && (devID != 1)) {
        fprintf(stderr, "ERROR: El primer parámetro es %s.\n", argv[1]);
fprintf(stderr, "Tiene que ser 0 para seleccionar el dispositivo GPU en el que vamos a ejecutar.");
exit(EXIT_FAILURE);
}
numOperationsPerValue = atoi(argv[2]);
if (numOperationsPerValue <= 0) {
        fprintf(stderr, "ERROR: El segundo parámetro es incorrecto: %s.\n", argv[2]);
        fprintf(stderr, "Representa el número de operaciones por valor y debe ser mayor que 0 ");
exit(EXIT_FAILURE);
}
    // ======================= Lectura de las características de la tarjeta ==================
//=========================================================================
// Get properties of the chosen device.
getDeviceProperties(devID, &deviceProp);
    // =================== Creación de eventos para monitorizar el tiempo ========
//-------------------------------------------------------------------------
// Create CUDA events for timing.
hipEventCreate(&start);
hipEventCreate(&stop);
//====================== Lectura de la matriz de entrada ===================================================
// Lee la matriz de entrada.
readInputMatrix(argv[3], &nrow, &ncol, &nnzero,
&colptr, &rowind, &values64);
    fprintf(stderr,"Tamaño de la matriz, nrow=%d, ncol=%d\n",nrow,ncol);
// ======================= Reserva de memoria ==================
    // POINT 6: Aquí hay que especificar el tipo de dato que puede ser float, double o int (ver Punto 2, punto 4 y punto 5)
values = (float*)malloc(nnzero * sizeof(float));
for (int i=0; i<nnzero; ++i) {
        // POINT 7: No olvides cambiar el casting según la declaración del punto 2, 4, 5 y 6
values[i] = (float)values64[i];
}
    // ======================= Valores para calcular los bloques y el número de hebras por bloque ==================
// Maximum number of threads per block and warp size.
int maxThreadsPerBlock = 1024;
const int warpSize = 32; // Esto no se puede cambiar, no es optativo.
// ======================= Calculo del grid de hebras ==================
    // Calcular el número de bloques y de hebras que necesitamos para cada columna
    // POINT 8: Aquí tienes que establecer el tamaño del grid para sacarle todo el paralelismo que puedas al lanzamiento del kernel (Ver punto 11)
blocks = (int*)malloc(ncol * sizeof(int));
threads = (int*)malloc(ncol * sizeof(int));
    // ======================= Cálculo de los diferentes grids que vamos a lanzar ==================
for (int i=0; i<ncol; ++i) {
fprintf(stderr,"Para i=%d, (((colptr[%d](%d) - colptr[%d](%d))/%d)+1)*%d",i, i+1,colptr[i+1],i,colptr[i],warpSize,warpSize);
threads[i] = (((colptr[i+1] - colptr[i]) / warpSize) + 1) * warpSize;
fprintf(stderr,"->>>> threads[%d]=%d",i,threads[i]);
if (threads[i] <= maxThreadsPerBlock) {
blocks[i] = 1;
} else {
blocks[i] = threads[i] / maxThreadsPerBlock;
if (threads[i] % maxThreadsPerBlock > 0) {blocks[i]++;}
threads[i] = maxThreadsPerBlock;
fprintf(stderr,"->>>> threads[%d]=%d",i,threads[i]);
}
fprintf(stderr,"->>>> Blocks[%d]=%d\n",i,blocks[i]);
}
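    // Worked example of the sizing above: a column with 100 nonzeros gives
    // threads = ((100 / 32) + 1) * 32 = 128 <= 1024, so blocks = 1; a column with
    // 5000 nonzeros gives 5024 threads, i.e. blocks = 5 of 1024 threads each.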
    //========================= Ejecución de los grids ===================================
    // Ejecución
fprintf(stdout, "Running mytoy.\n");
//-------------------------------------------------------------------------
// Copy matrix values from host memory to device memory.
//PUNTO 9: Hay que adecuar el tipo de dato a float, int o double (Ver puntos 2, 4, 5, 6 y 7)
int valuesSize = nnzero * sizeof(float);
cudaErrorHandler(hipEventRecord(start, NULL), __LINE__);
//fprintf(stdout, "Reservando %d bytes en la memoria del ", valuesSize);
//fprintf(stdout, "dispositivo para los valores del array ...\n");
cudaErrorHandler(hipMalloc((void**)&dvalues, valuesSize), __LINE__);
//fprintf(stdout, "Copiando datos desde la memoria del host hasta la memoria del dispositivo...\n");
cudaErrorHandler(hipMemcpy(dvalues, values, valuesSize,
hipMemcpyHostToDevice), __LINE__);
    cudaErrorHandler(hipEventRecord(stop, NULL), __LINE__); // Registra el momento del evento de finalización de la copia de la memoria
cudaErrorHandler(hipEventSynchronize(stop), __LINE__);
    cudaErrorHandler(hipEventElapsedTime(&msecMemHst, start, stop), __LINE__); // Calcula el tiempo transcurridos con una precisión de 0.5 microsegundos
//-------------------------------------------------------------------------
// Create streams.
    cudaErrorHandler(hipEventRecord(start, NULL), __LINE__); // Comienza el siguiente tramo de código
    // PUNTO 10: Si crees que un sólo stream es mejor para toda la matriz,
    // sólo tienes que reemplazar la siguiente sentencia y el bucle por la siguiente línea
// cudaErrorHandler(hipStreamCreate(&stream), __LINE__);
stream = (hipStream_t*)malloc(ncol * sizeof(hipStream_t));
for (int i=0; i<ncol; ++i) {
cudaErrorHandler(hipStreamCreate(&stream[i]), __LINE__);
}
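    // PUNTO 10 (sketch): with a single stream you would also change the declaration
    // above to "hipStream_t stream;" (no malloc and no loop) and launch every kernel
    // below on "stream" instead of stream[i].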
//fprintf(stdout, "Stream(s) Creado correctamente.\n");
    cudaErrorHandler(hipEventRecord(stop, NULL), __LINE__); // Registra la finalización del evento
cudaErrorHandler(hipEventSynchronize(stop), __LINE__); // Sincroniza
cudaErrorHandler(hipEventElapsedTime(&msecCompStr, start, stop),__LINE__); // Calcula el tiempo
//-------------------------------------------------------------------------
// Launch streams.
cudaErrorHandler(hipEventRecord(start, NULL), __LINE__); // Comienza el lanzamiento
//fprintf(stdout, "Lanzando un stream por columna...\n");
    for (int i=0; i<ncol; ++i) { // PUNTO 11: La forma en la que se despliega el paralelismo está aquí.
        // Reemplaza stream[i] por stream en la siguiente línea si has hecho el cambio del punto 9
hipLaunchKernelGGL(( kernelAdd), dim3(blocks[i]), dim3(threads[i]), 0, stream[i], dvalues, numOperationsPerValue, colptr[i], colptr[i+1]);
}
//fprintf(stdout, "Ejecutando los streams...\n");
cudaErrorHandler(hipEventRecord(stop, NULL), __LINE__);
cudaErrorHandler(hipEventSynchronize(stop), __LINE__);
cudaErrorHandler(hipEventElapsedTime(&msecCompKrn, start, stop),__LINE__);
cudaErrorHandler(hipDeviceSynchronize(), __LINE__);
fprintf(stdout, "Streams executed successfully.\n");
//-------------------------------------------------------------------------
// Copiar los resultados de vuelta a la CPU.
cudaErrorHandler(hipEventRecord(start, NULL), __LINE__);
fprintf(stdout, "Copiando los valores de vuelta desde la... ");
fprintf(stdout, "memoria del dispositivo hasta la memoria del host...\n\n");
cudaErrorHandler(hipMemcpy(values, dvalues, valuesSize,
hipMemcpyDeviceToHost), __LINE__);
cudaErrorHandler(hipEventRecord(stop, NULL), __LINE__);
cudaErrorHandler(hipEventSynchronize(stop), __LINE__);
cudaErrorHandler(hipEventElapsedTime(&msecMemDvc, start, stop), __LINE__);
//=======================Escribir matriz de salida ======================
// Escribir la matriz de salida
for (int i=0; i<nnzero; ++i) {
values64[i] = (double)values[i];
}
writeOutputMatrix(argv[4], nrow, ncol, nnzero,
colptr, rowind, values64);
// ======================= Calculo de rendimiento ==================
    // Imprimiendo tiempos y porcentajes.
    float msecMem = msecMemHst + msecMemDvc;
    float msecComp = msecCompStr + msecCompKrn;
    fprintf(stdout, "Tiempo de acceso a la memoria de la GPU: %.4f ms.\n\n", msecMem);
    fprintf(stdout, "Creación de streams en la GPU: %.4f ms.\n", msecCompStr);
    fprintf(stdout, "Tiempo de ejecución del kernel: %.4f ms.\n", msecCompKrn);
    fprintf(stdout, "Tiempo de computación en GPU: %.4f ms.\n\n", msecComp);
    //PUNTO 12: Cambia float, int or double según el punto 2, 4, 5, 6, 7 y 8
opIntensity = numOperationsPerValue / sizeof(float);
fprintf(stdout, "Operaciones en punto flotante por byte: %.4f FLOP/byte.\n", opIntensity);
numFloatingPointOperations = nnzero * numOperationsPerValue;
flops = numFloatingPointOperations / (msecComp / 1000.0f);
gigaFlops = flops * 1.0e-9f;
fprintf(stdout, "Rendimiento: %.4f GFLOP/s.\n\n", gigaFlops);
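    // Example with made-up numbers, just to illustrate the formulas above:
    // numOperationsPerValue = 64 on float data gives 64 / 4 = 16 FLOP/byte, and
    // nnzero = 1e6 with msecComp = 10 ms gives 64e6 / 0.01 s = 6.4 GFLOP/s.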
//=========================================================================
// Free host memory.
free(colptr); free(rowind); free(values);
free(blocks); free(threads);
    // liberación.
cudaErrorHandler(hipDeviceReset(), __LINE__);
return EXIT_SUCCESS;
}
| 42b7efb8d2d68eee00c4a34fe004ddd7fd1176cc.cu | //=============================================================================
// FILE: mytoy.cu
// AUTHORS: Raul Segura & Manuel Ujaldon (copyright 2014)
// Look for the string "MU" whenever Manuel suggests you to introduce changes
// Feel free to change some other parts of the code too (at your own risk)
//=============================================================================
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "io.h"
//=============================================================================
// CUDA functions.
//=============================================================================
//Error handler for CUDA functions.
void cudaErrorHandler(cudaError_t error, const int LINE)
{
if (error != cudaSuccess) {
fprintf(stdout, "ERROR(%d): %s\n", LINE, cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
}
//-----------------------------------------------------------------------------
// Set the GPU device and get its properties.
//Nos da las propiedades de la tarjeta gráfica. La estructura cudaDeviceProp tiene muchos campos que nos pueden servir.
void getDeviceProperties(const int devID, cudaDeviceProp *deviceProp)
{
// Set device.
cudaErrorHandler(cudaSetDevice(devID), __LINE__);
// Get device properties.
fprintf(stdout, "Leyendo propiedades del dispositivo %d...\n", devID);
cudaErrorHandler(cudaGetDeviceProperties(deviceProp, devID), __LINE__);
fprintf(stdout, "GPU Device %d: \"%s\": capacidad de cómputo %d.%d.\n\n",
devID, deviceProp->name, deviceProp->major, deviceProp->minor);
}
//=============================================================================
// IOHB functions (Input/Output Harwell-Boeing) adapted from the HB library
//=============================================================================
// Read the input matrix.
void readInputMatrix(const char *matrixFile, int *nrow, int *ncol, int *nnzero,
int **colptr, int **rowind, double **values)
{
// Read the Harwell-Boeing format matrix file.
fprintf(stdout, "Reading input matrix from %s...\n", matrixFile);
readHB_newmat_double(matrixFile, nrow, ncol, nnzero,
colptr, rowind, values);
fprintf(stdout, "Matrix in file %s is %d x %d ", matrixFile, *nrow, *ncol);
fprintf(stdout, "with %d nonzero elements.\n\n", *nnzero);
}
//-----------------------------------------------------------------------------
// Write the output matrix.
void writeOutputMatrix(const char *matrixFile, int nrow, int ncol, int nnzero,
int *colptr, int *rowind, double *values)
{
double *rhs = 0, *guess = 0, *exact = 0;
char mxtype[] = "RUA";
char ptrfmt[] = "(10I8)";
char indfmt[] = "(10I8)";
char valfmt[] = "(5E16.8)";
char rhsfmt[] = "(5E16.8)";
// Write the results of your computation into a file named "eureka",
// which follows the Harwell-Boeing format.
// POINT 1: Puedes cambiar el nombre "Eureka" si quieres comparar dos versiones de código diferentes.
// O en caso de que quieras estar seguro de que algunas ejecuciones del mismo código producen exactamente el mismo resultado (no race conditions occur when your
// parallel strategy is deployed).
//
// Incluso podrías evitar llamar a esta función si la operación de salida es demasiado larga.
fprintf(stdout, "Writing output matrix in %s...\n", matrixFile);
writeHB_mat_double(matrixFile, nrow, ncol, nnzero, colptr, rowind, values,
0, rhs, guess, exact, matrixFile, "eureka", mxtype,
ptrfmt, indfmt, valfmt, rhsfmt, "FGN");
fprintf(stdout, "Generated file %s successfully.\n\n", matrixFile);
}
//=============================================================================
// The CUDA Kernel.
//=============================================================================
// Cada hebra añade el elemento que le ha sido asignado a la matriz dispersa
// POINT 2: Cambia el tipo de dato a int, float or double
// You may want to change "float *dvalues" by "double *dvalues" in case
// you are curious to see how much GFLOPS drop when using double precision.
// Or even use "int dvalues" if you want to measure performance in integer ALUs.
// (see also hint MU4 below)
__global__ void kernelAdd(float *dvalues, int numOperations,
int firstInd, int nextColInd) //Qué trozo de la matriz tiene que tocar
{
int vi = firstInd + blockIdx.x * blockDim.x + threadIdx.x;
// "numOperations" is the 2nd input parameter to our executable
if (vi < nextColInd) {
for (int j=0; j<numOperations; ++j) { //Esto también lo podemos cambiar
// The operation performed on each nonzero of our sparse matrix:
dvalues[vi] *=dvalues[vi]+dvalues[vi]*dvalues[vi]; // POINT 3: Choices you may try here: Podemos probar divisiones, o cualquier tipo de operación. Según qué operación hagamos, influye en los GFLOPS.
} // *= (for multiply), /= (for division),
} // or you may investigate some other :-)
}
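// Note: each iteration of the update above costs roughly three floating-point
// operations (two multiplies and one add), while main() counts only
// numOperationsPerValue per nonzero, so the reported GFLOP/s is a lower bound.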
//=============================================================================
// Main.
//=============================================================================
int main(int argc, char **argv)
{
// ======================= Declaración de variables ==================
//=========================================================================
// Variables.
// CUDA.
cudaDeviceProp deviceProp;
cudaStream_t *stream;
cudaEvent_t start, stop;
// Matrix.
// Harwell-Boeing format.
int nrow, ncol, nnzero;
// Compressed Sparse Column format.
int *colptr, *rowind;
    float *values; // POINT 4: Puedes usar int para medir el rendimiento en operaciones en punto fijo
// o double para doble precisión
double *values64; //
// To measure time elapsed and performance achieved
float msecMemHst, msecMemDvc, msecCompStr, msecCompKrn;
float numOperationsPerValue, numFloatingPointOperations, opIntensity;
double flops, gigaFlops;
// Misc.
int devID;
int *blocks;
int *threads;
float *dvalues; // POINT 5: This declaration is binded to hints MU2 and MU4
// ======================= Comprobación de parámetros de entrada ==================
//=========================================================================
// Check command line arguments.
if (argc < 5) {
fprintf(stderr, "ERROR: Número equivocado de argumentos: %d\n", argc - 1);
fprintf(stderr, "Use: ./mytoy <deviceID> <numOperationsPer");
fprintf(stderr, "Value> <inputMatrixFile> <outputMatrixFile>\n");
exit(EXIT_FAILURE);
}
//-------------------------------------------------------------------------
// This part is just to restrict the execution to device (GPU) 0 or 1
devID = atoi(argv[1]);
if ((devID != 0) && (devID != 1)) {
        fprintf(stderr, "ERROR: El primer parámetro es %s.\n", argv[1]);
fprintf(stderr, "Tiene que ser 0 para seleccionar el dispositivo GPU en el que vamos a ejecutar.");
exit(EXIT_FAILURE);
}
numOperationsPerValue = atoi(argv[2]);
if (numOperationsPerValue <= 0) {
fprintf(stderr, "ERROR: El segundo parámetro es incorrecto: %s.\n", argv[2]);
fprintf(stderr, "Representa el número de operaciones por valor y debe ser mayor que 0 ");
exit(EXIT_FAILURE);
}
// ======================= Lectura de las características de la tarjeta ==================
//=========================================================================
// Get properties of the chosen device.
getDeviceProperties(devID, &deviceProp);
// =================== Creación de eventos para monitorizar el tiempo ========
//-------------------------------------------------------------------------
// Create CUDA events for timing.
cudaEventCreate(&start);
cudaEventCreate(&stop);
//====================== Lectura de la matriz de entrada ===================================================
// Lee la matriz de entrada.
readInputMatrix(argv[3], &nrow, &ncol, &nnzero,
&colptr, &rowind, &values64);
fprintf(stderr,"Tamaño de la matriz, nrow=%d, ncol=%d\n",nrow,ncol);
// ======================= Reserva de memoria ==================
// POINT 6: Aquí hay que especificar el tipo de dato que puede ser float, double o int (ver Punto 2, punto 4 y punto 5)
values = (float*)malloc(nnzero * sizeof(float));
for (int i=0; i<nnzero; ++i) {
// POINT 7: No olvides cambiar el casting según la declaración del punto 2, 4, 5 y 6
values[i] = (float)values64[i];
}
// ======================= Valores para calcular los bloques y el número de hebras por bloque ==================
// Maximum number of threads per block and warp size.
int maxThreadsPerBlock = 1024;
const int warpSize = 32; // Esto no se puede cambiar, no es optativo.
// ======================= Calculo del grid de hebras ==================
// Calcular el número de bloques y de hebras que necesitamos para cada columna
// POINT 8: Aquí tienes que establecer el tamaño del grid para sacarle todo el paralelismo que puedas al lanzamiento del kernel (Ver punto 11)
blocks = (int*)malloc(ncol * sizeof(int));
threads = (int*)malloc(ncol * sizeof(int));
// ======================= Cálculo de los diferentes grids que vamos a lanzar ==================
for (int i=0; i<ncol; ++i) {
fprintf(stderr,"Para i=%d, (((colptr[%d](%d) - colptr[%d](%d))/%d)+1)*%d",i, i+1,colptr[i+1],i,colptr[i],warpSize,warpSize);
threads[i] = (((colptr[i+1] - colptr[i]) / warpSize) + 1) * warpSize;
fprintf(stderr,"->>>> threads[%d]=%d",i,threads[i]);
if (threads[i] <= maxThreadsPerBlock) {
blocks[i] = 1;
} else {
blocks[i] = threads[i] / maxThreadsPerBlock;
if (threads[i] % maxThreadsPerBlock > 0) {blocks[i]++;}
threads[i] = maxThreadsPerBlock;
fprintf(stderr,"->>>> threads[%d]=%d",i,threads[i]);
}
fprintf(stderr,"->>>> Blocks[%d]=%d\n",i,blocks[i]);
}
//========================= Ejecución de los grids ===================================
// Ejecución
fprintf(stdout, "Running mytoy.\n");
//-------------------------------------------------------------------------
// Copy matrix values from host memory to device memory.
//PUNTO 9: Hay que adecuar el tipo de dato a float, int o double (Ver puntos 2, 4, 5, 6 y 7)
int valuesSize = nnzero * sizeof(float);
cudaErrorHandler(cudaEventRecord(start, NULL), __LINE__);
//fprintf(stdout, "Reservando %d bytes en la memoria del ", valuesSize);
//fprintf(stdout, "dispositivo para los valores del array ...\n");
cudaErrorHandler(cudaMalloc((void**)&dvalues, valuesSize), __LINE__);
//fprintf(stdout, "Copiando datos desde la memoria del host hasta la memoria del dispositivo...\n");
cudaErrorHandler(cudaMemcpy(dvalues, values, valuesSize,
cudaMemcpyHostToDevice), __LINE__);
cudaErrorHandler(cudaEventRecord(stop, NULL), __LINE__); // Registra el momento del evento de finalización de la copia de la memoria
cudaErrorHandler(cudaEventSynchronize(stop), __LINE__);
cudaErrorHandler(cudaEventElapsedTime(&msecMemHst, start, stop), __LINE__); // Calcula el tiempo transcurridos con una precisión de 0.5 microsegundos
//-------------------------------------------------------------------------
// Create streams.
cudaErrorHandler(cudaEventRecord(start, NULL), __LINE__); // Comienza el siguiente tramo de código
// PUNTO 10: Si crees que un sólo stream es mejor para toda la matriz,
// sólo tienes que reemplazar la siguiente sentencia y el bucle por la siguiente línea
// cudaErrorHandler(cudaStreamCreate(&stream), __LINE__);
stream = (cudaStream_t*)malloc(ncol * sizeof(cudaStream_t));
for (int i=0; i<ncol; ++i) {
cudaErrorHandler(cudaStreamCreate(&stream[i]), __LINE__);
}
//fprintf(stdout, "Stream(s) Creado correctamente.\n");
cudaErrorHandler(cudaEventRecord(stop, NULL), __LINE__); // Registra la finalización del evento
cudaErrorHandler(cudaEventSynchronize(stop), __LINE__); // Sincroniza
cudaErrorHandler(cudaEventElapsedTime(&msecCompStr, start, stop),__LINE__); // Calcula el tiempo
//-------------------------------------------------------------------------
// Launch streams.
cudaErrorHandler(cudaEventRecord(start, NULL), __LINE__); // Comienza el lanzamiento
//fprintf(stdout, "Lanzando un stream por columna...\n");
for (int i=0; i<ncol; ++i) { // PUNTO 11: La forma en la que se despliega el paralelismo está aquí.
// Reemplaza stream[i] por stream en la siguiente línea si has hecho el cambio del punto 9
kernelAdd<<<blocks[i], threads[i], 0, stream[i]>>>(dvalues, numOperationsPerValue, colptr[i], colptr[i+1]);
}
//fprintf(stdout, "Ejecutando los streams...\n");
cudaErrorHandler(cudaEventRecord(stop, NULL), __LINE__);
cudaErrorHandler(cudaEventSynchronize(stop), __LINE__);
cudaErrorHandler(cudaEventElapsedTime(&msecCompKrn, start, stop),__LINE__);
cudaErrorHandler(cudaDeviceSynchronize(), __LINE__);
fprintf(stdout, "Streams executed successfully.\n");
//-------------------------------------------------------------------------
// Copiar los resultados de vuelta a la CPU.
cudaErrorHandler(cudaEventRecord(start, NULL), __LINE__);
fprintf(stdout, "Copiando los valores de vuelta desde la... ");
fprintf(stdout, "memoria del dispositivo hasta la memoria del host...\n\n");
cudaErrorHandler(cudaMemcpy(values, dvalues, valuesSize,
cudaMemcpyDeviceToHost), __LINE__);
cudaErrorHandler(cudaEventRecord(stop, NULL), __LINE__);
cudaErrorHandler(cudaEventSynchronize(stop), __LINE__);
cudaErrorHandler(cudaEventElapsedTime(&msecMemDvc, start, stop), __LINE__);
//=======================Escribir matriz de salida ======================
// Escribir la matriz de salida
for (int i=0; i<nnzero; ++i) {
values64[i] = (double)values[i];
}
writeOutputMatrix(argv[4], nrow, ncol, nnzero,
colptr, rowind, values64);
// ======================= Calculo de rendimiento ==================
    // Imprimiendo tiempos y porcentajes.
float msecMem = msecMemHst + msecMemDvc;
float msecComp = msecCompStr + msecCompKrn;
fprintf(stdout, "Tiempo de acceso a la memoria de la GPU: %.4f ms.\n\n", msecMem);
fprintf(stdout, "Creación de streams en la GPU: %.4f ms.\n", msecCompStr);
fprintf(stdout, "Tiempo de ejecución del kernel: %.4f ms.\n", msecCompKrn);
fprintf(stdout, "Tiempo de computación en GPU: %.4f ms.\n\n", msecComp);
//PUNTO 12: Cambia float, int or double según el punto 2, 4, 5, 6, 7 y 8
opIntensity = numOperationsPerValue / sizeof(float);
fprintf(stdout, "Operaciones en punto flotante por byte: %.4f FLOP/byte.\n", opIntensity);
numFloatingPointOperations = nnzero * numOperationsPerValue;
flops = numFloatingPointOperations / (msecComp / 1000.0f);
gigaFlops = flops * 1.0e-9f;
fprintf(stdout, "Rendimiento: %.4f GFLOP/s.\n\n", gigaFlops);
//=========================================================================
// Free host memory.
free(colptr); free(rowind); free(values);
free(blocks); free(threads);
// liberación.
cudaErrorHandler(cudaDeviceReset(), __LINE__);
return EXIT_SUCCESS;
}
|
219009a8df1f612786224ea8a175ec4dd5f54134.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/arg_min_max_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#if defined(__NVCC__) || defined(__HIPCC__)
#ifdef __NVCC__
#include "hipcub/hipcub.hpp"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include <limits>
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/utils/data_type.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
namespace { // NOLINT
template <typename K, typename V>
using KeyValuePair = hipcub::KeyValuePair<K, V>;
} // end namespace
#define FIXED_BLOCK_DIM_CASE_BASE(log2_block_dim, ...) \
case (1 << (log2_block_dim)): { \
constexpr auto kBlockDim = (1 << (log2_block_dim)); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM_CASE(...) \
FIXED_BLOCK_DIM_CASE_BASE(10, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(9, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(8, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(7, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(6, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(5, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(4, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(3, ##__VA_ARGS__);
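// The macros above expand into switch cases for the power-of-two block sizes
// 1024, 512, ..., 8; inside each case kBlockDim is constexpr, so it can be used
// as the BlockDim template argument of ArgCUDAKernel.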
template <typename T, typename IndType, class Reducer, size_t BlockDim>
__global__ void ArgCUDAKernel(const int64_t height, // n * h
const int64_t width, // c
const int64_t post_size, // h
const Reducer reducer,
const T init,
const T* in,
IndType* out) {
typedef hipcub::BlockReduce<KeyValuePair<int, T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int idx = blockIdx.x; idx < height; idx += gridDim.x) {
KeyValuePair<int, T> kv_pair = {-1, init};
int h = idx / post_size;
int w = idx % post_size;
for (int k = threadIdx.x; k < width; k += blockDim.x) {
kv_pair =
reducer({k, in[h * width * post_size + k * post_size + w]}, kv_pair);
}
kv_pair = BlockReduce(temp_storage).Reduce(kv_pair, reducer);
if (threadIdx.x == 0) {
out[idx] = static_cast<IndType>(kv_pair.key);
}
__syncthreads();
}
}
template <typename T, typename IndType, class Reducer>
void ComputeFullArg(const phi::GPUContext& dev_ctx,
const DenseTensor& input,
DenseTensor* indices,
const int64_t pre,
const int64_t post,
const int64_t n) {
auto cu_stream = dev_ctx.stream();
auto ComputeBlockSize = [](int64_t col) {
auto block_size = 8;
if (col > 512)
block_size = 1024;
else if (col > 256)
block_size = 512;
else if (col > 128)
block_size = 256;
else if (col > 64)
block_size = 128;
else if (col > 32)
block_size = 64;
else if (col > 16)
block_size = 32;
else if (col > 8)
block_size = 16;
#ifdef __HIPCC__
block_size = ::min(block_size, 256);
#endif
return block_size;
};
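  // The heuristic above picks roughly the smallest power of two that covers `col`,
  // between 8 and 1024 (and further capped at 256 when compiling with HIP).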
int64_t max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int64_t height = pre * post;
int64_t width = n;
int64_t grid_size = height < max_grid_dimx ? height : max_grid_dimx;
const T* in_data = input.data<T>();
IndType* out_data = dev_ctx.template Alloc<IndType>(indices);
if (typeid(Reducer) == typeid(hipcub::ArgMax)) {
switch (ComputeBlockSize(width)) {
      FIXED_BLOCK_DIM_CASE(hipLaunchKernelGGL(
          HIP_KERNEL_NAME(ArgCUDAKernel<T, IndType, Reducer, kBlockDim>),
          dim3(grid_size), dim3(kBlockDim), 0, cu_stream,
          height,
          width,
          post,
          Reducer(),
          std::numeric_limits<T>::lowest(),
          in_data,
          out_data));
}
} else {
switch (ComputeBlockSize(width)) {
      FIXED_BLOCK_DIM_CASE(hipLaunchKernelGGL(
          HIP_KERNEL_NAME(ArgCUDAKernel<T, IndType, Reducer, kBlockDim>),
          dim3(grid_size), dim3(kBlockDim), 0, cu_stream,
          height,
          width,
          post,
          Reducer(),
          std::numeric_limits<T>::max(),
          in_data,
          out_data));
}
}
}
template <typename Context, typename T, class Reducer>
struct VisitDataCudaArgMinMaxFunctor {
const Context& dev_ctx;
const DenseTensor& x;
int64_t axis;
bool keepdims;
bool flatten;
DenseTensor* out;
explicit VisitDataCudaArgMinMaxFunctor(const Context& dev_ctx,
const DenseTensor& x,
int64_t axis,
bool keepdims,
bool flatten,
DenseTensor* out)
: dev_ctx(dev_ctx),
x(x),
axis(axis),
keepdims(keepdims),
flatten(flatten),
out(out) {}
template <typename IndType>
void apply() const {
phi::DDim x_dims;
int new_axis = axis;
if (flatten) {
x_dims = phi::make_ddim({x.numel()});
// if flatten, the axis just as 0
new_axis = 0;
} else {
x_dims = x.dims();
if (axis < 0) new_axis = axis + x.dims().size();
}
// For 0D Tensor
if (x.dims().size() == 0) {
dev_ctx.template Alloc<IndType>(out);
phi::funcs::set_constant(dev_ctx, out, 0);
return;
}
int64_t numel = x.numel();
int64_t groups = numel / x_dims[new_axis];
int64_t pre = 1;
int64_t post = 1;
int64_t n = x_dims[new_axis];
for (int i = 0; i < new_axis; i++) {
pre *= x_dims[i];
}
for (int i = new_axis + 1; i < x_dims.size(); i++) {
post *= x_dims[i];
}
ComputeFullArg<T, IndType, Reducer>(dev_ctx, x, out, pre, post, n);
}
};
template <typename Context, typename T, class Reducer>
void ArgMinMaxOpCUDAKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& axis,
bool keepdims,
bool flatten,
int dtype,
DenseTensor* out) {
if (dtype < 0) {
phi::VisitDataTypeTiny(
phi::DataType::INT64,
VisitDataCudaArgMinMaxFunctor<Context, T, Reducer>(
dev_ctx, x, axis.to<int64_t>(), keepdims, flatten, out));
return;
}
phi::VisitDataTypeTiny(
phi::TransToPhiDataType(dtype),
VisitDataCudaArgMinMaxFunctor<Context, T, Reducer>(
dev_ctx, x, axis.to<int64_t>(), keepdims, flatten, out));
}
template <typename T, typename Context>
void ArgMinKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& axis,
bool keepdims,
bool flatten,
int dtype,
DenseTensor* out) {
ArgMinMaxOpCUDAKernel<Context, T, hipcub::ArgMin>(
dev_ctx, x, axis, keepdims, flatten, dtype, out);
}
template <typename T, typename Context>
void ArgMaxKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& axis,
bool keepdims,
bool flatten,
int dtype,
DenseTensor* out) {
ArgMinMaxOpCUDAKernel<Context, T, hipcub::ArgMax>(
dev_ctx, x, axis, keepdims, flatten, dtype, out);
}
#endif
} // namespace phi
PD_REGISTER_KERNEL(argmin,
GPU,
ALL_LAYOUT,
phi::ArgMinKernel,
phi::dtype::float16,
phi::dtype::bfloat16,
float,
double,
int32_t,
int64_t,
int16_t,
uint8_t) {}
PD_REGISTER_KERNEL(argmax,
GPU,
ALL_LAYOUT,
phi::ArgMaxKernel,
phi::dtype::float16,
phi::dtype::bfloat16,
float,
double,
int32_t,
int64_t,
int16_t,
uint8_t) {}
| 219009a8df1f612786224ea8a175ec4dd5f54134.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/arg_min_max_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#if defined(__NVCC__) || defined(__HIPCC__)
#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include <limits>
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/utils/data_type.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
namespace { // NOLINT
template <typename K, typename V>
using KeyValuePair = cub::KeyValuePair<K, V>;
} // end namespace
#define FIXED_BLOCK_DIM_CASE_BASE(log2_block_dim, ...) \
case (1 << (log2_block_dim)): { \
constexpr auto kBlockDim = (1 << (log2_block_dim)); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM_CASE(...) \
FIXED_BLOCK_DIM_CASE_BASE(10, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(9, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(8, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(7, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(6, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(5, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(4, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(3, ##__VA_ARGS__);
template <typename T, typename IndType, class Reducer, size_t BlockDim>
__global__ void ArgCUDAKernel(const int64_t height, // n * h
const int64_t width, // c
const int64_t post_size, // h
const Reducer reducer,
const T init,
const T* in,
IndType* out) {
typedef cub::BlockReduce<KeyValuePair<int, T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int idx = blockIdx.x; idx < height; idx += gridDim.x) {
KeyValuePair<int, T> kv_pair = {-1, init};
int h = idx / post_size;
int w = idx % post_size;
for (int k = threadIdx.x; k < width; k += blockDim.x) {
kv_pair =
reducer({k, in[h * width * post_size + k * post_size + w]}, kv_pair);
}
kv_pair = BlockReduce(temp_storage).Reduce(kv_pair, reducer);
if (threadIdx.x == 0) {
out[idx] = static_cast<IndType>(kv_pair.key);
}
__syncthreads();
}
}
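// Each block walks over output positions via the grid-stride loop on `height`;
// its threads stride over `width` (the reduced axis), keep a running
// (index, value) pair, combine them with cub::BlockReduce, and thread 0 writes
// the winning index for that position.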
template <typename T, typename IndType, class Reducer>
void ComputeFullArg(const phi::GPUContext& dev_ctx,
const DenseTensor& input,
DenseTensor* indices,
const int64_t pre,
const int64_t post,
const int64_t n) {
auto cu_stream = dev_ctx.stream();
auto ComputeBlockSize = [](int64_t col) {
auto block_size = 8;
if (col > 512)
block_size = 1024;
else if (col > 256)
block_size = 512;
else if (col > 128)
block_size = 256;
else if (col > 64)
block_size = 128;
else if (col > 32)
block_size = 64;
else if (col > 16)
block_size = 32;
else if (col > 8)
block_size = 16;
#ifdef __HIPCC__
block_size = std::min(block_size, 256);
#endif
return block_size;
};
int64_t max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int64_t height = pre * post;
int64_t width = n;
int64_t grid_size = height < max_grid_dimx ? height : max_grid_dimx;
const T* in_data = input.data<T>();
IndType* out_data = dev_ctx.template Alloc<IndType>(indices);
if (typeid(Reducer) == typeid(cub::ArgMax)) {
switch (ComputeBlockSize(width)) {
FIXED_BLOCK_DIM_CASE(ArgCUDAKernel<T, IndType, Reducer, kBlockDim>
<<<grid_size, kBlockDim, 0, cu_stream>>>(
height,
width,
post,
Reducer(),
std::numeric_limits<T>::lowest(),
in_data,
out_data));
}
} else {
switch (ComputeBlockSize(width)) {
FIXED_BLOCK_DIM_CASE(ArgCUDAKernel<T, IndType, Reducer, kBlockDim>
<<<grid_size, kBlockDim, 0, cu_stream>>>(
height,
width,
post,
Reducer(),
std::numeric_limits<T>::max(),
in_data,
out_data));
}
}
}
template <typename Context, typename T, class Reducer>
struct VisitDataCudaArgMinMaxFunctor {
const Context& dev_ctx;
const DenseTensor& x;
int64_t axis;
bool keepdims;
bool flatten;
DenseTensor* out;
explicit VisitDataCudaArgMinMaxFunctor(const Context& dev_ctx,
const DenseTensor& x,
int64_t axis,
bool keepdims,
bool flatten,
DenseTensor* out)
: dev_ctx(dev_ctx),
x(x),
axis(axis),
keepdims(keepdims),
flatten(flatten),
out(out) {}
template <typename IndType>
void apply() const {
phi::DDim x_dims;
int new_axis = axis;
if (flatten) {
x_dims = phi::make_ddim({x.numel()});
// if flatten, the axis just as 0
new_axis = 0;
} else {
x_dims = x.dims();
if (axis < 0) new_axis = axis + x.dims().size();
}
// For 0D Tensor
if (x.dims().size() == 0) {
dev_ctx.template Alloc<IndType>(out);
phi::funcs::set_constant(dev_ctx, out, 0);
return;
}
int64_t numel = x.numel();
int64_t groups = numel / x_dims[new_axis];
int64_t pre = 1;
int64_t post = 1;
int64_t n = x_dims[new_axis];
for (int i = 0; i < new_axis; i++) {
pre *= x_dims[i];
}
for (int i = new_axis + 1; i < x_dims.size(); i++) {
post *= x_dims[i];
}
ComputeFullArg<T, IndType, Reducer>(dev_ctx, x, out, pre, post, n);
}
};
template <typename Context, typename T, class Reducer>
void ArgMinMaxOpCUDAKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& axis,
bool keepdims,
bool flatten,
int dtype,
DenseTensor* out) {
if (dtype < 0) {
phi::VisitDataTypeTiny(
phi::DataType::INT64,
VisitDataCudaArgMinMaxFunctor<Context, T, Reducer>(
dev_ctx, x, axis.to<int64_t>(), keepdims, flatten, out));
return;
}
phi::VisitDataTypeTiny(
phi::TransToPhiDataType(dtype),
VisitDataCudaArgMinMaxFunctor<Context, T, Reducer>(
dev_ctx, x, axis.to<int64_t>(), keepdims, flatten, out));
}
template <typename T, typename Context>
void ArgMinKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& axis,
bool keepdims,
bool flatten,
int dtype,
DenseTensor* out) {
ArgMinMaxOpCUDAKernel<Context, T, cub::ArgMin>(
dev_ctx, x, axis, keepdims, flatten, dtype, out);
}
template <typename T, typename Context>
void ArgMaxKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& axis,
bool keepdims,
bool flatten,
int dtype,
DenseTensor* out) {
ArgMinMaxOpCUDAKernel<Context, T, cub::ArgMax>(
dev_ctx, x, axis, keepdims, flatten, dtype, out);
}
#endif
} // namespace phi
PD_REGISTER_KERNEL(argmin,
GPU,
ALL_LAYOUT,
phi::ArgMinKernel,
phi::dtype::float16,
phi::dtype::bfloat16,
float,
double,
int32_t,
int64_t,
int16_t,
uint8_t) {}
PD_REGISTER_KERNEL(argmax,
GPU,
ALL_LAYOUT,
phi::ArgMaxKernel,
phi::dtype::float16,
phi::dtype::bfloat16,
float,
double,
int32_t,
int64_t,
int16_t,
uint8_t) {}
|
e131d77ec0db6eb1b7d145d75b304220a40521ac.hip | // !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPGeneratorImpl.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <ATen/core/PhiloxRNGEngine.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <thread>
using namespace at;
/*
* Philox Engine Tests
*/
__global__ void testEngineReproducibility(){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
at::Philox4_32 engine1(0, idx, 4);
at::Philox4_32 engine2(0, idx, 4);
assert(engine1() == engine2());
}
void test_engine_reproducibility(){
hipLaunchKernelGGL(( testEngineReproducibility), dim3(1), dim3(1), 0, 0, );
C10_HIP_KERNEL_LAUNCH_CHECK();
}
TEST(CUDAGeneratorImpl, TestPhiloxEngineReproducibility) {
// Test Description:
// Tests if same inputs give same results.
// launch one thread and create two engines.
// Given same seed, idx and offset, assert that the engines
// should be aligned and have the same sequence.
if (!at::cuda::is_available()) return;
test_engine_reproducibility();
hipError_t err = hipDeviceSynchronize();
bool isEQ = err == hipSuccess;
ASSERT_TRUE(isEQ);
}
__global__ void testEngineOffset1(){
at::Philox4_32 engine1(123, 1, 0);
// Note: offset is a multiple of 4.
// So if you want to skip 8 values, offset would
// be 2, since 2*4=8.
at::Philox4_32 engine2(123, 1, 2);
for(int i = 0; i < 8; i++){
// Note: instead of using the engine() call 8 times
// we could have achieved the same functionality by
// calling the incr() function twice.
engine1();
}
assert(engine1() == engine2());
}
void test_engine_offset1(){
hipLaunchKernelGGL(( testEngineOffset1), dim3(1), dim3(1), 0, 0, );
C10_HIP_KERNEL_LAUNCH_CHECK();
}
TEST(CUDAGeneratorImpl, TestPhiloxEngineOffset1) {
// Test Description:
// Tests offsetting in same thread.
// launch one thread and create two engines.
// make one engine skip the first 8 values and
// make another engine increment to until the
// first 8 values. Assert that the first call
// of engine2 and the 9th call of engine1 are equal.
if (!at::cuda::is_available()) return;
test_engine_offset1();
hipError_t err = hipDeviceSynchronize();
bool isEQ = err == hipSuccess;
ASSERT_TRUE(isEQ);
}
__global__ void testEngineOffset2(){
unsigned long long increment_val = ::ldexp(1.0, 64);
at::Philox4_32 engine1(123, 0, increment_val);
at::Philox4_32 engine2(123, increment_val, increment_val);
engine2.incr_n(increment_val);
engine2.incr();
assert(engine1() == engine2());
}
void test_engine_offset2(){
hipLaunchKernelGGL(( testEngineOffset2), dim3(1), dim3(1), 0, 0, );
C10_HIP_KERNEL_LAUNCH_CHECK();
}
TEST(CUDAGeneratorImpl, TestPhiloxEngineOffset2) {
// Test Description:
// Tests edge case at the end of the 2^190th value of the generator.
// launch one thread and create two engines
// make engine1 skip to the 2^64th 128 bit while being at thread 0
// make engine2 skip to the 2^64th 128 bit while being at 2^64th thread
// Assert that engine2 should be increment_val+1 steps behind engine1.
if (!at::cuda::is_available()) return;
test_engine_offset2();
hipDeviceSynchronize();
bool isEQ = hipGetLastError() == hipSuccess;
ASSERT_TRUE(isEQ);
}
__global__ void testEngineOffset3(){
unsigned long long increment_val = ::ldexp(1.0, 64);
at::Philox4_32 engine1(123, 0, increment_val);
at::Philox4_32 engine2(123, 1, 0);
engine1.incr();
assert(engine1() == engine2());
}
void test_engine_offset3(){
  hipLaunchKernelGGL(( testEngineOffset3), dim3(1), dim3(1), 0, 0, );
C10_HIP_KERNEL_LAUNCH_CHECK();
}
TEST(CUDAGeneratorImpl, TestPhiloxEngineOffset3) {
// Test Description:
// Tests edge case in between threads.
// launch one thread and create two engines
// make engine1 skip to the 2^64th 128 bit while being at thread 0
// start engine2 at thread 1, with offset 0
// Assert that engine1 is 1 step behind engine2.
if (!at::cuda::is_available()) return;
test_engine_offset3();
hipDeviceSynchronize();
bool isEQ = hipGetLastError() == hipSuccess;
ASSERT_TRUE(isEQ);
}
__global__ void testEngineThreadIndex(){
at::Philox4_32 engine1(123456, 0, 4);
at::Philox4_32 engine2(123456, 1, 4);
assert(engine1() != engine2());
}
void test_engine_thread_index(){
hipLaunchKernelGGL(( testEngineThreadIndex), dim3(1), dim3(1), 0, 0, );
C10_HIP_KERNEL_LAUNCH_CHECK();
}
TEST(CUDAGeneratorImpl, TestPhiloxEngineIndex) {
// Test Description:
// Tests if thread indexing is working properly.
// launch one thread and create two engines
// with different thread index but same offset.
// Assert that the engines have different sequences.
if (!at::cuda::is_available()) return;
test_engine_thread_index();
hipDeviceSynchronize();
bool isEQ = hipGetLastError() == hipSuccess;
ASSERT_TRUE(isEQ);
}
/*
* CUDA Generator Tests
*/
TEST(CUDAGeneratorImpl, TestGeneratorDynamicCast) {
// Test Description: Check dynamic cast for CUDA
if (!at::cuda::is_available()) return;
auto foo = at::cuda::detail::createCUDAGenerator();
auto result = foo.get<CUDAGeneratorImpl>();
ASSERT_EQ(typeid(at::CUDAGeneratorImpl*).hash_code(), typeid(result).hash_code());
}
TEST(CUDAGeneratorImpl, TestDefaultGenerator) {
// Test Description:
// Check if default generator state is created only once
// address of generator should be same in all calls
if (!at::cuda::is_available()) return;
auto foo = at::cuda::detail::getDefaultCUDAGenerator();
auto bar = at::cuda::detail::getDefaultCUDAGenerator();
ASSERT_EQ(foo, bar);
if (c10::hip::device_count() >= 2) {
foo = at::cuda::detail::getDefaultCUDAGenerator(1);
bar = at::cuda::detail::getDefaultCUDAGenerator(1);
ASSERT_EQ(foo, bar);
foo = at::cuda::detail::getDefaultCUDAGenerator(0);
bar = at::cuda::detail::getDefaultCUDAGenerator(1);
ASSERT_NE(foo, bar);
}
}
TEST(CUDAGeneratorImpl, TestCloning) {
// Test Description:
// Check cloning of new generators.
// Note that we don't allow cloning of other
// generator states into default generators.
if (!at::cuda::is_available()) return;
auto gen1 = at::cuda::detail::createCUDAGenerator();
gen1.set_current_seed(123); // modify gen1 state
auto cuda_gen1 = check_generator<CUDAGeneratorImpl>(gen1);
cuda_gen1->set_philox_offset_per_thread(4);
auto gen2 = at::cuda::detail::createCUDAGenerator();
gen2 = gen1.clone();
auto cuda_gen2 = check_generator<CUDAGeneratorImpl>(gen2);
ASSERT_EQ(gen1.current_seed(), gen2.current_seed());
ASSERT_EQ(
cuda_gen1->philox_offset_per_thread(),
cuda_gen2->philox_offset_per_thread()
);
}
void thread_func_get_set_current_seed(Generator generator) {
std::lock_guard<std::mutex> lock(generator.mutex());
auto current_seed = generator.current_seed();
current_seed++;
generator.set_current_seed(current_seed);
}
TEST(CUDAGeneratorImpl, TestMultithreadingGetSetCurrentSeed) {
// Test Description:
// Test current seed getter and setter are thread safe
// See Note [Acquire lock when using random generators]
if (!at::cuda::is_available()) return;
auto gen1 = at::cuda::detail::getDefaultCUDAGenerator();
auto initial_seed = gen1.current_seed();
std::thread t0{thread_func_get_set_current_seed, gen1};
std::thread t1{thread_func_get_set_current_seed, gen1};
std::thread t2{thread_func_get_set_current_seed, gen1};
t0.join();
t1.join();
t2.join();
ASSERT_EQ(gen1.current_seed(), initial_seed+3);
}
TEST(CUDAGeneratorImpl, TestRNGForking) {
// Test Description:
// Test that state of a generator can be frozen and
// restored
// See Note [Acquire lock when using random generators]
if (!at::cuda::is_available()) return;
auto default_gen = at::cuda::detail::getDefaultCUDAGenerator();
auto current_gen = at::cuda::detail::createCUDAGenerator();
{
std::lock_guard<std::mutex> lock(default_gen.mutex());
current_gen = default_gen.clone(); // capture the current state of default generator
}
auto target_value = at::randn({1000}, at::kCUDA);
// Dramatically alter the internal state of the main generator
auto x = at::randn({100000}, at::kCUDA);
auto forked_value = at::randn({1000}, current_gen, at::kCUDA);
ASSERT_EQ(target_value.sum().item<double>(), forked_value.sum().item<double>());
}
void makeRandomNumber() {
hipSetDevice(std::rand() % 2);
auto x = at::randn({1000});
}
void testCudaRNGMultithread() {
auto threads = std::vector<std::thread>();
for (auto i = 0; i < 1000; i++) {
threads.emplace_back(makeRandomNumber);
}
for (auto& t : threads) {
t.join();
}
};
TEST(CUDAGeneratorImpl, TestMultithreadRNG) {
if (!at::cuda::is_available()) return;
testCudaRNGMultithread();
}
| e131d77ec0db6eb1b7d145d75b304220a40521ac.cu | #include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <ATen/core/PhiloxRNGEngine.h>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <thread>
using namespace at;
/*
* Philox Engine Tests
*/
__global__ void testEngineReproducibility(){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
at::Philox4_32 engine1(0, idx, 4);
at::Philox4_32 engine2(0, idx, 4);
assert(engine1() == engine2());
}
void test_engine_reproducibility(){
testEngineReproducibility<<<1, 1>>>();
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
TEST(CUDAGeneratorImpl, TestPhiloxEngineReproducibility) {
// Test Description:
// Tests if same inputs give same results.
// launch one thread and create two engines.
// Given same seed, idx and offset, assert that the engines
// should be aligned and have the same sequence.
if (!at::cuda::is_available()) return;
test_engine_reproducibility();
cudaError_t err = cudaDeviceSynchronize();
bool isEQ = err == cudaSuccess;
ASSERT_TRUE(isEQ);
}
__global__ void testEngineOffset1(){
at::Philox4_32 engine1(123, 1, 0);
// Note: offset is a multiple of 4.
// So if you want to skip 8 values, offset would
// be 2, since 2*4=8.
at::Philox4_32 engine2(123, 1, 2);
for(int i = 0; i < 8; i++){
// Note: instead of using the engine() call 8 times
// we could have achieved the same functionality by
// calling the incr() function twice.
engine1();
}
assert(engine1() == engine2());
}
void test_engine_offset1(){
testEngineOffset1<<<1, 1>>>();
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
TEST(CUDAGeneratorImpl, TestPhiloxEngineOffset1) {
// Test Description:
// Tests offsetting in same thread.
// launch one thread and create two engines.
// make one engine skip the first 8 values and
// make another engine increment to until the
// first 8 values. Assert that the first call
// of engine2 and the 9th call of engine1 are equal.
if (!at::cuda::is_available()) return;
test_engine_offset1();
cudaError_t err = cudaDeviceSynchronize();
bool isEQ = err == cudaSuccess;
ASSERT_TRUE(isEQ);
}
__global__ void testEngineOffset2(){
unsigned long long increment_val = ::ldexp(1.0, 64);
at::Philox4_32 engine1(123, 0, increment_val);
at::Philox4_32 engine2(123, increment_val, increment_val);
engine2.incr_n(increment_val);
engine2.incr();
assert(engine1() == engine2());
}
void test_engine_offset2(){
testEngineOffset2<<<1, 1>>>();
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
TEST(CUDAGeneratorImpl, TestPhiloxEngineOffset2) {
// Test Description:
// Tests edge case at the end of the 2^190th value of the generator.
// launch one thread and create two engines
// make engine1 skip to the 2^64th 128 bit while being at thread 0
// make engine2 skip to the 2^64th 128 bit while being at 2^64th thread
// Assert that engine2 should be increment_val+1 steps behind engine1.
if (!at::cuda::is_available()) return;
test_engine_offset2();
cudaDeviceSynchronize();
bool isEQ = cudaGetLastError() == cudaSuccess;
ASSERT_TRUE(isEQ);
}
__global__ void testEngineOffset3(){
unsigned long long increment_val = ::ldexp(1.0, 64);
at::Philox4_32 engine1(123, 0, increment_val);
at::Philox4_32 engine2(123, 1, 0);
engine1.incr();
assert(engine1() == engine2());
}
void test_engine_offset3(){
  testEngineOffset3<<<1, 1>>>();
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
TEST(CUDAGeneratorImpl, TestPhiloxEngineOffset3) {
// Test Description:
// Tests edge case in between threads.
// launch one thread and create two engines
// make engine1 skip to the 2^64th 128 bit while being at thread 0
// start engine2 at thread 1, with offset 0
// Assert that engine1 is 1 step behind engine2.
if (!at::cuda::is_available()) return;
test_engine_offset3();
cudaDeviceSynchronize();
bool isEQ = cudaGetLastError() == cudaSuccess;
ASSERT_TRUE(isEQ);
}
__global__ void testEngineThreadIndex(){
at::Philox4_32 engine1(123456, 0, 4);
at::Philox4_32 engine2(123456, 1, 4);
assert(engine1() != engine2());
}
void test_engine_thread_index(){
testEngineThreadIndex<<<1, 1>>>();
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
TEST(CUDAGeneratorImpl, TestPhiloxEngineIndex) {
// Test Description:
// Tests if thread indexing is working properly.
// launch one thread and create two engines
// with different thread index but same offset.
// Assert that the engines have different sequences.
if (!at::cuda::is_available()) return;
test_engine_thread_index();
cudaDeviceSynchronize();
bool isEQ = cudaGetLastError() == cudaSuccess;
ASSERT_TRUE(isEQ);
}
/*
* CUDA Generator Tests
*/
TEST(CUDAGeneratorImpl, TestGeneratorDynamicCast) {
// Test Description: Check dynamic cast for CUDA
if (!at::cuda::is_available()) return;
auto foo = at::cuda::detail::createCUDAGenerator();
auto result = foo.get<CUDAGeneratorImpl>();
ASSERT_EQ(typeid(at::CUDAGeneratorImpl*).hash_code(), typeid(result).hash_code());
}
TEST(CUDAGeneratorImpl, TestDefaultGenerator) {
// Test Description:
// Check if default generator state is created only once
// address of generator should be same in all calls
if (!at::cuda::is_available()) return;
auto foo = at::cuda::detail::getDefaultCUDAGenerator();
auto bar = at::cuda::detail::getDefaultCUDAGenerator();
ASSERT_EQ(foo, bar);
if (c10::cuda::device_count() >= 2) {
foo = at::cuda::detail::getDefaultCUDAGenerator(1);
bar = at::cuda::detail::getDefaultCUDAGenerator(1);
ASSERT_EQ(foo, bar);
foo = at::cuda::detail::getDefaultCUDAGenerator(0);
bar = at::cuda::detail::getDefaultCUDAGenerator(1);
ASSERT_NE(foo, bar);
}
}
TEST(CUDAGeneratorImpl, TestCloning) {
// Test Description:
// Check cloning of new generators.
// Note that we don't allow cloning of other
// generator states into default generators.
if (!at::cuda::is_available()) return;
auto gen1 = at::cuda::detail::createCUDAGenerator();
gen1.set_current_seed(123); // modify gen1 state
auto cuda_gen1 = check_generator<CUDAGeneratorImpl>(gen1);
cuda_gen1->set_philox_offset_per_thread(4);
auto gen2 = at::cuda::detail::createCUDAGenerator();
gen2 = gen1.clone();
auto cuda_gen2 = check_generator<CUDAGeneratorImpl>(gen2);
ASSERT_EQ(gen1.current_seed(), gen2.current_seed());
ASSERT_EQ(
cuda_gen1->philox_offset_per_thread(),
cuda_gen2->philox_offset_per_thread()
);
}
void thread_func_get_set_current_seed(Generator generator) {
std::lock_guard<std::mutex> lock(generator.mutex());
auto current_seed = generator.current_seed();
current_seed++;
generator.set_current_seed(current_seed);
}
TEST(CUDAGeneratorImpl, TestMultithreadingGetSetCurrentSeed) {
// Test Description:
// Test current seed getter and setter are thread safe
// See Note [Acquire lock when using random generators]
if (!at::cuda::is_available()) return;
auto gen1 = at::cuda::detail::getDefaultCUDAGenerator();
auto initial_seed = gen1.current_seed();
std::thread t0{thread_func_get_set_current_seed, gen1};
std::thread t1{thread_func_get_set_current_seed, gen1};
std::thread t2{thread_func_get_set_current_seed, gen1};
t0.join();
t1.join();
t2.join();
ASSERT_EQ(gen1.current_seed(), initial_seed+3);
}
TEST(CUDAGeneratorImpl, TestRNGForking) {
// Test Description:
// Test that state of a generator can be frozen and
// restored
// See Note [Acquire lock when using random generators]
if (!at::cuda::is_available()) return;
auto default_gen = at::cuda::detail::getDefaultCUDAGenerator();
auto current_gen = at::cuda::detail::createCUDAGenerator();
{
std::lock_guard<std::mutex> lock(default_gen.mutex());
current_gen = default_gen.clone(); // capture the current state of default generator
}
auto target_value = at::randn({1000}, at::kCUDA);
// Dramatically alter the internal state of the main generator
auto x = at::randn({100000}, at::kCUDA);
auto forked_value = at::randn({1000}, current_gen, at::kCUDA);
ASSERT_EQ(target_value.sum().item<double>(), forked_value.sum().item<double>());
}
void makeRandomNumber() {
cudaSetDevice(std::rand() % 2);
auto x = at::randn({1000});
}
void testCudaRNGMultithread() {
auto threads = std::vector<std::thread>();
for (auto i = 0; i < 1000; i++) {
threads.emplace_back(makeRandomNumber);
}
for (auto& t : threads) {
t.join();
}
};
TEST(CUDAGeneratorImpl, TestMultithreadRNG) {
if (!at::cuda::is_available()) return;
testCudaRNGMultithread();
}
|
130aa548d6d1588e10def289085a8305067895f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#define N 50
#define B 2
#define T 32
__global__ void dl(int* in)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < N)
{
if(in[tid] % 2 == 0)
in[tid]++;
        __syncthreads(); // ouch: only threads with tid < N reach this barrier, so it sits in a divergent branch and can hang the block
int sum = in[tid];
if(tid > 0)
sum += in[tid-1];
if(tid < N - 1)
sum += in[tid+1];
in[tid] = sum / 3;
}
}
int main()
{
int* in = (int*) malloc(N*sizeof(int));
for(int i = 0; i < N; i++)
in[i] = i;
int* din;
hipMalloc((void**)&din, N*sizeof(int));
hipMemcpy(din, in, N*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( dl), dim3(B),dim3(T), 0, 0, din);
hipMemcpy(in, din, N*sizeof(int), hipMemcpyDeviceToHost);
for(int i = 0; i < N; i++)
printf("%d ", in[i]);
printf("\n");
free(in); hipFree(din);
} | 130aa548d6d1588e10def289085a8305067895f4.cu | #include <cstdio>
#define N 50
#define B 2
#define T 32
__global__ void dl(int* in)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < N)
{
if(in[tid] % 2 == 0)
in[tid]++;
        __syncthreads(); // ouch: only threads with tid < N reach this barrier, so it sits in a divergent branch and can hang the block
int sum = in[tid];
if(tid > 0)
sum += in[tid-1];
if(tid < N - 1)
sum += in[tid+1];
in[tid] = sum / 3;
}
}
int main()
{
int* in = (int*) malloc(N*sizeof(int));
for(int i = 0; i < N; i++)
in[i] = i;
int* din;
cudaMalloc((void**)&din, N*sizeof(int));
cudaMemcpy(din, in, N*sizeof(int), cudaMemcpyHostToDevice);
dl<<<B,T>>>(din);
cudaMemcpy(in, din, N*sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < N; i++)
printf("%d ", in[i]);
printf("\n");
free(in); cudaFree(din);
} |
2ee7a7e44621dee898307bcf8e992eb074fdfb4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/shuffle_channel_layer.hpp"
namespace caffe {
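// The shuffle below views the C channels of each sample as a (group_row x group_column)
// grid of length-`len` blocks and writes its transpose: input block (i, j) is copied to
// output block (j, i).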
template <typename Dtype>
__global__ void ShuffleChannelKernel(const int nthreads, const int feature_map_size,
Dtype *output, const Dtype *input, int group_row, int group_column, int len) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / group_row / group_column;
const int i = (index / group_column) % group_row;
const int j = index % group_column;
const Dtype* p_i = input + n * feature_map_size + (i * group_column + j) * len;
Dtype* p_o = output + n * feature_map_size + (j * group_row + i) * len;
for (int k = 0; k < len; k++)
p_o[k] = p_i[k];
}
}
template <typename Dtype>
void ShuffleChannelLayer<Dtype>::Resize_gpu(Dtype *output, const Dtype *input, int group_row, int group_column, int len)
{
for (int i = 0; i < group_row; ++i) // 2
{
for(int j = 0; j < group_column ; ++j) // 3
{
const Dtype* p_i = input + (i * group_column + j ) * len;
Dtype* p_o = output + (j * group_row + i ) * len;
caffe_copy(len, p_i, p_o);
}
}
}
template <typename Dtype>
void ShuffleChannelLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int num = bottom[0]->num();
const int feature_map_size = bottom[0]->count(1);
const int sp_sz = bottom[0]->count(2);
const int chs = bottom[0]->channels();
int group_row = group_;
int group_column = int(chs / group_row);
CHECK_EQ(chs, (group_column * group_row)) << "Wrong group size.";
int count = num * group_column * group_row;
ShuffleChannelKernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, feature_map_size, top_data, bottom_data, group_row, group_column, sp_sz);
//Dtype* temp_data = temp_blob_.mutable_gpu_data();
//for(int n = 0; n < num; ++n)
//{
// Resize_gpu(top_data + n*feature_map_size, bottom_data + n*feature_map_size, group_row, group_column, sp_sz);
//}
//caffe_copy(bottom[0]->count(), temp_blob_.gpu_data(), top_data);
}
template <typename Dtype>
void ShuffleChannelLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int num = bottom[0]->num();
const int feature_map_size = bottom[0]->count(1);
const int sp_sz = bottom[0]->count(2);
const int chs = bottom[0]->channels();
int group_row = int(chs / group_);
int group_column = group_;
int count = num * group_column * group_row;
ShuffleChannelKernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, feature_map_size, bottom_diff, top_diff, group_row, group_column, sp_sz);
//Dtype* temp_diff = temp_blob_.mutable_gpu_diff();
// for(int n = 0; n < num; ++n)
// {
//Resize_gpu(bottom_diff + n * feature_map_size, top_diff + n*feature_map_size, group_row, group_column, sp_sz);
// }
//caffe_copy(top[0]->count(), temp_blob_.gpu_diff(), bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ShuffleChannelLayer);
} // namespace caffe
| 2ee7a7e44621dee898307bcf8e992eb074fdfb4a.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/shuffle_channel_layer.hpp"
namespace caffe {
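// The shuffle below views the C channels of each sample as a (group_row x group_column)
// grid of length-`len` blocks and writes its transpose: input block (i, j) is copied to
// output block (j, i).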
template <typename Dtype>
__global__ void ShuffleChannelKernel(const int nthreads, const int feature_map_size,
Dtype *output, const Dtype *input, int group_row, int group_column, int len) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / group_row / group_column;
const int i = (index / group_column) % group_row;
const int j = index % group_column;
const Dtype* p_i = input + n * feature_map_size + (i * group_column + j) * len;
Dtype* p_o = output + n * feature_map_size + (j * group_row + i) * len;
for (int k = 0; k < len; k++)
p_o[k] = p_i[k];
}
}
template <typename Dtype>
void ShuffleChannelLayer<Dtype>::Resize_gpu(Dtype *output, const Dtype *input, int group_row, int group_column, int len)
{
for (int i = 0; i < group_row; ++i) // 2
{
for(int j = 0; j < group_column ; ++j) // 3
{
const Dtype* p_i = input + (i * group_column + j ) * len;
Dtype* p_o = output + (j * group_row + i ) * len;
caffe_copy(len, p_i, p_o);
}
}
}
template <typename Dtype>
void ShuffleChannelLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int num = bottom[0]->num();
const int feature_map_size = bottom[0]->count(1);
const int sp_sz = bottom[0]->count(2);
const int chs = bottom[0]->channels();
int group_row = group_;
int group_column = int(chs / group_row);
CHECK_EQ(chs, (group_column * group_row)) << "Wrong group size.";
int count = num * group_column * group_row;
ShuffleChannelKernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, feature_map_size, top_data, bottom_data, group_row, group_column, sp_sz);
//Dtype* temp_data = temp_blob_.mutable_gpu_data();
//for(int n = 0; n < num; ++n)
//{
// Resize_gpu(top_data + n*feature_map_size, bottom_data + n*feature_map_size, group_row, group_column, sp_sz);
//}
//caffe_copy(bottom[0]->count(), temp_blob_.gpu_data(), top_data);
}
template <typename Dtype>
void ShuffleChannelLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int num = bottom[0]->num();
const int feature_map_size = bottom[0]->count(1);
const int sp_sz = bottom[0]->count(2);
const int chs = bottom[0]->channels();
int group_row = int(chs / group_);
int group_column = group_;
int count = num * group_column * group_row;
ShuffleChannelKernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, feature_map_size, bottom_diff, top_diff, group_row, group_column, sp_sz);
//Dtype* temp_diff = temp_blob_.mutable_gpu_diff();
// for(int n = 0; n < num; ++n)
// {
//Resize_gpu(bottom_diff + n * feature_map_size, top_diff + n*feature_map_size, group_row, group_column, sp_sz);
// }
//caffe_copy(top[0]->count(), temp_blob_.gpu_diff(), bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ShuffleChannelLayer);
} // namespace caffe
|
71cd0d39122de5f57d07d9ae95a83adbd5587259.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <hip/hip_runtime.h>
#include "contrib_ops/cuda/math/binary_elementwise_ops_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh"
namespace onnxruntime {
namespace contrib {
namespace cuda {
#define OP(name, expr) \
template <class T> \
struct OP_##name { \
__device__ __inline__ T operator()(T a, T b) const { \
return (expr); \
} \
};
#define CONTRIB_BINARY_ELEMENTWISE_IMPL(name) \
CONTRIB_BINARY_ELEMENTWISE_IMPL_DECLARATION(name) { \
BinaryElementWiseImpl(stream, \
output_rank_or_simple_broadcast, \
lhs_padded_strides, \
lhs_data, \
rhs_padded_strides, \
rhs_data, \
fdm_output_strides, \
fdm_H, \
fdm_C, \
output_data, \
OP_##name<T>(), \
count); \
}
#define CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, T) \
template void Impl_##x<T>(hipStream_t stream, \
int32_t output_rank, \
const TArray<int64_t>* lhs_padded_strides, \
const T* lhs_data, \
const TArray<int64_t>* rhs_padded_strides, \
const T* rhs_data, \
const TArray<onnxruntime::cuda::fast_divmod>* fdm_output_strides, \
const onnxruntime::cuda::fast_divmod& fdm_H, \
const onnxruntime::cuda::fast_divmod& fdm_C, \
T* output_data, size_t count);
#define CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(x) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint32_t) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint64_t) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
#define CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL_OIL(x) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, bool) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t)
#define CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(x) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
// create declarations for op and impl
#define CONTRIB_BINARY_OP_NAME_EXPR(name, expr) \
OP(name, expr) \
CONTRIB_BINARY_ELEMENTWISE_IMPL(name)
CONTRIB_BINARY_OPS()
#undef CONTRIB_BINARY_OP_NAME_EXPR
// create specialized impl
// the postfix of the macro name indicates the types supported by the op:
// B: uint8_t
// W: uint16_t
// U: uint32_t
// Z: uint64_t
// C: int8_t
// S: int16_t
// I: int32_t
// L: int64_t
// H: float16
// F: float
// D: double
// O: bool
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(BiasGelu)
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 71cd0d39122de5f57d07d9ae95a83adbd5587259.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cuda_runtime.h>
#include "contrib_ops/cuda/math/binary_elementwise_ops_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh"
namespace onnxruntime {
namespace contrib {
namespace cuda {
#define OP(name, expr) \
template <class T> \
struct OP_##name { \
__device__ __inline__ T operator()(T a, T b) const { \
return (expr); \
} \
};
#define CONTRIB_BINARY_ELEMENTWISE_IMPL(name) \
CONTRIB_BINARY_ELEMENTWISE_IMPL_DECLARATION(name) { \
BinaryElementWiseImpl(stream, \
output_rank_or_simple_broadcast, \
lhs_padded_strides, \
lhs_data, \
rhs_padded_strides, \
rhs_data, \
fdm_output_strides, \
fdm_H, \
fdm_C, \
output_data, \
OP_##name<T>(), \
count); \
}
#define CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, T) \
template void Impl_##x<T>(cudaStream_t stream, \
int32_t output_rank, \
const TArray<int64_t>* lhs_padded_strides, \
const T* lhs_data, \
const TArray<int64_t>* rhs_padded_strides, \
const T* rhs_data, \
const TArray<onnxruntime::cuda::fast_divmod>* fdm_output_strides, \
const onnxruntime::cuda::fast_divmod& fdm_H, \
const onnxruntime::cuda::fast_divmod& fdm_C, \
T* output_data, size_t count);
#define CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(x) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint32_t) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint64_t) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
#define CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL_OIL(x) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, bool) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t)
#define CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(x) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
// create declarations for op and impl
#define CONTRIB_BINARY_OP_NAME_EXPR(name, expr) \
OP(name, expr) \
CONTRIB_BINARY_ELEMENTWISE_IMPL(name)
CONTRIB_BINARY_OPS()
#undef CONTRIB_BINARY_OP_NAME_EXPR
// create specialized impl
// the postfix of the macro name indicates the types supported by the op:
// B: uint8_t
// W: uint16_t
// U: uint32_t
// Z: uint64_t
// C: int8_t
// S: int16_t
// I: int32_t
// L: int64_t
// H: float16
// F: float
// D: double
// O: bool
CONTRIB_SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(BiasGelu)
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
a4a3c88a994d9483dbec44b13e46a3b7525df5d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zmgecsrmv.cu, normal z -> c, Tue Aug 30 09:38:45 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
__global__ void
cmgecsrmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
extern __shared__ magmaFloatComplex dot[];
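    // dot[] is the dynamically sized shared-memory buffer of blockDim.x * num_vecs
    // partial sums; this thread's running sum for vector i lives at
    // dot[ threadIdx.x + i*blockDim.x ].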
if( row<num_rows ){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0);
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++ ){
int col = dcolind [ j ];
magmaFloatComplex val = dval[ j ];
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[ col + i*num_cols ];
}
for( int i=0; i<num_vecs; i++ )
dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ]
+ beta * dy[ row + i*num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is CSR.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
    num_vecs    magma_int_t
number of vectors
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
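/*
    Illustrative call (a sketch, not from the original source; m, dval, drowptr,
    dcolind, dx, dy and queue are assumed to be set up already): apply a square
    CSR matrix to 4 right-hand sides stored column-major in dx, accumulating
    y = 2*A*x + y:

        magma_cmgecsrmv( MagmaNoTrans, m, m, 4,
                         MAGMA_C_MAKE(2.0f, 0.0f), dval, drowptr, dcolind,
                         dx, MAGMA_C_ONE, dy, queue );
*/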
extern "C" magma_int_t
magma_cmgecsrmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1);
magma_int_t threads = BLOCK_SIZE;
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaFloatComplex ); // num_vecs vectors
hipLaunchKernelGGL(( cmgecsrmv_kernel), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream(),
m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy);
return MAGMA_SUCCESS;
}
| a4a3c88a994d9483dbec44b13e46a3b7525df5d1.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zmgecsrmv.cu, normal z -> c, Tue Aug 30 09:38:45 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
__global__ void
cmgecsrmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
extern __shared__ magmaFloatComplex dot[];
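    // dot[] is the dynamically sized shared-memory buffer of blockDim.x * num_vecs
    // partial sums; this thread's running sum for vector i lives at
    // dot[ threadIdx.x + i*blockDim.x ].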
if( row<num_rows ){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0);
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++ ){
int col = dcolind [ j ];
magmaFloatComplex val = dval[ j ];
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[ col + i*num_cols ];
}
for( int i=0; i<num_vecs; i++ )
dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ]
+ beta * dy[ row + i*num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is CSR.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
    num_vecs    magma_int_t
number of vectors
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
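/*
    Illustrative call (a sketch, not from the original source; m, dval, drowptr,
    dcolind, dx, dy and queue are assumed to be set up already): apply a square
    CSR matrix to 4 right-hand sides stored column-major in dx, accumulating
    y = 2*A*x + y:

        magma_cmgecsrmv( MagmaNoTrans, m, m, 4,
                         MAGMA_C_MAKE(2.0f, 0.0f), dval, drowptr, dcolind,
                         dx, MAGMA_C_ONE, dy, queue );
*/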
extern "C" magma_int_t
magma_cmgecsrmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1);
magma_int_t threads = BLOCK_SIZE;
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaFloatComplex ); // num_vecs vectors
cmgecsrmv_kernel<<< grid, threads, MEM_SIZE, queue->cuda_stream()>>>
(m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy);
return MAGMA_SUCCESS;
}
|
ec548fa873656a1575c6a29919c66a5f5662a2d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// 2nd order FD scheme for gradient with uniform mesh spacing
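// Interior points use the 2nd-order central difference (f[i+1] - f[i-1]) / (2*dx);
// the two boundary points fall back to first-order one-sided differences.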
#include <stdio.h>
__constant__ int mx, my, mz;
__constant__ double dxInv, dyInv, dzInv;
__global__ void gradient_x(double* f, double* df) {
//f: vector of function values
//df: vector of derivatives (output)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z;
int globalIdx = k * mx * my + j * mx + i;
if (i > 0 && i < mx-1) {
df[globalIdx] = (-0.5*f[globalIdx-1] + 0.5*f[globalIdx+1])*dxInv;
} else if (i == 0 && mx > 1) {
df[globalIdx] = (-f[globalIdx] + f[globalIdx+1])*dxInv;
} else if (i == mx-1 && mx > 1) {
df[globalIdx] = (-f[globalIdx-1] + f[globalIdx])*dxInv;
}
}
__global__ void gradient_y(double* f, double* df) {
//f: vector of function values
//df: vector of derivatives (output)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z;
int globalIdx = k * mx * my + j * mx + i;
if (j > 0 && j < my-1) {
df[globalIdx] = (-0.5*f[globalIdx-mx] + 0.5*f[globalIdx+mx])*dyInv;
} else if (j == 0 && my > 1) {
df[globalIdx] = (-f[globalIdx] + f[globalIdx+mx])*dyInv;
} else if (j == my-1 && my > 1) {
df[globalIdx] = (-f[globalIdx-mx] + f[globalIdx])*dyInv;
}
}
__global__ void gradient_z(double* f, double* df) {
//f: vector of function values
//df: vector of derivatives (output)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z;
int globalIdx = k * mx * my + j * mx + i;
if (k > 0 && k < mz-1) {
df[globalIdx] = (-0.5*f[globalIdx-mx*my] + 0.5*f[globalIdx+mx*my])*dzInv;
} else if (k == 0 && mz > 1) {
df[globalIdx] = (-f[globalIdx] + f[globalIdx+mx*my])*dzInv;
} else if (k == mz-1 && mz > 1) {
df[globalIdx] = (-f[globalIdx-mx*my] + f[globalIdx])*dzInv;
}
}
| ec548fa873656a1575c6a29919c66a5f5662a2d0.cu | // 2nd order FD scheme for gradient with uniform mesh spacing
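// Interior points use the 2nd-order central difference (f[i+1] - f[i-1]) / (2*dx);
// the two boundary points fall back to first-order one-sided differences.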
#include <stdio.h>
__constant__ int mx, my, mz;
__constant__ double dxInv, dyInv, dzInv;
__global__ void gradient_x(double* f, double* df) {
//f: vector of function values
//df: vector of derivatives (output)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z;
int globalIdx = k * mx * my + j * mx + i;
if (i > 0 && i < mx-1) {
df[globalIdx] = (-0.5*f[globalIdx-1] + 0.5*f[globalIdx+1])*dxInv;
} else if (i == 0 && mx > 1) {
df[globalIdx] = (-f[globalIdx] + f[globalIdx+1])*dxInv;
} else if (i == mx-1 && mx > 1) {
df[globalIdx] = (-f[globalIdx-1] + f[globalIdx])*dxInv;
}
}
__global__ void gradient_y(double* f, double* df) {
//f: vector of function values
//df: vector of derivatives (output)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z;
int globalIdx = k * mx * my + j * mx + i;
if (j > 0 && j < my-1) {
df[globalIdx] = (-0.5*f[globalIdx-mx] + 0.5*f[globalIdx+mx])*dyInv;
} else if (j == 0 && my > 1) {
df[globalIdx] = (-f[globalIdx] + f[globalIdx+mx])*dyInv;
} else if (j == my-1 && my > 1) {
df[globalIdx] = (-f[globalIdx-mx] + f[globalIdx])*dyInv;
}
}
__global__ void gradient_z(double* f, double* df) {
//f: vector of function values
//df: vector of derivatives (output)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z;
int globalIdx = k * mx * my + j * mx + i;
if (k > 0 && k < mz-1) {
df[globalIdx] = (-0.5*f[globalIdx-mx*my] + 0.5*f[globalIdx+mx*my])*dzInv;
} else if (k == 0 && mz > 1) {
df[globalIdx] = (-f[globalIdx] + f[globalIdx+mx*my])*dzInv;
} else if (k == mz-1 && mz > 1) {
df[globalIdx] = (-f[globalIdx-mx*my] + f[globalIdx])*dzInv;
}
}
|
6f4fb61cc2c5971428ffdc8376e55b4f1cf94221.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <getopt.h>
#include "../graph_builder/GraphBuilder.h"
#include "../graph_builder/Graph.h"
#include "../util/Timer.h"
#include "../util/CudaHelper.h"
#include "../util/DeviceMemoryCUDA.h"
#include "pr.cuh"
#define ITERATION (20)
int main(int argc, char** argv) {
std::string inputFile;
// ----- Parse command line argument -----
extern char* optarg;
    int c; // getopt() returns int; storing the result in a plain char makes the != -1 comparison unreliable where char is unsigned
while ((c = getopt(argc, argv, "f:")) != -1) {
switch (c) {
case 'f':
inputFile = std::string(optarg);
break;
default:
printf("wrong argument\n");
exit(-1);
}
}
//--- Build graph ---
Graph* g = (new GraphBuilder(inputFile))->buildGraph();
g->print();
unsigned numOfNodes = g->getNumOfNodes();
unsigned numOfEdges = g->getNumOfEdges();
unsigned numOfVirtualNodes = g->getNumOfVirtualNodes();
// ----- DeviceMemory initialization for graph representation -----
DeviceMemory<uint32_t> d_vid_list(numOfVirtualNodes);
DeviceMemory<uint32_t> d_edge_list(numOfEdges);
// ----- DeviceMemory initialization for application-specific data -----
DeviceMemory<float> d_contrib(numOfNodes);
DeviceMemory<float> d_incoming_total(numOfNodes);
DeviceMemory<int> d_out_degrees(numOfNodes);
// Host Side
int* h_outDegrees = g->getOutDegrees();
float* h_incomingTotal = new float[numOfNodes];
float* h_contrib = new float[numOfNodes];
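    // contrib[v] is the rank mass v pushes to each neighbour: the uniform initial rank
    // 1/N divided by v's out-degree (this assumes every vertex has out-degree > 0).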
#pragma omp parallel for
for (uint32_t i = 0; i < numOfNodes; ++i) {
h_incomingTotal[i] = 0;
h_contrib[i] = (1.0f / numOfNodes) / h_outDegrees[i];
}
// memcopy from host to device
d_vid_list = g->getVertexIDList();
d_edge_list = g->getEdgeList();
d_contrib = h_contrib;
d_incoming_total = h_incomingTotal;
d_out_degrees = g->getOutDegrees();
// ----- run graph application -----
hipStream_t st[6];
for (int i = 0; i < 6; ++i)
hipStreamCreate(&st[i]);
uint32_t degree_list[6] = {1, 2, 4, 8, 16, 32};
uint32_t vertex_offset[6];
uint32_t edge_offset[6];
uint32_t vertex_offset_sum = 0;
uint32_t edge_offset_sum = 0;
for (uint32_t i = 0; i < 6; ++i) {
vertex_offset[i] = vertex_offset_sum;
edge_offset[i] = edge_offset_sum;
vertex_offset_sum += g->getNumOfVirtualNodesAt(i);
edge_offset_sum += (g->getNumOfVirtualNodesAt(i) * degree_list[i]);
}
Timer t;
t.start();
for (int iter = 0; iter < ITERATION; ++iter) {
for (uint32_t i = 0; i < 6; ++i) {
uint32_t numOfCurrentVirtualNodes = g->getNumOfVirtualNodesAt(i);
uint32_t numBlocks = (numOfCurrentVirtualNodes % 512 == 0) ?
numOfCurrentVirtualNodes / 512:
numOfCurrentVirtualNodes / 256 + 1;
if (numBlocks != 0) {
hipLaunchKernelGGL(( pr), dim3(numBlocks), dim3(512), 0, st[i],
d_incoming_total.getDevicePtr(),
d_contrib.getDevicePtr(),
d_vid_list.getDevicePtr() + vertex_offset[i],
d_edge_list.getDevicePtr() + edge_offset[i],
degree_list[i],
i,
numOfCurrentVirtualNodes);
}
}
cudaErrorCheck(hipDeviceSynchronize());
hipLaunchKernelGGL(( pr_update), dim3(numOfNodes / 1024 + 1), dim3(1024), 0, 0,
d_incoming_total.getDevicePtr(),
d_contrib.getDevicePtr(),
d_out_degrees.getDevicePtr(),
(1.0f - 0.85) / numOfNodes,
numOfNodes);
cudaErrorCheck(hipDeviceSynchronize());
}
t.stop();
return 0;
}
| 6f4fb61cc2c5971428ffdc8376e55b4f1cf94221.cu | #include <iostream>
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda.h>
#include <getopt.h>
#include "../graph_builder/GraphBuilder.h"
#include "../graph_builder/Graph.h"
#include "../util/Timer.h"
#include "../util/CudaHelper.h"
#include "../util/DeviceMemoryCUDA.h"
#include "pr.cuh"
#define ITERATION (20)
int main(int argc, char** argv) {
std::string inputFile;
// ----- Parse command line argument -----
extern char* optarg;
    int c; // getopt() returns int; storing the result in a plain char makes the != -1 comparison unreliable where char is unsigned
while ((c = getopt(argc, argv, "f:")) != -1) {
switch (c) {
case 'f':
inputFile = std::string(optarg);
break;
default:
printf("wrong argument\n");
exit(-1);
}
}
//--- Build graph ---
Graph* g = (new GraphBuilder(inputFile))->buildGraph();
g->print();
unsigned numOfNodes = g->getNumOfNodes();
unsigned numOfEdges = g->getNumOfEdges();
unsigned numOfVirtualNodes = g->getNumOfVirtualNodes();
// ----- DeviceMemory initialization for graph representation -----
DeviceMemory<uint32_t> d_vid_list(numOfVirtualNodes);
DeviceMemory<uint32_t> d_edge_list(numOfEdges);
// ----- DeviceMemory initialization for application-specific data -----
DeviceMemory<float> d_contrib(numOfNodes);
DeviceMemory<float> d_incoming_total(numOfNodes);
DeviceMemory<int> d_out_degrees(numOfNodes);
// Host Side
int* h_outDegrees = g->getOutDegrees();
float* h_incomingTotal = new float[numOfNodes];
float* h_contrib = new float[numOfNodes];
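    // contrib[v] is the rank mass v pushes to each neighbour: the uniform initial rank
    // 1/N divided by v's out-degree (this assumes every vertex has out-degree > 0).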
#pragma omp parallel for
for (uint32_t i = 0; i < numOfNodes; ++i) {
h_incomingTotal[i] = 0;
h_contrib[i] = (1.0f / numOfNodes) / h_outDegrees[i];
}
// memcopy from host to device
d_vid_list = g->getVertexIDList();
d_edge_list = g->getEdgeList();
d_contrib = h_contrib;
d_incoming_total = h_incomingTotal;
d_out_degrees = g->getOutDegrees();
// ----- run graph application -----
cudaStream_t st[6];
for (int i = 0; i < 6; ++i)
cudaStreamCreate(&st[i]);
uint32_t degree_list[6] = {1, 2, 4, 8, 16, 32};
uint32_t vertex_offset[6];
uint32_t edge_offset[6];
uint32_t vertex_offset_sum = 0;
uint32_t edge_offset_sum = 0;
for (uint32_t i = 0; i < 6; ++i) {
vertex_offset[i] = vertex_offset_sum;
edge_offset[i] = edge_offset_sum;
vertex_offset_sum += g->getNumOfVirtualNodesAt(i);
edge_offset_sum += (g->getNumOfVirtualNodesAt(i) * degree_list[i]);
}
Timer t;
t.start();
for (int iter = 0; iter < ITERATION; ++iter) {
for (uint32_t i = 0; i < 6; ++i) {
uint32_t numOfCurrentVirtualNodes = g->getNumOfVirtualNodesAt(i);
uint32_t numBlocks = (numOfCurrentVirtualNodes % 512 == 0) ?
numOfCurrentVirtualNodes / 512:
numOfCurrentVirtualNodes / 256 + 1;
if (numBlocks != 0) {
pr<<<numBlocks, 512, 0, st[i]>>>(
d_incoming_total.getDevicePtr(),
d_contrib.getDevicePtr(),
d_vid_list.getDevicePtr() + vertex_offset[i],
d_edge_list.getDevicePtr() + edge_offset[i],
degree_list[i],
i,
numOfCurrentVirtualNodes);
}
}
cudaErrorCheck(cudaDeviceSynchronize());
pr_update<<<numOfNodes / 1024 + 1, 1024>>>(
d_incoming_total.getDevicePtr(),
d_contrib.getDevicePtr(),
d_out_degrees.getDevicePtr(),
(1.0f - 0.85) / numOfNodes,
numOfNodes);
cudaErrorCheck(cudaDeviceSynchronize());
}
t.stop();
return 0;
}
|
360dc72ee16e5d3d457e4e74c3eae657c0fa6a5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<cuda_runtime.h>
__global__ void add(int a, int b, int* c)
{
*c = a + b;
}
int main(void)
{
int c;
int* dev_c;
hipMalloc((void**)&dev_c, sizeof(int));
hipLaunchKernelGGL(( add) , dim3(1), dim3(1) , 0, 0, 2, 7, dev_c);
hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
printf("%d + %d = %d\n", 2, 7, c);
hipFree(dev_c);
return 0;
} | 360dc72ee16e5d3d457e4e74c3eae657c0fa6a5d.cu | #include<iostream>
#include<cuda_runtime.h>
__global__ void add(int a, int b, int* c)
{
*c = a + b;
}
int main(void)
{
int c;
int* dev_c;
cudaMalloc((void**)&dev_c, sizeof(int));
add <<<1, 1 >>> (2, 7, dev_c);
cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d + %d = %d\n", 2, 7, c);
cudaFree(dev_c);
return 0;
} |
f2407f6633ef22c5dc0d8e8ed97f9b20a5571490.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void knapsackGPU2(int* dp, int* d_value, int* d_weight, int capacity,int n)
{
int in = threadIdx.x + (blockDim.x * blockIdx.x);
for (int row = 0;row <= n;row++)
{
if (row != 0)
{
int ind = in + (row * (capacity + 1));
        if (in <= capacity && in > 0) // valid columns are 0..capacity; in == capacity + 1 would index into the next row
{
if (in >= d_weight[row - 1])
{
dp[ind] = dp[ind - (capacity + 1)] > (d_value[row - 1] + dp[ind - (capacity + 1) - d_weight[row - 1]]) ? dp[ind - (capacity + 1)] : (d_value[row - 1] + dp[ind - (capacity + 1) - d_weight[row - 1]]);
}
else
dp[ind] = dp[ind - (capacity + 1)];
}
if (in == 0)
{
dp[ind] = 0;
}
}
else
{
dp[in] = 0;
}
}
} | f2407f6633ef22c5dc0d8e8ed97f9b20a5571490.cu | #include "includes.h"
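// dp is a row-major (n + 1) x (capacity + 1) table: dp[row * (capacity + 1) + w] is the
// best value reachable with the first `row` items and remaining capacity w (the usual
// 0/1 knapsack recurrence, computed with one thread per capacity column).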
__global__ void knapsackGPU2(int* dp, int* d_value, int* d_weight, int capacity,int n)
{
int in = threadIdx.x + (blockDim.x * blockIdx.x);
for (int row = 0;row <= n;row++)
{
if (row != 0)
{
int ind = in + (row * (capacity + 1));
        if (in <= capacity && in > 0) // valid columns are 0..capacity; in == capacity + 1 would index into the next row
{
if (in >= d_weight[row - 1])
{
dp[ind] = dp[ind - (capacity + 1)] > (d_value[row - 1] + dp[ind - (capacity + 1) - d_weight[row - 1]]) ? dp[ind - (capacity + 1)] : (d_value[row - 1] + dp[ind - (capacity + 1) - d_weight[row - 1]]);
}
else
dp[ind] = dp[ind - (capacity + 1)];
}
if (in == 0)
{
dp[ind] = 0;
}
}
else
{
dp[in] = 0;
}
}
} |
328c4b930643be8704c451ab61aa5aae639b8b6d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
typedef uint64_t u64;
/* kernel input:
* E: adjacency matrix as bit mask, (E[i]>>j & 1) is E(i,j)
* n: number of vertices. must be 5 ~ 64
* ans: output answer, ans[i*64+j] is j+1 sparsest subgraph from i-th block
* lv: tweak the kernel. must be 1 ~ n-4 and less than 7
*/
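// Bits 4 .. lv+3 of the vertex mask are walked in Gray-code order (i ^ i>>1), so
// consecutive masks differ by a single vertex and the running edge count `good` is
// updated incrementally; the lowest four vertices are covered by the unrolled
// 16-step Gray walk (bu[] and E0..E3) inside the loop body.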
__global__ void subgraph_kern(u64 *E, int n, int *ans, int lv) {
const int max_lv = 7;
int laneid = threadIdx.x & 31;
int tid = threadIdx.x;
int gid = tid + blockDim.x * blockIdx.x;
int gsize = blockDim.x * gridDim.x;
__shared__ int s[64][32];
for (int i = tid>>5; i < 64; i += blockDim.x>>5) {
s[i][laneid] = 100000;
}
__syncthreads();
int good = 0;
int bu[16] = {0};
for (int i = 1; i < 16; i++) {
bu[i] = __popc(E[31-__clz(i&-i)] & (i ^ i>>1));
}
for (u64 t = gid; t < 1ull<<(n-(lv+4)); t += gsize) {
int s0, s1, s2, s3, s4;
s0 = s1 = s2 = s3 = s4 = 100000;
// shift register, to reduce shared memory usage
int sL[max_lv], sR[max_lv];
for (int j = 0; j < max_lv; j++) sL[j] = 100000;
for (int j = 0; j < max_lv; j++) sR[j] = 100000;
// get subproblem
u64 actual = t<<(lv+4);
good = 0;
for (int j = lv+4; j < n; j++) {
if (actual>>j & 1) {
good += __popcll(actual & E[j]);
}
}
good >>= 1;
for (int i = 0; i < 1<<lv; i += 1) {
u64 mask = actual + ((i ^ i>>1) << 4);
if (i) {
int z = 31-__clz(i&-i);
int diff = __popcll(E[z+4] & mask);
if ((i ^ i>>1)>>z & 1) {
// add vertex
good += diff;
#pragma loop unroll
for (int j = max_lv-1; j > 0; j--) sL[j] = sL[j-1];
sL[0] = s0;
s0 = s1; s1 = s2; s2 = s3; s3 = s4;
s4 = sR[0];
#pragma loop unroll
for (int j = 0; j < max_lv-1; j++) sR[j] = sR[j+1];
}
else {
// remove vertex
good -= diff;
#pragma unroll
for (int j = max_lv-1; j > 0; j--) sR[j] = sR[j-1];
sR[0] = s4;
s4 = s3; s3 = s2; s2 = s1; s1 = s0;
s0 = sL[0];
#pragma unroll
for (int j = 0; j < max_lv-1; j++) sL[j] = sL[j+1];
}
}
int g = good;
int E0 = __popcll(E[0] & mask);
int E1 = __popcll(E[1] & mask);
int E2 = __popcll(E[2] & mask);
int E3 = __popcll(E[3] & mask);
if (g < s0) s0 = g;
g += E0 + bu[1];
if (g < s1) s1 = g;
g += E1 + bu[2];
if (g < s2) s2 = g;
g -= E0 + bu[3];
if (g < s1) s1 = g;
g += E2 + bu[4];
if (g < s2) s2 = g;
g += E0 + bu[5];
if (g < s3) s3 = g;
g -= E1 + bu[6];
if (g < s2) s2 = g;
g -= E0 + bu[7];
if (g < s1) s1 = g;
g += E3 + bu[8];
if (g < s2) s2 = g;
g += E0 + bu[9];
if (g < s3) s3 = g;
g += E1 + bu[10];
if (g < s4) s4 = g;
g -= E0 + bu[11];
if (g < s3) s3 = g;
g -= E2 + bu[12];
if (g < s2) s2 = g;
g += E0 + bu[13];
if (g < s3) s3 = g;
g -= E1 + bu[14];
if (g < s2) s2 = g;
g -= E0 + bu[15];
if (g < s1) s1 = g;
}
int b = __popcll(actual);
if (b) atomicMin(&s[b-1][laneid], sL[0]);
atomicMin(&s[b+0][laneid], s0);
atomicMin(&s[b+1][laneid], s1);
atomicMin(&s[b+2][laneid], s2);
atomicMin(&s[b+3][laneid], s3);
atomicMin(&s[b+4][laneid], s4);
for (int j = 0; j < max_lv-1; j++) {
atomicMin(&s[b+5+j][laneid], sR[j]);
}
}
// combine result from each thread
__syncthreads();
for (int step = 16; step >= 1; step>>=1) {
for (int i = tid>>5; i < 64; i += blockDim.x>>5) {
if (laneid < step)
s[i][laneid] = min(s[i][laneid], s[i][laneid+step]);
}
__syncthreads();
}
for (int i = tid; i < 64; i += blockDim.x) {
ans[blockIdx.x * 64 + i] = s[i][0];
}
}
int mypopcnt(unsigned x) {
int n = 0;
for (int i = 0; i < 32; i++) {
n += x>>i & 1;
}
return n;
}
int main() {
int n;
u64 E[64];
std::cin >> n;
if (n < 0 || n > 64) exit(1);
for(int i=0;i<n;i++){
u64 good = 0, y = 0;
for(int j=0;j<n;j++){
u64 w;
if (std::cin>>w) good = 1;
y|=w<<j;
}
if (good) E[i] = y;
}
int blocksize = 50;
int lv = n - 12;
int *ans;
if (lv < 1) lv = 1;
if (lv > 7) lv = 7;
if (n < 19) blocksize = 1;
else if (n < 25) blocksize = 1<<(n-19);
ans = new int[64 * blocksize];
if (n >= 5) {
u64 *gpu_E;
int *gpu_ans;
hipMalloc(&gpu_E, sizeof(u64) * 64);
hipMalloc(&gpu_ans, sizeof(int) * 64 * blocksize);
hipMemcpy(gpu_E, E, sizeof(u64) * 64, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( subgraph_kern), dim3(blocksize), dim3(256), 0, 0, gpu_E, n, gpu_ans, lv);
hipMemcpy(ans, gpu_ans, sizeof(int) * 64 * blocksize, hipMemcpyDeviceToHost);
for (int i = 1; i < blocksize; i++) {
for (int j = 0; j < 64; j++) {
if (ans[i*64+j] < ans[j]) ans[j] = ans[i*64+j];
}
}
}
else {
// small case
for (int j = 0; j < 64; j++) ans[j] = 100000;
for (int i = 0; i < 1<<n; i++) {
int z = mypopcnt(i);
int sum = 0;
for (int j = 0; j < n; j++) {
if (i>>j & 1)
sum += mypopcnt(i & E[j]);
}
sum >>= 1;
if (z && sum < ans[z-1]) ans[z-1] = sum;
}
}
for (int i = 1; i < n; i++) {
std::cout << i+1 << ' ' << ans[i] << '\n';
}
return 0;
}
| 328c4b930643be8704c451ab61aa5aae639b8b6d.cu | #include<iostream>
typedef uint64_t u64;
/* kernel input:
* E: adjacency matrix as bit mask, (E[i]>>j & 1) is E(i,j)
* n: number of vertices. must be 5 ~ 64
* ans: output answer, ans[i*64+j] is j+1 sparsest subgraph from i-th block
* lv: tweak the kernel. must be 1 ~ n-4 and less than 7
*/
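// Bits 4 .. lv+3 of the vertex mask are walked in Gray-code order (i ^ i>>1), so
// consecutive masks differ by a single vertex and the running edge count `good` is
// updated incrementally; the lowest four vertices are covered by the unrolled
// 16-step Gray walk (bu[] and E0..E3) inside the loop body.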
__global__ void subgraph_kern(u64 *E, int n, int *ans, int lv) {
const int max_lv = 7;
int laneid = threadIdx.x & 31;
int tid = threadIdx.x;
int gid = tid + blockDim.x * blockIdx.x;
int gsize = blockDim.x * gridDim.x;
__shared__ int s[64][32];
for (int i = tid>>5; i < 64; i += blockDim.x>>5) {
s[i][laneid] = 100000;
}
__syncthreads();
int good = 0;
int bu[16] = {0};
for (int i = 1; i < 16; i++) {
bu[i] = __popc(E[31-__clz(i&-i)] & (i ^ i>>1));
}
for (u64 t = gid; t < 1ull<<(n-(lv+4)); t += gsize) {
int s0, s1, s2, s3, s4;
s0 = s1 = s2 = s3 = s4 = 100000;
// shift register, to reduce shared memory usage
int sL[max_lv], sR[max_lv];
for (int j = 0; j < max_lv; j++) sL[j] = 100000;
for (int j = 0; j < max_lv; j++) sR[j] = 100000;
// get subproblem
u64 actual = t<<(lv+4);
good = 0;
for (int j = lv+4; j < n; j++) {
if (actual>>j & 1) {
good += __popcll(actual & E[j]);
}
}
good >>= 1;
for (int i = 0; i < 1<<lv; i += 1) {
u64 mask = actual + ((i ^ i>>1) << 4);
if (i) {
int z = 31-__clz(i&-i);
int diff = __popcll(E[z+4] & mask);
if ((i ^ i>>1)>>z & 1) {
// add vertex
good += diff;
#pragma loop unroll
for (int j = max_lv-1; j > 0; j--) sL[j] = sL[j-1];
sL[0] = s0;
s0 = s1; s1 = s2; s2 = s3; s3 = s4;
s4 = sR[0];
#pragma loop unroll
for (int j = 0; j < max_lv-1; j++) sR[j] = sR[j+1];
}
else {
// remove vertex
good -= diff;
#pragma unroll
for (int j = max_lv-1; j > 0; j--) sR[j] = sR[j-1];
sR[0] = s4;
s4 = s3; s3 = s2; s2 = s1; s1 = s0;
s0 = sL[0];
#pragma unroll
for (int j = 0; j < max_lv-1; j++) sL[j] = sL[j+1];
}
}
int g = good;
int E0 = __popcll(E[0] & mask);
int E1 = __popcll(E[1] & mask);
int E2 = __popcll(E[2] & mask);
int E3 = __popcll(E[3] & mask);
if (g < s0) s0 = g;
g += E0 + bu[1];
if (g < s1) s1 = g;
g += E1 + bu[2];
if (g < s2) s2 = g;
g -= E0 + bu[3];
if (g < s1) s1 = g;
g += E2 + bu[4];
if (g < s2) s2 = g;
g += E0 + bu[5];
if (g < s3) s3 = g;
g -= E1 + bu[6];
if (g < s2) s2 = g;
g -= E0 + bu[7];
if (g < s1) s1 = g;
g += E3 + bu[8];
if (g < s2) s2 = g;
g += E0 + bu[9];
if (g < s3) s3 = g;
g += E1 + bu[10];
if (g < s4) s4 = g;
g -= E0 + bu[11];
if (g < s3) s3 = g;
g -= E2 + bu[12];
if (g < s2) s2 = g;
g += E0 + bu[13];
if (g < s3) s3 = g;
g -= E1 + bu[14];
if (g < s2) s2 = g;
g -= E0 + bu[15];
if (g < s1) s1 = g;
}
int b = __popcll(actual);
if (b) atomicMin(&s[b-1][laneid], sL[0]);
atomicMin(&s[b+0][laneid], s0);
atomicMin(&s[b+1][laneid], s1);
atomicMin(&s[b+2][laneid], s2);
atomicMin(&s[b+3][laneid], s3);
atomicMin(&s[b+4][laneid], s4);
for (int j = 0; j < max_lv-1; j++) {
atomicMin(&s[b+5+j][laneid], sR[j]);
}
}
// combine result from each thread
__syncthreads();
for (int step = 16; step >= 1; step>>=1) {
for (int i = tid>>5; i < 64; i += blockDim.x>>5) {
if (laneid < step)
s[i][laneid] = min(s[i][laneid], s[i][laneid+step]);
}
__syncthreads();
}
for (int i = tid; i < 64; i += blockDim.x) {
ans[blockIdx.x * 64 + i] = s[i][0];
}
}
int mypopcnt(unsigned x) {
int n = 0;
for (int i = 0; i < 32; i++) {
n += x>>i & 1;
}
return n;
}
int main() {
int n;
u64 E[64];
std::cin >> n;
if (n < 0 || n > 64) exit(1);
for(int i=0;i<n;i++){
u64 good = 0, y = 0;
for(int j=0;j<n;j++){
u64 w;
if (std::cin>>w) good = 1;
y|=w<<j;
}
if (good) E[i] = y;
}
int blocksize = 50;
int lv = n - 12;
int *ans;
if (lv < 1) lv = 1;
if (lv > 7) lv = 7;
if (n < 19) blocksize = 1;
else if (n < 25) blocksize = 1<<(n-19);
ans = new int[64 * blocksize];
if (n >= 5) {
u64 *gpu_E;
int *gpu_ans;
cudaMalloc(&gpu_E, sizeof(u64) * 64);
cudaMalloc(&gpu_ans, sizeof(int) * 64 * blocksize);
cudaMemcpy(gpu_E, E, sizeof(u64) * 64, cudaMemcpyHostToDevice);
subgraph_kern<<<blocksize, 256>>>(gpu_E, n, gpu_ans, lv);
cudaMemcpy(ans, gpu_ans, sizeof(int) * 64 * blocksize, cudaMemcpyDeviceToHost);
for (int i = 1; i < blocksize; i++) {
for (int j = 0; j < 64; j++) {
if (ans[i*64+j] < ans[j]) ans[j] = ans[i*64+j];
}
}
}
else {
// small case
for (int j = 0; j < 64; j++) ans[j] = 100000;
for (int i = 0; i < 1<<n; i++) {
int z = mypopcnt(i);
int sum = 0;
for (int j = 0; j < n; j++) {
if (i>>j & 1)
sum += mypopcnt(i & E[j]);
}
sum >>= 1;
if (z && sum < ans[z-1]) ans[z-1] = sum;
}
}
for (int i = 1; i < n; i++) {
std::cout << i+1 << ' ' << ans[i] << '\n';
}
return 0;
}
|
d9b70e8af41017bc0e5b31b8ed81679bf9b0cc06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/instance_norm_op.h"
namespace caffe2 {
namespace {
__global__ void InstanceNormMeanKernel(
int N,
int C,
int dim,
int N_stride,
int C_stride,
int dim_stride,
const float* input_data,
float* mean_data) {
CUDA_1D_KERNEL_LOOP(i, N * C) {
const auto n = i / C;
const auto c = i % C;
mean_data[i] = 0;
auto input_offset = input_data + n * N_stride + c * C_stride;
for (int j = 0; j < dim; ++j) {
mean_data[i] += *input_offset;
input_offset += dim_stride;
}
mean_data[i] /= dim;
}
}
__global__ void InstanceNormInvStdevKernel(
int N,
int C,
int dim,
int N_stride,
int C_stride,
int dim_stride,
float epsilon,
const float* input_data,
const float* mean_data,
float* inv_stdev_data) {
CUDA_1D_KERNEL_LOOP(i, N * C) {
const auto n = i / C;
const auto c = i % C;
inv_stdev_data[i] = 0;
auto input_offset = input_data + n * N_stride + c * C_stride;
for (int j = 0; j < dim; ++j) {
float diff = *input_offset - mean_data[i];
inv_stdev_data[i] += diff * diff;
input_offset += dim_stride;
}
inv_stdev_data[i] /= dim;
inv_stdev_data[i] += epsilon;
inv_stdev_data[i] = 1.0 / sqrtf(inv_stdev_data[i]);
}
}
__global__ void InstanceNormKernel(
int N,
int C,
int dim,
int N_stride,
int C_stride,
int dim_stride,
const float* input_data,
const float* scale_data,
const float* bias_data,
const float* mean_data,
const float* inv_stdev_data,
float* output_data) {
CUDA_1D_KERNEL_LOOP(i, N * C * dim) {
auto index = i;
const auto j = index % dim;
index /= dim;
const auto c = index % C;
index /= C;
const auto n = index;
index = n * N_stride + c * C_stride + j * dim_stride;
const auto stat_idx = n * C + c;
output_data[index] = (input_data[index] - mean_data[stat_idx]) *
inv_stdev_data[stat_idx] * scale_data[c] +
bias_data[c];
}
}
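// Backward pass of the normalization for one (n, c) slice. Writing
// x_hat = (x - mean) * inv_stdev, the loops below build, in place in input_grad,
//     dx = scale * inv_stdev * (dy - mean(dy) - x_hat * mean(dy * x_hat))
// with the means taken over the spatial dimension of size `dim`.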
__global__ void InstanceNormGradientKernel(
int N,
int C,
int dim,
int N_stride,
int C_stride,
int dim_stride,
const float* input_data,
const float* scale_data,
const float* bias_data,
const float* output_grad_data,
const float* mean_data,
const float* inv_stdev_data,
float* input_grad_data) {
CUDA_1D_KERNEL_LOOP(i, N * C) {
const auto n = i / C;
const auto c = i % C;
auto input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
auto input_offset = input_data + n * N_stride + c * C_stride;
for (int j = 0; j < dim; ++j) {
*input_grad_offset = *input_offset - mean_data[i];
input_grad_offset += dim_stride;
input_offset += dim_stride;
}
auto temp = 0.0;
input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
auto output_grad_offset = output_grad_data + n * N_stride + c * C_stride;
for (int j = 0; j < dim; ++j) {
temp += *input_grad_offset * *output_grad_offset;
input_grad_offset += dim_stride;
output_grad_offset += dim_stride;
}
temp *= -powf(inv_stdev_data[i], 3.0) / dim;
input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
output_grad_offset = output_grad_data + n * N_stride + c * C_stride;
auto mean = 0.0;
for (int j = 0; j < dim; ++j) {
*input_grad_offset *= temp;
*input_grad_offset += *output_grad_offset * inv_stdev_data[i];
mean += *input_grad_offset;
input_grad_offset += dim_stride;
output_grad_offset += dim_stride;
}
mean /= dim;
input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
for (int j = 0; j < dim; ++j) {
*input_grad_offset -= mean;
*input_grad_offset *= scale_data[c];
input_grad_offset += dim_stride;
}
}
}
__global__ void InstanceNormScaleBiasGradientKernel(
int N,
int C,
int dim,
int N_stride,
int C_stride,
int dim_stride,
const float* input_data,
const float* mean_data,
const float* output_grad_data,
const float* inv_stdev_data,
float* scale_grad_data,
float* bias_grad_data) {
CUDA_1D_KERNEL_LOOP(c, C) {
scale_grad_data[c] = 0;
bias_grad_data[c] = 0;
auto input_offset = input_data + c * C_stride;
auto output_grad_offset = output_grad_data + c * C_stride;
auto mean_offset = mean_data + c;
auto inv_stdev_offset = inv_stdev_data + c;
for (int n = 0; n < N; ++n) {
auto input_offset_inner = input_offset + n * N_stride;
auto output_grad_offset_inner = output_grad_offset + n * N_stride;
for (int i = 0; i < dim; ++i) {
scale_grad_data[c] += (*input_offset_inner - *mean_offset) *
*inv_stdev_offset * *output_grad_offset_inner;
bias_grad_data[c] += *output_grad_offset_inner;
input_offset_inner += dim_stride;
output_grad_offset_inner += dim_stride;
}
mean_offset += C;
inv_stdev_offset += C;
}
}
}
} // namespace
template <>
bool InstanceNormOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& input = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
auto output = Output(OUTPUT);
auto mean = OutputSize() >= 2 ? Output(MEAN) : &mean_;
auto inv_stdev = OutputSize() >= 3 ? Output(INV_STDEV) : &inv_stdev_;
CAFFE_ENFORCE_EQ(4, input.ndim());
const int N = input.dim32(0);
const int H = input.dim32(1);
const int W = input.dim32(2);
const int C = input.dim32(3);
CAFFE_ENFORCE_EQ(1, scale.ndim());
CAFFE_ENFORCE_EQ(C, scale.dim32(0));
CAFFE_ENFORCE_EQ(1, bias.ndim());
CAFFE_ENFORCE_EQ(C, bias.dim32(0));
output->ResizeLike(input);
mean->Resize(N, C);
inv_stdev->Resize(N, C);
const auto input_data = input.data<float>();
const auto scale_data = scale.data<float>();
const auto bias_data = bias.data<float>();
auto output_data = output->template mutable_data<float>();
auto mean_data = mean->template mutable_data<float>();
auto inv_stdev_data = inv_stdev->template mutable_data<float>();
const auto dim = H * W;
const auto N_stride = C * H * W;
const auto C_stride = 1;
const auto dim_stride = C;
hipLaunchKernelGGL(( InstanceNormMeanKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, C, dim, N_stride, C_stride, dim_stride, input_data, mean_data);
hipLaunchKernelGGL(( InstanceNormInvStdevKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
epsilon_,
input_data,
mean_data,
inv_stdev_data);
hipLaunchKernelGGL(( InstanceNormKernel),
dim3(CAFFE_GET_BLOCKS(N * C * H * W)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
scale_data,
bias_data,
mean_data,
inv_stdev_data,
output_data);
return true;
}
template <>
bool InstanceNormOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& input = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
auto output = Output(OUTPUT);
auto mean = OutputSize() >= 2 ? Output(MEAN) : &mean_;
auto inv_stdev = OutputSize() >= 3 ? Output(INV_STDEV) : &inv_stdev_;
CAFFE_ENFORCE_EQ(4, input.ndim());
const int N = input.dim32(0);
const int C = input.dim32(1);
const int H = input.dim32(2);
const int W = input.dim32(3);
CAFFE_ENFORCE_EQ(1, scale.ndim());
CAFFE_ENFORCE_EQ(C, scale.dim32(0));
CAFFE_ENFORCE_EQ(1, bias.ndim());
CAFFE_ENFORCE_EQ(C, bias.dim32(0));
output->ResizeLike(input);
mean->Resize(N, C);
inv_stdev->Resize(N, C);
const auto input_data = input.data<float>();
const auto scale_data = scale.data<float>();
const auto bias_data = bias.data<float>();
auto output_data = output->template mutable_data<float>();
auto mean_data = mean->template mutable_data<float>();
auto inv_stdev_data = inv_stdev->template mutable_data<float>();
const auto dim = H * W;
const auto N_stride = C * H * W;
const auto C_stride = H * W;
const auto dim_stride = 1;
hipLaunchKernelGGL(( InstanceNormMeanKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, C, dim, N_stride, C_stride, dim_stride, input_data, mean_data);
hipLaunchKernelGGL(( InstanceNormInvStdevKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
epsilon_,
input_data,
mean_data,
inv_stdev_data);
hipLaunchKernelGGL(( InstanceNormKernel),
dim3(CAFFE_GET_BLOCKS(N * C * H * W)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
scale_data,
bias_data,
mean_data,
inv_stdev_data,
output_data);
return true;
}
template <>
bool InstanceNormGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& input = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
const auto& output_grad = Input(OUTPUT_GRAD);
const auto& mean = InputSize() >= 5 ? Input(MEAN) : mean_;
const auto& inv_stdev = InputSize() >= 6 ? Input(INV_STDEV) : inv_stdev_;
auto input_grad = Output(INPUT_GRAD);
auto scale_grad = Output(SCALE_GRAD);
auto bias_grad = Output(BIAS_GRAD);
CAFFE_ENFORCE_EQ(4, input.ndim());
const int N = input.dim32(0);
const int H = input.dim32(1);
const int W = input.dim32(2);
const int C = input.dim32(3);
CAFFE_ENFORCE_EQ(1, scale.ndim());
CAFFE_ENFORCE_EQ(C, scale.dim32(0));
CAFFE_ENFORCE_EQ(1, bias.ndim());
CAFFE_ENFORCE_EQ(C, bias.dim32(0));
CAFFE_ENFORCE_EQ(4, output_grad.ndim());
CAFFE_ENFORCE_EQ(N, output_grad.dim32(0));
CAFFE_ENFORCE_EQ(H, output_grad.dim32(1));
CAFFE_ENFORCE_EQ(W, output_grad.dim32(2));
CAFFE_ENFORCE_EQ(C, output_grad.dim32(3));
input_grad->ResizeLike(input);
scale_grad->ResizeLike(scale);
bias_grad->ResizeLike(bias);
const auto input_data = input.data<float>();
const auto scale_data = scale.data<float>();
const auto bias_data = bias.data<float>();
const auto output_grad_data = output_grad.data<float>();
auto input_grad_data = input_grad->template mutable_data<float>();
auto scale_grad_data = scale_grad->template mutable_data<float>();
auto bias_grad_data = bias_grad->template mutable_data<float>();
const auto dim = H * W;
const auto N_stride = C * H * W;
const auto C_stride = 1;
const auto dim_stride = C;
if (InputSize() < 5) {
mean_.Resize(N, C);
auto mean_mutable_data = mean_.mutable_data<float>();
hipLaunchKernelGGL(( InstanceNormMeanKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
mean_mutable_data);
}
CAFFE_ENFORCE_EQ(2, mean.ndim());
CAFFE_ENFORCE_EQ(N, mean.dim32(0));
CAFFE_ENFORCE_EQ(C, mean.dim32(1));
const auto mean_data = mean.data<float>();
if (InputSize() < 6) {
inv_stdev_.Resize(N, C);
auto inv_stdev_mutable_data = inv_stdev_.mutable_data<float>();
hipLaunchKernelGGL(( InstanceNormInvStdevKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
epsilon_,
input_data,
mean_data,
inv_stdev_mutable_data);
}
CAFFE_ENFORCE_EQ(2, inv_stdev.ndim());
CAFFE_ENFORCE_EQ(N, inv_stdev.dim32(0));
CAFFE_ENFORCE_EQ(C, inv_stdev.dim32(1));
const auto inv_stdev_data = inv_stdev.data<float>();
hipLaunchKernelGGL(( InstanceNormScaleBiasGradientKernel),
dim3(CAFFE_GET_BLOCKS(C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
mean_data,
output_grad_data,
inv_stdev_data,
scale_grad_data,
bias_grad_data);
hipLaunchKernelGGL(( InstanceNormGradientKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
scale_data,
bias_data,
output_grad_data,
mean_data,
inv_stdev_data,
input_grad_data);
return true;
}
template <>
bool InstanceNormGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& input = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
const auto& output_grad = Input(OUTPUT_GRAD);
const auto& mean = InputSize() >= 5 ? Input(MEAN) : mean_;
const auto& inv_stdev = InputSize() >= 6 ? Input(INV_STDEV) : inv_stdev_;
auto input_grad = Output(INPUT_GRAD);
auto scale_grad = Output(SCALE_GRAD);
auto bias_grad = Output(BIAS_GRAD);
CAFFE_ENFORCE_EQ(4, input.ndim());
const int N = input.dim32(0);
const int C = input.dim32(1);
const int H = input.dim32(2);
const int W = input.dim32(3);
CAFFE_ENFORCE_EQ(1, scale.ndim());
CAFFE_ENFORCE_EQ(C, scale.dim32(0));
CAFFE_ENFORCE_EQ(1, bias.ndim());
CAFFE_ENFORCE_EQ(C, bias.dim32(0));
CAFFE_ENFORCE_EQ(4, output_grad.ndim());
CAFFE_ENFORCE_EQ(N, output_grad.dim32(0));
CAFFE_ENFORCE_EQ(C, output_grad.dim32(1));
CAFFE_ENFORCE_EQ(H, output_grad.dim32(2));
CAFFE_ENFORCE_EQ(W, output_grad.dim32(3));
input_grad->ResizeLike(input);
scale_grad->ResizeLike(scale);
bias_grad->ResizeLike(bias);
const auto input_data = input.data<float>();
const auto scale_data = scale.data<float>();
const auto bias_data = bias.data<float>();
const auto output_grad_data = output_grad.data<float>();
auto input_grad_data = input_grad->template mutable_data<float>();
auto scale_grad_data = scale_grad->template mutable_data<float>();
auto bias_grad_data = bias_grad->template mutable_data<float>();
const auto dim = H * W;
const auto N_stride = C * H * W;
const auto C_stride = H * W;
const auto dim_stride = 1;
if (InputSize() < 5) {
mean_.Resize(N, C);
auto mean_mutable_data = mean_.mutable_data<float>();
hipLaunchKernelGGL(( InstanceNormMeanKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
mean_mutable_data);
}
CAFFE_ENFORCE_EQ(2, mean.ndim());
CAFFE_ENFORCE_EQ(N, mean.dim32(0));
CAFFE_ENFORCE_EQ(C, mean.dim32(1));
const auto mean_data = mean.data<float>();
if (InputSize() < 6) {
inv_stdev_.Resize(N, C);
auto inv_stdev_mutable_data = inv_stdev_.mutable_data<float>();
hipLaunchKernelGGL(( InstanceNormInvStdevKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
epsilon_,
input_data,
mean_data,
inv_stdev_mutable_data);
}
CAFFE_ENFORCE_EQ(2, inv_stdev.ndim());
CAFFE_ENFORCE_EQ(N, inv_stdev.dim32(0));
CAFFE_ENFORCE_EQ(C, inv_stdev.dim32(1));
const auto inv_stdev_data = inv_stdev.data<float>();
hipLaunchKernelGGL(( InstanceNormScaleBiasGradientKernel),
dim3(CAFFE_GET_BLOCKS(C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
mean_data,
output_grad_data,
inv_stdev_data,
scale_grad_data,
bias_grad_data);
hipLaunchKernelGGL(( InstanceNormGradientKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
scale_data,
bias_data,
output_grad_data,
mean_data,
inv_stdev_data,
input_grad_data);
return true;
}
REGISTER_CUDA_OPERATOR(InstanceNorm, InstanceNormOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
InstanceNormGradient,
InstanceNormGradientOp<float, CUDAContext>);
} // namespace caffe2
| d9b70e8af41017bc0e5b31b8ed81679bf9b0cc06.cu | #include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/instance_norm_op.h"
namespace caffe2 {
namespace {
__global__ void InstanceNormMeanKernel(
int N,
int C,
int dim,
int N_stride,
int C_stride,
int dim_stride,
const float* input_data,
float* mean_data) {
CUDA_1D_KERNEL_LOOP(i, N * C) {
const auto n = i / C;
const auto c = i % C;
mean_data[i] = 0;
auto input_offset = input_data + n * N_stride + c * C_stride;
for (int j = 0; j < dim; ++j) {
mean_data[i] += *input_offset;
input_offset += dim_stride;
}
mean_data[i] /= dim;
}
}
__global__ void InstanceNormInvStdevKernel(
int N,
int C,
int dim,
int N_stride,
int C_stride,
int dim_stride,
float epsilon,
const float* input_data,
const float* mean_data,
float* inv_stdev_data) {
CUDA_1D_KERNEL_LOOP(i, N * C) {
const auto n = i / C;
const auto c = i % C;
inv_stdev_data[i] = 0;
auto input_offset = input_data + n * N_stride + c * C_stride;
for (int j = 0; j < dim; ++j) {
float diff = *input_offset - mean_data[i];
inv_stdev_data[i] += diff * diff;
input_offset += dim_stride;
}
inv_stdev_data[i] /= dim;
inv_stdev_data[i] += epsilon;
inv_stdev_data[i] = 1.0 / sqrtf(inv_stdev_data[i]);
}
}
__global__ void InstanceNormKernel(
int N,
int C,
int dim,
int N_stride,
int C_stride,
int dim_stride,
const float* input_data,
const float* scale_data,
const float* bias_data,
const float* mean_data,
const float* inv_stdev_data,
float* output_data) {
CUDA_1D_KERNEL_LOOP(i, N * C * dim) {
auto index = i;
const auto j = index % dim;
index /= dim;
const auto c = index % C;
index /= C;
const auto n = index;
index = n * N_stride + c * C_stride + j * dim_stride;
const auto stat_idx = n * C + c;
output_data[index] = (input_data[index] - mean_data[stat_idx]) *
inv_stdev_data[stat_idx] * scale_data[c] +
bias_data[c];
}
}
__global__ void InstanceNormGradientKernel(
int N,
int C,
int dim,
int N_stride,
int C_stride,
int dim_stride,
const float* input_data,
const float* scale_data,
const float* bias_data,
const float* output_grad_data,
const float* mean_data,
const float* inv_stdev_data,
float* input_grad_data) {
CUDA_1D_KERNEL_LOOP(i, N * C) {
const auto n = i / C;
const auto c = i % C;
auto input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
auto input_offset = input_data + n * N_stride + c * C_stride;
for (int j = 0; j < dim; ++j) {
*input_grad_offset = *input_offset - mean_data[i];
input_grad_offset += dim_stride;
input_offset += dim_stride;
}
auto temp = 0.0;
input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
auto output_grad_offset = output_grad_data + n * N_stride + c * C_stride;
for (int j = 0; j < dim; ++j) {
temp += *input_grad_offset * *output_grad_offset;
input_grad_offset += dim_stride;
output_grad_offset += dim_stride;
}
temp *= -powf(inv_stdev_data[i], 3.0) / dim;
input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
output_grad_offset = output_grad_data + n * N_stride + c * C_stride;
auto mean = 0.0;
for (int j = 0; j < dim; ++j) {
*input_grad_offset *= temp;
*input_grad_offset += *output_grad_offset * inv_stdev_data[i];
mean += *input_grad_offset;
input_grad_offset += dim_stride;
output_grad_offset += dim_stride;
}
mean /= dim;
input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
for (int j = 0; j < dim; ++j) {
*input_grad_offset -= mean;
*input_grad_offset *= scale_data[c];
input_grad_offset += dim_stride;
}
}
}
__global__ void InstanceNormScaleBiasGradientKernel(
int N,
int C,
int dim,
int N_stride,
int C_stride,
int dim_stride,
const float* input_data,
const float* mean_data,
const float* output_grad_data,
const float* inv_stdev_data,
float* scale_grad_data,
float* bias_grad_data) {
CUDA_1D_KERNEL_LOOP(c, C) {
scale_grad_data[c] = 0;
bias_grad_data[c] = 0;
auto input_offset = input_data + c * C_stride;
auto output_grad_offset = output_grad_data + c * C_stride;
auto mean_offset = mean_data + c;
auto inv_stdev_offset = inv_stdev_data + c;
for (int n = 0; n < N; ++n) {
auto input_offset_inner = input_offset + n * N_stride;
auto output_grad_offset_inner = output_grad_offset + n * N_stride;
for (int i = 0; i < dim; ++i) {
scale_grad_data[c] += (*input_offset_inner - *mean_offset) *
*inv_stdev_offset * *output_grad_offset_inner;
bias_grad_data[c] += *output_grad_offset_inner;
input_offset_inner += dim_stride;
output_grad_offset_inner += dim_stride;
}
mean_offset += C;
inv_stdev_offset += C;
}
}
}
} // namespace
template <>
bool InstanceNormOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& input = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
auto output = Output(OUTPUT);
auto mean = OutputSize() >= 2 ? Output(MEAN) : &mean_;
auto inv_stdev = OutputSize() >= 3 ? Output(INV_STDEV) : &inv_stdev_;
CAFFE_ENFORCE_EQ(4, input.ndim());
const int N = input.dim32(0);
const int H = input.dim32(1);
const int W = input.dim32(2);
const int C = input.dim32(3);
CAFFE_ENFORCE_EQ(1, scale.ndim());
CAFFE_ENFORCE_EQ(C, scale.dim32(0));
CAFFE_ENFORCE_EQ(1, bias.ndim());
CAFFE_ENFORCE_EQ(C, bias.dim32(0));
output->ResizeLike(input);
mean->Resize(N, C);
inv_stdev->Resize(N, C);
const auto input_data = input.data<float>();
const auto scale_data = scale.data<float>();
const auto bias_data = bias.data<float>();
auto output_data = output->template mutable_data<float>();
auto mean_data = mean->template mutable_data<float>();
auto inv_stdev_data = inv_stdev->template mutable_data<float>();
const auto dim = H * W;
const auto N_stride = C * H * W;
const auto C_stride = 1;
const auto dim_stride = C;
InstanceNormMeanKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, C, dim, N_stride, C_stride, dim_stride, input_data, mean_data);
InstanceNormInvStdevKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
epsilon_,
input_data,
mean_data,
inv_stdev_data);
InstanceNormKernel<<<
CAFFE_GET_BLOCKS(N * C * H * W),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
scale_data,
bias_data,
mean_data,
inv_stdev_data,
output_data);
return true;
}
template <>
bool InstanceNormOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& input = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
auto output = Output(OUTPUT);
auto mean = OutputSize() >= 2 ? Output(MEAN) : &mean_;
auto inv_stdev = OutputSize() >= 3 ? Output(INV_STDEV) : &inv_stdev_;
CAFFE_ENFORCE_EQ(4, input.ndim());
const int N = input.dim32(0);
const int C = input.dim32(1);
const int H = input.dim32(2);
const int W = input.dim32(3);
CAFFE_ENFORCE_EQ(1, scale.ndim());
CAFFE_ENFORCE_EQ(C, scale.dim32(0));
CAFFE_ENFORCE_EQ(1, bias.ndim());
CAFFE_ENFORCE_EQ(C, bias.dim32(0));
output->ResizeLike(input);
mean->Resize(N, C);
inv_stdev->Resize(N, C);
const auto input_data = input.data<float>();
const auto scale_data = scale.data<float>();
const auto bias_data = bias.data<float>();
auto output_data = output->template mutable_data<float>();
auto mean_data = mean->template mutable_data<float>();
auto inv_stdev_data = inv_stdev->template mutable_data<float>();
const auto dim = H * W;
const auto N_stride = C * H * W;
const auto C_stride = H * W;
const auto dim_stride = 1;
InstanceNormMeanKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, C, dim, N_stride, C_stride, dim_stride, input_data, mean_data);
InstanceNormInvStdevKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
epsilon_,
input_data,
mean_data,
inv_stdev_data);
InstanceNormKernel<<<
CAFFE_GET_BLOCKS(N * C * H * W),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
scale_data,
bias_data,
mean_data,
inv_stdev_data,
output_data);
return true;
}
template <>
bool InstanceNormGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& input = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
const auto& output_grad = Input(OUTPUT_GRAD);
const auto& mean = InputSize() >= 5 ? Input(MEAN) : mean_;
const auto& inv_stdev = InputSize() >= 6 ? Input(INV_STDEV) : inv_stdev_;
auto input_grad = Output(INPUT_GRAD);
auto scale_grad = Output(SCALE_GRAD);
auto bias_grad = Output(BIAS_GRAD);
CAFFE_ENFORCE_EQ(4, input.ndim());
const int N = input.dim32(0);
const int H = input.dim32(1);
const int W = input.dim32(2);
const int C = input.dim32(3);
CAFFE_ENFORCE_EQ(1, scale.ndim());
CAFFE_ENFORCE_EQ(C, scale.dim32(0));
CAFFE_ENFORCE_EQ(1, bias.ndim());
CAFFE_ENFORCE_EQ(C, bias.dim32(0));
CAFFE_ENFORCE_EQ(4, output_grad.ndim());
CAFFE_ENFORCE_EQ(N, output_grad.dim32(0));
CAFFE_ENFORCE_EQ(H, output_grad.dim32(1));
CAFFE_ENFORCE_EQ(W, output_grad.dim32(2));
CAFFE_ENFORCE_EQ(C, output_grad.dim32(3));
input_grad->ResizeLike(input);
scale_grad->ResizeLike(scale);
bias_grad->ResizeLike(bias);
const auto input_data = input.data<float>();
const auto scale_data = scale.data<float>();
const auto bias_data = bias.data<float>();
const auto output_grad_data = output_grad.data<float>();
auto input_grad_data = input_grad->template mutable_data<float>();
auto scale_grad_data = scale_grad->template mutable_data<float>();
auto bias_grad_data = bias_grad->template mutable_data<float>();
const auto dim = H * W;
const auto N_stride = C * H * W;
const auto C_stride = 1;
const auto dim_stride = C;
if (InputSize() < 5) {
mean_.Resize(N, C);
auto mean_mutable_data = mean_.mutable_data<float>();
InstanceNormMeanKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
mean_mutable_data);
}
CAFFE_ENFORCE_EQ(2, mean.ndim());
CAFFE_ENFORCE_EQ(N, mean.dim32(0));
CAFFE_ENFORCE_EQ(C, mean.dim32(1));
const auto mean_data = mean.data<float>();
if (InputSize() < 6) {
inv_stdev_.Resize(N, C);
auto inv_stdev_mutable_data = inv_stdev_.mutable_data<float>();
InstanceNormInvStdevKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
epsilon_,
input_data,
mean_data,
inv_stdev_mutable_data);
}
CAFFE_ENFORCE_EQ(2, inv_stdev.ndim());
CAFFE_ENFORCE_EQ(N, inv_stdev.dim32(0));
CAFFE_ENFORCE_EQ(C, inv_stdev.dim32(1));
const auto inv_stdev_data = inv_stdev.data<float>();
InstanceNormScaleBiasGradientKernel<<<
CAFFE_GET_BLOCKS(C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
mean_data,
output_grad_data,
inv_stdev_data,
scale_grad_data,
bias_grad_data);
InstanceNormGradientKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
scale_data,
bias_data,
output_grad_data,
mean_data,
inv_stdev_data,
input_grad_data);
return true;
}
template <>
bool InstanceNormGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& input = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
const auto& output_grad = Input(OUTPUT_GRAD);
const auto& mean = InputSize() >= 5 ? Input(MEAN) : mean_;
const auto& inv_stdev = InputSize() >= 6 ? Input(INV_STDEV) : inv_stdev_;
auto input_grad = Output(INPUT_GRAD);
auto scale_grad = Output(SCALE_GRAD);
auto bias_grad = Output(BIAS_GRAD);
CAFFE_ENFORCE_EQ(4, input.ndim());
const int N = input.dim32(0);
const int C = input.dim32(1);
const int H = input.dim32(2);
const int W = input.dim32(3);
CAFFE_ENFORCE_EQ(1, scale.ndim());
CAFFE_ENFORCE_EQ(C, scale.dim32(0));
CAFFE_ENFORCE_EQ(1, bias.ndim());
CAFFE_ENFORCE_EQ(C, bias.dim32(0));
CAFFE_ENFORCE_EQ(4, output_grad.ndim());
CAFFE_ENFORCE_EQ(N, output_grad.dim32(0));
CAFFE_ENFORCE_EQ(C, output_grad.dim32(1));
CAFFE_ENFORCE_EQ(H, output_grad.dim32(2));
CAFFE_ENFORCE_EQ(W, output_grad.dim32(3));
input_grad->ResizeLike(input);
scale_grad->ResizeLike(scale);
bias_grad->ResizeLike(bias);
const auto input_data = input.data<float>();
const auto scale_data = scale.data<float>();
const auto bias_data = bias.data<float>();
const auto output_grad_data = output_grad.data<float>();
auto input_grad_data = input_grad->template mutable_data<float>();
auto scale_grad_data = scale_grad->template mutable_data<float>();
auto bias_grad_data = bias_grad->template mutable_data<float>();
const auto dim = H * W;
const auto N_stride = C * H * W;
const auto C_stride = H * W;
const auto dim_stride = 1;
if (InputSize() < 5) {
mean_.Resize(N, C);
auto mean_mutable_data = mean_.mutable_data<float>();
InstanceNormMeanKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
mean_mutable_data);
}
CAFFE_ENFORCE_EQ(2, mean.ndim());
CAFFE_ENFORCE_EQ(N, mean.dim32(0));
CAFFE_ENFORCE_EQ(C, mean.dim32(1));
const auto mean_data = mean.data<float>();
if (InputSize() < 6) {
inv_stdev_.Resize(N, C);
auto inv_stdev_mutable_data = inv_stdev_.mutable_data<float>();
InstanceNormInvStdevKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
epsilon_,
input_data,
mean_data,
inv_stdev_mutable_data);
}
CAFFE_ENFORCE_EQ(2, inv_stdev.ndim());
CAFFE_ENFORCE_EQ(N, inv_stdev.dim32(0));
CAFFE_ENFORCE_EQ(C, inv_stdev.dim32(1));
const auto inv_stdev_data = inv_stdev.data<float>();
InstanceNormScaleBiasGradientKernel<<<
CAFFE_GET_BLOCKS(C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
mean_data,
output_grad_data,
inv_stdev_data,
scale_grad_data,
bias_grad_data);
InstanceNormGradientKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
scale_data,
bias_data,
output_grad_data,
mean_data,
inv_stdev_data,
input_grad_data);
return true;
}
REGISTER_CUDA_OPERATOR(InstanceNorm, InstanceNormOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
InstanceNormGradient,
InstanceNormGradientOp<float, CUDAContext>);
} // namespace caffe2
|
a7114c5ac967cf165d196a78c2b5825fb413e0b0.hip | // !!! This is a file automatically generated by hipify!!!
/* CUDA exercise to convert a simple serial code for a brute force
largest prime number search into CUDA. This initial code is serial,
but it is written as CUDA code for your convenience, so should be
compiled with nvcc (see below). Your task is to convert the serial
computation to a kernel computation. In the simplest case, use
atomicMax to find the globally largest prime number.
All prime numbers can be expressed as 6*k-1 or 6*k+1, k being an
integer. We provide the range of k to probe as macro parameters
KMIN and KMAX (see below).
You should get a speedup ~22 (with KMIN=100000000, KMAX=100100000,
BLOCK_SIZE=256, and default number of blocks per kernel NBLOCKS=560).
This is a 64-bit (long long int, instead of int) version - so in principle
you can find primes up to 2^64-1, or 1.8e19.
Hints:
* You can still use atomicMax, even in this 64-bit version, if you use
it not with prime numbers themselves (xmax), but with differences
between the prime number and the starting prime number candidate
value for the current kernel (__device__ int d_xmax), which should
fit in a 32-bit integer for any realistic size kernel.
* On the host, computation should be organized in a while loop, which
sets the initial prime candidate value for the loop, x0, computes number
of blocks for the main kernel, initializes d_xmax in a single-thread
kernel, and then submit the main kernel to device. Then you should copy
the current value of d_xmax back to the host, and compute the largest
found prime (this time using 64-bit integers) for the loop, as x0+d_xmax.
* It's very convenient to use a two-dimensional grid of blocks,
defined as "dim3 Nblocks (NBLOCKS, 2, 1);". The second grid
dimension is used to derive the two values of j=(-1; 1) inside the
kernel: "int j = 2*blockIdx.y - 1;". This way, there will be only
one loop inside the kernel - for y.
* When you get a failure (not a prime) inside the y loop, you can exit
the thread with "return" (no need to use "break").
To compile:
nvcc -arch=sm_20 -O2 primes64.cu -o primes64
*/
#include <sys/time.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
// Range of k-numbers for primes search:
#define KMIN 100000000
#define KMAX 100100000
// Number of threads in one block (possible range is 32...1024):
#define BLOCK_SIZE 256
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0. */
// It messes up with y!
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Kernel(s) should go here:
int main (int argc,char **argv)
{
struct timeval tdr0, tdr1, tdr;
double restime;
int devid, devcount, error, success;
long long int k, j, x, xmax, y, ymax;
if (BLOCK_SIZE>1024)
{
printf ("Bad BLOCK_SIZE: %d\n", BLOCK_SIZE);
exit (1);
}
/* find number of device in current "context" */
hipGetDevice(&devid);
/* find how many devices are available */
if (hipGetDeviceCount(&devcount) || devcount==0)
{
printf ("No CUDA devices!\n");
exit (1);
}
else
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties (&deviceProp, devid);
printf ("Device count, devid: %d %d\n", devcount, devid);
printf ("Device: %s\n", deviceProp.name);
printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n", deviceProp.major, deviceProp.minor);
}
//--------------------------------------------------------------------------------
if (error = hipDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr0, NULL);
// This serial computation will have to be replaced by calls to kernel(s):
xmax = 0;
for (k=KMIN; k<=KMAX; k++)
{
// testing "-1" and "+1" cases:
for (j=-1; j<2; j=j+2)
{
// Prime candidate:
x = 6*k + j;
// We should be dividing by numbers up to sqrt(x):
ymax = (long long int)ceil(sqrt((double)x));
// Primality test:
for (y=3; y<=ymax; y=y+2)
{
// To be a success, the modulus should not be equal to zero:
success = x % y;
if (!success)
break;
}
if (success && x > xmax)
{
xmax = x;
}
}
}
if (error = hipDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr1, NULL);
tdr = tdr0;
timeval_subtract (&restime, &tdr1, &tdr);
printf ("%ld\n", xmax);
printf ("Time: %e\n", restime);
//--------------------------------------------------------------------------------
return 0;
}
| a7114c5ac967cf165d196a78c2b5825fb413e0b0.cu | /* CUDA exercise to convert a simple serial code for a brute force
largest prime number search into CUDA. This initial code is serial,
but it is written as CUDA code for your convenience, so should be
compiled with nvcc (see below). Your task is to convert the serial
computation to a kernel computation. In the simplest case, use
atomicMax to find the globally largest prime number.
All prime numbers can be expressed as 6*k-1 or 6*k+1, k being an
integer. We provide the range of k to probe as macro parameters
KMIN and KMAX (see below).
You should get a speedup ~22 (with KMIN=100000000, KMAX=100100000,
BLOCK_SIZE=256, and default number of blocks per kernel NBLOCKS=560).
This is a 64-bit (long long int, instead of int) version - so in principle
you can find primes up to 2^64-1, or 1.8e19.
Hints:
* You can still use atomicMax, even in this 64-bit version, if you use
it not with prime numbers themselves (xmax), but with differences
between the prime number and the starting prime number candidate
value for the current kernel (__device__ int d_xmax), which should
fit in a 32-bit integer for any realistic size kernel.
* On the host, computation should be organized in a while loop, which
sets the initial prime candidate value for the loop, x0, computes number
of blocks for the main kernel, initializes d_xmax in a single-thread
kernel, and then submit the main kernel to device. Then you should copy
the current value of d_xmax back to the host, and compute the largest
found prime (this time using 64-bit integers) for the loop, as x0+d_xmax.
* It's very convenient to use a two-dimensional grid of blocks,
defined as "dim3 Nblocks (NBLOCKS, 2, 1);". The second grid
dimension is used to derive the two values of j=(-1; 1) inside the
kernel: "int j = 2*blockIdx.y - 1;". This way, there will be only
one loop inside the kernel - for y.
* When you get a failure (not a prime) inside the y loop, you can exit
the thread with "return" (no need to use "break").
To compile:
nvcc -arch=sm_20 -O2 primes64.cu -o primes64
*/
#include <sys/time.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
// Range of k-numbers for primes search:
#define KMIN 100000000
#define KMAX 100100000
// Number of threads in one block (possible range is 32...1024):
#define BLOCK_SIZE 256
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0. */
// It messes up with y!
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Kernel(s) should go here:
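// ------------------------------------------------------------------------------
// [Editor's hedged sketch - not part of the original exercise hand-out.]
// A minimal illustration of the approach described in the header comment above.
// The names d_xmax (32-bit offset) and the NBLOCKS x 2 grid layout come from those
// hints; x0 = 6*k0 (the launch's starting candidate base) and nk (number of k values
// handled per launch) are assumptions. The host-side while loop, the d_xmax init
// launch and the copy-back are omitted here.
__device__ int d_xmax;
// Single-thread kernel: reset the offset before each main launch.
__global__ void init_xmax ()
{
  d_xmax = 0;
}
// One thread per k-offset; the second grid dimension yields j = -1 or +1.
__global__ void prime_search (long long int x0, int nk)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x; // k-offset handled by this thread
  if (i >= nk)
    return;
  int j = 2 * (int)blockIdx.y - 1;               // j = -1 or +1
  long long int x = x0 + 6LL * i + j;            // prime candidate 6*(k0+i) + j
  long long int ymax = (long long int)ceil(sqrt((double)x));
  for (long long int y = 3; y <= ymax; y = y + 2)
    if (x % y == 0)
      return;                                    // composite - exit the thread
  atomicMax (&d_xmax, (int)(x - x0));            // record the 32-bit offset from x0
}
// ------------------------------------------------------------------------------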
int main (int argc,char **argv)
{
struct timeval tdr0, tdr1, tdr;
double restime;
int devid, devcount, error, success;
long long int k, j, x, xmax, y, ymax;
if (BLOCK_SIZE>1024)
{
printf ("Bad BLOCK_SIZE: %d\n", BLOCK_SIZE);
exit (1);
}
/* find number of device in current "context" */
cudaGetDevice(&devid);
/* find how many devices are available */
if (cudaGetDeviceCount(&devcount) || devcount==0)
{
printf ("No CUDA devices!\n");
exit (1);
}
else
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties (&deviceProp, devid);
printf ("Device count, devid: %d %d\n", devcount, devid);
printf ("Device: %s\n", deviceProp.name);
printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n", deviceProp.major, deviceProp.minor);
}
//--------------------------------------------------------------------------------
if (error = cudaDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr0, NULL);
// This serial computation will have to be replaced by calls to kernel(s):
xmax = 0;
for (k=KMIN; k<=KMAX; k++)
{
// testing "-1" and "+1" cases:
for (j=-1; j<2; j=j+2)
{
// Prime candidate:
x = 6*k + j;
// We should be dividing by numbers up to sqrt(x):
ymax = (long long int)ceil(sqrt((double)x));
// Primality test:
for (y=3; y<=ymax; y=y+2)
{
// To be a success, the modulus should not be equal to zero:
success = x % y;
if (!success)
break;
}
if (success && x > xmax)
{
xmax = x;
}
}
}
if (error = cudaDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr1, NULL);
tdr = tdr0;
timeval_subtract (&restime, &tdr1, &tdr);
printf ("%ld\n", xmax);
printf ("Time: %e\n", restime);
//--------------------------------------------------------------------------------
return 0;
}
|
02adb352716b2cdc9843f567e39f71c2d008434d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "init_topp_id_val.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *topp_id_val_buf = NULL;
hipMalloc(&topp_id_val_buf, XSIZE*YSIZE);
int *topp_offset_buf = NULL;
hipMalloc(&topp_offset_buf, XSIZE*YSIZE);
const int batch_size = 1;
const int vocab_size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
init_topp_id_val), dim3(gridBlock),dim3(threadBlock), 0, 0, topp_id_val_buf,topp_offset_buf,batch_size,vocab_size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
init_topp_id_val), dim3(gridBlock),dim3(threadBlock), 0, 0, topp_id_val_buf,topp_offset_buf,batch_size,vocab_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
init_topp_id_val), dim3(gridBlock),dim3(threadBlock), 0, 0, topp_id_val_buf,topp_offset_buf,batch_size,vocab_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 02adb352716b2cdc9843f567e39f71c2d008434d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "init_topp_id_val.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *topp_id_val_buf = NULL;
cudaMalloc(&topp_id_val_buf, XSIZE*YSIZE);
int *topp_offset_buf = NULL;
cudaMalloc(&topp_offset_buf, XSIZE*YSIZE);
const int batch_size = 1;
const int vocab_size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
init_topp_id_val<<<gridBlock,threadBlock>>>(topp_id_val_buf,topp_offset_buf,batch_size,vocab_size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
init_topp_id_val<<<gridBlock,threadBlock>>>(topp_id_val_buf,topp_offset_buf,batch_size,vocab_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
init_topp_id_val<<<gridBlock,threadBlock>>>(topp_id_val_buf,topp_offset_buf,batch_size,vocab_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
02e7d7125e44782004051a4700d300a28f0d1c07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include <chrono>
#include <numeric>
#include "common_hip.cuh"
#include "simulation_hip.cuh"
int main() {
const double nu = 0.05;
const double sigma = 0.25;
const double width = 2;
const double height = 2;
const double dx = width / (IMAX-1);
const double dy = height / (JMAX-1);
const double dz = height / (KMAX-1);
const double dt = sigma * dx * dy * dz / nu;
const double cx = (nu * dt / (dx * dx));
const double cy = (nu * dt / (dy * dy));
const double cz = (nu * dt / (dz * dz));
// Host Data Initialization
std::vector<double> thost(IMAX * JMAX * KMAX);
for (int k = 0; k < KMAX; ++k) {
for (int j = 0; j < JMAX; ++j) {
for (int i = 0; i < IMAX; ++i) {
if (i < HOTCORNER_IMAX && j < HOTCORNER_JMAX && k < HOTCORNER_KMAX) {
thost[INDEX3D(i, j, k)] = 2.0;
} else {
thost[INDEX3D(i, j, k)] = 1.0;
}
}
}
}
std::chrono::steady_clock::time_point t_start = std::chrono::steady_clock::now();
// Device Data Initialization
double *tnow;
double *tnext;
hipMalloc((void **) &tnow, IMAX * JMAX * KMAX * sizeof(double));
hipMalloc((void **) &tnext, IMAX * JMAX * KMAX * sizeof(double));
hipMemcpy(tnow, thost.data(), IMAX * JMAX * KMAX * sizeof(double), hipMemcpyHostToDevice);
// Calculate initial (inner) temperature
const unsigned long all_cells = (IMAX-2) * (JMAX-2) * (KMAX-2);
const unsigned long hot_cells = (HOTCORNER_IMAX-1) * (HOTCORNER_JMAX-1) * (HOTCORNER_KMAX-1);
double expected = hot_cells * 2.0 + (all_cells-hot_cells) * 1.0;
double temperature = 0.0;
for (int k = 1; k < KMAX-1; ++k) {
for (int j = 1; j < JMAX-1; ++j) {
temperature = std::accumulate(&thost[INDEX3D(1, j, k)], &thost[INDEX3D(IMAX-1, j, k)], temperature);
}
}
std::cout << "Initial Temperature: " << temperature << " Expected: " << expected << std::endl;
const unsigned int i_repeat = 3;
const unsigned int j_repeat = 3;
const unsigned int k_repeat = 3;
const dim3 dim_block(8, 8, 8);
const dim3 dim_grid((IMAX - 2 + i_repeat * dim_block.x - 1) / (i_repeat * dim_block.x),
(JMAX - 2 + j_repeat * dim_block.y - 1) / (j_repeat * dim_block.y),
(KMAX - 2 + k_repeat * dim_block.z - 1) / (k_repeat * dim_block.z));
const dim3 dim_ireflect_block(1, 16, 16);
const dim3 dim_ireflect_grid(1,
(JMAX-2 + dim_block.y - 1) / dim_block.y,
(KMAX-2+ dim_block.z - 1) / dim_block.z);
const dim3 dim_jreflect_block(16, 1, 16);
const dim3 dim_jreflect_grid((IMAX - 2 + dim_block.x - 1) / dim_block.x,
1,
(KMAX - 2 + dim_block.z - 1) / dim_block.z);
const dim3 dim_kreflect_block(16, 16, 1);
const dim3 dim_kreflect_grid((IMAX - 2 + dim_block.x - 1) / dim_block.x,
(JMAX - 2 + dim_block.y - 1) / dim_block.y,
1);
std::chrono::steady_clock::time_point t_sim_start = std::chrono::steady_clock::now();
for (int ts = 0; ts < TIMESTEPS; ++ts) {
hipLaunchKernelGGL(( DiffuseKnl<i_repeat, j_repeat, k_repeat>), dim3(dim_grid), dim3(dim_block), 0, 0, tnow, tnext, cx, cy, cz);
hipLaunchKernelGGL(( ReflectIKnl), dim3(dim_ireflect_grid), dim3(dim_ireflect_block), 0, 0, tnext);
hipLaunchKernelGGL(( ReflectJKnl), dim3(dim_jreflect_grid), dim3(dim_jreflect_block), 0, 0, tnext);
hipLaunchKernelGGL(( ReflectKKnl), dim3(dim_kreflect_grid), dim3(dim_kreflect_block), 0, 0, tnext);
std::swap(tnow, tnext);
}
hipDeviceSynchronize();
std::chrono::steady_clock::time_point t_sim_end = std::chrono::steady_clock::now();
hipMemcpy(thost.data(), tnow, IMAX * JMAX * KMAX * sizeof(double), hipMemcpyDeviceToHost);
temperature = 0.0;
for (int k = 1; k < KMAX-1; ++k) {
for (int j = 1; j < JMAX-1; ++j) {
temperature = std::accumulate(&thost[INDEX3D(1, j, k)], &thost[INDEX3D(IMAX-1, j, k)], temperature);
}
}
hipFree(tnow);
hipFree(tnext);
hipDeviceReset();
std::chrono::steady_clock::time_point t_end = std::chrono::steady_clock::now();
std::chrono::duration<double> runtime = std::chrono::duration_cast<std::chrono::duration<double>>(t_end-t_start);
std::chrono::duration<double> sim_runtime = std::chrono::duration_cast<std::chrono::duration<double>>(t_sim_end-t_sim_start);
std::cout << "Final Temperature: " << temperature << " Expected: " << expected << std::endl;
std::cout << "Time Elapsed (simulation): " << sim_runtime.count() << "s" << std::endl;
std::cout << "Time Elapsed (total): " << runtime.count() << "s" << std::endl;
return EXIT_SUCCESS;
}
| 02e7d7125e44782004051a4700d300a28f0d1c07.cu | #include <iostream>
#include <vector>
#include <chrono>
#include <numeric>
#include "common.cuh"
#include "simulation.cuh"
int main() {
const double nu = 0.05;
const double sigma = 0.25;
const double width = 2;
const double height = 2;
const double dx = width / (IMAX-1);
const double dy = height / (JMAX-1);
const double dz = height / (KMAX-1);
const double dt = sigma * dx * dy * dz / nu;
const double cx = (nu * dt / (dx * dx));
const double cy = (nu * dt / (dy * dy));
const double cz = (nu * dt / (dz * dz));
// Host Data Initialization
std::vector<double> thost(IMAX * JMAX * KMAX);
for (int k = 0; k < KMAX; ++k) {
for (int j = 0; j < JMAX; ++j) {
for (int i = 0; i < IMAX; ++i) {
if (i < HOTCORNER_IMAX && j < HOTCORNER_JMAX && k < HOTCORNER_KMAX) {
thost[INDEX3D(i, j, k)] = 2.0;
} else {
thost[INDEX3D(i, j, k)] = 1.0;
}
}
}
}
std::chrono::steady_clock::time_point t_start = std::chrono::steady_clock::now();
// Device Data Initialization
double *tnow;
double *tnext;
cudaMalloc((void **) &tnow, IMAX * JMAX * KMAX * sizeof(double));
cudaMalloc((void **) &tnext, IMAX * JMAX * KMAX * sizeof(double));
cudaMemcpy(tnow, thost.data(), IMAX * JMAX * KMAX * sizeof(double), cudaMemcpyHostToDevice);
// Calculate initial (inner) temperature
const unsigned long all_cells = (IMAX-2) * (JMAX-2) * (KMAX-2);
const unsigned long hot_cells = (HOTCORNER_IMAX-1) * (HOTCORNER_JMAX-1) * (HOTCORNER_KMAX-1);
double expected = hot_cells * 2.0 + (all_cells-hot_cells) * 1.0;
double temperature = 0.0;
for (int k = 1; k < KMAX-1; ++k) {
for (int j = 1; j < JMAX-1; ++j) {
temperature = std::accumulate(&thost[INDEX3D(1, j, k)], &thost[INDEX3D(IMAX-1, j, k)], temperature);
}
}
std::cout << "Initial Temperature: " << temperature << " Expected: " << expected << std::endl;
const unsigned int i_repeat = 3;
const unsigned int j_repeat = 3;
const unsigned int k_repeat = 3;
const dim3 dim_block(8, 8, 8);
const dim3 dim_grid((IMAX - 2 + i_repeat * dim_block.x - 1) / (i_repeat * dim_block.x),
(JMAX - 2 + j_repeat * dim_block.y - 1) / (j_repeat * dim_block.y),
(KMAX - 2 + k_repeat * dim_block.z - 1) / (k_repeat * dim_block.z));
const dim3 dim_ireflect_block(1, 16, 16);
const dim3 dim_ireflect_grid(1,
(JMAX-2 + dim_block.y - 1) / dim_block.y,
(KMAX-2+ dim_block.z - 1) / dim_block.z);
const dim3 dim_jreflect_block(16, 1, 16);
const dim3 dim_jreflect_grid((IMAX - 2 + dim_block.x - 1) / dim_block.x,
1,
(KMAX - 2 + dim_block.z - 1) / dim_block.z);
const dim3 dim_kreflect_block(16, 16, 1);
const dim3 dim_kreflect_grid((IMAX - 2 + dim_block.x - 1) / dim_block.x,
(JMAX - 2 + dim_block.y - 1) / dim_block.y,
1);
std::chrono::steady_clock::time_point t_sim_start = std::chrono::steady_clock::now();
for (int ts = 0; ts < TIMESTEPS; ++ts) {
DiffuseKnl<i_repeat, j_repeat, k_repeat><<<dim_grid, dim_block>>>(tnow, tnext, cx, cy, cz);
ReflectIKnl<<<dim_ireflect_grid, dim_ireflect_block>>>(tnext);
ReflectJKnl<<<dim_jreflect_grid, dim_jreflect_block>>>(tnext);
ReflectKKnl<<<dim_kreflect_grid, dim_kreflect_block>>>(tnext);
std::swap(tnow, tnext);
}
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point t_sim_end = std::chrono::steady_clock::now();
cudaMemcpy(thost.data(), tnow, IMAX * JMAX * KMAX * sizeof(double), cudaMemcpyDeviceToHost);
temperature = 0.0;
for (int k = 1; k < KMAX-1; ++k) {
for (int j = 1; j < JMAX-1; ++j) {
temperature = std::accumulate(&thost[INDEX3D(1, j, k)], &thost[INDEX3D(IMAX-1, j, k)], temperature);
}
}
cudaFree(tnow);
cudaFree(tnext);
cudaDeviceReset();
std::chrono::steady_clock::time_point t_end = std::chrono::steady_clock::now();
std::chrono::duration<double> runtime = std::chrono::duration_cast<std::chrono::duration<double>>(t_end-t_start);
std::chrono::duration<double> sim_runtime = std::chrono::duration_cast<std::chrono::duration<double>>(t_sim_end-t_sim_start);
std::cout << "Final Temperature: " << temperature << " Expected: " << expected << std::endl;
std::cout << "Time Elapsed (simulation): " << sim_runtime.count() << "s" << std::endl;
std::cout << "Time Elapsed (total): " << runtime.count() << "s" << std::endl;
return EXIT_SUCCESS;
}
|
354f0f5a4e52bbca11bafa746ea40af2b00153cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "inc/helper_image.h"
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <iostream>
#include <iomanip>
#include <chrono>
#include "host_image_pocessing.h"
#include "device_image_pocessing.cuh"
using namespace std;
#define CUDA_DEBUG
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG) || defined(CUDA_DEBUG)
if (result != hipSuccess)
{
cerr << "CUDA Runtime Error: " << hipGetErrorString(result) << endl;
assert(result == hipSuccess);
}
#endif
return result;
}
// Check errors
void postprocess(const unsigned char *in_data, const unsigned char *out_data, int size, float ms)
{
for (int i = 0; i < size; i++)
{
if (in_data[i] != out_data[i])
{
cout << endl << "*** FAILED ***" << endl;
cout << "Index: " << i << " exhibit: " << +in_data[i] << " result: " << +out_data[i] << endl << endl;
break;
}
}
cout << "Time spent: " << ms << endl;
}
const int NUM_REPS = 1;
int main()
{
char file_name[] = "image.ppm";
char cpu_resilt_file_name[] = "CPU_result.ppm";
char gpu_resilt_file_name[] = "GPU_result.ppm";
size_t width = 0;
size_t height = 0;
int channels = 0;
pixel *input_data = nullptr;
__loadPPM(
file_name, reinterpret_cast<unsigned char **>(&input_data),
reinterpret_cast<unsigned int*>(&width),
reinterpret_cast<unsigned int*>(&height),
reinterpret_cast<unsigned int*>(&channels)
);
cout << width << " " << height << " " << channels << endl << endl;
const size_t padded_width = width + 2;
const size_t padded_height = height + 2;
const size_t size = width * height;
const size_t width_in_bytes = width * sizeof(pixel);
const size_t padded_width_in_bytes = padded_width * sizeof(pixel);
const size_t size_in_bytes = width_in_bytes * height;
pixel *cpu_output_data = new pixel[size];
pixel *gpu_output_data = new pixel[size];
// ********************************************************************************************************
cout << "Filtering via CPU" << endl;
auto start_cpu = chrono::steady_clock::now();
for (int i = 0; i < NUM_REPS; i++)
{
ApplySquareAverageFilter(input_data, cpu_output_data, width, height);
}
auto end_cpu = chrono::steady_clock::now();
auto cpu_time = end_cpu - start_cpu;
cout << "CPU time: " << chrono::duration<double, milli>(cpu_time).count() / NUM_REPS << endl;
// ********************************************************************************************************
// ********************************************************************************************************
size_t input_pitch = 0;
pixel *padded_input = PadDataByOnePixel(input_data, width, height);
unsigned char *pitched_input_data = nullptr;
checkCuda(hipMallocPitch(reinterpret_cast<void **>(&pitched_input_data), &input_pitch, padded_width_in_bytes, padded_height));
checkCuda(hipMemcpy2D(
pitched_input_data,
input_pitch,
reinterpret_cast<unsigned char **>(padded_input),
padded_width_in_bytes,
padded_width_in_bytes,
padded_height,
hipMemcpyHostToDevice
));
size_t output_pitch = 0;
unsigned char *pitched_output_data = nullptr;
hipMallocPitch(reinterpret_cast<void**>(&pitched_output_data), &output_pitch, width_in_bytes, height);
float time = 0;
hipEvent_t startEvent, stopEvent;
checkCuda(hipEventCreate(&startEvent));
checkCuda(hipEventCreate(&stopEvent));
//
cout << "Filtering via GPU" << " pitch: " << input_pitch << endl;
int aligned_width = (input_pitch + AMOUNT_OF_THREADS_X - 1) / AMOUNT_OF_THREADS_X;
int aligned_height = (padded_height + AMOUNT_OF_THREADS_Y - 1) / AMOUNT_OF_THREADS_Y;
dim3 dimGrid(aligned_width, aligned_height, 1);
dim3 dimBlock(AMOUNT_OF_THREADS_X, AMOUNT_OF_THREADS_Y, 1);
// warm up
hipLaunchKernelGGL(( ApplySquareAverageFilter) , dim3(dimGrid), dim3(dimBlock), 0, 0,
pitched_input_data, pitched_output_data,
width_in_bytes, height,
padded_width_in_bytes, padded_height,
input_pitch, output_pitch
);
checkCuda(hipEventRecord(startEvent, 0));
for (int i = 0; i < NUM_REPS; i++)
{
hipLaunchKernelGGL(( ApplySquareAverageFilter) , dim3(dimGrid), dim3(dimBlock), 0, 0,
pitched_input_data, pitched_output_data,
width_in_bytes, height,
padded_width_in_bytes, padded_height,
input_pitch, output_pitch
);
}
checkCuda(hipEventRecord(stopEvent, 0));
checkCuda(hipEventSynchronize(stopEvent));
checkCuda(hipEventElapsedTime(&time, startEvent, stopEvent));
time /= NUM_REPS;
cout << "GPU time: " << time << endl;
checkCuda(hipMemcpy2D(
reinterpret_cast<unsigned char *>(gpu_output_data),
width_in_bytes,
pitched_output_data,
output_pitch,
width_in_bytes,
height,
hipMemcpyDeviceToHost
));
// ********************************************************************************************************
// check
postprocess(reinterpret_cast<unsigned char *>(cpu_output_data), reinterpret_cast<unsigned char *>(gpu_output_data), size, time);
__savePPM(cpu_resilt_file_name, reinterpret_cast<unsigned char *>(cpu_output_data), width, height, channels);
__savePPM(gpu_resilt_file_name, reinterpret_cast<unsigned char *>(gpu_output_data), width, height, channels);
checkCuda(hipEventDestroy(startEvent));
checkCuda(hipEventDestroy(stopEvent));
checkCuda(hipFree(pitched_input_data));
checkCuda(hipFree(pitched_output_data));
delete[] input_data;
delete[] cpu_output_data;
delete[] gpu_output_data;
} | 354f0f5a4e52bbca11bafa746ea40af2b00153cf.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "inc/helper_image.h"
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <iostream>
#include <iomanip>
#include <chrono>
#include "host_image_pocessing.h"
#include "device_image_pocessing.cuh"
using namespace std;
#define CUDA_DEBUG
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG) || defined(CUDA_DEBUG)
if (result != cudaSuccess)
{
cerr << "CUDA Runtime Error: " << cudaGetErrorString(result) << endl;
assert(result == cudaSuccess);
}
#endif
return result;
}
// Check errors
void postprocess(const unsigned char *in_data, const unsigned char *out_data, int size, float ms)
{
for (int i = 0; i < size; i++)
{
if (in_data[i] != out_data[i])
{
cout << endl << "*** FAILED ***" << endl;
cout << "Index: " << i << " exhibit: " << +in_data[i] << " result: " << +out_data[i] << endl << endl;
break;
}
}
cout << "Time spent: " << ms << endl;
}
const int NUM_REPS = 1;
int main()
{
char file_name[] = "image.ppm";
char cpu_resilt_file_name[] = "CPU_result.ppm";
char gpu_resilt_file_name[] = "GPU_result.ppm";
size_t width = 0;
size_t height = 0;
int channels = 0;
pixel *input_data = nullptr;
__loadPPM(
file_name, reinterpret_cast<unsigned char **>(&input_data),
reinterpret_cast<unsigned int*>(&width),
reinterpret_cast<unsigned int*>(&height),
reinterpret_cast<unsigned int*>(&channels)
);
cout << width << " " << height << " " << channels << endl << endl;
const size_t padded_width = width + 2;
const size_t padded_height = height + 2;
const size_t size = width * height;
const size_t width_in_bytes = width * sizeof(pixel);
const size_t padded_width_in_bytes = padded_width * sizeof(pixel);
const size_t size_in_bytes = width_in_bytes * height;
pixel *cpu_output_data = new pixel[size];
pixel *gpu_output_data = new pixel[size];
// ********************************************************************************************************
cout << "Filtering via CPU" << endl;
auto start_cpu = chrono::steady_clock::now();
for (int i = 0; i < NUM_REPS; i++)
{
ApplySquareAverageFilter(input_data, cpu_output_data, width, height);
}
auto end_cpu = chrono::steady_clock::now();
auto cpu_time = end_cpu - start_cpu;
cout << "CPU time: " << chrono::duration<double, milli>(cpu_time).count() / NUM_REPS << endl;
// ********************************************************************************************************
// ********************************************************************************************************
size_t input_pitch = 0;
pixel *padded_input = PadDataByOnePixel(input_data, width, height);
unsigned char *pitched_input_data = nullptr;
checkCuda(cudaMallocPitch(reinterpret_cast<void **>(&pitched_input_data), &input_pitch, padded_width_in_bytes, padded_height));
checkCuda(cudaMemcpy2D(
pitched_input_data,
input_pitch,
reinterpret_cast<unsigned char **>(padded_input),
padded_width_in_bytes,
padded_width_in_bytes,
padded_height,
cudaMemcpyHostToDevice
));
size_t output_pitch = 0;
unsigned char *pitched_output_data = nullptr;
cudaMallocPitch(reinterpret_cast<void**>(&pitched_output_data), &output_pitch, width_in_bytes, height);
float time = 0;
cudaEvent_t startEvent, stopEvent;
checkCuda(cudaEventCreate(&startEvent));
checkCuda(cudaEventCreate(&stopEvent));
//
cout << "Filtering via GPU" << " pitch: " << input_pitch << endl;
int aligned_width = (input_pitch + AMOUNT_OF_THREADS_X - 1) / AMOUNT_OF_THREADS_X;
int aligned_height = (padded_height + AMOUNT_OF_THREADS_Y - 1) / AMOUNT_OF_THREADS_Y;
dim3 dimGrid(aligned_width, aligned_height, 1);
dim3 dimBlock(AMOUNT_OF_THREADS_X, AMOUNT_OF_THREADS_Y, 1);
// warm up
ApplySquareAverageFilter <<<dimGrid, dimBlock>>> (
pitched_input_data, pitched_output_data,
width_in_bytes, height,
padded_width_in_bytes, padded_height,
input_pitch, output_pitch
);
checkCuda(cudaEventRecord(startEvent, 0));
for (int i = 0; i < NUM_REPS; i++)
{
ApplySquareAverageFilter <<<dimGrid, dimBlock>>> (
pitched_input_data, pitched_output_data,
width_in_bytes, height,
padded_width_in_bytes, padded_height,
input_pitch, output_pitch
);
}
checkCuda(cudaEventRecord(stopEvent, 0));
checkCuda(cudaEventSynchronize(stopEvent));
checkCuda(cudaEventElapsedTime(&time, startEvent, stopEvent));
time /= NUM_REPS;
cout << "GPU time: " << time << endl;
checkCuda(cudaMemcpy2D(
reinterpret_cast<unsigned char *>(gpu_output_data),
width_in_bytes,
pitched_output_data,
output_pitch,
width_in_bytes,
height,
cudaMemcpyDeviceToHost
));
// ********************************************************************************************************
// check
postprocess(reinterpret_cast<unsigned char *>(cpu_output_data), reinterpret_cast<unsigned char *>(gpu_output_data), size, time);
__savePPM(cpu_resilt_file_name, reinterpret_cast<unsigned char *>(cpu_output_data), width, height, channels);
__savePPM(gpu_resilt_file_name, reinterpret_cast<unsigned char *>(gpu_output_data), width, height, channels);
checkCuda(cudaEventDestroy(startEvent));
checkCuda(cudaEventDestroy(stopEvent));
checkCuda(cudaFree(pitched_input_data));
checkCuda(cudaFree(pitched_output_data));
delete[] input_data;
delete[] cpu_output_data;
delete[] gpu_output_data;
} |