hip_filename (string, length 5-84) | hip_content (string, length 79-9.69M) | cuda_filename (string, length 4-83) | cuda_content (string, length 19-9.69M) |
---|---|---|---|
9c1ea9d92c7319457c506c7b29732067be0442dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "head.h"
extern float *d_F, *d_x, *d_y;
extern float *d_V2;
extern float *d_V_tmp;
extern float *d_t;
__global__ void GPU_fsource(float *d_V_tmp, float *d_t, float *d_x, float *d_y, float *d_F){
int k = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if(k<Np){
for(j=0;j<Np;j++){
d_F[k*Np+j] = exp(-2.0*d_t[0])*cos(M_PI*d_x[k])*
cos(M_PI*d_y[j])*(2.0*M_PI*M_PI)
-2.0*d_V_tmp[k*Np+j];
}
}
}
__global__ void GPU_RKa(float *d_V_tmp, float *d_V2, float *d_F, float *d_t){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<Np*Np){
d_V_tmp[i] = d_V2[i] + (1.0/2.0)*dt*d_F[i];
}
if(i==0) d_t[0] = d_t[0]+dt/2.0;
}
__global__ void GPU_RKb(float *d_V2, float *d_F, float *d_t){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<Np*Np){
d_V2[i] = d_V2[i] + dt*d_F[i];
}
if(i==0) d_t[0] = d_t[0]+dt/2.0;
}
void RK(){
int tpb = 256;
int bpg = (Np*Np+tpb-1)/tpb;
hipLaunchKernelGGL(( GPU_RKa), dim3(bpg), dim3(tpb), 0, 0, d_V_tmp, d_V2, d_F, d_t);
hipLaunchKernelGGL(( GPU_fsource), dim3(bpg), dim3(tpb), 0, 0, d_V_tmp, d_t, d_x, d_y, d_F);
hipLaunchKernelGGL(( GPU_RKb), dim3(bpg), dim3(tpb), 0, 0, d_V2, d_F, d_t);
}
| 9c1ea9d92c7319457c506c7b29732067be0442dd.cu | #include "head.h"
extern float *d_F, *d_x, *d_y;
extern float *d_V2;
extern float *d_V_tmp;
extern float *d_t;
__global__ void GPU_fsource(float *d_V_tmp, float *d_t, float *d_x, float *d_y, float *d_F){
int k = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if(k<Np){
for(j=0;j<Np;j++){
d_F[k*Np+j] = exp(-2.0*d_t[0])*cos(M_PI*d_x[k])*
cos(M_PI*d_y[j])*(2.0*M_PI*M_PI)
-2.0*d_V_tmp[k*Np+j];
}
}
}
__global__ void GPU_RKa(float *d_V_tmp, float *d_V2, float *d_F, float *d_t){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<Np*Np){
d_V_tmp[i] = d_V2[i] + (1.0/2.0)*dt*d_F[i];
}
if(i==0) d_t[0] = d_t[0]+dt/2.0;
}
__global__ void GPU_RKb(float *d_V2, float *d_F, float *d_t){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<Np*Np){
d_V2[i] = d_V2[i] + dt*d_F[i];
}
if(i==0) d_t[0] = d_t[0]+dt/2.0;
}
void RK(){
int tpb = 256;
int bpg = (Np*Np+tpb-1)/tpb;
GPU_RKa<<<bpg, tpb>>>(d_V_tmp, d_V2, d_F, d_t);
GPU_fsource<<<bpg, tpb>>>(d_V_tmp, d_t, d_x, d_y, d_F);
GPU_RKb<<<bpg, tpb>>>(d_V2, d_F, d_t);
}
|
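Each row pairs a CUDA translation unit (`cuda_content`) with its hipify-generated counterpart (`hip_content`); the most visible mechanical difference is the kernel-launch syntax. The sketch below is not taken from the dataset (the kernel and variable names are made up for illustration) and only shows, under the assumption that `hipcc` defines `__HIPCC__`, how the triple-chevron launch in the `.cu` column maps onto `hipLaunchKernelGGL` in the `.hip` column.

```cpp
// Illustrative only: a toy kernel launched with both syntaxes seen in the rows above.
#ifdef __HIPCC__
#include "hip/hip_runtime.h"   // hipify inserts this include at the top of each .hip file
#else
#include <cuda_runtime.h>
#endif

__global__ void scale(float *v, float s, int n) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;  // identical indexing in both dialects
  if (i < n) v[i] *= s;
}

int main() {
  const int n = 1024, tpb = 256, bpg = (n + tpb - 1) / tpb;
  float *d_v = nullptr;
#ifdef __HIPCC__
  hipMalloc(&d_v, n * sizeof(float));
  // hipify rewrites kernel<<<grid, block, shmem, stream>>>(args...) into
  // hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), shmem, stream, args...).
  hipLaunchKernelGGL(scale, dim3(bpg), dim3(tpb), 0, 0, d_v, 2.0f, n);
  hipDeviceSynchronize();
  hipFree(d_v);
#else
  cudaMalloc(&d_v, n * sizeof(float));
  scale<<<bpg, tpb>>>(d_v, 2.0f, n);  // triple-chevron launch kept in the cuda_content column
  cudaDeviceSynchronize();
  cudaFree(d_v);
#endif
  return 0;
}
```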
9b6370c8e85c5312bfde248e0079863f909063ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MultiMarginCriterion.cu"
#else
// TODO: improve error messages
void THNN_(MultiMarginCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
bool sizeAverage,
int p,
THCTensor *weights,
accreal margin_,
bool reduce)
{
real margin = ScalarConvert<accreal, real>::to(margin_);
THCUNN_assertSameGPU(state, 2, input, target);
input = THCTensor_(newContiguous)(state, input);
if(weights)
weights = THCTensor_(newContiguous)(state, weights);
if (input->nDimension == 1)
{
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
THCTensor_(resize1d)(state, output, 1);
if (p == 1)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, input->size[0],
sizeAverage,
margin
);
}
else if (p == 2)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, input->size[0],
sizeAverage,
margin
);
}
THCudaCheck(hipGetLastError());
}
else if (input->nDimension == 2)
{
int nframe = input->size[0];
THArgCheck((target->nDimension == 1) && (target->size[0] == nframe), 3,
"inconsistent target size");
dim3 blocks(input->size[0]);
dim3 threads(MULTIMARGIN_THREADS);
if (!reduce)
{
THCTensor_(resize1d)(state, output, input->size[0]);
if (p == 1)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size[1],
false,
margin
);
}
else if (p == 2)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size[1],
false,
margin
);
}
THCudaCheck(hipGetLastError());
}
else
{
THCTensor_(resize1d)(state, output, 1);
THCTensor *output_ = THCTensor_(newWithSize1d)(state, input->size[0]); // tmp output buffer
if (p == 1)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output_),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size[1],
sizeAverage,
margin
);
}
else if (p == 2)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output_),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
input->size[0], input->size[1],
sizeAverage,
margin
);
}
THCudaCheck(hipGetLastError());
float sum = THCTensor_(sumall)(state, output_);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, real>::to(sum));
THCTensor_(free)(state, output_);
}
}
else
{
THError("vector or matrix expected");
}
THCTensor_(free)(state, input);
if(weights)
THCTensor_(free)(state, weights);
}
void THNN_(MultiMarginCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
bool sizeAverage,
int p,
THCTensor *weights,
accreal margin_,
bool reduce)
{
real margin = ScalarConvert<accreal, real>::to(margin_);
THCUNN_assertSameGPU(state, 3, input, gradInput, target);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
if(weights)
weights = THCTensor_(newContiguous)(state, weights);
if (input->nDimension == 1)
{
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<1, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, gradInput->size[0],
sizeAverage,
margin,
reduce
);
}
else if (p == 2)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<2, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, gradInput->size[0],
sizeAverage,
margin,
reduce
);
}
THCudaCheck(hipGetLastError());
}
else if (input->nDimension == 2)
{
int nframe = gradInput->size[0];
THArgCheck((target->nDimension == 1) && (target->size[0] == nframe), 3,
"inconsistent target size");
dim3 blocks(gradInput->size[0]);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<1, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, gradInput->size[1],
sizeAverage,
margin,
reduce
);
}
else if (p == 2)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<2, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, gradInput->size[1],
sizeAverage,
margin,
reduce
);
}
THCudaCheck(hipGetLastError());
}
else
{
THError("vector or matrix expected");
}
THCTensor_(free)(state, input);
if(weights)
THCTensor_(free)(state, weights);
}
#endif
| 9b6370c8e85c5312bfde248e0079863f909063ed.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MultiMarginCriterion.cu"
#else
// TODO: improve error messages
void THNN_(MultiMarginCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
bool sizeAverage,
int p,
THCTensor *weights,
accreal margin_,
bool reduce)
{
real margin = ScalarConvert<accreal, real>::to(margin_);
THCUNN_assertSameGPU(state, 2, input, target);
input = THCTensor_(newContiguous)(state, input);
if(weights)
weights = THCTensor_(newContiguous)(state, weights);
if (input->nDimension == 1)
{
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
THCTensor_(resize1d)(state, output, 1);
if (p == 1)
{
cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, input->size[0],
sizeAverage,
margin
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, input->size[0],
sizeAverage,
margin
);
}
THCudaCheck(cudaGetLastError());
}
else if (input->nDimension == 2)
{
int nframe = input->size[0];
THArgCheck((target->nDimension == 1) && (target->size[0] == nframe), 3,
"inconsistent target size");
dim3 blocks(input->size[0]);
dim3 threads(MULTIMARGIN_THREADS);
if (!reduce)
{
THCTensor_(resize1d)(state, output, input->size[0]);
if (p == 1)
{
cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size[1],
false,
margin
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size[1],
false,
margin
);
}
THCudaCheck(cudaGetLastError());
}
else
{
THCTensor_(resize1d)(state, output, 1);
THCTensor *output_ = THCTensor_(newWithSize1d)(state, input->size[0]); // tmp output buffer
if (p == 1)
{
cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output_),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size[1],
sizeAverage,
margin
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output_),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
input->size[0], input->size[1],
sizeAverage,
margin
);
}
THCudaCheck(cudaGetLastError());
float sum = THCTensor_(sumall)(state, output_);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, real>::to(sum));
THCTensor_(free)(state, output_);
}
}
else
{
THError("vector or matrix expected");
}
THCTensor_(free)(state, input);
if(weights)
THCTensor_(free)(state, weights);
}
void THNN_(MultiMarginCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
bool sizeAverage,
int p,
THCTensor *weights,
accreal margin_,
bool reduce)
{
real margin = ScalarConvert<accreal, real>::to(margin_);
THCUNN_assertSameGPU(state, 3, input, gradInput, target);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
if(weights)
weights = THCTensor_(newContiguous)(state, weights);
if (input->nDimension == 1)
{
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1)
{
cunn_MultiMarginCriterion_updateGradInput_kernel<1, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, gradInput->size[0],
sizeAverage,
margin,
reduce
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateGradInput_kernel<2, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, gradInput->size[0],
sizeAverage,
margin,
reduce
);
}
THCudaCheck(cudaGetLastError());
}
else if (input->nDimension == 2)
{
int nframe = gradInput->size[0];
THArgCheck((target->nDimension == 1) && (target->size[0] == nframe), 3,
"inconsistent target size");
dim3 blocks(gradInput->size[0]);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1)
{
cunn_MultiMarginCriterion_updateGradInput_kernel<1, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, gradInput->size[1],
sizeAverage,
margin,
reduce
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateGradInput_kernel<2, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, gradInput->size[1],
sizeAverage,
margin,
reduce
);
}
THCudaCheck(cudaGetLastError());
}
else
{
THError("vector or matrix expected");
}
THCTensor_(free)(state, input);
if(weights)
THCTensor_(free)(state, weights);
}
#endif
|
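Besides the launch macro, hipify renames the runtime error API one-for-one, as this pair shows with `THCudaCheck(hipGetLastError())` versus `THCudaCheck(cudaGetLastError())`. The sketch below is illustrative rather than dataset content: `CHECK_LAST` is a hypothetical helper, and only the `hip*`/`cuda*` calls themselves are real API.

```cpp
// Illustrative only: the error-API renames visible in the row above
// (cudaError_t -> hipError_t, cudaSuccess -> hipSuccess,
//  cudaGetLastError -> hipGetLastError, cudaGetErrorString -> hipGetErrorString).
// CHECK_LAST is a made-up helper macro, not library API.
#include <cstdio>
#ifdef __HIPCC__
#include "hip/hip_runtime.h"
#define CHECK_LAST()                                                      \
  do {                                                                    \
    hipError_t err = hipGetLastError();                                   \
    if (err != hipSuccess)                                                \
      std::printf("HIP error: %s (%s:%d)\n", hipGetErrorString(err),      \
                  __FILE__, __LINE__);                                    \
  } while (0)
#else
#include <cuda_runtime.h>
#define CHECK_LAST()                                                      \
  do {                                                                    \
    cudaError_t err = cudaGetLastError();                                 \
    if (err != cudaSuccess)                                               \
      std::printf("CUDA error: %s (%s:%d)\n", cudaGetErrorString(err),    \
                  __FILE__, __LINE__);                                    \
  } while (0)
#endif

__global__ void noop(int) {}

int main() {
  // Launch, then surface any deferred launch error, mirroring the
  // THCudaCheck(...GetLastError()) pattern in the MultiMarginCriterion pair above.
#ifdef __HIPCC__
  hipLaunchKernelGGL(noop, dim3(1), dim3(1), 0, 0, 0);
  CHECK_LAST();
  hipDeviceSynchronize();
#else
  noop<<<1, 1>>>(0);
  CHECK_LAST();
  cudaDeviceSynchronize();
#endif
  return 0;
}
```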
29f3fc95322f2109e46ee345b72cb9a41e7decc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "lite/core/op_registry.h"
#include "lite/kernels/cuda/lookup_table_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
using Tensor = lite::Tensor;
template <int BlockDimX, int BlockDimY, int GridDimX, bool PaddingFlag>
__global__ void LookupTableKernel(float *output,
const float *table,
const int64_t *ids,
const int64_t N,
const int64_t K,
const int64_t D,
const int64_t padding_idx) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
float *out = output + idy * D;
const float *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<float>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += BlockDimY * GridDimX;
}
}
void LookupTableCompute::Run() {
auto &param = this->Param<param_t>();
auto &ctx = this->ctx_->template As<CUDAContext>();
auto stream = ctx.exec_stream();
const Tensor *w_t = param.W;
const Tensor *ids_t = param.Ids;
Tensor *out_t = param.Out;
int64_t padding_idx = param.padding_idx;
size_t N = w_t->dims()[0];
size_t D = w_t->dims()[1];
size_t K = ids_t->numel();
auto *w = w_t->data<float>();
auto *ids = ids_t->data<int64_t>();
auto *out = out_t->mutable_data<float>(TARGET(kCUDA));
dim3 threads(128, 8);
dim3 grids(8, 1);
if (padding_idx == -1) {
hipLaunchKernelGGL(( LookupTableKernel<128, 8, 8, false>), dim3(grids), dim3(threads), 0, stream,
out, w, ids, N, K, D, padding_idx);
} else {
hipLaunchKernelGGL(( LookupTableKernel<128, 8, 8, true>), dim3(grids), dim3(threads), 0, stream,
out, w, ids, N, K, D, padding_idx);
}
hipError_t error = hipGetLastError();
if (error != hipSuccess) LOG(INFO) << hipGetErrorString(error);
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(lookup_table,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::LookupTableCompute,
def)
.BindInput("W", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat))})
.BindInput("Ids", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt64))})
.BindOutput("Out",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat))})
.Finalize();
REGISTER_LITE_KERNEL(lookup_table_v2,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::LookupTableCompute,
def)
.BindInput("W", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat))})
.BindInput("Ids", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt64))})
.BindOutput("Out",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat))})
.Finalize();
| 29f3fc95322f2109e46ee345b72cb9a41e7decc6.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "lite/core/op_registry.h"
#include "lite/kernels/cuda/lookup_table_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
using Tensor = lite::Tensor;
template <int BlockDimX, int BlockDimY, int GridDimX, bool PaddingFlag>
__global__ void LookupTableKernel(float *output,
const float *table,
const int64_t *ids,
const int64_t N,
const int64_t K,
const int64_t D,
const int64_t padding_idx) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
float *out = output + idy * D;
const float *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<float>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += BlockDimY * GridDimX;
}
}
void LookupTableCompute::Run() {
auto &param = this->Param<param_t>();
auto &ctx = this->ctx_->template As<CUDAContext>();
auto stream = ctx.exec_stream();
const Tensor *w_t = param.W;
const Tensor *ids_t = param.Ids;
Tensor *out_t = param.Out;
int64_t padding_idx = param.padding_idx;
size_t N = w_t->dims()[0];
size_t D = w_t->dims()[1];
size_t K = ids_t->numel();
auto *w = w_t->data<float>();
auto *ids = ids_t->data<int64_t>();
auto *out = out_t->mutable_data<float>(TARGET(kCUDA));
dim3 threads(128, 8);
dim3 grids(8, 1);
if (padding_idx == -1) {
LookupTableKernel<128, 8, 8, false><<<grids, threads, 0, stream>>>(
out, w, ids, N, K, D, padding_idx);
} else {
LookupTableKernel<128, 8, 8, true><<<grids, threads, 0, stream>>>(
out, w, ids, N, K, D, padding_idx);
}
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) LOG(INFO) << cudaGetErrorString(error);
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(lookup_table,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::LookupTableCompute,
def)
.BindInput("W", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat))})
.BindInput("Ids", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt64))})
.BindOutput("Out",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat))})
.Finalize();
REGISTER_LITE_KERNEL(lookup_table_v2,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::LookupTableCompute,
def)
.BindInput("W", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat))})
.BindInput("Ids", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt64))})
.BindOutput("Out",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat))})
.Finalize();
|
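The lookup-table pair above also shows how template arguments and an explicit stream survive the rewrite: `LookupTableKernel<128, 8, 8, false><<<grids, threads, 0, stream>>>(...)` becomes `hipLaunchKernelGGL((LookupTableKernel<128, 8, 8, false>), dim3(grids), dim3(threads), 0, stream, ...)`, with the template-id wrapped in parentheses, presumably so the commas inside the template argument list are not split across macro arguments. A minimal sketch of the same pattern, with made-up names, follows.

```cpp
// Illustrative only: launching a templated kernel on an explicit stream in both dialects.
#ifdef __HIPCC__
#include "hip/hip_runtime.h"
#else
#include <cuda_runtime.h>
#endif

template <int BLOCK, bool ZERO_PAD>
__global__ void fill(float *out, float value, int n) {
  int i = BLOCK * blockIdx.x + threadIdx.x;  // BLOCK equals blockDim.x at launch
  if (i < n) out[i] = ZERO_PAD ? 0.f : value;
}

int main() {
  const int n = 4096;
  constexpr int kBlock = 128;
  const int grid = (n + kBlock - 1) / kBlock;
  float *d_out = nullptr;
#ifdef __HIPCC__
  hipStream_t stream;
  hipStreamCreate(&stream);
  hipMalloc(&d_out, n * sizeof(float));
  // Parentheses around the template-id keep its commas inside one macro argument.
  hipLaunchKernelGGL((fill<kBlock, false>), dim3(grid), dim3(kBlock), 0, stream,
                     d_out, 1.0f, n);
  hipStreamSynchronize(stream);
  hipFree(d_out);
  hipStreamDestroy(stream);
#else
  cudaStream_t stream;
  cudaStreamCreate(&stream);
  cudaMalloc(&d_out, n * sizeof(float));
  fill<kBlock, false><<<grid, kBlock, 0, stream>>>(d_out, 1.0f, n);
  cudaStreamSynchronize(stream);
  cudaFree(d_out);
  cudaStreamDestroy(stream);
#endif
  return 0;
}
```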
4ffbf503a92f07fb35dfa5ef6563f7b527a76233.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/upsample_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
inline __device__ int idx(
const int n,
const int num_channels,
const int c,
const int height,
const int width,
const int y,
const int x) {
return ((n * num_channels + c) * height + y) * width + x;
}
// input is X, output is Y
__global__ void UpsampleBilinearKernel(
const int output_size,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* X,
float* Y) {
CUDA_1D_KERNEL_LOOP(index, output_size) {
int indexTemp = index;
const int out_x = indexTemp % output_width;
indexTemp /= output_width;
const int out_y = indexTemp % output_height;
indexTemp /= output_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
const int in_y = fminf(out_y / height_scale, input_height - 1);
const int in_x = fminf(out_x / width_scale, input_width - 1);
const float rheight =
output_height > 1 ? (input_height - 1.f) / (output_height - 1.f) : 0.f;
const float rwidth =
output_width > 1 ? (input_width - 1.f) / (output_width - 1.f) : 0.f;
// Compute Y axis lambdas
const float h1r = rheight * out_y;
const int h1 = (int)h1r;
const int h1p = (h1 < input_height - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
// Compute X axis lambdas
const float w1r = rwidth * out_x;
const int w1 = (int)w1r;
const int w1p = (w1 < input_width - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
Y[index] =
(h0lambda *
(w0lambda *
X[idx(
n, num_channels, c, input_height, input_width, h1, w1)] +
w1lambda *
X[idx(
n,
num_channels,
c,
input_height,
input_width,
h1,
w1 + w1p)]) +
h1lambda *
(w0lambda *
X[idx(
n,
num_channels,
c,
input_height,
input_width,
h1 + h1p,
w1)] +
w1lambda *
X[idx(
n,
num_channels,
c,
input_height,
input_width,
h1 + h1p,
w1 + w1p)]));
}
}
// input is dY, output is dX
__global__ void UpsampleBilinearGradientKernel(
const int input_size,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* dY,
float* dX) {
CUDA_1D_KERNEL_LOOP(index, input_size) {
int indexTemp = index;
const int in_x = indexTemp % input_width;
indexTemp /= input_width;
const int in_y = indexTemp % input_height;
indexTemp /= input_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
const int out_y = fminf(in_y / height_scale, output_height - 1);
const int out_x = fminf(in_x / width_scale, output_width - 1);
const float rheight =
output_height > 1 ? (output_height - 1.f) / (input_height - 1.f) : 0.f;
const float rwidth =
output_width > 1 ? (output_width - 1.f) / (input_width - 1.f) : 0.f;
// Compute Y axis lambdas
const float h1r = rheight * in_y;
const int h1 = (int)h1r;
const int h1p = (h1 < output_height - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
// Compute X axis lambdas
const float w1r = rwidth * in_x;
const int w1 = (int)w1r;
const int w1p = (w1 < output_width - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
#if __CUDA_ARCH__ >= 350
const float dYi = __ldg(&dY[index]);
#else
const float dYi = dY[index];
#endif
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1, w1)],
h0lambda * w0lambda * dYi);
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1, w1 + w1p)],
h0lambda * w1lambda * dYi);
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1 + h1p, w1)],
h1lambda * w0lambda * dYi);
atomicAdd(
&dX[idx(
n,
num_channels,
c,
output_height,
output_width,
h1 + h1p,
w1 + w1p)],
h1lambda * w1lambda * dYi);
}
}
} // namespace
template <>
bool UpsampleBilinearOp<float, CUDAContext>::RunOnDevice() {
const auto& X = Input(0);
auto* Y = Output(0);
const auto inputDims = X.dims();
CAFFE_ENFORCE_EQ(4, inputDims.size());
const int batch_size = X.dim32(0), num_channels = X.dim32(1),
input_height = X.dim32(2), input_width = X.dim32(3);
if (InputSize() == 2) {
const auto& scales = Input(1);
CAFFE_ENFORCE_EQ(scales.ndim(), 1);
CAFFE_ENFORCE_EQ(scales.size(), 2);
float scales_data[2];
context_.CopyToCPU<float>(2, scales.data<float>(), scales_data);
height_scale_ = scales_data[0];
width_scale_ = scales_data[1];
}
int output_width = input_width * width_scale_;
int output_height = input_height * height_scale_;
Y->Resize(batch_size, num_channels, output_height, output_width);
const auto size = Y->size();
hipLaunchKernelGGL(( UpsampleBilinearKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
size,
num_channels,
input_height,
input_width,
output_height,
output_width,
height_scale_,
width_scale_,
X.data<float>(),
Y->template mutable_data<float>());
return true;
}
template <>
bool UpsampleBilinearGradientOp<float, CUDAContext>::RunOnDevice() {
const auto& dY = Input(0);
const auto& X = Input(1);
auto* dX = Output(0);
const auto inputDims = dY.dims();
CAFFE_ENFORCE_EQ(4, inputDims.size());
const int batch_size = dY.dim32(0);
const int num_channels = dY.dim32(1);
const int input_height = dY.dim32(2);
const int input_width = dY.dim32(3);
const int output_height = X.dim32(2);
const int output_width = X.dim32(3);
if (InputSize() == 3) {
const auto& scales = Input(2);
CAFFE_ENFORCE_EQ(scales.ndim(), 1);
CAFFE_ENFORCE_EQ(scales.size(), 2);
float scales_data[2];
context_.CopyToCPU<float>(2, scales.data<float>(), scales_data);
height_scale_ = scales_data[0];
width_scale_ = scales_data[1];
}
dX->Resize(batch_size, num_channels, output_height, output_width);
math::Set<float, CUDAContext>(
dX->size(), 0.0f, dX->mutable_data<float>(), &context_);
const auto size = dY.size();
hipLaunchKernelGGL(( UpsampleBilinearGradientKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
size,
num_channels,
input_height,
input_width,
output_height,
output_width,
height_scale_,
width_scale_,
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(
UpsampleBilinear,
UpsampleBilinearOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
UpsampleBilinearGradient,
UpsampleBilinearGradientOp<float, CUDAContext>);
} // namespace caffe2
| 4ffbf503a92f07fb35dfa5ef6563f7b527a76233.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/upsample_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
inline __device__ int idx(
const int n,
const int num_channels,
const int c,
const int height,
const int width,
const int y,
const int x) {
return ((n * num_channels + c) * height + y) * width + x;
}
// input is X, output is Y
__global__ void UpsampleBilinearKernel(
const int output_size,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* X,
float* Y) {
CUDA_1D_KERNEL_LOOP(index, output_size) {
int indexTemp = index;
const int out_x = indexTemp % output_width;
indexTemp /= output_width;
const int out_y = indexTemp % output_height;
indexTemp /= output_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
const int in_y = fminf(out_y / height_scale, input_height - 1);
const int in_x = fminf(out_x / width_scale, input_width - 1);
const float rheight =
output_height > 1 ? (input_height - 1.f) / (output_height - 1.f) : 0.f;
const float rwidth =
output_width > 1 ? (input_width - 1.f) / (output_width - 1.f) : 0.f;
// Compute Y axis lambdas
const float h1r = rheight * out_y;
const int h1 = (int)h1r;
const int h1p = (h1 < input_height - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
// Compute X axis lambdas
const float w1r = rwidth * out_x;
const int w1 = (int)w1r;
const int w1p = (w1 < input_width - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
Y[index] =
(h0lambda *
(w0lambda *
X[idx(
n, num_channels, c, input_height, input_width, h1, w1)] +
w1lambda *
X[idx(
n,
num_channels,
c,
input_height,
input_width,
h1,
w1 + w1p)]) +
h1lambda *
(w0lambda *
X[idx(
n,
num_channels,
c,
input_height,
input_width,
h1 + h1p,
w1)] +
w1lambda *
X[idx(
n,
num_channels,
c,
input_height,
input_width,
h1 + h1p,
w1 + w1p)]));
}
}
// input is dY, output is dX
__global__ void UpsampleBilinearGradientKernel(
const int input_size,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* dY,
float* dX) {
CUDA_1D_KERNEL_LOOP(index, input_size) {
int indexTemp = index;
const int in_x = indexTemp % input_width;
indexTemp /= input_width;
const int in_y = indexTemp % input_height;
indexTemp /= input_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
const int out_y = fminf(in_y / height_scale, output_height - 1);
const int out_x = fminf(in_x / width_scale, output_width - 1);
const float rheight =
output_height > 1 ? (output_height - 1.f) / (input_height - 1.f) : 0.f;
const float rwidth =
output_width > 1 ? (output_width - 1.f) / (input_width - 1.f) : 0.f;
// Compute Y axis lambdas
const float h1r = rheight * in_y;
const int h1 = (int)h1r;
const int h1p = (h1 < output_height - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
// Compute X axis lambdas
const float w1r = rwidth * in_x;
const int w1 = (int)w1r;
const int w1p = (w1 < output_width - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
#if __CUDA_ARCH__ >= 350
const float dYi = __ldg(&dY[index]);
#else
const float dYi = dY[index];
#endif
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1, w1)],
h0lambda * w0lambda * dYi);
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1, w1 + w1p)],
h0lambda * w1lambda * dYi);
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1 + h1p, w1)],
h1lambda * w0lambda * dYi);
atomicAdd(
&dX[idx(
n,
num_channels,
c,
output_height,
output_width,
h1 + h1p,
w1 + w1p)],
h1lambda * w1lambda * dYi);
}
}
} // namespace
template <>
bool UpsampleBilinearOp<float, CUDAContext>::RunOnDevice() {
const auto& X = Input(0);
auto* Y = Output(0);
const auto inputDims = X.dims();
CAFFE_ENFORCE_EQ(4, inputDims.size());
const int batch_size = X.dim32(0), num_channels = X.dim32(1),
input_height = X.dim32(2), input_width = X.dim32(3);
if (InputSize() == 2) {
const auto& scales = Input(1);
CAFFE_ENFORCE_EQ(scales.ndim(), 1);
CAFFE_ENFORCE_EQ(scales.size(), 2);
float scales_data[2];
context_.CopyToCPU<float>(2, scales.data<float>(), scales_data);
height_scale_ = scales_data[0];
width_scale_ = scales_data[1];
}
int output_width = input_width * width_scale_;
int output_height = input_height * height_scale_;
Y->Resize(batch_size, num_channels, output_height, output_width);
const auto size = Y->size();
UpsampleBilinearKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
size,
num_channels,
input_height,
input_width,
output_height,
output_width,
height_scale_,
width_scale_,
X.data<float>(),
Y->template mutable_data<float>());
return true;
}
template <>
bool UpsampleBilinearGradientOp<float, CUDAContext>::RunOnDevice() {
const auto& dY = Input(0);
const auto& X = Input(1);
auto* dX = Output(0);
const auto inputDims = dY.dims();
CAFFE_ENFORCE_EQ(4, inputDims.size());
const int batch_size = dY.dim32(0);
const int num_channels = dY.dim32(1);
const int input_height = dY.dim32(2);
const int input_width = dY.dim32(3);
const int output_height = X.dim32(2);
const int output_width = X.dim32(3);
if (InputSize() == 3) {
const auto& scales = Input(2);
CAFFE_ENFORCE_EQ(scales.ndim(), 1);
CAFFE_ENFORCE_EQ(scales.size(), 2);
float scales_data[2];
context_.CopyToCPU<float>(2, scales.data<float>(), scales_data);
height_scale_ = scales_data[0];
width_scale_ = scales_data[1];
}
dX->Resize(batch_size, num_channels, output_height, output_width);
math::Set<float, CUDAContext>(
dX->size(), 0.0f, dX->mutable_data<float>(), &context_);
const auto size = dY.size();
UpsampleBilinearGradientKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
size,
num_channels,
input_height,
input_width,
output_height,
output_width,
height_scale_,
width_scale_,
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(
UpsampleBilinear,
UpsampleBilinearOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
UpsampleBilinearGradient,
UpsampleBilinearGradientOp<float, CUDAContext>);
} // namespace caffe2
|
a0db17fd1af1c52e71c4d7d69fcd535f0537cb4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "contrib_ops/cuda/math/bias_softmax.h"
#include <limits>
#include <algorithm>
#include "core/providers/common.h"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cudnn_common.h"
#include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/math/binary_elementwise_ops_impl_functors.cuh"
#include "core/providers/cuda/math/softmax_impl.cuh"
#include "core/providers/cuda/shared_inc/accumulation_type.h"
using namespace onnxruntime;
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Duplicated softmax_impl.cu here
// So far attempt to use shared kernel with additional template resulted in lost performance
// Note: The intended case for 'input_bias' is the input sequence mask for transformer models
// As an additive mask, it should be zero for preserved tokens and -infty for tokens to screen
// The mask will broadcast from [batch_size, 1, 1, seq_len] to input [batch_size, num_heads, seq_len, seq_len]
// Here element_count = seq_len and bias_broadcast_size_per_batch = num_heads * seq_len
// The softmax + additive mask fusion follows NVIDIA apex's additive_masked_softmax_warp_forward
// see https://github.com/NVIDIA/apex/blob/4ef930c1c884fdca5f472ab2ce7cb9b505d26c1a/apex/contrib/csrc/multihead_attn/softmax.h
template <typename input_t, typename output_t, typename acc_t, int log2_elements>
__global__ void BiasSoftmaxWarpForward(
output_t* output,
const input_t* input,
const input_t* input_bias,
int element_count,
int batch_count,
int batch_stride,
int bias_broadcast_count_per_batch) {
// "WARP" refers to cooperative threads and might not equal 32 threads of GPU warp
// thread block is (WARP_SIZE, 128/WARP_SIZE)
constexpr int next_power_of_two = 1 << log2_elements;
constexpr int WARP_SIZE = next_power_of_two < GPU_WARP_SIZE ? next_power_of_two : GPU_WARP_SIZE;
constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
// each "WARP" (<=32) processes WARP_BATCH(one of {1,2}) batches
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
// last warp may have fewer batches
int local_batches = batch_count - first_batch;
if (local_batches > WARP_BATCH)
local_batches = WARP_BATCH;
// thread will process elements (local_index + n * warp_size) within batch
int local_idx = threadIdx.x;
// push input, input_bias output pointers to batch we need to process
input += first_batch * batch_stride + local_idx;
output += first_batch * batch_stride + local_idx;
// load from global memory and apply bias (likely an additive mask)
acc_t elements[WARP_BATCH][WARP_ITERATIONS];
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
// the bias has assumed shape [batch_size, element_count]
// .. and needs to broadcast to [batch_size, broadcast_size, element_count]
int bias_offset = (first_batch + i) / bias_broadcast_count_per_batch * batch_stride + local_idx;
int batch_element_count = (i >= local_batches) ? 0 : element_count;
#pragma unroll
for (int it = 0; it < WARP_ITERATIONS; ++it) {
int element_index = local_idx + it * WARP_SIZE;
if (element_index < batch_element_count) {
elements[i][it] = (acc_t)input[i * element_count + it * WARP_SIZE] + (acc_t)input_bias[bias_offset + it * WARP_SIZE];
} else {
elements[i][it] = -std::numeric_limits<acc_t>::infinity();
}
}
}
// find maximum value within batch for numerical stability
acc_t max_value[WARP_BATCH];
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
max_value[i] = elements[i][0];
#pragma unroll
for (int it = 1; it < WARP_ITERATIONS; ++it) {
max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it];
}
}
warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Max>(max_value);
// normalization factor Z = Sum[ exp(element_i), for element_i in batch ]
acc_t sum[WARP_BATCH]{acc_t(0.0)};
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
#pragma unroll
for (int it = 0; it < WARP_ITERATIONS; ++it) {
elements[i][it] = ::exp((acc_t)(elements[i][it] - max_value[i]));
sum[i] += elements[i][it];
}
}
warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
// write back normalized value = exp(element_i)/Z to global memory
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
if (i >= local_batches)
break;
#pragma unroll
for (int it = 0; it < WARP_ITERATIONS; ++it) {
int element_index = local_idx + it * WARP_SIZE;
if (element_index < element_count) {
output[i * element_count + it * WARP_SIZE] = elements[i][it] / sum[i];
} else {
break;
}
}
}
}
template <typename T>
void DispatchBiasSoftmaxForwardImpl(
Tensor* output_tensor,
const Tensor* input_tensor,
const Tensor* input_bias_tensor,
int element_count,
int batch_count,
int batch_stride,
int bias_broadcast_size_per_batch) {
typedef typename ToCudaType<T>::MappedType CudaT;
typedef CudaT input_t;
typedef CudaT output_t;
typedef AccumulationType_t<CudaT> acc_t;
const auto* input = reinterpret_cast<const CudaT*>(input_tensor->template Data<T>());
const auto* input_bias = reinterpret_cast<const CudaT*>(input_bias_tensor->template Data<T>());
auto* output = reinterpret_cast<CudaT*>(output_tensor->template MutableData<T>());
if (element_count == 0)
return;
int log2_elements = log2_ceil(element_count);
const int next_power_of_two = 1 << log2_elements;
// This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward.
int warp_size = ::min(next_power_of_two, GPU_WARP_SIZE);
// This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward.
int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
// use 128 threads per block to maximize gpu utilization
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / warp_size);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
dim3 threads(warp_size, warps_per_block, 1);
// Launch code would be more elegant if C++ supported FOR CONSTEXPR
switch (log2_elements) {
case 0: // 1
hipLaunchKernelGGL(( BiasSoftmaxWarpForward<input_t, output_t, acc_t, 0>)
, dim3(blocks), dim3(threads), 0, 0, output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 1: // 2
hipLaunchKernelGGL(( BiasSoftmaxWarpForward<input_t, output_t, acc_t, 1>)
, dim3(blocks), dim3(threads), 0, 0, output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 2: // 4
hipLaunchKernelGGL(( BiasSoftmaxWarpForward<input_t, output_t, acc_t, 2>)
, dim3(blocks), dim3(threads), 0, 0, output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 3: // 8
hipLaunchKernelGGL(( BiasSoftmaxWarpForward<input_t, output_t, acc_t, 3>)
, dim3(blocks), dim3(threads), 0, 0, output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 4: // 16
hipLaunchKernelGGL(( BiasSoftmaxWarpForward<input_t, output_t, acc_t, 4>)
, dim3(blocks), dim3(threads), 0, 0, output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 5: // 32
hipLaunchKernelGGL(( BiasSoftmaxWarpForward<input_t, output_t, acc_t, 5>)
, dim3(blocks), dim3(threads), 0, 0, output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 6: // 64
hipLaunchKernelGGL(( BiasSoftmaxWarpForward<input_t, output_t, acc_t, 6>)
, dim3(blocks), dim3(threads), 0, 0, output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 7: // 128
hipLaunchKernelGGL(( BiasSoftmaxWarpForward<input_t, output_t, acc_t, 7>)
, dim3(blocks), dim3(threads), 0, 0, output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 8: // 256
hipLaunchKernelGGL(( BiasSoftmaxWarpForward<input_t, output_t, acc_t, 8>)
, dim3(blocks), dim3(threads), 0, 0, output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 9: // 512
hipLaunchKernelGGL(( BiasSoftmaxWarpForward<input_t, output_t, acc_t, 9>)
, dim3(blocks), dim3(threads), 0, 0, output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 10: // 1024
hipLaunchKernelGGL(( BiasSoftmaxWarpForward<input_t, output_t, acc_t, 10>)
, dim3(blocks), dim3(threads), 0, 0, output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
default:
break;
}
}
#define SPECIALIZED_BIAS_SOFTMAX_IMPL(T) \
template void DispatchBiasSoftmaxForwardImpl<T>( \
Tensor * output_tensor, \
const Tensor* input_tensor, \
const Tensor* input_bias_tensor, \
int element_count, \
int batch_count, \
int batch_stride, \
int bias_broadcast_size_per_batch);
SPECIALIZED_BIAS_SOFTMAX_IMPL(double)
SPECIALIZED_BIAS_SOFTMAX_IMPL(float)
SPECIALIZED_BIAS_SOFTMAX_IMPL(MLFloat16)
// For large element count we fall back to explicit Add kernel + CUDA DNN library
// note: This is an unhappy path! There is no performance benefit for the fusion.
template <typename T>
void DispatchBiasSoftMaxForwardViaDnnLibraryImpl(
cudnnHandle_t cudaDnnHandle,
int element_count,
int batch_count,
int broadcast_axis,
int softmax_axis,
const onnxruntime::TensorShape& X_shape,
const onnxruntime::Tensor* X,
const onnxruntime::TensorShape& B_shape,
const onnxruntime::Tensor* B,
onnxruntime::Tensor* Y) {
typedef typename ToCudaType<T>::MappedType CudaT;
const auto* X_data = reinterpret_cast<const CudaT*>(X->template Data<T>());
const auto* B_data = reinterpret_cast<const CudaT*>(B->template Data<T>());
auto* Y_data = reinterpret_cast<CudaT*>(Y->template MutableData<T>());
// binary elementise kernel requires input pitches
TArray<int64_t> lhs_padded_strides(static_cast<int>(X_shape.NumDimensions()));
int64_t lhs_pitch = 1, rhs_pitch = 1;
for (int i = -1; i >= -(int)X_shape.NumDimensions(); i--) {
size_t positive_i = X_shape.NumDimensions() + i;
lhs_padded_strides[static_cast<int>(positive_i)] = lhs_pitch;
lhs_pitch *= X_shape[positive_i];
}
// set pitches for bias so it broadcasts along relevant dimensions
TArray<int64_t> rhs_padded_strides(static_cast<int>(X_shape.NumDimensions()));
for (int i = -1; i >= -(int)X_shape.NumDimensions(); i--) {
size_t positive_ix = X_shape.NumDimensions() + i;
size_t positive_ib = B_shape.NumDimensions() + i;
if (broadcast_axis <= positive_ix && positive_ix < softmax_axis) {
rhs_padded_strides[static_cast<int>(positive_ix)] = 0;
continue;
}
rhs_padded_strides[static_cast<int>(positive_ix)] = rhs_pitch;
rhs_pitch *= B_shape[positive_ib];
}
TArray<fast_divmod> fdm_output_strides(static_cast<int>(X_shape.NumDimensions()));
//TODO: fast_divmod only supports int32
for (int i = 0; i < fdm_output_strides.Size(); i++)
fdm_output_strides[i] = fast_divmod(static_cast<int>(lhs_padded_strides[i]));
fast_divmod fdm_H, fdm_C;
// invoke elementwise add with broadcast kernel
::onnxruntime::cuda::BinaryElementWiseImpl(
(int32_t)X_shape.NumDimensions(),
&lhs_padded_strides,
X_data,
&rhs_padded_strides,
B_data,
&fdm_output_strides,
fdm_H,
fdm_C,
Y_data,
OP_Add<CudaT, CudaT, CudaT>(),
(size_t)X_shape.Size());
// invoke cuda DNN library for Y = softmax(X)
std::vector<int64_t> dims({batch_count, 1, 1, element_count});
const auto alpha = Consts<CudaT>::One;
const auto beta = Consts<CudaT>::Zero;
CudnnTensor input_tensor, output_tensor;
input_tensor.Set(dims, CudnnTensor::GetDataType<CudaT>());
output_tensor.Set(dims, CudnnTensor::GetDataType<CudaT>());
cudnnSoftmaxForward(
cudaDnnHandle,
CUDNN_SOFTMAX_ACCURATE,
CUDNN_SOFTMAX_MODE_INSTANCE,
&alpha,
input_tensor,
Y_data,
&beta,
output_tensor,
Y_data);
}
#define SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(T) \
template void DispatchBiasSoftMaxForwardViaDnnLibraryImpl<T>( \
cudnnHandle_t cudaDnnHandle, \
int element_count, \
int batch_count, \
int broadcast_axis, \
int softmax_axis, \
const onnxruntime::TensorShape& X_shape, \
const Tensor* X_data, \
const onnxruntime::TensorShape& B_shape, \
const Tensor* B_data, \
Tensor* Y_data);
SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(double)
SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(float)
SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(MLFloat16)
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| a0db17fd1af1c52e71c4d7d69fcd535f0537cb4b.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "contrib_ops/cuda/math/bias_softmax.h"
#include <limits>
#include <algorithm>
#include "core/providers/common.h"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cudnn_common.h"
#include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/math/binary_elementwise_ops_impl_functors.cuh"
#include "core/providers/cuda/math/softmax_impl.cuh"
#include "core/providers/cuda/shared_inc/accumulation_type.h"
using namespace onnxruntime;
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Duplicated softmax_impl.cu here
// So far attempt to use shared kernel with additional template resulted in lost performance
// Note: The intended case for 'input_bias' is the input sequence mask for transformer models
// As an additive mask, it should be zero for preserved tokens and -infty for tokens to screen
// The mask will broadcast from [batch_size, 1, 1, seq_len] to input [batch_size, num_heads, seq_len, seq_len]
// Here element_count = seq_len and bias_broadcast_size_per_batch = num_heads * seq_len
// The softmax + additive mask fusion follows NVIDIA apex's additive_masked_softmax_warp_forward
// see https://github.com/NVIDIA/apex/blob/4ef930c1c884fdca5f472ab2ce7cb9b505d26c1a/apex/contrib/csrc/multihead_attn/softmax.h
template <typename input_t, typename output_t, typename acc_t, int log2_elements>
__global__ void BiasSoftmaxWarpForward(
output_t* output,
const input_t* input,
const input_t* input_bias,
int element_count,
int batch_count,
int batch_stride,
int bias_broadcast_count_per_batch) {
// "WARP" refers to cooperative threads and might not equal 32 threads of GPU warp
// thread block is (WARP_SIZE, 128/WARP_SIZE)
constexpr int next_power_of_two = 1 << log2_elements;
constexpr int WARP_SIZE = next_power_of_two < GPU_WARP_SIZE ? next_power_of_two : GPU_WARP_SIZE;
constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
// each "WARP" (<=32) processes WARP_BATCH(one of {1,2}) batches
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
// last warp may have fewer batches
int local_batches = batch_count - first_batch;
if (local_batches > WARP_BATCH)
local_batches = WARP_BATCH;
// thread will process elements (local_index + n * warp_size) within batch
int local_idx = threadIdx.x;
// push input, input_bias output pointers to batch we need to process
input += first_batch * batch_stride + local_idx;
output += first_batch * batch_stride + local_idx;
// load from global memory and apply bias (likely an additive mask)
acc_t elements[WARP_BATCH][WARP_ITERATIONS];
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
// the bias has assumed shape [batch_size, element_count]
// .. and needs to broadcast to [batch_size, broadcast_size, element_count]
int bias_offset = (first_batch + i) / bias_broadcast_count_per_batch * batch_stride + local_idx;
int batch_element_count = (i >= local_batches) ? 0 : element_count;
#pragma unroll
for (int it = 0; it < WARP_ITERATIONS; ++it) {
int element_index = local_idx + it * WARP_SIZE;
if (element_index < batch_element_count) {
elements[i][it] = (acc_t)input[i * element_count + it * WARP_SIZE] + (acc_t)input_bias[bias_offset + it * WARP_SIZE];
} else {
elements[i][it] = -std::numeric_limits<acc_t>::infinity();
}
}
}
// find maximum value within batch for numerical stability
acc_t max_value[WARP_BATCH];
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
max_value[i] = elements[i][0];
#pragma unroll
for (int it = 1; it < WARP_ITERATIONS; ++it) {
max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it];
}
}
warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Max>(max_value);
// normalization factor Z = Sum[ exp(element_i), for element_i in batch ]
acc_t sum[WARP_BATCH]{acc_t(0.0)};
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
#pragma unroll
for (int it = 0; it < WARP_ITERATIONS; ++it) {
elements[i][it] = std::exp((acc_t)(elements[i][it] - max_value[i]));
sum[i] += elements[i][it];
}
}
warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
// write back normalized value = exp(element_i)/Z to global memory
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
if (i >= local_batches)
break;
#pragma unroll
for (int it = 0; it < WARP_ITERATIONS; ++it) {
int element_index = local_idx + it * WARP_SIZE;
if (element_index < element_count) {
output[i * element_count + it * WARP_SIZE] = elements[i][it] / sum[i];
} else {
break;
}
}
}
}
template <typename T>
void DispatchBiasSoftmaxForwardImpl(
Tensor* output_tensor,
const Tensor* input_tensor,
const Tensor* input_bias_tensor,
int element_count,
int batch_count,
int batch_stride,
int bias_broadcast_size_per_batch) {
typedef typename ToCudaType<T>::MappedType CudaT;
typedef CudaT input_t;
typedef CudaT output_t;
typedef AccumulationType_t<CudaT> acc_t;
const auto* input = reinterpret_cast<const CudaT*>(input_tensor->template Data<T>());
const auto* input_bias = reinterpret_cast<const CudaT*>(input_bias_tensor->template Data<T>());
auto* output = reinterpret_cast<CudaT*>(output_tensor->template MutableData<T>());
if (element_count == 0)
return;
int log2_elements = log2_ceil(element_count);
const int next_power_of_two = 1 << log2_elements;
// This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward.
int warp_size = std::min(next_power_of_two, GPU_WARP_SIZE);
// This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward.
int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
// use 128 threads per block to maximize gpu utilization
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / warp_size);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
dim3 threads(warp_size, warps_per_block, 1);
// Launch code would be more elegant if C++ supported FOR CONSTEXPR
switch (log2_elements) {
case 0: // 1
BiasSoftmaxWarpForward<input_t, output_t, acc_t, 0>
<<<blocks, threads, 0>>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 1: // 2
BiasSoftmaxWarpForward<input_t, output_t, acc_t, 1>
<<<blocks, threads, 0>>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 2: // 4
BiasSoftmaxWarpForward<input_t, output_t, acc_t, 2>
<<<blocks, threads, 0>>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 3: // 8
BiasSoftmaxWarpForward<input_t, output_t, acc_t, 3>
<<<blocks, threads, 0>>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 4: // 16
BiasSoftmaxWarpForward<input_t, output_t, acc_t, 4>
<<<blocks, threads, 0>>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 5: // 32
BiasSoftmaxWarpForward<input_t, output_t, acc_t, 5>
<<<blocks, threads, 0>>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 6: // 64
BiasSoftmaxWarpForward<input_t, output_t, acc_t, 6>
<<<blocks, threads, 0>>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 7: // 128
BiasSoftmaxWarpForward<input_t, output_t, acc_t, 7>
<<<blocks, threads, 0>>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 8: // 256
BiasSoftmaxWarpForward<input_t, output_t, acc_t, 8>
<<<blocks, threads, 0>>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 9: // 512
BiasSoftmaxWarpForward<input_t, output_t, acc_t, 9>
<<<blocks, threads, 0>>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 10: // 1024
BiasSoftmaxWarpForward<input_t, output_t, acc_t, 10>
<<<blocks, threads, 0>>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
default:
break;
}
}
#define SPECIALIZED_BIAS_SOFTMAX_IMPL(T) \
template void DispatchBiasSoftmaxForwardImpl<T>( \
Tensor * output_tensor, \
const Tensor* input_tensor, \
const Tensor* input_bias_tensor, \
int element_count, \
int batch_count, \
int batch_stride, \
int bias_broadcast_size_per_batch);
SPECIALIZED_BIAS_SOFTMAX_IMPL(double)
SPECIALIZED_BIAS_SOFTMAX_IMPL(float)
SPECIALIZED_BIAS_SOFTMAX_IMPL(MLFloat16)
// For large element count we fall back to explicit Add kernel + CUDA DNN library
// note: This is an unhappy path! There is no performance benefit for the fusion.
template <typename T>
void DispatchBiasSoftMaxForwardViaDnnLibraryImpl(
cudnnHandle_t cudaDnnHandle,
int element_count,
int batch_count,
int broadcast_axis,
int softmax_axis,
const onnxruntime::TensorShape& X_shape,
const onnxruntime::Tensor* X,
const onnxruntime::TensorShape& B_shape,
const onnxruntime::Tensor* B,
onnxruntime::Tensor* Y) {
typedef typename ToCudaType<T>::MappedType CudaT;
const auto* X_data = reinterpret_cast<const CudaT*>(X->template Data<T>());
const auto* B_data = reinterpret_cast<const CudaT*>(B->template Data<T>());
auto* Y_data = reinterpret_cast<CudaT*>(Y->template MutableData<T>());
  // binary elementwise kernel requires input pitches
TArray<int64_t> lhs_padded_strides(static_cast<int>(X_shape.NumDimensions()));
int64_t lhs_pitch = 1, rhs_pitch = 1;
for (int i = -1; i >= -(int)X_shape.NumDimensions(); i--) {
size_t positive_i = X_shape.NumDimensions() + i;
lhs_padded_strides[static_cast<int>(positive_i)] = lhs_pitch;
lhs_pitch *= X_shape[positive_i];
}
// set pitches for bias so it broadcasts along relevant dimensions
TArray<int64_t> rhs_padded_strides(static_cast<int>(X_shape.NumDimensions()));
for (int i = -1; i >= -(int)X_shape.NumDimensions(); i--) {
size_t positive_ix = X_shape.NumDimensions() + i;
size_t positive_ib = B_shape.NumDimensions() + i;
if (broadcast_axis <= positive_ix && positive_ix < softmax_axis) {
rhs_padded_strides[static_cast<int>(positive_ix)] = 0;
continue;
}
rhs_padded_strides[static_cast<int>(positive_ix)] = rhs_pitch;
rhs_pitch *= B_shape[positive_ib];
}
TArray<fast_divmod> fdm_output_strides(static_cast<int>(X_shape.NumDimensions()));
//TODO: fast_divmod only supports int32
for (int i = 0; i < fdm_output_strides.Size(); i++)
fdm_output_strides[i] = fast_divmod(static_cast<int>(lhs_padded_strides[i]));
fast_divmod fdm_H, fdm_C;
// invoke elementwise add with broadcast kernel
::onnxruntime::cuda::BinaryElementWiseImpl(
(int32_t)X_shape.NumDimensions(),
&lhs_padded_strides,
X_data,
&rhs_padded_strides,
B_data,
&fdm_output_strides,
fdm_H,
fdm_C,
Y_data,
OP_Add<CudaT, CudaT, CudaT>(),
(size_t)X_shape.Size());
// invoke cuda DNN library for Y = softmax(X)
std::vector<int64_t> dims({batch_count, 1, 1, element_count});
const auto alpha = Consts<CudaT>::One;
const auto beta = Consts<CudaT>::Zero;
CudnnTensor input_tensor, output_tensor;
input_tensor.Set(dims, CudnnTensor::GetDataType<CudaT>());
output_tensor.Set(dims, CudnnTensor::GetDataType<CudaT>());
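  // The softmax is computed in place on Y, which already holds X + B from the
  // elementwise Add above.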
cudnnSoftmaxForward(
cudaDnnHandle,
CUDNN_SOFTMAX_ACCURATE,
CUDNN_SOFTMAX_MODE_INSTANCE,
&alpha,
input_tensor,
Y_data,
&beta,
output_tensor,
Y_data);
}
#define SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(T) \
template void DispatchBiasSoftMaxForwardViaDnnLibraryImpl<T>( \
cudnnHandle_t cudaDnnHandle, \
int element_count, \
int batch_count, \
int broadcast_axis, \
int softmax_axis, \
const onnxruntime::TensorShape& X_shape, \
const Tensor* X_data, \
const onnxruntime::TensorShape& B_shape, \
const Tensor* B_data, \
Tensor* Y_data);
SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(double)
SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(float)
SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(MLFloat16)
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
62a6588f527960756c7e85439a4d647c4510a612.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file proj_app.cu
*
* @brief Simple Gunrock Application
*/
#include <gunrock/gunrock.h>
#include <gunrock/util/test_utils.cuh>
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#include <gunrock/app/proj/proj_enactor.cuh>
#include <gunrock/app/proj/proj_test.cuh>
namespace gunrock {
namespace app {
namespace proj {
hipError_t UseParameters(util::Parameters ¶meters) {
hipError_t retval = hipSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
return retval;
}
/**
* @brief Run proj tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return hipError_t error message(s), if any
*/
template <typename GraphT>
hipError_t RunTests(util::Parameters ¶meters, GraphT &graph,
typename GraphT::ValueT *ref_projections,
util::Location target) {
hipError_t retval = hipSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
bool quick = parameters.Get<bool>("quick");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("proj", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
ValueT *h_projections = new ValueT[graph.nodes * graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(target));
GUARD_CU(enactor.Reset(target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_projections));
SizeT num_errors =
Validate_Results(parameters, graph, h_projections,
quick ? NULL : ref_projections, false);
}
}
cpu_timer.Start();
if (validation == "last") {
GUARD_CU(problem.Extract(h_projections));
SizeT num_errors = Validate_Results(parameters, graph, h_projections,
quick ? NULL : ref_projections, false);
}
// compute running statistics
// TODO: change NULL to problem specific per-vertex visited marker, e.g.
// h_distances info.ComputeTraversalStats(enactor, (VertexT*)NULL);
// //Display_Memory_Usage(problem);
// #ifdef ENABLE_PERFORMANCE_PROFILING
// //Display_Performance_Profiling(&enactor);
// #endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_projections;
h_projections = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace proj
} // namespace app
} // namespace gunrock
// ===========================================================================================
// ========================= CODE BELOW THIS LINE NOT NEEDED FOR TESTS
// =======================
// ===========================================================================================
// /*
// * @brief Entry of gunrock_template function
// * @tparam GraphT Type of the graph
// * @tparam ValueT Type of the distances
// * @param[in] parameters Execution parameters
// * @param[in] graph Input graph
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// */
// template <typename GraphT, typename ValueT = typename GraphT::ValueT>
// double gunrock_Template(
// gunrock::util::Parameters ¶meters,
// GraphT &graph
// // TODO: add problem specific outputs, e.g.:
// //ValueT **distances
// )
// {
// typedef typename GraphT::VertexT VertexT;
// typedef gunrock::app::Template::Problem<GraphT > ProblemT;
// typedef gunrock::app::Template::Enactor<ProblemT> EnactorT;
// gunrock::util::CpuTimer cpu_timer;
// gunrock::util::Location target = gunrock::util::DEVICE;
// double total_time = 0;
// if (parameters.UseDefault("quiet"))
// parameters.Set("quiet", true);
// // Allocate problem and enactor on GPU, and initialize them
// ProblemT problem(parameters);
// EnactorT enactor;
// problem.Init(graph , target);
// enactor.Init(problem, target);
// int num_runs = parameters.Get<int>("num-runs");
// // TODO: get problem specific inputs, e.g.:
// // std::vector<VertexT> srcs =
// parameters.Get<std::vector<VertexT>>("srcs");
// // int num_srcs = srcs.size();
// for (int run_num = 0; run_num < num_runs; ++run_num)
// {
// // TODO: problem specific inputs, e.g.:
// // int src_num = run_num % num_srcs;
// // VertexT src = srcs[src_num];
// problem.Reset(/*src,*/ target);
// enactor.Reset(/*src,*/ target);
// cpu_timer.Start();
// enactor.Enact(/*src*/);
// cpu_timer.Stop();
// total_time += cpu_timer.ElapsedMillis();
// // TODO: extract problem specific data, e.g.:
// problem.Extract(/*distances[src_num]*/);
// }
// enactor.Release(target);
// problem.Release(target);
// // TODO: problem specific clean ups, e.g.:
// // srcs.clear();
// return total_time;
// }
// * @brief Simple interface take in graph as CSR format
// * @param[in] num_nodes Number of vertices in the input graph
// * @param[in] num_edges Number of edges in the input graph
// * @param[in] row_offsets CSR-formatted graph input row offsets
// * @param[in] col_indices CSR-formatted graph input column indices
// * @param[in] edge_values CSR-formatted graph input edge weights
// * @param[in] num_runs Number of runs to perform SSSP
// * @param[in] sources Sources to begin traverse, one for each run
// * @param[in] mark_preds Whether to output predecessor info
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// template <
// typename VertexT = int,
// typename SizeT = int,
// typename GValueT = unsigned int,
// typename TValueT = GValueT>
// float Template(
// const SizeT num_nodes,
// const SizeT num_edges,
// const SizeT *row_offsets,
// const VertexT *col_indices,
// const GValueT *edge_values,
// const int num_runs
// // TODO: add problem specific inputs and outputs, e.g.:
// // VertexT *sources,
// // SSSPValueT **distances
// )
// {
// // TODO: change to other graph representation, if not using CSR
// typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
// gunrock::graph::HAS_EDGE_VALUES | gunrock::graph::HAS_CSR>
// GraphT;
// typedef typename GraphT::CsrT CsrT;
// // Setup parameters
// gunrock::util::Parameters parameters("Template");
// gunrock::graphio::UseParameters(parameters);
// gunrock::app::Template::UseParameters(parameters);
// gunrock::app::UseParameters_test(parameters);
// parameters.Parse_CommandLine(0, NULL);
// parameters.Set("graph-type", "by-pass");
// parameters.Set("num-runs", num_runs);
// // TODO: problem specific inputs, e.g.:
// // std::vector<VertexT> srcs;
// // for (int i = 0; i < num_runs; i ++)
// // srcs.push_back(sources[i]);
// // parameters.Set("srcs", srcs);
// bool quiet = parameters.Get<bool>("quiet");
// GraphT graph;
// // Assign pointers into gunrock graph format
// // TODO: change to other graph representation, if not using CSR
// graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
// graph.CsrT::row_offsets .SetPointer(row_offsets, num_nodes + 1,
// gunrock::util::HOST); graph.CsrT::column_indices.SetPointer(col_indices,
// num_edges, gunrock::util::HOST); graph.CsrT::edge_values
// .SetPointer(edge_values, num_edges, gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
// gunrock::graphio::LoadGraph(parameters, graph);
// // Run the Template
// // TODO: add problem specific outputs, e.g.
// double elapsed_time = gunrock_Template(parameters, graph /*,
// distances*/);
// // Cleanup
// graph.Release();
// // TODO: problem specific cleanup
// // srcs.clear();
// return elapsed_time;
// }
// // Leave this at the end of the file
// // Local Variables:
// // mode:c++
// // c-file-style: "NVIDIA"
// // End:
| 62a6588f527960756c7e85439a4d647c4510a612.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file proj_app.cu
*
* @brief Simple Gunrock Application
*/
#include <gunrock/gunrock.h>
#include <gunrock/util/test_utils.cuh>
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#include <gunrock/app/proj/proj_enactor.cuh>
#include <gunrock/app/proj/proj_test.cuh>
namespace gunrock {
namespace app {
namespace proj {
cudaError_t UseParameters(util::Parameters ¶meters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
return retval;
}
/**
* @brief Run proj tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return cudaError_t error message(s), if any
*/
template <typename GraphT>
cudaError_t RunTests(util::Parameters ¶meters, GraphT &graph,
typename GraphT::ValueT *ref_projections,
util::Location target) {
cudaError_t retval = cudaSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
bool quick = parameters.Get<bool>("quick");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("proj", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
ValueT *h_projections = new ValueT[graph.nodes * graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(target));
GUARD_CU(enactor.Reset(target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_projections));
SizeT num_errors =
Validate_Results(parameters, graph, h_projections,
quick ? NULL : ref_projections, false);
}
}
cpu_timer.Start();
if (validation == "last") {
GUARD_CU(problem.Extract(h_projections));
SizeT num_errors = Validate_Results(parameters, graph, h_projections,
quick ? NULL : ref_projections, false);
}
// compute running statistics
// TODO: change NULL to problem specific per-vertex visited marker, e.g.
// h_distances info.ComputeTraversalStats(enactor, (VertexT*)NULL);
// //Display_Memory_Usage(problem);
// #ifdef ENABLE_PERFORMANCE_PROFILING
// //Display_Performance_Profiling(&enactor);
// #endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_projections;
h_projections = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace proj
} // namespace app
} // namespace gunrock
// ===========================================================================================
// ========================= CODE BELOW THIS LINE NOT NEEDED FOR TESTS
// =======================
// ===========================================================================================
// /*
// * @brief Entry of gunrock_template function
// * @tparam GraphT Type of the graph
// * @tparam ValueT Type of the distances
// * @param[in] parameters Execution parameters
// * @param[in] graph Input graph
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// */
// template <typename GraphT, typename ValueT = typename GraphT::ValueT>
// double gunrock_Template(
// gunrock::util::Parameters ¶meters,
// GraphT &graph
// // TODO: add problem specific outputs, e.g.:
// //ValueT **distances
// )
// {
// typedef typename GraphT::VertexT VertexT;
// typedef gunrock::app::Template::Problem<GraphT > ProblemT;
// typedef gunrock::app::Template::Enactor<ProblemT> EnactorT;
// gunrock::util::CpuTimer cpu_timer;
// gunrock::util::Location target = gunrock::util::DEVICE;
// double total_time = 0;
// if (parameters.UseDefault("quiet"))
// parameters.Set("quiet", true);
// // Allocate problem and enactor on GPU, and initialize them
// ProblemT problem(parameters);
// EnactorT enactor;
// problem.Init(graph , target);
// enactor.Init(problem, target);
// int num_runs = parameters.Get<int>("num-runs");
// // TODO: get problem specific inputs, e.g.:
// // std::vector<VertexT> srcs =
// parameters.Get<std::vector<VertexT>>("srcs");
// // int num_srcs = srcs.size();
// for (int run_num = 0; run_num < num_runs; ++run_num)
// {
// // TODO: problem specific inputs, e.g.:
// // int src_num = run_num % num_srcs;
// // VertexT src = srcs[src_num];
// problem.Reset(/*src,*/ target);
// enactor.Reset(/*src,*/ target);
// cpu_timer.Start();
// enactor.Enact(/*src*/);
// cpu_timer.Stop();
// total_time += cpu_timer.ElapsedMillis();
// // TODO: extract problem specific data, e.g.:
// problem.Extract(/*distances[src_num]*/);
// }
// enactor.Release(target);
// problem.Release(target);
// // TODO: problem specific clean ups, e.g.:
// // srcs.clear();
// return total_time;
// }
// * @brief Simple interface take in graph as CSR format
// * @param[in] num_nodes Number of vertices in the input graph
// * @param[in] num_edges Number of edges in the input graph
// * @param[in] row_offsets CSR-formatted graph input row offsets
// * @param[in] col_indices CSR-formatted graph input column indices
// * @param[in] edge_values CSR-formatted graph input edge weights
// * @param[in] num_runs Number of runs to perform SSSP
// * @param[in] sources Sources to begin traverse, one for each run
// * @param[in] mark_preds Whether to output predecessor info
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// template <
// typename VertexT = int,
// typename SizeT = int,
// typename GValueT = unsigned int,
// typename TValueT = GValueT>
// float Template(
// const SizeT num_nodes,
// const SizeT num_edges,
// const SizeT *row_offsets,
// const VertexT *col_indices,
// const GValueT *edge_values,
// const int num_runs
// // TODO: add problem specific inputs and outputs, e.g.:
// // VertexT *sources,
// // SSSPValueT **distances
// )
// {
// // TODO: change to other graph representation, if not using CSR
// typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
// gunrock::graph::HAS_EDGE_VALUES | gunrock::graph::HAS_CSR>
// GraphT;
// typedef typename GraphT::CsrT CsrT;
// // Setup parameters
// gunrock::util::Parameters parameters("Template");
// gunrock::graphio::UseParameters(parameters);
// gunrock::app::Template::UseParameters(parameters);
// gunrock::app::UseParameters_test(parameters);
// parameters.Parse_CommandLine(0, NULL);
// parameters.Set("graph-type", "by-pass");
// parameters.Set("num-runs", num_runs);
// // TODO: problem specific inputs, e.g.:
// // std::vector<VertexT> srcs;
// // for (int i = 0; i < num_runs; i ++)
// // srcs.push_back(sources[i]);
// // parameters.Set("srcs", srcs);
// bool quiet = parameters.Get<bool>("quiet");
// GraphT graph;
// // Assign pointers into gunrock graph format
// // TODO: change to other graph representation, if not using CSR
// graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
// graph.CsrT::row_offsets .SetPointer(row_offsets, num_nodes + 1,
// gunrock::util::HOST); graph.CsrT::column_indices.SetPointer(col_indices,
// num_edges, gunrock::util::HOST); graph.CsrT::edge_values
// .SetPointer(edge_values, num_edges, gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
// gunrock::graphio::LoadGraph(parameters, graph);
// // Run the Template
// // TODO: add problem specific outputs, e.g.
// double elapsed_time = gunrock_Template(parameters, graph /*,
// distances*/);
// // Cleanup
// graph.Release();
// // TODO: problem specific cleanup
// // srcs.clear();
// return elapsed_time;
// }
// // Leave this at the end of the file
// // Local Variables:
// // mode:c++
// // c-file-style: "NVIDIA"
// // End:
|
b5cc12f0f57ed7de7314243e5233930403374fb0.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <cstdlib>
#define cout std::cout
#define endl std::endl
// Minimal error-check helper: cudaErrorCheck is used below but never defined in
// this file, so an assumed definition is provided here to make the program compile.
#define cudaErrorCheck(call) do { hipError_t e_ = (call); if (e_ != hipSuccess) { \
    std::cerr << "HIP error: " << hipGetErrorString(e_) << "\n"; std::exit(EXIT_FAILURE); } } while (0)
int main(void)
{
hipDeviceProp_t prop;
int count;
cudaErrorCheck(hipGetDeviceCount(&count));
for(int i=0;i<count;i++)
{
cout<<"Printing details about device "<<i<<endl;
cudaErrorCheck(hipGetDeviceProperties(&prop,i));
cout<<"Name: "<<prop.name<<endl;
cout<<"Total Global Memory: "<<prop.totalGlobalMem<<endl;
cout<<"Registers per block: "<<prop.regsPerBlock<<endl;
cout<<"Warp Size: "<<prop.warpSize<<endl;
cout<<"Max Threads Per Block: "<<prop.maxThreadsPerBlock<<endl;
cout<<"Max Thread Dimension: "<<prop.maxThreadsDim[0]<<", "<<prop.maxThreadsDim[1]<<", "<<prop.maxThreadsDim[2]<<", "<<endl;
cout<<"Max Grid Size: "<<prop.maxGridSize[0]<<", "<<prop.maxGridSize[1]<<", "<<prop.maxGridSize[2]<<", "<<endl;
cout<<"Multi Processor Count: "<<prop.multiProcessorCount<<endl;
}
} | b5cc12f0f57ed7de7314243e5233930403374fb0.cu | #include <iostream>
#include <cuda_runtime.h>
#include <cstdlib>
#define cout std::cout
#define endl std::endl
// Minimal error-check helper: cudaErrorCheck is used below but never defined in
// this file, so an assumed definition is provided here to make the program compile.
#define cudaErrorCheck(call) do { cudaError_t e_ = (call); if (e_ != cudaSuccess) { \
    std::cerr << "CUDA error: " << cudaGetErrorString(e_) << "\n"; std::exit(EXIT_FAILURE); } } while (0)
int main(void)
{
cudaDeviceProp prop;
int count;
cudaErrorCheck(cudaGetDeviceCount(&count));
for(int i=0;i<count;i++)
{
cout<<"Printing details about device "<<i<<endl;
cudaErrorCheck(cudaGetDeviceProperties(&prop,i));
cout<<"Name: "<<prop.name<<endl;
cout<<"Total Global Memory: "<<prop.totalGlobalMem<<endl;
cout<<"Registers per block: "<<prop.regsPerBlock<<endl;
cout<<"Warp Size: "<<prop.warpSize<<endl;
cout<<"Max Threads Per Block: "<<prop.maxThreadsPerBlock<<endl;
cout<<"Max Thread Dimension: "<<prop.maxThreadsDim[0]<<", "<<prop.maxThreadsDim[1]<<", "<<prop.maxThreadsDim[2]<<", "<<endl;
cout<<"Max Grid Size: "<<prop.maxGridSize[0]<<", "<<prop.maxGridSize[1]<<", "<<prop.maxGridSize[2]<<", "<<endl;
cout<<"Multi Processor Count: "<<prop.multiProcessorCount<<endl;
}
} |
87861448c9063617c808410f227845675327ce86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mmreader.hpp"
#include <iostream>
#include <math.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#define BLOCK 32
#define SUBSIZE 4096
struct param
{
struct sparse_mtx *A;
struct dense_mtx *B;
struct dense_mtx *C;
int start;
int end;
};
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
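// Each thread computes one element C[i][j] of the CSR(A) x dense(B) product for
// the current row tile; Arow/Acol/Aval hold only a sub-range of A, so the row
// offsets are rebased by Arow[0].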
__global__
void cudaSparseMult(int32_t *Arow, int32_t *Acol, float *Aval, float *Bval, float *Cval, uint32_t size, uint32_t ncol)
{
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < size) && (j < ncol))
{
int32_t start = Arow[i] - Arow[0];
int32_t end = Arow[i + 1] - Arow[0];
for (int32_t k = start; k < end; k++)
{
int32_t col = Acol[k];
float val = Aval[k];
Cval[ncol * i + j] += val * Bval[ncol * col + j];
}
}
}
bool
SCsrMatrixfromFile(struct sparse_mtx *A, const char* filePath)
{
// Check that the file format is matrix market; the only format we can read right now
// This is not a complete solution, and fails for directories with file names etc...
// TODO: Should we use boost filesystem?
std::string strPath( filePath );
if( strPath.find_last_of( '.' ) != std::string::npos )
{
std::string ext = strPath.substr( strPath.find_last_of( '.' ) + 1 );
if( ext != "mtx" )
{
std::cout << "Reading file name error" << std::endl;
return false;
}
}
else
return false;
// Read data from a file on disk into buffers
// Data is read natively as COO format with the reader
MatrixMarketReader mm_reader;
if( mm_reader.MMReadFormat(filePath) )
return false;
// JPA: Shouldn't that just be an assertion check? It seems to me that
// the user have to call clsparseHeaderfromFile before calling this function,
// otherwise the whole pCsrMatrix will be broken;
A->nrow = mm_reader.GetNumRows( );
A->ncol = mm_reader.GetNumCols( );
A->nnze = mm_reader.GetNumNonZeroes( );
A->row = (int32_t *)malloc((A->nrow + 1) * sizeof(int32_t));
A->val = (float *)malloc(A->nnze * sizeof(float));
A->col = (int32_t *)malloc(A->nnze * sizeof(int32_t));
if(A->row == NULL || A->col == NULL || A->val == NULL)
{
if(A->row == NULL)
free((void *)A->row);
if(A->col == NULL)
free((void *)A->col);
if(A->val == NULL)
free((void *)A->val);
return false;
}
// The following section of code converts the sparse format from COO to CSR
Coordinate* coords = mm_reader.GetUnsymCoordinates( );
std::sort( coords, coords + A->nnze, CoordinateCompare );
int32_t current_row = 1;
A->row[ 0 ] = 0;
for (int32_t i = 0; i < A->nnze; i++)
{
A->col[ i ] = coords[ i ].y;
A->val[ i ] = coords[ i ].val;
while( coords[ i ].x >= current_row )
A->row[ current_row++ ] = i;
}
A->row[ current_row ] = A->nnze;
while( current_row <= A->nrow )
A->row[ current_row++ ] = A->nnze;
return true;
}
void multiply_single(struct sparse_mtx *A, struct dense_mtx *B, struct dense_mtx *C)
{
C->nrow = A->nrow;
C->ncol = B->ncol;
C->val = (float *)calloc(C->nrow * C->ncol, sizeof(float));
for (int row = 0; row < A->nrow; row++)
{
int32_t start = A->row[row];
int32_t end = A->row[row + 1];
if (start == A->nnze)
break;
for (int i = start; i < end; i++)
{
int32_t col = A->col[i];
float val = A->val[i];
for (int j = 0; j < B->ncol; j++)
C->val[C->ncol * row + j] += val * B->val[B->ncol * col + j];
}
}
}
void multiply_cuda(struct sparse_mtx *A, struct dense_mtx *B, struct dense_mtx *C)
{
C->nrow = A->nrow;
C->ncol = B->ncol;
C->val = (float *)calloc(C->nrow * C->ncol, sizeof(float));
int32_t *Acol, *Arow;
float *Aval, *Bval, *Cval;
dim3 dimBlock(BLOCK, BLOCK);
int32_t loop_num = B->ncol;
int32_t grid_y = (loop_num / BLOCK) + ((loop_num % BLOCK == 0) ? 0 : 1);
gpuErrchk( hipMalloc(&Bval, sizeof(float) * B->ncol * B->nrow) );
gpuErrchk( hipMemcpy(Bval, B->val, sizeof(float) * B->ncol * B->nrow, hipMemcpyHostToDevice) );
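    // Process A in tiles of SUBSIZE rows so that only one slice of A and C has
    // to reside on the device at a time; B stays resident for all tiles.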
for (uint32_t row = 0; row < A->nrow; row += SUBSIZE)
{
uint32_t size = ((A->nrow - row) < SUBSIZE) ? (A->nrow - row) : SUBSIZE;
int32_t grid_x = (size / BLOCK) + ((size % BLOCK == 0) ? 0 : 1);
gpuErrchk( hipMalloc(&Cval, sizeof(float) * C->ncol * size) );
gpuErrchk( hipMemset(Cval, 0, sizeof(float) * C->ncol * size) );
gpuErrchk( hipMalloc(&Arow, sizeof(int32_t) * (size + 1)) );
gpuErrchk( hipMemcpy(Arow, &A->row[row], sizeof(int32_t) * (size + 1), hipMemcpyHostToDevice) );
gpuErrchk( hipMalloc(&Acol, sizeof(int32_t) * (A->row[row + size] - A->row[row])) );
gpuErrchk( hipMemcpy(Acol, &A->col[A->row[row]], sizeof(int32_t) * (A->row[row + size] - A->row[row]), hipMemcpyHostToDevice) );
        gpuErrchk( hipMalloc(&Aval, sizeof(float) * (A->row[row + size] - A->row[row])) );
        gpuErrchk( hipMemcpy(Aval, &A->val[A->row[row]], sizeof(float) * (A->row[row + size] - A->row[row]), hipMemcpyHostToDevice) );
dim3 dimGrid(grid_x, grid_y);
hipLaunchKernelGGL(( cudaSparseMult), dim3(dimGrid), dim3(dimBlock), 0, 0, Arow, Acol, Aval, Bval, Cval, size, C->ncol);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
gpuErrchk( hipMemcpy(&C->val[row * C->ncol], Cval, sizeof(float) * C->ncol * size, hipMemcpyDeviceToHost) );
gpuErrchk( hipFree(Cval) );
gpuErrchk( hipFree(Arow) );
gpuErrchk( hipFree(Acol) );
gpuErrchk( hipFree(Aval) );
}
gpuErrchk( hipFree(Bval) );
}
int compare_matrix(struct dense_mtx *C1, struct dense_mtx *C2)
{
if (C1->nrow != C2->nrow || C1->ncol != C2->ncol)
return 1;
for (int i = 0; i < C1->nrow; i++)
{
for (int j = 0; j < C1->ncol; j++)
{
if ((fabsf(C1->val[C1->ncol * i + j] - C2->val[C2->ncol * i + j]) > fabsf(C1->val[C1->ncol * i + j]) * 0.1) && (fabsf(C1->val[C1->ncol * i + j] - C2->val[C2->ncol * i + j]) > 1.0))
{
printf("C1, C2: %f, %f\n", C1->val[C1->ncol * i + j], C2->val[C2->ncol * i + j]);
return 1;
}
}
}
return 0;
}
uint64_t GetTimeStamp() {
struct timeval tv;
gettimeofday(&tv,NULL);
return tv.tv_sec*(uint64_t)1000000+tv.tv_usec;
}
int main(int argc, char **argv)
{
struct sparse_mtx A;
if(!SCsrMatrixfromFile(&A, argv[1]))
{
std::cout << "read failed." << std::endl;
return 0;
}
struct dense_mtx B;
B.nrow = A.ncol;
B.ncol = atoi(argv[2]);
if((int32_t) B.ncol < 0)
{
free(A.row);
free(A.col);
free(A.val);
std::cerr << "Invalid argument for the number of columns of B." << std::endl;
}
B.val = (float *)malloc(sizeof(float) * B.nrow * B.ncol);
srand((unsigned int)time(NULL));
for(int i = 0; i < B.nrow; i++)
{
for(int j = 0; j < B.ncol; j++)
{
B.val[B.ncol * i + j] = ((float)rand()/(float)(RAND_MAX)) * ((rand() % 2) ? 1.0f : -1.0f);
}
}
struct dense_mtx C1, C2;
C1.val = NULL;
C2.val = NULL;
uint64_t start, end;
std::cout << "Single Thread Computation Start" << std::endl;
start = GetTimeStamp();
multiply_single(&A, &B, &C1);
end = GetTimeStamp();
std::cout << "Single Thread Computation End: " << end - start << " us." << std::endl;
std::cout << "Multi Thread Computation Start" << std::endl;
start = GetTimeStamp();
multiply_cuda(&A, &B, &C2);
end = GetTimeStamp();
std::cout << "Multi Thread Computation End: " << end - start << " us." << std::endl;
if (compare_matrix(&C1, &C2))
std::cout << "Verification Fail." << std::endl;
else
std::cout << "Verification Success." << std::endl;
free(A.row);
free(A.col);
free(A.val);
free(B.val);
if(C1.val != NULL)
free(C1.val);
if(C2.val != NULL)
free(C2.val);
return 0;
}
| 87861448c9063617c808410f227845675327ce86.cu | #include "mmreader.hpp"
#include <iostream>
#include <math.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#define BLOCK 32
#define SUBSIZE 4096
struct param
{
struct sparse_mtx *A;
struct dense_mtx *B;
struct dense_mtx *C;
int start;
int end;
};
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
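// Each thread computes one element C[i][j] of the CSR(A) x dense(B) product for
// the current row tile; Arow/Acol/Aval hold only a sub-range of A, so the row
// offsets are rebased by Arow[0].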
__global__
void cudaSparseMult(int32_t *Arow, int32_t *Acol, float *Aval, float *Bval, float *Cval, uint32_t size, uint32_t ncol)
{
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < size) && (j < ncol))
{
int32_t start = Arow[i] - Arow[0];
int32_t end = Arow[i + 1] - Arow[0];
for (int32_t k = start; k < end; k++)
{
int32_t col = Acol[k];
float val = Aval[k];
Cval[ncol * i + j] += val * Bval[ncol * col + j];
}
}
}
bool
SCsrMatrixfromFile(struct sparse_mtx *A, const char* filePath)
{
// Check that the file format is matrix market; the only format we can read right now
// This is not a complete solution, and fails for directories with file names etc...
// TODO: Should we use boost filesystem?
std::string strPath( filePath );
if( strPath.find_last_of( '.' ) != std::string::npos )
{
std::string ext = strPath.substr( strPath.find_last_of( '.' ) + 1 );
if( ext != "mtx" )
{
std::cout << "Reading file name error" << std::endl;
return false;
}
}
else
return false;
// Read data from a file on disk into buffers
// Data is read natively as COO format with the reader
MatrixMarketReader mm_reader;
if( mm_reader.MMReadFormat(filePath) )
return false;
// JPA: Shouldn't that just be an assertion check? It seems to me that
// the user have to call clsparseHeaderfromFile before calling this function,
// otherwise the whole pCsrMatrix will be broken;
A->nrow = mm_reader.GetNumRows( );
A->ncol = mm_reader.GetNumCols( );
A->nnze = mm_reader.GetNumNonZeroes( );
A->row = (int32_t *)malloc((A->nrow + 1) * sizeof(int32_t));
A->val = (float *)malloc(A->nnze * sizeof(float));
A->col = (int32_t *)malloc(A->nnze * sizeof(int32_t));
if(A->row == NULL || A->col == NULL || A->val == NULL)
{
if(A->row == NULL)
free((void *)A->row);
if(A->col == NULL)
free((void *)A->col);
if(A->val == NULL)
free((void *)A->val);
return false;
}
// The following section of code converts the sparse format from COO to CSR
Coordinate* coords = mm_reader.GetUnsymCoordinates( );
std::sort( coords, coords + A->nnze, CoordinateCompare );
int32_t current_row = 1;
A->row[ 0 ] = 0;
for (int32_t i = 0; i < A->nnze; i++)
{
A->col[ i ] = coords[ i ].y;
A->val[ i ] = coords[ i ].val;
while( coords[ i ].x >= current_row )
A->row[ current_row++ ] = i;
}
A->row[ current_row ] = A->nnze;
while( current_row <= A->nrow )
A->row[ current_row++ ] = A->nnze;
return true;
}
void multiply_single(struct sparse_mtx *A, struct dense_mtx *B, struct dense_mtx *C)
{
C->nrow = A->nrow;
C->ncol = B->ncol;
C->val = (float *)calloc(C->nrow * C->ncol, sizeof(float));
for (int row = 0; row < A->nrow; row++)
{
int32_t start = A->row[row];
int32_t end = A->row[row + 1];
if (start == A->nnze)
break;
for (int i = start; i < end; i++)
{
int32_t col = A->col[i];
float val = A->val[i];
for (int j = 0; j < B->ncol; j++)
C->val[C->ncol * row + j] += val * B->val[B->ncol * col + j];
}
}
}
void multiply_cuda(struct sparse_mtx *A, struct dense_mtx *B, struct dense_mtx *C)
{
C->nrow = A->nrow;
C->ncol = B->ncol;
C->val = (float *)calloc(C->nrow * C->ncol, sizeof(float));
int32_t *Acol, *Arow;
float *Aval, *Bval, *Cval;
dim3 dimBlock(BLOCK, BLOCK);
int32_t loop_num = B->ncol;
int32_t grid_y = (loop_num / BLOCK) + ((loop_num % BLOCK == 0) ? 0 : 1);
gpuErrchk( cudaMalloc(&Bval, sizeof(float) * B->ncol * B->nrow) );
gpuErrchk( cudaMemcpy(Bval, B->val, sizeof(float) * B->ncol * B->nrow, cudaMemcpyHostToDevice) );
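    // Process A in tiles of SUBSIZE rows so that only one slice of A and C has
    // to reside on the device at a time; B stays resident for all tiles.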
for (uint32_t row = 0; row < A->nrow; row += SUBSIZE)
{
uint32_t size = ((A->nrow - row) < SUBSIZE) ? (A->nrow - row) : SUBSIZE;
int32_t grid_x = (size / BLOCK) + ((size % BLOCK == 0) ? 0 : 1);
gpuErrchk( cudaMalloc(&Cval, sizeof(float) * C->ncol * size) );
gpuErrchk( cudaMemset(Cval, 0, sizeof(float) * C->ncol * size) );
gpuErrchk( cudaMalloc(&Arow, sizeof(int32_t) * (size + 1)) );
gpuErrchk( cudaMemcpy(Arow, &A->row[row], sizeof(int32_t) * (size + 1), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMalloc(&Acol, sizeof(int32_t) * (A->row[row + size] - A->row[row])) );
gpuErrchk( cudaMemcpy(Acol, &A->col[A->row[row]], sizeof(int32_t) * (A->row[row + size] - A->row[row]), cudaMemcpyHostToDevice) );
        gpuErrchk( cudaMalloc(&Aval, sizeof(float) * (A->row[row + size] - A->row[row])) );
        gpuErrchk( cudaMemcpy(Aval, &A->val[A->row[row]], sizeof(float) * (A->row[row + size] - A->row[row]), cudaMemcpyHostToDevice) );
dim3 dimGrid(grid_x, grid_y);
cudaSparseMult<<<dimGrid, dimBlock>>>(Arow, Acol, Aval, Bval, Cval, size, C->ncol);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
gpuErrchk( cudaMemcpy(&C->val[row * C->ncol], Cval, sizeof(float) * C->ncol * size, cudaMemcpyDeviceToHost) );
gpuErrchk( cudaFree(Cval) );
gpuErrchk( cudaFree(Arow) );
gpuErrchk( cudaFree(Acol) );
gpuErrchk( cudaFree(Aval) );
}
gpuErrchk( cudaFree(Bval) );
}
int compare_matrix(struct dense_mtx *C1, struct dense_mtx *C2)
{
if (C1->nrow != C2->nrow || C1->ncol != C2->ncol)
return 1;
for (int i = 0; i < C1->nrow; i++)
{
for (int j = 0; j < C1->ncol; j++)
{
if ((fabsf(C1->val[C1->ncol * i + j] - C2->val[C2->ncol * i + j]) > fabsf(C1->val[C1->ncol * i + j]) * 0.1) && (fabsf(C1->val[C1->ncol * i + j] - C2->val[C2->ncol * i + j]) > 1.0))
{
printf("C1, C2: %f, %f\n", C1->val[C1->ncol * i + j], C2->val[C2->ncol * i + j]);
return 1;
}
}
}
return 0;
}
uint64_t GetTimeStamp() {
struct timeval tv;
gettimeofday(&tv,NULL);
return tv.tv_sec*(uint64_t)1000000+tv.tv_usec;
}
int main(int argc, char **argv)
{
struct sparse_mtx A;
if(!SCsrMatrixfromFile(&A, argv[1]))
{
std::cout << "read failed." << std::endl;
return 0;
}
struct dense_mtx B;
B.nrow = A.ncol;
B.ncol = atoi(argv[2]);
if((int32_t) B.ncol < 0)
{
free(A.row);
free(A.col);
free(A.val);
std::cerr << "Invalid argument for the number of columns of B." << std::endl;
}
B.val = (float *)malloc(sizeof(float) * B.nrow * B.ncol);
srand((unsigned int)time(NULL));
for(int i = 0; i < B.nrow; i++)
{
for(int j = 0; j < B.ncol; j++)
{
B.val[B.ncol * i + j] = ((float)rand()/(float)(RAND_MAX)) * ((rand() % 2) ? 1.0f : -1.0f);
}
}
struct dense_mtx C1, C2;
C1.val = NULL;
C2.val = NULL;
uint64_t start, end;
std::cout << "Single Thread Computation Start" << std::endl;
start = GetTimeStamp();
multiply_single(&A, &B, &C1);
end = GetTimeStamp();
std::cout << "Single Thread Computation End: " << end - start << " us." << std::endl;
std::cout << "Multi Thread Computation Start" << std::endl;
start = GetTimeStamp();
multiply_cuda(&A, &B, &C2);
end = GetTimeStamp();
std::cout << "Multi Thread Computation End: " << end - start << " us." << std::endl;
if (compare_matrix(&C1, &C2))
std::cout << "Verification Fail." << std::endl;
else
std::cout << "Verification Success." << std::endl;
free(A.row);
free(A.col);
free(A.val);
free(B.val);
if(C1.val != NULL)
free(C1.val);
if(C2.val != NULL)
free(C2.val);
return 0;
}
|
8ea8c996418e63d52734ccdfaece309e56d20fa0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <torch/serialize/tensor.h>
#include "common.h"
#include "device_tensor.h"
namespace {
template <typename DType, typename Acctype, typename DeviceTensor3>
struct GradOp {
__device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g)
: mean(m), input(i), gradOutput(g) {}
__device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
DType g = gradOutput[batch][plane][n];
DType c = ScalarConvert<Acctype, DType>::to(input[batch][plane][n] - mean);
return Float2<DType, Acctype>(g, g * c);
}
const Acctype mean;
const DeviceTensor3 input;
const DeviceTensor3 gradOutput;
};
template <typename DType, typename Acctype>
struct SumOp {
__device__ SumOp(DeviceTensor<DType, 3> i) : input(i){}
__device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
DType g = input[batch][plane][n];
return Float2<DType, Acctype>(g, g * g);
}
DType mean;
DeviceTensor<DType, 3> input;
};
// Sum across (batch, x/y/z) applying Op() pointwise
template<typename T, typename Op, typename DeviceTensor3>
__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) {
T sum = (T)0;
for (int batch = 0; batch < tensor.getSize(0); ++batch) {
for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// sum over NumThreads within a warp
sum = warpSum(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[32];
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
shared[threadIdx.x / WARP_SIZE] = sum;
}
if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
// zero out the other entries in shared
shared[threadIdx.x] = (T)0;
}
__syncthreads();
if (threadIdx.x / WARP_SIZE == 0) {
sum = warpSum(shared[threadIdx.x]);
if (threadIdx.x == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
template <typename DType>
__global__ void BatchNorm_Forward_kernel (
DeviceTensor<DType, 3> output,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta) {
int c = blockIdx.x;
/* main operation */
for (int b = 0; b < input.getSize(0); ++b) {
for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
DType inp = input[b][c][x];
output[b][c][x] = gamma[c] * (inp - mean[c]) /
std[c] + beta[c];
}
}
}
template <typename DType>
__global__ void BatchNorm_Backward_kernel (
DeviceTensor<DType, 3> gradoutput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 3> gradinput,
DeviceTensor<DType, 1> gradgamma,
DeviceTensor<DType, 1> gradbeta,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta,
DeviceTensor<DType, 1> gradMean,
DeviceTensor<DType, 1> gradStd,
bool train) {
/* declarations of the variables */
/* Get the index and channels */
int c = blockIdx.x;
/* main operation */
GradOp<DType, DType, DeviceTensor<DType, 3>> g(mean[c], input, gradoutput);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
GradOp<DType, DType, DeviceTensor<DType, 3>>,
DeviceTensor<DType, 3>>(g, gradoutput, c);
DType gradOutputSum = res.v1;
DType dotP = res.v2;
DType invstd = DType(1.0) / std[c];
DType gradScale = invstd * gamma[c];
if (train && threadIdx.x == 0) {
gradMean[c] = - gradOutputSum * gamma[c] * invstd;
gradStd[c] = - dotP * gamma[c] * invstd * invstd;
}
if (gradinput.numElements() > 0) {
for (int batch = 0; batch < gradoutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) {
gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale;
}
}
}
if (gradgamma.numElements() > 0) {
if (threadIdx.x == 0) {
gradgamma[c] += dotP * invstd;
}
}
if (gradbeta.numElements() > 0) {
if (threadIdx.x == 0) {
gradbeta[c] += gradOutputSum;
}
}
}
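// Computes per-channel sum(x) and sum(x^2) over the batch and spatial
// dimensions; these partial statistics are presumably reduced across devices
// for synchronized batch normalization.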
template <typename DType>
__global__ void Sum_Square_Forward_kernel (
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> sum,
DeviceTensor<DType, 1> square) {
int c = blockIdx.x;
/* main operation */
SumOp<DType, DType> g(input);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
SumOp<DType, DType>, DeviceTensor<DType, 3>>(g, input, c);
DType xsum = res.v1;
DType xsquare = res.v2;
if (threadIdx.x == 0) {
sum[c] = xsum;
square[c] = xsquare;
}
}
template <typename DType>
__global__ void Sum_Square_Backward_kernel (
DeviceTensor<DType, 3> gradInput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> gradSum,
DeviceTensor<DType, 1> gradSquare) {
int c = blockIdx.x;
/* main operation */
for (int batch = 0; batch < gradInput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x)
{
gradInput[batch][c][x] = gradSum[c] + 2 * gradSquare[c] *
input[batch][c][x];
}
}
}
} // namespace
at::Tensor BatchNorm_Forward_CUDA(
const at::Tensor input_,
const at::Tensor mean_,
const at::Tensor std_,
const at::Tensor gamma_,
const at::Tensor beta_) {
auto output_ = at::zeros_like(input_);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
/* kernel function */
hipLaunchKernelGGL(( BatchNorm_Forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
output, input, mean, std, gamma, beta);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return output_;
}
std::vector<at::Tensor> BatchNorm_Backward_CUDA(
const at::Tensor gradoutput_,
const at::Tensor input_,
const at::Tensor mean_,
const at::Tensor std_,
const at::Tensor gamma_,
const at::Tensor beta_,
bool train) {
/* outputs*/
at::Tensor gradinput_ = at::zeros_like(input_);
at::Tensor gradgamma_ = at::zeros_like(gamma_);
at::Tensor gradbeta_ = at::zeros_like(beta_);
at::Tensor gradMean_ = at::zeros_like(mean_);
at::Tensor gradStd_ = at::zeros_like(std_);
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_);
DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_);
DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_);
DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
DeviceTensor<scalar_t, 1> gradMean = devicetensor<scalar_t, 1>(gradMean_);
DeviceTensor<scalar_t, 1> gradStd = devicetensor<scalar_t, 1>(gradStd_);
/* kernel function */
hipLaunchKernelGGL(( BatchNorm_Backward_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream,
gradoutput, input, gradinput, gradgamma, gradbeta, mean, std,
gamma, beta, gradMean, gradStd, train);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return {gradinput_, gradMean_, gradStd_, gradgamma_, gradbeta_};
}
std::vector<at::Tensor> Sum_Square_Forward_CUDA(
const at::Tensor input_) {
/* outputs */
at::Tensor sum_ = torch::zeros({input_.size(1)}, input_.options());
at::Tensor square_ = torch::zeros({input_.size(1)}, input_.options());
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> sum = devicetensor<scalar_t, 1>(sum_);
DeviceTensor<scalar_t, 1> square = devicetensor<scalar_t, 1>(square_);
/* kernel function */
hipLaunchKernelGGL(( Sum_Square_Forward_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, input, sum, square);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return {sum_, square_};
}
at::Tensor Sum_Square_Backward_CUDA(
const at::Tensor input_,
const at::Tensor gradSum_,
const at::Tensor gradSquare_) {
/* outputs */
at::Tensor gradInput_ = at::zeros_like(input_);
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> gradSum = devicetensor<scalar_t, 1>(gradSum_);
DeviceTensor<scalar_t, 1> gradSquare =devicetensor<scalar_t, 1>(gradSquare_);
/* kernel function */
hipLaunchKernelGGL(( Sum_Square_Backward_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, gradInput, input, gradSum, gradSquare);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return gradInput_;
}
| 8ea8c996418e63d52734ccdfaece309e56d20fa0.cu | #include <vector>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <torch/serialize/tensor.h>
#include "common.h"
#include "device_tensor.h"
namespace {
template <typename DType, typename Acctype, typename DeviceTensor3>
struct GradOp {
__device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g)
: mean(m), input(i), gradOutput(g) {}
__device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
DType g = gradOutput[batch][plane][n];
DType c = ScalarConvert<Acctype, DType>::to(input[batch][plane][n] - mean);
return Float2<DType, Acctype>(g, g * c);
}
const Acctype mean;
const DeviceTensor3 input;
const DeviceTensor3 gradOutput;
};
template <typename DType, typename Acctype>
struct SumOp {
__device__ SumOp(DeviceTensor<DType, 3> i) : input(i){}
__device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
DType g = input[batch][plane][n];
return Float2<DType, Acctype>(g, g * g);
}
DType mean;
DeviceTensor<DType, 3> input;
};
// Sum across (batch, x/y/z) applying Op() pointwise
template<typename T, typename Op, typename DeviceTensor3>
__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) {
T sum = (T)0;
for (int batch = 0; batch < tensor.getSize(0); ++batch) {
for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// sum over NumThreads within a warp
sum = warpSum(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[32];
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
shared[threadIdx.x / WARP_SIZE] = sum;
}
if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
// zero out the other entries in shared
shared[threadIdx.x] = (T)0;
}
__syncthreads();
if (threadIdx.x / WARP_SIZE == 0) {
sum = warpSum(shared[threadIdx.x]);
if (threadIdx.x == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
template <typename DType>
__global__ void BatchNorm_Forward_kernel (
DeviceTensor<DType, 3> output,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta) {
int c = blockIdx.x;
/* main operation */
for (int b = 0; b < input.getSize(0); ++b) {
for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
DType inp = input[b][c][x];
output[b][c][x] = gamma[c] * (inp - mean[c]) /
std[c] + beta[c];
}
}
}
template <typename DType>
__global__ void BatchNorm_Backward_kernel (
DeviceTensor<DType, 3> gradoutput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 3> gradinput,
DeviceTensor<DType, 1> gradgamma,
DeviceTensor<DType, 1> gradbeta,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta,
DeviceTensor<DType, 1> gradMean,
DeviceTensor<DType, 1> gradStd,
bool train) {
/* declarations of the variables */
/* Get the index and channels */
int c = blockIdx.x;
/* main operation */
GradOp<DType, DType, DeviceTensor<DType, 3>> g(mean[c], input, gradoutput);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
GradOp<DType, DType, DeviceTensor<DType, 3>>,
DeviceTensor<DType, 3>>(g, gradoutput, c);
DType gradOutputSum = res.v1;
DType dotP = res.v2;
DType invstd = DType(1.0) / std[c];
DType gradScale = invstd * gamma[c];
if (train && threadIdx.x == 0) {
gradMean[c] = - gradOutputSum * gamma[c] * invstd;
gradStd[c] = - dotP * gamma[c] * invstd * invstd;
}
if (gradinput.numElements() > 0) {
for (int batch = 0; batch < gradoutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) {
gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale;
}
}
}
if (gradgamma.numElements() > 0) {
if (threadIdx.x == 0) {
gradgamma[c] += dotP * invstd;
}
}
if (gradbeta.numElements() > 0) {
if (threadIdx.x == 0) {
gradbeta[c] += gradOutputSum;
}
}
}
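// Computes per-channel sum(x) and sum(x^2) over the batch and spatial
// dimensions; these partial statistics are presumably reduced across devices
// for synchronized batch normalization.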
template <typename DType>
__global__ void Sum_Square_Forward_kernel (
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> sum,
DeviceTensor<DType, 1> square) {
int c = blockIdx.x;
/* main operation */
SumOp<DType, DType> g(input);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
SumOp<DType, DType>, DeviceTensor<DType, 3>>(g, input, c);
DType xsum = res.v1;
DType xsquare = res.v2;
if (threadIdx.x == 0) {
sum[c] = xsum;
square[c] = xsquare;
}
}
template <typename DType>
__global__ void Sum_Square_Backward_kernel (
DeviceTensor<DType, 3> gradInput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> gradSum,
DeviceTensor<DType, 1> gradSquare) {
int c = blockIdx.x;
/* main operation */
for (int batch = 0; batch < gradInput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x)
{
gradInput[batch][c][x] = gradSum[c] + 2 * gradSquare[c] *
input[batch][c][x];
}
}
}
} // namespace
at::Tensor BatchNorm_Forward_CUDA(
const at::Tensor input_,
const at::Tensor mean_,
const at::Tensor std_,
const at::Tensor gamma_,
const at::Tensor beta_) {
auto output_ = at::zeros_like(input_);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
/* kernel function */
BatchNorm_Forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
output, input, mean, std, gamma, beta);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return output_;
}
std::vector<at::Tensor> BatchNorm_Backward_CUDA(
const at::Tensor gradoutput_,
const at::Tensor input_,
const at::Tensor mean_,
const at::Tensor std_,
const at::Tensor gamma_,
const at::Tensor beta_,
bool train) {
/* outputs*/
at::Tensor gradinput_ = at::zeros_like(input_);
at::Tensor gradgamma_ = at::zeros_like(gamma_);
at::Tensor gradbeta_ = at::zeros_like(beta_);
at::Tensor gradMean_ = at::zeros_like(mean_);
at::Tensor gradStd_ = at::zeros_like(std_);
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_);
DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_);
DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_);
DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
DeviceTensor<scalar_t, 1> gradMean = devicetensor<scalar_t, 1>(gradMean_);
DeviceTensor<scalar_t, 1> gradStd = devicetensor<scalar_t, 1>(gradStd_);
/* kernel function */
BatchNorm_Backward_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(
gradoutput, input, gradinput, gradgamma, gradbeta, mean, std,
gamma, beta, gradMean, gradStd, train);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return {gradinput_, gradMean_, gradStd_, gradgamma_, gradbeta_};
}
std::vector<at::Tensor> Sum_Square_Forward_CUDA(
const at::Tensor input_) {
/* outputs */
at::Tensor sum_ = torch::zeros({input_.size(1)}, input_.options());
at::Tensor square_ = torch::zeros({input_.size(1)}, input_.options());
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> sum = devicetensor<scalar_t, 1>(sum_);
DeviceTensor<scalar_t, 1> square = devicetensor<scalar_t, 1>(square_);
/* kernel function */
Sum_Square_Forward_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(input, sum, square);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return {sum_, square_};
}
at::Tensor Sum_Square_Backward_CUDA(
const at::Tensor input_,
const at::Tensor gradSum_,
const at::Tensor gradSquare_) {
/* outputs */
at::Tensor gradInput_ = at::zeros_like(input_);
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> gradSum = devicetensor<scalar_t, 1>(gradSum_);
    DeviceTensor<scalar_t, 1> gradSquare = devicetensor<scalar_t, 1>(gradSquare_);
/* kernel function */
Sum_Square_Backward_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(gradInput, input, gradSum, gradSquare);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return gradInput_;
}
|
44a1b666d712e37d2cd810e75ac531d13b6547ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"{
__global__ void threshold(unsigned char * src,unsigned char * dst,int width,int height,int thresh){
		// index along the x direction of the grid
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
		// index along the y direction of the grid
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int idx = xIndex + yIndex * width;
if (xIndex < width && yIndex < height && idx < width * height){
if (src[idx] > thresh){
dst[idx] = 255;
}else{
dst[idx] = 0;
}
}
}
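	// A typical host-side launch (illustrative sketch, not part of this file;
	// d_src/d_dst are assumed to be device buffers of width*height bytes):
	//   dim3 block(32, 32);
	//   dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
	//   hipLaunchKernelGGL(threshold, grid, block, 0, 0, d_src, d_dst, width, height, 128);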
__global__ void multi_threshold(unsigned char * src,unsigned char * dst,int width,int height,int min_thresh,int max_thresh){
		// index along the x direction of the grid
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
		// index along the y direction of the grid
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int idx = xIndex + yIndex * width;
if (xIndex < width && yIndex < height && idx < width * height){
int pixel = src[idx];
if (pixel >= min_thresh && pixel <= max_thresh){
dst[idx] = 255;
}else{
dst[idx] = 0;
}
}
}
} | 44a1b666d712e37d2cd810e75ac531d13b6547ff.cu | extern "C"{
__global__ void threshold(unsigned char * src,unsigned char * dst,int width,int height,int thresh){
		// index along the x direction of the grid
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
		// index along the y direction of the grid
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int idx = xIndex + yIndex * width;
if (xIndex < width && yIndex < height && idx < width * height){
if (src[idx] > thresh){
dst[idx] = 255;
}else{
dst[idx] = 0;
}
}
}
__global__ void multi_threshold(unsigned char * src,unsigned char * dst,int width,int height,int min_thresh,int max_thresh){
		// index along the x direction of the grid
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
		// index along the y direction of the grid
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int idx = xIndex + yIndex * width;
if (xIndex < width && yIndex < height && idx < width * height){
int pixel = src[idx];
if (pixel >= min_thresh && pixel <= max_thresh){
dst[idx] = 255;
}else{
dst[idx] = 0;
}
}
}
} |
2dd38ceba577999055e0cf5b3bce0dd126c84182.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template <typename T> __global__ void kernelgpuInitu1(T *f, T *xdg, T *uinf, T *param, int modelnumber, int ng, int ncx, int nce, int npe, int ne)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i<ng) {
int j = i%npe;
int k = (i-j)/npe;
T xdg1 = xdg[j+npe*0+npe*ncx*k];
T xdg2 = xdg[j+npe*1+npe*ncx*k];
f[j+npe*0+npe*nce*k] = 0.0;
i += blockDim.x * gridDim.x;
}
}
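// The kernel above uses a grid-stride loop (i += blockDim.x * gridDim.x), so the
// wrapper below can cap gridDim at 1024 blocks: any remaining elements are
// handled by extra loop iterations instead of extra blocks.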
template <typename T> void gpuInitu1(T *f, T *xdg, T *uinf, T *param, int modelnumber, int ng, int ncx, int nce, int npe, int ne)
{
int blockDim = 256;
int gridDim = (ng + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( kernelgpuInitu1), dim3(gridDim), dim3(blockDim), 0, 0, f, xdg, uinf, param, modelnumber, ng, ncx, nce, npe, ne);
}
template void gpuInitu1(double *, double *, double *, double *, int, int, int, int, int, int);
template void gpuInitu1(float *, float *, float *, float *, int, int, int, int, int, int);
| 2dd38ceba577999055e0cf5b3bce0dd126c84182.cu | template <typename T> __global__ void kernelgpuInitu1(T *f, T *xdg, T *uinf, T *param, int modelnumber, int ng, int ncx, int nce, int npe, int ne)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i<ng) {
int j = i%npe;
int k = (i-j)/npe;
T xdg1 = xdg[j+npe*0+npe*ncx*k];
T xdg2 = xdg[j+npe*1+npe*ncx*k];
f[j+npe*0+npe*nce*k] = 0.0;
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuInitu1(T *f, T *xdg, T *uinf, T *param, int modelnumber, int ng, int ncx, int nce, int npe, int ne)
{
int blockDim = 256;
int gridDim = (ng + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
kernelgpuInitu1<<<gridDim, blockDim>>>(f, xdg, uinf, param, modelnumber, ng, ncx, nce, npe, ne);
}
template void gpuInitu1(double *, double *, double *, double *, int, int, int, int, int, int);
template void gpuInitu1(float *, float *, float *, float *, int, int, int, int, int, int);
|
41b9347beb1be9a239f4d0791a7aa928d94db749.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/fast_rcnn_layers.hpp"
#include "caffe/util/nms.hpp"
namespace caffe {
template <typename Dtype>
__device__
static
int transform_box(Dtype box[],
const Dtype dx, const Dtype dy,
const Dtype d_log_w, const Dtype d_log_h,
const Dtype img_W, const Dtype img_H,
const Dtype min_box_W, const Dtype min_box_H)
{
// width & height of box
const Dtype w = box[2] - box[0] + (Dtype)1;
const Dtype h = box[3] - box[1] + (Dtype)1;
// center location of box
const Dtype ctr_x = box[0] + (Dtype)0.5 * w;
const Dtype ctr_y = box[1] + (Dtype)0.5 * h;
// new center location according to gradient (dx, dy)
const Dtype pred_ctr_x = dx * w + ctr_x;
const Dtype pred_ctr_y = dy * h + ctr_y;
// new width & height according to gradient d(log w), d(log h)
const Dtype pred_w = exp(d_log_w) * w;
const Dtype pred_h = exp(d_log_h) * h;
// update upper-left corner location
box[0] = pred_ctr_x - (Dtype)0.5 * pred_w;
box[1] = pred_ctr_y - (Dtype)0.5 * pred_h;
// update lower-right corner location
box[2] = pred_ctr_x + (Dtype)0.5 * pred_w;
box[3] = pred_ctr_y + (Dtype)0.5 * pred_h;
// adjust new corner locations to be within the image region,
box[0] = max((Dtype)0, min(box[0], img_W - (Dtype)1));
box[1] = max((Dtype)0, min(box[1], img_H - (Dtype)1));
box[2] = max((Dtype)0, min(box[2], img_W - (Dtype)1));
box[3] = max((Dtype)0, min(box[3], img_H - (Dtype)1));
// recompute new width & height
const Dtype box_w = box[2] - box[0] + (Dtype)1;
const Dtype box_h = box[3] - box[1] + (Dtype)1;
// check if new box's size >= threshold
return (box_w >= min_box_W) * (box_h >= min_box_H);
}
template <typename Dtype>
static
void sort_box(Dtype list_cpu[], const int start, const int end,
const int num_top)
{
const Dtype pivot_score = list_cpu[start * 5 + 4];
int left = start + 1, right = end;
Dtype temp[5];
while (left <= right) {
while (left <= end && list_cpu[left * 5 + 4] >= pivot_score) ++left;
while (right > start && list_cpu[right * 5 + 4] <= pivot_score) --right;
if (left <= right) {
for (int i = 0; i < 5; ++i) {
temp[i] = list_cpu[left * 5 + i];
}
for (int i = 0; i < 5; ++i) {
list_cpu[left * 5 + i] = list_cpu[right * 5 + i];
}
for (int i = 0; i < 5; ++i) {
list_cpu[right * 5 + i] = temp[i];
}
++left;
--right;
}
}
if (right > start) {
for (int i = 0; i < 5; ++i) {
temp[i] = list_cpu[start * 5 + i];
}
for (int i = 0; i < 5; ++i) {
list_cpu[start * 5 + i] = list_cpu[right * 5 + i];
}
for (int i = 0; i < 5; ++i) {
list_cpu[right * 5 + i] = temp[i];
}
}
if (start < right - 1) {
sort_box(list_cpu, start, right - 1, num_top);
}
if (right + 1 < num_top && right + 1 < end) {
sort_box(list_cpu, right + 1, end, num_top);
}
}
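// sort_box is a recursive quicksort over 5-float (x1, y1, x2, y2, score)
// records, ordered by descending score. The second recursive call is pruned
// (right + 1 < num_top), so only the leading num_top entries are guaranteed to
// end up fully sorted, which is all the proposal stage needs.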
template <typename Dtype>
__global__
static
void enumerate_proposals_gpu(const int nthreads,
const Dtype bottom4d[],
const Dtype d_anchor4d[],
const Dtype anchors[],
Dtype proposals[],
const int num_anchors,
const int bottom_H, const int bottom_W,
const Dtype img_H, const Dtype img_W,
const Dtype min_box_H, const Dtype min_box_W,
const int feat_stride)
{
CUDA_KERNEL_LOOP(index, nthreads) {
const int h = index / num_anchors / bottom_W;
const int w = (index / num_anchors) % bottom_W;
const int k = index % num_anchors;
const Dtype x = w * feat_stride;
const Dtype y = h * feat_stride;
const Dtype* p_box = d_anchor4d + h * bottom_W + w;
const Dtype* p_score = bottom4d + h * bottom_W + w;
const int bottom_area = bottom_H * bottom_W;
const Dtype dx = p_box[(k * 4 + 0) * bottom_area];
const Dtype dy = p_box[(k * 4 + 1) * bottom_area];
const Dtype d_log_w = p_box[(k * 4 + 2) * bottom_area];
const Dtype d_log_h = p_box[(k * 4 + 3) * bottom_area];
Dtype* const p_proposal = proposals + index * 5;
p_proposal[0] = x + anchors[k * 4 + 0];
p_proposal[1] = y + anchors[k * 4 + 1];
p_proposal[2] = x + anchors[k * 4 + 2];
p_proposal[3] = y + anchors[k * 4 + 3];
p_proposal[4]
= transform_box(p_proposal,
dx, dy, d_log_w, d_log_h,
img_W, img_H, min_box_W, min_box_H)
* p_score[k * bottom_area];
}
}
template <typename Dtype>
__global__
static
void retrieve_rois_gpu(const int nthreads,
const int item_index,
const Dtype proposals[],
const int roi_indices[],
Dtype rois[],
Dtype roi_scores[])
{
CUDA_KERNEL_LOOP(index, nthreads) {
const Dtype* const proposals_index = proposals + roi_indices[index] * 5;
rois[index * 5 + 0] = item_index;
rois[index * 5 + 1] = proposals_index[0];
rois[index * 5 + 2] = proposals_index[1];
rois[index * 5 + 3] = proposals_index[2];
rois[index * 5 + 4] = proposals_index[3];
if (roi_scores) {
roi_scores[index] = proposals_index[4];
}
}
}
template <typename Dtype>
void ProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
CHECK_EQ(bottom[0]->shape(0), 1) << "Only single item batches are supported";
const Dtype* p_bottom_item = bottom[0]->gpu_data();
const Dtype* p_d_anchor_item = bottom[1]->gpu_data();
const Dtype* p_img_info_cpu = bottom[2]->cpu_data();
Dtype* p_roi_item = top[0]->mutable_gpu_data();
Dtype* p_score_item = (top.size() > 1) ? top[1]->mutable_gpu_data() : NULL;
vector<int> proposals_shape(2);
vector<int> top_shape(2);
proposals_shape[0] = 0;
proposals_shape[1] = 5;
top_shape[0] = 0;
top_shape[1] = 5;
for (int n = 0; n < bottom[0]->shape(0); ++n) {
// bottom shape: (2 x num_anchors) x H x W
const int bottom_H = bottom[0]->height();
const int bottom_W = bottom[0]->width();
// input image height & width
const Dtype img_H = p_img_info_cpu[0];
const Dtype img_W = p_img_info_cpu[1];
// scale factor for height & width
const Dtype scale_H = p_img_info_cpu[2];
const Dtype scale_W = p_img_info_cpu[3];
// minimum box width & height
const Dtype min_box_H = min_size_ * scale_H;
const Dtype min_box_W = min_size_ * scale_W;
// number of all proposals = num_anchors * H * W
const int num_proposals = anchors_.shape(0) * bottom_H * bottom_W;
// number of top-n proposals before NMS
const int pre_nms_topn = ::min(num_proposals, pre_nms_topn_);
/*
LOG_IF(INFO, pre_nms_topn) << " pre_nms_topn " << pre_nms_topn;
LOG_IF(INFO, num_proposals) << " num_proposals " << num_proposals;
*/
// number of final RoIs
int num_rois = 0;
// enumerate all proposals
// num_proposals = num_anchors * H * W
// (x1, y1, x2, y2, score) for each proposal
// NOTE: for bottom, only foreground scores are passed
proposals_shape[0] = num_proposals;
proposals_.Reshape(proposals_shape);
hipLaunchKernelGGL(( enumerate_proposals_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(num_proposals)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_proposals,
p_bottom_item + num_proposals, p_d_anchor_item,
anchors_.gpu_data(), proposals_.mutable_gpu_data(), anchors_.shape(0),
bottom_H, bottom_W, img_H, img_W, min_box_H, min_box_W,
feat_stride_);
CUDA_POST_KERNEL_CHECK;
sort_box(proposals_.mutable_cpu_data(), 0, num_proposals - 1, pre_nms_topn_);
nms_gpu(pre_nms_topn, proposals_.gpu_data(), &nms_mask_,
roi_indices_.mutable_cpu_data(), &num_rois,
0, nms_thresh_, post_nms_topn_);
/*
LOG_IF(INFO, post_nms_topn_) << " post_nms_topn_ " << post_nms_topn_;
LOG_IF(INFO, num_rois) << " num_rois " << num_rois;
*/
hipLaunchKernelGGL(( retrieve_rois_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(num_rois)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_rois, n, proposals_.gpu_data(), roi_indices_.gpu_data(),
p_roi_item, p_score_item);
CUDA_POST_KERNEL_CHECK;
top_shape[0] += num_rois;
}
top[0]->Reshape(top_shape);
if (top.size() > 1) {
top_shape.pop_back();
top[1]->Reshape(top_shape);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ProposalLayer);
} // namespace caffe
| 41b9347beb1be9a239f4d0791a7aa928d94db749.cu | #include "caffe/fast_rcnn_layers.hpp"
#include "caffe/util/nms.hpp"
namespace caffe {
template <typename Dtype>
__device__
static
int transform_box(Dtype box[],
const Dtype dx, const Dtype dy,
const Dtype d_log_w, const Dtype d_log_h,
const Dtype img_W, const Dtype img_H,
const Dtype min_box_W, const Dtype min_box_H)
{
// width & height of box
const Dtype w = box[2] - box[0] + (Dtype)1;
const Dtype h = box[3] - box[1] + (Dtype)1;
// center location of box
const Dtype ctr_x = box[0] + (Dtype)0.5 * w;
const Dtype ctr_y = box[1] + (Dtype)0.5 * h;
// new center location according to gradient (dx, dy)
const Dtype pred_ctr_x = dx * w + ctr_x;
const Dtype pred_ctr_y = dy * h + ctr_y;
// new width & height according to gradient d(log w), d(log h)
const Dtype pred_w = exp(d_log_w) * w;
const Dtype pred_h = exp(d_log_h) * h;
// update upper-left corner location
box[0] = pred_ctr_x - (Dtype)0.5 * pred_w;
box[1] = pred_ctr_y - (Dtype)0.5 * pred_h;
// update lower-right corner location
box[2] = pred_ctr_x + (Dtype)0.5 * pred_w;
box[3] = pred_ctr_y + (Dtype)0.5 * pred_h;
// adjust new corner locations to be within the image region,
box[0] = max((Dtype)0, min(box[0], img_W - (Dtype)1));
box[1] = max((Dtype)0, min(box[1], img_H - (Dtype)1));
box[2] = max((Dtype)0, min(box[2], img_W - (Dtype)1));
box[3] = max((Dtype)0, min(box[3], img_H - (Dtype)1));
// recompute new width & height
const Dtype box_w = box[2] - box[0] + (Dtype)1;
const Dtype box_h = box[3] - box[1] + (Dtype)1;
// check if new box's size >= threshold
return (box_w >= min_box_W) * (box_h >= min_box_H);
}
template <typename Dtype>
static
void sort_box(Dtype list_cpu[], const int start, const int end,
const int num_top)
{
const Dtype pivot_score = list_cpu[start * 5 + 4];
int left = start + 1, right = end;
Dtype temp[5];
while (left <= right) {
while (left <= end && list_cpu[left * 5 + 4] >= pivot_score) ++left;
while (right > start && list_cpu[right * 5 + 4] <= pivot_score) --right;
if (left <= right) {
for (int i = 0; i < 5; ++i) {
temp[i] = list_cpu[left * 5 + i];
}
for (int i = 0; i < 5; ++i) {
list_cpu[left * 5 + i] = list_cpu[right * 5 + i];
}
for (int i = 0; i < 5; ++i) {
list_cpu[right * 5 + i] = temp[i];
}
++left;
--right;
}
}
if (right > start) {
for (int i = 0; i < 5; ++i) {
temp[i] = list_cpu[start * 5 + i];
}
for (int i = 0; i < 5; ++i) {
list_cpu[start * 5 + i] = list_cpu[right * 5 + i];
}
for (int i = 0; i < 5; ++i) {
list_cpu[right * 5 + i] = temp[i];
}
}
if (start < right - 1) {
sort_box(list_cpu, start, right - 1, num_top);
}
if (right + 1 < num_top && right + 1 < end) {
sort_box(list_cpu, right + 1, end, num_top);
}
}
template <typename Dtype>
__global__
static
void enumerate_proposals_gpu(const int nthreads,
const Dtype bottom4d[],
const Dtype d_anchor4d[],
const Dtype anchors[],
Dtype proposals[],
const int num_anchors,
const int bottom_H, const int bottom_W,
const Dtype img_H, const Dtype img_W,
const Dtype min_box_H, const Dtype min_box_W,
const int feat_stride)
{
CUDA_KERNEL_LOOP(index, nthreads) {
const int h = index / num_anchors / bottom_W;
const int w = (index / num_anchors) % bottom_W;
const int k = index % num_anchors;
const Dtype x = w * feat_stride;
const Dtype y = h * feat_stride;
const Dtype* p_box = d_anchor4d + h * bottom_W + w;
const Dtype* p_score = bottom4d + h * bottom_W + w;
const int bottom_area = bottom_H * bottom_W;
const Dtype dx = p_box[(k * 4 + 0) * bottom_area];
const Dtype dy = p_box[(k * 4 + 1) * bottom_area];
const Dtype d_log_w = p_box[(k * 4 + 2) * bottom_area];
const Dtype d_log_h = p_box[(k * 4 + 3) * bottom_area];
Dtype* const p_proposal = proposals + index * 5;
p_proposal[0] = x + anchors[k * 4 + 0];
p_proposal[1] = y + anchors[k * 4 + 1];
p_proposal[2] = x + anchors[k * 4 + 2];
p_proposal[3] = y + anchors[k * 4 + 3];
p_proposal[4]
= transform_box(p_proposal,
dx, dy, d_log_w, d_log_h,
img_W, img_H, min_box_W, min_box_H)
* p_score[k * bottom_area];
}
}
template <typename Dtype>
__global__
static
void retrieve_rois_gpu(const int nthreads,
const int item_index,
const Dtype proposals[],
const int roi_indices[],
Dtype rois[],
Dtype roi_scores[])
{
CUDA_KERNEL_LOOP(index, nthreads) {
const Dtype* const proposals_index = proposals + roi_indices[index] * 5;
rois[index * 5 + 0] = item_index;
rois[index * 5 + 1] = proposals_index[0];
rois[index * 5 + 2] = proposals_index[1];
rois[index * 5 + 3] = proposals_index[2];
rois[index * 5 + 4] = proposals_index[3];
if (roi_scores) {
roi_scores[index] = proposals_index[4];
}
}
}
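// Each output RoI row follows the usual Caffe ROIPooling convention,
// (batch_index, x1, y1, x2, y2); the proposal score is written to the optional
// roi_scores buffer when one is provided.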
template <typename Dtype>
void ProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
CHECK_EQ(bottom[0]->shape(0), 1) << "Only single item batches are supported";
const Dtype* p_bottom_item = bottom[0]->gpu_data();
const Dtype* p_d_anchor_item = bottom[1]->gpu_data();
const Dtype* p_img_info_cpu = bottom[2]->cpu_data();
Dtype* p_roi_item = top[0]->mutable_gpu_data();
Dtype* p_score_item = (top.size() > 1) ? top[1]->mutable_gpu_data() : NULL;
vector<int> proposals_shape(2);
vector<int> top_shape(2);
proposals_shape[0] = 0;
proposals_shape[1] = 5;
top_shape[0] = 0;
top_shape[1] = 5;
for (int n = 0; n < bottom[0]->shape(0); ++n) {
// bottom shape: (2 x num_anchors) x H x W
const int bottom_H = bottom[0]->height();
const int bottom_W = bottom[0]->width();
// input image height & width
const Dtype img_H = p_img_info_cpu[0];
const Dtype img_W = p_img_info_cpu[1];
// scale factor for height & width
const Dtype scale_H = p_img_info_cpu[2];
const Dtype scale_W = p_img_info_cpu[3];
// minimum box width & height
const Dtype min_box_H = min_size_ * scale_H;
const Dtype min_box_W = min_size_ * scale_W;
// number of all proposals = num_anchors * H * W
const int num_proposals = anchors_.shape(0) * bottom_H * bottom_W;
// number of top-n proposals before NMS
const int pre_nms_topn = std::min(num_proposals, pre_nms_topn_);
/*
LOG_IF(INFO, pre_nms_topn) << " pre_nms_topn " << pre_nms_topn;
LOG_IF(INFO, num_proposals) << " num_proposals " << num_proposals;
*/
// number of final RoIs
int num_rois = 0;
// enumerate all proposals
// num_proposals = num_anchors * H * W
// (x1, y1, x2, y2, score) for each proposal
// NOTE: for bottom, only foreground scores are passed
proposals_shape[0] = num_proposals;
proposals_.Reshape(proposals_shape);
enumerate_proposals_gpu<Dtype><<<CAFFE_GET_BLOCKS(num_proposals),
CAFFE_CUDA_NUM_THREADS>>>(
num_proposals,
p_bottom_item + num_proposals, p_d_anchor_item,
anchors_.gpu_data(), proposals_.mutable_gpu_data(), anchors_.shape(0),
bottom_H, bottom_W, img_H, img_W, min_box_H, min_box_W,
feat_stride_);
CUDA_POST_KERNEL_CHECK;
sort_box(proposals_.mutable_cpu_data(), 0, num_proposals - 1, pre_nms_topn_);
nms_gpu(pre_nms_topn, proposals_.gpu_data(), &nms_mask_,
roi_indices_.mutable_cpu_data(), &num_rois,
0, nms_thresh_, post_nms_topn_);
/*
LOG_IF(INFO, post_nms_topn_) << " post_nms_topn_ " << post_nms_topn_;
LOG_IF(INFO, num_rois) << " num_rois " << num_rois;
*/
retrieve_rois_gpu<Dtype><<<CAFFE_GET_BLOCKS(num_rois),
CAFFE_CUDA_NUM_THREADS>>>(
num_rois, n, proposals_.gpu_data(), roi_indices_.gpu_data(),
p_roi_item, p_score_item);
CUDA_POST_KERNEL_CHECK;
top_shape[0] += num_rois;
}
top[0]->Reshape(top_shape);
if (top.size() > 1) {
top_shape.pop_back();
top[1]->Reshape(top_shape);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ProposalLayer);
} // namespace caffe
|
8851b24c1de9f1aff6c722398be96fc88d228853.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <vector>
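// blurGauss applies a 5x5 Gaussian (integer weights normalised by 273) to an
// interleaved 3-channel image, one thread per pixel; pixels within two rows or
// columns of the border are not written.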
__global__ void blurGauss ( unsigned char * data, unsigned char * out, std::size_t cols, std::size_t rows) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
auto j = blockIdx.y * blockDim.y + threadIdx.y;
if ( i > 1 && i < (cols - 2) && j > 1 && j < (rows - 2)) {
for (auto c = 0; c < 3; ++c){
auto gu = data[((j - 2) * cols + i - 2) * 3 + c] + 4 * data[((j - 2) * cols + i - 1) * 3 + c]
+ 7 * data[((j - 2) * cols + i) * 3 + c]
+ 4 * data[((j - 2) * cols + i + 1) * 3 + c] + data[((j - 2) * cols + i + 2) * 3 + c]
                    + 4 * data[((j - 1) * cols + i - 2) * 3 + c] + 16 * data[((j - 1) * cols + i - 1) * 3 + c]
+ 26 * data[((j - 1) * cols + i) * 3 + c]
                    + 16 * data[((j - 1) * cols + i + 1) * 3 + c] + 4 * data[((j - 1) * cols + i + 2) * 3 + c]
+ 7 * data[((j) * cols + i - 2) * 3 + c] + 26 * data[((j) * cols + i - 1) * 3 + c]
+ 41 * data[((j) * cols + i) * 3 + c]
+ 26 * data[((j) * cols + i + 1) * 3 + c] + 7 * data[((j) * cols + i + 2) * 3 + c]
+ 4 * data[((j + 1) * cols + i - 2) * 3 + c] + 16 * data[((j + 1) * cols + i - 1) * 3 + c]
+ 26 * data[((j + 1) * cols + i) * 3 + c]
+ 16 * data[((j + 1) * cols + i + 1) * 3 + c] + 4 * data[((j + 1) * cols + i + 2) * 3 + c]
+ data[((j + 2) * cols + i - 2) * 3 + c] + 4 * data[((j + 2) * cols + i - 1) * 3 + c]
+ 7 * data[((j + 2) * cols + i) * 3 + c]
+ 4 * data[((j + 2) * cols + i + 1) * 3 + c] + data[((j + 2) * cols + i + 2) * 3 + c];
out[(j * cols + i) * 3 + c] = (gu / 273);
}
}
}
int main()
{
cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
std::vector< unsigned char > g( 3 * rows * cols );
cv::Mat m_out( rows, cols, CV_8UC3, g.data() );
unsigned char * rgb_d;
unsigned char * out;
hipMalloc( &rgb_d, 3 * rows * cols);
hipMalloc( &out, 3 * rows * cols );
hipMemcpy( rgb_d, rgb, 3 * rows * cols, hipMemcpyHostToDevice );
dim3 t( 32, 32 );
dim3 be(( cols - 1) / t.x + 1 , ( rows - 1 ) / t.y + 1 );
// dim3 t( 16, 16 );
// dim3 be( 3 * 2 * (( cols - 1) / t.x + 1 ), 2 * (( rows - 1 ) / t.y + 1 ));
// dim3 t( 4, 4 );
// dim3 be( 3 * 8 * (( cols - 1) / t.x + 1 ), 8 * (( rows - 1 ) / t.y + 1 ));
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start );
hipLaunchKernelGGL(( blurGauss), dim3(be), dim3(t) , 0, 0, rgb_d, out, cols, rows );
hipMemcpy(g.data(), out, 3 * rows * cols, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
  auto err = hipGetLastError();
  // If no error was detected, hipGetLastError() returns hipSuccess
  if (err != hipSuccess){
    std::cout << hipGetErrorName(err) << std::endl;
    std::cout << hipGetErrorString(err) << std::endl;
}
else {
    std::cout << "No error" << std::endl;
}
hipEventRecord( stop );
hipEventSynchronize( stop );
float duration = 0.0f;
hipEventElapsedTime( &duration, start, stop );
std::cout << "Total: " << duration << "ms\n";
cv::imwrite( "outBlurGauss.jpg", m_out );
hipFree( rgb_d);
//hipFree( g_d);
hipFree ( out);
return 0;
}
| 8851b24c1de9f1aff6c722398be96fc88d228853.cu | #include <opencv2/opencv.hpp>
#include <vector>
__global__ void blurGauss ( unsigned char * data, unsigned char * out, std::size_t cols, std::size_t rows) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
auto j = blockIdx.y * blockDim.y + threadIdx.y;
if ( i > 1 && i < (cols - 2) && j > 1 && j < (rows - 2)) {
for (auto c = 0; c < 3; ++c){
auto gu = data[((j - 2) * cols + i - 2) * 3 + c] + 4 * data[((j - 2) * cols + i - 1) * 3 + c]
+ 7 * data[((j - 2) * cols + i) * 3 + c]
+ 4 * data[((j - 2) * cols + i + 1) * 3 + c] + data[((j - 2) * cols + i + 2) * 3 + c]
                    + 4 * data[((j - 1) * cols + i - 2) * 3 + c] + 16 * data[((j - 1) * cols + i - 1) * 3 + c]
+ 26 * data[((j - 1) * cols + i) * 3 + c]
                    + 16 * data[((j - 1) * cols + i + 1) * 3 + c] + 4 * data[((j - 1) * cols + i + 2) * 3 + c]
+ 7 * data[((j) * cols + i - 2) * 3 + c] + 26 * data[((j) * cols + i - 1) * 3 + c]
+ 41 * data[((j) * cols + i) * 3 + c]
+ 26 * data[((j) * cols + i + 1) * 3 + c] + 7 * data[((j) * cols + i + 2) * 3 + c]
+ 4 * data[((j + 1) * cols + i - 2) * 3 + c] + 16 * data[((j + 1) * cols + i - 1) * 3 + c]
+ 26 * data[((j + 1) * cols + i) * 3 + c]
+ 16 * data[((j + 1) * cols + i + 1) * 3 + c] + 4 * data[((j + 1) * cols + i + 2) * 3 + c]
+ data[((j + 2) * cols + i - 2) * 3 + c] + 4 * data[((j + 2) * cols + i - 1) * 3 + c]
+ 7 * data[((j + 2) * cols + i) * 3 + c]
+ 4 * data[((j + 2) * cols + i + 1) * 3 + c] + data[((j + 2) * cols + i + 2) * 3 + c];
out[(j * cols + i) * 3 + c] = (gu / 273);
}
}
}
int main()
{
cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
std::vector< unsigned char > g( 3 * rows * cols );
cv::Mat m_out( rows, cols, CV_8UC3, g.data() );
unsigned char * rgb_d;
unsigned char * out;
cudaMalloc( &rgb_d, 3 * rows * cols);
cudaMalloc( &out, 3 * rows * cols );
cudaMemcpy( rgb_d, rgb, 3 * rows * cols, cudaMemcpyHostToDevice );
dim3 t( 32, 32 );
dim3 be(( cols - 1) / t.x + 1 , ( rows - 1 ) / t.y + 1 );
// dim3 t( 16, 16 );
// dim3 be( 3 * 2 * (( cols - 1) / t.x + 1 ), 2 * (( rows - 1 ) / t.y + 1 ));
// dim3 t( 4, 4 );
// dim3 be( 3 * 8 * (( cols - 1) / t.x + 1 ), 8 * (( rows - 1 ) / t.y + 1 ));
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start );
blurGauss<<< be, t >>>( rgb_d, out, cols, rows );
cudaMemcpy(g.data(), out, 3 * rows * cols, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
auto cudaError = cudaGetLastError();
  // If no error was detected, cudaGetLastError() returns cudaSuccess
if (cudaError != cudaSuccess){
std::cout << cudaGetErrorName(cudaError) << std::endl;
std::cout << cudaGetErrorString(cudaError) << std::endl;
}
else {
    std::cout << "No error" << std::endl;
}
cudaEventRecord( stop );
cudaEventSynchronize( stop );
float duration = 0.0f;
cudaEventElapsedTime( &duration, start, stop );
std::cout << "Total: " << duration << "ms\n";
cv::imwrite( "outBlurGauss.jpg", m_out );
cudaFree( rgb_d);
//cudaFree( g_d);
cudaFree ( out);
return 0;
}
|
c73783ef6bbdb8888e5925fb100b266de2966b62.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define N 2048
#define THREADS_PER_BLOCK 128
void checkCUDAError(const char*);
void random_ints(int *a);
__global__ void vectorAdd(int *a, int *b, int *c, int max) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < max)
		c[i] = a[i] + b[i];
}
int main(void) {
int *a, *b, *c, *c_ref; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int errors;
unsigned int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
checkCUDAError("CUDA malloc");
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(size); random_ints(a);
b = (int *)malloc(size); random_ints(b);
c = (int *)malloc(size);
c_ref = (int *)malloc(size);
// Copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
checkCUDAError("CUDA memcpy");
// Launch add() kernel on GPU
vectorAdd << <N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_a, d_b, d_c, N);
checkCUDAError("CUDA kernel");
// Copy result back to host
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
checkCUDAError("CUDA memcpy");
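	// Verification sketch (not in the original): c_ref and errors are declared
	// above but never used; a typical host-side check would be
	//   errors = 0;
	//   for (unsigned int i = 0; i < N; i++) {
	//     c_ref[i] = a[i] + b[i];
	//     if (c[i] != c_ref[i]) errors++;
	//   }
	//   printf("%d errors\n", errors);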
// Cleanup
free(a); free(b); free(c);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
checkCUDAError("CUDA cleanup");
return 0;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void random_ints(int *a)
{
for (unsigned int i = 0; i < N; i++){
a[i] = rand();
}
}
| c73783ef6bbdb8888e5925fb100b266de2966b62.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define N 2048
#define THREADS_PER_BLOCK 128
void checkCUDAError(const char*);
void random_ints(int *a);
__global__ void vectorAdd(int *a, int *b, int *c, int max) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < max)
		c[i] = a[i] + b[i];
}
int main(void) {
int *a, *b, *c, *c_ref; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int errors;
unsigned int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
checkCUDAError("CUDA malloc");
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(size); random_ints(a);
b = (int *)malloc(size); random_ints(b);
c = (int *)malloc(size);
c_ref = (int *)malloc(size);
// Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
checkCUDAError("CUDA memcpy");
// Launch add() kernel on GPU
vectorAdd << <N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_a, d_b, d_c, N);
checkCUDAError("CUDA kernel");
// Copy result back to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
checkCUDAError("CUDA memcpy");
// Cleanup
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
checkCUDAError("CUDA cleanup");
return 0;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void random_ints(int *a)
{
for (unsigned int i = 0; i < N; i++){
a[i] = rand();
}
}
|
74f2232db003fa8a96862ac9880036d7e8018807.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// KERNEL
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
__global__ void kernel( int timeinst,
fp* d_initvalu,
fp* d_finavalu,
fp* d_params,
fp* d_com){
//======================================================================================================================================================
// VARIABLES
//======================================================================================================================================================
// CUDA indexes
int bx; // get current horizontal block index (0-n)
int tx; // get current horizontal thread index (0-n)
// pointers
int valu_offset; // inivalu and finavalu offset
int params_offset; // parameters offset
int com_offset; // kernel1-kernel2 communication offset
// module parameters
fp CaDyad; // from ECC model, *** Converting from [mM] to [uM] ***
fp CaSL; // from ECC model, *** Converting from [mM] to [uM] ***
fp CaCyt; // from ECC model, *** Converting from [mM] to [uM] ***
//======================================================================================================================================================
// COMPUTATION
//======================================================================================================================================================
// CUDA indexes
bx = blockIdx.x; // get current horizontal block index (0-n)
tx = threadIdx.x; // get current horizontal thread index (0-n)
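	// NOTE: work is assigned by block index rather than by data index: block 0,
	// thread 0 runs the sequential ECC model, block 1, thread 0 runs the three
	// CaM modules (dyad, SL, cytosol), and all other threads are idle.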
//=====================================================================
// ECC
//=====================================================================
// limit to useful threads
if(bx == 0){ // first processor runs ECC
if(tx == 0){ // only 1 thread runs it, since its a sequential code
// thread offset
valu_offset = 0; //
// ecc function
kernel_ecc( timeinst,
d_initvalu,
d_finavalu,
valu_offset,
d_params);
}
}
//=====================================================================
// CAM x 3
//=====================================================================
// limit to useful threads
else if(bx == 1){ // second processor runs CAMs (in parallel with ECC)
if(tx == 0){ // only 1 thread runs it, since its a sequential code
// specific
valu_offset = 46;
params_offset = 0;
com_offset = 0;
CaDyad = d_initvalu[35]*1e3; // from ECC model, *** Converting from [mM] to [uM] ***
// cam function for Dyad
kernel_cam( timeinst,
d_initvalu,
d_finavalu,
valu_offset,
d_params,
params_offset,
d_com,
com_offset,
CaDyad);
// specific
valu_offset = 61;
params_offset = 5;
com_offset = 1;
CaSL = d_initvalu[36]*1e3; // from ECC model, *** Converting from [mM] to [uM] ***
			// cam function for SL
kernel_cam( timeinst,
d_initvalu,
d_finavalu,
valu_offset,
d_params,
params_offset,
d_com,
com_offset,
CaSL);
// specific
valu_offset = 76;
params_offset = 10;
com_offset = 2;
CaCyt = d_initvalu[37]*1e3; // from ECC model, *** Converting from [mM] to [uM] ***
			// cam function for Cyt
kernel_cam( timeinst,
d_initvalu,
d_finavalu,
valu_offset,
d_params,
params_offset,
d_com,
com_offset,
CaCyt);
}
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// END OF KERNEL
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
}
| 74f2232db003fa8a96862ac9880036d7e8018807.cu | //===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// KERNEL
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
__global__ void kernel( int timeinst,
fp* d_initvalu,
fp* d_finavalu,
fp* d_params,
fp* d_com){
//======================================================================================================================================================
// VARIABLES
//======================================================================================================================================================
// CUDA indexes
int bx; // get current horizontal block index (0-n)
int tx; // get current horizontal thread index (0-n)
// pointers
int valu_offset; // inivalu and finavalu offset
int params_offset; // parameters offset
int com_offset; // kernel1-kernel2 communication offset
// module parameters
fp CaDyad; // from ECC model, *** Converting from [mM] to [uM] ***
fp CaSL; // from ECC model, *** Converting from [mM] to [uM] ***
fp CaCyt; // from ECC model, *** Converting from [mM] to [uM] ***
//======================================================================================================================================================
// COMPUTATION
//======================================================================================================================================================
// CUDA indexes
bx = blockIdx.x; // get current horizontal block index (0-n)
tx = threadIdx.x; // get current horizontal thread index (0-n)
//=====================================================================
// ECC
//=====================================================================
// limit to useful threads
if(bx == 0){ // first processor runs ECC
if(tx == 0){ // only 1 thread runs it, since its a sequential code
// thread offset
valu_offset = 0; //
// ecc function
kernel_ecc( timeinst,
d_initvalu,
d_finavalu,
valu_offset,
d_params);
}
}
//=====================================================================
// CAM x 3
//=====================================================================
// limit to useful threads
else if(bx == 1){ // second processor runs CAMs (in parallel with ECC)
if(tx == 0){ // only 1 thread runs it, since its a sequential code
// specific
valu_offset = 46;
params_offset = 0;
com_offset = 0;
CaDyad = d_initvalu[35]*1e3; // from ECC model, *** Converting from [mM] to [uM] ***
// cam function for Dyad
kernel_cam( timeinst,
d_initvalu,
d_finavalu,
valu_offset,
d_params,
params_offset,
d_com,
com_offset,
CaDyad);
// specific
valu_offset = 61;
params_offset = 5;
com_offset = 1;
CaSL = d_initvalu[36]*1e3; // from ECC model, *** Converting from [mM] to [uM] ***
// cam function for Dyad
kernel_cam( timeinst,
d_initvalu,
d_finavalu,
valu_offset,
d_params,
params_offset,
d_com,
com_offset,
CaSL);
// specific
valu_offset = 76;
params_offset = 10;
com_offset = 2;
CaCyt = d_initvalu[37]*1e3; // from ECC model, *** Converting from [mM] to [uM] ***
// cam function for Dyad
kernel_cam( timeinst,
d_initvalu,
d_finavalu,
valu_offset,
d_params,
params_offset,
d_com,
com_offset,
CaCyt);
}
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// END OF KERNEL
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
}
|
d9f8b1bdbe885a2ba421ccf7d366f10ad41dbd1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/unsorted_segment_sum_kernel_util.h"
#include "oneflow/core/kernel/kernel_util.cuh"
#include "oneflow/core/kernel/kernel.h"
#include <assert.h>
namespace oneflow {
namespace {
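// GetOutOffset maps a flat offset into the (outer, segment_id, inner) data
// layout to the matching offset in the (outer, segment, inner) output layout,
// or returns -1 when the segment id (after subtracting segment_id_offset)
// falls outside [0, num_segments) and the element must be skipped.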
template<typename K, typename IDX>
__device__ IDX GetOutOffset(const IDX data_offset, const K* segment_ids, const IDX num_segment_ids,
const IDX num_segments, const IDX inner_dim_size,
const IDX segment_id_offset) {
const IDX outer_dim_elem_cnt = num_segment_ids * inner_dim_size;
const IDX outer_idx = data_offset / outer_dim_elem_cnt;
const IDX segment_id_idx = data_offset % outer_dim_elem_cnt / inner_dim_size;
const IDX inner_idx = data_offset % inner_dim_size;
const K origin_idx = segment_ids[segment_id_idx];
assert(origin_idx >= 0);
const IDX idx = origin_idx - segment_id_offset;
if (idx >= 0 && idx < num_segments) {
return outer_idx * num_segments * inner_dim_size + idx * inner_dim_size + inner_idx;
} else {
return -1;
}
}
template<typename T, typename K, typename IDX>
__global__ void UnsortedSegmentSumGpu(const IDX data_elem_cnt, const K* segment_ids,
const IDX num_segment_ids, const T* data,
const IDX num_segments, const IDX inner_dim_size, T* out,
const IDX segment_id_offset) {
CUDA_1D_KERNEL_LOOP_T(IDX, i, data_elem_cnt) {
const T val = data[i];
if (val != static_cast<T>(0)) {
const int64_t out_offset = GetOutOffset<K, IDX>(i, segment_ids, num_segment_ids, num_segments,
inner_dim_size, segment_id_offset);
if (out_offset >= 0) { gpu_atomic_add(out + out_offset, val); }
}
}
}
bool IsSafeUseIndex32(const int64_t num_segment_ids, const int64_t num_segments,
const int64_t outer_dim_size, const int64_t inner_dim_size) {
const int64_t data_elem_cnt = outer_dim_size * num_segment_ids * inner_dim_size;
const int64_t out_elem_cnt = outer_dim_size * num_segments * inner_dim_size;
return ::max(out_elem_cnt, data_elem_cnt) < GetMaxVal<int32_t>() / 2;
}
} // namespace
template<typename T, typename K>
struct UnsortedSegmentSumKernelUtil<DeviceType::kGPU, T, K> final {
static void UnsortedSegmentSum(DeviceCtx* ctx, const K* segment_ids, const T* data,
int64_t num_segment_ids, int64_t num_segments,
int64_t outer_dim_size, int64_t inner_dim_size,
int64_t segment_id_offset, T* out) {
const int64_t data_elem_cnt = outer_dim_size * num_segment_ids * inner_dim_size;
if (IsSafeUseIndex32(num_segment_ids, num_segments, outer_dim_size, inner_dim_size)) {
hipLaunchKernelGGL(( UnsortedSegmentSumGpu<T, K, int32_t>)
, dim3(BlocksNum4ThreadsNum(data_elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
data_elem_cnt, segment_ids, num_segment_ids, data, num_segments, inner_dim_size, out,
segment_id_offset);
} else {
hipLaunchKernelGGL(( UnsortedSegmentSumGpu<T, K, int64_t>)
, dim3(BlocksNum4ThreadsNum(data_elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
data_elem_cnt, segment_ids, num_segment_ids, data, num_segments, inner_dim_size, out,
segment_id_offset);
}
}
};
template<typename K>
struct UnsortedSegmentSumKernelUtil<DeviceType::kGPU, float16, K> final {
static void UnsortedSegmentSum(DeviceCtx* ctx, const K* segment_ids, const float16* data,
int64_t num_segment_ids, int64_t num_segments,
int64_t outer_dim_size, int64_t inner_dim_size,
int64_t segment_id_offset, float16* out) {
UnsortedSegmentSumKernelUtil<DeviceType::kGPU, half, K>::UnsortedSegmentSum(
ctx, segment_ids, reinterpret_cast<const half*>(data), num_segment_ids, num_segments,
outer_dim_size, inner_dim_size, segment_id_offset, reinterpret_cast<half*>(out));
}
};
#define INITIATE_UNSORTED_SEGMENT_SUM_KERNEL_UTIL_GPU(in_type_pair, index_type_pair) \
template struct UnsortedSegmentSumKernelUtil<DeviceType::kGPU, OF_PP_PAIR_FIRST(in_type_pair), \
OF_PP_PAIR_FIRST(index_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_UNSORTED_SEGMENT_SUM_KERNEL_UTIL_GPU,
UNSORTED_SEGMENT_SUM_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ);
#undef INITIATE_UNSORTED_SEGMENT_SUM_KERNEL_UTIL_GPU
} // namespace oneflow
| d9f8b1bdbe885a2ba421ccf7d366f10ad41dbd1f.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/unsorted_segment_sum_kernel_util.h"
#include "oneflow/core/kernel/kernel_util.cuh"
#include "oneflow/core/kernel/kernel.h"
#include <assert.h>
namespace oneflow {
namespace {
template<typename K, typename IDX>
__device__ IDX GetOutOffset(const IDX data_offset, const K* segment_ids, const IDX num_segment_ids,
const IDX num_segments, const IDX inner_dim_size,
const IDX segment_id_offset) {
const IDX outer_dim_elem_cnt = num_segment_ids * inner_dim_size;
const IDX outer_idx = data_offset / outer_dim_elem_cnt;
const IDX segment_id_idx = data_offset % outer_dim_elem_cnt / inner_dim_size;
const IDX inner_idx = data_offset % inner_dim_size;
const K origin_idx = segment_ids[segment_id_idx];
assert(origin_idx >= 0);
const IDX idx = origin_idx - segment_id_offset;
if (idx >= 0 && idx < num_segments) {
return outer_idx * num_segments * inner_dim_size + idx * inner_dim_size + inner_idx;
} else {
return -1;
}
}
template<typename T, typename K, typename IDX>
__global__ void UnsortedSegmentSumGpu(const IDX data_elem_cnt, const K* segment_ids,
const IDX num_segment_ids, const T* data,
const IDX num_segments, const IDX inner_dim_size, T* out,
const IDX segment_id_offset) {
CUDA_1D_KERNEL_LOOP_T(IDX, i, data_elem_cnt) {
const T val = data[i];
if (val != static_cast<T>(0)) {
const int64_t out_offset = GetOutOffset<K, IDX>(i, segment_ids, num_segment_ids, num_segments,
inner_dim_size, segment_id_offset);
if (out_offset >= 0) { gpu_atomic_add(out + out_offset, val); }
}
}
}
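// IsSafeUseIndex32 checks that both the input and the output element counts
// stay well inside the int32 range, so the kernel can be instantiated with
// 32-bit index arithmetic; otherwise the 64-bit instantiation below is used.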
bool IsSafeUseIndex32(const int64_t num_segment_ids, const int64_t num_segments,
const int64_t outer_dim_size, const int64_t inner_dim_size) {
const int64_t data_elem_cnt = outer_dim_size * num_segment_ids * inner_dim_size;
const int64_t out_elem_cnt = outer_dim_size * num_segments * inner_dim_size;
return std::max(out_elem_cnt, data_elem_cnt) < GetMaxVal<int32_t>() / 2;
}
} // namespace
template<typename T, typename K>
struct UnsortedSegmentSumKernelUtil<DeviceType::kGPU, T, K> final {
static void UnsortedSegmentSum(DeviceCtx* ctx, const K* segment_ids, const T* data,
int64_t num_segment_ids, int64_t num_segments,
int64_t outer_dim_size, int64_t inner_dim_size,
int64_t segment_id_offset, T* out) {
const int64_t data_elem_cnt = outer_dim_size * num_segment_ids * inner_dim_size;
if (IsSafeUseIndex32(num_segment_ids, num_segments, outer_dim_size, inner_dim_size)) {
UnsortedSegmentSumGpu<T, K, int32_t>
<<<BlocksNum4ThreadsNum(data_elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
data_elem_cnt, segment_ids, num_segment_ids, data, num_segments, inner_dim_size, out,
segment_id_offset);
} else {
UnsortedSegmentSumGpu<T, K, int64_t>
<<<BlocksNum4ThreadsNum(data_elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
data_elem_cnt, segment_ids, num_segment_ids, data, num_segments, inner_dim_size, out,
segment_id_offset);
}
}
};
template<typename K>
struct UnsortedSegmentSumKernelUtil<DeviceType::kGPU, float16, K> final {
static void UnsortedSegmentSum(DeviceCtx* ctx, const K* segment_ids, const float16* data,
int64_t num_segment_ids, int64_t num_segments,
int64_t outer_dim_size, int64_t inner_dim_size,
int64_t segment_id_offset, float16* out) {
UnsortedSegmentSumKernelUtil<DeviceType::kGPU, half, K>::UnsortedSegmentSum(
ctx, segment_ids, reinterpret_cast<const half*>(data), num_segment_ids, num_segments,
outer_dim_size, inner_dim_size, segment_id_offset, reinterpret_cast<half*>(out));
}
};
#define INITIATE_UNSORTED_SEGMENT_SUM_KERNEL_UTIL_GPU(in_type_pair, index_type_pair) \
template struct UnsortedSegmentSumKernelUtil<DeviceType::kGPU, OF_PP_PAIR_FIRST(in_type_pair), \
OF_PP_PAIR_FIRST(index_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_UNSORTED_SEGMENT_SUM_KERNEL_UTIL_GPU,
UNSORTED_SEGMENT_SUM_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ);
#undef INITIATE_UNSORTED_SEGMENT_SUM_KERNEL_UTIL_GPU
} // namespace oneflow
|
b07df78bd2eec416088ea61da9b5374a11c2e382.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zsymmetrize_tiles.cu normal z -> s, Fri Sep 11 18:29:21 2015
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/*
Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix.
Grid is ntile x ceil(m/NB).
Each tile is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
ssymmetrize_tiles_lower( int m, float *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.x*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.y*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = (*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
ssymmetrize_tiles_upper( int m, float *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.x*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.y*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = (*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/**
Purpose
-------
SSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa,
to make some blocks of dA into general representations of a symmetric block.
This processes NTILE blocks, typically the diagonal blocks.
Each block is offset by mstride rows and nstride columns from the previous block.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows & columns of each square block of dA. M >= 0.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix dA. N = m + nstride*(ntile-1).
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)).
@param[in]
ntile INTEGER
Number of blocks to symmetrize. ntile >= 0.
@param[in]
mstride INTEGER
Row offset from start of one block to start of next block. mstride >= 0.
Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles
from overlapping.
@param[in]
nstride INTEGER
Column offset from start of one block to start of next block. nstride >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
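/*
    Example (illustrative sketch, not from the MAGMA sources): symmetrize the
    ntile consecutive diagonal m-by-m blocks of dA, filling each upper triangle
    from its lower triangle:

        magmablas_ssymmetrize_tiles( MagmaLower, m, dA, ldda, ntile, m, m );

    With mstride = nstride = m the tiles sit along the diagonal and the
    requirement that mstride >= m or nstride >= m is satisfied.
*/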
extern "C" void
magmablas_ssymmetrize_tiles_q(
magma_uplo_t uplo, magma_int_t m,
magmaFloat_ptr dA, magma_int_t ldda,
magma_int_t ntile, magma_int_t mstride, magma_int_t nstride,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m + mstride*(ntile-1)) )
info = -5;
else if ( ntile < 0 )
info = -6;
else if ( mstride < 0 )
info = -7;
else if ( nstride < 0 )
info = -8;
else if ( mstride < m && nstride < m ) // only one must be >= m.
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || ntile == 0 )
return;
dim3 threads( NB );
dim3 grid( ntile, magma_ceildiv( m, NB ) );
//printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x );
if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( ssymmetrize_tiles_upper), dim3(grid), dim3(threads), 0, queue , m, dA, ldda, mstride, nstride );
}
else {
hipLaunchKernelGGL(( ssymmetrize_tiles_lower), dim3(grid), dim3(threads), 0, queue , m, dA, ldda, mstride, nstride );
}
}
/**
@see magmablas_ssymmetrize_tiles_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_ssymmetrize_tiles(
magma_uplo_t uplo, magma_int_t m,
magmaFloat_ptr dA, magma_int_t ldda,
magma_int_t ntile, magma_int_t mstride, magma_int_t nstride )
{
magmablas_ssymmetrize_tiles_q( uplo, m, dA, ldda, ntile, mstride, nstride, magma_stream );
}
| b07df78bd2eec416088ea61da9b5374a11c2e382.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zsymmetrize_tiles.cu normal z -> s, Fri Sep 11 18:29:21 2015
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/*
Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix.
Grid is ntile x ceil(m/NB).
Each tile is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
ssymmetrize_tiles_lower( int m, float *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.x*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.y*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = (*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
ssymmetrize_tiles_upper( int m, float *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.x*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.y*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = (*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/**
Purpose
-------
SSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa,
to make some blocks of dA into general representations of a symmetric block.
This processes NTILE blocks, typically the diagonal blocks.
Each block is offset by mstride rows and nstride columns from the previous block.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows & columns of each square block of dA. M >= 0.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix dA. N = m + nstride*(ntile-1).
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)).
@param[in]
ntile INTEGER
Number of blocks to symmetrize. ntile >= 0.
@param[in]
mstride INTEGER
Row offset from start of one block to start of next block. mstride >= 0.
Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles
from overlapping.
@param[in]
nstride INTEGER
Column offset from start of one block to start of next block. nstride >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_ssymmetrize_tiles_q(
magma_uplo_t uplo, magma_int_t m,
magmaFloat_ptr dA, magma_int_t ldda,
magma_int_t ntile, magma_int_t mstride, magma_int_t nstride,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m + mstride*(ntile-1)) )
info = -5;
else if ( ntile < 0 )
info = -6;
else if ( mstride < 0 )
info = -7;
else if ( nstride < 0 )
info = -8;
else if ( mstride < m && nstride < m ) // only one must be >= m.
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || ntile == 0 )
return;
dim3 threads( NB );
dim3 grid( ntile, magma_ceildiv( m, NB ) );
//printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x );
if ( uplo == MagmaUpper ) {
ssymmetrize_tiles_upper<<< grid, threads, 0, queue >>>( m, dA, ldda, mstride, nstride );
}
else {
ssymmetrize_tiles_lower<<< grid, threads, 0, queue >>>( m, dA, ldda, mstride, nstride );
}
}
/**
@see magmablas_ssymmetrize_tiles_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_ssymmetrize_tiles(
magma_uplo_t uplo, magma_int_t m,
magmaFloat_ptr dA, magma_int_t ldda,
magma_int_t ntile, magma_int_t mstride, magma_int_t nstride )
{
magmablas_ssymmetrize_tiles_q( uplo, m, dA, ldda, ntile, mstride, nstride, magma_stream );
}
|
fb121ead77a31e386b37e44c7e1ff7209275b447.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <traversal/legacy/bfs_ref.h>
#include <utilities/base_fixture.hpp>
#include <utilities/test_utilities.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/legacy/graph.hpp>
#include <raft/error.hpp>
#include <raft/handle.hpp>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <gmock/gmock.h>
#include <fstream>
#include <queue>
#include <stack>
#include <utility>
#ifndef TEST_EPSILON
#define TEST_EPSILON 0.0001
#endif
// NOTE: Defines the threshold below which values are treated as close to
// zero and their difference is discarded,
// i.e., do we consider the difference between 1.3e-9 and 8.e-12 to be
// significant
#ifndef TEST_ZERO_THRESHOLD
#define TEST_ZERO_THRESHOLD 1e-10
#endif
// ============================================================================
// C++ Reference Implementation
// ============================================================================
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void ref_accumulation(result_t* result,
vertex_t const number_of_vertices,
std::stack<vertex_t>& S,
std::vector<std::vector<vertex_t>>& pred,
std::vector<double>& sigmas,
std::vector<double>& deltas,
vertex_t source)
{
for (vertex_t v = 0; v < number_of_vertices; ++v) {
deltas[v] = 0;
}
while (!S.empty()) {
vertex_t w = S.top();
S.pop();
for (vertex_t v : pred[w]) {
deltas[v] += (sigmas[v] / sigmas[w]) * (1.0 + deltas[w]);
}
if (w != source) { result[w] += deltas[w]; }
}
}
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void ref_endpoints_accumulation(result_t* result,
vertex_t const number_of_vertices,
std::stack<vertex_t>& S,
std::vector<std::vector<vertex_t>>& pred,
std::vector<double>& sigmas,
std::vector<double>& deltas,
vertex_t source)
{
result[source] += S.size() - 1;
for (vertex_t v = 0; v < number_of_vertices; ++v) {
deltas[v] = 0;
}
while (!S.empty()) {
vertex_t w = S.top();
S.pop();
for (vertex_t v : pred[w]) {
deltas[v] += (sigmas[v] / sigmas[w]) * (1.0 + deltas[w]);
}
if (w != source) { result[w] += deltas[w] + 1; }
}
}
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void ref_edge_accumulation(result_t* result,
vertex_t const number_of_vertices,
std::stack<vertex_t>& S,
std::vector<std::vector<vertex_t>>& pred,
std::vector<double>& sigmas,
std::vector<double>& deltas,
vertex_t source)
{
for (vertex_t v = 0; v < number_of_vertices; ++v) {
deltas[v] = 0;
}
while (!S.empty()) {
vertex_t w = S.top();
S.pop();
for (vertex_t v : pred[w]) {
deltas[v] += (sigmas[v] / sigmas[w]) * (1.0 + deltas[w]);
}
if (w != source) { result[w] += deltas[w]; }
}
}
// Algorithm 1: Shortest-path vertex betweenness, (Brandes, 2001)
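// Summary (added for clarity): the ref_*_accumulation helpers above implement
// Brandes' dependency recurrence
//   delta(v) = sum over w with v in pred(w) of (sigma(v) / sigma(w)) * (1 + delta(w))
// where sigma(x) is the number of shortest paths from the current source to x;
// the betweenness of v is the sum of delta(v) over all sources.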
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void reference_betweenness_centrality_impl(vertex_t* indices,
edge_t* offsets,
vertex_t const number_of_vertices,
result_t* result,
bool endpoints,
vertex_t const* sources,
vertex_t const number_of_sources)
{
std::queue<vertex_t> Q;
std::stack<vertex_t> S;
// NOTE: dist is of type vertex_t not weight_t
std::vector<vertex_t> dist(number_of_vertices);
std::vector<std::vector<vertex_t>> pred(number_of_vertices);
std::vector<double> sigmas(number_of_vertices);
std::vector<double> deltas(number_of_vertices);
std::vector<vertex_t> neighbors;
if (sources) {
for (vertex_t source_idx = 0; source_idx < number_of_sources; ++source_idx) {
vertex_t s = sources[source_idx];
// Step 1: Single-source shortest-paths problem
// a. Initialization
ref_bfs<vertex_t, edge_t>(indices, offsets, number_of_vertices, Q, S, dist, pred, sigmas, s);
// Step 2: Accumulation
// Back propagation of dependencies
if (endpoints) {
ref_endpoints_accumulation<vertex_t, edge_t, weight_t, result_t>(
result, number_of_vertices, S, pred, sigmas, deltas, s);
} else {
ref_accumulation<vertex_t, edge_t, weight_t, result_t>(
result, number_of_vertices, S, pred, sigmas, deltas, s);
}
}
} else {
for (vertex_t s = 0; s < number_of_vertices; ++s) {
// Step 1: Single-source shortest-paths problem
// a. Initialization
ref_bfs<vertex_t, edge_t>(indices, offsets, number_of_vertices, Q, S, dist, pred, sigmas, s);
// Step 2: Accumulation
// Back propagation of dependencies
if (endpoints) {
ref_endpoints_accumulation<vertex_t, edge_t, weight_t, result_t>(
result, number_of_vertices, S, pred, sigmas, deltas, s);
} else {
ref_accumulation<vertex_t, edge_t, weight_t, result_t>(
result, number_of_vertices, S, pred, sigmas, deltas, s);
}
}
}
}
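// reference_rescale (summary added for clarity): applies the usual
// betweenness normalizations, dividing by (n-1)(n-2), or by n(n-1) when
// endpoints are counted, and halving the scores for undirected graphs when
// not normalizing; when number_of_sources > 0 the result is additionally
// scaled by n / number_of_sources.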
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void reference_rescale(result_t* result,
bool directed,
bool normalize,
bool endpoints,
vertex_t const number_of_vertices,
vertex_t const number_of_sources)
{
bool modified = false;
result_t rescale_factor = static_cast<result_t>(1);
result_t casted_number_of_sources = static_cast<result_t>(number_of_sources);
result_t casted_number_of_vertices = static_cast<result_t>(number_of_vertices);
if (normalize) {
if (number_of_vertices > 2) {
if (endpoints) {
rescale_factor /= (casted_number_of_vertices * (casted_number_of_vertices - 1));
} else {
rescale_factor /= ((casted_number_of_vertices - 1) * (casted_number_of_vertices - 2));
}
modified = true;
}
} else {
if (!directed) {
rescale_factor /= static_cast<result_t>(2);
modified = true;
}
}
if (modified) {
if (number_of_sources > 0) {
rescale_factor *= (casted_number_of_vertices / casted_number_of_sources);
}
}
for (auto idx = 0; idx < number_of_vertices; ++idx) {
result[idx] *= rescale_factor;
}
}
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void reference_betweenness_centrality(
cugraph::legacy::GraphCSRView<vertex_t, edge_t, weight_t> const& graph,
result_t* result,
bool normalize,
bool endpoints, // This is not yet implemented
vertex_t const number_of_sources,
vertex_t const* sources)
{
vertex_t number_of_vertices = graph.number_of_vertices;
edge_t number_of_edges = graph.number_of_edges;
thrust::host_vector<vertex_t> h_indices(number_of_edges);
thrust::host_vector<edge_t> h_offsets(number_of_vertices + 1);
thrust::device_ptr<vertex_t> d_indices((vertex_t*)&graph.indices[0]);
thrust::device_ptr<edge_t> d_offsets((edge_t*)&graph.offsets[0]);
thrust::copy(d_indices, d_indices + number_of_edges, h_indices.begin());
thrust::copy(d_offsets, d_offsets + (number_of_vertices + 1), h_offsets.begin());
hipDeviceSynchronize();
reference_betweenness_centrality_impl<vertex_t, edge_t, weight_t, result_t>(&h_indices[0],
&h_offsets[0],
number_of_vertices,
result,
endpoints,
sources,
number_of_sources);
reference_rescale<vertex_t, edge_t, weight_t, result_t>(
result, graph.prop.directed, normalize, endpoints, number_of_vertices, number_of_sources);
}
// Explicit instantiation
/* FIXME!!!
template void reference_betweenness_centrality<int, int, float, float>(
cugraph::legacy::GraphCSRView<int, int, float> const &,
float *,
bool,
bool,
const int,
int const *);
template void reference_betweenness_centrality<int, int, double, double>(
cugraph::legacy::GraphCSRView<int, int, double> const &,
double *,
bool,
bool,
const int,
int const *);
*/
// =============================================================================
// Utility functions
// =============================================================================
// Compare while allowing a relative error of epsilon
// zero_threshold indicates when we should drop comparison for small numbers
template <typename T, typename precision_t>
bool compare_close(const T& a, const T& b, const precision_t epsilon, precision_t zero_threshold)
{
return ((zero_threshold > a && zero_threshold > b)) ||
(a >= b * (1.0 - epsilon)) && (a <= b * (1.0 + epsilon));
}
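// Example (illustrative only): with epsilon = 1e-4 and zero_threshold = 1e-10,
// compare_close(1.00005, 1.0, 1e-4, 1e-10) holds (relative error 5e-5), while
// compare_close(1.001, 1.0, 1e-4, 1e-10) does not; two values both below the
// zero threshold always compare as close.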
// =============================================================================
// Test Suite
// =============================================================================
// Defines Betweenness Centrality UseCase
// SSSP's test suite uses a parameter for the type of graph input that
// could be used (MTX / RMAT)
typedef struct BC_Usecase_t {
std::string config_; // Path to graph file
std::string file_path_; // Complete path to graph using dataset_root_dir
int number_of_sources_; // Number of source vertices for the traversal
BC_Usecase_t(const std::string& config, int number_of_sources)
: config_(config), number_of_sources_(number_of_sources)
{
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
// FIXME: Use platform independent stuff from c++14/17 on compiler update
const std::string& rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir();
if ((config_ != "") && (config_[0] != '/')) {
file_path_ = rapidsDatasetRootDir + "/" + config_;
} else {
file_path_ = config_;
}
};
} BC_Usecase;
class Tests_BC : public ::testing::TestWithParam<BC_Usecase> {
raft::handle_t handle;
public:
Tests_BC() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
// vertex_t vertex identifier data type
// edge_t edge identifier data type
// weight_t edge weight data type
// result_t result data type
// normalize should the result be normalized
// endpoints should the endpoints be included
template <typename vertex_t,
typename edge_t,
typename weight_t,
typename result_t,
bool normalize,
bool endpoints>
void run_current_test(const BC_Usecase& configuration)
{
// Step 1: Construction of the graph based on configuration
bool is_directed = false;
auto csr = cugraph::test::generate_graph_csr_from_mm<vertex_t, edge_t, weight_t>(
is_directed, configuration.file_path_);
hipDeviceSynchronize();
cugraph::legacy::GraphCSRView<vertex_t, edge_t, weight_t> G = csr->view();
G.prop.directed = is_directed;
CUDA_TRY(hipGetLastError());
std::vector<result_t> result(G.number_of_vertices, 0);
std::vector<result_t> expected(G.number_of_vertices, 0);
// Step 2: Generation of sources based on configuration
// if number_of_sources_ is 0 then sources must be nullptr
// Otherwise we only use the first k values
ASSERT_TRUE(configuration.number_of_sources_ >= 0 &&
configuration.number_of_sources_ <= G.number_of_vertices)
<< "Number number of sources should be >= 0 and"
<< " less than the number of vertices in the graph";
std::vector<vertex_t> sources(configuration.number_of_sources_);
thrust::sequence(thrust::host, sources.begin(), sources.end(), 0);
vertex_t* sources_ptr = nullptr;
if (configuration.number_of_sources_ > 0) { sources_ptr = sources.data(); }
reference_betweenness_centrality(
G, expected.data(), normalize, endpoints, configuration.number_of_sources_, sources_ptr);
sources_ptr = nullptr;
if (configuration.number_of_sources_ > 0) { sources_ptr = sources.data(); }
rmm::device_vector<result_t> d_result(G.number_of_vertices);
cugraph::betweenness_centrality(handle,
G,
d_result.data().get(),
normalize,
endpoints,
static_cast<weight_t*>(nullptr),
configuration.number_of_sources_,
sources_ptr);
hipDeviceSynchronize();
CUDA_TRY(hipMemcpy(result.data(),
d_result.data().get(),
sizeof(result_t) * G.number_of_vertices,
hipMemcpyDeviceToHost));
hipDeviceSynchronize();
for (int i = 0; i < G.number_of_vertices; ++i)
EXPECT_TRUE(compare_close(result[i], expected[i], TEST_EPSILON, TEST_ZERO_THRESHOLD))
<< "[MISMATCH] vaid = " << i << ", cugraph = " << result[i]
<< " expected = " << expected[i];
}
};
// ============================================================================
// Tests
// ============================================================================
// Verify un-normalized results
TEST_P(Tests_BC, CheckFP32_NO_NORMALIZE_NO_ENDPOINTS)
{
run_current_test<int, int, float, float, false, false>(GetParam());
}
#if 0
// Temporarily disable some of the test combinations
// Full solution will be explored for issue #1555
TEST_P(Tests_BC, CheckFP64_NO_NORMALIZE_NO_ENDPOINTS)
{
run_current_test<int, int, double, double, false, false>(GetParam());
}
TEST_P(Tests_BC, CheckFP32_NO_NORMALIZE_ENDPOINTS)
{
run_current_test<int, int, float, float, false, true>(GetParam());
}
#endif
TEST_P(Tests_BC, CheckFP64_NO_NORMALIZE_ENDPOINTS)
{
run_current_test<int, int, double, double, false, true>(GetParam());
}
// Verify normalized results
TEST_P(Tests_BC, CheckFP32_NORMALIZE_NO_ENDPOINTS)
{
run_current_test<int, int, float, float, true, false>(GetParam());
}
#if 0
// Temporarily disable some of the test combinations
// Full solution will be explored for issue #1555
TEST_P(Tests_BC, CheckFP64_NORMALIZE_NO_ENDPOINTS)
{
run_current_test<int, int, double, double, true, false>(GetParam());
}
TEST_P(Tests_BC, CheckFP32_NORMALIZE_ENDPOINTS)
{
run_current_test<int, int, float, float, true, true>(GetParam());
}
#endif
TEST_P(Tests_BC, CheckFP64_NORMALIZE_ENDPOINTS)
{
run_current_test<int, int, double, double, true, true>(GetParam());
}
#if 0
// Temporarily disable some of the test combinations
// Full solution will be explored for issue #1555
INSTANTIATE_TEST_SUITE_P(simple_test,
Tests_BC,
::testing::Values(BC_Usecase("test/datasets/karate.mtx", 0),
BC_Usecase("test/datasets/netscience.mtx", 0),
BC_Usecase("test/datasets/netscience.mtx", 4),
BC_Usecase("test/datasets/wiki2003.mtx", 4),
BC_Usecase("test/datasets/wiki-Talk.mtx", 4)));
#else
INSTANTIATE_TEST_SUITE_P(simple_test,
Tests_BC,
::testing::Values(BC_Usecase("test/datasets/karate.mtx", 0),
BC_Usecase("test/datasets/netscience.mtx", 0),
BC_Usecase("test/datasets/netscience.mtx", 4)));
#endif
CUGRAPH_TEST_PROGRAM_MAIN()
| fb121ead77a31e386b37e44c7e1ff7209275b447.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <traversal/legacy/bfs_ref.h>
#include <utilities/base_fixture.hpp>
#include <utilities/test_utilities.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/legacy/graph.hpp>
#include <raft/error.hpp>
#include <raft/handle.hpp>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <gmock/gmock.h>
#include <fstream>
#include <queue>
#include <stack>
#include <utility>
#ifndef TEST_EPSILON
#define TEST_EPSILON 0.0001
#endif
// NOTE: Defines the threshold below which values are treated as close to
// zero and their difference is discarded,
// i.e., do we consider the difference between 1.3e-9 and 8.e-12 to be
// significant
#ifndef TEST_ZERO_THRESHOLD
#define TEST_ZERO_THRESHOLD 1e-10
#endif
// ============================================================================
// C++ Reference Implementation
// ============================================================================
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void ref_accumulation(result_t* result,
vertex_t const number_of_vertices,
std::stack<vertex_t>& S,
std::vector<std::vector<vertex_t>>& pred,
std::vector<double>& sigmas,
std::vector<double>& deltas,
vertex_t source)
{
for (vertex_t v = 0; v < number_of_vertices; ++v) {
deltas[v] = 0;
}
while (!S.empty()) {
vertex_t w = S.top();
S.pop();
for (vertex_t v : pred[w]) {
deltas[v] += (sigmas[v] / sigmas[w]) * (1.0 + deltas[w]);
}
if (w != source) { result[w] += deltas[w]; }
}
}
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void ref_endpoints_accumulation(result_t* result,
vertex_t const number_of_vertices,
std::stack<vertex_t>& S,
std::vector<std::vector<vertex_t>>& pred,
std::vector<double>& sigmas,
std::vector<double>& deltas,
vertex_t source)
{
result[source] += S.size() - 1;
for (vertex_t v = 0; v < number_of_vertices; ++v) {
deltas[v] = 0;
}
while (!S.empty()) {
vertex_t w = S.top();
S.pop();
for (vertex_t v : pred[w]) {
deltas[v] += (sigmas[v] / sigmas[w]) * (1.0 + deltas[w]);
}
if (w != source) { result[w] += deltas[w] + 1; }
}
}
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void ref_edge_accumulation(result_t* result,
vertex_t const number_of_vertices,
std::stack<vertex_t>& S,
std::vector<std::vector<vertex_t>>& pred,
std::vector<double>& sigmas,
std::vector<double>& deltas,
vertex_t source)
{
for (vertex_t v = 0; v < number_of_vertices; ++v) {
deltas[v] = 0;
}
while (!S.empty()) {
vertex_t w = S.top();
S.pop();
for (vertex_t v : pred[w]) {
deltas[v] += (sigmas[v] / sigmas[w]) * (1.0 + deltas[w]);
}
if (w != source) { result[w] += deltas[w]; }
}
}
// Algorithm 1: Shortest-path vertex betweenness, (Brandes, 2001)
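// Summary (added for clarity): the ref_*_accumulation helpers above implement
// Brandes' dependency recurrence
//   delta(v) = sum over w with v in pred(w) of (sigma(v) / sigma(w)) * (1 + delta(w))
// where sigma(x) is the number of shortest paths from the current source to x;
// the betweenness of v is the sum of delta(v) over all sources.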
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void reference_betweenness_centrality_impl(vertex_t* indices,
edge_t* offsets,
vertex_t const number_of_vertices,
result_t* result,
bool endpoints,
vertex_t const* sources,
vertex_t const number_of_sources)
{
std::queue<vertex_t> Q;
std::stack<vertex_t> S;
// NOTE: dist is of type vertex_t not weight_t
std::vector<vertex_t> dist(number_of_vertices);
std::vector<std::vector<vertex_t>> pred(number_of_vertices);
std::vector<double> sigmas(number_of_vertices);
std::vector<double> deltas(number_of_vertices);
std::vector<vertex_t> neighbors;
if (sources) {
for (vertex_t source_idx = 0; source_idx < number_of_sources; ++source_idx) {
vertex_t s = sources[source_idx];
// Step 1: Single-source shortest-paths problem
// a. Initialization
ref_bfs<vertex_t, edge_t>(indices, offsets, number_of_vertices, Q, S, dist, pred, sigmas, s);
// Step 2: Accumulation
// Back propagation of dependencies
if (endpoints) {
ref_endpoints_accumulation<vertex_t, edge_t, weight_t, result_t>(
result, number_of_vertices, S, pred, sigmas, deltas, s);
} else {
ref_accumulation<vertex_t, edge_t, weight_t, result_t>(
result, number_of_vertices, S, pred, sigmas, deltas, s);
}
}
} else {
for (vertex_t s = 0; s < number_of_vertices; ++s) {
// Step 1: Single-source shortest-paths problem
// a. Initialization
ref_bfs<vertex_t, edge_t>(indices, offsets, number_of_vertices, Q, S, dist, pred, sigmas, s);
// Step 2: Accumulation
// Back propagation of dependencies
if (endpoints) {
ref_endpoints_accumulation<vertex_t, edge_t, weight_t, result_t>(
result, number_of_vertices, S, pred, sigmas, deltas, s);
} else {
ref_accumulation<vertex_t, edge_t, weight_t, result_t>(
result, number_of_vertices, S, pred, sigmas, deltas, s);
}
}
}
}
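// reference_rescale (summary added for clarity): applies the usual
// betweenness normalizations, dividing by (n-1)(n-2), or by n(n-1) when
// endpoints are counted, and halving the scores for undirected graphs when
// not normalizing; when number_of_sources > 0 the result is additionally
// scaled by n / number_of_sources.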
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void reference_rescale(result_t* result,
bool directed,
bool normalize,
bool endpoints,
vertex_t const number_of_vertices,
vertex_t const number_of_sources)
{
bool modified = false;
result_t rescale_factor = static_cast<result_t>(1);
result_t casted_number_of_sources = static_cast<result_t>(number_of_sources);
result_t casted_number_of_vertices = static_cast<result_t>(number_of_vertices);
if (normalize) {
if (number_of_vertices > 2) {
if (endpoints) {
rescale_factor /= (casted_number_of_vertices * (casted_number_of_vertices - 1));
} else {
rescale_factor /= ((casted_number_of_vertices - 1) * (casted_number_of_vertices - 2));
}
modified = true;
}
} else {
if (!directed) {
rescale_factor /= static_cast<result_t>(2);
modified = true;
}
}
if (modified) {
if (number_of_sources > 0) {
rescale_factor *= (casted_number_of_vertices / casted_number_of_sources);
}
}
for (auto idx = 0; idx < number_of_vertices; ++idx) {
result[idx] *= rescale_factor;
}
}
template <typename vertex_t, typename edge_t, typename weight_t, typename result_t>
void reference_betweenness_centrality(
cugraph::legacy::GraphCSRView<vertex_t, edge_t, weight_t> const& graph,
result_t* result,
bool normalize,
bool endpoints, // This is not yet implemented
vertex_t const number_of_sources,
vertex_t const* sources)
{
vertex_t number_of_vertices = graph.number_of_vertices;
edge_t number_of_edges = graph.number_of_edges;
thrust::host_vector<vertex_t> h_indices(number_of_edges);
thrust::host_vector<edge_t> h_offsets(number_of_vertices + 1);
thrust::device_ptr<vertex_t> d_indices((vertex_t*)&graph.indices[0]);
thrust::device_ptr<edge_t> d_offsets((edge_t*)&graph.offsets[0]);
thrust::copy(d_indices, d_indices + number_of_edges, h_indices.begin());
thrust::copy(d_offsets, d_offsets + (number_of_vertices + 1), h_offsets.begin());
cudaDeviceSynchronize();
reference_betweenness_centrality_impl<vertex_t, edge_t, weight_t, result_t>(&h_indices[0],
&h_offsets[0],
number_of_vertices,
result,
endpoints,
sources,
number_of_sources);
reference_rescale<vertex_t, edge_t, weight_t, result_t>(
result, graph.prop.directed, normalize, endpoints, number_of_vertices, number_of_sources);
}
// Explicit instantiation
/* FIXME!!!
template void reference_betweenness_centrality<int, int, float, float>(
cugraph::legacy::GraphCSRView<int, int, float> const &,
float *,
bool,
bool,
const int,
int const *);
template void reference_betweenness_centrality<int, int, double, double>(
cugraph::legacy::GraphCSRView<int, int, double> const &,
double *,
bool,
bool,
const int,
int const *);
*/
// =============================================================================
// Utility functions
// =============================================================================
// Compare while allowing a relative error of epsilon
// zero_threshold indicates when we should drop comparison for small numbers
template <typename T, typename precision_t>
bool compare_close(const T& a, const T& b, const precision_t epsilon, precision_t zero_threshold)
{
return ((zero_threshold > a && zero_threshold > b)) ||
(a >= b * (1.0 - epsilon)) && (a <= b * (1.0 + epsilon));
}
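// Example (illustrative only): with epsilon = 1e-4 and zero_threshold = 1e-10,
// compare_close(1.00005, 1.0, 1e-4, 1e-10) holds (relative error 5e-5), while
// compare_close(1.001, 1.0, 1e-4, 1e-10) does not; two values both below the
// zero threshold always compare as close.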
// =============================================================================
// Test Suite
// =============================================================================
// Defines Betweenness Centrality UseCase
// SSSP's test suite uses a parameter for the type of graph input that
// could be used (MTX / RMAT)
typedef struct BC_Usecase_t {
std::string config_; // Path to graph file
std::string file_path_; // Complete path to graph using dataset_root_dir
int number_of_sources_; // Number of source vertices for the traversal
BC_Usecase_t(const std::string& config, int number_of_sources)
: config_(config), number_of_sources_(number_of_sources)
{
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
// FIXME: Use platform independent stuff from c++14/17 on compiler update
const std::string& rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir();
if ((config_ != "") && (config_[0] != '/')) {
file_path_ = rapidsDatasetRootDir + "/" + config_;
} else {
file_path_ = config_;
}
};
} BC_Usecase;
class Tests_BC : public ::testing::TestWithParam<BC_Usecase> {
raft::handle_t handle;
public:
Tests_BC() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
// vertex_t vertex identifier data type
// edge_t edge identifier data type
// weight_t edge weight data type
// result_t result data type
// normalize should the result be normalized
// endpoints should the endpoints be included
template <typename vertex_t,
typename edge_t,
typename weight_t,
typename result_t,
bool normalize,
bool endpoints>
void run_current_test(const BC_Usecase& configuration)
{
// Step 1: Construction of the graph based on configuration
bool is_directed = false;
auto csr = cugraph::test::generate_graph_csr_from_mm<vertex_t, edge_t, weight_t>(
is_directed, configuration.file_path_);
cudaDeviceSynchronize();
cugraph::legacy::GraphCSRView<vertex_t, edge_t, weight_t> G = csr->view();
G.prop.directed = is_directed;
CUDA_TRY(cudaGetLastError());
std::vector<result_t> result(G.number_of_vertices, 0);
std::vector<result_t> expected(G.number_of_vertices, 0);
// Step 2: Generation of sources based on configuration
// if number_of_sources_ is 0 then sources must be nullptr
// Otherwise we only use the first k values
ASSERT_TRUE(configuration.number_of_sources_ >= 0 &&
configuration.number_of_sources_ <= G.number_of_vertices)
<< "Number number of sources should be >= 0 and"
<< " less than the number of vertices in the graph";
std::vector<vertex_t> sources(configuration.number_of_sources_);
thrust::sequence(thrust::host, sources.begin(), sources.end(), 0);
vertex_t* sources_ptr = nullptr;
if (configuration.number_of_sources_ > 0) { sources_ptr = sources.data(); }
reference_betweenness_centrality(
G, expected.data(), normalize, endpoints, configuration.number_of_sources_, sources_ptr);
sources_ptr = nullptr;
if (configuration.number_of_sources_ > 0) { sources_ptr = sources.data(); }
rmm::device_vector<result_t> d_result(G.number_of_vertices);
cugraph::betweenness_centrality(handle,
G,
d_result.data().get(),
normalize,
endpoints,
static_cast<weight_t*>(nullptr),
configuration.number_of_sources_,
sources_ptr);
cudaDeviceSynchronize();
CUDA_TRY(cudaMemcpy(result.data(),
d_result.data().get(),
sizeof(result_t) * G.number_of_vertices,
cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
for (int i = 0; i < G.number_of_vertices; ++i)
EXPECT_TRUE(compare_close(result[i], expected[i], TEST_EPSILON, TEST_ZERO_THRESHOLD))
<< "[MISMATCH] vaid = " << i << ", cugraph = " << result[i]
<< " expected = " << expected[i];
}
};
// ============================================================================
// Tests
// ============================================================================
// Verify un-normalized results
TEST_P(Tests_BC, CheckFP32_NO_NORMALIZE_NO_ENDPOINTS)
{
run_current_test<int, int, float, float, false, false>(GetParam());
}
#if 0
// Temporarily disable some of the test combinations
// Full solution will be explored for issue #1555
TEST_P(Tests_BC, CheckFP64_NO_NORMALIZE_NO_ENDPOINTS)
{
run_current_test<int, int, double, double, false, false>(GetParam());
}
TEST_P(Tests_BC, CheckFP32_NO_NORMALIZE_ENDPOINTS)
{
run_current_test<int, int, float, float, false, true>(GetParam());
}
#endif
TEST_P(Tests_BC, CheckFP64_NO_NORMALIZE_ENDPOINTS)
{
run_current_test<int, int, double, double, false, true>(GetParam());
}
// Verify normalized results
TEST_P(Tests_BC, CheckFP32_NORMALIZE_NO_ENDPOINTS)
{
run_current_test<int, int, float, float, true, false>(GetParam());
}
#if 0
// Temporarily disable some of the test combinations
// Full solution will be explored for issue #1555
TEST_P(Tests_BC, CheckFP64_NORMALIZE_NO_ENDPOINTS)
{
run_current_test<int, int, double, double, true, false>(GetParam());
}
TEST_P(Tests_BC, CheckFP32_NORMALIZE_ENDPOINTS)
{
run_current_test<int, int, float, float, true, true>(GetParam());
}
#endif
TEST_P(Tests_BC, CheckFP64_NORMALIZE_ENDPOINTS)
{
run_current_test<int, int, double, double, true, true>(GetParam());
}
#if 0
// Temporarily disable some of the test combinations
// Full solution will be explored for issue #1555
INSTANTIATE_TEST_SUITE_P(simple_test,
Tests_BC,
::testing::Values(BC_Usecase("test/datasets/karate.mtx", 0),
BC_Usecase("test/datasets/netscience.mtx", 0),
BC_Usecase("test/datasets/netscience.mtx", 4),
BC_Usecase("test/datasets/wiki2003.mtx", 4),
BC_Usecase("test/datasets/wiki-Talk.mtx", 4)));
#else
INSTANTIATE_TEST_SUITE_P(simple_test,
Tests_BC,
::testing::Values(BC_Usecase("test/datasets/karate.mtx", 0),
BC_Usecase("test/datasets/netscience.mtx", 0),
BC_Usecase("test/datasets/netscience.mtx", 4)));
#endif
CUGRAPH_TEST_PROGRAM_MAIN()
|
672a51b3e12052cf55ef198d762d550580008794.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void RGBToBGRA8(float3* srcImage, uchar4* dstImage, int width, int height, float scaling_factor)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const int pixel = y * width + x;
if( x >= width )
return;
if( y >= height )
return;
const float3 px = srcImage[pixel];
dstImage[pixel] = make_uchar4(px.z * scaling_factor,
px.y * scaling_factor,
px.x * scaling_factor,
255.0f * scaling_factor);
} | 672a51b3e12052cf55ef198d762d550580008794.cu | #include "includes.h"
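// RGBToBGRA8 (description added for clarity): one thread per pixel converts an
// array of float3 RGB pixels into packed uchar4 BGRA, swapping the R and B
// channels and multiplying every channel, including the constant 255 alpha,
// by scaling_factor before the implicit conversion to unsigned char.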
__global__ void RGBToBGRA8(float3* srcImage, uchar4* dstImage, int width, int height, float scaling_factor)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const int pixel = y * width + x;
if( x >= width )
return;
if( y >= height )
return;
const float3 px = srcImage[pixel];
dstImage[pixel] = make_uchar4(px.z * scaling_factor,
px.y * scaling_factor,
px.x * scaling_factor,
255.0f * scaling_factor);
} |
993236d0ee01e7c45afa2e00651c50bfdbe2a955.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vector_trunc.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const REAL *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
const int offset_x = 1;
const int stride_x = 1;
REAL *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
const int offset_y = 1;
const int stride_y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( vector_trunc), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, offset_x, stride_x, y, offset_y, stride_y);
hipDeviceSynchronize();
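// Warm-up (comment added for clarity): launch the kernel 10 times before
// timing so the 1000 timed launches below are not skewed by one-off
// initialization costs.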
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( vector_trunc), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, offset_x, stride_x, y, offset_y, stride_y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( vector_trunc), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, offset_x, stride_x, y, offset_y, stride_y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 993236d0ee01e7c45afa2e00651c50bfdbe2a955.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vector_trunc.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const REAL *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
const int offset_x = 1;
const int stride_x = 1;
REAL *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
const int offset_y = 1;
const int stride_y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vector_trunc<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y);
cudaDeviceSynchronize();
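// Warm-up (comment added for clarity): launch the kernel 10 times before
// timing so the 1000 timed launches below are not skewed by one-off
// initialization costs.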
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vector_trunc<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vector_trunc<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
642bbb40028a95b974ca75ddcd25b40a1bac35f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Open source copyright declaration based on BSD open source template:
* http://www.opensource.org/licenses/bsd-license.php
*
* This file is part of the OP2 distribution.
*
* Copyright (c) 2011, Mike Giles and others. Please see the AUTHORS file in
* the main source directory for a full list of copyright holders.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The name of Mike Giles may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Mike Giles ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Mike Giles BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define MPICH_IGNORE_CXX_SEEK
#include <op_lib_mpi.h>
#include <op_lib_c.h>
#include <op_cuda_rt_support.h>
#include <vector>
#include <algorithm>
__global__ void export_halo_gather(int *list, char *dat, int copy_size,
int elem_size, char *export_buffer) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < copy_size) {
int off = 0;
if (elem_size % 16 == 0) {
off += 16 * (elem_size / 16);
for (int i = 0; i < elem_size / 16; i++) {
((double2 *)(export_buffer + id * elem_size))[i] =
((double2 *)(dat + list[id] * elem_size))[i];
}
} else if (elem_size % 8 == 0) {
off += 8 * (elem_size / 8);
for (int i = 0; i < elem_size / 8; i++) {
((double *)(export_buffer + id * elem_size))[i] =
((double *)(dat + list[id] * elem_size))[i];
}
}
for (int i = off; i < elem_size; i++) {
export_buffer[id * elem_size + i] = dat[list[id] * elem_size + i];
}
}
}
__global__ void export_halo_gather_soa(int *list, char *dat, int copy_size,
int elem_size, char *export_buffer,
int set_size, int dim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int size_of = elem_size / dim;
if (id < copy_size) {
if (size_of == 8) {
for (int i = 0; i < dim; i++) {
((double *)(export_buffer + id * elem_size))[i] =
((double *)(dat + list[id] * size_of))[i * set_size];
}
} else {
for (int i = 0; i < dim; i++) {
for (int j = 0; j < size_of; j++) {
export_buffer[id * elem_size + i * size_of + j] =
dat[list[id] * size_of + i * set_size * size_of + j];
}
}
}
}
}
__global__ void import_halo_scatter_soa(int offset, char *dat, int copy_size,
int elem_size, char *import_buffer,
int set_size, int dim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int size_of = elem_size / dim;
if (id < copy_size) {
if (size_of == 8) {
for (int i = 0; i < dim; i++) {
((double *)(dat + (offset + id) * size_of))[i * set_size] =
((double *)(import_buffer + id * elem_size))[i];
}
} else {
for (int i = 0; i < dim; i++) {
for (int j = 0; j < size_of; j++) {
dat[(offset + id) * size_of + i * set_size * size_of + j] =
import_buffer[id * elem_size + i * size_of + j];
}
}
}
}
}
__global__ void import_halo_scatter_partial_soa(int *list, char *dat,
int copy_size, int elem_size,
char *import_buffer,
int set_size, int dim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int size_of = elem_size / dim;
if (id < copy_size) {
int element = list[id];
if (size_of == 8) {
for (int i = 0; i < dim; i++) {
((double *)(dat + (element)*size_of))[i * set_size] =
((double *)(import_buffer + id * elem_size))[i];
}
} else {
for (int i = 0; i < dim; i++) {
for (int j = 0; j < size_of; j++) {
dat[(element)*size_of + i * set_size * size_of + j] =
import_buffer[id * elem_size + i * size_of + j];
}
}
}
}
}
__global__ void import_halo_scatter_partial(int *list, char *dat, int copy_size,
int elem_size, char *import_buffer,
int dim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int size_of = elem_size / dim;
if (id < copy_size) {
int element = list[id];
if (size_of == 8) {
for (int i = 0; i < dim; i++) {
((double *)(dat + element * elem_size))[i] =
((double *)(import_buffer + id * elem_size))[i];
}
} else {
for (int i = 0; i < dim; i++) {
for (int j = 0; j < size_of; j++) {
dat[element * elem_size + i * size_of + j] =
import_buffer[id * elem_size + i * size_of + j];
}
}
}
}
}
void gather_data_to_buffer(op_arg arg, halo_list exp_exec_list,
halo_list exp_nonexec_list) {
int threads = 192;
int blocks = 1 + ((exp_exec_list->size - 1) / 192);
if (strstr(arg.dat->type, ":soa") != NULL ||
(OP_auto_soa && arg.dat->dim > 1)) {
int set_size = arg.dat->set->size + arg.dat->set->exec_size +
arg.dat->set->nonexec_size;
hipLaunchKernelGGL(( export_halo_gather_soa), dim3(blocks), dim3(threads), 0, 0,
export_exec_list_d[arg.dat->set->index], arg.data_d,
exp_exec_list->size, arg.dat->size, arg.dat->buffer_d, set_size,
arg.dat->dim);
int blocks2 = 1 + ((exp_nonexec_list->size - 1) / 192);
hipLaunchKernelGGL(( export_halo_gather_soa), dim3(blocks2), dim3(threads), 0, 0,
export_nonexec_list_d[arg.dat->set->index], arg.data_d,
exp_nonexec_list->size, arg.dat->size,
arg.dat->buffer_d + exp_exec_list->size * arg.dat->size, set_size,
arg.dat->dim);
} else {
hipLaunchKernelGGL(( export_halo_gather), dim3(blocks), dim3(threads), 0, 0,
export_exec_list_d[arg.dat->set->index], arg.data_d,
exp_exec_list->size, arg.dat->size, arg.dat->buffer_d);
int blocks2 = 1 + ((exp_nonexec_list->size - 1) / 192);
hipLaunchKernelGGL(( export_halo_gather), dim3(blocks2), dim3(threads), 0, 0,
export_nonexec_list_d[arg.dat->set->index], arg.data_d,
exp_nonexec_list->size, arg.dat->size,
arg.dat->buffer_d + exp_exec_list->size * arg.dat->size);
}
}
void gather_data_to_buffer_partial(op_arg arg, halo_list exp_nonexec_list) {
int threads = 192;
int blocks = 1 + ((exp_nonexec_list->size - 1) / 192);
if (strstr(arg.dat->type, ":soa") != NULL ||
(OP_auto_soa && arg.dat->dim > 1)) {
int set_size = arg.dat->set->size + arg.dat->set->exec_size +
arg.dat->set->nonexec_size;
hipLaunchKernelGGL(( export_halo_gather_soa), dim3(blocks), dim3(threads), 0, 0,
export_nonexec_list_partial_d[arg.map->index], arg.data_d,
exp_nonexec_list->size, arg.dat->size, arg.dat->buffer_d, set_size,
arg.dat->dim);
} else {
hipLaunchKernelGGL(( export_halo_gather), dim3(blocks), dim3(threads), 0, 0,
export_nonexec_list_partial_d[arg.map->index], arg.data_d,
exp_nonexec_list->size, arg.dat->size, arg.dat->buffer_d);
}
}
void scatter_data_from_buffer(op_arg arg) {
int threads = 192;
int blocks = 1 + ((arg.dat->set->exec_size - 1) / 192);
if (strstr(arg.dat->type, ":soa") != NULL ||
(OP_auto_soa && arg.dat->dim > 1)) {
int set_size = arg.dat->set->size + arg.dat->set->exec_size +
arg.dat->set->nonexec_size;
int offset = arg.dat->set->size;
int copy_size = arg.dat->set->exec_size;
hipLaunchKernelGGL(( import_halo_scatter_soa), dim3(blocks), dim3(threads), 0, 0,
offset, arg.data_d, copy_size, arg.dat->size, arg.dat->buffer_d_r,
set_size, arg.dat->dim);
offset += arg.dat->set->exec_size;
copy_size = arg.dat->set->nonexec_size;
int blocks2 = 1 + ((arg.dat->set->nonexec_size - 1) / 192);
hipLaunchKernelGGL(( import_halo_scatter_soa), dim3(blocks2), dim3(threads), 0, 0,
offset, arg.data_d, copy_size, arg.dat->size,
arg.dat->buffer_d_r + arg.dat->set->exec_size * arg.dat->size, set_size,
arg.dat->dim);
}
}
void scatter_data_from_buffer_partial(op_arg arg) {
int threads = 192;
int blocks = 1 + ((OP_import_nonexec_permap[arg.map->index]->size - 1) / 192);
if (strstr(arg.dat->type, ":soa") != NULL ||
(OP_auto_soa && arg.dat->dim > 1)) {
int set_size = arg.dat->set->size + arg.dat->set->exec_size +
arg.dat->set->nonexec_size;
int init = OP_export_nonexec_permap[arg.map->index]->size;
int copy_size = OP_import_nonexec_permap[arg.map->index]->size;
hipLaunchKernelGGL(( import_halo_scatter_partial_soa), dim3(blocks), dim3(threads), 0, 0,
import_nonexec_list_partial_d[arg.map->index], arg.data_d, copy_size,
arg.dat->size, arg.dat->buffer_d + init * arg.dat->size, set_size,
arg.dat->dim);
} else {
int init = OP_export_nonexec_permap[arg.map->index]->size;
int copy_size = OP_import_nonexec_permap[arg.map->index]->size;
hipLaunchKernelGGL(( import_halo_scatter_partial), dim3(blocks), dim3(threads), 0, 0,
import_nonexec_list_partial_d[arg.map->index], arg.data_d, copy_size,
arg.dat->size, arg.dat->buffer_d + init * arg.dat->size, arg.dat->dim);
}
}
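// lower_bound (summary added for clarity): a device-side equivalent of
// std::lower_bound over the neighbour displacement array; the grouped
// gather/scatter kernels below use it to map a flat element index onto the
// neighbour whose half-open range [disps[n], disps[n+1]) contains it.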
__device__ int lower_bound(int *disps, int count, int value) {
int *it;
int *first = disps;
int step;
while (count > 0) {
it = first;
step = count / 2;
it += step;
if (*it < value) {
first = ++it;
count -= step + 1;
}
else
count = step;
}
return first-disps;
}
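// gather_data_to_buffer_ptr_cuda_kernel (summary added for clarity): one
// thread per exported element; each thread locates its destination neighbour
// via lower_bound on disps, then copies the element from the dat array,
// handling both SoA and AoS layouts with an 8-byte fast path when aligned,
// into that neighbour's region of the staging buffer starting at
// neigh_to_neigh_offsets[neighbour].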
__global__ void gather_data_to_buffer_ptr_cuda_kernel(const char *__restrict data, char *__restrict buffer, int *elem_list, int *disps,
unsigned *neigh_to_neigh_offsets, int rank_size, int soa, int type_size, int dim, int set_size) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= disps[rank_size]) return;
int neighbour = lower_bound(disps, rank_size, id);
if (disps[neighbour]!=id) neighbour--;
unsigned buf_pos = neigh_to_neigh_offsets[neighbour];
unsigned set_elem_index = elem_list[id];
if (soa) {
for (int d = 0; d < dim; d++)
if (type_size == 8 && (buf_pos + (id - disps[neighbour]) * type_size * dim + d * type_size)%8==0)
*(double*)&buffer[buf_pos + (id - disps[neighbour]) * type_size * dim + d * type_size] = *(double*)&data[(d*set_size + set_elem_index)*type_size];
else
for (int p = 0; p < type_size; p++)
buffer[buf_pos + (id - disps[neighbour]) * type_size * dim + d * type_size + p] = data[(d*set_size + set_elem_index)*type_size + p];
} else {
int dat_size = type_size * dim;
if (type_size == 8 && (buf_pos + (id - disps[neighbour]) * dat_size)%8==0)
for (int d = 0; d < dim; d++)
*(double*)&buffer[buf_pos + (id - disps[neighbour]) * dat_size + d*type_size] = *(double*)&data[set_elem_index*dat_size + d*type_size];
else
for (int p = 0; p < dat_size; p++)
buffer[buf_pos + (id - disps[neighbour]) * dat_size + p] = data[set_elem_index*dat_size + p];
}
}
__global__ void scatter_data_from_buffer_ptr_cuda_kernel(char * __restrict data, const char * __restrict buffer, int *disps,
unsigned *neigh_to_neigh_offsets, int rank_size, int soa, int type_size, int dim, int set_size) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= disps[rank_size]) return;
int neighbour = lower_bound(disps, rank_size, id);
if (disps[neighbour]!=id) neighbour--;
unsigned buf_pos = neigh_to_neigh_offsets[neighbour];
if (soa) {
for (int d = 0; d < dim; d++)
if (type_size == 8 && (buf_pos + (id - disps[neighbour]) * type_size * dim + d * type_size)%8==0)
*(double*)&data[(d*set_size + id)*type_size] = *(double*)&buffer[buf_pos + (id - disps[neighbour]) * type_size * dim + d * type_size];
else
for (int p = 0; p < type_size; p++)
data[(d*set_size + id)*type_size + p] = buffer[buf_pos + (id - disps[neighbour]) * type_size * dim + d * type_size + p];
} else {
int dat_size = type_size * dim;
// if (*(double*)&buffer[buf_pos + (id - disps[neighbour]) * dat_size] != *(double*)&data[id*dat_size])
// printf("Mismatch\n");
if (type_size == 8 && (buf_pos + (id - disps[neighbour]) * dat_size)%8==0)
for (int d = 0; d < dim; d++)
*(double*)&data[id*dat_size + d*type_size] = *(double*)&buffer[buf_pos + (id - disps[neighbour]) * dat_size + d*type_size];
else
for (int p = 0; p < dat_size; p++)
data[id*dat_size + p] = buffer[buf_pos + (id - disps[neighbour]) * dat_size + p];
}
}
unsigned *op2_grp_neigh_to_neigh_offsets_h = NULL;
unsigned *op2_grp_neigh_to_neigh_offsets_d = NULL;
int op2_grp_max_gathers = 10;
extern int op2_grp_counter;
int op2_grp_max_neighbours = 0;
void check_realloc_buffer() {
// Figure out how much space we may need at most
if (op2_grp_neigh_to_neigh_offsets_h == NULL) {
for (int i = 0; i < OP_set_index; i++) {
op2_grp_max_neighbours = MAX(op2_grp_max_neighbours,OP_export_exec_list[i]->ranks_size);
op2_grp_max_neighbours = MAX(op2_grp_max_neighbours,OP_export_nonexec_list[i]->ranks_size);
op2_grp_max_neighbours = MAX(op2_grp_max_neighbours,OP_import_exec_list[i]->ranks_size);
op2_grp_max_neighbours = MAX(op2_grp_max_neighbours,OP_import_nonexec_list[i]->ranks_size);
}
//Need host buffers for each dat in flight
cutilSafeCall(hipHostMalloc(&op2_grp_neigh_to_neigh_offsets_h, op2_grp_max_gathers * op2_grp_max_neighbours * sizeof(unsigned)));
//But just one device buffer if gather kernels are sequential
cutilSafeCall(hipMalloc (&op2_grp_neigh_to_neigh_offsets_d, op2_grp_max_neighbours * sizeof(unsigned)));
}
if (op2_grp_counter >= op2_grp_max_gathers) {
cutilSafeCall(hipDeviceSynchronize());
cutilSafeCall(hipHostFree(op2_grp_neigh_to_neigh_offsets_h));
op2_grp_max_gathers *= 2;
cutilSafeCall(hipHostMalloc(&op2_grp_neigh_to_neigh_offsets_h, op2_grp_max_gathers * op2_grp_max_neighbours * sizeof(unsigned)));
}
}
void gather_data_to_buffer_ptr_cuda(op_arg arg, halo_list eel, halo_list enl, char *buffer,
std::vector<int>& neigh_list, std::vector<unsigned>& neigh_offsets) {
check_realloc_buffer();
int soa = 0;
if ((OP_auto_soa && arg.dat->dim > 1) || strstr(arg.dat->type, ":soa") != NULL) soa = 1;
//Exec halo
//Create op2_grp_neigh_to_neigh_offsets_h into appropriate position
for (int i = 0; i < eel->ranks_size; i++) {
int dest_rank = eel->ranks[i];
int buf_rankpos = std::distance(neigh_list.begin(),std::lower_bound(neigh_list.begin(), neigh_list.end(), dest_rank));
op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours+i] = neigh_offsets[buf_rankpos];
neigh_offsets[buf_rankpos] += eel->sizes[i] * arg.dat->size;
}
//Async upload
hipMemcpyAsync(op2_grp_neigh_to_neigh_offsets_d,&op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours],eel->ranks_size * sizeof(unsigned),hipMemcpyHostToDevice);
//Launch kernel
hipLaunchKernelGGL(( gather_data_to_buffer_ptr_cuda_kernel), dim3(1 + ((eel->size - 1) / 192)),dim3(192), 0, 0, arg.dat->data_d, buffer, export_exec_list_d[arg.dat->set->index], export_exec_list_disps_d[arg.dat->set->index],
op2_grp_neigh_to_neigh_offsets_d, eel->ranks_size, soa, arg.dat->size/arg.dat->dim, arg.dat->dim, arg.dat->set->size+arg.dat->set->exec_size+arg.dat->set->nonexec_size);
op2_grp_counter++;
//Same for nonexec
//Create op2_grp_neigh_to_neigh_offsets_h into appropriate position
for (int i = 0; i < enl->ranks_size; i++) {
int dest_rank = enl->ranks[i];
int buf_rankpos = std::distance(neigh_list.begin(),std::lower_bound(neigh_list.begin(), neigh_list.end(), dest_rank));
op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours+i] = neigh_offsets[buf_rankpos];
neigh_offsets[buf_rankpos] += enl->sizes[i] * arg.dat->size;
}
//Async upload
hipMemcpyAsync(op2_grp_neigh_to_neigh_offsets_d,&op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours],enl->ranks_size * sizeof(unsigned),hipMemcpyHostToDevice);
//Launch kernel
hipLaunchKernelGGL(( gather_data_to_buffer_ptr_cuda_kernel), dim3(1 + ((enl->size - 1) / 192)),dim3(192), 0, 0, arg.dat->data_d, buffer, export_nonexec_list_d[arg.dat->set->index], export_nonexec_list_disps_d[arg.dat->set->index],
op2_grp_neigh_to_neigh_offsets_d, enl->ranks_size, soa, arg.dat->size/arg.dat->dim, arg.dat->dim, arg.dat->set->size+arg.dat->set->exec_size+arg.dat->set->nonexec_size);
op2_grp_counter++;
}
void scatter_data_from_buffer_ptr_cuda(op_arg arg, halo_list iel, halo_list inl, char *buffer,
std::vector<int>& neigh_list, std::vector<unsigned>& neigh_offsets) {
check_realloc_buffer();
int soa = 0;
if ((OP_auto_soa && arg.dat->dim > 1) || strstr(arg.dat->type, ":soa") != NULL) soa = 1;
//Exec halo
//Create op2_grp_neigh_to_neigh_offsets_h into appropriate position
for (int i = 0; i < iel->ranks_size; i++) {
int dest_rank = iel->ranks[i];
int buf_rankpos = std::distance(neigh_list.begin(),std::lower_bound(neigh_list.begin(), neigh_list.end(), dest_rank));
op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours+i] = neigh_offsets[buf_rankpos];
neigh_offsets[buf_rankpos] += iel->sizes[i] * arg.dat->size;
}
//Async upload
hipMemcpyAsync(op2_grp_neigh_to_neigh_offsets_d,&op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours],iel->ranks_size * sizeof(unsigned),hipMemcpyHostToDevice,op2_grp_secondary);
//Launch kernel
unsigned offset = arg.dat->set->size * (soa?arg.dat->size/arg.dat->dim:arg.dat->size);
hipLaunchKernelGGL(( scatter_data_from_buffer_ptr_cuda_kernel), dim3(1 + ((iel->size - 1) / 192)),dim3(192),0,op2_grp_secondary, arg.dat->data_d+offset, buffer, import_exec_list_disps_d[arg.dat->set->index],
op2_grp_neigh_to_neigh_offsets_d, iel->ranks_size, soa, arg.dat->size/arg.dat->dim, arg.dat->dim, arg.dat->set->size+arg.dat->set->exec_size+arg.dat->set->nonexec_size);
op2_grp_counter++;
//Same for nonexec
//Create op2_grp_neigh_to_neigh_offsets_h into appropriate position
for (int i = 0; i < inl->ranks_size; i++) {
int dest_rank = inl->ranks[i];
int buf_rankpos = std::distance(neigh_list.begin(),std::lower_bound(neigh_list.begin(), neigh_list.end(), dest_rank));
op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours+i] = neigh_offsets[buf_rankpos];
neigh_offsets[buf_rankpos] += inl->sizes[i] * arg.dat->size;
}
//Async upload
hipMemcpyAsync(op2_grp_neigh_to_neigh_offsets_d,&op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours],inl->ranks_size * sizeof(unsigned),hipMemcpyHostToDevice,op2_grp_secondary);
//Launch kernel
offset = (arg.dat->set->size + iel->size) * (soa?arg.dat->size/arg.dat->dim:arg.dat->size);
hipLaunchKernelGGL(( scatter_data_from_buffer_ptr_cuda_kernel), dim3(1 + ((inl->size - 1) / 192)),dim3(192),0,op2_grp_secondary, arg.dat->data_d+offset, buffer, import_nonexec_list_disps_d[arg.dat->set->index],
op2_grp_neigh_to_neigh_offsets_d, inl->ranks_size, soa, arg.dat->size/arg.dat->dim, arg.dat->dim, arg.dat->set->size+arg.dat->set->exec_size+arg.dat->set->nonexec_size);
op2_grp_counter++;
} | 642bbb40028a95b974ca75ddcd25b40a1bac35f3.cu | /*
* Open source copyright declaration based on BSD open source template:
* http://www.opensource.org/licenses/bsd-license.php
*
* This file is part of the OP2 distribution.
*
* Copyright (c) 2011, Mike Giles and others. Please see the AUTHORS file in
* the main source directory for a full list of copyright holders.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The name of Mike Giles may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Mike Giles ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Mike Giles BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define MPICH_IGNORE_CXX_SEEK
#include <op_lib_mpi.h>
#include <op_lib_c.h>
#include <op_cuda_rt_support.h>
#include <vector>
#include <algorithm>
__global__ void export_halo_gather(int *list, char *dat, int copy_size,
int elem_size, char *export_buffer) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < copy_size) {
int off = 0;
if (elem_size % 16 == 0) {
off += 16 * (elem_size / 16);
for (int i = 0; i < elem_size / 16; i++) {
((double2 *)(export_buffer + id * elem_size))[i] =
((double2 *)(dat + list[id] * elem_size))[i];
}
} else if (elem_size % 8 == 0) {
off += 8 * (elem_size / 8);
for (int i = 0; i < elem_size / 8; i++) {
((double *)(export_buffer + id * elem_size))[i] =
((double *)(dat + list[id] * elem_size))[i];
}
}
for (int i = off; i < elem_size; i++) {
export_buffer[id * elem_size + i] = dat[list[id] * elem_size + i];
}
}
}
__global__ void export_halo_gather_soa(int *list, char *dat, int copy_size,
int elem_size, char *export_buffer,
int set_size, int dim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int size_of = elem_size / dim;
if (id < copy_size) {
if (size_of == 8) {
for (int i = 0; i < dim; i++) {
((double *)(export_buffer + id * elem_size))[i] =
((double *)(dat + list[id] * size_of))[i * set_size];
}
} else {
for (int i = 0; i < dim; i++) {
for (int j = 0; j < size_of; j++) {
export_buffer[id * elem_size + i * size_of + j] =
dat[list[id] * size_of + i * set_size * size_of + j];
}
}
}
}
}
__global__ void import_halo_scatter_soa(int offset, char *dat, int copy_size,
int elem_size, char *import_buffer,
int set_size, int dim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int size_of = elem_size / dim;
if (id < copy_size) {
if (size_of == 8) {
for (int i = 0; i < dim; i++) {
((double *)(dat + (offset + id) * size_of))[i * set_size] =
((double *)(import_buffer + id * elem_size))[i];
}
} else {
for (int i = 0; i < dim; i++) {
for (int j = 0; j < size_of; j++) {
dat[(offset + id) * size_of + i * set_size * size_of + j] =
import_buffer[id * elem_size + i * size_of + j];
}
}
}
}
}
__global__ void import_halo_scatter_partial_soa(int *list, char *dat,
int copy_size, int elem_size,
char *import_buffer,
int set_size, int dim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int size_of = elem_size / dim;
if (id < copy_size) {
int element = list[id];
if (size_of == 8) {
for (int i = 0; i < dim; i++) {
((double *)(dat + (element)*size_of))[i * set_size] =
((double *)(import_buffer + id * elem_size))[i];
}
} else {
for (int i = 0; i < dim; i++) {
for (int j = 0; j < size_of; j++) {
dat[(element)*size_of + i * set_size * size_of + j] =
import_buffer[id * elem_size + i * size_of + j];
}
}
}
}
}
__global__ void import_halo_scatter_partial(int *list, char *dat, int copy_size,
int elem_size, char *import_buffer,
int dim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int size_of = elem_size / dim;
if (id < copy_size) {
int element = list[id];
if (size_of == 8) {
for (int i = 0; i < dim; i++) {
((double *)(dat + element * elem_size))[i] =
((double *)(import_buffer + id * elem_size))[i];
}
} else {
for (int i = 0; i < dim; i++) {
for (int j = 0; j < size_of; j++) {
dat[element * elem_size + i * size_of + j] =
import_buffer[id * elem_size + i * size_of + j];
}
}
}
}
}
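//Pack the exec and nonexec export halos of arg.dat into arg.dat->buffer_d,
//choosing the SoA or AoS gather kernel from the dat's type and dimension.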
void gather_data_to_buffer(op_arg arg, halo_list exp_exec_list,
halo_list exp_nonexec_list) {
int threads = 192;
int blocks = 1 + ((exp_exec_list->size - 1) / 192);
if (strstr(arg.dat->type, ":soa") != NULL ||
(OP_auto_soa && arg.dat->dim > 1)) {
int set_size = arg.dat->set->size + arg.dat->set->exec_size +
arg.dat->set->nonexec_size;
export_halo_gather_soa<<<blocks, threads>>>(
export_exec_list_d[arg.dat->set->index], arg.data_d,
exp_exec_list->size, arg.dat->size, arg.dat->buffer_d, set_size,
arg.dat->dim);
int blocks2 = 1 + ((exp_nonexec_list->size - 1) / 192);
export_halo_gather_soa<<<blocks2, threads>>>(
export_nonexec_list_d[arg.dat->set->index], arg.data_d,
exp_nonexec_list->size, arg.dat->size,
arg.dat->buffer_d + exp_exec_list->size * arg.dat->size, set_size,
arg.dat->dim);
} else {
export_halo_gather<<<blocks, threads>>>(
export_exec_list_d[arg.dat->set->index], arg.data_d,
exp_exec_list->size, arg.dat->size, arg.dat->buffer_d);
int blocks2 = 1 + ((exp_nonexec_list->size - 1) / 192);
export_halo_gather<<<blocks2, threads>>>(
export_nonexec_list_d[arg.dat->set->index], arg.data_d,
exp_nonexec_list->size, arg.dat->size,
arg.dat->buffer_d + exp_exec_list->size * arg.dat->size);
}
}
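//Pack only the nonexec export halo associated with arg.map (partial halo
//exchange); typically invoked just before the corresponding MPI sends.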
void gather_data_to_buffer_partial(op_arg arg, halo_list exp_nonexec_list) {
int threads = 192;
int blocks = 1 + ((exp_nonexec_list->size - 1) / 192);
if (strstr(arg.dat->type, ":soa") != NULL ||
(OP_auto_soa && arg.dat->dim > 1)) {
int set_size = arg.dat->set->size + arg.dat->set->exec_size +
arg.dat->set->nonexec_size;
export_halo_gather_soa<<<blocks, threads>>>(
export_nonexec_list_partial_d[arg.map->index], arg.data_d,
exp_nonexec_list->size, arg.dat->size, arg.dat->buffer_d, set_size,
arg.dat->dim);
} else {
export_halo_gather<<<blocks, threads>>>(
export_nonexec_list_partial_d[arg.map->index], arg.data_d,
exp_nonexec_list->size, arg.dat->size, arg.dat->buffer_d);
}
}
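//Unpack the received exec and nonexec halos from arg.dat->buffer_d_r into the
//dat. Only the SoA layout needs a kernel here: in the AoS layout the imported
//halo block is contiguous in the dat, so no per-element scatter is required.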
void scatter_data_from_buffer(op_arg arg) {
int threads = 192;
int blocks = 1 + ((arg.dat->set->exec_size - 1) / 192);
if (strstr(arg.dat->type, ":soa") != NULL ||
(OP_auto_soa && arg.dat->dim > 1)) {
int set_size = arg.dat->set->size + arg.dat->set->exec_size +
arg.dat->set->nonexec_size;
int offset = arg.dat->set->size;
int copy_size = arg.dat->set->exec_size;
import_halo_scatter_soa<<<blocks, threads>>>(
offset, arg.data_d, copy_size, arg.dat->size, arg.dat->buffer_d_r,
set_size, arg.dat->dim);
offset += arg.dat->set->exec_size;
copy_size = arg.dat->set->nonexec_size;
int blocks2 = 1 + ((arg.dat->set->nonexec_size - 1) / 192);
import_halo_scatter_soa<<<blocks2, threads>>>(
offset, arg.data_d, copy_size, arg.dat->size,
arg.dat->buffer_d_r + arg.dat->set->exec_size * arg.dat->size, set_size,
arg.dat->dim);
}
}
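//Unpack a partial nonexec halo. The imported data sits in buffer_d after the
//exported section, hence the 'init' offset by the export list size.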
void scatter_data_from_buffer_partial(op_arg arg) {
int threads = 192;
int blocks = 1 + ((OP_import_nonexec_permap[arg.map->index]->size - 1) / 192);
if (strstr(arg.dat->type, ":soa") != NULL ||
(OP_auto_soa && arg.dat->dim > 1)) {
int set_size = arg.dat->set->size + arg.dat->set->exec_size +
arg.dat->set->nonexec_size;
int init = OP_export_nonexec_permap[arg.map->index]->size;
int copy_size = OP_import_nonexec_permap[arg.map->index]->size;
import_halo_scatter_partial_soa<<<blocks, threads>>>(
import_nonexec_list_partial_d[arg.map->index], arg.data_d, copy_size,
arg.dat->size, arg.dat->buffer_d + init * arg.dat->size, set_size,
arg.dat->dim);
} else {
int init = OP_export_nonexec_permap[arg.map->index]->size;
int copy_size = OP_import_nonexec_permap[arg.map->index]->size;
import_halo_scatter_partial<<<blocks, threads>>>(
import_nonexec_list_partial_d[arg.map->index], arg.data_d, copy_size,
arg.dat->size, arg.dat->buffer_d + init * arg.dat->size, arg.dat->dim);
}
}
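//Device-side binary search: returns the index of the first element of
//disps[0..count) that is not less than value (mirrors std::lower_bound).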
__device__ int lower_bound(int *disps, int count, int value) {
int *it;
int *first = disps;
int step;
while (count > 0) {
it = first;
step = count / 2;
it += step;
if (*it < value) {
first = ++it;
count -= step + 1;
}
else
count = step;
}
return first-disps;
}
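//Grouped ("grp") pack kernel: a single launch packs the halo elements destined
//for every neighbouring rank. Each thread locates its neighbour by binary
//search on the cumulative counts in disps, then writes into that neighbour's
//slice of the combined send buffer, starting at neigh_to_neigh_offsets[neighbour].
//An 8-byte fast path is taken when the type size and alignment allow it.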
__global__ void gather_data_to_buffer_ptr_cuda_kernel(const char *__restrict data, char *__restrict buffer, int *elem_list, int *disps,
unsigned *neigh_to_neigh_offsets, int rank_size, int soa, int type_size, int dim, int set_size) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= disps[rank_size]) return;
int neighbour = lower_bound(disps, rank_size, id);
if (disps[neighbour]!=id) neighbour--;
unsigned buf_pos = neigh_to_neigh_offsets[neighbour];
unsigned set_elem_index = elem_list[id];
if (soa) {
for (int d = 0; d < dim; d++)
if (type_size == 8 && (buf_pos + (id - disps[neighbour]) * type_size * dim + d * type_size)%8==0)
*(double*)&buffer[buf_pos + (id - disps[neighbour]) * type_size * dim + d * type_size] = *(double*)&data[(d*set_size + set_elem_index)*type_size];
else
for (int p = 0; p < type_size; p++)
buffer[buf_pos + (id - disps[neighbour]) * type_size * dim + d * type_size + p] = data[(d*set_size + set_elem_index)*type_size + p];
} else {
int dat_size = type_size * dim;
if (type_size == 8 && (buf_pos + (id - disps[neighbour]) * dat_size)%8==0)
for (int d = 0; d < dim; d++)
*(double*)&buffer[buf_pos + (id - disps[neighbour]) * dat_size + d*type_size] = *(double*)&data[set_elem_index*dat_size + d*type_size];
else
for (int p = 0; p < dat_size; p++)
buffer[buf_pos + (id - disps[neighbour]) * dat_size + p] = data[set_elem_index*dat_size + p];
}
}
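//Companion unpack kernel: threads locate their neighbour the same way and copy
//from the combined receive buffer into consecutive dat entries (the imported
//halo elements are contiguous on the receiving side).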
__global__ void scatter_data_from_buffer_ptr_cuda_kernel(char * __restrict data, const char * __restrict buffer, int *disps,
unsigned *neigh_to_neigh_offsets, int rank_size, int soa, int type_size, int dim, int set_size) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= disps[rank_size]) return;
int neighbour = lower_bound(disps, rank_size, id);
if (disps[neighbour]!=id) neighbour--;
unsigned buf_pos = neigh_to_neigh_offsets[neighbour];
if (soa) {
for (int d = 0; d < dim; d++)
if (type_size == 8 && (buf_pos + (id - disps[neighbour]) * type_size * dim + d * type_size)%8==0)
*(double*)&data[(d*set_size + id)*type_size] = *(double*)&buffer[buf_pos + (id - disps[neighbour]) * type_size * dim + d * type_size];
else
for (int p = 0; p < type_size; p++)
data[(d*set_size + id)*type_size + p] = buffer[buf_pos + (id - disps[neighbour]) * type_size * dim + d * type_size + p];
} else {
int dat_size = type_size * dim;
// if (*(double*)&buffer[buf_pos + (id - disps[neighbour]) * dat_size] != *(double*)&data[id*dat_size])
// printf("Mismatch\n");
if (type_size == 8 && (buf_pos + (id - disps[neighbour]) * dat_size)%8==0)
for (int d = 0; d < dim; d++)
*(double*)&data[id*dat_size + d*type_size] = *(double*)&buffer[buf_pos + (id - disps[neighbour]) * dat_size + d*type_size];
else
for (int p = 0; p < dat_size; p++)
data[id*dat_size + p] = buffer[buf_pos + (id - disps[neighbour]) * dat_size + p];
}
}
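//Staging buffers for the grouped halo exchange: a pinned host array of
//per-neighbour buffer offsets (one slot of op2_grp_max_neighbours entries per
//gather in flight) and a single device copy; check_realloc_buffer() allocates
//and grows them lazily.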
unsigned *op2_grp_neigh_to_neigh_offsets_h = NULL;
unsigned *op2_grp_neigh_to_neigh_offsets_d = NULL;
int op2_grp_max_gathers = 10;
extern int op2_grp_counter;
int op2_grp_max_neighbours = 0;
void check_realloc_buffer() {
//Figure out how much space we may need at most
if (op2_grp_neigh_to_neigh_offsets_h == NULL) {
for (int i = 0; i < OP_set_index; i++) {
op2_grp_max_neighbours = MAX(op2_grp_max_neighbours,OP_export_exec_list[i]->ranks_size);
op2_grp_max_neighbours = MAX(op2_grp_max_neighbours,OP_export_nonexec_list[i]->ranks_size);
op2_grp_max_neighbours = MAX(op2_grp_max_neighbours,OP_import_exec_list[i]->ranks_size);
op2_grp_max_neighbours = MAX(op2_grp_max_neighbours,OP_import_nonexec_list[i]->ranks_size);
}
//Need host buffers for each dat in flight
cutilSafeCall(cudaMallocHost(&op2_grp_neigh_to_neigh_offsets_h, op2_grp_max_gathers * op2_grp_max_neighbours * sizeof(unsigned)));
//But just one device buffer if gather kernels are sequential
cutilSafeCall(cudaMalloc (&op2_grp_neigh_to_neigh_offsets_d, op2_grp_max_neighbours * sizeof(unsigned)));
}
if (op2_grp_counter >= op2_grp_max_gathers) {
cutilSafeCall(cudaDeviceSynchronize());
cutilSafeCall(cudaFreeHost(op2_grp_neigh_to_neigh_offsets_h));
op2_grp_max_gathers *= 2;
cutilSafeCall(cudaMallocHost(&op2_grp_neigh_to_neigh_offsets_h, op2_grp_max_gathers * op2_grp_max_neighbours * sizeof(unsigned)));
}
}
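//Pack arg.dat's exec and nonexec export halos into the combined per-neighbour
//message buffer: compute each neighbour's destination offset on the host,
//upload the offsets asynchronously, then launch the grouped pack kernel.
//op2_grp_counter selects which host-side offset slot is in use.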
void gather_data_to_buffer_ptr_cuda(op_arg arg, halo_list eel, halo_list enl, char *buffer,
std::vector<int>& neigh_list, std::vector<unsigned>& neigh_offsets) {
check_realloc_buffer();
int soa = 0;
if ((OP_auto_soa && arg.dat->dim > 1) || strstr(arg.dat->type, ":soa") != NULL) soa = 1;
//Exec halo
//Fill op2_grp_neigh_to_neigh_offsets_h at the appropriate position
for (int i = 0; i < eel->ranks_size; i++) {
int dest_rank = eel->ranks[i];
int buf_rankpos = std::distance(neigh_list.begin(),std::lower_bound(neigh_list.begin(), neigh_list.end(), dest_rank));
op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours+i] = neigh_offsets[buf_rankpos];
neigh_offsets[buf_rankpos] += eel->sizes[i] * arg.dat->size;
}
//Async upload
cudaMemcpyAsync(op2_grp_neigh_to_neigh_offsets_d,&op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours],eel->ranks_size * sizeof(unsigned),cudaMemcpyHostToDevice);
//Launch kernel
gather_data_to_buffer_ptr_cuda_kernel<<<1 + ((eel->size - 1) / 192),192>>>(arg.dat->data_d, buffer, export_exec_list_d[arg.dat->set->index], export_exec_list_disps_d[arg.dat->set->index],
op2_grp_neigh_to_neigh_offsets_d, eel->ranks_size, soa, arg.dat->size/arg.dat->dim, arg.dat->dim, arg.dat->set->size+arg.dat->set->exec_size+arg.dat->set->nonexec_size);
op2_grp_counter++;
//Same for nonexec
//Fill op2_grp_neigh_to_neigh_offsets_h at the appropriate position
for (int i = 0; i < enl->ranks_size; i++) {
int dest_rank = enl->ranks[i];
int buf_rankpos = std::distance(neigh_list.begin(),std::lower_bound(neigh_list.begin(), neigh_list.end(), dest_rank));
op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours+i] = neigh_offsets[buf_rankpos];
neigh_offsets[buf_rankpos] += enl->sizes[i] * arg.dat->size;
}
//Async upload
cudaMemcpyAsync(op2_grp_neigh_to_neigh_offsets_d,&op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours],enl->ranks_size * sizeof(unsigned),cudaMemcpyHostToDevice);
//Launch kernel
gather_data_to_buffer_ptr_cuda_kernel<<<1 + ((enl->size - 1) / 192),192>>>(arg.dat->data_d, buffer, export_nonexec_list_d[arg.dat->set->index], export_nonexec_list_disps_d[arg.dat->set->index],
op2_grp_neigh_to_neigh_offsets_d, enl->ranks_size, soa, arg.dat->size/arg.dat->dim, arg.dat->dim, arg.dat->set->size+arg.dat->set->exec_size+arg.dat->set->nonexec_size);
op2_grp_counter++;
}
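//Inverse of the gather above: unpack the exec and nonexec import halos from the
//combined receive buffer straight into the dat's halo region (data_d + offset),
//on the op2_grp_secondary stream.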
void scatter_data_from_buffer_ptr_cuda(op_arg arg, halo_list iel, halo_list inl, char *buffer,
std::vector<int>& neigh_list, std::vector<unsigned>& neigh_offsets) {
check_realloc_buffer();
int soa = 0;
if ((OP_auto_soa && arg.dat->dim > 1) || strstr(arg.dat->type, ":soa") != NULL) soa = 1;
//Exec halo
//Fill op2_grp_neigh_to_neigh_offsets_h at the appropriate position
for (int i = 0; i < iel->ranks_size; i++) {
int dest_rank = iel->ranks[i];
int buf_rankpos = std::distance(neigh_list.begin(),std::lower_bound(neigh_list.begin(), neigh_list.end(), dest_rank));
op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours+i] = neigh_offsets[buf_rankpos];
neigh_offsets[buf_rankpos] += iel->sizes[i] * arg.dat->size;
}
//Async upload
cudaMemcpyAsync(op2_grp_neigh_to_neigh_offsets_d,&op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours],iel->ranks_size * sizeof(unsigned),cudaMemcpyHostToDevice,op2_grp_secondary);
//Launch kernel
unsigned offset = arg.dat->set->size * (soa?arg.dat->size/arg.dat->dim:arg.dat->size);
scatter_data_from_buffer_ptr_cuda_kernel<<<1 + ((iel->size - 1) / 192),192,0,op2_grp_secondary>>>(arg.dat->data_d+offset, buffer, import_exec_list_disps_d[arg.dat->set->index],
op2_grp_neigh_to_neigh_offsets_d, iel->ranks_size, soa, arg.dat->size/arg.dat->dim, arg.dat->dim, arg.dat->set->size+arg.dat->set->exec_size+arg.dat->set->nonexec_size);
op2_grp_counter++;
//Same for nonexec
//Fill op2_grp_neigh_to_neigh_offsets_h at the appropriate position
for (int i = 0; i < inl->ranks_size; i++) {
int dest_rank = inl->ranks[i];
int buf_rankpos = std::distance(neigh_list.begin(),std::lower_bound(neigh_list.begin(), neigh_list.end(), dest_rank));
op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours+i] = neigh_offsets[buf_rankpos];
neigh_offsets[buf_rankpos] += inl->sizes[i] * arg.dat->size;
}
//Async upload
cudaMemcpyAsync(op2_grp_neigh_to_neigh_offsets_d,&op2_grp_neigh_to_neigh_offsets_h[op2_grp_counter*op2_grp_max_neighbours],inl->ranks_size * sizeof(unsigned),cudaMemcpyHostToDevice,op2_grp_secondary);
//Launch kernel
offset = (arg.dat->set->size + iel->size) * (soa?arg.dat->size/arg.dat->dim:arg.dat->size);
scatter_data_from_buffer_ptr_cuda_kernel<<<1 + ((inl->size - 1) / 192),192,0,op2_grp_secondary>>>(arg.dat->data_d+offset, buffer, import_nonexec_list_disps_d[arg.dat->set->index],
op2_grp_neigh_to_neigh_offsets_d, inl->ranks_size, soa, arg.dat->size/arg.dat->dim, arg.dat->dim, arg.dat->set->size+arg.dat->set->exec_size+arg.dat->set->nonexec_size);
op2_grp_counter++;
} |
e432c98b23c6f976032ee6163c03515af6828ef1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <../src/vec/is/sf/impls/basic/sfpack.h>
/* Map a thread id to an index in root/leaf space through a series of 3D subdomains. See PetscSFPackOpt. */
__device__ static inline PetscInt MapTidToIndex(const PetscInt *opt, PetscInt tid)
{
PetscInt i, j, k, m, n, r;
const PetscInt *offset, *start, *dx, *dy, *X, *Y;
n = opt[0];
offset = opt + 1;
start = opt + n + 2;
dx = opt + 2 * n + 2;
dy = opt + 3 * n + 2;
X = opt + 5 * n + 2;
Y = opt + 6 * n + 2;
for (r = 0; r < n; r++) {
if (tid < offset[r + 1]) break;
}
m = (tid - offset[r]);
k = m / (dx[r] * dy[r]);
j = (m - k * dx[r] * dy[r]) / dx[r];
i = m - k * dx[r] * dy[r] - j * dx[r];
return (start[r] + k * X[r] * Y[r] + j * X[r] + i);
}
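/* As read off above, opt[] appears to be laid out as: opt[0] = n (number of
   subdomains), opt[1..n+1] = offset (prefix sums of subdomain sizes), followed
   by the per-subdomain start, dx and dy arrays, with the X and Y strides at
   5*n+2 and 6*n+2; the slice at 4*n+2 (presumably dz) is not needed here. */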
/*====================================================================================*/
/* Templated CUDA kernels for pack/unpack. The Op can be regular or atomic */
/*====================================================================================*/
/* Suppose user calls PetscSFReduce(sf,unit,...) and <unit> is an MPI data type made of 16 PetscReals, then
<Type> is PetscReal, which is the primitive type we operate on.
<bs> is 16, which says <unit> contains 16 primitive types.
<BS> is 8, which is the maximal SIMD width we will try to vectorize operations on <unit>.
<EQ> is 0, which is (bs == BS ? 1 : 0)
If instead, <unit> has 8 PetscReals, then bs=8, BS=8, EQ=1, rendering MBS below to a compile time constant.
For the common case in VecScatter, bs=1, BS=1, EQ=1, MBS=1, the inner for-loops below will be totally unrolled.
*/
template <class Type, PetscInt BS, PetscInt EQ>
__global__ static void d_Pack(PetscInt bs, PetscInt count, PetscInt start, const PetscInt *opt, const PetscInt *idx, const Type *data, Type *buf)
{
PetscInt i, s, t, tid = blockIdx.x * blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs / BS; /* If EQ, then M=1 enables compiler's const-propagation */
const PetscInt MBS = M * BS; /* MBS=bs. We turn MBS into a compile-time const when EQ=1. */
for (; tid < count; tid += grid_size) {
/* opt != NULL ==> idx == NULL, i.e., the indices have patterns but are not contiguous;
opt == NULL && idx == NULL ==> the indices are contiguous;
*/
t = (opt ? MapTidToIndex(opt, tid) : (idx ? idx[tid] : start + tid)) * MBS;
s = tid * MBS;
for (i = 0; i < MBS; i++) buf[s + i] = data[t + i];
}
}
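/* For the common bs=1 case (BS=1, EQ=1, hence MBS=1) the loop above reduces to
   a plain gather: buf[tid] = data[idx[tid]], or data[start+tid] when the
   indices are contiguous. */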
template <class Type, class Op, PetscInt BS, PetscInt EQ>
__global__ static void d_UnpackAndOp(PetscInt bs, PetscInt count, PetscInt start, const PetscInt *opt, const PetscInt *idx, Type *data, const Type *buf)
{
PetscInt i, s, t, tid = blockIdx.x * blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs / BS, MBS = M * BS;
Op op;
for (; tid < count; tid += grid_size) {
t = (opt ? MapTidToIndex(opt, tid) : (idx ? idx[tid] : start + tid)) * MBS;
s = tid * MBS;
for (i = 0; i < MBS; i++) op(data[t + i], buf[s + i]);
}
}
template <class Type, class Op, PetscInt BS, PetscInt EQ>
__global__ static void d_FetchAndOp(PetscInt bs, PetscInt count, PetscInt rootstart, const PetscInt *rootopt, const PetscInt *rootidx, Type *rootdata, Type *leafbuf)
{
PetscInt i, l, r, tid = blockIdx.x * blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs / BS, MBS = M * BS;
Op op;
for (; tid < count; tid += grid_size) {
r = (rootopt ? MapTidToIndex(rootopt, tid) : (rootidx ? rootidx[tid] : rootstart + tid)) * MBS;
l = tid * MBS;
for (i = 0; i < MBS; i++) leafbuf[l + i] = op(rootdata[r + i], leafbuf[l + i]);
}
}
template <class Type, class Op, PetscInt BS, PetscInt EQ>
__global__ static void d_ScatterAndOp(PetscInt bs, PetscInt count, PetscInt srcx, PetscInt srcy, PetscInt srcX, PetscInt srcY, PetscInt srcStart, const PetscInt *srcIdx, const Type *src, PetscInt dstx, PetscInt dsty, PetscInt dstX, PetscInt dstY, PetscInt dstStart, const PetscInt *dstIdx, Type *dst)
{
PetscInt i, j, k, s, t, tid = blockIdx.x * blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs / BS, MBS = M * BS;
Op op;
for (; tid < count; tid += grid_size) {
if (!srcIdx) { /* src is either contiguous or 3D */
k = tid / (srcx * srcy);
j = (tid - k * srcx * srcy) / srcx;
i = tid - k * srcx * srcy - j * srcx;
s = srcStart + k * srcX * srcY + j * srcX + i;
} else {
s = srcIdx[tid];
}
if (!dstIdx) { /* dst is either contiguous or 3D */
k = tid / (dstx * dsty);
j = (tid - k * dstx * dsty) / dstx;
i = tid - k * dstx * dsty - j * dstx;
t = dstStart + k * dstX * dstY + j * dstX + i;
} else {
t = dstIdx[tid];
}
s *= MBS;
t *= MBS;
for (i = 0; i < MBS; i++) op(dst[t + i], src[s + i]);
}
}
template <class Type, class Op, PetscInt BS, PetscInt EQ>
__global__ static void d_FetchAndOpLocal(PetscInt bs, PetscInt count, PetscInt rootstart, const PetscInt *rootopt, const PetscInt *rootidx, Type *rootdata, PetscInt leafstart, const PetscInt *leafopt, const PetscInt *leafidx, const Type *leafdata, Type *leafupdate)
{
PetscInt i, l, r, tid = blockIdx.x * blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs / BS, MBS = M * BS;
Op op;
for (; tid < count; tid += grid_size) {
r = (rootopt ? MapTidToIndex(rootopt, tid) : (rootidx ? rootidx[tid] : rootstart + tid)) * MBS;
l = (leafopt ? MapTidToIndex(leafopt, tid) : (leafidx ? leafidx[tid] : leafstart + tid)) * MBS;
for (i = 0; i < MBS; i++) leafupdate[l + i] = op(rootdata[r + i], leafdata[l + i]);
}
}
/*====================================================================================*/
/* Regular operations on device */
/*====================================================================================*/
template <typename Type>
struct Insert {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = y;
return old;
}
};
template <typename Type>
struct Add {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x += y;
return old;
}
};
template <typename Type>
struct Mult {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x *= y;
return old;
}
};
template <typename Type>
struct Min {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = PetscMin(x, y);
return old;
}
};
template <typename Type>
struct Max {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = PetscMax(x, y);
return old;
}
};
template <typename Type>
struct LAND {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = x && y;
return old;
}
};
template <typename Type>
struct LOR {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = x || y;
return old;
}
};
template <typename Type>
struct LXOR {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = !x != !y;
return old;
}
};
template <typename Type>
struct BAND {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = x & y;
return old;
}
};
template <typename Type>
struct BOR {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = x | y;
return old;
}
};
template <typename Type>
struct BXOR {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = x ^ y;
return old;
}
};
template <typename Type>
struct Minloc {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
if (y.a < x.a) x = y;
else if (y.a == x.a) x.b = min(x.b, y.b);
return old;
}
};
template <typename Type>
struct Maxloc {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
if (y.a > x.a) x = y;
else if (y.a == x.a) x.b = min(x.b, y.b); /* See MPI MAXLOC */
return old;
}
};
/*====================================================================================*/
/* Atomic operations on device */
/*====================================================================================*/
/*
Atomic Insert (exchange) operations
CUDA C Programming Guide V10.1 Chapter B.12.1.3:
int atomicExch(int* address, int val);
unsigned int atomicExch(unsigned int* address, unsigned int val);
unsigned long long int atomicExch(unsigned long long int* address, unsigned long long int val);
float atomicExch(float* address, float val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory and stores val back to memory at the same address. These two operations are
performed in one atomic transaction. The function returns old.
PETSc notes:
It may be useful in PetscSFFetchAndOp with op = MPI_REPLACE.
VecScatter with multiple entries scattered to the same location using INSERT_VALUES does not need
atomic insertion, since it does not need the old value. A 32-bit or 64-bit store instruction should
be atomic itself.
With bs>1 and a unit > 64 bits, the current element-wise atomic approach can not guarantee the whole
insertion is atomic. Hope no user codes rely on that.
*/
__device__ static double atomicExch(double *address, double val)
{
return __longlong_as_double(atomicExch((ullint *)address, __double_as_longlong(val)));
}
__device__ static llint atomicExch(llint *address, llint val)
{
return (llint)(atomicExch((ullint *)address, (ullint)val));
}
template <typename Type>
struct AtomicInsert {
__device__ Type operator()(Type &x, Type y) const { return atomicExch(&x, y); }
};
#if defined(PETSC_HAVE_COMPLEX)
#if defined(PETSC_USE_REAL_DOUBLE)
/* CUDA does not support 128-bit atomics. Users should not insert different 128-bit PetscComplex values to the same location */
template <>
struct AtomicInsert<PetscComplex> {
__device__ PetscComplex operator()(PetscComplex &x, PetscComplex y) const
{
PetscComplex old, *z = &old;
double *xp = (double *)&x, *yp = (double *)&y;
AtomicInsert<double> op;
z[0] = op(xp[0], yp[0]);
z[1] = op(xp[1], yp[1]);
return old; /* The returned value may not be atomic. It can be a mix of the two ops. The caller should discard it. */
}
};
#elif defined(PETSC_USE_REAL_SINGLE)
template <>
struct AtomicInsert<PetscComplex> {
__device__ PetscComplex operator()(PetscComplex &x, PetscComplex y) const
{
double *xp = (double *)&x, *yp = (double *)&y;
AtomicInsert<double> op;
return op(xp[0], yp[0]);
}
};
#endif
#endif
/*
Atomic add operations
CUDA C Programming Guide V10.1 Chapter B.12.1.1:
int atomicAdd(int* address, int val);
unsigned int atomicAdd(unsigned int* address,unsigned int val);
unsigned long long int atomicAdd(unsigned long long int* address,unsigned long long int val);
float atomicAdd(float* address, float val);
double atomicAdd(double* address, double val);
__half2 atomicAdd(__half2 *address, __half2 val);
__half atomicAdd(__half *address, __half val);
reads the 16-bit, 32-bit or 64-bit word old located at the address address in global or shared memory, computes (old + val),
and stores the result back to memory at the same address. These three operations are performed in one atomic transaction. The
function returns old.
The 32-bit floating-point version of atomicAdd() is only supported by devices of compute capability 2.x and higher.
The 64-bit floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and higher.
The 32-bit __half2 floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and
higher. The atomicity of the __half2 add operation is guaranteed separately for each of the two __half elements;
the entire __half2 is not guaranteed to be atomic as a single 32-bit access.
The 16-bit __half floating-point version of atomicAdd() is only supported by devices of compute capability 7.x and higher.
*/
__device__ static llint atomicAdd(llint *address, llint val)
{
return (llint)atomicAdd((ullint *)address, (ullint)val);
}
template <typename Type>
struct AtomicAdd {
__device__ Type operator()(Type &x, Type y) const { return atomicAdd(&x, y); }
};
template <>
struct AtomicAdd<double> {
__device__ double operator()(double &x, double y) const
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
return atomicAdd(&x, y);
#else
double *address = &x, val = y;
ullint *address_as_ull = (ullint *)address;
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
/* Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN) */
} while (assumed != old);
return __longlong_as_double(old);
#endif
}
};
template <>
struct AtomicAdd<float> {
__device__ float operator()(float &x, float y) const
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
return atomicAdd(&x, y);
#else
float *address = &x, val = y;
int *address_as_int = (int *)address;
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val + __int_as_float(assumed)));
/* Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN) */
} while (assumed != old);
return __int_as_float(old);
#endif
}
};
#if defined(PETSC_HAVE_COMPLEX)
template <>
struct AtomicAdd<PetscComplex> {
__device__ PetscComplex operator()(PetscComplex &x, PetscComplex y) const
{
PetscComplex old, *z = &old;
PetscReal *xp = (PetscReal *)&x, *yp = (PetscReal *)&y;
AtomicAdd<PetscReal> op;
z[0] = op(xp[0], yp[0]);
z[1] = op(xp[1], yp[1]);
return old; /* The returned value may not be atomic. It can be a mix of the two ops. The caller should discard it. */
}
};
#endif
/*
Atomic Mult operations:
CUDA has no atomicMult at all, so we build our own with atomicCAS
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMult(double *address, double val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
/* Other threads can access and modify value of *address_as_ull after the read above and before the write below */
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val * __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMult(float *address, float val)
{
int *address_as_int = (int *)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val * __int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
}
#endif
__device__ static int atomicMult(int *address, int val)
{
int *address_as_int = (int *)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, val * assumed);
} while (assumed != old);
return (int)old;
}
__device__ static llint atomicMult(llint *address, llint val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val * (llint)assumed));
} while (assumed != old);
return (llint)old;
}
template <typename Type>
struct AtomicMult {
__device__ Type operator()(Type &x, Type y) const { return atomicMult(&x, y); }
};
/*
Atomic Min/Max operations
CUDA C Programming Guide V10.1 Chapter B.12.1.4~5:
int atomicMin(int* address, int val);
unsigned int atomicMin(unsigned int* address,unsigned int val);
unsigned long long int atomicMin(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes the minimum of old and val, and stores the result back to memory
at the same address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicMin() is only supported by devices of compute capability 3.5 and higher.
atomicMax() is similar.
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMin(double *address, double val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMin(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ static double atomicMax(double *address, double val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMax(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMin(float *address, float val)
{
int *address_as_int = (int *)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMin(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ static float atomicMax(float *address, float val)
{
int *address_as_int = (int *)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMax(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
#endif
/*
atomicMin/Max(long long *, long long) are not in Nvidia's documentation. But on OLCF Summit we found
atomicMin/Max/And/Or/Xor(long long *, long long) in /sw/summit/cuda/10.1.243/include/sm_32_atomic_functions.h.
This causes compilation errors with pgi compilers and 64-bit indices:
error: function "atomicMin(long long *, long long)" has already been defined
So we add extra conditions defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320)
*/
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320)
__device__ static llint atomicMin(llint *address, llint val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(PetscMin(val, (llint)assumed)));
} while (assumed != old);
return (llint)old;
}
__device__ static llint atomicMax(llint *address, llint val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(PetscMax(val, (llint)assumed)));
} while (assumed != old);
return (llint)old;
}
#endif
template <typename Type>
struct AtomicMin {
__device__ Type operator()(Type &x, Type y) const { return atomicMin(&x, y); }
};
template <typename Type>
struct AtomicMax {
__device__ Type operator()(Type &x, Type y) const { return atomicMax(&x, y); }
};
/*
Atomic bitwise operations
CUDA C Programming Guide V10.1 Chapter B.12.2.1 ~ B.12.2.3:
int atomicAnd(int* address, int val);
unsigned int atomicAnd(unsigned int* address,unsigned int val);
unsigned long long int atomicAnd(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes (old & val), and stores the result back to memory at the same
address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicAnd() is only supported by devices of compute capability 3.5 and higher.
atomicOr() and atomicXor are similar.
*/
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320) /* Why 320? see comments at atomicMin() above */
__device__ static llint atomicAnd(llint *address, llint val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val & (llint)assumed));
} while (assumed != old);
return (llint)old;
}
__device__ static llint atomicOr(llint *address, llint val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val | (llint)assumed));
} while (assumed != old);
return (llint)old;
}
__device__ static llint atomicXor(llint *address, llint val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val ^ (llint)assumed));
} while (assumed != old);
return (llint)old;
}
#endif
template <typename Type>
struct AtomicBAND {
__device__ Type operator()(Type &x, Type y) const { return atomicAnd(&x, y); }
};
template <typename Type>
struct AtomicBOR {
__device__ Type operator()(Type &x, Type y) const { return atomicOr(&x, y); }
};
template <typename Type>
struct AtomicBXOR {
__device__ Type operator()(Type &x, Type y) const { return atomicXor(&x, y); }
};
/*
Atomic logical operations:
CUDA has no atomic logical operations at all. We support them on integer types.
*/
/* A template without definition makes any instantiation not using given specializations erroneous at compile time,
which is what we want since we only support 32-bit and 64-bit integers.
*/
template <typename Type, class Op, int size /* sizeof(Type) */>
struct AtomicLogical;
template <typename Type, class Op>
struct AtomicLogical<Type, Op, 4> {
__device__ Type operator()(Type &x, Type y) const
{
int *address_as_int = (int *)(&x);
int old = *address_as_int, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, (int)(op((Type)assumed, y)));
} while (assumed != old);
return (Type)old;
}
};
template <typename Type, class Op>
struct AtomicLogical<Type, Op, 8> {
__device__ Type operator()(Type &x, Type y) const
{
ullint *address_as_ull = (ullint *)(&x);
ullint old = *address_as_ull, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(op((Type)assumed, y)));
} while (assumed != old);
return (Type)old;
}
};
/* Note land/lor/lxor below are different from LAND etc above. Here we pass arguments by value and return result of ops (not old value) */
template <typename Type>
struct land {
__device__ Type operator()(Type x, Type y) { return x && y; }
};
template <typename Type>
struct lor {
__device__ Type operator()(Type x, Type y) { return x || y; }
};
template <typename Type>
struct lxor {
__device__ Type operator()(Type x, Type y) { return (!x != !y); }
};
template <typename Type>
struct AtomicLAND {
__device__ Type operator()(Type &x, Type y) const
{
AtomicLogical<Type, land<Type>, sizeof(Type)> op;
return op(x, y);
}
};
template <typename Type>
struct AtomicLOR {
__device__ Type operator()(Type &x, Type y) const
{
AtomicLogical<Type, lor<Type>, sizeof(Type)> op;
return op(x, y);
}
};
template <typename Type>
struct AtomicLXOR {
__device__ Type operator()(Type &x, Type y) const
{
AtomicLogical<Type, lxor<Type>, sizeof(Type)> op;
return op(x, y);
}
};
/*====================================================================================*/
/* Wrapper functions of cuda kernels. Function pointers are stored in 'link' */
/*====================================================================================*/
template <typename Type, PetscInt BS, PetscInt EQ>
static PetscErrorCode Pack(PetscSFLink link, PetscInt count, PetscInt start, PetscSFPackOpt opt, const PetscInt *idx, const void *data, void *buf)
{
PetscInt nthreads = 256;
PetscInt nblocks = (count + nthreads - 1) / nthreads;
const PetscInt *iarray = opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(PETSC_SUCCESS);
if (!opt && !idx) { /* It is a 'CUDA data to nvshmem buf' memory copy */
PetscCallCUDA(hipMemcpyAsync(buf, (char *)data + start * link->unitbytes, count * link->unitbytes, hipMemcpyDeviceToDevice, link->stream));
} else {
nblocks = PetscMin(nblocks, link->maxResidentThreadsPerGPU / nthreads);
hipLaunchKernelGGL(( d_Pack<Type, BS, EQ>), dim3(nblocks), dim3(nthreads), 0, link->stream, link->bs, count, start, iarray, idx, (const Type *)data, (Type *)buf);
PetscCallCUDA(hipGetLastError());
}
PetscFunctionReturn(PETSC_SUCCESS);
}
/* To specialize UnpackAndOp for the hipMemcpyAsync() below. Usually if this is a contiguous memcpy, we use root/leafdirect and do
not need UnpackAndOp. Only with nvshmem, we need this 'nvshmem buf to CUDA data' memory copy
*/
template <typename Type, PetscInt BS, PetscInt EQ>
static PetscErrorCode Unpack(PetscSFLink link, PetscInt count, PetscInt start, PetscSFPackOpt opt, const PetscInt *idx, void *data, const void *buf)
{
PetscInt nthreads = 256;
PetscInt nblocks = (count + nthreads - 1) / nthreads;
const PetscInt *iarray = opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(PETSC_SUCCESS);
if (!opt && !idx) { /* It is a 'nvshmem buf to CUDA data' memory copy */
PetscCallCUDA(hipMemcpyAsync((char *)data + start * link->unitbytes, buf, count * link->unitbytes, hipMemcpyDeviceToDevice, link->stream));
} else {
nblocks = PetscMin(nblocks, link->maxResidentThreadsPerGPU / nthreads);
hipLaunchKernelGGL(( d_UnpackAndOp<Type, Insert<Type>, BS, EQ>), dim3(nblocks), dim3(nthreads), 0, link->stream, link->bs, count, start, iarray, idx, (Type *)data, (const Type *)buf);
PetscCallCUDA(hipGetLastError());
}
PetscFunctionReturn(PETSC_SUCCESS);
}
template <typename Type, class Op, PetscInt BS, PetscInt EQ>
static PetscErrorCode UnpackAndOp(PetscSFLink link, PetscInt count, PetscInt start, PetscSFPackOpt opt, const PetscInt *idx, void *data, const void *buf)
{
PetscInt nthreads = 256;
PetscInt nblocks = (count + nthreads - 1) / nthreads;
const PetscInt *iarray = opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(PETSC_SUCCESS);
nblocks = PetscMin(nblocks, link->maxResidentThreadsPerGPU / nthreads);
hipLaunchKernelGGL(( d_UnpackAndOp<Type, Op, BS, EQ>), dim3(nblocks), dim3(nthreads), 0, link->stream, link->bs, count, start, iarray, idx, (Type *)data, (const Type *)buf);
PetscCallCUDA(hipGetLastError());
PetscFunctionReturn(PETSC_SUCCESS);
}
template <typename Type, class Op, PetscInt BS, PetscInt EQ>
static PetscErrorCode FetchAndOp(PetscSFLink link, PetscInt count, PetscInt start, PetscSFPackOpt opt, const PetscInt *idx, void *data, void *buf)
{
PetscInt nthreads = 256;
PetscInt nblocks = (count + nthreads - 1) / nthreads;
const PetscInt *iarray = opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(PETSC_SUCCESS);
nblocks = PetscMin(nblocks, link->maxResidentThreadsPerGPU / nthreads);
hipLaunchKernelGGL(( d_FetchAndOp<Type, Op, BS, EQ>), dim3(nblocks), dim3(nthreads), 0, link->stream, link->bs, count, start, iarray, idx, (Type *)data, (Type *)buf);
PetscCallCUDA(hipGetLastError());
PetscFunctionReturn(PETSC_SUCCESS);
}
template <typename Type, class Op, PetscInt BS, PetscInt EQ>
static PetscErrorCode ScatterAndOp(PetscSFLink link, PetscInt count, PetscInt srcStart, PetscSFPackOpt srcOpt, const PetscInt *srcIdx, const void *src, PetscInt dstStart, PetscSFPackOpt dstOpt, const PetscInt *dstIdx, void *dst)
{
PetscInt nthreads = 256;
PetscInt nblocks = (count + nthreads - 1) / nthreads;
PetscInt srcx = 0, srcy = 0, srcX = 0, srcY = 0, dstx = 0, dsty = 0, dstX = 0, dstY = 0;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(PETSC_SUCCESS);
nblocks = PetscMin(nblocks, link->maxResidentThreadsPerGPU / nthreads);
/* The 3D shape of source subdomain may be different than that of the destination, which makes it difficult to use CUDA 3D grid and block */
if (srcOpt) {
srcx = srcOpt->dx[0];
srcy = srcOpt->dy[0];
srcX = srcOpt->X[0];
srcY = srcOpt->Y[0];
srcStart = srcOpt->start[0];
srcIdx = NULL;
} else if (!srcIdx) {
srcx = srcX = count;
srcy = srcY = 1;
}
if (dstOpt) {
dstx = dstOpt->dx[0];
dsty = dstOpt->dy[0];
dstX = dstOpt->X[0];
dstY = dstOpt->Y[0];
dstStart = dstOpt->start[0];
dstIdx = NULL;
} else if (!dstIdx) {
dstx = dstX = count;
dsty = dstY = 1;
}
hipLaunchKernelGGL(( d_ScatterAndOp<Type, Op, BS, EQ>), dim3(nblocks), dim3(nthreads), 0, link->stream, link->bs, count, srcx, srcy, srcX, srcY, srcStart, srcIdx, (const Type *)src, dstx, dsty, dstX, dstY, dstStart, dstIdx, (Type *)dst);
PetscCallCUDA(hipGetLastError());
PetscFunctionReturn(PETSC_SUCCESS);
}
/* Specialization for Insert since we may use hipMemcpyAsync */
template <typename Type, PetscInt BS, PetscInt EQ>
static PetscErrorCode ScatterAndInsert(PetscSFLink link, PetscInt count, PetscInt srcStart, PetscSFPackOpt srcOpt, const PetscInt *srcIdx, const void *src, PetscInt dstStart, PetscSFPackOpt dstOpt, const PetscInt *dstIdx, void *dst)
{
PetscFunctionBegin;
if (!count) PetscFunctionReturn(PETSC_SUCCESS);
/*src and dst are contiguous */
if ((!srcOpt && !srcIdx) && (!dstOpt && !dstIdx) && src != dst) {
PetscCallCUDA(hipMemcpyAsync((Type *)dst + dstStart * link->bs, (const Type *)src + srcStart * link->bs, count * link->unitbytes, hipMemcpyDeviceToDevice, link->stream));
} else {
PetscCall(ScatterAndOp<Type, Insert<Type>, BS, EQ>(link, count, srcStart, srcOpt, srcIdx, src, dstStart, dstOpt, dstIdx, dst));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
template <typename Type, class Op, PetscInt BS, PetscInt EQ>
static PetscErrorCode FetchAndOpLocal(PetscSFLink link, PetscInt count, PetscInt rootstart, PetscSFPackOpt rootopt, const PetscInt *rootidx, void *rootdata, PetscInt leafstart, PetscSFPackOpt leafopt, const PetscInt *leafidx, const void *leafdata, void *leafupdate)
{
PetscInt nthreads = 256;
PetscInt nblocks = (count + nthreads - 1) / nthreads;
const PetscInt *rarray = rootopt ? rootopt->array : NULL;
const PetscInt *larray = leafopt ? leafopt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(PETSC_SUCCESS);
nblocks = PetscMin(nblocks, link->maxResidentThreadsPerGPU / nthreads);
hipLaunchKernelGGL(( d_FetchAndOpLocal<Type, Op, BS, EQ>), dim3(nblocks), dim3(nthreads), 0, link->stream, link->bs, count, rootstart, rarray, rootidx, (Type *)rootdata, leafstart, larray, leafidx, (const Type *)leafdata, (Type *)leafupdate);
PetscCallCUDA(hipGetLastError());
PetscFunctionReturn(PETSC_SUCCESS);
}
/*====================================================================================*/
/* Init various types and instantiate pack/unpack function pointers */
/*====================================================================================*/
template <typename Type, PetscInt BS, PetscInt EQ>
static void PackInit_RealType(PetscSFLink link)
{
/* Pack/unpack for remote communication */
link->d_Pack = Pack<Type, BS, EQ>;
link->d_UnpackAndInsert = Unpack<Type, BS, EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type, Add<Type>, BS, EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type, Mult<Type>, BS, EQ>;
link->d_UnpackAndMin = UnpackAndOp<Type, Min<Type>, BS, EQ>;
link->d_UnpackAndMax = UnpackAndOp<Type, Max<Type>, BS, EQ>;
link->d_FetchAndAdd = FetchAndOp<Type, Add<Type>, BS, EQ>;
/* Scatter for local communication */
link->d_ScatterAndInsert = ScatterAndInsert<Type, BS, EQ>; /* Has special optimizations */
link->d_ScatterAndAdd = ScatterAndOp<Type, Add<Type>, BS, EQ>;
link->d_ScatterAndMult = ScatterAndOp<Type, Mult<Type>, BS, EQ>;
link->d_ScatterAndMin = ScatterAndOp<Type, Min<Type>, BS, EQ>;
link->d_ScatterAndMax = ScatterAndOp<Type, Max<Type>, BS, EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal<Type, Add<Type>, BS, EQ>;
/* Atomic versions when there are data-race possibilities */
link->da_UnpackAndInsert = UnpackAndOp<Type, AtomicInsert<Type>, BS, EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type, AtomicAdd<Type>, BS, EQ>;
link->da_UnpackAndMult = UnpackAndOp<Type, AtomicMult<Type>, BS, EQ>;
link->da_UnpackAndMin = UnpackAndOp<Type, AtomicMin<Type>, BS, EQ>;
link->da_UnpackAndMax = UnpackAndOp<Type, AtomicMax<Type>, BS, EQ>;
link->da_FetchAndAdd = FetchAndOp<Type, AtomicAdd<Type>, BS, EQ>;
link->da_ScatterAndInsert = ScatterAndOp<Type, AtomicInsert<Type>, BS, EQ>;
link->da_ScatterAndAdd = ScatterAndOp<Type, AtomicAdd<Type>, BS, EQ>;
link->da_ScatterAndMult = ScatterAndOp<Type, AtomicMult<Type>, BS, EQ>;
link->da_ScatterAndMin = ScatterAndOp<Type, AtomicMin<Type>, BS, EQ>;
link->da_ScatterAndMax = ScatterAndOp<Type, AtomicMax<Type>, BS, EQ>;
link->da_FetchAndAddLocal = FetchAndOpLocal<Type, AtomicAdd<Type>, BS, EQ>;
}
/* Have this templated class to specialize for char integers */
template <typename Type, PetscInt BS, PetscInt EQ, PetscInt size /*sizeof(Type)*/>
struct PackInit_IntegerType_Atomic {
static void Init(PetscSFLink link)
{
link->da_UnpackAndInsert = UnpackAndOp<Type, AtomicInsert<Type>, BS, EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type, AtomicAdd<Type>, BS, EQ>;
link->da_UnpackAndMult = UnpackAndOp<Type, AtomicMult<Type>, BS, EQ>;
link->da_UnpackAndMin = UnpackAndOp<Type, AtomicMin<Type>, BS, EQ>;
link->da_UnpackAndMax = UnpackAndOp<Type, AtomicMax<Type>, BS, EQ>;
link->da_UnpackAndLAND = UnpackAndOp<Type, AtomicLAND<Type>, BS, EQ>;
link->da_UnpackAndLOR = UnpackAndOp<Type, AtomicLOR<Type>, BS, EQ>;
link->da_UnpackAndLXOR = UnpackAndOp<Type, AtomicLXOR<Type>, BS, EQ>;
link->da_UnpackAndBAND = UnpackAndOp<Type, AtomicBAND<Type>, BS, EQ>;
link->da_UnpackAndBOR = UnpackAndOp<Type, AtomicBOR<Type>, BS, EQ>;
link->da_UnpackAndBXOR = UnpackAndOp<Type, AtomicBXOR<Type>, BS, EQ>;
link->da_FetchAndAdd = FetchAndOp<Type, AtomicAdd<Type>, BS, EQ>;
link->da_ScatterAndInsert = ScatterAndOp<Type, AtomicInsert<Type>, BS, EQ>;
link->da_ScatterAndAdd = ScatterAndOp<Type, AtomicAdd<Type>, BS, EQ>;
link->da_ScatterAndMult = ScatterAndOp<Type, AtomicMult<Type>, BS, EQ>;
link->da_ScatterAndMin = ScatterAndOp<Type, AtomicMin<Type>, BS, EQ>;
link->da_ScatterAndMax = ScatterAndOp<Type, AtomicMax<Type>, BS, EQ>;
link->da_ScatterAndLAND = ScatterAndOp<Type, AtomicLAND<Type>, BS, EQ>;
link->da_ScatterAndLOR = ScatterAndOp<Type, AtomicLOR<Type>, BS, EQ>;
link->da_ScatterAndLXOR = ScatterAndOp<Type, AtomicLXOR<Type>, BS, EQ>;
link->da_ScatterAndBAND = ScatterAndOp<Type, AtomicBAND<Type>, BS, EQ>;
link->da_ScatterAndBOR = ScatterAndOp<Type, AtomicBOR<Type>, BS, EQ>;
link->da_ScatterAndBXOR = ScatterAndOp<Type, AtomicBXOR<Type>, BS, EQ>;
link->da_FetchAndAddLocal = FetchAndOpLocal<Type, AtomicAdd<Type>, BS, EQ>;
}
};
/* CUDA does not support atomics on chars. It is TBD in PETSc. */
template <typename Type, PetscInt BS, PetscInt EQ>
struct PackInit_IntegerType_Atomic<Type, BS, EQ, 1> {
static void Init(PetscSFLink)
{ /* Nothing to do; leave the function pointers NULL */
}
};
template <typename Type, PetscInt BS, PetscInt EQ>
static void PackInit_IntegerType(PetscSFLink link)
{
link->d_Pack = Pack<Type, BS, EQ>;
link->d_UnpackAndInsert = Unpack<Type, BS, EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type, Add<Type>, BS, EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type, Mult<Type>, BS, EQ>;
link->d_UnpackAndMin = UnpackAndOp<Type, Min<Type>, BS, EQ>;
link->d_UnpackAndMax = UnpackAndOp<Type, Max<Type>, BS, EQ>;
link->d_UnpackAndLAND = UnpackAndOp<Type, LAND<Type>, BS, EQ>;
link->d_UnpackAndLOR = UnpackAndOp<Type, LOR<Type>, BS, EQ>;
link->d_UnpackAndLXOR = UnpackAndOp<Type, LXOR<Type>, BS, EQ>;
link->d_UnpackAndBAND = UnpackAndOp<Type, BAND<Type>, BS, EQ>;
link->d_UnpackAndBOR = UnpackAndOp<Type, BOR<Type>, BS, EQ>;
link->d_UnpackAndBXOR = UnpackAndOp<Type, BXOR<Type>, BS, EQ>;
link->d_FetchAndAdd = FetchAndOp<Type, Add<Type>, BS, EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type, BS, EQ>;
link->d_ScatterAndAdd = ScatterAndOp<Type, Add<Type>, BS, EQ>;
link->d_ScatterAndMult = ScatterAndOp<Type, Mult<Type>, BS, EQ>;
link->d_ScatterAndMin = ScatterAndOp<Type, Min<Type>, BS, EQ>;
link->d_ScatterAndMax = ScatterAndOp<Type, Max<Type>, BS, EQ>;
link->d_ScatterAndLAND = ScatterAndOp<Type, LAND<Type>, BS, EQ>;
link->d_ScatterAndLOR = ScatterAndOp<Type, LOR<Type>, BS, EQ>;
link->d_ScatterAndLXOR = ScatterAndOp<Type, LXOR<Type>, BS, EQ>;
link->d_ScatterAndBAND = ScatterAndOp<Type, BAND<Type>, BS, EQ>;
link->d_ScatterAndBOR = ScatterAndOp<Type, BOR<Type>, BS, EQ>;
link->d_ScatterAndBXOR = ScatterAndOp<Type, BXOR<Type>, BS, EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal<Type, Add<Type>, BS, EQ>;
PackInit_IntegerType_Atomic<Type, BS, EQ, sizeof(Type)>::Init(link);
}
#if defined(PETSC_HAVE_COMPLEX)
template <typename Type, PetscInt BS, PetscInt EQ>
static void PackInit_ComplexType(PetscSFLink link)
{
link->d_Pack = Pack<Type, BS, EQ>;
link->d_UnpackAndInsert = Unpack<Type, BS, EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type, Add<Type>, BS, EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type, Mult<Type>, BS, EQ>;
link->d_FetchAndAdd = FetchAndOp<Type, Add<Type>, BS, EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type, BS, EQ>;
link->d_ScatterAndAdd = ScatterAndOp<Type, Add<Type>, BS, EQ>;
link->d_ScatterAndMult = ScatterAndOp<Type, Mult<Type>, BS, EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal<Type, Add<Type>, BS, EQ>;
link->da_UnpackAndInsert = UnpackAndOp<Type, AtomicInsert<Type>, BS, EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type, AtomicAdd<Type>, BS, EQ>;
link->da_UnpackAndMult = NULL; /* Not implemented yet */
link->da_FetchAndAdd = NULL; /* Return value of atomicAdd on complex is not atomic */
link->da_ScatterAndInsert = ScatterAndOp<Type, AtomicInsert<Type>, BS, EQ>;
link->da_ScatterAndAdd = ScatterAndOp<Type, AtomicAdd<Type>, BS, EQ>;
}
#endif
typedef signed char SignedChar;
typedef unsigned char UnsignedChar;
typedef struct {
int a;
int b;
} PairInt;
typedef struct {
PetscInt a;
PetscInt b;
} PairPetscInt;
template <typename Type>
static void PackInit_PairType(PetscSFLink link)
{
link->d_Pack = Pack<Type, 1, 1>;
link->d_UnpackAndInsert = Unpack<Type, 1, 1>;
link->d_UnpackAndMaxloc = UnpackAndOp<Type, Maxloc<Type>, 1, 1>;
link->d_UnpackAndMinloc = UnpackAndOp<Type, Minloc<Type>, 1, 1>;
link->d_ScatterAndInsert = ScatterAndOp<Type, Insert<Type>, 1, 1>;
link->d_ScatterAndMaxloc = ScatterAndOp<Type, Maxloc<Type>, 1, 1>;
link->d_ScatterAndMinloc = ScatterAndOp<Type, Minloc<Type>, 1, 1>;
/* Atomics for pair types are not implemented yet */
}
template <typename Type, PetscInt BS, PetscInt EQ>
static void PackInit_DumbType(PetscSFLink link)
{
link->d_Pack = Pack<Type, BS, EQ>;
link->d_UnpackAndInsert = Unpack<Type, BS, EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type, BS, EQ>;
/* Atomics for dumb types are not implemented yet */
}
/* Some device-specific utilities */
static PetscErrorCode PetscSFLinkSyncDevice_CUDA(PetscSFLink)
{
PetscFunctionBegin;
PetscCallCUDA(hipDeviceSynchronize());
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode PetscSFLinkSyncStream_CUDA(PetscSFLink link)
{
PetscFunctionBegin;
PetscCallCUDA(hipStreamSynchronize(link->stream));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode PetscSFLinkMemcpy_CUDA(PetscSFLink link, PetscMemType dstmtype, void *dst, PetscMemType srcmtype, const void *src, size_t n)
{
PetscFunctionBegin;
enum hipMemcpyKind kinds[2][2] = {
{hipMemcpyHostToHost, hipMemcpyHostToDevice },
{hipMemcpyDeviceToHost, hipMemcpyDeviceToDevice}
};
if (n) {
if (PetscMemTypeHost(dstmtype) && PetscMemTypeHost(srcmtype)) { /* Separate HostToHost so that pure-cpu code won't call cuda runtime */
PetscCall(PetscMemcpy(dst, src, n));
} else {
int stype = PetscMemTypeDevice(srcmtype) ? 1 : 0;
int dtype = PetscMemTypeDevice(dstmtype) ? 1 : 0;
PetscCallCUDA(hipMemcpyAsync(dst, src, n, kinds[stype][dtype], link->stream));
}
}
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode PetscSFMalloc_CUDA(PetscMemType mtype, size_t size, void **ptr)
{
PetscFunctionBegin;
if (PetscMemTypeHost(mtype)) PetscCall(PetscMalloc(size, ptr));
else if (PetscMemTypeDevice(mtype)) {
PetscCall(PetscDeviceInitialize(PETSC_DEVICE_CUDA));
PetscCallCUDA(hipMalloc(ptr, size));
} else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Wrong PetscMemType %d", (int)mtype);
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode PetscSFFree_CUDA(PetscMemType mtype, void *ptr)
{
PetscFunctionBegin;
if (PetscMemTypeHost(mtype)) PetscCall(PetscFree(ptr));
else if (PetscMemTypeDevice(mtype)) PetscCallCUDA(hipFree(ptr));
else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Wrong PetscMemType %d", (int)mtype);
PetscFunctionReturn(PETSC_SUCCESS);
}
/* Destructor when the link uses MPI for communication on CUDA device */
static PetscErrorCode PetscSFLinkDestroy_MPI_CUDA(PetscSF, PetscSFLink link)
{
PetscFunctionBegin;
for (int i = PETSCSF_LOCAL; i <= PETSCSF_REMOTE; i++) {
PetscCallCUDA(hipFree(link->rootbuf_alloc[i][PETSC_MEMTYPE_DEVICE]));
PetscCallCUDA(hipFree(link->leafbuf_alloc[i][PETSC_MEMTYPE_DEVICE]));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
/* Some fields of link are initialized by PetscSFPackSetUp_Host. This routine only does what needed on device */
PetscErrorCode PetscSFLinkSetUp_CUDA(PetscSF sf, PetscSFLink link, MPI_Datatype unit)
{
PetscInt nSignedChar = 0, nUnsignedChar = 0, nInt = 0, nPetscInt = 0, nPetscReal = 0;
PetscBool is2Int, is2PetscInt;
#if defined(PETSC_HAVE_COMPLEX)
PetscInt nPetscComplex = 0;
#endif
PetscFunctionBegin;
if (link->deviceinited) PetscFunctionReturn(PETSC_SUCCESS);
PetscCall(MPIPetsc_Type_compare_contig(unit, MPI_SIGNED_CHAR, &nSignedChar));
PetscCall(MPIPetsc_Type_compare_contig(unit, MPI_UNSIGNED_CHAR, &nUnsignedChar));
/* MPI_CHAR is treated below as a dumb type that does not support reduction according to MPI standard */
PetscCall(MPIPetsc_Type_compare_contig(unit, MPI_INT, &nInt));
PetscCall(MPIPetsc_Type_compare_contig(unit, MPIU_INT, &nPetscInt));
PetscCall(MPIPetsc_Type_compare_contig(unit, MPIU_REAL, &nPetscReal));
#if defined(PETSC_HAVE_COMPLEX)
PetscCall(MPIPetsc_Type_compare_contig(unit, MPIU_COMPLEX, &nPetscComplex));
#endif
PetscCall(MPIPetsc_Type_compare(unit, MPI_2INT, &is2Int));
PetscCall(MPIPetsc_Type_compare(unit, MPIU_2INT, &is2PetscInt));
if (is2Int) {
PackInit_PairType<PairInt>(link);
} else if (is2PetscInt) { /* TODO: when is2PetscInt and nPetscInt=2, we don't know which path to take. The two paths support different ops. */
PackInit_PairType<PairPetscInt>(link);
} else if (nPetscReal) {
#if !defined(PETSC_HAVE_DEVICE)
if (nPetscReal == 8) PackInit_RealType<PetscReal, 8, 1>(link);
else if (nPetscReal % 8 == 0) PackInit_RealType<PetscReal, 8, 0>(link);
else if (nPetscReal == 4) PackInit_RealType<PetscReal, 4, 1>(link);
else if (nPetscReal % 4 == 0) PackInit_RealType<PetscReal, 4, 0>(link);
else if (nPetscReal == 2) PackInit_RealType<PetscReal, 2, 1>(link);
else if (nPetscReal % 2 == 0) PackInit_RealType<PetscReal, 2, 0>(link);
else if (nPetscReal == 1) PackInit_RealType<PetscReal, 1, 1>(link);
else if (nPetscReal % 1 == 0)
#endif
PackInit_RealType<PetscReal, 1, 0>(link);
} else if (nPetscInt && sizeof(PetscInt) == sizeof(llint)) {
#if !defined(PETSC_HAVE_DEVICE)
if (nPetscInt == 8) PackInit_IntegerType<llint, 8, 1>(link);
else if (nPetscInt % 8 == 0) PackInit_IntegerType<llint, 8, 0>(link);
else if (nPetscInt == 4) PackInit_IntegerType<llint, 4, 1>(link);
else if (nPetscInt % 4 == 0) PackInit_IntegerType<llint, 4, 0>(link);
else if (nPetscInt == 2) PackInit_IntegerType<llint, 2, 1>(link);
else if (nPetscInt % 2 == 0) PackInit_IntegerType<llint, 2, 0>(link);
else if (nPetscInt == 1) PackInit_IntegerType<llint, 1, 1>(link);
else if (nPetscInt % 1 == 0)
#endif
PackInit_IntegerType<llint, 1, 0>(link);
} else if (nInt) {
#if !defined(PETSC_HAVE_DEVICE)
if (nInt == 8) PackInit_IntegerType<int, 8, 1>(link);
else if (nInt % 8 == 0) PackInit_IntegerType<int, 8, 0>(link);
else if (nInt == 4) PackInit_IntegerType<int, 4, 1>(link);
else if (nInt % 4 == 0) PackInit_IntegerType<int, 4, 0>(link);
else if (nInt == 2) PackInit_IntegerType<int, 2, 1>(link);
else if (nInt % 2 == 0) PackInit_IntegerType<int, 2, 0>(link);
else if (nInt == 1) PackInit_IntegerType<int, 1, 1>(link);
else if (nInt % 1 == 0)
#endif
PackInit_IntegerType<int, 1, 0>(link);
} else if (nSignedChar) {
#if !defined(PETSC_HAVE_DEVICE)
if (nSignedChar == 8) PackInit_IntegerType<SignedChar, 8, 1>(link);
else if (nSignedChar % 8 == 0) PackInit_IntegerType<SignedChar, 8, 0>(link);
else if (nSignedChar == 4) PackInit_IntegerType<SignedChar, 4, 1>(link);
else if (nSignedChar % 4 == 0) PackInit_IntegerType<SignedChar, 4, 0>(link);
else if (nSignedChar == 2) PackInit_IntegerType<SignedChar, 2, 1>(link);
else if (nSignedChar % 2 == 0) PackInit_IntegerType<SignedChar, 2, 0>(link);
else if (nSignedChar == 1) PackInit_IntegerType<SignedChar, 1, 1>(link);
else if (nSignedChar % 1 == 0)
#endif
PackInit_IntegerType<SignedChar, 1, 0>(link);
} else if (nUnsignedChar) {
#if !defined(PETSC_HAVE_DEVICE)
if (nUnsignedChar == 8) PackInit_IntegerType<UnsignedChar, 8, 1>(link);
else if (nUnsignedChar % 8 == 0) PackInit_IntegerType<UnsignedChar, 8, 0>(link);
else if (nUnsignedChar == 4) PackInit_IntegerType<UnsignedChar, 4, 1>(link);
else if (nUnsignedChar % 4 == 0) PackInit_IntegerType<UnsignedChar, 4, 0>(link);
else if (nUnsignedChar == 2) PackInit_IntegerType<UnsignedChar, 2, 1>(link);
else if (nUnsignedChar % 2 == 0) PackInit_IntegerType<UnsignedChar, 2, 0>(link);
else if (nUnsignedChar == 1) PackInit_IntegerType<UnsignedChar, 1, 1>(link);
else if (nUnsignedChar % 1 == 0)
#endif
PackInit_IntegerType<UnsignedChar, 1, 0>(link);
#if defined(PETSC_HAVE_COMPLEX)
} else if (nPetscComplex) {
#if !defined(PETSC_HAVE_DEVICE)
if (nPetscComplex == 8) PackInit_ComplexType<PetscComplex, 8, 1>(link);
else if (nPetscComplex % 8 == 0) PackInit_ComplexType<PetscComplex, 8, 0>(link);
else if (nPetscComplex == 4) PackInit_ComplexType<PetscComplex, 4, 1>(link);
else if (nPetscComplex % 4 == 0) PackInit_ComplexType<PetscComplex, 4, 0>(link);
else if (nPetscComplex == 2) PackInit_ComplexType<PetscComplex, 2, 1>(link);
else if (nPetscComplex % 2 == 0) PackInit_ComplexType<PetscComplex, 2, 0>(link);
else if (nPetscComplex == 1) PackInit_ComplexType<PetscComplex, 1, 1>(link);
else if (nPetscComplex % 1 == 0)
#endif
PackInit_ComplexType<PetscComplex, 1, 0>(link);
#endif
} else {
MPI_Aint lb, nbyte;
PetscCallMPI(MPI_Type_get_extent(unit, &lb, &nbyte));
PetscCheck(lb == 0, PETSC_COMM_SELF, PETSC_ERR_SUP, "Datatype with nonzero lower bound %ld", (long)lb);
    if (nbyte % sizeof(int)) { /* If the type size is not a multiple of int */
#if !defined(PETSC_HAVE_DEVICE)
if (nbyte == 4) PackInit_DumbType<char, 4, 1>(link);
else if (nbyte % 4 == 0) PackInit_DumbType<char, 4, 0>(link);
else if (nbyte == 2) PackInit_DumbType<char, 2, 1>(link);
else if (nbyte % 2 == 0) PackInit_DumbType<char, 2, 0>(link);
else if (nbyte == 1) PackInit_DumbType<char, 1, 1>(link);
else if (nbyte % 1 == 0)
#endif
PackInit_DumbType<char, 1, 0>(link);
} else {
nInt = nbyte / sizeof(int);
#if !defined(PETSC_HAVE_DEVICE)
if (nInt == 8) PackInit_DumbType<int, 8, 1>(link);
else if (nInt % 8 == 0) PackInit_DumbType<int, 8, 0>(link);
else if (nInt == 4) PackInit_DumbType<int, 4, 1>(link);
else if (nInt % 4 == 0) PackInit_DumbType<int, 4, 0>(link);
else if (nInt == 2) PackInit_DumbType<int, 2, 1>(link);
else if (nInt % 2 == 0) PackInit_DumbType<int, 2, 0>(link);
else if (nInt == 1) PackInit_DumbType<int, 1, 1>(link);
else if (nInt % 1 == 0)
#endif
PackInit_DumbType<int, 1, 0>(link);
}
}
if (!sf->maxResidentThreadsPerGPU) { /* Not initialized */
int device;
struct hipDeviceProp_t props;
PetscCallCUDA(hipGetDevice(&device));
PetscCallCUDA(hipGetDeviceProperties(&props, device));
sf->maxResidentThreadsPerGPU = props.maxThreadsPerMultiProcessor * props.multiProcessorCount;
}
link->maxResidentThreadsPerGPU = sf->maxResidentThreadsPerGPU;
link->stream = PetscDefaultCudaStream;
link->Destroy = PetscSFLinkDestroy_MPI_CUDA;
link->SyncDevice = PetscSFLinkSyncDevice_CUDA;
link->SyncStream = PetscSFLinkSyncStream_CUDA;
link->Memcpy = PetscSFLinkMemcpy_CUDA;
link->deviceinited = PETSC_TRUE;
PetscFunctionReturn(PETSC_SUCCESS);
}
| e432c98b23c6f976032ee6163c03515af6828ef1.cu | #include <../src/vec/is/sf/impls/basic/sfpack.h>
/* Map a thread id to an index in root/leaf space through a series of 3D subdomains. See PetscSFPackOpt. */
__device__ static inline PetscInt MapTidToIndex(const PetscInt *opt, PetscInt tid)
{
PetscInt i, j, k, m, n, r;
const PetscInt *offset, *start, *dx, *dy, *X, *Y;
n = opt[0];
offset = opt + 1;
start = opt + n + 2;
dx = opt + 2 * n + 2;
dy = opt + 3 * n + 2;
X = opt + 5 * n + 2;
Y = opt + 6 * n + 2;
for (r = 0; r < n; r++) {
if (tid < offset[r + 1]) break;
}
m = (tid - offset[r]);
k = m / (dx[r] * dy[r]);
j = (m - k * dx[r] * dy[r]) / dx[r];
i = m - k * dx[r] * dy[r] - j * dx[r];
return (start[r] + k * X[r] * Y[r] + j * X[r] + i);
}
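/* A note on the opt[] layout, inferred from the indexing above: opt[0] = n (the number of 3D subdomains), followed by
   offset[n+1] (prefix sums of subdomain sizes in thread-id space), start[n], dx[n], dy[n], a field at opt[4*n+2] (likely dz)
   that this routine does not read, and the enclosing strides X[n], Y[n]. For example, with n=1, dx=4, dy=2, X=8, Y=4 and
   start=10, tid=5 gives m=5, k=0, j=1, i=1 and the routine returns 10 + 1*8 + 1 = 19. See PetscSFPackOpt for the
   authoritative layout. */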
/*====================================================================================*/
/* Templated CUDA kernels for pack/unpack. The Op can be regular or atomic */
/*====================================================================================*/
/* Suppose user calls PetscSFReduce(sf,unit,...) and <unit> is an MPI data type made of 16 PetscReals, then
<Type> is PetscReal, which is the primitive type we operate on.
<bs> is 16, which says <unit> contains 16 primitive types.
<BS> is 8, which is the maximal SIMD width we will try to vectorize operations on <unit>.
<EQ> is 0, which is (bs == BS ? 1 : 0)
If instead, <unit> has 8 PetscReals, then bs=8, BS=8, EQ=1, rendering MBS below to a compile time constant.
For the common case in VecScatter, bs=1, BS=1, EQ=1, MBS=1, the inner for-loops below will be totally unrolled.
*/
template <class Type, PetscInt BS, PetscInt EQ>
__global__ static void d_Pack(PetscInt bs, PetscInt count, PetscInt start, const PetscInt *opt, const PetscInt *idx, const Type *data, Type *buf)
{
PetscInt i, s, t, tid = blockIdx.x * blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs / BS; /* If EQ, then M=1 enables compiler's const-propagation */
const PetscInt MBS = M * BS; /* MBS=bs. We turn MBS into a compile-time const when EQ=1. */
for (; tid < count; tid += grid_size) {
    /* opt != NULL ==> idx == NULL, i.e., the indices have patterns but are not contiguous;
opt == NULL && idx == NULL ==> the indices are contiguous;
*/
t = (opt ? MapTidToIndex(opt, tid) : (idx ? idx[tid] : start + tid)) * MBS;
s = tid * MBS;
for (i = 0; i < MBS; i++) buf[s + i] = data[t + i];
}
}
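/* In words: d_Pack gathers scattered units of data[] into the contiguous buffer buf[]. The source index t comes from, in
   order of precedence, the 3D-pattern optimization (opt), an explicit index array (idx), or a plain offset from start; the
   destination is always buf[tid*MBS, tid*MBS+MBS). Because of the grid-stride loop, correctness does not depend on how many
   blocks are launched; only performance does. */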
template <class Type, class Op, PetscInt BS, PetscInt EQ>
__global__ static void d_UnpackAndOp(PetscInt bs, PetscInt count, PetscInt start, const PetscInt *opt, const PetscInt *idx, Type *data, const Type *buf)
{
PetscInt i, s, t, tid = blockIdx.x * blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs / BS, MBS = M * BS;
Op op;
for (; tid < count; tid += grid_size) {
t = (opt ? MapTidToIndex(opt, tid) : (idx ? idx[tid] : start + tid)) * MBS;
s = tid * MBS;
for (i = 0; i < MBS; i++) op(data[t + i], buf[s + i]);
}
}
template <class Type, class Op, PetscInt BS, PetscInt EQ>
__global__ static void d_FetchAndOp(PetscInt bs, PetscInt count, PetscInt rootstart, const PetscInt *rootopt, const PetscInt *rootidx, Type *rootdata, Type *leafbuf)
{
PetscInt i, l, r, tid = blockIdx.x * blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs / BS, MBS = M * BS;
Op op;
for (; tid < count; tid += grid_size) {
r = (rootopt ? MapTidToIndex(rootopt, tid) : (rootidx ? rootidx[tid] : rootstart + tid)) * MBS;
l = tid * MBS;
for (i = 0; i < MBS; i++) leafbuf[l + i] = op(rootdata[r + i], leafbuf[l + i]);
}
}
template <class Type, class Op, PetscInt BS, PetscInt EQ>
__global__ static void d_ScatterAndOp(PetscInt bs, PetscInt count, PetscInt srcx, PetscInt srcy, PetscInt srcX, PetscInt srcY, PetscInt srcStart, const PetscInt *srcIdx, const Type *src, PetscInt dstx, PetscInt dsty, PetscInt dstX, PetscInt dstY, PetscInt dstStart, const PetscInt *dstIdx, Type *dst)
{
PetscInt i, j, k, s, t, tid = blockIdx.x * blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs / BS, MBS = M * BS;
Op op;
for (; tid < count; tid += grid_size) {
if (!srcIdx) { /* src is either contiguous or 3D */
k = tid / (srcx * srcy);
j = (tid - k * srcx * srcy) / srcx;
i = tid - k * srcx * srcy - j * srcx;
s = srcStart + k * srcX * srcY + j * srcX + i;
} else {
s = srcIdx[tid];
}
if (!dstIdx) { /* dst is either contiguous or 3D */
k = tid / (dstx * dsty);
j = (tid - k * dstx * dsty) / dstx;
i = tid - k * dstx * dsty - j * dstx;
t = dstStart + k * dstX * dstY + j * dstX + i;
} else {
t = dstIdx[tid];
}
s *= MBS;
t *= MBS;
for (i = 0; i < MBS; i++) op(dst[t + i], src[s + i]);
}
}
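/* d_ScatterAndOp handles purely local communication where both source and destination live in device memory. A side without
   an index array is either contiguous (the host wrapper then passes x=X=count, y=Y=1) or a single 3D subdomain: tid is
   decomposed into (i,j,k) using the local dx, dy extents and remapped through the enclosing X*Y strides, mirroring
   MapTidToIndex for exactly one subdomain per side. */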
template <class Type, class Op, PetscInt BS, PetscInt EQ>
__global__ static void d_FetchAndOpLocal(PetscInt bs, PetscInt count, PetscInt rootstart, const PetscInt *rootopt, const PetscInt *rootidx, Type *rootdata, PetscInt leafstart, const PetscInt *leafopt, const PetscInt *leafidx, const Type *leafdata, Type *leafupdate)
{
PetscInt i, l, r, tid = blockIdx.x * blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs / BS, MBS = M * BS;
Op op;
for (; tid < count; tid += grid_size) {
r = (rootopt ? MapTidToIndex(rootopt, tid) : (rootidx ? rootidx[tid] : rootstart + tid)) * MBS;
l = (leafopt ? MapTidToIndex(leafopt, tid) : (leafidx ? leafidx[tid] : leafstart + tid)) * MBS;
for (i = 0; i < MBS; i++) leafupdate[l + i] = op(rootdata[r + i], leafdata[l + i]);
}
}
/*====================================================================================*/
/* Regular operations on device */
/*====================================================================================*/
template <typename Type>
struct Insert {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = y;
return old;
}
};
template <typename Type>
struct Add {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x += y;
return old;
}
};
template <typename Type>
struct Mult {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x *= y;
return old;
}
};
template <typename Type>
struct Min {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = PetscMin(x, y);
return old;
}
};
template <typename Type>
struct Max {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = PetscMax(x, y);
return old;
}
};
template <typename Type>
struct LAND {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = x && y;
return old;
}
};
template <typename Type>
struct LOR {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = x || y;
return old;
}
};
template <typename Type>
struct LXOR {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = !x != !y;
return old;
}
};
template <typename Type>
struct BAND {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = x & y;
return old;
}
};
template <typename Type>
struct BOR {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = x | y;
return old;
}
};
template <typename Type>
struct BXOR {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
x = x ^ y;
return old;
}
};
template <typename Type>
struct Minloc {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
if (y.a < x.a) x = y;
else if (y.a == x.a) x.b = min(x.b, y.b);
return old;
}
};
template <typename Type>
struct Maxloc {
__device__ Type operator()(Type &x, Type y) const
{
Type old = x;
if (y.a > x.a) x = y;
else if (y.a == x.a) x.b = min(x.b, y.b); /* See MPI MAXLOC */
return old;
}
};
/*====================================================================================*/
/* Atomic operations on device */
/*====================================================================================*/
/*
Atomic Insert (exchange) operations
CUDA C Programming Guide V10.1 Chapter B.12.1.3:
int atomicExch(int* address, int val);
unsigned int atomicExch(unsigned int* address, unsigned int val);
unsigned long long int atomicExch(unsigned long long int* address, unsigned long long int val);
float atomicExch(float* address, float val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory and stores val back to memory at the same address. These two operations are
performed in one atomic transaction. The function returns old.
PETSc notes:
It may be useful in PetscSFFetchAndOp with op = MPI_REPLACE.
VecScatter with multiple entries scattered to the same location using INSERT_VALUES does not need
atomic insertion, since it does not need the old value. A 32-bit or 64-bit store instruction should
be atomic itself.
   With bs>1 and a unit > 64 bits, the current element-wise atomic approach cannot guarantee the whole
   insertion is atomic. We hope no user code relies on that.
*/
__device__ static double atomicExch(double *address, double val)
{
return __longlong_as_double(atomicExch((ullint *)address, __double_as_longlong(val)));
}
__device__ static llint atomicExch(llint *address, llint val)
{
return (llint)(atomicExch((ullint *)address, (ullint)val));
}
template <typename Type>
struct AtomicInsert {
__device__ Type operator()(Type &x, Type y) const { return atomicExch(&x, y); }
};
#if defined(PETSC_HAVE_COMPLEX)
#if defined(PETSC_USE_REAL_DOUBLE)
/* CUDA does not support 128-bit atomics. Users should not insert different 128-bit PetscComplex values to the same location */
template <>
struct AtomicInsert<PetscComplex> {
__device__ PetscComplex operator()(PetscComplex &x, PetscComplex y) const
{
PetscComplex old, *z = &old;
double *xp = (double *)&x, *yp = (double *)&y;
AtomicInsert<double> op;
z[0] = op(xp[0], yp[0]);
z[1] = op(xp[1], yp[1]);
    return old; /* The returned value may not be atomic. It can be a mix of the two ops. The caller should discard it. */
}
};
#elif defined(PETSC_USE_REAL_SINGLE)
template <>
struct AtomicInsert<PetscComplex> {
__device__ PetscComplex operator()(PetscComplex &x, PetscComplex y) const
{
double *xp = (double *)&x, *yp = (double *)&y;
AtomicInsert<double> op;
return op(xp[0], yp[0]);
}
};
#endif
#endif
/*
Atomic add operations
CUDA C Programming Guide V10.1 Chapter B.12.1.1:
int atomicAdd(int* address, int val);
unsigned int atomicAdd(unsigned int* address,unsigned int val);
unsigned long long int atomicAdd(unsigned long long int* address,unsigned long long int val);
float atomicAdd(float* address, float val);
double atomicAdd(double* address, double val);
__half2 atomicAdd(__half2 *address, __half2 val);
__half atomicAdd(__half *address, __half val);
reads the 16-bit, 32-bit or 64-bit word old located at the address address in global or shared memory, computes (old + val),
and stores the result back to memory at the same address. These three operations are performed in one atomic transaction. The
function returns old.
The 32-bit floating-point version of atomicAdd() is only supported by devices of compute capability 2.x and higher.
The 64-bit floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and higher.
The 32-bit __half2 floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and
higher. The atomicity of the __half2 add operation is guaranteed separately for each of the two __half elements;
the entire __half2 is not guaranteed to be atomic as a single 32-bit access.
The 16-bit __half floating-point version of atomicAdd() is only supported by devices of compute capability 7.x and higher.
*/
__device__ static llint atomicAdd(llint *address, llint val)
{
return (llint)atomicAdd((ullint *)address, (ullint)val);
}
template <typename Type>
struct AtomicAdd {
__device__ Type operator()(Type &x, Type y) const { return atomicAdd(&x, y); }
};
template <>
struct AtomicAdd<double> {
__device__ double operator()(double &x, double y) const
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
return atomicAdd(&x, y);
#else
double *address = &x, val = y;
ullint *address_as_ull = (ullint *)address;
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
      /* Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) */
} while (assumed != old);
return __longlong_as_double(old);
#endif
}
};
template <>
struct AtomicAdd<float> {
__device__ float operator()(float &x, float y) const
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
return atomicAdd(&x, y);
#else
float *address = &x, val = y;
int *address_as_int = (int *)address;
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val + __int_as_float(assumed)));
      /* Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) */
} while (assumed != old);
return __int_as_float(old);
#endif
}
};
#if defined(PETSC_HAVE_COMPLEX)
template <>
struct AtomicAdd<PetscComplex> {
__device__ PetscComplex operator()(PetscComplex &x, PetscComplex y) const
{
PetscComplex old, *z = &old;
PetscReal *xp = (PetscReal *)&x, *yp = (PetscReal *)&y;
AtomicAdd<PetscReal> op;
z[0] = op(xp[0], yp[0]);
z[1] = op(xp[1], yp[1]);
    return old; /* The returned value may not be atomic. It can be a mix of the two ops. The caller should discard it. */
}
};
#endif
/*
Atomic Mult operations:
CUDA has no atomicMult at all, so we build our own with atomicCAS
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMult(double *address, double val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
/* Other threads can access and modify value of *address_as_ull after the read above and before the write below */
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val * __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMult(float *address, float val)
{
int *address_as_int = (int *)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val * __int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
}
#endif
__device__ static int atomicMult(int *address, int val)
{
int *address_as_int = (int *)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, val * assumed);
} while (assumed != old);
return (int)old;
}
__device__ static llint atomicMult(llint *address, llint val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val * (llint)assumed));
} while (assumed != old);
return (llint)old;
}
template <typename Type>
struct AtomicMult {
__device__ Type operator()(Type &x, Type y) const { return atomicMult(&x, y); }
};
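/* All of the emulated atomics above (and the Min/Max and logical ones below) follow the same pattern: read the current value,
   compute the new value in registers, then attempt an atomicCAS that succeeds only if the location still holds the value that
   was read, retrying with the freshly returned value otherwise. The bit-level casts (__double_as_longlong etc.) let atomicCAS,
   which exists only for integer widths, serve floating-point types. */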
/*
Atomic Min/Max operations
CUDA C Programming Guide V10.1 Chapter B.12.1.4~5:
int atomicMin(int* address, int val);
unsigned int atomicMin(unsigned int* address,unsigned int val);
unsigned long long int atomicMin(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes the minimum of old and val, and stores the result back to memory
at the same address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicMin() is only supported by devices of compute capability 3.5 and higher.
atomicMax() is similar.
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMin(double *address, double val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMin(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ static double atomicMax(double *address, double val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMax(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMin(float *address, float val)
{
int *address_as_int = (int *)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMin(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ static float atomicMax(float *address, float val)
{
int *address_as_int = (int *)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMax(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
#endif
/*
atomicMin/Max(long long *, long long) are not in Nvidia's documentation. But on OLCF Summit we found
atomicMin/Max/And/Or/Xor(long long *, long long) in /sw/summit/cuda/10.1.243/include/sm_32_atomic_functions.h.
This causes compilation errors with pgi compilers and 64-bit indices:
error: function "atomicMin(long long *, long long)" has already been defined
So we add extra conditions defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320)
*/
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320)
__device__ static llint atomicMin(llint *address, llint val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(PetscMin(val, (llint)assumed)));
} while (assumed != old);
return (llint)old;
}
__device__ static llint atomicMax(llint *address, llint val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(PetscMax(val, (llint)assumed)));
} while (assumed != old);
return (llint)old;
}
#endif
template <typename Type>
struct AtomicMin {
__device__ Type operator()(Type &x, Type y) const { return atomicMin(&x, y); }
};
template <typename Type>
struct AtomicMax {
__device__ Type operator()(Type &x, Type y) const { return atomicMax(&x, y); }
};
/*
Atomic bitwise operations
CUDA C Programming Guide V10.1 Chapter B.12.2.1 ~ B.12.2.3:
int atomicAnd(int* address, int val);
unsigned int atomicAnd(unsigned int* address,unsigned int val);
unsigned long long int atomicAnd(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes (old & val), and stores the result back to memory at the same
address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicAnd() is only supported by devices of compute capability 3.5 and higher.
atomicOr() and atomicXor are similar.
*/
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320) /* Why 320? see comments at atomicMin() above */
__device__ static llint atomicAnd(llint *address, llint val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val & (llint)assumed));
} while (assumed != old);
return (llint)old;
}
__device__ static llint atomicOr(llint *address, llint val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val | (llint)assumed));
} while (assumed != old);
return (llint)old;
}
__device__ static llint atomicXor(llint *address, llint val)
{
ullint *address_as_ull = (ullint *)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val ^ (llint)assumed));
} while (assumed != old);
return (llint)old;
}
#endif
template <typename Type>
struct AtomicBAND {
__device__ Type operator()(Type &x, Type y) const { return atomicAnd(&x, y); }
};
template <typename Type>
struct AtomicBOR {
__device__ Type operator()(Type &x, Type y) const { return atomicOr(&x, y); }
};
template <typename Type>
struct AtomicBXOR {
__device__ Type operator()(Type &x, Type y) const { return atomicXor(&x, y); }
};
/*
Atomic logical operations:
CUDA has no atomic logical operations at all. We support them on integer types.
*/
/* A template without definition makes any instantiation not using given specializations erroneous at compile time,
which is what we want since we only support 32-bit and 64-bit integers.
*/
template <typename Type, class Op, int size /* sizeof(Type) */>
struct AtomicLogical;
template <typename Type, class Op>
struct AtomicLogical<Type, Op, 4> {
__device__ Type operator()(Type &x, Type y) const
{
int *address_as_int = (int *)(&x);
int old = *address_as_int, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, (int)(op((Type)assumed, y)));
} while (assumed != old);
return (Type)old;
}
};
template <typename Type, class Op>
struct AtomicLogical<Type, Op, 8> {
__device__ Type operator()(Type &x, Type y) const
{
ullint *address_as_ull = (ullint *)(&x);
ullint old = *address_as_ull, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(op((Type)assumed, y)));
} while (assumed != old);
return (Type)old;
}
};
/* Note land/lor/lxor below are different from LAND etc. above. Here we pass arguments by value and return the result of the op (not the old value) */
template <typename Type>
struct land {
__device__ Type operator()(Type x, Type y) { return x && y; }
};
template <typename Type>
struct lor {
__device__ Type operator()(Type x, Type y) { return x || y; }
};
template <typename Type>
struct lxor {
__device__ Type operator()(Type x, Type y) { return (!x != !y); }
};
template <typename Type>
struct AtomicLAND {
__device__ Type operator()(Type &x, Type y) const
{
AtomicLogical<Type, land<Type>, sizeof(Type)> op;
return op(x, y);
}
};
template <typename Type>
struct AtomicLOR {
__device__ Type operator()(Type &x, Type y) const
{
AtomicLogical<Type, lor<Type>, sizeof(Type)> op;
return op(x, y);
}
};
template <typename Type>
struct AtomicLXOR {
__device__ Type operator()(Type &x, Type y) const
{
AtomicLogical<Type, lxor<Type>, sizeof(Type)> op;
return op(x, y);
}
};
/*====================================================================================*/
/* Wrapper functions of cuda kernels. Function pointers are stored in 'link' */
/*====================================================================================*/
template <typename Type, PetscInt BS, PetscInt EQ>
static PetscErrorCode Pack(PetscSFLink link, PetscInt count, PetscInt start, PetscSFPackOpt opt, const PetscInt *idx, const void *data, void *buf)
{
PetscInt nthreads = 256;
PetscInt nblocks = (count + nthreads - 1) / nthreads;
const PetscInt *iarray = opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(PETSC_SUCCESS);
if (!opt && !idx) { /* It is a 'CUDA data to nvshmem buf' memory copy */
PetscCallCUDA(cudaMemcpyAsync(buf, (char *)data + start * link->unitbytes, count * link->unitbytes, cudaMemcpyDeviceToDevice, link->stream));
} else {
nblocks = PetscMin(nblocks, link->maxResidentThreadsPerGPU / nthreads);
d_Pack<Type, BS, EQ><<<nblocks, nthreads, 0, link->stream>>>(link->bs, count, start, iarray, idx, (const Type *)data, (Type *)buf);
PetscCallCUDA(cudaGetLastError());
}
PetscFunctionReturn(PETSC_SUCCESS);
}
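/* Note on launch configuration: nblocks is capped at maxResidentThreadsPerGPU/nthreads, i.e. roughly one full wave of resident
   blocks. Since every kernel in this file uses a grid-stride loop, a smaller grid still covers all 'count' entries; the cap
   only avoids launching far more blocks than the device can keep resident. The same capping is applied in the wrappers below. */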
/* This specializes UnpackAndOp (with Insert) so that it can use the cudaMemcpyAsync() below. Usually, if the copy is
   contiguous, we use root/leafdirect and do not need UnpackAndOp; only with nvshmem do we need this 'nvshmem buf to CUDA data'
   memory copy.
*/
template <typename Type, PetscInt BS, PetscInt EQ>
static PetscErrorCode Unpack(PetscSFLink link, PetscInt count, PetscInt start, PetscSFPackOpt opt, const PetscInt *idx, void *data, const void *buf)
{
PetscInt nthreads = 256;
PetscInt nblocks = (count + nthreads - 1) / nthreads;
const PetscInt *iarray = opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(PETSC_SUCCESS);
if (!opt && !idx) { /* It is a 'nvshmem buf to CUDA data' memory copy */
PetscCallCUDA(cudaMemcpyAsync((char *)data + start * link->unitbytes, buf, count * link->unitbytes, cudaMemcpyDeviceToDevice, link->stream));
} else {
nblocks = PetscMin(nblocks, link->maxResidentThreadsPerGPU / nthreads);
d_UnpackAndOp<Type, Insert<Type>, BS, EQ><<<nblocks, nthreads, 0, link->stream>>>(link->bs, count, start, iarray, idx, (Type *)data, (const Type *)buf);
PetscCallCUDA(cudaGetLastError());
}
PetscFunctionReturn(PETSC_SUCCESS);
}
template <typename Type, class Op, PetscInt BS, PetscInt EQ>
static PetscErrorCode UnpackAndOp(PetscSFLink link, PetscInt count, PetscInt start, PetscSFPackOpt opt, const PetscInt *idx, void *data, const void *buf)
{
PetscInt nthreads = 256;
PetscInt nblocks = (count + nthreads - 1) / nthreads;
const PetscInt *iarray = opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(PETSC_SUCCESS);
nblocks = PetscMin(nblocks, link->maxResidentThreadsPerGPU / nthreads);
d_UnpackAndOp<Type, Op, BS, EQ><<<nblocks, nthreads, 0, link->stream>>>(link->bs, count, start, iarray, idx, (Type *)data, (const Type *)buf);
PetscCallCUDA(cudaGetLastError());
PetscFunctionReturn(PETSC_SUCCESS);
}
template <typename Type, class Op, PetscInt BS, PetscInt EQ>
static PetscErrorCode FetchAndOp(PetscSFLink link, PetscInt count, PetscInt start, PetscSFPackOpt opt, const PetscInt *idx, void *data, void *buf)
{
PetscInt nthreads = 256;
PetscInt nblocks = (count + nthreads - 1) / nthreads;
const PetscInt *iarray = opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(PETSC_SUCCESS);
nblocks = PetscMin(nblocks, link->maxResidentThreadsPerGPU / nthreads);
d_FetchAndOp<Type, Op, BS, EQ><<<nblocks, nthreads, 0, link->stream>>>(link->bs, count, start, iarray, idx, (Type *)data, (Type *)buf);
PetscCallCUDA(cudaGetLastError());
PetscFunctionReturn(PETSC_SUCCESS);
}
template <typename Type, class Op, PetscInt BS, PetscInt EQ>
static PetscErrorCode ScatterAndOp(PetscSFLink link, PetscInt count, PetscInt srcStart, PetscSFPackOpt srcOpt, const PetscInt *srcIdx, const void *src, PetscInt dstStart, PetscSFPackOpt dstOpt, const PetscInt *dstIdx, void *dst)
{
PetscInt nthreads = 256;
PetscInt nblocks = (count + nthreads - 1) / nthreads;
PetscInt srcx = 0, srcy = 0, srcX = 0, srcY = 0, dstx = 0, dsty = 0, dstX = 0, dstY = 0;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(PETSC_SUCCESS);
nblocks = PetscMin(nblocks, link->maxResidentThreadsPerGPU / nthreads);
  /* The 3D shape of the source subdomain may be different from that of the destination, which makes it difficult to use a CUDA 3D grid and block */
if (srcOpt) {
srcx = srcOpt->dx[0];
srcy = srcOpt->dy[0];
srcX = srcOpt->X[0];
srcY = srcOpt->Y[0];
srcStart = srcOpt->start[0];
srcIdx = NULL;
} else if (!srcIdx) {
srcx = srcX = count;
srcy = srcY = 1;
}
if (dstOpt) {
dstx = dstOpt->dx[0];
dsty = dstOpt->dy[0];
dstX = dstOpt->X[0];
dstY = dstOpt->Y[0];
dstStart = dstOpt->start[0];
dstIdx = NULL;
} else if (!dstIdx) {
dstx = dstX = count;
dsty = dstY = 1;
}
d_ScatterAndOp<Type, Op, BS, EQ><<<nblocks, nthreads, 0, link->stream>>>(link->bs, count, srcx, srcy, srcX, srcY, srcStart, srcIdx, (const Type *)src, dstx, dsty, dstX, dstY, dstStart, dstIdx, (Type *)dst);
PetscCallCUDA(cudaGetLastError());
PetscFunctionReturn(PETSC_SUCCESS);
}
/* Specialization for Insert since we may use cudaMemcpyAsync */
template <typename Type, PetscInt BS, PetscInt EQ>
static PetscErrorCode ScatterAndInsert(PetscSFLink link, PetscInt count, PetscInt srcStart, PetscSFPackOpt srcOpt, const PetscInt *srcIdx, const void *src, PetscInt dstStart, PetscSFPackOpt dstOpt, const PetscInt *dstIdx, void *dst)
{
PetscFunctionBegin;
if (!count) PetscFunctionReturn(PETSC_SUCCESS);
/*src and dst are contiguous */
if ((!srcOpt && !srcIdx) && (!dstOpt && !dstIdx) && src != dst) {
PetscCallCUDA(cudaMemcpyAsync((Type *)dst + dstStart * link->bs, (const Type *)src + srcStart * link->bs, count * link->unitbytes, cudaMemcpyDeviceToDevice, link->stream));
} else {
PetscCall(ScatterAndOp<Type, Insert<Type>, BS, EQ>(link, count, srcStart, srcOpt, srcIdx, src, dstStart, dstOpt, dstIdx, dst));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
template <typename Type, class Op, PetscInt BS, PetscInt EQ>
static PetscErrorCode FetchAndOpLocal(PetscSFLink link, PetscInt count, PetscInt rootstart, PetscSFPackOpt rootopt, const PetscInt *rootidx, void *rootdata, PetscInt leafstart, PetscSFPackOpt leafopt, const PetscInt *leafidx, const void *leafdata, void *leafupdate)
{
PetscInt nthreads = 256;
PetscInt nblocks = (count + nthreads - 1) / nthreads;
const PetscInt *rarray = rootopt ? rootopt->array : NULL;
const PetscInt *larray = leafopt ? leafopt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(PETSC_SUCCESS);
nblocks = PetscMin(nblocks, link->maxResidentThreadsPerGPU / nthreads);
d_FetchAndOpLocal<Type, Op, BS, EQ><<<nblocks, nthreads, 0, link->stream>>>(link->bs, count, rootstart, rarray, rootidx, (Type *)rootdata, leafstart, larray, leafidx, (const Type *)leafdata, (Type *)leafupdate);
PetscCallCUDA(cudaGetLastError());
PetscFunctionReturn(PETSC_SUCCESS);
}
/*====================================================================================*/
/* Init various types and instantiate pack/unpack function pointers */
/*====================================================================================*/
template <typename Type, PetscInt BS, PetscInt EQ>
static void PackInit_RealType(PetscSFLink link)
{
/* Pack/unpack for remote communication */
link->d_Pack = Pack<Type, BS, EQ>;
link->d_UnpackAndInsert = Unpack<Type, BS, EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type, Add<Type>, BS, EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type, Mult<Type>, BS, EQ>;
link->d_UnpackAndMin = UnpackAndOp<Type, Min<Type>, BS, EQ>;
link->d_UnpackAndMax = UnpackAndOp<Type, Max<Type>, BS, EQ>;
link->d_FetchAndAdd = FetchAndOp<Type, Add<Type>, BS, EQ>;
/* Scatter for local communication */
link->d_ScatterAndInsert = ScatterAndInsert<Type, BS, EQ>; /* Has special optimizations */
link->d_ScatterAndAdd = ScatterAndOp<Type, Add<Type>, BS, EQ>;
link->d_ScatterAndMult = ScatterAndOp<Type, Mult<Type>, BS, EQ>;
link->d_ScatterAndMin = ScatterAndOp<Type, Min<Type>, BS, EQ>;
link->d_ScatterAndMax = ScatterAndOp<Type, Max<Type>, BS, EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal<Type, Add<Type>, BS, EQ>;
/* Atomic versions when there are data-race possibilities */
link->da_UnpackAndInsert = UnpackAndOp<Type, AtomicInsert<Type>, BS, EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type, AtomicAdd<Type>, BS, EQ>;
link->da_UnpackAndMult = UnpackAndOp<Type, AtomicMult<Type>, BS, EQ>;
link->da_UnpackAndMin = UnpackAndOp<Type, AtomicMin<Type>, BS, EQ>;
link->da_UnpackAndMax = UnpackAndOp<Type, AtomicMax<Type>, BS, EQ>;
link->da_FetchAndAdd = FetchAndOp<Type, AtomicAdd<Type>, BS, EQ>;
link->da_ScatterAndInsert = ScatterAndOp<Type, AtomicInsert<Type>, BS, EQ>;
link->da_ScatterAndAdd = ScatterAndOp<Type, AtomicAdd<Type>, BS, EQ>;
link->da_ScatterAndMult = ScatterAndOp<Type, AtomicMult<Type>, BS, EQ>;
link->da_ScatterAndMin = ScatterAndOp<Type, AtomicMin<Type>, BS, EQ>;
link->da_ScatterAndMax = ScatterAndOp<Type, AtomicMax<Type>, BS, EQ>;
link->da_FetchAndAddLocal = FetchAndOpLocal<Type, AtomicAdd<Type>, BS, EQ>;
}
/* Have this templated class to specialize for char integers */
template <typename Type, PetscInt BS, PetscInt EQ, PetscInt size /*sizeof(Type)*/>
struct PackInit_IntegerType_Atomic {
static void Init(PetscSFLink link)
{
link->da_UnpackAndInsert = UnpackAndOp<Type, AtomicInsert<Type>, BS, EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type, AtomicAdd<Type>, BS, EQ>;
link->da_UnpackAndMult = UnpackAndOp<Type, AtomicMult<Type>, BS, EQ>;
link->da_UnpackAndMin = UnpackAndOp<Type, AtomicMin<Type>, BS, EQ>;
link->da_UnpackAndMax = UnpackAndOp<Type, AtomicMax<Type>, BS, EQ>;
link->da_UnpackAndLAND = UnpackAndOp<Type, AtomicLAND<Type>, BS, EQ>;
link->da_UnpackAndLOR = UnpackAndOp<Type, AtomicLOR<Type>, BS, EQ>;
link->da_UnpackAndLXOR = UnpackAndOp<Type, AtomicLXOR<Type>, BS, EQ>;
link->da_UnpackAndBAND = UnpackAndOp<Type, AtomicBAND<Type>, BS, EQ>;
link->da_UnpackAndBOR = UnpackAndOp<Type, AtomicBOR<Type>, BS, EQ>;
link->da_UnpackAndBXOR = UnpackAndOp<Type, AtomicBXOR<Type>, BS, EQ>;
link->da_FetchAndAdd = FetchAndOp<Type, AtomicAdd<Type>, BS, EQ>;
link->da_ScatterAndInsert = ScatterAndOp<Type, AtomicInsert<Type>, BS, EQ>;
link->da_ScatterAndAdd = ScatterAndOp<Type, AtomicAdd<Type>, BS, EQ>;
link->da_ScatterAndMult = ScatterAndOp<Type, AtomicMult<Type>, BS, EQ>;
link->da_ScatterAndMin = ScatterAndOp<Type, AtomicMin<Type>, BS, EQ>;
link->da_ScatterAndMax = ScatterAndOp<Type, AtomicMax<Type>, BS, EQ>;
link->da_ScatterAndLAND = ScatterAndOp<Type, AtomicLAND<Type>, BS, EQ>;
link->da_ScatterAndLOR = ScatterAndOp<Type, AtomicLOR<Type>, BS, EQ>;
link->da_ScatterAndLXOR = ScatterAndOp<Type, AtomicLXOR<Type>, BS, EQ>;
link->da_ScatterAndBAND = ScatterAndOp<Type, AtomicBAND<Type>, BS, EQ>;
link->da_ScatterAndBOR = ScatterAndOp<Type, AtomicBOR<Type>, BS, EQ>;
link->da_ScatterAndBXOR = ScatterAndOp<Type, AtomicBXOR<Type>, BS, EQ>;
link->da_FetchAndAddLocal = FetchAndOpLocal<Type, AtomicAdd<Type>, BS, EQ>;
}
};
/* CUDA does not support atomics on chars. It is TBD in PETSc. */
template <typename Type, PetscInt BS, PetscInt EQ>
struct PackInit_IntegerType_Atomic<Type, BS, EQ, 1> {
static void Init(PetscSFLink)
  { /* Nothing to do; leave function pointers NULL */
}
};
template <typename Type, PetscInt BS, PetscInt EQ>
static void PackInit_IntegerType(PetscSFLink link)
{
link->d_Pack = Pack<Type, BS, EQ>;
link->d_UnpackAndInsert = Unpack<Type, BS, EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type, Add<Type>, BS, EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type, Mult<Type>, BS, EQ>;
link->d_UnpackAndMin = UnpackAndOp<Type, Min<Type>, BS, EQ>;
link->d_UnpackAndMax = UnpackAndOp<Type, Max<Type>, BS, EQ>;
link->d_UnpackAndLAND = UnpackAndOp<Type, LAND<Type>, BS, EQ>;
link->d_UnpackAndLOR = UnpackAndOp<Type, LOR<Type>, BS, EQ>;
link->d_UnpackAndLXOR = UnpackAndOp<Type, LXOR<Type>, BS, EQ>;
link->d_UnpackAndBAND = UnpackAndOp<Type, BAND<Type>, BS, EQ>;
link->d_UnpackAndBOR = UnpackAndOp<Type, BOR<Type>, BS, EQ>;
link->d_UnpackAndBXOR = UnpackAndOp<Type, BXOR<Type>, BS, EQ>;
link->d_FetchAndAdd = FetchAndOp<Type, Add<Type>, BS, EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type, BS, EQ>;
link->d_ScatterAndAdd = ScatterAndOp<Type, Add<Type>, BS, EQ>;
link->d_ScatterAndMult = ScatterAndOp<Type, Mult<Type>, BS, EQ>;
link->d_ScatterAndMin = ScatterAndOp<Type, Min<Type>, BS, EQ>;
link->d_ScatterAndMax = ScatterAndOp<Type, Max<Type>, BS, EQ>;
link->d_ScatterAndLAND = ScatterAndOp<Type, LAND<Type>, BS, EQ>;
link->d_ScatterAndLOR = ScatterAndOp<Type, LOR<Type>, BS, EQ>;
link->d_ScatterAndLXOR = ScatterAndOp<Type, LXOR<Type>, BS, EQ>;
link->d_ScatterAndBAND = ScatterAndOp<Type, BAND<Type>, BS, EQ>;
link->d_ScatterAndBOR = ScatterAndOp<Type, BOR<Type>, BS, EQ>;
link->d_ScatterAndBXOR = ScatterAndOp<Type, BXOR<Type>, BS, EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal<Type, Add<Type>, BS, EQ>;
PackInit_IntegerType_Atomic<Type, BS, EQ, sizeof(Type)>::Init(link);
}
#if defined(PETSC_HAVE_COMPLEX)
template <typename Type, PetscInt BS, PetscInt EQ>
static void PackInit_ComplexType(PetscSFLink link)
{
link->d_Pack = Pack<Type, BS, EQ>;
link->d_UnpackAndInsert = Unpack<Type, BS, EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type, Add<Type>, BS, EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type, Mult<Type>, BS, EQ>;
link->d_FetchAndAdd = FetchAndOp<Type, Add<Type>, BS, EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type, BS, EQ>;
link->d_ScatterAndAdd = ScatterAndOp<Type, Add<Type>, BS, EQ>;
link->d_ScatterAndMult = ScatterAndOp<Type, Mult<Type>, BS, EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal<Type, Add<Type>, BS, EQ>;
link->da_UnpackAndInsert = UnpackAndOp<Type, AtomicInsert<Type>, BS, EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type, AtomicAdd<Type>, BS, EQ>;
link->da_UnpackAndMult = NULL; /* Not implemented yet */
link->da_FetchAndAdd = NULL; /* Return value of atomicAdd on complex is not atomic */
link->da_ScatterAndInsert = ScatterAndOp<Type, AtomicInsert<Type>, BS, EQ>;
link->da_ScatterAndAdd = ScatterAndOp<Type, AtomicAdd<Type>, BS, EQ>;
}
#endif
typedef signed char SignedChar;
typedef unsigned char UnsignedChar;
typedef struct {
int a;
int b;
} PairInt;
typedef struct {
PetscInt a;
PetscInt b;
} PairPetscInt;
template <typename Type>
static void PackInit_PairType(PetscSFLink link)
{
link->d_Pack = Pack<Type, 1, 1>;
link->d_UnpackAndInsert = Unpack<Type, 1, 1>;
link->d_UnpackAndMaxloc = UnpackAndOp<Type, Maxloc<Type>, 1, 1>;
link->d_UnpackAndMinloc = UnpackAndOp<Type, Minloc<Type>, 1, 1>;
link->d_ScatterAndInsert = ScatterAndOp<Type, Insert<Type>, 1, 1>;
link->d_ScatterAndMaxloc = ScatterAndOp<Type, Maxloc<Type>, 1, 1>;
link->d_ScatterAndMinloc = ScatterAndOp<Type, Minloc<Type>, 1, 1>;
/* Atomics for pair types are not implemented yet */
}
template <typename Type, PetscInt BS, PetscInt EQ>
static void PackInit_DumbType(PetscSFLink link)
{
link->d_Pack = Pack<Type, BS, EQ>;
link->d_UnpackAndInsert = Unpack<Type, BS, EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type, BS, EQ>;
/* Atomics for dumb types are not implemented yet */
}
/* Some device-specific utilities */
static PetscErrorCode PetscSFLinkSyncDevice_CUDA(PetscSFLink)
{
PetscFunctionBegin;
PetscCallCUDA(cudaDeviceSynchronize());
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode PetscSFLinkSyncStream_CUDA(PetscSFLink link)
{
PetscFunctionBegin;
PetscCallCUDA(cudaStreamSynchronize(link->stream));
PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode PetscSFLinkMemcpy_CUDA(PetscSFLink link, PetscMemType dstmtype, void *dst, PetscMemType srcmtype, const void *src, size_t n)
{
PetscFunctionBegin;
enum cudaMemcpyKind kinds[2][2] = {
{cudaMemcpyHostToHost, cudaMemcpyHostToDevice },
{cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice}
};
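  /* kinds[][] is indexed as kinds[src][dst] with 0 = host and 1 = device; e.g. stype=1 (device source) and dtype=0 (host
     destination) selects cudaMemcpyDeviceToHost. */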
if (n) {
if (PetscMemTypeHost(dstmtype) && PetscMemTypeHost(srcmtype)) { /* Separate HostToHost so that pure-cpu code won't call cuda runtime */
PetscCall(PetscMemcpy(dst, src, n));
} else {
int stype = PetscMemTypeDevice(srcmtype) ? 1 : 0;
int dtype = PetscMemTypeDevice(dstmtype) ? 1 : 0;
PetscCallCUDA(cudaMemcpyAsync(dst, src, n, kinds[stype][dtype], link->stream));
}
}
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode PetscSFMalloc_CUDA(PetscMemType mtype, size_t size, void **ptr)
{
PetscFunctionBegin;
if (PetscMemTypeHost(mtype)) PetscCall(PetscMalloc(size, ptr));
else if (PetscMemTypeDevice(mtype)) {
PetscCall(PetscDeviceInitialize(PETSC_DEVICE_CUDA));
PetscCallCUDA(cudaMalloc(ptr, size));
} else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Wrong PetscMemType %d", (int)mtype);
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode PetscSFFree_CUDA(PetscMemType mtype, void *ptr)
{
PetscFunctionBegin;
if (PetscMemTypeHost(mtype)) PetscCall(PetscFree(ptr));
else if (PetscMemTypeDevice(mtype)) PetscCallCUDA(cudaFree(ptr));
else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Wrong PetscMemType %d", (int)mtype);
PetscFunctionReturn(PETSC_SUCCESS);
}
/* Destructor when the link uses MPI for communication on CUDA device */
static PetscErrorCode PetscSFLinkDestroy_MPI_CUDA(PetscSF, PetscSFLink link)
{
PetscFunctionBegin;
for (int i = PETSCSF_LOCAL; i <= PETSCSF_REMOTE; i++) {
PetscCallCUDA(cudaFree(link->rootbuf_alloc[i][PETSC_MEMTYPE_DEVICE]));
PetscCallCUDA(cudaFree(link->leafbuf_alloc[i][PETSC_MEMTYPE_DEVICE]));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
/* Some fields of link are initialized by PetscSFPackSetUp_Host. This routine only does what is needed on the device */
PetscErrorCode PetscSFLinkSetUp_CUDA(PetscSF sf, PetscSFLink link, MPI_Datatype unit)
{
PetscInt nSignedChar = 0, nUnsignedChar = 0, nInt = 0, nPetscInt = 0, nPetscReal = 0;
PetscBool is2Int, is2PetscInt;
#if defined(PETSC_HAVE_COMPLEX)
PetscInt nPetscComplex = 0;
#endif
PetscFunctionBegin;
if (link->deviceinited) PetscFunctionReturn(PETSC_SUCCESS);
PetscCall(MPIPetsc_Type_compare_contig(unit, MPI_SIGNED_CHAR, &nSignedChar));
PetscCall(MPIPetsc_Type_compare_contig(unit, MPI_UNSIGNED_CHAR, &nUnsignedChar));
/* MPI_CHAR is treated below as a dumb type that does not support reduction according to MPI standard */
PetscCall(MPIPetsc_Type_compare_contig(unit, MPI_INT, &nInt));
PetscCall(MPIPetsc_Type_compare_contig(unit, MPIU_INT, &nPetscInt));
PetscCall(MPIPetsc_Type_compare_contig(unit, MPIU_REAL, &nPetscReal));
#if defined(PETSC_HAVE_COMPLEX)
PetscCall(MPIPetsc_Type_compare_contig(unit, MPIU_COMPLEX, &nPetscComplex));
#endif
PetscCall(MPIPetsc_Type_compare(unit, MPI_2INT, &is2Int));
PetscCall(MPIPetsc_Type_compare(unit, MPIU_2INT, &is2PetscInt));
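  /* The ladder below picks the primitive type and the (BS, EQ) template parameters: BS is the largest of 8/4/2/1 that divides
     the unit's element count, and EQ=1 only when the count equals BS exactly, which turns the per-unit inner loop in the
     kernels into a compile-time constant. When PETSC_HAVE_DEVICE is defined the specialized branches are preprocessed away and
     only the generic <Type,1,0> instantiations are compiled, presumably to limit the number of template instantiations. */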
if (is2Int) {
PackInit_PairType<PairInt>(link);
} else if (is2PetscInt) { /* TODO: when is2PetscInt and nPetscInt=2, we don't know which path to take. The two paths support different ops. */
PackInit_PairType<PairPetscInt>(link);
} else if (nPetscReal) {
#if !defined(PETSC_HAVE_DEVICE)
if (nPetscReal == 8) PackInit_RealType<PetscReal, 8, 1>(link);
else if (nPetscReal % 8 == 0) PackInit_RealType<PetscReal, 8, 0>(link);
else if (nPetscReal == 4) PackInit_RealType<PetscReal, 4, 1>(link);
else if (nPetscReal % 4 == 0) PackInit_RealType<PetscReal, 4, 0>(link);
else if (nPetscReal == 2) PackInit_RealType<PetscReal, 2, 1>(link);
else if (nPetscReal % 2 == 0) PackInit_RealType<PetscReal, 2, 0>(link);
else if (nPetscReal == 1) PackInit_RealType<PetscReal, 1, 1>(link);
else if (nPetscReal % 1 == 0)
#endif
PackInit_RealType<PetscReal, 1, 0>(link);
} else if (nPetscInt && sizeof(PetscInt) == sizeof(llint)) {
#if !defined(PETSC_HAVE_DEVICE)
if (nPetscInt == 8) PackInit_IntegerType<llint, 8, 1>(link);
else if (nPetscInt % 8 == 0) PackInit_IntegerType<llint, 8, 0>(link);
else if (nPetscInt == 4) PackInit_IntegerType<llint, 4, 1>(link);
else if (nPetscInt % 4 == 0) PackInit_IntegerType<llint, 4, 0>(link);
else if (nPetscInt == 2) PackInit_IntegerType<llint, 2, 1>(link);
else if (nPetscInt % 2 == 0) PackInit_IntegerType<llint, 2, 0>(link);
else if (nPetscInt == 1) PackInit_IntegerType<llint, 1, 1>(link);
else if (nPetscInt % 1 == 0)
#endif
PackInit_IntegerType<llint, 1, 0>(link);
} else if (nInt) {
#if !defined(PETSC_HAVE_DEVICE)
if (nInt == 8) PackInit_IntegerType<int, 8, 1>(link);
else if (nInt % 8 == 0) PackInit_IntegerType<int, 8, 0>(link);
else if (nInt == 4) PackInit_IntegerType<int, 4, 1>(link);
else if (nInt % 4 == 0) PackInit_IntegerType<int, 4, 0>(link);
else if (nInt == 2) PackInit_IntegerType<int, 2, 1>(link);
else if (nInt % 2 == 0) PackInit_IntegerType<int, 2, 0>(link);
else if (nInt == 1) PackInit_IntegerType<int, 1, 1>(link);
else if (nInt % 1 == 0)
#endif
PackInit_IntegerType<int, 1, 0>(link);
} else if (nSignedChar) {
#if !defined(PETSC_HAVE_DEVICE)
if (nSignedChar == 8) PackInit_IntegerType<SignedChar, 8, 1>(link);
else if (nSignedChar % 8 == 0) PackInit_IntegerType<SignedChar, 8, 0>(link);
else if (nSignedChar == 4) PackInit_IntegerType<SignedChar, 4, 1>(link);
else if (nSignedChar % 4 == 0) PackInit_IntegerType<SignedChar, 4, 0>(link);
else if (nSignedChar == 2) PackInit_IntegerType<SignedChar, 2, 1>(link);
else if (nSignedChar % 2 == 0) PackInit_IntegerType<SignedChar, 2, 0>(link);
else if (nSignedChar == 1) PackInit_IntegerType<SignedChar, 1, 1>(link);
else if (nSignedChar % 1 == 0)
#endif
PackInit_IntegerType<SignedChar, 1, 0>(link);
} else if (nUnsignedChar) {
#if !defined(PETSC_HAVE_DEVICE)
if (nUnsignedChar == 8) PackInit_IntegerType<UnsignedChar, 8, 1>(link);
else if (nUnsignedChar % 8 == 0) PackInit_IntegerType<UnsignedChar, 8, 0>(link);
else if (nUnsignedChar == 4) PackInit_IntegerType<UnsignedChar, 4, 1>(link);
else if (nUnsignedChar % 4 == 0) PackInit_IntegerType<UnsignedChar, 4, 0>(link);
else if (nUnsignedChar == 2) PackInit_IntegerType<UnsignedChar, 2, 1>(link);
else if (nUnsignedChar % 2 == 0) PackInit_IntegerType<UnsignedChar, 2, 0>(link);
else if (nUnsignedChar == 1) PackInit_IntegerType<UnsignedChar, 1, 1>(link);
else if (nUnsignedChar % 1 == 0)
#endif
PackInit_IntegerType<UnsignedChar, 1, 0>(link);
#if defined(PETSC_HAVE_COMPLEX)
} else if (nPetscComplex) {
#if !defined(PETSC_HAVE_DEVICE)
if (nPetscComplex == 8) PackInit_ComplexType<PetscComplex, 8, 1>(link);
else if (nPetscComplex % 8 == 0) PackInit_ComplexType<PetscComplex, 8, 0>(link);
else if (nPetscComplex == 4) PackInit_ComplexType<PetscComplex, 4, 1>(link);
else if (nPetscComplex % 4 == 0) PackInit_ComplexType<PetscComplex, 4, 0>(link);
else if (nPetscComplex == 2) PackInit_ComplexType<PetscComplex, 2, 1>(link);
else if (nPetscComplex % 2 == 0) PackInit_ComplexType<PetscComplex, 2, 0>(link);
else if (nPetscComplex == 1) PackInit_ComplexType<PetscComplex, 1, 1>(link);
else if (nPetscComplex % 1 == 0)
#endif
PackInit_ComplexType<PetscComplex, 1, 0>(link);
#endif
} else {
MPI_Aint lb, nbyte;
PetscCallMPI(MPI_Type_get_extent(unit, &lb, &nbyte));
PetscCheck(lb == 0, PETSC_COMM_SELF, PETSC_ERR_SUP, "Datatype with nonzero lower bound %ld", (long)lb);
    if (nbyte % sizeof(int)) { /* If the type size is not a multiple of int */
#if !defined(PETSC_HAVE_DEVICE)
if (nbyte == 4) PackInit_DumbType<char, 4, 1>(link);
else if (nbyte % 4 == 0) PackInit_DumbType<char, 4, 0>(link);
else if (nbyte == 2) PackInit_DumbType<char, 2, 1>(link);
else if (nbyte % 2 == 0) PackInit_DumbType<char, 2, 0>(link);
else if (nbyte == 1) PackInit_DumbType<char, 1, 1>(link);
else if (nbyte % 1 == 0)
#endif
PackInit_DumbType<char, 1, 0>(link);
} else {
nInt = nbyte / sizeof(int);
#if !defined(PETSC_HAVE_DEVICE)
if (nInt == 8) PackInit_DumbType<int, 8, 1>(link);
else if (nInt % 8 == 0) PackInit_DumbType<int, 8, 0>(link);
else if (nInt == 4) PackInit_DumbType<int, 4, 1>(link);
else if (nInt % 4 == 0) PackInit_DumbType<int, 4, 0>(link);
else if (nInt == 2) PackInit_DumbType<int, 2, 1>(link);
else if (nInt % 2 == 0) PackInit_DumbType<int, 2, 0>(link);
else if (nInt == 1) PackInit_DumbType<int, 1, 1>(link);
else if (nInt % 1 == 0)
#endif
PackInit_DumbType<int, 1, 0>(link);
}
}
if (!sf->maxResidentThreadsPerGPU) { /* Not initialized */
int device;
struct cudaDeviceProp props;
PetscCallCUDA(cudaGetDevice(&device));
PetscCallCUDA(cudaGetDeviceProperties(&props, device));
sf->maxResidentThreadsPerGPU = props.maxThreadsPerMultiProcessor * props.multiProcessorCount;
}
link->maxResidentThreadsPerGPU = sf->maxResidentThreadsPerGPU;
link->stream = PetscDefaultCudaStream;
link->Destroy = PetscSFLinkDestroy_MPI_CUDA;
link->SyncDevice = PetscSFLinkSyncDevice_CUDA;
link->SyncStream = PetscSFLinkSyncStream_CUDA;
link->Memcpy = PetscSFLinkMemcpy_CUDA;
link->deviceinited = PETSC_TRUE;
PetscFunctionReturn(PETSC_SUCCESS);
}
|
0c30d374ab2628c5a4ebd8aa9ef5155fafccd3c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/data_loss.hpp"
#include "caffe/layers/st_layer.hpp"
#include "caffe/layers/conv_layer.hpp"
#include "caffe/layers/power_layer.hpp"
#include "caffe/layers/eltwise_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ComputeSign(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? Dtype(1) : Dtype(-1);
}
}
template <typename Dtype>
__global__ void FindNotNaNs(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index]==in[index] ? Dtype(1) : Dtype(0);
}
}
template <typename Dtype>
__global__ void KillNaNs(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index]==in[index] ? in[index] : Dtype(0);
}
}
template <typename Dtype>
__global__ void KillMasked(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > Dtype(0.5) ? out[index] : Dtype(0);
// out[index] = out[index]==out[index] ? out[index] : Dtype(0);
// out[index] = out[index]>1e3 ? 0 : out[index];
// out[index] = out[index]<-1e3 ? 0 : out[index];
}
}
template <typename Dtype>
__global__ void KillMaskedAcrossChannels(const int n, const int width_height, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int mask_idx = index % width_height;
out[index] = in[mask_idx] > Dtype(0.5) ? out[index] : Dtype(0);
}
}
template <typename Dtype>
__global__ void MaskPlateauValues(const int n, const Dtype* in, Dtype* out, Dtype plateau) {
CUDA_KERNEL_LOOP(index, n) {
if(fabs(in[index]) < plateau) out[index] = Dtype(0); // Mask out plateau values and keep other as is
}
}
template <typename Dtype>
__global__ void MaskPlateauValuesInitial(const int n, const Dtype* in, Dtype* out, Dtype plateau) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = (fabs(in[index]) < plateau) ? Dtype(0) : Dtype(1);
}
}
template <typename Dtype>
void DataLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
stn_layer_->Forward(stn_bottom_vec_,stn_top_vec_);
Dtype dot, loss;
diff_layer_->Forward(diff_bottom_vec_, diff_top_vec_);
Blob<Dtype> *diffptr = diff_top_vec_[0];
// if necessary, compute the number of not-NaNs
int count = bottom[0]->count();
int num = bottom[0]->num();
hipLaunchKernelGGL(( FindNotNaNs<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, diffptr->gpu_data(), mask_.mutable_gpu_data());
hipDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
if (this->layer_param_.data_loss_param().normalize_by_num_entries()) {
caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_);
normalize_coeff_ /= mask_.channels();
} else {
normalize_coeff_ = num;
}
// set masked (NaNs only) to zero
hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, mask_.gpu_data(), diffptr->mutable_gpu_data());
hipDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
square_layer_->Forward(diff_top_vec_, square_top_vec_);
sum_layer_->Forward(square_top_vec_, sum_top_vec_);
// Mask plateau in summed blob (only one channel):
if(this->layer_param_.data_loss_param().plateau() > 0) {
float plateau_val_squared = this->layer_param_.data_loss_param().plateau() * this->layer_param_.data_loss_param().plateau();
hipLaunchKernelGGL(( MaskPlateauValuesInitial<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
sum_output_.count(), sum_output_.gpu_data(), plateau_l2_.mutable_gpu_data(), plateau_val_squared);
hipDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_data());
hipDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
}
sqrt_layer_->Forward(sum_top_vec_, sqrt_top_vec_);
// Note sign_ is set to all ones in Reshape
caffe_gpu_dot(sqrt_output_.count(), sqrt_output_.gpu_data(), sign_.gpu_data(), &dot);
loss = dot / normalize_coeff_;
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void DataLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
bool prop_down = propagate_down[0];
  if(bottom.size() > 1) {
    prop_down |= propagate_down[1];
    prop_down |= propagate_down[2];
  }
Blob<Dtype> *diffptr = diff_top_vec_[0];
if (prop_down) {
const Dtype alpha = top[0]->cpu_diff()[0] ;
vector<bool> prop_down(1,true);
caffe_gpu_axpby(sqrt_output_.count(), alpha, sign_.gpu_data(),
Dtype(0), sqrt_output_.mutable_gpu_diff());
sqrt_layer_->Backward(sqrt_top_vec_, prop_down, sum_top_vec_);
if(this->layer_param_.data_loss_param().plateau() > 0) {
hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_diff());
hipDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
}
sum_layer_->Backward(sum_top_vec_, prop_down, square_top_vec_);
square_layer_->Backward(square_top_vec_, prop_down, diff_top_vec_);
hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(diffptr->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
diffptr->count(), mask_.gpu_data(), diffptr->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
vector<bool> propagate_down2(2,true);
diff_layer_->Backward(diff_top_vec_, propagate_down2, diff_bottom_vec_);
stn_layer_->Backward(stn_top_vec_,propagate_down2,stn_bottom_vec_);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DataLossLayer);
} // namespace caffe
| 0c30d374ab2628c5a4ebd8aa9ef5155fafccd3c0.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/data_loss.hpp"
#include "caffe/layers/st_layer.hpp"
#include "caffe/layers/conv_layer.hpp"
#include "caffe/layers/power_layer.hpp"
#include "caffe/layers/eltwise_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ComputeSign(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? Dtype(1) : Dtype(-1);
}
}
template <typename Dtype>
__global__ void FindNotNaNs(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index]==in[index] ? Dtype(1) : Dtype(0);
}
}
template <typename Dtype>
__global__ void KillNaNs(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index]==in[index] ? in[index] : Dtype(0);
}
}
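// Zeroes entries of `out` wherever the mask `in` is <= 0.5; used to drop NaN- and
// plateau-masked positions from data and gradients.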
template <typename Dtype>
__global__ void KillMasked(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > Dtype(0.5) ? out[index] : Dtype(0);
// out[index] = out[index]==out[index] ? out[index] : Dtype(0);
// out[index] = out[index]>1e3 ? 0 : out[index];
// out[index] = out[index]<-1e3 ? 0 : out[index];
}
}
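// Same as KillMasked, but a single-channel mask of width*height entries is broadcast
// across all channels of `out`.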
template <typename Dtype>
__global__ void KillMaskedAcrossChannels(const int n, const int width_height, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int mask_idx = index % width_height;
out[index] = in[mask_idx] > Dtype(0.5) ? out[index] : Dtype(0);
}
}
template <typename Dtype>
__global__ void MaskPlateauValues(const int n, const Dtype* in, Dtype* out, Dtype plateau) {
CUDA_KERNEL_LOOP(index, n) {
if(fabs(in[index]) < plateau) out[index] = Dtype(0); // Mask out plateau values and keep other as is
}
}
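// Builds a binary mask: 0 where |in| is below the plateau threshold, 1 elsewhere.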
template <typename Dtype>
__global__ void MaskPlateauValuesInitial(const int n, const Dtype* in, Dtype* out, Dtype plateau) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = (fabs(in[index]) < plateau) ? Dtype(0) : Dtype(1);
}
}
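// Forward pass: warp the input with the spatial transformer, take the per-pixel difference
// to the target, zero NaN entries (optionally counting the valid ones for normalization),
// square and sum over channels, optionally suppress residuals below the plateau threshold,
// then take the per-pixel square root and accumulate into a normalized scalar loss.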
template <typename Dtype>
void DataLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
stn_layer_->Forward(stn_bottom_vec_,stn_top_vec_);
Dtype dot, loss;
diff_layer_->Forward(diff_bottom_vec_, diff_top_vec_);
Blob<Dtype> *diffptr = diff_top_vec_[0];
// if necessary, compute the number of not-NaNs
int count = bottom[0]->count();
int num = bottom[0]->num();
FindNotNaNs<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, diffptr->gpu_data(), mask_.mutable_gpu_data());
cudaDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
if (this->layer_param_.data_loss_param().normalize_by_num_entries()) {
caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_);
normalize_coeff_ /= mask_.channels();
} else {
normalize_coeff_ = num;
}
// set masked (NaNs only) to zero
KillMasked<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, mask_.gpu_data(), diffptr->mutable_gpu_data());
cudaDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
square_layer_->Forward(diff_top_vec_, square_top_vec_);
sum_layer_->Forward(square_top_vec_, sum_top_vec_);
// Mask plateau in summed blob (only one channel):
if(this->layer_param_.data_loss_param().plateau() > 0) {
float plateau_val_squared = this->layer_param_.data_loss_param().plateau() * this->layer_param_.data_loss_param().plateau();
MaskPlateauValuesInitial<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>(
sum_output_.count(), sum_output_.gpu_data(), plateau_l2_.mutable_gpu_data(), plateau_val_squared);
cudaDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
KillMasked<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>(
sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_data());
cudaDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
}
sqrt_layer_->Forward(sum_top_vec_, sqrt_top_vec_);
// Note sign_ is set to all ones in Reshape
caffe_gpu_dot(sqrt_output_.count(), sqrt_output_.gpu_data(), sign_.gpu_data(), &dot);
loss = dot / normalize_coeff_;
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void DataLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
bool prop_down = propagate_down[0];
if(bottom.size() > 1) {
prop_down |= propagate_down[1];
prop_down |= propagate_down[2];
}
Blob<Dtype> *diffptr = diff_top_vec_[0];
if (prop_down) {
const Dtype alpha = top[0]->cpu_diff()[0] ;
vector<bool> prop_down(1,true);
caffe_gpu_axpby(sqrt_output_.count(), alpha, sign_.gpu_data(),
Dtype(0), sqrt_output_.mutable_gpu_diff());
sqrt_layer_->Backward(sqrt_top_vec_, prop_down, sum_top_vec_);
if(this->layer_param_.data_loss_param().plateau() > 0) {
KillMasked<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>(
sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_diff());
cudaDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
}
sum_layer_->Backward(sum_top_vec_, prop_down, square_top_vec_);
square_layer_->Backward(square_top_vec_, prop_down, diff_top_vec_);
KillMasked<Dtype><<<CAFFE_GET_BLOCKS(diffptr->count()), CAFFE_CUDA_NUM_THREADS>>>(
diffptr->count(), mask_.gpu_data(), diffptr->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
vector<bool> propagate_down2(2,true);
diff_layer_->Backward(diff_top_vec_, propagate_down2, diff_bottom_vec_);
stn_layer_->Backward(stn_top_vec_,propagate_down2,stn_bottom_vec_);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DataLossLayer);
} // namespace caffe
|
79492eecec0e25584c7857c61cea4e4c947bd07c.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "contrib_ops/rocm/math/bias_softmax.h"
#include <limits>
#include <algorithm>
#include "hip/hip_runtime.h"
#include "core/providers/rocm/cu_inc/common.cuh"
#include "core/providers/rocm/cu_inc/binary_elementwise_impl.cuh"
#include "core/providers/common.h"
#include "core/providers/rocm/miopen_common.h"
#include "core/providers/rocm/shared_inc/accumulation_type.h"
#include "core/providers/rocm/math/binary_elementwise_ops_impl_functors.cuh"
#include "core/providers/rocm/math/softmax_warpwise_impl.cuh"
using namespace onnxruntime;
using namespace onnxruntime::rocm;
namespace onnxruntime {
namespace contrib {
namespace rocm {
// Duplicated softmax_impl.cu here
// So far attempt to use shared kernel with additional template resulted in lost performance
// Note: The intended case for 'input_bias' is the input sequence mask for transformer models
// As an additive mask, it should be zero for preserved tokens and -infty for tokens to screen
// The mask will broadcast from [batch_size, 1, 1, seq_len] to input [batch_size, num_heads, seq_len, seq_len]
// Here element_count = seq_len and bias_broadcast_size_per_batch = num_heads * seq_len
// The softmax + additive mask fusion follows NVIDIA apex's additive_masked_softmax_warp_forward
// see https://github.com/NVIDIA/apex/blob/4ef930c1c884fdca5f472ab2ce7cb9b505d26c1a/apex/contrib/csrc/multihead_attn/softmax.h
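// In symbols, for one logits row x and its (broadcast) bias row b the kernel computes
//   y_i = exp(x_i + b_i - m) / sum_k exp(x_k + b_k - m),  with  m = max_j (x_j + b_j),
// i.e. a numerically stable softmax over the biased logits; lanes beyond element_count are
// filled with -infinity so they contribute exp(-inf) = 0 to the sum.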
template <typename input_t, typename output_t, typename acc_t, int log2_elements>
__global__ void BiasSoftmaxWarpForward(
output_t* output,
const input_t* input,
const input_t* input_bias,
int element_count,
int batch_count,
int batch_stride,
int bias_broadcast_count_per_batch) {
// "WARP" refers to cooperative threads and might not equal 32 threads of GPU warp
// thread block is (WARP_SIZE, 256/WARP_SIZE)
constexpr int next_power_of_two = 1 << log2_elements;
constexpr int WARP_SIZE = next_power_of_two < GPU_WARP_SIZE ? next_power_of_two : GPU_WARP_SIZE;
constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
// constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
constexpr int WARP_BATCH = 1;
// each "WARP" (<=32) processes WARP_BATCH(one of {1,2}) batches
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
// last warp may have fewer batches
int local_batches = batch_count - first_batch;
if (local_batches > WARP_BATCH)
local_batches = WARP_BATCH;
// thread will process elements (local_index + n * warp_size) within batch
int local_idx = threadIdx.x;
// push input, input_bias output pointers to batch we need to process
input += first_batch * batch_stride + local_idx;
output += first_batch * batch_stride + local_idx;
// load from global memory and apply bias (likely an additive mask)
acc_t elements[WARP_BATCH][WARP_ITERATIONS];
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
// the bias has assumed shape [batch_size, element_count]
// .. and needs to broadcast to [batch_size, broadcast_size, element_count]
int bias_offset = (first_batch + i) / bias_broadcast_count_per_batch * batch_stride + local_idx;
int batch_element_count = (i >= local_batches) ? 0 : element_count;
#pragma unroll
for (int it = 0; it < WARP_ITERATIONS; ++it) {
int element_index = local_idx + it * WARP_SIZE;
if (element_index < batch_element_count) {
elements[i][it] = (acc_t)input[i * element_count + it * WARP_SIZE] + (acc_t)input_bias[bias_offset + it * WARP_SIZE];
} else {
elements[i][it] = -std::numeric_limits<acc_t>::infinity();
}
}
}
// find maximum value within batch for numerical stability
acc_t max_value[WARP_BATCH];
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
max_value[i] = elements[i][0];
#pragma unroll
for (int it = 1; it < WARP_ITERATIONS; ++it) {
max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it];
}
}
warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Max>(max_value);
// normalization factor Z = Sum[ exp(element_i), for element_i in batch ]
acc_t sum[WARP_BATCH]{acc_t(0.0)};
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
#pragma unroll
for (int it = 0; it < WARP_ITERATIONS; ++it) {
elements[i][it] = expf((acc_t)(elements[i][it] - max_value[i]));
sum[i] += elements[i][it];
}
}
warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
// write back normalized value = exp(element_i)/Z to global memory
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
if (i >= local_batches)
break;
#pragma unroll
for (int it = 0; it < WARP_ITERATIONS; ++it) {
int element_index = local_idx + it * WARP_SIZE;
if (element_index < element_count) {
output[i * element_count + it * WARP_SIZE] = elements[i][it] / sum[i];
} else {
break;
}
}
}
}
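// The host dispatcher below rounds element_count up to the next power of two, derives the
// warp size and block layout from it, and instantiates the kernel specialization matching
// that log2_elements value.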
template <typename T>
void DispatchBiasSoftmaxForwardImpl(
hipStream_t stream,
Tensor* output_tensor,
const Tensor* input_tensor,
const Tensor* input_bias_tensor,
int element_count,
int batch_count,
int batch_stride,
int bias_broadcast_size_per_batch) {
typedef typename ToHipType<T>::MappedType HipT;
typedef HipT input_t;
typedef HipT output_t;
typedef AccumulationType_t<HipT> acc_t;
const auto* input = reinterpret_cast<const HipT*>(input_tensor->template Data<T>());
const auto* input_bias = reinterpret_cast<const HipT*>(input_bias_tensor->template Data<T>());
auto* output = reinterpret_cast<HipT*>(output_tensor->template MutableData<T>());
if (element_count == 0)
return;
int log2_elements = log2_ceil(element_count);
const int next_power_of_two = 1 << log2_elements;
// This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward.
int warp_size = ::min(next_power_of_two, GPU_WARP_SIZE);
// This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward.
// int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
int batches_per_warp = 1;
// use 256 threads per block to maximize gpu utilization
constexpr int threads_per_block = 256;
int warps_per_block = (threads_per_block / warp_size);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
dim3 threads(warp_size, warps_per_block, 1);
// Launch code would be more elegant if C++ supported FOR CONSTEXPR
switch (log2_elements) {
case 0: // 1
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 0>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 1: // 2
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 1>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 2: // 4
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 2>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 3: // 8
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 3>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 4: // 16
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 4>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 5: // 32
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 5>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 6: // 64
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 6>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 7: // 128
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 7>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 8: // 256
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 8>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 9: // 512
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 9>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 10: // 1024
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 10>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
default:
break;
}
}
#define SPECIALIZED_BIAS_SOFTMAX_IMPL(T) \
template void DispatchBiasSoftmaxForwardImpl<T>( \
hipStream_t stream, \
Tensor * output_tensor, \
const Tensor* input_tensor, \
const Tensor* input_bias_tensor, \
int element_count, \
int batch_count, \
int batch_stride, \
int bias_broadcast_size_per_batch);
SPECIALIZED_BIAS_SOFTMAX_IMPL(double)
SPECIALIZED_BIAS_SOFTMAX_IMPL(float)
SPECIALIZED_BIAS_SOFTMAX_IMPL(MLFloat16)
// For large element count we fall back to explicit Add kernel + the MIOpen library
// note: This is an unhappy path! There is no performance benefit for the fusion.
template <typename T>
void DispatchBiasSoftMaxForwardViaDnnLibraryImpl(
hipStream_t stream,
miopenHandle_t miopenHandle,
int element_count,
int batch_count,
int broadcast_axis,
int softmax_axis,
const onnxruntime::TensorShape& X_shape,
const onnxruntime::Tensor* X,
const onnxruntime::TensorShape& B_shape,
const onnxruntime::Tensor* B,
onnxruntime::Tensor* Y) {
typedef typename ToHipType<T>::MappedType HipT;
const auto* X_data = reinterpret_cast<const HipT*>(X->template Data<T>());
const auto* B_data = reinterpret_cast<const HipT*>(B->template Data<T>());
auto* Y_data = reinterpret_cast<HipT*>(Y->template MutableData<T>());
int X_num_dim = static_cast<int>(X_shape.NumDimensions());
// binary elementise kernel requires input pitches
TArray<int64_t> lhs_padded_strides(X_num_dim);
for (int i = -1, lhs_pitch = 1; i >= -X_num_dim; i--) {
int positive_i = X_num_dim + i;
lhs_padded_strides[positive_i] = lhs_pitch;
lhs_pitch *= X_shape[positive_i];
}
// set pitches for bias so it broadcasts along relevant dimensions
TArray<int64_t> rhs_padded_strides(X_num_dim);
for (int i = -1, rhs_pitch = 1; i >= -X_num_dim; i--) {
int positive_ix = X_num_dim + i;
int positive_ib = static_cast<int>(B_shape.NumDimensions()) + i;
if (broadcast_axis <= positive_ix && positive_ix < softmax_axis) {
rhs_padded_strides[positive_ix] = 0;
continue;
}
rhs_padded_strides[positive_ix] = rhs_pitch;
rhs_pitch *= B_shape[positive_ib];
}
TArray<fast_divmod> fdm_output_strides(X_num_dim);
for (int i = 0; i < fdm_output_strides.Size(); i++)
fdm_output_strides[i] = fast_divmod(lhs_padded_strides[i]);
fast_divmod fdm_H, fdm_C;
// invoke elementwise add with broadcast kernel
::onnxruntime::rocm::BinaryElementWiseImpl(
stream,
(int32_t)X_num_dim,
&lhs_padded_strides,
X_data,
&rhs_padded_strides,
B_data,
&fdm_output_strides,
fdm_H,
fdm_C,
Y_data,
OP_Add<HipT, HipT, HipT>(),
(size_t)X_shape.Size());
// invoke MIOpen for Y = softmax(X)
std::vector<int64_t> dims({batch_count, 1, 1, element_count});
const auto alpha = Consts<HipT>::One;
const auto beta = Consts<HipT>::Zero;
onnxruntime::rocm::MiopenTensor input_tensor, output_tensor;
input_tensor.Set(dims, onnxruntime::rocm::MiopenTensor::GetDataType<HipT>());
output_tensor.Set(dims, onnxruntime::rocm::MiopenTensor::GetDataType<HipT>());
miopenSoftmaxForward_V2(
miopenHandle,
&alpha,
input_tensor,
Y_data,
&beta,
output_tensor,
Y_data,
MIOPEN_SOFTMAX_ACCURATE,
MIOPEN_SOFTMAX_MODE_INSTANCE);
}
#define SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(T) \
template void DispatchBiasSoftMaxForwardViaDnnLibraryImpl<T>( \
hipStream_t stream, \
miopenHandle_t miopenHandle, \
int element_count, \
int batch_count, \
int broadcast_axis, \
int softmax_axis, \
const onnxruntime::TensorShape& X_shape, \
const Tensor* X_data, \
const onnxruntime::TensorShape& B_shape, \
const Tensor* B_data, \
Tensor* Y_data);
// SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(double)
SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(float)
SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(MLFloat16)
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
| 79492eecec0e25584c7857c61cea4e4c947bd07c.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "contrib_ops/rocm/math/bias_softmax.h"
#include <limits>
#include <algorithm>
#include "hip/hip_runtime.h"
#include "core/providers/rocm/cu_inc/common.cuh"
#include "core/providers/rocm/cu_inc/binary_elementwise_impl.cuh"
#include "core/providers/common.h"
#include "core/providers/rocm/miopen_common.h"
#include "core/providers/rocm/shared_inc/accumulation_type.h"
#include "core/providers/rocm/math/binary_elementwise_ops_impl_functors.cuh"
#include "core/providers/rocm/math/softmax_warpwise_impl.cuh"
using namespace onnxruntime;
using namespace onnxruntime::rocm;
namespace onnxruntime {
namespace contrib {
namespace rocm {
// Duplicated softmax_impl.cu here
// So far attempt to use shared kernel with additional template resulted in lost performance
// Note: The intended case for 'input_bias' is the input sequence mask for transformer models
// As an additive mask, it should be zero for preserved tokens and -infty for tokens to screen
// The mask will broadcast from [batch_size, 1, 1, seq_len] to input [batch_size, num_heads, seq_len, seq_len]
// Here element_count = seq_len and bias_broadcast_size_per_batch = num_heads * seq_len
// The softmax + additive mask fusion follows NVIDIA apex's additive_masked_softmax_warp_forward
// see https://github.com/NVIDIA/apex/blob/4ef930c1c884fdca5f472ab2ce7cb9b505d26c1a/apex/contrib/csrc/multihead_attn/softmax.h
template <typename input_t, typename output_t, typename acc_t, int log2_elements>
__global__ void BiasSoftmaxWarpForward(
output_t* output,
const input_t* input,
const input_t* input_bias,
int element_count,
int batch_count,
int batch_stride,
int bias_broadcast_count_per_batch) {
// "WARP" refers to cooperative threads and might not equal 32 threads of GPU warp
// thread block is (WARP_SIZE, 256/WARP_SIZE)
constexpr int next_power_of_two = 1 << log2_elements;
constexpr int WARP_SIZE = next_power_of_two < GPU_WARP_SIZE ? next_power_of_two : GPU_WARP_SIZE;
constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
// constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
constexpr int WARP_BATCH = 1;
// each "WARP" (<=32) processes WARP_BATCH(one of {1,2}) batches
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
// last warp may have fewer batches
int local_batches = batch_count - first_batch;
if (local_batches > WARP_BATCH)
local_batches = WARP_BATCH;
// thread will process elements (local_index + n * warp_size) within batch
int local_idx = threadIdx.x;
// push input, input_bias output pointers to batch we need to process
input += first_batch * batch_stride + local_idx;
output += first_batch * batch_stride + local_idx;
// load from global memory and apply bias (likely an additive mask)
acc_t elements[WARP_BATCH][WARP_ITERATIONS];
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
// the bias has assumed shape [batch_size, element_count]
// .. and needs to broadcast to [batch_size, broadcast_size, element_count]
int bias_offset = (first_batch + i) / bias_broadcast_count_per_batch * batch_stride + local_idx;
int batch_element_count = (i >= local_batches) ? 0 : element_count;
#pragma unroll
for (int it = 0; it < WARP_ITERATIONS; ++it) {
int element_index = local_idx + it * WARP_SIZE;
if (element_index < batch_element_count) {
elements[i][it] = (acc_t)input[i * element_count + it * WARP_SIZE] + (acc_t)input_bias[bias_offset + it * WARP_SIZE];
} else {
elements[i][it] = -std::numeric_limits<acc_t>::infinity();
}
}
}
// find maximum value within batch for numerical stability
acc_t max_value[WARP_BATCH];
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
max_value[i] = elements[i][0];
#pragma unroll
for (int it = 1; it < WARP_ITERATIONS; ++it) {
max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it];
}
}
warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Max>(max_value);
// normalization factor Z = Sum[ exp(element_i), for element_i in batch ]
acc_t sum[WARP_BATCH]{acc_t(0.0)};
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
#pragma unroll
for (int it = 0; it < WARP_ITERATIONS; ++it) {
elements[i][it] = expf((acc_t)(elements[i][it] - max_value[i]));
sum[i] += elements[i][it];
}
}
warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
// write back normalized value = exp(element_i)/Z to global memory
#pragma unroll
for (int i = 0; i < WARP_BATCH; ++i) {
if (i >= local_batches)
break;
#pragma unroll
for (int it = 0; it < WARP_ITERATIONS; ++it) {
int element_index = local_idx + it * WARP_SIZE;
if (element_index < element_count) {
output[i * element_count + it * WARP_SIZE] = elements[i][it] / sum[i];
} else {
break;
}
}
}
}
template <typename T>
void DispatchBiasSoftmaxForwardImpl(
hipStream_t stream,
Tensor* output_tensor,
const Tensor* input_tensor,
const Tensor* input_bias_tensor,
int element_count,
int batch_count,
int batch_stride,
int bias_broadcast_size_per_batch) {
typedef typename ToHipType<T>::MappedType HipT;
typedef HipT input_t;
typedef HipT output_t;
typedef AccumulationType_t<HipT> acc_t;
const auto* input = reinterpret_cast<const HipT*>(input_tensor->template Data<T>());
const auto* input_bias = reinterpret_cast<const HipT*>(input_bias_tensor->template Data<T>());
auto* output = reinterpret_cast<HipT*>(output_tensor->template MutableData<T>());
if (element_count == 0)
return;
int log2_elements = log2_ceil(element_count);
const int next_power_of_two = 1 << log2_elements;
// This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward.
int warp_size = std::min(next_power_of_two, GPU_WARP_SIZE);
// This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward.
// int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
int batches_per_warp = 1;
// use 256 threads per block to maximize gpu utilization
constexpr int threads_per_block = 256;
int warps_per_block = (threads_per_block / warp_size);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
dim3 threads(warp_size, warps_per_block, 1);
// Launch code would be more elegant if C++ supported FOR CONSTEXPR
switch (log2_elements) {
case 0: // 1
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 0>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 1: // 2
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 1>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 2: // 4
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 2>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 3: // 8
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 3>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 4: // 16
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 4>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 5: // 32
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 5>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 6: // 64
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 6>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 7: // 128
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 7>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 8: // 256
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 8>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 9: // 512
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 9>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
case 10: // 1024
hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasSoftmaxWarpForward<input_t, output_t, acc_t, 10>), dim3(blocks), dim3(threads), 0, stream,
output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch);
break;
default:
break;
}
}
#define SPECIALIZED_BIAS_SOFTMAX_IMPL(T) \
template void DispatchBiasSoftmaxForwardImpl<T>( \
hipStream_t stream, \
Tensor * output_tensor, \
const Tensor* input_tensor, \
const Tensor* input_bias_tensor, \
int element_count, \
int batch_count, \
int batch_stride, \
int bias_broadcast_size_per_batch);
SPECIALIZED_BIAS_SOFTMAX_IMPL(double)
SPECIALIZED_BIAS_SOFTMAX_IMPL(float)
SPECIALIZED_BIAS_SOFTMAX_IMPL(MLFloat16)
// For large element count we fall back to explicit Add kernel + CUDA DNN library
// note: This is an unhappy path! There is no performance benefit for the fusion.
template <typename T>
void DispatchBiasSoftMaxForwardViaDnnLibraryImpl(
hipStream_t stream,
miopenHandle_t miopenHandle,
int element_count,
int batch_count,
int broadcast_axis,
int softmax_axis,
const onnxruntime::TensorShape& X_shape,
const onnxruntime::Tensor* X,
const onnxruntime::TensorShape& B_shape,
const onnxruntime::Tensor* B,
onnxruntime::Tensor* Y) {
typedef typename ToHipType<T>::MappedType HipT;
const auto* X_data = reinterpret_cast<const HipT*>(X->template Data<T>());
const auto* B_data = reinterpret_cast<const HipT*>(B->template Data<T>());
auto* Y_data = reinterpret_cast<HipT*>(Y->template MutableData<T>());
int X_num_dim = static_cast<int>(X_shape.NumDimensions());
// binary elementise kernel requires input pitches
TArray<int64_t> lhs_padded_strides(X_num_dim);
for (int i = -1, lhs_pitch = 1; i >= -X_num_dim; i--) {
int positive_i = X_num_dim + i;
lhs_padded_strides[positive_i] = lhs_pitch;
lhs_pitch *= X_shape[positive_i];
}
// set pitches for bias so it broadcasts along relevant dimensions
TArray<int64_t> rhs_padded_strides(X_num_dim);
for (int i = -1, rhs_pitch = 1; i >= -X_num_dim; i--) {
int positive_ix = X_num_dim + i;
int positive_ib = static_cast<int>(B_shape.NumDimensions()) + i;
if (broadcast_axis <= positive_ix && positive_ix < softmax_axis) {
rhs_padded_strides[positive_ix] = 0;
continue;
}
rhs_padded_strides[positive_ix] = rhs_pitch;
rhs_pitch *= B_shape[positive_ib];
}
TArray<fast_divmod> fdm_output_strides(X_num_dim);
for (int i = 0; i < fdm_output_strides.Size(); i++)
fdm_output_strides[i] = fast_divmod(lhs_padded_strides[i]);
fast_divmod fdm_H, fdm_C;
// invoke elementwise add with broadcast kernel
::onnxruntime::rocm::BinaryElementWiseImpl(
stream,
(int32_t)X_num_dim,
&lhs_padded_strides,
X_data,
&rhs_padded_strides,
B_data,
&fdm_output_strides,
fdm_H,
fdm_C,
Y_data,
OP_Add<HipT, HipT, HipT>(),
(size_t)X_shape.Size());
// invoke cuda DNN library for Y = softmax(X)
std::vector<int64_t> dims({batch_count, 1, 1, element_count});
const auto alpha = Consts<HipT>::One;
const auto beta = Consts<HipT>::Zero;
onnxruntime::rocm::MiopenTensor input_tensor, output_tensor;
input_tensor.Set(dims, onnxruntime::rocm::MiopenTensor::GetDataType<HipT>());
output_tensor.Set(dims, onnxruntime::rocm::MiopenTensor::GetDataType<HipT>());
miopenSoftmaxForward_V2(
miopenHandle,
&alpha,
input_tensor,
Y_data,
&beta,
output_tensor,
Y_data,
MIOPEN_SOFTMAX_ACCURATE,
MIOPEN_SOFTMAX_MODE_INSTANCE);
}
#define SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(T) \
template void DispatchBiasSoftMaxForwardViaDnnLibraryImpl<T>( \
hipStream_t stream, \
miopenHandle_t miopenHandle, \
int element_count, \
int batch_count, \
int broadcast_axis, \
int softmax_axis, \
const onnxruntime::TensorShape& X_shape, \
const Tensor* X_data, \
const onnxruntime::TensorShape& B_shape, \
const Tensor* B_data, \
Tensor* Y_data);
// SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(double)
SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(float)
SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(MLFloat16)
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
|
c706318d762fee02e074b36670bea25d075ccb20.hip | // !!! This is a file automatically generated by hipify!!!
/*
SOURCE OPERATOR DEVICE MATRIX OPERATIONS GPU
BASICALLY ONLY USED FOR Testing
CONTAINS COPY OVERHEAD
AUTHOR : FABIAN DECHENT / JANNIS SCHÜRMANN
DATE : 11.08.2020
TO-DO :
CAUTION :
*/
// c++ standard headers
#include <iostream>
// standard c headers
#include <assert.h>
// own headers
#include "../common.h"
#include "matrix_operator.h"
#include "matrix_operator_gpu.h"
#include "kernel_utils.h"
#include "../global.h"
// cublas headers
#include "rocblas.h"
#include <hip/hip_runtime.h>
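// Each *_gpu wrapper below allocates device buffers, copies the operands in, calls the
// corresponding *_onDev routine and copies the result back; this host<->device traffic is
// the "copy overhead" mentioned above, which is why these wrappers are only used for testing.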
void add_reduce_dim_gpu(const double* mat_in,double *vec_out, int rows,int cols, int dim_red,int size_vec){
double *dev_mat_in,*dev_vec_out;
CHECK(hipMalloc((void**)&dev_mat_in, rows*cols*sizeof(double)));
CHECK(hipMalloc((void**)&dev_vec_out, size_vec*sizeof(double)));
CHECK(hipMemcpy(dev_mat_in, mat_in, rows*cols*sizeof(double), hipMemcpyHostToDevice));
add_reduce_dim_onDev(dev_mat_in,dev_vec_out, rows,cols, dim_red,size_vec);
CHECK(hipMemcpy(vec_out, dev_vec_out, size_vec*sizeof(double), hipMemcpyDeviceToHost));
// free cuda storage
CHECK(hipFree(dev_mat_in));
CHECK(hipFree(dev_vec_out));
}
void add_along_axis_gpu(const double* mat_in,const double *vec,double* mat_out, int rows,int cols, int dim_add, int size_vec){
double *dev_mat_in,*dev_mat_out,*dev_vec;
CHECK(hipMalloc((void**)&dev_mat_in, rows*cols*sizeof(double)));
CHECK(hipMalloc((void**)&dev_mat_out, rows*cols*sizeof(double)));
CHECK(hipMalloc((void**)&dev_vec, size_vec*sizeof(double)));
CHECK(hipMemcpy(dev_mat_in, mat_in, rows*cols*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_vec, vec, size_vec*sizeof(double), hipMemcpyHostToDevice));
add_along_axis_onDev(dev_mat_in,dev_vec,dev_mat_out,rows,cols,dim_add,size_vec);
CHECK(hipMemcpy(mat_out, dev_mat_out, rows*cols*sizeof(double), hipMemcpyDeviceToHost));
// free cuda storage
CHECK(hipFree(dev_mat_in));
CHECK(hipFree(dev_mat_out));
CHECK(hipFree(dev_vec));
}
void div_along_axis_gpu(const double* mat_in,const double *vec,double* mat_out, int rows,int cols, int dim_div, int size_vec){
double *dev_mat_in,*dev_mat_out,*dev_vec;
CHECK(hipMalloc((void**)&dev_mat_in, rows*cols*sizeof(double)));
CHECK(hipMalloc((void**)&dev_mat_out, rows*cols*sizeof(double)));
CHECK(hipMalloc((void**)&dev_vec, size_vec*sizeof(double)));
CHECK(hipMemcpy(dev_mat_in, mat_in, rows*cols*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_vec, vec, size_vec*sizeof(double), hipMemcpyHostToDevice));
div_along_axis_onDev(dev_mat_in,dev_vec,dev_mat_out,rows,cols,dim_div,size_vec);
CHECK(hipMemcpy(mat_out, dev_mat_out, rows*cols*sizeof(double), hipMemcpyDeviceToHost));
// free cuda storage
CHECK(hipFree(dev_mat_in));
CHECK(hipFree(dev_mat_out));
CHECK(hipFree(dev_vec));
}
//___________________________________________________________________________________________________
// matrix ard_gpu
void matrix_hadamard_gpu(double* res,
const double* lhs,
const double* rhs,
int size,
int threads_block)
{
// alloc cuda storage
double* d_res;
double* d_lhs;
double* d_rhs;
CHECK(hipMalloc((void**)&d_res, size*sizeof(double)));
CHECK(hipMalloc((void**)&d_lhs, size*sizeof(double)));
CHECK(hipMalloc((void**)&d_rhs, size*sizeof(double)));
// moving matrices to device
CHECK(hipMemcpy(d_lhs, lhs, size*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_rhs, rhs, size*sizeof(double), hipMemcpyHostToDevice));
// calling hadamard onDev
matrix_hadamard_onDev(d_res, d_lhs, d_rhs, size, threads_block);
// moving matrices back from memory
CHECK(hipMemcpy(res, d_res, size*sizeof(double), hipMemcpyDeviceToHost));
// free cuda storage
CHECK(hipFree(d_res));
CHECK(hipFree(d_lhs));
CHECK(hipFree(d_rhs));
}
//___________________________________________________________________________________________________
// matrix_add_gpu
void matrix_add_gpu(double* res,
const double* lhs,
const double* rhs,
int size,
int threads_block)
{
// alloc cuda storage
double* d_res;
double* d_lhs;
double* d_rhs;
CHECK(hipMalloc((void**)&d_res, size*sizeof(double)));
CHECK(hipMalloc((void**)&d_lhs, size*sizeof(double)));
CHECK(hipMalloc((void**)&d_rhs, size*sizeof(double)));
// moving matrices to device
CHECK(hipMemcpy(d_lhs, lhs, size*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_rhs, rhs, size*sizeof(double), hipMemcpyHostToDevice));
// calling add onDev
matrix_add_onDev( d_res , d_lhs, d_rhs, size, threads_block);
// moving matrices back from memory
CHECK(hipMemcpy(res, d_res, size*sizeof(double), hipMemcpyDeviceToHost));
// free cuda storage
CHECK(hipFree(d_res));
CHECK(hipFree(d_lhs));
CHECK(hipFree(d_rhs));
}
void mulAdd_gpu(double* res, const double* lhs, const double* rhs, const double factor, int size,int threads_block)
{
// alloc cuda storage
double *dev_res,*dev_lhs,*dev_rhs;
CHECK(hipMalloc((void**)&dev_res, size*sizeof(double)));
CHECK(hipMalloc((void**)&dev_lhs, size*sizeof(double)));
CHECK(hipMalloc((void**)&dev_rhs, size*sizeof(double)));
// moving matrices to device
CHECK(hipMemcpy(dev_lhs, lhs, size*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_rhs, rhs, size*sizeof(double), hipMemcpyHostToDevice));
mulAdd_onDev(dev_res,dev_lhs,dev_rhs,factor,size,threads_block);
// moving matrices back from memory
CHECK(hipMemcpy(res, dev_res, size*sizeof(double), hipMemcpyDeviceToHost));
// free cuda storage
CHECK(hipFree(dev_res));
CHECK(hipFree(dev_lhs));
CHECK(hipFree(dev_rhs));
}
void mat_transpose_gpu(const double* mat_in, double* mat_out, int rows, int cols, int threads_block){
double *d_mat_in,*d_mat_out;
CHECK(hipMalloc((void**)&d_mat_in,rows*cols*sizeof(double)));
CHECK(hipMalloc((void**)&d_mat_out,rows*cols*sizeof(double)));
CHECK(hipMemcpy(d_mat_in, mat_in, rows*cols*sizeof(double), hipMemcpyHostToDevice));
mat_transpose_onDev(d_mat_in, d_mat_out, rows, cols, threads_block);
// Read C from device memory
hipMemcpy(mat_out, d_mat_out, rows*cols*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
CHECK(hipFree(d_mat_in));
CHECK(hipFree(d_mat_out));
}
void matMul_gpu1(const double *A, const double *B, int M,int N,int K,double *C, int threads_block)
{
double *d_A,*d_B,*d_C;
CHECK(hipMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(hipMemcpy(d_A, A, M*K*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, B, K*N*sizeof(double), hipMemcpyHostToDevice));
matMul_onDev1(d_A,d_B, M,N,K,d_C, threads_block);
// Read C from device memory
hipMemcpy(C, d_C, M*N*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_C));
CHECK(hipFree(d_B));
}
void matMul_gpu2(const double *A, const double *B, int M,int N,int K,double *C,int threads_block)
{
double *d_A,*d_B,*d_C;
CHECK(hipMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(hipMemcpy(d_A, A, M*K*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, B, K*N*sizeof(double), hipMemcpyHostToDevice));
matMul_onDev2(d_A, d_B, M, N, K,d_C, threads_block);
// Read C from device memory
hipMemcpy(C, d_C, M*N*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
CHECK(hipFree(d_B));
CHECK(hipFree(d_A));
CHECK(hipFree(d_C));
}
void matMul_gpu_dsm(const double *A, const double *B, int M,int N,int K,double *C,int threads_block)
{
double *d_A,*d_B,*d_C;
CHECK(hipMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(hipMemcpy(d_A, A, M*K*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, B, K*N*sizeof(double), hipMemcpyHostToDevice));
matMul_dsm_onDev(d_A,d_B, M, N, K,d_C,threads_block);
// Read C from device memory
hipMemcpy(C, d_C, M*N*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
}
void matMul_gpu_dsm_coa(const double *A, const double *B, int M,int N,int K,double *C,int threads_block)
{
double *d_A,*d_B,*d_C;
CHECK(hipMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(hipMemcpy(d_A, A, M*K*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, B, K*N*sizeof(double), hipMemcpyHostToDevice));
matMul_dsm_coa_onDev(d_A, d_B, M, N, K,d_C,threads_block);
// Read C from device memory
hipMemcpy(C, d_C, M*N*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
}
//___________________________________________________________________________________________________
// matMul_cublas
// computes the matrix product of double matrices with arbitrary size on device
// utilisation of cublas
void matMul_cublas(const double *A, const double *B, int M,int N,int K,double *C,int threads_block)
{
double *d_A,*d_B,*d_C;
CHECK(hipMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(hipMemcpy(d_A, A, M*K*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, B, K*N*sizeof(double), hipMemcpyHostToDevice));
hipblasStatus_t stat;
hipblasHandle_t handle;
stat = hipblasCreate(&handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
}
const double alpha=1.0;
const double beta=0.0;
// Invoke kernel
hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, M, K,&alpha,(const double *)d_B, N,(const double *)d_A, K,&beta,(double *)d_C, N);
CHECK(hipDeviceSynchronize());
hipblasDestroy(handle);
// Read C from device memory
hipMemcpy(C, d_C, M*N*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
}
void matMul_gpu_sm(const double *A, const double *B, int M,int N,int K,double *C)
{
double *d_A,*d_B,*d_C;
CHECK(hipMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(hipMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(hipMemcpy(d_A, A, M*K*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, B, K*N*sizeof(double), hipMemcpyHostToDevice));
matMul_sm_onDev(d_A, d_B, M,N, K,d_C);
// Read C from device memory
hipMemcpy(C, d_C, M*N*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
}
void matMul_gpu_sm_tr(const double *A, const double *B,int A_TRANSP,int B_TRANSP,int rows_op_A,int cols_op_A,int rows_op_B,int cols_op_B, double *C)
{
double *d_A,*d_B,*d_C;
CHECK(hipMalloc((void**)&d_A,rows_op_A*cols_op_A*sizeof(double)));
CHECK(hipMalloc((void**)&d_B,rows_op_B*cols_op_B*sizeof(double)));
CHECK(hipMalloc((void**)&d_C,rows_op_A*cols_op_B*sizeof(double)));
CHECK(hipMemcpy(d_A, A, rows_op_A*cols_op_A*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, B, rows_op_B*cols_op_B*sizeof(double), hipMemcpyHostToDevice));
matMul_sm_onDev_tr(d_A,d_B,A_TRANSP,B_TRANSP, rows_op_A,cols_op_A,rows_op_B,cols_op_B,d_C);
// Read C from device memory
hipMemcpy(C, d_C, rows_op_A*cols_op_B*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
}
void matMul_gpu_sm_tr_ind(const double *A, const double *B,int A_TRANSP,int B_TRANSP,int rows_op_A,int cols_op_A,int rows_op_B,int cols_op_B, double *C)
{
double *d_A,*d_B,*d_C;
CHECK(hipMalloc((void**)&d_A,rows_op_A*cols_op_A*sizeof(double)));
CHECK(hipMalloc((void**)&d_B,rows_op_B*cols_op_B*sizeof(double)));
CHECK(hipMalloc((void**)&d_C,rows_op_A*cols_op_B*sizeof(double)));
CHECK(hipMemcpy(d_A, A, rows_op_A*cols_op_A*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, B, rows_op_B*cols_op_B*sizeof(double), hipMemcpyHostToDevice));
matMul_sm_onDev_tr_ind(d_A,d_B,A_TRANSP,B_TRANSP, rows_op_A,cols_op_A,rows_op_B,cols_op_B,d_C);
// Read C from device memory
hipMemcpy(C, d_C, rows_op_A*cols_op_B*sizeof(double),hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
// Free device memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
}
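// -----------------------------------------------------------------------------------------
// Illustrative usage sketch (editor addition, not part of the original file): shows how the
// host wrappers above are meant to be called. The matrix sizes, the demo function name and
// the MATRIX_OPERATOR_GPU_DEMO guard are assumptions rather than project conventions.
#ifdef MATRIX_OPERATOR_GPU_DEMO
#include <vector>
int matMul_gpu_demo()
{
  const int M = 64, N = 32, K = 48, threads_block = 256;
  std::vector<double> A(M*K, 1.0), B(K*N, 2.0), C(M*N, 0.0);
  // the wrapper copies A and B to the device, runs the kernel and copies C back
  matMul_gpu1(A.data(), B.data(), M, N, K, C.data(), threads_block);
  // with A filled with 1.0 and B with 2.0 every entry of C should equal 2.0*K
  return (C[0] == 2.0 * K) ? 0 : 1;
}
#endif // MATRIX_OPERATOR_GPU_DEMO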
| c706318d762fee02e074b36670bea25d075ccb20.cu | /*
SOURCE OPERATOR DEVICE MATRIX OPERATIONS GPU
BASICALLY ONLY USED FOR Testing
CONTAINS COPY OVERHEAD
AUTHOR : FABIAN DECHENT / JANNIS SCHÜRMANN
DATE : 11.08.2020
TO-DO :
CAUTION :
*/
// c++ standard headers
#include <iostream>
// standard c headers
#include <assert.h>
// own headers
#include "../common.h"
#include "matrix_operator.h"
#include "matrix_operator_gpu.h"
#include "kernel_utils.h"
#include "../global.h"
// cublas headers
#include "cublas_v2.h"
#include <cuda_runtime.h>
void add_reduce_dim_gpu(const double* mat_in,double *vec_out, int rows,int cols, int dim_red,int size_vec){
double *dev_mat_in,*dev_vec_out;
CHECK(cudaMalloc((void**)&dev_mat_in, rows*cols*sizeof(double)));
CHECK(cudaMalloc((void**)&dev_vec_out, size_vec*sizeof(double)));
CHECK(cudaMemcpy(dev_mat_in, mat_in, rows*cols*sizeof(double), cudaMemcpyHostToDevice));
add_reduce_dim_onDev(dev_mat_in,dev_vec_out, rows,cols, dim_red,size_vec);
CHECK(cudaMemcpy(vec_out, dev_vec_out, size_vec*sizeof(double), cudaMemcpyDeviceToHost));
// free cuda storage
CHECK(cudaFree(dev_mat_in));
CHECK(cudaFree(dev_vec_out));
}
void add_along_axis_gpu(const double* mat_in,const double *vec,double* mat_out, int rows,int cols, int dim_add, int size_vec){
double *dev_mat_in,*dev_mat_out,*dev_vec;
CHECK(cudaMalloc((void**)&dev_mat_in, rows*cols*sizeof(double)));
CHECK(cudaMalloc((void**)&dev_mat_out, rows*cols*sizeof(double)));
CHECK(cudaMalloc((void**)&dev_vec, size_vec*sizeof(double)));
CHECK(cudaMemcpy(dev_mat_in, mat_in, rows*cols*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_vec, vec, size_vec*sizeof(double), cudaMemcpyHostToDevice));
add_along_axis_onDev(dev_mat_in,dev_vec,dev_mat_out,rows,cols,dim_add,size_vec);
CHECK(cudaMemcpy(mat_out, dev_mat_out, rows*cols*sizeof(double), cudaMemcpyDeviceToHost));
// free cuda storage
CHECK(cudaFree(dev_mat_in));
CHECK(cudaFree(dev_mat_out));
CHECK(cudaFree(dev_vec));
}
void div_along_axis_gpu(const double* mat_in,const double *vec,double* mat_out, int rows,int cols, int dim_div, int size_vec){
double *dev_mat_in,*dev_mat_out,*dev_vec;
CHECK(cudaMalloc((void**)&dev_mat_in, rows*cols*sizeof(double)));
CHECK(cudaMalloc((void**)&dev_mat_out, rows*cols*sizeof(double)));
CHECK(cudaMalloc((void**)&dev_vec, size_vec*sizeof(double)));
CHECK(cudaMemcpy(dev_mat_in, mat_in, rows*cols*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_vec, vec, size_vec*sizeof(double), cudaMemcpyHostToDevice));
div_along_axis_onDev(dev_mat_in,dev_vec,dev_mat_out,rows,cols,dim_div,size_vec);
CHECK(cudaMemcpy(mat_out, dev_mat_out, rows*cols*sizeof(double), cudaMemcpyDeviceToHost));
// free cuda storage
CHECK(cudaFree(dev_mat_in));
CHECK(cudaFree(dev_mat_out));
CHECK(cudaFree(dev_vec));
}
//___________________________________________________________________________________________________
// matrix ard_gpu
void matrix_hadamard_gpu(double* res,
const double* lhs,
const double* rhs,
int size,
int threads_block)
{
// alloc cuda storage
double* d_res;
double* d_lhs;
double* d_rhs;
CHECK(cudaMalloc((void**)&d_res, size*sizeof(double)));
CHECK(cudaMalloc((void**)&d_lhs, size*sizeof(double)));
CHECK(cudaMalloc((void**)&d_rhs, size*sizeof(double)));
// moving matrices to device
CHECK(cudaMemcpy(d_lhs, lhs, size*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_rhs, rhs, size*sizeof(double), cudaMemcpyHostToDevice));
// calling hadamard onDev
matrix_hadamard_onDev(d_res, d_lhs, d_rhs, size, threads_block);
// moving matrices back from memory
CHECK(cudaMemcpy(res, d_res, size*sizeof(double), cudaMemcpyDeviceToHost));
// free cuda storage
CHECK(cudaFree(d_res));
CHECK(cudaFree(d_lhs));
CHECK(cudaFree(d_rhs));
}
//___________________________________________________________________________________________________
// matrix_add_gpu
void matrix_add_gpu(double* res,
const double* lhs,
const double* rhs,
int size,
int threads_block)
{
// alloc cuda storage
double* d_res;
double* d_lhs;
double* d_rhs;
CHECK(cudaMalloc((void**)&d_res, size*sizeof(double)));
CHECK(cudaMalloc((void**)&d_lhs, size*sizeof(double)));
CHECK(cudaMalloc((void**)&d_rhs, size*sizeof(double)));
// moving matrices to device
CHECK(cudaMemcpy(d_lhs, lhs, size*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_rhs, rhs, size*sizeof(double), cudaMemcpyHostToDevice));
// calling add onDev
matrix_add_onDev( d_res , d_lhs, d_rhs, size, threads_block);
// moving matrices back from memory
CHECK(cudaMemcpy(res, d_res, size*sizeof(double), cudaMemcpyDeviceToHost));
// free cuda storage
CHECK(cudaFree(d_res));
CHECK(cudaFree(d_lhs));
CHECK(cudaFree(d_rhs));
}
void mulAdd_gpu(double* res, const double* lhs, const double* rhs, const double factor, int size,int threads_block)
{
// alloc cuda storage
double *dev_res,*dev_lhs,*dev_rhs;
CHECK(cudaMalloc((void**)&dev_res, size*sizeof(double)));
CHECK(cudaMalloc((void**)&dev_lhs, size*sizeof(double)));
CHECK(cudaMalloc((void**)&dev_rhs, size*sizeof(double)));
// moving matrices to device
CHECK(cudaMemcpy(dev_lhs, lhs, size*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_rhs, rhs, size*sizeof(double), cudaMemcpyHostToDevice));
mulAdd_onDev(dev_res,dev_lhs,dev_rhs,factor,size,threads_block);
// moving matrices back from memory
CHECK(cudaMemcpy(res, dev_res, size*sizeof(double), cudaMemcpyDeviceToHost));
// free cuda storage
CHECK(cudaFree(dev_res));
CHECK(cudaFree(dev_lhs));
CHECK(cudaFree(dev_rhs));
}
void mat_transpose_gpu(const double* mat_in, double* mat_out, int rows, int cols, int threads_block){
double *d_mat_in,*d_mat_out;
CHECK(cudaMalloc((void**)&d_mat_in,rows*cols*sizeof(double)));
CHECK(cudaMalloc((void**)&d_mat_out,rows*cols*sizeof(double)));
CHECK(cudaMemcpy(d_mat_in, mat_in, rows*cols*sizeof(double), cudaMemcpyHostToDevice));
mat_transpose_onDev(d_mat_in, d_mat_out, rows, cols, threads_block);
// Read C from device memory
cudaMemcpy(mat_out, d_mat_out, rows*cols*sizeof(double),cudaMemcpyDeviceToHost);
CHECK(cudaGetLastError());
// Free device memory
CHECK(cudaFree(d_mat_in));
CHECK(cudaFree(d_mat_out));
}
void matMul_gpu1(const double *A, const double *B, int M,int N,int K,double *C, int threads_block)
{
double *d_A,*d_B,*d_C;
CHECK(cudaMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(cudaMemcpy(d_A, A, M*K*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, B, K*N*sizeof(double), cudaMemcpyHostToDevice));
matMul_onDev1(d_A,d_B, M,N,K,d_C, threads_block);
// Read C from device memory
cudaMemcpy(C, d_C, M*N*sizeof(double),cudaMemcpyDeviceToHost);
CHECK(cudaGetLastError());
// Free device memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_C));
CHECK(cudaFree(d_B));
}
void matMul_gpu2(const double *A, const double *B, int M,int N,int K,double *C,int threads_block)
{
double *d_A,*d_B,*d_C;
CHECK(cudaMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(cudaMemcpy(d_A, A, M*K*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, B, K*N*sizeof(double), cudaMemcpyHostToDevice));
matMul_onDev2(d_A, d_B, M, N, K,d_C, threads_block);
// Read C from device memory
cudaMemcpy(C, d_C, M*N*sizeof(double),cudaMemcpyDeviceToHost);
CHECK(cudaGetLastError());
// Free device memory
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_C));
}
void matMul_gpu_dsm(const double *A, const double *B, int M,int N,int K,double *C,int threads_block)
{
double *d_A,*d_B,*d_C;
CHECK(cudaMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(cudaMemcpy(d_A, A, M*K*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, B, K*N*sizeof(double), cudaMemcpyHostToDevice));
matMul_dsm_onDev(d_A,d_B, M, N, K,d_C,threads_block);
// Read C from device memory
cudaMemcpy(C, d_C, M*N*sizeof(double),cudaMemcpyDeviceToHost);
CHECK(cudaGetLastError());
// Free device memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
}
void matMul_gpu_dsm_coa(const double *A, const double *B, int M,int N,int K,double *C,int threads_block)
{
double *d_A,*d_B,*d_C;
CHECK(cudaMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(cudaMemcpy(d_A, A, M*K*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, B, K*N*sizeof(double), cudaMemcpyHostToDevice));
matMul_dsm_coa_onDev(d_A, d_B, M, N, K,d_C,threads_block);
// Read C from device memory
cudaMemcpy(C, d_C, M*N*sizeof(double),cudaMemcpyDeviceToHost);
CHECK(cudaGetLastError());
// Free device memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
}
//___________________________________________________________________________________________________
// matMul_cublas
// computes the matrix product of double matrices with arbitrary size on device
// utilisation of cublas
void matMul_cublas(const double *A, const double *B, int M,int N,int K,double *C,int threads_block)
{
double *d_A,*d_B,*d_C;
CHECK(cudaMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(cudaMemcpy(d_A, A, M*K*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, B, K*N*sizeof(double), cudaMemcpyHostToDevice));
cublasStatus_t stat;
cublasHandle_t handle;
stat = cublasCreate(&handle);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
}
const double alpha=1.0;
const double beta=0.0;
// Invoke kernel
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K,&alpha,(const double *)d_B, N,(const double *)d_A, K,&beta,(double *)d_C, N);
CHECK(cudaDeviceSynchronize());
cublasDestroy(handle);
// Read C from device memory
cudaMemcpy(C, d_C, M*N*sizeof(double),cudaMemcpyDeviceToHost);
CHECK(cudaGetLastError());
// Free device memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
}
void matMul_gpu_sm(const double *A, const double *B, int M,int N,int K,double *C)
{
double *d_A,*d_B,*d_C;
CHECK(cudaMalloc((void**)&d_A,M*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_B,N*K*sizeof(double)));
CHECK(cudaMalloc((void**)&d_C,M*N*sizeof(double)));
CHECK(cudaMemcpy(d_A, A, M*K*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, B, K*N*sizeof(double), cudaMemcpyHostToDevice));
matMul_sm_onDev(d_A, d_B, M,N, K,d_C);
// Read C from device memory
cudaMemcpy(C, d_C, M*N*sizeof(double),cudaMemcpyDeviceToHost);
CHECK(cudaGetLastError());
// Free device memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
}
void matMul_gpu_sm_tr(const double *A, const double *B,int A_TRANSP,int B_TRANSP,int rows_op_A,int cols_op_A,int rows_op_B,int cols_op_B, double *C)
{
double *d_A,*d_B,*d_C;
CHECK(cudaMalloc((void**)&d_A,rows_op_A*cols_op_A*sizeof(double)));
CHECK(cudaMalloc((void**)&d_B,rows_op_B*cols_op_B*sizeof(double)));
CHECK(cudaMalloc((void**)&d_C,rows_op_A*cols_op_B*sizeof(double)));
CHECK(cudaMemcpy(d_A, A, rows_op_A*cols_op_A*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, B, rows_op_B*cols_op_B*sizeof(double), cudaMemcpyHostToDevice));
matMul_sm_onDev_tr(d_A,d_B,A_TRANSP,B_TRANSP, rows_op_A,cols_op_A,rows_op_B,cols_op_B,d_C);
// Read C from device memory
cudaMemcpy(C, d_C, rows_op_A*cols_op_B*sizeof(double),cudaMemcpyDeviceToHost);
CHECK(cudaGetLastError());
// Free device memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
}
void matMul_gpu_sm_tr_ind(const double *A, const double *B,int A_TRANSP,int B_TRANSP,int rows_op_A,int cols_op_A,int rows_op_B,int cols_op_B, double *C)
{
double *d_A,*d_B,*d_C;
CHECK(cudaMalloc((void**)&d_A,rows_op_A*cols_op_A*sizeof(double)));
CHECK(cudaMalloc((void**)&d_B,rows_op_B*cols_op_B*sizeof(double)));
CHECK(cudaMalloc((void**)&d_C,rows_op_A*cols_op_B*sizeof(double)));
CHECK(cudaMemcpy(d_A, A, rows_op_A*cols_op_A*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, B, rows_op_B*cols_op_B*sizeof(double), cudaMemcpyHostToDevice));
matMul_sm_onDev_tr_ind(d_A,d_B,A_TRANSP,B_TRANSP, rows_op_A,cols_op_A,rows_op_B,cols_op_B,d_C);
// Read C from device memory
cudaMemcpy(C, d_C, rows_op_A*cols_op_B*sizeof(double),cudaMemcpyDeviceToHost);
CHECK(cudaGetLastError());
// Free device memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
}
|
81c3c86391cd7808612f4e9a4752bf38248db19e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void make_pillar_histo_kernel( const float* dev_points, float* dev_pillar_x_in_coors, float* dev_pillar_y_in_coors, float* dev_pillar_z_in_coors, float* dev_pillar_i_in_coors, int* pillar_count_histo, const int num_points, const int max_points_per_pillar, const int GRID_X_SIZE, const int GRID_Y_SIZE, const int GRID_Z_SIZE, const float MIN_X_RANGE, const float MIN_Y_RANGE, const float MIN_Z_RANGE, const float PILLAR_X_SIZE, const float PILLAR_Y_SIZE, const float PILLAR_Z_SIZE, const int NUM_BOX_CORNERS )
{
int th_i = threadIdx.x + blockIdx.x * blockDim.x;
if(th_i >= num_points)
{
return;
}
int y_coor = floor((dev_points[th_i*NUM_BOX_CORNERS + 1] - MIN_Y_RANGE)/PILLAR_Y_SIZE);
int x_coor = floor((dev_points[th_i*NUM_BOX_CORNERS + 0] - MIN_X_RANGE)/PILLAR_X_SIZE);
int z_coor = floor((dev_points[th_i*NUM_BOX_CORNERS + 2] - MIN_Z_RANGE)/PILLAR_Z_SIZE);
if(x_coor >= 0 && x_coor < GRID_X_SIZE &&
y_coor >= 0 && y_coor < GRID_Y_SIZE &&
z_coor >= 0 && z_coor < GRID_Z_SIZE)
{
int count = atomicAdd(&pillar_count_histo[y_coor*GRID_X_SIZE + x_coor], 1);
if(count < max_points_per_pillar)
{
int ind = y_coor*GRID_X_SIZE*max_points_per_pillar + x_coor*max_points_per_pillar + count;
dev_pillar_x_in_coors[ind] = dev_points[th_i*NUM_BOX_CORNERS + 0];
dev_pillar_y_in_coors[ind] = dev_points[th_i*NUM_BOX_CORNERS + 1];
dev_pillar_z_in_coors[ind] = dev_points[th_i*NUM_BOX_CORNERS + 2];
dev_pillar_i_in_coors[ind] = dev_points[th_i*NUM_BOX_CORNERS + 3];
}
}
} | 81c3c86391cd7808612f4e9a4752bf38248db19e.cu | #include "includes.h"
__global__ void make_pillar_histo_kernel( const float* dev_points, float* dev_pillar_x_in_coors, float* dev_pillar_y_in_coors, float* dev_pillar_z_in_coors, float* dev_pillar_i_in_coors, int* pillar_count_histo, const int num_points, const int max_points_per_pillar, const int GRID_X_SIZE, const int GRID_Y_SIZE, const int GRID_Z_SIZE, const float MIN_X_RANGE, const float MIN_Y_RANGE, const float MIN_Z_RANGE, const float PILLAR_X_SIZE, const float PILLAR_Y_SIZE, const float PILLAR_Z_SIZE, const int NUM_BOX_CORNERS )
{
int th_i = threadIdx.x + blockIdx.x * blockDim.x;
if(th_i >= num_points)
{
return;
}
int y_coor = floor((dev_points[th_i*NUM_BOX_CORNERS + 1] - MIN_Y_RANGE)/PILLAR_Y_SIZE);
int x_coor = floor((dev_points[th_i*NUM_BOX_CORNERS + 0] - MIN_X_RANGE)/PILLAR_X_SIZE);
int z_coor = floor((dev_points[th_i*NUM_BOX_CORNERS + 2] - MIN_Z_RANGE)/PILLAR_Z_SIZE);
if(x_coor >= 0 && x_coor < GRID_X_SIZE &&
y_coor >= 0 && y_coor < GRID_Y_SIZE &&
z_coor >= 0 && z_coor < GRID_Z_SIZE)
{
int count = atomicAdd(&pillar_count_histo[y_coor*GRID_X_SIZE + x_coor], 1);
if(count < max_points_per_pillar)
{
int ind = y_coor*GRID_X_SIZE*max_points_per_pillar + x_coor*max_points_per_pillar + count;
dev_pillar_x_in_coors[ind] = dev_points[th_i*NUM_BOX_CORNERS + 0];
dev_pillar_y_in_coors[ind] = dev_points[th_i*NUM_BOX_CORNERS + 1];
dev_pillar_z_in_coors[ind] = dev_points[th_i*NUM_BOX_CORNERS + 2];
dev_pillar_i_in_coors[ind] = dev_points[th_i*NUM_BOX_CORNERS + 3];
}
}
} |
955d0414731c621e38e8d252873a6002b398cdb3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 2008 BOROUJERDI Maxime. All rights reserved.
*/
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <new>
#include "makebmp.h"
#include <cutil.h>
#include <helper_timer.h>
#include <rayTracing_kernel.cu>
#define PI 3.141592654f
#define Angle(a) ((a * PI) / 180.0)
int g_verbose;
int t = 1;
class Observateur {
private:
matrice3x4 M; // U, V, W
  float df; // focal distance
public:
Observateur();
Observateur(const float3 &, const float3 &, const float3 &, double);
inline const matrice3x4 &getMatrice() const { return M; }
inline float getDistance() const { return df; }
};
Observateur::Observateur() {
M.m[0] = make_float4(0.0f, 0.0f, 1.0f, 0.0f);
M.m[1] = make_float4(0.0f, 1.0f, 0.0f, 0.0f);
M.m[2] = make_float4(1.0f, 0.0f, 0.0f, 0.0f);
df = 1.0 / tan(Angle(65) / 2.0);
}
Observateur::Observateur(const float3 &p, const float3 &u, const float3 &v,
double a) {
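  // Build an orthonormal camera basis: U follows the viewing direction u, V is the up
  // vector v made orthogonal to U, and W = U x V. df is the focal distance derived
  // from the field-of-view angle a (in degrees).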
float3 VP, U, V, W;
VP = normalize(v);
U = normalize(u);
V = normalize(VP - dot(U, VP) * U);
W = normalize(cross(U, V));
M.m[0] = make_float4(U.x, U.y, U.z, p.x);
M.m[1] = make_float4(V.x, V.y, V.z, p.y);
M.m[2] = make_float4(W.x, W.y, W.z, p.z);
df = 1.0 / tan(Angle(a) / 2.0);
}
Observateur obs = Observateur(
make_float3(0.0f, 0.5f, 2.0f),
normalize(make_float3(0.0f, 0.0f, 0.0f) - make_float3(0.0f, 0.5f, 2.0f)),
make_float3(0.0f, 1.0f, 0.0f), 65.0f);
#include <rayTracing_kernel.cu>
unsigned width = 64; // 640; //512; //16; //32; //512;
unsigned height = 64; // 480; //512; //16;//512;
dim3 blockSize(16, 8);
dim3 gridSize(width / blockSize.x, height / blockSize.y);
StopWatchInterface *timer = NULL;
uint *c_output, *d_output;
int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); }
void initPixelBuffer() {
// int num = width * height;
// float phi = 2.0f/(float)min(width,height);
gridSize = dim3(iDivUp(width, blockSize.x), iDivUp(height, blockSize.y));
};
// Render the image with CUDA
void render(Object **objList, int n) {
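  // Launch the ray-tracing kernel over the width x height pixel grid, copy the
  // framebuffer back to the host and fold every pixel into a checksum used to validate
  // the run (-v additionally prints the raw pixel values).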
sdkStartTimer(&timer);
hipLaunchKernelGGL(( render), dim3(gridSize), dim3(blockSize), 0, 0, d_output, objList, width, height,
obs.getDistance(), n);
// render_vptr <<<gridSize, blockSize>>>(d_output, objList, width, height,
// obs.getDistance(), n);
CUDA_SAFE_CALL(hipDeviceSynchronize());
sdkStopTimer(&timer);
CUDA_SAFE_CALL(hipMemcpy(c_output, d_output, width * height * sizeof(uint),
hipMemcpyDeviceToHost));
unsigned long long int checksum = 0;
for (int y = (height - 1); y >= 0; y--) {
if (g_verbose) printf("\n");
for (int x = 0; x < width; x++) {
if (g_verbose) printf("%010u ", (unsigned)c_output[x + y * width]);
checksum += c_output[x + y * width];
}
}
printf("\n");
printf("checksum=%llx\n", checksum);
}
// Display the result with OpenGL
void display(Object **objList, int n) {
  // Display the result
render(objList, n);
printf("Kernel Time: %f \n", sdkGetTimerValue(&timer));
t--;
if (!t) {
return;
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
// initialise card and timer
int deviceCount;
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
obj_alloc my_obj_alloc(&shared_mem, atoll(argv[4]));
CUDA_SAFE_CALL(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "There is no device.\n");
exit(EXIT_FAILURE);
}
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
CUDA_SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev));
if (deviceProp.major >= 1) break;
}
if (dev == deviceCount) {
fprintf(stderr, "There is no device supporting CUDA.\n");
exit(EXIT_FAILURE);
} else
CUDA_SAFE_CALL(hipSetDevice(dev));
int i, commandline_error;
commandline_error = 0;
g_verbose = 0;
if (argc >= 5) {
width = atoi(argv[1]);
height = atoi(argv[2]);
for (i = 5; i < argc; i++) {
if (argv[i][0] == '-') {
switch (argv[i][1]) {
case 'v':
g_verbose = 1;
break;
default:
commandline_error = 1;
}
} else
commandline_error = 1;
}
} else
commandline_error = 1;
if (commandline_error || !width || !height) {
printf("Usage: ./rayTracing <WIDTH> <HEIGHT> [-v]\n");
printf(
"where WIDTH and HEIGHT are the screen dimensions and -v is used "
"to display an abstract representation of the output.\n");
return 1;
}
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
initialize_bmp(width, height, 32);
Object **objList;
int n = atoi(argv[3]);
float *A;
float *d_A;
A = (float *)malloc(n * 8 * sizeof(float));
d_A = (float *)my_obj_alloc.calloc<float>(n * 8);
srand(47);
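  // Scene layout: each object uses 8 floats in A: RGBA colour in A[i*8 .. i*8+3],
  // centre position in A[i*8+4 .. i*8+6] and radius in A[i*8+7]. The first four
  // objects are fixed, the remaining n-4 are randomized.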
A[0] = 0.0f;
A[1] = 1.0f;
A[2] = 1.0f;
A[3] = 1.0f;
A[4] = 0.0f;
A[5] = -1.5f;
A[6] = -0.0f;
A[7] = 0.5f;
A[8] = 1.0f;
A[8 + 1] = 0.0f;
A[8 + 2] = 0.0f;
A[8 + 3] = 1.0f;
A[8 + 4] = -1.0f;
A[8 + 5] = 0.0f;
A[8 + 6] = -1.0f;
A[8 + 7] = 0.5f;
A[16] = 0.0f;
A[16 + 1] = 0.0f;
A[16 + 2] = 1.0f;
A[16 + 3] = 1.0f;
A[16 + 4] = 1.0f;
A[16 + 5] = -0.0f;
A[16 + 6] = -1.0f;
A[16 + 7] = 0.5f;
A[24] = 0.0f;
A[24 + 1] = 1.0f;
A[24 + 2] = 0.0f;
A[24 + 3] = 1.0f;
A[24 + 4] = 0.0f;
A[24 + 5] = -0.0f;
A[24 + 6] = -2.0f;
A[24 + 7] = 0.75f;
for (int i(4); i < n; i++) {
float r, v, b;
float tmp1(5.0f * ((r = (float(rand() % 255) / 255.0f))) - 2.5f);
float tmp2(5.0f * ((v = (float(rand() % 255) / 255.0f))) - 2.5f);
float tmp3(-5.0f * ((b = (float(rand() % 255) / 255.0f))));
float tmp4((rand() % 100) / 100.0f);
A[i * 8 + 4] = tmp1;
A[i * 8 + 5] = tmp2;
A[i * 8 + 6] = tmp3;
A[i * 8 + 7] = tmp4;
A[i * 8] = r;
A[i * 8 + 1] = v;
A[i * 8 + 2] = b;
A[i * 8 + 3] = 1.0f;
}
hipMemcpy(d_A, A, n * 8 * sizeof(float), hipMemcpyHostToDevice);
objList = (Object **)my_obj_alloc.calloc<Object *>(n);
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
initObject(objList, d_A, n, &my_obj_alloc);
hipLaunchKernelGGL(( initObject_kern), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, objList, d_A, n);
my_obj_alloc.create_tree();
range_tree = my_obj_alloc.get_range_tree();
tree_size_g = my_obj_alloc.get_tree_size();
hipDeviceSynchronize();
c_output = (uint *)calloc(width * height, sizeof(uint));
CUDA_SAFE_CALL(
hipMalloc((void **)&d_output, width * height * sizeof(uint)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(MView, (void *)&obs, 3 * sizeof(float4)));
initPixelBuffer();
display(objList, n);
create_bmp(c_output);
sdkDeleteTimer(&timer);
return 0;
}
| 955d0414731c621e38e8d252873a6002b398cdb3.cu | /*
 * Copyright 2008 BOROUJERDI Maxime. All rights reserved.
*/
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <new>
#include "makebmp.h"
#include <cutil.h>
#include <helper_timer.h>
#include <rayTracing_kernel.cu>
#define PI 3.141592654f
#define Angle(a) ((a * PI) / 180.0)
int g_verbose;
int t = 1;
class Observateur {
private:
matrice3x4 M; // U, V, W
  float df; // focal distance
public:
Observateur();
Observateur(const float3 &, const float3 &, const float3 &, double);
inline const matrice3x4 &getMatrice() const { return M; }
inline float getDistance() const { return df; }
};
Observateur::Observateur() {
M.m[0] = make_float4(0.0f, 0.0f, 1.0f, 0.0f);
M.m[1] = make_float4(0.0f, 1.0f, 0.0f, 0.0f);
M.m[2] = make_float4(1.0f, 0.0f, 0.0f, 0.0f);
df = 1.0 / tan(Angle(65) / 2.0);
}
Observateur::Observateur(const float3 &p, const float3 &u, const float3 &v,
double a) {
float3 VP, U, V, W;
VP = normalize(v);
U = normalize(u);
V = normalize(VP - dot(U, VP) * U);
W = normalize(cross(U, V));
M.m[0] = make_float4(U.x, U.y, U.z, p.x);
M.m[1] = make_float4(V.x, V.y, V.z, p.y);
M.m[2] = make_float4(W.x, W.y, W.z, p.z);
df = 1.0 / tan(Angle(a) / 2.0);
}
Observateur obs = Observateur(
make_float3(0.0f, 0.5f, 2.0f),
normalize(make_float3(0.0f, 0.0f, 0.0f) - make_float3(0.0f, 0.5f, 2.0f)),
make_float3(0.0f, 1.0f, 0.0f), 65.0f);
#include <rayTracing_kernel.cu>
unsigned width = 64; // 640; //512; //16; //32; //512;
unsigned height = 64; // 480; //512; //16;//512;
dim3 blockSize(16, 8);
dim3 gridSize(width / blockSize.x, height / blockSize.y);
StopWatchInterface *timer = NULL;
uint *c_output, *d_output;
int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); }
void initPixelBuffer() {
// int num = width * height;
// float phi = 2.0f/(float)min(width,height);
gridSize = dim3(iDivUp(width, blockSize.x), iDivUp(height, blockSize.y));
};
// Render the image with CUDA
void render(Object **objList, int n) {
sdkStartTimer(&timer);
render<<<gridSize, blockSize>>>(d_output, objList, width, height,
obs.getDistance(), n);
// render_vptr <<<gridSize, blockSize>>>(d_output, objList, width, height,
// obs.getDistance(), n);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
sdkStopTimer(&timer);
CUDA_SAFE_CALL(cudaMemcpy(c_output, d_output, width * height * sizeof(uint),
cudaMemcpyDeviceToHost));
unsigned long long int checksum = 0;
for (int y = (height - 1); y >= 0; y--) {
if (g_verbose) printf("\n");
for (int x = 0; x < width; x++) {
if (g_verbose) printf("%010u ", (unsigned)c_output[x + y * width]);
checksum += c_output[x + y * width];
}
}
printf("\n");
printf("checksum=%llx\n", checksum);
}
// Display the result with OpenGL
void display(Object **objList, int n) {
  // Display the result
render(objList, n);
printf("Kernel Time: %f \n", sdkGetTimerValue(&timer));
t--;
if (!t) {
return;
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
// initialise card and timer
int deviceCount;
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
obj_alloc my_obj_alloc(&shared_mem, atoll(argv[4]));
CUDA_SAFE_CALL(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "There is no device.\n");
exit(EXIT_FAILURE);
}
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev));
if (deviceProp.major >= 1) break;
}
if (dev == deviceCount) {
fprintf(stderr, "There is no device supporting CUDA.\n");
exit(EXIT_FAILURE);
} else
CUDA_SAFE_CALL(cudaSetDevice(dev));
int i, commandline_error;
commandline_error = 0;
g_verbose = 0;
if (argc >= 5) {
width = atoi(argv[1]);
height = atoi(argv[2]);
for (i = 5; i < argc; i++) {
if (argv[i][0] == '-') {
switch (argv[i][1]) {
case 'v':
g_verbose = 1;
break;
default:
commandline_error = 1;
}
} else
commandline_error = 1;
}
} else
commandline_error = 1;
if (commandline_error || !width || !height) {
printf("Usage: ./rayTracing <WIDTH> <HEIGHT> [-v]\n");
printf(
"where WIDTH and HEIGHT are the screen dimensions and -v is used "
"to display an abstract representation of the output.\n");
return 1;
}
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
initialize_bmp(width, height, 32);
Object **objList;
int n = atoi(argv[3]);
float *A;
float *d_A;
A = (float *)malloc(n * 8 * sizeof(float));
d_A = (float *)my_obj_alloc.calloc<float>(n * 8);
srand(47);
A[0] = 0.0f;
A[1] = 1.0f;
A[2] = 1.0f;
A[3] = 1.0f;
A[4] = 0.0f;
A[5] = -1.5f;
A[6] = -0.0f;
A[7] = 0.5f;
A[8] = 1.0f;
A[8 + 1] = 0.0f;
A[8 + 2] = 0.0f;
A[8 + 3] = 1.0f;
A[8 + 4] = -1.0f;
A[8 + 5] = 0.0f;
A[8 + 6] = -1.0f;
A[8 + 7] = 0.5f;
A[16] = 0.0f;
A[16 + 1] = 0.0f;
A[16 + 2] = 1.0f;
A[16 + 3] = 1.0f;
A[16 + 4] = 1.0f;
A[16 + 5] = -0.0f;
A[16 + 6] = -1.0f;
A[16 + 7] = 0.5f;
A[24] = 0.0f;
A[24 + 1] = 1.0f;
A[24 + 2] = 0.0f;
A[24 + 3] = 1.0f;
A[24 + 4] = 0.0f;
A[24 + 5] = -0.0f;
A[24 + 6] = -2.0f;
A[24 + 7] = 0.75f;
for (int i(4); i < n; i++) {
float r, v, b;
float tmp1(5.0f * ((r = (float(rand() % 255) / 255.0f))) - 2.5f);
float tmp2(5.0f * ((v = (float(rand() % 255) / 255.0f))) - 2.5f);
float tmp3(-5.0f * ((b = (float(rand() % 255) / 255.0f))));
float tmp4((rand() % 100) / 100.0f);
A[i * 8 + 4] = tmp1;
A[i * 8 + 5] = tmp2;
A[i * 8 + 6] = tmp3;
A[i * 8 + 7] = tmp4;
A[i * 8] = r;
A[i * 8 + 1] = v;
A[i * 8 + 2] = b;
A[i * 8 + 3] = 1.0f;
}
cudaMemcpy(d_A, A, n * 8 * sizeof(float), cudaMemcpyHostToDevice);
objList = (Object **)my_obj_alloc.calloc<Object *>(n);
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
initObject(objList, d_A, n, &my_obj_alloc);
initObject_kern<<<blocksPerGrid, threadsPerBlock>>>(objList, d_A, n);
my_obj_alloc.create_tree();
range_tree = my_obj_alloc.get_range_tree();
tree_size_g = my_obj_alloc.get_tree_size();
cudaDeviceSynchronize();
c_output = (uint *)calloc(width * height, sizeof(uint));
CUDA_SAFE_CALL(
cudaMalloc((void **)&d_output, width * height * sizeof(uint)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(MView, (void *)&obs, 3 * sizeof(float4)));
initPixelBuffer();
display(objList, n);
create_bmp(c_output);
sdkDeleteTimer(&timer);
return 0;
}
|
516e42a524e76a334ce85b0bbcfc7ff3da4a195d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/generate_proposals_op.h"
#include "caffe2/operators/generate_proposals_op_util_boxes.h" // BBOX_XFORM_CLIP_DEFAULT
#include "caffe2/operators/generate_proposals_op_util_nms.h"
#include "caffe2/operators/generate_proposals_op_util_nms_gpu.h"
#if defined(USE_ROCM)
#include <cfloat>
#endif
using caffe2::utils::RotatedBox;
namespace caffe2 {
namespace {
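// GeneratePreNMS*BoxesKernel: one thread per (image, top-scoring anchor) pair. The
// flattened conv-layer index is decomposed back into (anchor, h, w), the predicted
// deltas are applied to the shifted anchor, the box is clipped to the image (only
// near-upright boxes in the rotated variant) and boxes below min_size are flagged out,
// with their score forced to FLT_MIN, so they can be dropped before NMS while keeping
// the sorted order stable.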
__global__ void GeneratePreNMSUprightBoxesKernel(
const int* d_sorted_scores_keys,
const int nboxes_to_generate,
const float* d_bbox_deltas,
const float4* d_anchors,
const int H,
const int W,
const int A,
const float feat_stride,
const float min_size,
const float* d_img_info_vec,
const int num_images,
const float bbox_xform_clip,
const bool legacy_plus_one,
float4* d_out_boxes,
const int prenms_nboxes, // leading dimension of out_boxes
float* d_inout_scores,
char* d_boxes_keep_flags) {
const int K = H * W;
const int KA = K * A;
CUDA_2D_KERNEL_LOOP(ibox, nboxes_to_generate, image_index, num_images) {
// box_conv_index : # of the same box, but indexed in
// the scores from the conv layer, of shape (A,H,W)
// the num_images dimension was already removed
// box_conv_index = a*K + h*W + w
const int box_conv_index = d_sorted_scores_keys[image_index * KA + ibox];
// We want to decompose box_conv_index in (a,h,w)
// such as box_conv_index = a*K + h*W + w
// (avoiding modulos in the process)
int remaining = box_conv_index;
const int dA = K; // stride of A
const int a = remaining / dA;
remaining -= a * dA;
const int dH = W; // stride of H
const int h = remaining / dH;
remaining -= h * dH;
const int w = remaining; // dW = 1
// Loading the anchor a
// float4 is a struct with float x,y,z,w
const float4 anchor = d_anchors[a];
// x1,y1,x2,y2 :coordinates of anchor a, shifted for position (h,w)
const float shift_w = feat_stride * w;
float x1 = shift_w + anchor.x;
float x2 = shift_w + anchor.z;
const float shift_h = feat_stride * h;
float y1 = shift_h + anchor.y;
float y2 = shift_h + anchor.w;
// TODO use fast math when possible
// Deltas for that box
// Deltas of shape (num_images,4*A,K)
// We're going to compute 4 scattered reads
// better than the alternative, ie transposing the complete deltas
// array first
int deltas_idx = image_index * (KA * 4) + a * 4 * K + h * W + w;
const float dx = d_bbox_deltas[deltas_idx];
// Stride of K between each dimension
deltas_idx += K;
const float dy = d_bbox_deltas[deltas_idx];
deltas_idx += K;
float dw = d_bbox_deltas[deltas_idx];
deltas_idx += K;
float dh = d_bbox_deltas[deltas_idx];
// Upper bound on dw,dh
dw = fmin(dw, bbox_xform_clip);
dh = fmin(dh, bbox_xform_clip);
// Applying the deltas
float width = x2 - x1 + float(int(legacy_plus_one));
const float ctr_x = x1 + 0.5f * width;
const float pred_ctr_x = ctr_x + width * dx; // TODO fuse madd
const float pred_w = width * expf(dw);
x1 = pred_ctr_x - 0.5f * pred_w;
x2 = pred_ctr_x + 0.5f * pred_w - float(int(legacy_plus_one));
float height = y2 - y1 + float(int(legacy_plus_one));
const float ctr_y = y1 + 0.5f * height;
const float pred_ctr_y = ctr_y + height * dy;
const float pred_h = height * expf(dh);
y1 = pred_ctr_y - 0.5f * pred_h;
y2 = pred_ctr_y + 0.5f * pred_h - float(int(legacy_plus_one));
// Clipping box to image
const float img_height = d_img_info_vec[3 * image_index + 0];
const float img_width = d_img_info_vec[3 * image_index + 1];
const float min_size_scaled =
min_size * d_img_info_vec[3 * image_index + 2];
x1 = fmax(fmin(x1, img_width - float(int(legacy_plus_one))), 0.0f);
y1 = fmax(fmin(y1, img_height - float(int(legacy_plus_one))), 0.0f);
x2 = fmax(fmin(x2, img_width - float(int(legacy_plus_one))), 0.0f);
y2 = fmax(fmin(y2, img_height - float(int(legacy_plus_one))), 0.0f);
// Filter boxes
// Removing boxes with one dim < min_size
// (center of box is in image, because of previous step)
width = x2 - x1 + float(int(legacy_plus_one)); // may have changed
height = y2 - y1 + float(int(legacy_plus_one));
bool keep_box = fmin(width, height) >= min_size_scaled;
// We are not deleting the box right now even if !keep_box
// we want to keep the relative order of the elements stable
// we'll do it in such a way later
// d_boxes_keep_flags size: (num_images,prenms_nboxes)
// d_out_boxes size: (num_images,prenms_nboxes)
const int out_index = image_index * prenms_nboxes + ibox;
d_boxes_keep_flags[out_index] = keep_box;
d_out_boxes[out_index] = {x1, y1, x2, y2};
// d_inout_scores size: (num_images,KA)
if (!keep_box)
d_inout_scores[image_index * KA + ibox] = FLT_MIN; // for NMS
}
}
__global__ void GeneratePreNMSRotatedBoxesKernel(
const int* d_sorted_scores_keys,
const int nboxes_to_generate,
const float* d_bbox_deltas,
const RotatedBox* d_anchors,
const int H,
const int W,
const int A,
const float feat_stride,
const float min_size,
const float* d_img_info_vec,
const int num_images,
const float bbox_xform_clip,
const bool legacy_plus_one,
const bool angle_bound_on,
const int angle_bound_lo,
const int angle_bound_hi,
const bool clip_angle_thresh,
RotatedBox* d_out_boxes,
const int prenms_nboxes, // leading dimension of out_boxes
float* d_inout_scores,
char* d_boxes_keep_flags) {
constexpr float PI = 3.14159265358979323846;
const int K = H * W;
const int KA = K * A;
CUDA_2D_KERNEL_LOOP(ibox, nboxes_to_generate, image_index, num_images) {
// box_conv_index : # of the same box, but indexed in
// the scores from the conv layer, of shape (A,H,W)
// the num_images dimension was already removed
// box_conv_index = a*K + h*W + w
const int box_conv_index = d_sorted_scores_keys[image_index * KA + ibox];
// We want to decompose box_conv_index in (a,h,w)
// such as box_conv_index = a*K + h*W + w
// (avoiding modulos in the process)
int remaining = box_conv_index;
const int dA = K; // stride of A
const int a = remaining / dA;
remaining -= a * dA;
const int dH = W; // stride of H
const int h = remaining / dH;
remaining -= h * dH;
const int w = remaining; // dW = 1
// Loading the anchor a and applying shifts.
// RotatedBox in [ctr_x, ctr_y, w, h, angle] format.
// Zero shift for width, height and angle.
RotatedBox box = d_anchors[a];
box.x_ctr += feat_stride * w; // x_ctr shifted for w
box.y_ctr += feat_stride * h; // y_ctr shifted for h
// TODO use fast math when possible
// Deltas for that box
// Deltas of shape (num_images,5*A,K)
// We're going to compute 5 scattered reads
// better than the alternative, ie transposing the complete deltas
// array first
int deltas_idx = image_index * (KA * 5) + a * 5 * K + h * W + w;
// Stride of K between each dimension
RotatedBox delta;
delta.x_ctr = d_bbox_deltas[deltas_idx + K * 0];
delta.y_ctr = d_bbox_deltas[deltas_idx + K * 1];
delta.w = d_bbox_deltas[deltas_idx + K * 2];
delta.h = d_bbox_deltas[deltas_idx + K * 3];
delta.a = d_bbox_deltas[deltas_idx + K * 4];
// Upper bound on dw,dh
delta.w = fmin(delta.w, bbox_xform_clip);
delta.h = fmin(delta.h, bbox_xform_clip);
// Convert back to degrees
delta.a *= 180.f / PI;
// Applying the deltas
box.x_ctr += delta.x_ctr * box.w;
box.y_ctr += delta.y_ctr * box.h;
box.w *= expf(delta.w);
box.h *= expf(delta.h);
box.a += delta.a;
if (angle_bound_on) {
// Normalize angle to be within [angle_bound_lo, angle_bound_hi].
// Deltas are guaranteed to be <= period / 2 while computing training
// targets by bbox_transform_inv.
const float period = angle_bound_hi - angle_bound_lo;
// CAFFE_ENFORCE(period > 0 && period % 180 == 0);
if (box.a < angle_bound_lo) {
box.a += period;
} else if (box.a > angle_bound_hi) {
box.a -= period;
}
}
// Clipping box to image.
// Only clip boxes that are almost upright (with a tolerance of
// clip_angle_thresh) for backward compatibility with horizontal boxes.
const float img_height = d_img_info_vec[3 * image_index + 0];
const float img_width = d_img_info_vec[3 * image_index + 1];
const float min_size_scaled =
min_size * d_img_info_vec[3 * image_index + 2];
if (fabs(box.a) <= clip_angle_thresh) {
// Convert from [x_ctr, y_ctr, w, h] to [x1, y1, x2, y2]
float x1 = box.x_ctr - (box.w - float(int(legacy_plus_one))) / 2.f;
float y1 = box.y_ctr - (box.h - float(int(legacy_plus_one))) / 2.f;
float x2 = x1 + box.w - float(int(legacy_plus_one));
float y2 = y1 + box.h - float(int(legacy_plus_one));
// Clip
x1 = fmax(fmin(x1, img_width - float(int(legacy_plus_one))), 0.0f);
y1 = fmax(fmin(y1, img_height - float(int(legacy_plus_one))), 0.0f);
x2 = fmax(fmin(x2, img_width - float(int(legacy_plus_one))), 0.0f);
y2 = fmax(fmin(y2, img_height - float(int(legacy_plus_one))), 0.0f);
// Convert back to [x_ctr, y_ctr, w, h]
box.x_ctr = (x1 + x2) / 2.f;
box.y_ctr = (y1 + y2) / 2.f;
box.w = x2 - x1 + float(int(legacy_plus_one));
box.h = y2 - y1 + float(int(legacy_plus_one));
}
// Filter boxes.
// Removing boxes with one dim < min_size or center outside the image.
bool keep_box = (fmin(box.w, box.h) >= min_size_scaled) &&
(box.x_ctr < img_width) && (box.y_ctr < img_height);
// We are not deleting the box right now even if !keep_box
// we want to keep the relative order of the elements stable
// we'll do it in such a way later
// d_boxes_keep_flags size: (num_images,prenms_nboxes)
// d_out_boxes size: (num_images,prenms_nboxes)
const int out_index = image_index * prenms_nboxes + ibox;
d_boxes_keep_flags[out_index] = keep_box;
d_out_boxes[out_index] = box;
// d_inout_scores size: (num_images,KA)
if (!keep_box) {
d_inout_scores[image_index * KA + ibox] = FLT_MIN; // for NMS
}
}
}
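// WriteUprightBoxesOutput / WriteRotatedBoxesOutput copy the boxes that survive NMS into
// the output RoI tensors, prepending the image index so each row becomes
// (batch_idx, x1, y1, x2, y2) or (batch_idx, ctr_x, ctr_y, w, h, angle).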
__global__ void WriteUprightBoxesOutput(
const float4* d_image_boxes,
const float* d_image_scores,
const int* d_image_boxes_keep_list,
const int nboxes,
const int image_index,
float* d_image_out_rois,
float* d_image_out_rois_probs) {
CUDA_1D_KERNEL_LOOP(i, nboxes) {
const int ibox = d_image_boxes_keep_list[i];
const float4 box = d_image_boxes[ibox];
const float score = d_image_scores[ibox];
// Scattered memory accesses
// postnms_nboxes is small anyway
d_image_out_rois_probs[i] = score;
const int base_idx = 5 * i;
d_image_out_rois[base_idx + 0] = image_index;
d_image_out_rois[base_idx + 1] = box.x;
d_image_out_rois[base_idx + 2] = box.y;
d_image_out_rois[base_idx + 3] = box.z;
d_image_out_rois[base_idx + 4] = box.w;
}
}
__global__ void WriteRotatedBoxesOutput(
const RotatedBox* d_image_boxes,
const float* d_image_scores,
const int* d_image_boxes_keep_list,
const int nboxes,
const int image_index,
float* d_image_out_rois,
float* d_image_out_rois_probs) {
CUDA_1D_KERNEL_LOOP(i, nboxes) {
const int ibox = d_image_boxes_keep_list[i];
const RotatedBox box = d_image_boxes[ibox];
const float score = d_image_scores[ibox];
// Scattered memory accesses
// postnms_nboxes is small anyway
d_image_out_rois_probs[i] = score;
const int base_idx = 6 * i;
d_image_out_rois[base_idx + 0] = image_index;
d_image_out_rois[base_idx + 1] = box.x_ctr;
d_image_out_rois[base_idx + 2] = box.y_ctr;
d_image_out_rois[base_idx + 3] = box.w;
d_image_out_rois[base_idx + 4] = box.h;
d_image_out_rois[base_idx + 5] = box.a;
}
}
__global__ void InitializeDataKernel(
const int num_images,
const int KA,
int* d_image_offsets,
int* d_boxes_keys_iota) {
CUDA_2D_KERNEL_LOOP(box_idx, KA, img_idx, num_images) {
d_boxes_keys_iota[img_idx * KA + box_idx] = box_idx;
// One 1D line sets the 1D data
if (box_idx == 0) {
d_image_offsets[img_idx] = KA * img_idx;
// One thread sets the last+1 offset
if (img_idx == 0)
d_image_offsets[num_images] = KA * num_images;
}
}
}
} // namespace
template <>
bool GenerateProposalsOp<CUDAContext>::RunOnDevice() {
const auto& scores = Input(0);
const auto& bbox_deltas = Input(1);
const auto& im_info_tensor = Input(2);
const auto& anchors = Input(3);
auto* out_rois = Output(0);
auto* out_rois_probs = Output(1);
CAFFE_ENFORCE_EQ(scores.ndim(), 4, scores.ndim());
CAFFE_ENFORCE(scores.template IsType<float>(), scores.meta().name());
const auto num_images = scores.dim(0);
const auto A = scores.dim(1);
const auto H = scores.dim(2);
const auto W = scores.dim(3);
const auto box_dim = anchors.dim(1);
CAFFE_ENFORCE(box_dim == 4 || box_dim == 5);
const int K = H * W;
const int conv_layer_nboxes = K * A;
// Getting data members ready
// We'll sort the scores
// we want to remember their original indexes,
// ie their indexes in the tensor of shape (num_images,A,K)
// from the conv layer
// each row of d_conv_layer_indexes is at first initialized to 1..A*K
dev_conv_layer_indexes_.Resize(num_images, conv_layer_nboxes);
int* d_conv_layer_indexes =
dev_conv_layer_indexes_.template mutable_data<int>();
// d_image_offset[i] = i*K*A for i from 1 to num_images+1
// Used by the segmented sort to only sort scores within one image
dev_image_offset_.Resize(num_images + 1);
int* d_image_offset = dev_image_offset_.template mutable_data<int>();
// The following calls to CUB primitives do nothing
// (because the first arg is nullptr)
// except setting cub_*_temp_storage_bytes
size_t cub_sort_temp_storage_bytes = 0;
float* flt_ptr = nullptr;
int* int_ptr = nullptr;
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
nullptr,
cub_sort_temp_storage_bytes,
flt_ptr,
flt_ptr,
int_ptr,
int_ptr,
num_images * conv_layer_nboxes,
num_images,
int_ptr,
int_ptr,
0,
8 * sizeof(float), // sort all bits
context_.cuda_stream());
// Allocate temporary storage for CUB
dev_cub_sort_buffer_.Resize(cub_sort_temp_storage_bytes);
void* d_cub_sort_temp_storage =
dev_cub_sort_buffer_.template mutable_data<char>();
size_t cub_select_temp_storage_bytes = 0;
char* char_ptr = nullptr;
hipcub::DeviceSelect::Flagged(
nullptr,
cub_select_temp_storage_bytes,
flt_ptr,
char_ptr,
flt_ptr,
int_ptr,
K * A,
context_.cuda_stream());
// Allocate temporary storage for CUB
dev_cub_select_buffer_.Resize(cub_select_temp_storage_bytes);
void* d_cub_select_temp_storage =
dev_cub_select_buffer_.template mutable_data<char>();
// Initialize :
// - each row of dev_conv_layer_indexes to 1..K*A
// - each d_nboxes to 0
// - d_image_offset[i] = K*A*i for i 1..num_images+1
// 2D grid
hipLaunchKernelGGL(( InitializeDataKernel),
dim3((CAFFE_GET_BLOCKS(A * K), num_images)),
dim3(CAFFE_CUDA_NUM_THREADS), // blockDim.y == 1
0,
context_.cuda_stream(),
num_images, conv_layer_nboxes, d_image_offset, d_conv_layer_indexes);
C10_HIP_KERNEL_LAUNCH_CHECK();
// Sorting input scores
dev_sorted_conv_layer_indexes_.Resize(num_images, conv_layer_nboxes);
dev_sorted_scores_.Resize(num_images, conv_layer_nboxes);
const float* d_in_scores = scores.data<float>();
int* d_sorted_conv_layer_indexes =
dev_sorted_conv_layer_indexes_.template mutable_data<int>();
float* d_sorted_scores = dev_sorted_scores_.template mutable_data<float>();
;
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
d_cub_sort_temp_storage,
cub_sort_temp_storage_bytes,
d_in_scores,
d_sorted_scores,
d_conv_layer_indexes,
d_sorted_conv_layer_indexes,
num_images * conv_layer_nboxes,
num_images,
d_image_offset,
d_image_offset + 1,
0,
8 * sizeof(float), // sort all bits
context_.cuda_stream());
// Keeping only the topN pre_nms
  const int nboxes_to_generate = std::min(conv_layer_nboxes, rpn_pre_nms_topN_);
// Generating the boxes associated to the topN pre_nms scores
dev_boxes_.Resize(num_images, box_dim * nboxes_to_generate);
dev_boxes_keep_flags_.Resize(num_images, nboxes_to_generate);
const float* d_bbox_deltas = bbox_deltas.data<float>();
const float* d_anchors = anchors.data<float>();
const float* d_im_info_vec = im_info_tensor.data<float>();
float* d_boxes = dev_boxes_.template mutable_data<float>();
;
char* d_boxes_keep_flags =
dev_boxes_keep_flags_.template mutable_data<char>();
if (box_dim == 4) {
hipLaunchKernelGGL(( GeneratePreNMSUprightBoxesKernel),
dim3((CAFFE_GET_BLOCKS(nboxes_to_generate), num_images)),
dim3(CAFFE_CUDA_NUM_THREADS), // blockDim.y == 1
0,
context_.cuda_stream(),
d_sorted_conv_layer_indexes,
nboxes_to_generate,
d_bbox_deltas,
reinterpret_cast<const float4*>(d_anchors),
H,
W,
A,
feat_stride_,
rpn_min_size_,
d_im_info_vec,
num_images,
utils::BBOX_XFORM_CLIP_DEFAULT,
legacy_plus_one_,
reinterpret_cast<float4*>(d_boxes),
nboxes_to_generate,
d_sorted_scores,
d_boxes_keep_flags);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( GeneratePreNMSRotatedBoxesKernel),
dim3((CAFFE_GET_BLOCKS(nboxes_to_generate), num_images)),
dim3(CAFFE_CUDA_NUM_THREADS), // blockDim.y == 1
0,
context_.cuda_stream(),
d_sorted_conv_layer_indexes,
nboxes_to_generate,
d_bbox_deltas,
reinterpret_cast<const RotatedBox*>(d_anchors),
H,
W,
A,
feat_stride_,
rpn_min_size_,
d_im_info_vec,
num_images,
utils::BBOX_XFORM_CLIP_DEFAULT,
legacy_plus_one_,
angle_bound_on_,
angle_bound_lo_,
angle_bound_hi_,
clip_angle_thresh_,
reinterpret_cast<RotatedBox*>(d_boxes),
nboxes_to_generate,
d_sorted_scores,
d_boxes_keep_flags);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
const int nboxes_generated = nboxes_to_generate;
dev_image_prenms_boxes_.Resize(box_dim * nboxes_generated);
float* d_image_prenms_boxes =
dev_image_prenms_boxes_.template mutable_data<float>();
dev_image_prenms_scores_.Resize(nboxes_generated);
float* d_image_prenms_scores =
dev_image_prenms_scores_.template mutable_data<float>();
dev_image_boxes_keep_list_.Resize(nboxes_generated);
int* d_image_boxes_keep_list =
dev_image_boxes_keep_list_.template mutable_data<int>();
const int roi_cols = box_dim + 1;
  const int max_postnms_nboxes = std::min(nboxes_generated, rpn_post_nms_topN_);
dev_postnms_rois_.Resize(roi_cols * num_images * max_postnms_nboxes);
dev_postnms_rois_probs_.Resize(num_images * max_postnms_nboxes);
float* d_postnms_rois = dev_postnms_rois_.template mutable_data<float>();
float* d_postnms_rois_probs =
dev_postnms_rois_probs_.template mutable_data<float>();
dev_prenms_nboxes_.Resize(num_images);
host_prenms_nboxes_.Resize(num_images);
int* d_prenms_nboxes = dev_prenms_nboxes_.template mutable_data<int>();
int* h_prenms_nboxes = host_prenms_nboxes_.template mutable_data<int>();
int nrois_in_output = 0;
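  // Per-image loop: compact the kept boxes and scores with DeviceSelect::Flagged, run
  // nms_gpu on the survivors, then append at most rpn_post_nms_topN_ rows for this
  // image to the output buffers.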
for (int image_index = 0; image_index < num_images; ++image_index) {
// Sub matrices for current image
const float* d_image_boxes =
&d_boxes[image_index * nboxes_generated * box_dim];
const float* d_image_sorted_scores = &d_sorted_scores[image_index * K * A];
char* d_image_boxes_keep_flags =
&d_boxes_keep_flags[image_index * nboxes_generated];
float* d_image_postnms_rois = &d_postnms_rois[roi_cols * nrois_in_output];
float* d_image_postnms_rois_probs = &d_postnms_rois_probs[nrois_in_output];
// Moving valid boxes (ie the ones with d_boxes_keep_flags[ibox] == true)
// to the output tensors
if (box_dim == 4) {
hipcub::DeviceSelect::Flagged(
d_cub_select_temp_storage,
cub_select_temp_storage_bytes,
reinterpret_cast<const float4*>(d_image_boxes),
d_image_boxes_keep_flags,
reinterpret_cast<float4*>(d_image_prenms_boxes),
d_prenms_nboxes,
nboxes_generated,
context_.cuda_stream());
} else {
hipcub::DeviceSelect::Flagged(
d_cub_select_temp_storage,
cub_select_temp_storage_bytes,
reinterpret_cast<const RotatedBox*>(d_image_boxes),
d_image_boxes_keep_flags,
reinterpret_cast<RotatedBox*>(d_image_prenms_boxes),
d_prenms_nboxes,
nboxes_generated,
context_.cuda_stream());
}
hipcub::DeviceSelect::Flagged(
d_cub_select_temp_storage,
cub_select_temp_storage_bytes,
d_image_sorted_scores,
d_image_boxes_keep_flags,
d_image_prenms_scores,
d_prenms_nboxes,
nboxes_generated,
context_.cuda_stream());
host_prenms_nboxes_.CopyFrom(dev_prenms_nboxes_);
// We know prenms_boxes <= topN_prenms, because nboxes_generated <=
// topN_prenms. Calling NMS on the generated boxes
const int prenms_nboxes = *h_prenms_nboxes;
int nkeep;
utils::nms_gpu(
d_image_prenms_boxes,
prenms_nboxes,
rpn_nms_thresh_,
legacy_plus_one_,
d_image_boxes_keep_list,
&nkeep,
dev_nms_mask_,
host_nms_mask_,
&context_,
box_dim);
    // All operations done after the previous sort preserved the relative order of the
    // elements, so they are still sorted; keeping the topN therefore amounts to
    // truncating the array.
    const int postnms_nboxes = std::min(nkeep, rpn_post_nms_topN_);
// Moving the out boxes to the output tensors,
// adding the image_index dimension on the fly
if (box_dim == 4) {
hipLaunchKernelGGL(( WriteUprightBoxesOutput),
dim3(CAFFE_GET_BLOCKS(postnms_nboxes)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
reinterpret_cast<const float4*>(d_image_prenms_boxes),
d_image_prenms_scores,
d_image_boxes_keep_list,
postnms_nboxes,
image_index,
d_image_postnms_rois,
d_image_postnms_rois_probs);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( WriteRotatedBoxesOutput),
dim3(CAFFE_GET_BLOCKS(postnms_nboxes)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
reinterpret_cast<const RotatedBox*>(d_image_prenms_boxes),
d_image_prenms_scores,
d_image_boxes_keep_list,
postnms_nboxes,
image_index,
d_image_postnms_rois,
d_image_postnms_rois_probs);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
nrois_in_output += postnms_nboxes;
}
// Using a buffer because we cannot call ShrinkTo
out_rois->Resize(nrois_in_output, roi_cols);
out_rois_probs->Resize(nrois_in_output);
float* d_out_rois = out_rois->template mutable_data<float>();
float* d_out_rois_probs = out_rois_probs->template mutable_data<float>();
CUDA_CHECK(hipMemcpyAsync(
d_out_rois,
d_postnms_rois,
nrois_in_output * roi_cols * sizeof(float),
hipMemcpyDeviceToDevice,
context_.cuda_stream()));
CUDA_CHECK(hipMemcpyAsync(
d_out_rois_probs,
d_postnms_rois_probs,
nrois_in_output * sizeof(float),
hipMemcpyDeviceToDevice,
context_.cuda_stream()));
return true;
}
REGISTER_CUDA_OPERATOR(GenerateProposals, GenerateProposalsOp<CUDAContext>);
} // namespace caffe2
C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(
GenerateProposals,
caffe2::GenerateProposalsOp<caffe2::CUDAContext>);
| 516e42a524e76a334ce85b0bbcfc7ff3da4a195d.cu | #include <cub/cub.cuh>
#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/generate_proposals_op.h"
#include "caffe2/operators/generate_proposals_op_util_boxes.h" // BBOX_XFORM_CLIP_DEFAULT
#include "caffe2/operators/generate_proposals_op_util_nms.h"
#include "caffe2/operators/generate_proposals_op_util_nms_gpu.h"
#if defined(USE_ROCM)
#include <cfloat>
#endif
using caffe2::utils::RotatedBox;
namespace caffe2 {
namespace {
__global__ void GeneratePreNMSUprightBoxesKernel(
const int* d_sorted_scores_keys,
const int nboxes_to_generate,
const float* d_bbox_deltas,
const float4* d_anchors,
const int H,
const int W,
const int A,
const float feat_stride,
const float min_size,
const float* d_img_info_vec,
const int num_images,
const float bbox_xform_clip,
const bool legacy_plus_one,
float4* d_out_boxes,
const int prenms_nboxes, // leading dimension of out_boxes
float* d_inout_scores,
char* d_boxes_keep_flags) {
const int K = H * W;
const int KA = K * A;
CUDA_2D_KERNEL_LOOP(ibox, nboxes_to_generate, image_index, num_images) {
// box_conv_index : # of the same box, but indexed in
// the scores from the conv layer, of shape (A,H,W)
// the num_images dimension was already removed
// box_conv_index = a*K + h*W + w
const int box_conv_index = d_sorted_scores_keys[image_index * KA + ibox];
// We want to decompose box_conv_index in (a,h,w)
// such as box_conv_index = a*K + h*W + w
// (avoiding modulos in the process)
int remaining = box_conv_index;
const int dA = K; // stride of A
const int a = remaining / dA;
remaining -= a * dA;
const int dH = W; // stride of H
const int h = remaining / dH;
remaining -= h * dH;
const int w = remaining; // dW = 1
// Loading the anchor a
// float4 is a struct with float x,y,z,w
const float4 anchor = d_anchors[a];
// x1,y1,x2,y2 :coordinates of anchor a, shifted for position (h,w)
const float shift_w = feat_stride * w;
float x1 = shift_w + anchor.x;
float x2 = shift_w + anchor.z;
const float shift_h = feat_stride * h;
float y1 = shift_h + anchor.y;
float y2 = shift_h + anchor.w;
// TODO use fast math when possible
// Deltas for that box
// Deltas of shape (num_images,4*A,K)
// We're going to compute 4 scattered reads
// better than the alternative, ie transposing the complete deltas
// array first
int deltas_idx = image_index * (KA * 4) + a * 4 * K + h * W + w;
const float dx = d_bbox_deltas[deltas_idx];
// Stride of K between each dimension
deltas_idx += K;
const float dy = d_bbox_deltas[deltas_idx];
deltas_idx += K;
float dw = d_bbox_deltas[deltas_idx];
deltas_idx += K;
float dh = d_bbox_deltas[deltas_idx];
// Upper bound on dw,dh
dw = fmin(dw, bbox_xform_clip);
dh = fmin(dh, bbox_xform_clip);
// Applying the deltas
float width = x2 - x1 + float(int(legacy_plus_one));
const float ctr_x = x1 + 0.5f * width;
const float pred_ctr_x = ctr_x + width * dx; // TODO fuse madd
const float pred_w = width * expf(dw);
x1 = pred_ctr_x - 0.5f * pred_w;
x2 = pred_ctr_x + 0.5f * pred_w - float(int(legacy_plus_one));
float height = y2 - y1 + float(int(legacy_plus_one));
const float ctr_y = y1 + 0.5f * height;
const float pred_ctr_y = ctr_y + height * dy;
const float pred_h = height * expf(dh);
y1 = pred_ctr_y - 0.5f * pred_h;
y2 = pred_ctr_y + 0.5f * pred_h - float(int(legacy_plus_one));
// Clipping box to image
const float img_height = d_img_info_vec[3 * image_index + 0];
const float img_width = d_img_info_vec[3 * image_index + 1];
const float min_size_scaled =
min_size * d_img_info_vec[3 * image_index + 2];
x1 = fmax(fmin(x1, img_width - float(int(legacy_plus_one))), 0.0f);
y1 = fmax(fmin(y1, img_height - float(int(legacy_plus_one))), 0.0f);
x2 = fmax(fmin(x2, img_width - float(int(legacy_plus_one))), 0.0f);
y2 = fmax(fmin(y2, img_height - float(int(legacy_plus_one))), 0.0f);
// Filter boxes
// Removing boxes with one dim < min_size
// (center of box is in image, because of previous step)
width = x2 - x1 + float(int(legacy_plus_one)); // may have changed
height = y2 - y1 + float(int(legacy_plus_one));
bool keep_box = fmin(width, height) >= min_size_scaled;
// We are not deleting the box right now even if !keep_box
// we want to keep the relative order of the elements stable
// we'll do it in such a way later
// d_boxes_keep_flags size: (num_images,prenms_nboxes)
// d_out_boxes size: (num_images,prenms_nboxes)
const int out_index = image_index * prenms_nboxes + ibox;
d_boxes_keep_flags[out_index] = keep_box;
d_out_boxes[out_index] = {x1, y1, x2, y2};
// d_inout_scores size: (num_images,KA)
if (!keep_box)
d_inout_scores[image_index * KA + ibox] = FLT_MIN; // for NMS
}
}
__global__ void GeneratePreNMSRotatedBoxesKernel(
const int* d_sorted_scores_keys,
const int nboxes_to_generate,
const float* d_bbox_deltas,
const RotatedBox* d_anchors,
const int H,
const int W,
const int A,
const float feat_stride,
const float min_size,
const float* d_img_info_vec,
const int num_images,
const float bbox_xform_clip,
const bool legacy_plus_one,
const bool angle_bound_on,
const int angle_bound_lo,
const int angle_bound_hi,
const bool clip_angle_thresh,
RotatedBox* d_out_boxes,
const int prenms_nboxes, // leading dimension of out_boxes
float* d_inout_scores,
char* d_boxes_keep_flags) {
constexpr float PI = 3.14159265358979323846;
const int K = H * W;
const int KA = K * A;
CUDA_2D_KERNEL_LOOP(ibox, nboxes_to_generate, image_index, num_images) {
// box_conv_index : # of the same box, but indexed in
// the scores from the conv layer, of shape (A,H,W)
// the num_images dimension was already removed
// box_conv_index = a*K + h*W + w
const int box_conv_index = d_sorted_scores_keys[image_index * KA + ibox];
// We want to decompose box_conv_index in (a,h,w)
// such as box_conv_index = a*K + h*W + w
// (avoiding modulos in the process)
int remaining = box_conv_index;
const int dA = K; // stride of A
const int a = remaining / dA;
remaining -= a * dA;
const int dH = W; // stride of H
const int h = remaining / dH;
remaining -= h * dH;
const int w = remaining; // dW = 1
// Loading the anchor a and applying shifts.
// RotatedBox in [ctr_x, ctr_y, w, h, angle] format.
// Zero shift for width, height and angle.
RotatedBox box = d_anchors[a];
box.x_ctr += feat_stride * w; // x_ctr shifted for w
box.y_ctr += feat_stride * h; // y_ctr shifted for h
// TODO use fast math when possible
// Deltas for that box
// Deltas of shape (num_images,5*A,K)
// We're going to compute 5 scattered reads
// better than the alternative, ie transposing the complete deltas
// array first
int deltas_idx = image_index * (KA * 5) + a * 5 * K + h * W + w;
// Stride of K between each dimension
RotatedBox delta;
delta.x_ctr = d_bbox_deltas[deltas_idx + K * 0];
delta.y_ctr = d_bbox_deltas[deltas_idx + K * 1];
delta.w = d_bbox_deltas[deltas_idx + K * 2];
delta.h = d_bbox_deltas[deltas_idx + K * 3];
delta.a = d_bbox_deltas[deltas_idx + K * 4];
// Upper bound on dw,dh
delta.w = fmin(delta.w, bbox_xform_clip);
delta.h = fmin(delta.h, bbox_xform_clip);
// Convert back to degrees
delta.a *= 180.f / PI;
// Applying the deltas
box.x_ctr += delta.x_ctr * box.w;
box.y_ctr += delta.y_ctr * box.h;
box.w *= expf(delta.w);
box.h *= expf(delta.h);
box.a += delta.a;
if (angle_bound_on) {
// Normalize angle to be within [angle_bound_lo, angle_bound_hi].
// Deltas are guaranteed to be <= period / 2 while computing training
// targets by bbox_transform_inv.
const float period = angle_bound_hi - angle_bound_lo;
// CAFFE_ENFORCE(period > 0 && period % 180 == 0);
if (box.a < angle_bound_lo) {
box.a += period;
} else if (box.a > angle_bound_hi) {
box.a -= period;
}
}
// Clipping box to image.
// Only clip boxes that are almost upright (with a tolerance of
// clip_angle_thresh) for backward compatibility with horizontal boxes.
const float img_height = d_img_info_vec[3 * image_index + 0];
const float img_width = d_img_info_vec[3 * image_index + 1];
const float min_size_scaled =
min_size * d_img_info_vec[3 * image_index + 2];
if (fabs(box.a) <= clip_angle_thresh) {
// Convert from [x_ctr, y_ctr, w, h] to [x1, y1, x2, y2]
float x1 = box.x_ctr - (box.w - float(int(legacy_plus_one))) / 2.f;
float y1 = box.y_ctr - (box.h - float(int(legacy_plus_one))) / 2.f;
float x2 = x1 + box.w - float(int(legacy_plus_one));
float y2 = y1 + box.h - float(int(legacy_plus_one));
// Clip
x1 = fmax(fmin(x1, img_width - float(int(legacy_plus_one))), 0.0f);
y1 = fmax(fmin(y1, img_height - float(int(legacy_plus_one))), 0.0f);
x2 = fmax(fmin(x2, img_width - float(int(legacy_plus_one))), 0.0f);
y2 = fmax(fmin(y2, img_height - float(int(legacy_plus_one))), 0.0f);
// Convert back to [x_ctr, y_ctr, w, h]
box.x_ctr = (x1 + x2) / 2.f;
box.y_ctr = (y1 + y2) / 2.f;
box.w = x2 - x1 + float(int(legacy_plus_one));
box.h = y2 - y1 + float(int(legacy_plus_one));
}
// Filter boxes.
// Removing boxes with one dim < min_size or center outside the image.
bool keep_box = (fmin(box.w, box.h) >= min_size_scaled) &&
(box.x_ctr < img_width) && (box.y_ctr < img_height);
// We are not deleting the box right now even if !keep_box
// we want to keep the relative order of the elements stable
// we'll do it in such a way later
// d_boxes_keep_flags size: (num_images,prenms_nboxes)
// d_out_boxes size: (num_images,prenms_nboxes)
const int out_index = image_index * prenms_nboxes + ibox;
d_boxes_keep_flags[out_index] = keep_box;
d_out_boxes[out_index] = box;
// d_inout_scores size: (num_images,KA)
if (!keep_box) {
d_inout_scores[image_index * KA + ibox] = FLT_MIN; // for NMS
}
}
}
__global__ void WriteUprightBoxesOutput(
const float4* d_image_boxes,
const float* d_image_scores,
const int* d_image_boxes_keep_list,
const int nboxes,
const int image_index,
float* d_image_out_rois,
float* d_image_out_rois_probs) {
CUDA_1D_KERNEL_LOOP(i, nboxes) {
const int ibox = d_image_boxes_keep_list[i];
const float4 box = d_image_boxes[ibox];
const float score = d_image_scores[ibox];
// Scattered memory accesses
// postnms_nboxes is small anyway
d_image_out_rois_probs[i] = score;
const int base_idx = 5 * i;
d_image_out_rois[base_idx + 0] = image_index;
d_image_out_rois[base_idx + 1] = box.x;
d_image_out_rois[base_idx + 2] = box.y;
d_image_out_rois[base_idx + 3] = box.z;
d_image_out_rois[base_idx + 4] = box.w;
}
}
__global__ void WriteRotatedBoxesOutput(
const RotatedBox* d_image_boxes,
const float* d_image_scores,
const int* d_image_boxes_keep_list,
const int nboxes,
const int image_index,
float* d_image_out_rois,
float* d_image_out_rois_probs) {
CUDA_1D_KERNEL_LOOP(i, nboxes) {
const int ibox = d_image_boxes_keep_list[i];
const RotatedBox box = d_image_boxes[ibox];
const float score = d_image_scores[ibox];
// Scattered memory accesses
// postnms_nboxes is small anyway
d_image_out_rois_probs[i] = score;
const int base_idx = 6 * i;
d_image_out_rois[base_idx + 0] = image_index;
d_image_out_rois[base_idx + 1] = box.x_ctr;
d_image_out_rois[base_idx + 2] = box.y_ctr;
d_image_out_rois[base_idx + 3] = box.w;
d_image_out_rois[base_idx + 4] = box.h;
d_image_out_rois[base_idx + 5] = box.a;
}
}
__global__ void InitializeDataKernel(
const int num_images,
const int KA,
int* d_image_offsets,
int* d_boxes_keys_iota) {
CUDA_2D_KERNEL_LOOP(box_idx, KA, img_idx, num_images) {
d_boxes_keys_iota[img_idx * KA + box_idx] = box_idx;
// One 1D line sets the 1D data
if (box_idx == 0) {
d_image_offsets[img_idx] = KA * img_idx;
// One thread sets the last+1 offset
if (img_idx == 0)
d_image_offsets[num_images] = KA * num_images;
}
}
}
} // namespace
template <>
bool GenerateProposalsOp<CUDAContext>::RunOnDevice() {
const auto& scores = Input(0);
const auto& bbox_deltas = Input(1);
const auto& im_info_tensor = Input(2);
const auto& anchors = Input(3);
auto* out_rois = Output(0);
auto* out_rois_probs = Output(1);
CAFFE_ENFORCE_EQ(scores.ndim(), 4, scores.ndim());
CAFFE_ENFORCE(scores.template IsType<float>(), scores.meta().name());
const auto num_images = scores.dim(0);
const auto A = scores.dim(1);
const auto H = scores.dim(2);
const auto W = scores.dim(3);
const auto box_dim = anchors.dim(1);
CAFFE_ENFORCE(box_dim == 4 || box_dim == 5);
const int K = H * W;
const int conv_layer_nboxes = K * A;
// Getting data members ready
// We'll sort the scores
// we want to remember their original indexes,
// ie their indexes in the tensor of shape (num_images,A,K)
// from the conv layer
// each row of d_conv_layer_indexes is at first initialized to 1..A*K
dev_conv_layer_indexes_.Resize(num_images, conv_layer_nboxes);
int* d_conv_layer_indexes =
dev_conv_layer_indexes_.template mutable_data<int>();
// d_image_offset[i] = i*K*A for i from 1 to num_images+1
// Used by the segmented sort to only sort scores within one image
dev_image_offset_.Resize(num_images + 1);
int* d_image_offset = dev_image_offset_.template mutable_data<int>();
// The following calls to CUB primitives do nothing
// (because the first arg is nullptr)
// except setting cub_*_temp_storage_bytes
size_t cub_sort_temp_storage_bytes = 0;
float* flt_ptr = nullptr;
int* int_ptr = nullptr;
cub::DeviceSegmentedRadixSort::SortPairsDescending(
nullptr,
cub_sort_temp_storage_bytes,
flt_ptr,
flt_ptr,
int_ptr,
int_ptr,
num_images * conv_layer_nboxes,
num_images,
int_ptr,
int_ptr,
0,
8 * sizeof(float), // sort all bits
context_.cuda_stream());
// Allocate temporary storage for CUB
dev_cub_sort_buffer_.Resize(cub_sort_temp_storage_bytes);
void* d_cub_sort_temp_storage =
dev_cub_sort_buffer_.template mutable_data<char>();
size_t cub_select_temp_storage_bytes = 0;
char* char_ptr = nullptr;
cub::DeviceSelect::Flagged(
nullptr,
cub_select_temp_storage_bytes,
flt_ptr,
char_ptr,
flt_ptr,
int_ptr,
K * A,
context_.cuda_stream());
// Allocate temporary storage for CUB
dev_cub_select_buffer_.Resize(cub_select_temp_storage_bytes);
void* d_cub_select_temp_storage =
dev_cub_select_buffer_.template mutable_data<char>();
// Initialize :
// - each row of dev_conv_layer_indexes to 1..K*A
// - each d_nboxes to 0
// - d_image_offset[i] = K*A*i for i 1..num_images+1
// 2D grid
InitializeDataKernel<<<
(CAFFE_GET_BLOCKS(A * K), num_images),
CAFFE_CUDA_NUM_THREADS, // blockDim.y == 1
0,
context_.cuda_stream()>>>(
num_images, conv_layer_nboxes, d_image_offset, d_conv_layer_indexes);
C10_CUDA_KERNEL_LAUNCH_CHECK();
// Sorting input scores
dev_sorted_conv_layer_indexes_.Resize(num_images, conv_layer_nboxes);
dev_sorted_scores_.Resize(num_images, conv_layer_nboxes);
const float* d_in_scores = scores.data<float>();
int* d_sorted_conv_layer_indexes =
dev_sorted_conv_layer_indexes_.template mutable_data<int>();
float* d_sorted_scores = dev_sorted_scores_.template mutable_data<float>();
;
cub::DeviceSegmentedRadixSort::SortPairsDescending(
d_cub_sort_temp_storage,
cub_sort_temp_storage_bytes,
d_in_scores,
d_sorted_scores,
d_conv_layer_indexes,
d_sorted_conv_layer_indexes,
num_images * conv_layer_nboxes,
num_images,
d_image_offset,
d_image_offset + 1,
0,
8 * sizeof(float), // sort all bits
context_.cuda_stream());
// Keeping only the topN pre_nms
const int nboxes_to_generate = std::min(conv_layer_nboxes, rpn_pre_nms_topN_);
// Generating the boxes associated to the topN pre_nms scores
dev_boxes_.Resize(num_images, box_dim * nboxes_to_generate);
dev_boxes_keep_flags_.Resize(num_images, nboxes_to_generate);
const float* d_bbox_deltas = bbox_deltas.data<float>();
const float* d_anchors = anchors.data<float>();
const float* d_im_info_vec = im_info_tensor.data<float>();
float* d_boxes = dev_boxes_.template mutable_data<float>();
;
char* d_boxes_keep_flags =
dev_boxes_keep_flags_.template mutable_data<char>();
if (box_dim == 4) {
GeneratePreNMSUprightBoxesKernel<<<
(CAFFE_GET_BLOCKS(nboxes_to_generate), num_images),
CAFFE_CUDA_NUM_THREADS, // blockDim.y == 1
0,
context_.cuda_stream()>>>(
d_sorted_conv_layer_indexes,
nboxes_to_generate,
d_bbox_deltas,
reinterpret_cast<const float4*>(d_anchors),
H,
W,
A,
feat_stride_,
rpn_min_size_,
d_im_info_vec,
num_images,
utils::BBOX_XFORM_CLIP_DEFAULT,
legacy_plus_one_,
reinterpret_cast<float4*>(d_boxes),
nboxes_to_generate,
d_sorted_scores,
d_boxes_keep_flags);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
GeneratePreNMSRotatedBoxesKernel<<<
(CAFFE_GET_BLOCKS(nboxes_to_generate), num_images),
CAFFE_CUDA_NUM_THREADS, // blockDim.y == 1
0,
context_.cuda_stream()>>>(
d_sorted_conv_layer_indexes,
nboxes_to_generate,
d_bbox_deltas,
reinterpret_cast<const RotatedBox*>(d_anchors),
H,
W,
A,
feat_stride_,
rpn_min_size_,
d_im_info_vec,
num_images,
utils::BBOX_XFORM_CLIP_DEFAULT,
legacy_plus_one_,
angle_bound_on_,
angle_bound_lo_,
angle_bound_hi_,
clip_angle_thresh_,
reinterpret_cast<RotatedBox*>(d_boxes),
nboxes_to_generate,
d_sorted_scores,
d_boxes_keep_flags);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
const int nboxes_generated = nboxes_to_generate;
dev_image_prenms_boxes_.Resize(box_dim * nboxes_generated);
float* d_image_prenms_boxes =
dev_image_prenms_boxes_.template mutable_data<float>();
dev_image_prenms_scores_.Resize(nboxes_generated);
float* d_image_prenms_scores =
dev_image_prenms_scores_.template mutable_data<float>();
dev_image_boxes_keep_list_.Resize(nboxes_generated);
int* d_image_boxes_keep_list =
dev_image_boxes_keep_list_.template mutable_data<int>();
const int roi_cols = box_dim + 1;
const int max_postnms_nboxes = std::min(nboxes_generated, rpn_post_nms_topN_);
dev_postnms_rois_.Resize(roi_cols * num_images * max_postnms_nboxes);
dev_postnms_rois_probs_.Resize(num_images * max_postnms_nboxes);
float* d_postnms_rois = dev_postnms_rois_.template mutable_data<float>();
float* d_postnms_rois_probs =
dev_postnms_rois_probs_.template mutable_data<float>();
dev_prenms_nboxes_.Resize(num_images);
host_prenms_nboxes_.Resize(num_images);
int* d_prenms_nboxes = dev_prenms_nboxes_.template mutable_data<int>();
int* h_prenms_nboxes = host_prenms_nboxes_.template mutable_data<int>();
int nrois_in_output = 0;
for (int image_index = 0; image_index < num_images; ++image_index) {
// Sub matrices for current image
const float* d_image_boxes =
&d_boxes[image_index * nboxes_generated * box_dim];
const float* d_image_sorted_scores = &d_sorted_scores[image_index * K * A];
char* d_image_boxes_keep_flags =
&d_boxes_keep_flags[image_index * nboxes_generated];
float* d_image_postnms_rois = &d_postnms_rois[roi_cols * nrois_in_output];
float* d_image_postnms_rois_probs = &d_postnms_rois_probs[nrois_in_output];
// Moving valid boxes (ie the ones with d_boxes_keep_flags[ibox] == true)
// to the output tensors
if (box_dim == 4) {
cub::DeviceSelect::Flagged(
d_cub_select_temp_storage,
cub_select_temp_storage_bytes,
reinterpret_cast<const float4*>(d_image_boxes),
d_image_boxes_keep_flags,
reinterpret_cast<float4*>(d_image_prenms_boxes),
d_prenms_nboxes,
nboxes_generated,
context_.cuda_stream());
} else {
cub::DeviceSelect::Flagged(
d_cub_select_temp_storage,
cub_select_temp_storage_bytes,
reinterpret_cast<const RotatedBox*>(d_image_boxes),
d_image_boxes_keep_flags,
reinterpret_cast<RotatedBox*>(d_image_prenms_boxes),
d_prenms_nboxes,
nboxes_generated,
context_.cuda_stream());
}
cub::DeviceSelect::Flagged(
d_cub_select_temp_storage,
cub_select_temp_storage_bytes,
d_image_sorted_scores,
d_image_boxes_keep_flags,
d_image_prenms_scores,
d_prenms_nboxes,
nboxes_generated,
context_.cuda_stream());
host_prenms_nboxes_.CopyFrom(dev_prenms_nboxes_);
// We know prenms_boxes <= topN_prenms, because nboxes_generated <=
// topN_prenms. Calling NMS on the generated boxes
const int prenms_nboxes = *h_prenms_nboxes;
int nkeep;
utils::nms_gpu(
d_image_prenms_boxes,
prenms_nboxes,
rpn_nms_thresh_,
legacy_plus_one_,
d_image_boxes_keep_list,
&nkeep,
dev_nms_mask_,
host_nms_mask_,
&context_,
box_dim);
    // All operations done after the previous sort preserved the relative order of the
    // elements, so they are still sorted; keeping the topN therefore amounts to
    // truncating the array.
const int postnms_nboxes = std::min(nkeep, rpn_post_nms_topN_);
// Moving the out boxes to the output tensors,
// adding the image_index dimension on the fly
if (box_dim == 4) {
WriteUprightBoxesOutput<<<
CAFFE_GET_BLOCKS(postnms_nboxes),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
reinterpret_cast<const float4*>(d_image_prenms_boxes),
d_image_prenms_scores,
d_image_boxes_keep_list,
postnms_nboxes,
image_index,
d_image_postnms_rois,
d_image_postnms_rois_probs);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
WriteRotatedBoxesOutput<<<
CAFFE_GET_BLOCKS(postnms_nboxes),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
reinterpret_cast<const RotatedBox*>(d_image_prenms_boxes),
d_image_prenms_scores,
d_image_boxes_keep_list,
postnms_nboxes,
image_index,
d_image_postnms_rois,
d_image_postnms_rois_probs);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
nrois_in_output += postnms_nboxes;
}
// Using a buffer because we cannot call ShrinkTo
out_rois->Resize(nrois_in_output, roi_cols);
out_rois_probs->Resize(nrois_in_output);
float* d_out_rois = out_rois->template mutable_data<float>();
float* d_out_rois_probs = out_rois_probs->template mutable_data<float>();
CUDA_CHECK(cudaMemcpyAsync(
d_out_rois,
d_postnms_rois,
nrois_in_output * roi_cols * sizeof(float),
cudaMemcpyDeviceToDevice,
context_.cuda_stream()));
CUDA_CHECK(cudaMemcpyAsync(
d_out_rois_probs,
d_postnms_rois_probs,
nrois_in_output * sizeof(float),
cudaMemcpyDeviceToDevice,
context_.cuda_stream()));
return true;
}
REGISTER_CUDA_OPERATOR(GenerateProposals, GenerateProposalsOp<CUDAContext>);
} // namespace caffe2
C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(
GenerateProposals,
caffe2::GenerateProposalsOp<caffe2::CUDAContext>);
|
243138053949d72cedfb90b1f36b197fc0e7124a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unistd.h>
#include <rocblas.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <iostream>
#include <stdlib.h>
#define DSIZE 1000000
// nTPB should be a power-of-2
#define nTPB 1024
#define MAX_KERNEL_BLOCKS 30
#define MAX_BLOCKS ((DSIZE/nTPB)+1)
#define MIN(a,b) ((a>b)?b:a)
#define FLOAT_MIN -1.0f
#include <time.h>
#include <sys/time.h>
unsigned long long dtime_usec(unsigned long long prev){
#define USECPSEC 1000000ULL
timeval tv1;
gettimeofday(&tv1,0);
return ((tv1.tv_sec * USECPSEC)+tv1.tv_usec) - prev;
}
__device__ volatile float blk_vals[MAX_BLOCKS];
__device__ volatile int blk_idxs[MAX_BLOCKS];
__device__ int blk_num = 0;
__global__ void max_idx_kernel(const float *data, const int dsize, int *result) {
__shared__ volatile float vals[nTPB];
__shared__ volatile int idxs[nTPB];
int idx = threadIdx.x;
float my_val = FLOAT_MIN;
int my_idx = -1;
while (idx < dsize) {
if (data[idx] > my_val) {
my_val = data[idx];
my_idx = idx;
}
idx += blockDim.x;
}
vals[threadIdx.x] = my_val;
idxs[threadIdx.x] = my_idx;
__syncthreads();
  for (int i = (nTPB>>1); i > 0; i>>=1) { // i = 512, 256, 128, ...
    if (threadIdx.x < i) { // threadIdx.x is between 0 and 1023
if (vals[threadIdx.x] < vals[threadIdx.x + i]) {
vals[threadIdx.x] = vals[threadIdx.x+i];
idxs[threadIdx.x] = idxs[threadIdx.x+i];
}
}
__syncthreads();
}
if (threadIdx.x == 0) {
*result = idxs[0];
}
}
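// Hedged cross-check sketch (not part of the original benchmark): the Thrust
// headers included above are otherwise unused; the same argmax can be computed
// with thrust::max_element, e.g. for validating max_idx_kernel. Thrust launches
// its own kernels internally, so its timing is not comparable to the
// single-block reduction above.
static int thrust_max_idx(const float *d_data, int dsize) {
  thrust::device_ptr<const float> dp(d_data);
  return (int)(thrust::max_element(dp, dp + dsize) - dp);
}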
int main(){
int nrElements = DSIZE;
float *d_vector, *h_vector;
h_vector = new float[DSIZE];
for (int i = 0; i < DSIZE; i++) h_vector[i] = rand()/(float)RAND_MAX;
h_vector[10] = 10; // create definite max element
hipblasHandle_t my_handle;
hipblasStatus_t my_status = hipblasCreate(&my_handle);
hipMalloc(&d_vector, DSIZE*sizeof(float));
hipMemcpy(d_vector, h_vector, DSIZE*sizeof(float), hipMemcpyHostToDevice);
int max_index = 0;
int *d_max_index;
hipMalloc(&d_max_index, sizeof(int));
unsigned long long dtime = dtime_usec(0);
hipLaunchKernelGGL(( max_idx_kernel), dim3(1), dim3(nTPB), 0, 0, d_vector, DSIZE, d_max_index);
hipDeviceSynchronize();
dtime = dtime_usec(dtime);
std::cout << "kernel time: " << dtime/(float)USECPSEC;
hipMemcpy(&max_index, d_max_index, sizeof(int), hipMemcpyDeviceToHost);
std::cout << " max index: " << max_index << std::endl;
return 0;
}
| 243138053949d72cedfb90b1f36b197fc0e7124a.cu | #include <unistd.h>
#include <cublas_v2.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <iostream>
#include <stdlib.h>
#define DSIZE 1000000
// nTPB should be a power-of-2
#define nTPB 1024
#define MAX_KERNEL_BLOCKS 30
#define MAX_BLOCKS ((DSIZE/nTPB)+1)
#define MIN(a,b) ((a>b)?b:a)
#define FLOAT_MIN -1.0f
#include <time.h>
#include <sys/time.h>
unsigned long long dtime_usec(unsigned long long prev){
#define USECPSEC 1000000ULL
timeval tv1;
gettimeofday(&tv1,0);
return ((tv1.tv_sec * USECPSEC)+tv1.tv_usec) - prev;
}
__device__ volatile float blk_vals[MAX_BLOCKS];
__device__ volatile int blk_idxs[MAX_BLOCKS];
__device__ int blk_num = 0;
__global__ void max_idx_kernel(const float *data, const int dsize, int *result) {
__shared__ volatile float vals[nTPB];
__shared__ volatile int idxs[nTPB];
int idx = threadIdx.x;
float my_val = FLOAT_MIN;
int my_idx = -1;
while (idx < dsize) {
if (data[idx] > my_val) {
my_val = data[idx];
my_idx = idx;
}
idx += blockDim.x;
}
vals[threadIdx.x] = my_val;
idxs[threadIdx.x] = my_idx;
__syncthreads();
  for (int i = (nTPB>>1); i > 0; i>>=1) { // i = 512, 256, 128, ...
    if (threadIdx.x < i) { // threadIdx.x is between 0 and 1023
if (vals[threadIdx.x] < vals[threadIdx.x + i]) {
vals[threadIdx.x] = vals[threadIdx.x+i];
idxs[threadIdx.x] = idxs[threadIdx.x+i];
}
}
__syncthreads();
}
if (threadIdx.x == 0) {
*result = idxs[0];
}
}
int main(){
int nrElements = DSIZE;
float *d_vector, *h_vector;
h_vector = new float[DSIZE];
for (int i = 0; i < DSIZE; i++) h_vector[i] = rand()/(float)RAND_MAX;
h_vector[10] = 10; // create definite max element
cublasHandle_t my_handle;
cublasStatus_t my_status = cublasCreate(&my_handle);
cudaMalloc(&d_vector, DSIZE*sizeof(float));
cudaMemcpy(d_vector, h_vector, DSIZE*sizeof(float), cudaMemcpyHostToDevice);
int max_index = 0;
int *d_max_index;
cudaMalloc(&d_max_index, sizeof(int));
unsigned long long dtime = dtime_usec(0);
max_idx_kernel<<<1, nTPB>>>(d_vector, DSIZE, d_max_index);
cudaDeviceSynchronize();
dtime = dtime_usec(dtime);
std::cout << "kernel time: " << dtime/(float)USECPSEC;
cudaMemcpy(&max_index, d_max_index, sizeof(int), cudaMemcpyDeviceToHost);
std::cout << " max index: " << max_index << std::endl;
return 0;
}
|
34356a7e8f23e7cb5ae534eb4dceadec065332f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THH/THHTensorMath.h>
#include <THH/THHGeneral.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHApply.cuh>
#include <c10/macros/Macros.h>
// Compute the offsets into the given tensors for a linear index. For the 't2'
// tensor, dimension 'dim' is skipped. The tensors are assumed to have the same
// size (with the exception of 't2' in dimension 'dim').
// This version uses a static number of dimensions.
template <typename IndexType, typename Real, int Dims>
struct IndexToScatterGatherOffsets {
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<int64_t, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t1, IndexType* t1Offset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
*t1Offset += curDimIndex * t1.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<int64_t, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
};
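// Worked example for the loop above (illustrative values only): for a
// contiguous 2-D 'index' tensor with sizes = {4, 3} and strides = {3, 1},
// dim = 1 and linearId = 7, the loop visits d = 1 then d = 0:
//   d = 1: curDimIndex = 7 % 3 = 1, indexOffset += 1 * 1, linearId = 7 / 3 = 2
//   d = 0: curDimIndex = 2 % 4 = 2, indexOffset += 2 * 3
// so indexOffset = 7, while t2Offset only accumulates the d = 0 term because
// dimension 'dim' is skipped for 't2'. The dynamic-Dims specialization below
// follows the same scheme with index.dims instead of the template parameter.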
// Same as above but using a dynamic number of dimensions.
template <typename IndexType, typename Real>
struct IndexToScatterGatherOffsets<IndexType, Real, -1> {
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<int64_t, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t1, IndexType* t1Offset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = index.dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
*t1Offset += curDimIndex * t1.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<int64_t, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = index.dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
};
template <typename IndexType, typename Real, int Dims>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void THCudaTensor_gatherKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<Real, IndexType> src,
TensorInfo<int64_t, IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
tensor, &tensorOffset,
src, &srcOffset);
int64_t indexValue = index.data[indexOffset];
CUDA_KERNEL_ASSERT(indexValue >= 0 && indexValue < src.sizes[dim]);
srcOffset += indexValue * src.strides[dim];
tensor.data[tensorOffset] = src.data[srcOffset];
}
}
template <typename IndexType, typename Real, int Dims>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void THCudaTensor_scatterKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<Real, IndexType> src,
TensorInfo<int64_t, IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
src, &srcOffset,
tensor, &tensorOffset);
int64_t indexValue = index.data[indexOffset];
CUDA_KERNEL_ASSERT(indexValue >= 0 && indexValue < tensor.sizes[dim]);
tensorOffset += indexValue * tensor.strides[dim];
tensor.data[tensorOffset] = src.data[srcOffset];
}
}
template <typename IndexType, typename Real, int Dims>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void THCudaTensor_scatterAddKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<Real, IndexType> src,
TensorInfo<int64_t, IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
src, &srcOffset,
tensor, &tensorOffset);
int64_t indexValue = index.data[indexOffset];
CUDA_KERNEL_ASSERT(indexValue >= 0 && indexValue < tensor.sizes[dim]);
tensorOffset += indexValue * tensor.strides[dim];
gpuAtomicAdd(&tensor.data[tensorOffset], src.data[srcOffset]);
}
}
template <typename IndexType, typename Real, int Dims>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void THCudaTensor_scatterFillKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<int64_t, IndexType> index,
Real value,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
tensor, &tensorOffset);
int64_t indexValue = index.data[indexOffset];
CUDA_KERNEL_ASSERT(indexValue >= 0 && indexValue < tensor.sizes[dim]);
tensorOffset += indexValue * tensor.strides[dim];
tensor.data[tensorOffset] = value;
}
}
#include <THH/generic/THHTensorScatterGather.hip>
#include <THH/THHGenerateAllTypes.h>
#include <THH/generic/THHTensorScatterGather.hip>
#include <THH/THHGenerateBoolType.h>
| 34356a7e8f23e7cb5ae534eb4dceadec065332f9.cu | #include <THC/THCTensorMath.h>
#include <THC/THCGeneral.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCApply.cuh>
#include <c10/macros/Macros.h>
// Compute the offsets into the given tensors for a linear index. For the 't2'
// tensor, dimension 'dim' is skipped. The tensors are assumed to have the same
// size (with the exception of 't2' in dimension 'dim').
// This version uses a static number of dimensions.
template <typename IndexType, typename Real, int Dims>
struct IndexToScatterGatherOffsets {
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<int64_t, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t1, IndexType* t1Offset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
*t1Offset += curDimIndex * t1.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<int64_t, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
};
// Same as above but using a dynamic number of dimensions.
template <typename IndexType, typename Real>
struct IndexToScatterGatherOffsets<IndexType, Real, -1> {
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<int64_t, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t1, IndexType* t1Offset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = index.dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
*t1Offset += curDimIndex * t1.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<int64_t, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = index.dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
};
template <typename IndexType, typename Real, int Dims>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void THCudaTensor_gatherKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<Real, IndexType> src,
TensorInfo<int64_t, IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
tensor, &tensorOffset,
src, &srcOffset);
int64_t indexValue = index.data[indexOffset];
CUDA_KERNEL_ASSERT(indexValue >= 0 && indexValue < src.sizes[dim]);
srcOffset += indexValue * src.strides[dim];
tensor.data[tensorOffset] = src.data[srcOffset];
}
}
template <typename IndexType, typename Real, int Dims>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void THCudaTensor_scatterKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<Real, IndexType> src,
TensorInfo<int64_t, IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
src, &srcOffset,
tensor, &tensorOffset);
int64_t indexValue = index.data[indexOffset];
CUDA_KERNEL_ASSERT(indexValue >= 0 && indexValue < tensor.sizes[dim]);
tensorOffset += indexValue * tensor.strides[dim];
tensor.data[tensorOffset] = src.data[srcOffset];
}
}
template <typename IndexType, typename Real, int Dims>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void THCudaTensor_scatterAddKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<Real, IndexType> src,
TensorInfo<int64_t, IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
src, &srcOffset,
tensor, &tensorOffset);
int64_t indexValue = index.data[indexOffset];
CUDA_KERNEL_ASSERT(indexValue >= 0 && indexValue < tensor.sizes[dim]);
tensorOffset += indexValue * tensor.strides[dim];
gpuAtomicAdd(&tensor.data[tensorOffset], src.data[srcOffset]);
}
}
template <typename IndexType, typename Real, int Dims>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void THCudaTensor_scatterFillKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<int64_t, IndexType> index,
Real value,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
tensor, &tensorOffset);
int64_t indexValue = index.data[indexOffset];
CUDA_KERNEL_ASSERT(indexValue >= 0 && indexValue < tensor.sizes[dim]);
tensorOffset += indexValue * tensor.strides[dim];
tensor.data[tensorOffset] = value;
}
}
#include <THC/generic/THCTensorScatterGather.cu>
#include <THC/THCGenerateAllTypes.h>
#include <THC/generic/THCTensorScatterGather.cu>
#include <THC/THCGenerateBoolType.h>
|
2880135482652dc8762e219dd1332b0f233b4a66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_polygon.cuh"
void cudaPolygon::fillRectangle() {
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::fillRectangle: Filling x0 = %d , y0 = %d , x1 = %d , y1 "
"= %d , size = %d , sector = %d\n",
x0, y0, x1, y1, size, sector);
#endif
undeformed_xs.resize(stop + 1);
undeformed_xs[0].resize(size);
undeformed_ys.resize(stop + 1);
undeformed_ys[0].resize(size);
xy_center.resize(stop + 1);
  // Fill x with the linear indices 0 .. size-1
thrust::sequence(thrust::hip::par.on(domainSelectionStream),
undeformed_xs[0].begin(), undeformed_xs[0].end());
// Zip x and y and transform them
thrust::for_each(thrust::hip::par.on(domainSelectionStream),
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[0].begin(), undeformed_ys[0].begin())),
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[0].end(), undeformed_ys[0].end())),
RectFunctor(x0, y0, x1, y1));
}
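// Idiom sketch (interpretation, RectFunctor is defined elsewhere): the
// thrust::sequence call writes the linear indices 0 .. size-1 into
// undeformed_xs[0]; the zipped for_each then lets RectFunctor overwrite each
// (x, y) tuple in place, presumably mapping every linear index to a point of
// the x0..x1 / y0..y1 rectangle. This is the usual Thrust zip-iterator pattern
// for filling two coordinate arrays in a single pass.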
v_points cudaPolygon::getUndXY0ToCPU() {
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::getUndXY0ToCPU\n");
#endif
thrust::host_vector<float> h_xs = undeformed_xs[0];
thrust::host_vector<float> h_ys = undeformed_ys[0];
v_points vxy(h_xs.size());
for (int i = 0; i < vxy.size(); ++i) {
vxy[i] = std::make_pair(h_xs[i], h_ys[i]);
}
return vxy;
}
v_points cudaPolygon::getDefXY0ToCPU() {
/**
Method is used to make a host copy of the deformed points for plotting
purposes
*/
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::getDefXY0ToCPU\n");
#endif
// Generate deformed points for the level 0 as a copy of the undeformed
// parameters
thrust::device_vector<float> deformed_xs0 = undeformed_xs[0];
thrust::device_vector<float> deformed_ys0 = undeformed_ys[0];
float *d_parameters = getParameters(parType_lastGood);
int numberOfPoints = getNumberOfPoints(0);
float *defX_ptr = thrust::raw_pointer_cast(deformed_xs0.data());
float *defY_ptr = thrust::raw_pointer_cast(deformed_ys0.data());
float *undCenter = getUndCenter(0);
int blocksPerGrid =
(numberOfPoints + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( kModel_inPlace), dim3(blocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, d_parameters,
fittingModel,
numberOfPoints, defX_ptr,
defY_ptr, undCenter);
// Transfer deformed points to the CPU
thrust::host_vector<float> h_xs = deformed_xs0;
thrust::host_vector<float> h_ys = deformed_ys0;
// Make a v_points and return it
v_points vxy(h_xs.size());
for (int i = 0; i < vxy.size(); ++i) {
vxy[i] = std::make_pair(h_xs[i], h_ys[i]);
}
return vxy;
}
int cudaPolygon::getNumberOfPoints(int level) {
return undeformed_xs[level].size();
}
float *cudaPolygon::getUndXPtr(int level) {
return thrust::raw_pointer_cast(undeformed_xs[level].data());
}
float *cudaPolygon::getUndYPtr(int level) {
return thrust::raw_pointer_cast(undeformed_ys[level].data());
}
float *cudaPolygon::getUndCenter(int level) {
return thrust::raw_pointer_cast(xy_center[level].data());
}
float *cudaPolygon::getParameters(parameterTypeEnum parSrc) {
return parameters[parSrc];
}
CorrelationResult *cudaPolygon::getCorrelationResultsToCPU() {
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::getCorrelationResultsToCPU , sector = %d\n", sector);
fflush(stdout);
#endif
scaleParametersForLevel(0);
// Copy last parameter set to gpuCorrelationResults
hipMemcpy(gpuCorrelationResults->resultingParameters,
parameters[parType_lastGood],
numberOfModelParameters * sizeof(float), hipMemcpyDeviceToDevice);
// Copy xy_center to gpuCorrelationResults
hipMemcpy(&gpuCorrelationResults->undCenterX,
thrust::raw_pointer_cast(xy_center[0].data()), 2 * sizeof(float),
hipMemcpyDeviceToDevice);
// Copy gpuCorrelationResults to cpuCorrelationResults
hipMemcpy(cpuCorrelationResults, gpuCorrelationResults,
sizeof(CorrelationResult), hipMemcpyDeviceToHost);
  // Number of points comes from the thrust device_vector, whose size is stored
  // on the CPU
cpuCorrelationResults->numberOfPoints = getNumberOfPoints(0);
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::getCorrelationResultsToCPU\n");
printf("cpuCorrelationResults->numberOfPoints = %d\n",
cpuCorrelationResults->numberOfPoints);
printf("cpuCorrelationResults->undCenterX = %f\n",
cpuCorrelationResults->undCenterX);
printf("cpuCorrelationResults->undCenterY = %f\n",
cpuCorrelationResults->undCenterY);
for (int i = 0; i < numberOfModelParameters; ++i) {
printf("%14.4e", cpuCorrelationResults->resultingParameters[i]);
}
printf("\n");
fflush(stdout);
#endif
return cpuCorrelationResults;
}
float *cudaPolygon::getGlobalABChi() { return globalABChi; }
void cudaPolygon::updateParameters(int numberOfModelParameters,
parameterTypeEnum parSrc,
parameterTypeEnum parDst,
hipStream_t stream) {
hipLaunchKernelGGL(( kUpdateParameters), dim3(1), dim3(32), 0, stream,
parameters[parSrc], parameters[parDst],
&globalABChi[numberOfModelParameters * numberOfModelParameters],
numberOfModelParameters);
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::updateParameters type = %d , sector = %d\n", parDst,
sector);
float *h_par = new float[numberOfModelParameters];
float *d_par = parameters[parDst];
hipMemcpy(h_par, d_par, numberOfModelParameters * sizeof(float),
hipMemcpyDeviceToHost);
for (int i = 0; i < numberOfModelParameters; ++i) {
printf("%14.4e", h_par[i]);
}
printf("\n");
fflush(stdout);
delete[] h_par;
#endif
}
void cudaPolygon::scaleParametersForLevel(int level) {
if (level == currentPyramidLevel)
return;
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::scaleParametersForLevel - before scale , sector = %d\n",
sector);
float *h_par = new float[numberOfModelParameters];
float *d_par = parameters[parType_lastGood];
hipMemcpy(h_par, d_par, numberOfModelParameters * sizeof(float),
hipMemcpyDeviceToHost);
for (int i = 0; i < numberOfModelParameters; ++i) {
printf("%14.4e", h_par[i]);
}
printf("\n");
fflush(stdout);
#endif
int numerator = 1 << currentPyramidLevel;
int denominator = 1 << level;
float multiplier = (float)numerator / (float)denominator;
hipLaunchKernelGGL(( kScale), dim3(1), dim3(1), 0, 0, parameters[parType_lastGood], parameters[parType_tentative],
parameters[parType_saved], fittingModel, multiplier);
currentPyramidLevel = level;
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::scaleParametersForLevel - after scale\n");
h_par = new float[numberOfModelParameters];
hipMemcpy(h_par, d_par, numberOfModelParameters * sizeof(float),
hipMemcpyDeviceToHost);
for (int i = 0; i < numberOfModelParameters; ++i) {
printf("%14.4e", h_par[i]);
}
printf("\n");
fflush(stdout);
delete[] h_par;
#endif
}
void cudaPolygon::initializeParametersLevel0(float *initialGuess_) {
// Put a marker on the nvvp CUDA profiler
roctxRangePushA("cudaPolygon::initializeParametersLevel0");
hipMemcpy(parameters[parType_lastGood], initialGuess_,
numberOfModelParameters * sizeof(float), hipMemcpyHostToDevice);
transferParameters(parType_lastGood, parType_tentative);
transferParameters(parType_lastGood, parType_saved);
currentPyramidLevel = 0;
roctxRangePop();
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::initializeParametersLevel0 , sector = %d\n", sector);
float *h_par = new float[numberOfModelParameters];
float *d_par = parameters[parType_lastGood];
hipMemcpy(h_par, d_par, numberOfModelParameters * sizeof(float),
hipMemcpyDeviceToHost);
for (int i = 0; i < numberOfModelParameters; ++i) {
printf("%14.4e", h_par[i]);
}
printf("\n");
fflush(stdout);
delete[] h_par;
#endif
}
void cudaPolygon::updatePolygon(
deformationDescriptionEnum deformationDescription) {
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::updatePolygon\n");
fflush(stdout);
#endif
switch (deformationDescription) {
case def_Eulerian:
return;
case def_Lagrangian: {
float dxy[2]{0.f, 0.f};
switch (fittingModel) {
case fm_UVUxUyVxVy:
case fm_UVQ:
case fm_UV:
hipMemcpy(&dxy[0], parameters[parType_lastGood], 2 * sizeof(float),
hipMemcpyDeviceToHost);
xy_center[0][0] += dxy[0];
xy_center[0][1] += dxy[1];
break;
case fm_U:
hipMemcpy(&dxy[0], parameters[parType_lastGood], 1 * sizeof(float),
hipMemcpyDeviceToHost);
xy_center[0][0] += dxy[0];
break;
default:
assert(false);
break;
}
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[0].begin(), undeformed_ys[0].begin())),
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[0].end(), undeformed_ys[0].end())),
translateFunctor(dxy[0], dxy[1]));
} break;
case def_strict_Lagrangian: {
float *d_parameters = getParameters(parType_lastGood);
int numberOfPoints = getNumberOfPoints(0);
float *undX_ptr = getUndXPtr(0);
float *undY_ptr = getUndYPtr(0);
float *undCenter = getUndCenter(0);
int blocksPerGrid =
(numberOfPoints + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( kModel_inPlace), dim3(blocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0,
d_parameters,
fittingModel,
numberOfPoints, undX_ptr, undY_ptr, undCenter);
makeUndCenter0();
} break;
default:
assert(false);
break;
}
makeAllUndLevels();
makeAllUndCenters();
}
void cudaPolygonAnnular::updatePolygon(
deformationDescriptionEnum deformationDescription) {
#if DEBUG_CUDA_POLYGON
printf("cudaPolygonAnnular::updatePolygon\n");
#endif
switch (deformationDescription) {
case def_Eulerian:
return;
case def_Lagrangian: {
float dx, dy;
switch (fittingModel) {
case fm_U:
xy_center[0][0] += parameters[parType_lastGood][0];
dx = parameters[parType_lastGood][0];
dy = 0.f;
break;
case fm_UV:
case fm_UVQ:
case fm_UVUxUyVxVy:
xy_center[0][0] += parameters[parType_lastGood][0];
xy_center[0][1] += parameters[parType_lastGood][1];
dx = parameters[parType_lastGood][0];
dy = parameters[parType_lastGood][1];
break;
default:
assert(false);
break;
}
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[0].begin(), undeformed_ys[0].begin())),
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[0].end(), undeformed_ys[0].end())),
translateFunctor(dx, dy));
} break;
case def_strict_Lagrangian: {
float *d_parameters = getParameters(parType_lastGood);
int numberOfPoints = getNumberOfPoints(0);
float *undX_ptr = getUndXPtr(0);
float *undY_ptr = getUndYPtr(0);
float *undCenter = getUndCenter(0);
int blocksPerGrid =
(numberOfPoints + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( kModel_inPlace), dim3(blocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0,
d_parameters,
fittingModel,
numberOfPoints, undX_ptr, undY_ptr, undCenter);
makeUndCenter0();
} break;
default:
assert(false);
break;
}
makeAllUndLevels();
makeAllUndCenters();
}
void cudaPolygon::transferParameters(parameterTypeEnum parSrc,
parameterTypeEnum parDst) {
if (parDst == parSrc)
return;
hipMemcpy(parameters[parDst], parameters[parSrc],
numberOfModelParameters * sizeof(float), hipMemcpyDeviceToDevice);
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::transferParameters() %d -> %d\n", parSrc, parDst);
#endif
}
void cudaPolygon::makeAllUndLevels() {
int prevLevel = 0;
int firstLevel = (start == 0 ? step : start);
for (int ilevel = firstLevel; ilevel <= stop; ilevel += step) {
undeformed_xs[ilevel].resize(undeformed_xs[prevLevel].size());
undeformed_ys[ilevel].resize(undeformed_ys[prevLevel].size());
ZipIt zipEnd = thrust::copy_if(
thrust::make_zip_iterator(
thrust::make_tuple(undeformed_xs[prevLevel].begin(),
undeformed_ys[prevLevel].begin())),
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[prevLevel].end(), undeformed_ys[prevLevel].end())),
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[ilevel].begin(), undeformed_ys[ilevel].begin())),
copyFunctor(prevLevel, ilevel));
TupleIt tupleEnd = zipEnd.get_iterator_tuple();
VIt xsEnd = thrust::get<0>(tupleEnd);
VIt ysEnd = thrust::get<1>(tupleEnd);
undeformed_xs[ilevel].erase(xsEnd, undeformed_xs[ilevel].end());
undeformed_ys[ilevel].erase(ysEnd, undeformed_ys[ilevel].end());
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[ilevel].begin(), undeformed_ys[ilevel].begin())),
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[ilevel].end(), undeformed_ys[ilevel].end())),
scale2DFunctor(prevLevel, ilevel));
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::makeLevels ilevel = %d , sector = %d\n", ilevel,
sector);
#endif
#if DEBUG_CUDA_POLYGON_POINTS
if (ilevel == firstLevel) {
printf(" cudaPolygon::makeLevels prevLevel = %d\n", prevLevel);
thrust::host_vector<float> h_xs = undeformed_xs[prevLevel];
thrust::host_vector<float> h_ys = undeformed_ys[prevLevel];
for (int i = 0; i < h_xs.size(); ++i) {
printf("x[ %d ] = %f , y[ %d ] = %f \n", i, h_xs[i], i, h_ys[i]);
}
}
printf(" cudaPolygon::makeLevels ilevel = %d\n", ilevel);
thrust::host_vector<float> h_xs = undeformed_xs[ilevel];
thrust::host_vector<float> h_ys = undeformed_ys[ilevel];
for (int i = 0; i < h_xs.size(); ++i) {
printf("x[ %d ] = %f , y[ %d ] = %f \n", i, h_xs[i], i, h_ys[i]);
}
#endif
prevLevel = ilevel;
}
allocateGlobalABChi();
}
void cudaPolygon::allocateGlobalABChi() {
/*
* Allocate device global memory to perform global reduction. One per block.
   * Big enough for the first pyramid step (start)
*/
int numberOfPoints = getNumberOfPoints(start);
int numberOfBlocks =
(numberOfPoints + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
int globalSize =
sizeof(float) * numberOfBlocks *
(1 + numberOfModelParameters * (1 + numberOfModelParameters));
if (globalABChi)
deallocateGlobalABChi();
hipError_t err = hipMalloc((void **)&globalABChi, globalSize);
if (err != hipSuccess) {
printf("Failed to allocate global globalABCHI (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
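// Sizing sketch (illustrative, values assumed): with THREADS_PER_BLOCK = 256,
// numberOfPoints = 100000 and numberOfModelParameters = 6, the allocation above
// covers numberOfBlocks = ceil(100000 / 256) = 391 blocks and
// globalSize = 4 * 391 * (1 + 6 * (1 + 6)) = 67252 bytes, i.e. one partial
// chi^2 value, one 6x6 A matrix and one 6-entry B vector per block for the
// subsequent global reduction.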
void cudaPolygon::deallocateGlobalABChi() {
if (!globalABChi)
return;
  // Free the per-block globalABChi buffer used for the global reduction
hipError_t err = hipFree(globalABChi);
if (err != hipSuccess) {
printf("Failed to free device globalABCHI (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void cudaPolygon::makeUndCenter0() {
xy_center[0].resize(2);
xy_center[0][0] =
thrust::reduce(undeformed_xs[0].begin(), undeformed_xs[0].end()) /
(float)undeformed_xs[0].size();
xy_center[0][1] =
thrust::reduce(undeformed_ys[0].begin(), undeformed_ys[0].end()) /
(float)undeformed_ys[0].size();
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::makeUndCenter0\n");
#endif
}
void cudaPolygon::makeAllUndCenters() {
int prevLevel = 0;
int firstLevel = (start == 0 ? step : start);
for (int ilevel = firstLevel; ilevel <= stop; ilevel += step) {
xy_center[ilevel] = xy_center[prevLevel];
thrust::transform(xy_center[ilevel].begin(), xy_center[ilevel].end(),
xy_center[ilevel].begin(),
scale1DFunctor(prevLevel, ilevel));
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::makeAllUndCenters ilevel = %d , sector = %d\n", ilevel,
sector);
#endif
#if DEBUG_CUDA_POLYGON_POINTS
if (ilevel == firstLevel) {
printf(" cudaPolygon::makeAllUndCenters prevLevel = %d\n", prevLevel);
thrust::host_vector<float> h_xs = xy_center[prevLevel];
printf("x_center = %f , y_center = %f \n", h_xs[0], h_xs[1]);
}
printf(" cudaPolygon::makeAllUndCenters ilevel = %d\n", ilevel);
thrust::host_vector<float> h_xs = xy_center[ilevel];
    printf("x_center = %f , y_center = %f \n", h_xs[0], h_xs[1]);
#endif
prevLevel = ilevel;
}
}
void cudaPolygonAnnular::cleanAnnularRectangle0(float r, float dr, float a,
float da, float cx, float cy,
int as) {
ZipIt zipEnd = thrust::remove_if(
thrust::hip::par.on(domainSelectionStream),
thrust::make_zip_iterator(thrust::make_tuple(undeformed_xs[0].begin(),
undeformed_ys[0].begin())),
thrust::make_zip_iterator(
thrust::make_tuple(undeformed_xs[0].end(), undeformed_ys[0].end())),
removeAnnularFunctor(r, dr, a, da, cx, cy, as));
TupleIt tupleEnd = zipEnd.get_iterator_tuple();
VIt xsEnd = thrust::get<0>(tupleEnd);
VIt ysEnd = thrust::get<1>(tupleEnd);
undeformed_xs[0].erase(xsEnd, undeformed_xs[0].end());
undeformed_ys[0].erase(ysEnd, undeformed_ys[0].end());
}
void cudaPolygonBlob::cleanBlobRectangle0(v_points blobContour) {
LineEquationsDevice lineEquations = makeLineEquations(blobContour);
v_pairs contours = blobContour;
ZipIt zipEnd = thrust::remove_if(
thrust::hip::par.on(domainSelectionStream),
thrust::make_zip_iterator(thrust::make_tuple(undeformed_xs[0].begin(),
undeformed_ys[0].begin())),
thrust::make_zip_iterator(
thrust::make_tuple(undeformed_xs[0].end(), undeformed_ys[0].end())),
removeBlobFunctor(contours.data(), lineEquations.data(),
lineEquations.size()));
TupleIt tupleEnd = zipEnd.get_iterator_tuple();
VIt xsEnd = thrust::get<0>(tupleEnd);
VIt ysEnd = thrust::get<1>(tupleEnd);
undeformed_xs[0].erase(xsEnd, undeformed_xs[0].end());
undeformed_ys[0].erase(ysEnd, undeformed_ys[0].end());
}
LineEquationsDevice cudaPolygonBlob::makeLineEquations(v_points blobContour) {
LineEquationsHost lineEquationsH(blobContour.size());
for (unsigned int i = 0; i < blobContour.size(); ++i) {
float v1x1, v1y1, v1x2, v1y2;
v1x1 = blobContour[i].first;
v1y1 = blobContour[i].second;
if (i != blobContour.size() - 1) {
v1x2 = blobContour[i + 1].first;
v1y2 = blobContour[i + 1].second;
} else {
v1x2 = blobContour[0].first;
v1y2 = blobContour[0].second;
}
thrust::get<0>(lineEquationsH[i]) = v1y2 - v1y1;
thrust::get<1>(lineEquationsH[i]) = v1x1 - v1x2;
thrust::get<2>(lineEquationsH[i]) = (v1x2 * v1y1) - (v1x1 * v1y2);
}
LineEquationsDevice lineEquationsD = lineEquationsH;
return lineEquationsD;
}
| 2880135482652dc8762e219dd1332b0f233b4a66.cu | #include "cuda_polygon.cuh"
void cudaPolygon::fillRectangle() {
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::fillRectangle: Filling x0 = %d , y0 = %d , x1 = %d , y1 "
"= %d , size = %d , sector = %d\n",
x0, y0, x1, y1, size, sector);
#endif
undeformed_xs.resize(stop + 1);
undeformed_xs[0].resize(size);
undeformed_ys.resize(stop + 1);
undeformed_ys[0].resize(size);
xy_center.resize(stop + 1);
  // Fill x with the linear indices 0 .. size-1
thrust::sequence(thrust::cuda::par.on(domainSelectionStream),
undeformed_xs[0].begin(), undeformed_xs[0].end());
// Zip x and y and transform them
thrust::for_each(thrust::cuda::par.on(domainSelectionStream),
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[0].begin(), undeformed_ys[0].begin())),
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[0].end(), undeformed_ys[0].end())),
RectFunctor(x0, y0, x1, y1));
}
v_points cudaPolygon::getUndXY0ToCPU() {
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::getUndXY0ToCPU\n");
#endif
thrust::host_vector<float> h_xs = undeformed_xs[0];
thrust::host_vector<float> h_ys = undeformed_ys[0];
v_points vxy(h_xs.size());
for (int i = 0; i < vxy.size(); ++i) {
vxy[i] = std::make_pair(h_xs[i], h_ys[i]);
}
return vxy;
}
v_points cudaPolygon::getDefXY0ToCPU() {
/**
Method is used to make a host copy of the deformed points for plotting
purposes
*/
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::getDefXY0ToCPU\n");
#endif
// Generate deformed points for the level 0 as a copy of the undeformed
// parameters
thrust::device_vector<float> deformed_xs0 = undeformed_xs[0];
thrust::device_vector<float> deformed_ys0 = undeformed_ys[0];
float *d_parameters = getParameters(parType_lastGood);
int numberOfPoints = getNumberOfPoints(0);
float *defX_ptr = thrust::raw_pointer_cast(deformed_xs0.data());
float *defY_ptr = thrust::raw_pointer_cast(deformed_ys0.data());
float *undCenter = getUndCenter(0);
int blocksPerGrid =
(numberOfPoints + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
kModel_inPlace<<<blocksPerGrid, THREADS_PER_BLOCK>>>(d_parameters,
fittingModel,
numberOfPoints, defX_ptr,
defY_ptr, undCenter);
// Transfer deformed points to the CPU
thrust::host_vector<float> h_xs = deformed_xs0;
thrust::host_vector<float> h_ys = deformed_ys0;
// Make a v_points and return it
v_points vxy(h_xs.size());
for (int i = 0; i < vxy.size(); ++i) {
vxy[i] = std::make_pair(h_xs[i], h_ys[i]);
}
return vxy;
}
int cudaPolygon::getNumberOfPoints(int level) {
return undeformed_xs[level].size();
}
float *cudaPolygon::getUndXPtr(int level) {
return thrust::raw_pointer_cast(undeformed_xs[level].data());
}
float *cudaPolygon::getUndYPtr(int level) {
return thrust::raw_pointer_cast(undeformed_ys[level].data());
}
float *cudaPolygon::getUndCenter(int level) {
return thrust::raw_pointer_cast(xy_center[level].data());
}
float *cudaPolygon::getParameters(parameterTypeEnum parSrc) {
return parameters[parSrc];
}
CorrelationResult *cudaPolygon::getCorrelationResultsToCPU() {
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::getCorrelationResultsToCPU , sector = %d\n", sector);
fflush(stdout);
#endif
scaleParametersForLevel(0);
// Copy last parameter set to gpuCorrelationResults
cudaMemcpy(gpuCorrelationResults->resultingParameters,
parameters[parType_lastGood],
numberOfModelParameters * sizeof(float), cudaMemcpyDeviceToDevice);
// Copy xy_center to gpuCorrelationResults
cudaMemcpy(&gpuCorrelationResults->undCenterX,
thrust::raw_pointer_cast(xy_center[0].data()), 2 * sizeof(float),
cudaMemcpyDeviceToDevice);
// Copy gpuCorrelationResults to cpuCorrelationResults
cudaMemcpy(cpuCorrelationResults, gpuCorrelationResults,
sizeof(CorrelationResult), cudaMemcpyDeviceToHost);
  // Number of points comes from the thrust device_vector, whose size is stored
  // on the CPU
cpuCorrelationResults->numberOfPoints = getNumberOfPoints(0);
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::getCorrelationResultsToCPU\n");
printf("cpuCorrelationResults->numberOfPoints = %d\n",
cpuCorrelationResults->numberOfPoints);
printf("cpuCorrelationResults->undCenterX = %f\n",
cpuCorrelationResults->undCenterX);
printf("cpuCorrelationResults->undCenterY = %f\n",
cpuCorrelationResults->undCenterY);
for (int i = 0; i < numberOfModelParameters; ++i) {
printf("%14.4e", cpuCorrelationResults->resultingParameters[i]);
}
printf("\n");
fflush(stdout);
#endif
return cpuCorrelationResults;
}
float *cudaPolygon::getGlobalABChi() { return globalABChi; }
void cudaPolygon::updateParameters(int numberOfModelParameters,
parameterTypeEnum parSrc,
parameterTypeEnum parDst,
cudaStream_t stream) {
kUpdateParameters<<<1, 32, 0, stream>>>(
parameters[parSrc], parameters[parDst],
&globalABChi[numberOfModelParameters * numberOfModelParameters],
numberOfModelParameters);
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::updateParameters type = %d , sector = %d\n", parDst,
sector);
float *h_par = new float[numberOfModelParameters];
float *d_par = parameters[parDst];
cudaMemcpy(h_par, d_par, numberOfModelParameters * sizeof(float),
cudaMemcpyDeviceToHost);
for (int i = 0; i < numberOfModelParameters; ++i) {
printf("%14.4e", h_par[i]);
}
printf("\n");
fflush(stdout);
delete[] h_par;
#endif
}
void cudaPolygon::scaleParametersForLevel(int level) {
if (level == currentPyramidLevel)
return;
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::scaleParametersForLevel - before scale , sector = %d\n",
sector);
float *h_par = new float[numberOfModelParameters];
float *d_par = parameters[parType_lastGood];
cudaMemcpy(h_par, d_par, numberOfModelParameters * sizeof(float),
cudaMemcpyDeviceToHost);
for (int i = 0; i < numberOfModelParameters; ++i) {
printf("%14.4e", h_par[i]);
}
printf("\n");
fflush(stdout);
#endif
int numerator = 1 << currentPyramidLevel;
int denominator = 1 << level;
float multiplier = (float)numerator / (float)denominator;
kScale<<<1, 1>>>(parameters[parType_lastGood], parameters[parType_tentative],
parameters[parType_saved], fittingModel, multiplier);
currentPyramidLevel = level;
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::scaleParametersForLevel - after scale\n");
h_par = new float[numberOfModelParameters];
cudaMemcpy(h_par, d_par, numberOfModelParameters * sizeof(float),
cudaMemcpyDeviceToHost);
for (int i = 0; i < numberOfModelParameters; ++i) {
printf("%14.4e", h_par[i]);
}
printf("\n");
fflush(stdout);
delete[] h_par;
#endif
}
void cudaPolygon::initializeParametersLevel0(float *initialGuess_) {
// Put a marker on the nvvp CUDA profiler
nvtxRangePushA("cudaPolygon::initializeParametersLevel0");
cudaMemcpy(parameters[parType_lastGood], initialGuess_,
numberOfModelParameters * sizeof(float), cudaMemcpyHostToDevice);
transferParameters(parType_lastGood, parType_tentative);
transferParameters(parType_lastGood, parType_saved);
currentPyramidLevel = 0;
nvtxRangePop();
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::initializeParametersLevel0 , sector = %d\n", sector);
float *h_par = new float[numberOfModelParameters];
float *d_par = parameters[parType_lastGood];
cudaMemcpy(h_par, d_par, numberOfModelParameters * sizeof(float),
cudaMemcpyDeviceToHost);
for (int i = 0; i < numberOfModelParameters; ++i) {
printf("%14.4e", h_par[i]);
}
printf("\n");
fflush(stdout);
delete[] h_par;
#endif
}
void cudaPolygon::updatePolygon(
deformationDescriptionEnum deformationDescription) {
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::updatePolygon\n");
fflush(stdout);
#endif
switch (deformationDescription) {
case def_Eulerian:
return;
case def_Lagrangian: {
float dxy[2]{0.f, 0.f};
switch (fittingModel) {
case fm_UVUxUyVxVy:
case fm_UVQ:
case fm_UV:
cudaMemcpy(&dxy[0], parameters[parType_lastGood], 2 * sizeof(float),
cudaMemcpyDeviceToHost);
xy_center[0][0] += dxy[0];
xy_center[0][1] += dxy[1];
break;
case fm_U:
cudaMemcpy(&dxy[0], parameters[parType_lastGood], 1 * sizeof(float),
cudaMemcpyDeviceToHost);
xy_center[0][0] += dxy[0];
break;
default:
assert(false);
break;
}
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[0].begin(), undeformed_ys[0].begin())),
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[0].end(), undeformed_ys[0].end())),
translateFunctor(dxy[0], dxy[1]));
} break;
case def_strict_Lagrangian: {
float *d_parameters = getParameters(parType_lastGood);
int numberOfPoints = getNumberOfPoints(0);
float *undX_ptr = getUndXPtr(0);
float *undY_ptr = getUndYPtr(0);
float *undCenter = getUndCenter(0);
int blocksPerGrid =
(numberOfPoints + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
kModel_inPlace<<<blocksPerGrid, THREADS_PER_BLOCK>>>(
d_parameters,
fittingModel,
numberOfPoints, undX_ptr, undY_ptr, undCenter);
makeUndCenter0();
} break;
default:
assert(false);
break;
}
makeAllUndLevels();
makeAllUndCenters();
}
void cudaPolygonAnnular::updatePolygon(
deformationDescriptionEnum deformationDescription) {
#if DEBUG_CUDA_POLYGON
printf("cudaPolygonAnnular::updatePolygon\n");
#endif
switch (deformationDescription) {
case def_Eulerian:
return;
case def_Lagrangian: {
float dx, dy;
switch (fittingModel) {
case fm_U:
xy_center[0][0] += parameters[parType_lastGood][0];
dx = parameters[parType_lastGood][0];
dy = 0.f;
break;
case fm_UV:
case fm_UVQ:
case fm_UVUxUyVxVy:
xy_center[0][0] += parameters[parType_lastGood][0];
xy_center[0][1] += parameters[parType_lastGood][1];
dx = parameters[parType_lastGood][0];
dy = parameters[parType_lastGood][1];
break;
default:
assert(false);
break;
}
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[0].begin(), undeformed_ys[0].begin())),
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[0].end(), undeformed_ys[0].end())),
translateFunctor(dx, dy));
} break;
case def_strict_Lagrangian: {
float *d_parameters = getParameters(parType_lastGood);
int numberOfPoints = getNumberOfPoints(0);
float *undX_ptr = getUndXPtr(0);
float *undY_ptr = getUndYPtr(0);
float *undCenter = getUndCenter(0);
int blocksPerGrid =
(numberOfPoints + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
kModel_inPlace<<<blocksPerGrid, THREADS_PER_BLOCK>>>(
d_parameters,
fittingModel,
numberOfPoints, undX_ptr, undY_ptr, undCenter);
makeUndCenter0();
} break;
default:
assert(false);
break;
}
makeAllUndLevels();
makeAllUndCenters();
}
void cudaPolygon::transferParameters(parameterTypeEnum parSrc,
parameterTypeEnum parDst) {
if (parDst == parSrc)
return;
cudaMemcpy(parameters[parDst], parameters[parSrc],
numberOfModelParameters * sizeof(float), cudaMemcpyDeviceToDevice);
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::transferParameters() %d -> %d\n", parSrc, parDst);
#endif
}
void cudaPolygon::makeAllUndLevels() {
int prevLevel = 0;
int firstLevel = (start == 0 ? step : start);
for (int ilevel = firstLevel; ilevel <= stop; ilevel += step) {
undeformed_xs[ilevel].resize(undeformed_xs[prevLevel].size());
undeformed_ys[ilevel].resize(undeformed_ys[prevLevel].size());
ZipIt zipEnd = thrust::copy_if(
thrust::make_zip_iterator(
thrust::make_tuple(undeformed_xs[prevLevel].begin(),
undeformed_ys[prevLevel].begin())),
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[prevLevel].end(), undeformed_ys[prevLevel].end())),
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[ilevel].begin(), undeformed_ys[ilevel].begin())),
copyFunctor(prevLevel, ilevel));
TupleIt tupleEnd = zipEnd.get_iterator_tuple();
VIt xsEnd = thrust::get<0>(tupleEnd);
VIt ysEnd = thrust::get<1>(tupleEnd);
undeformed_xs[ilevel].erase(xsEnd, undeformed_xs[ilevel].end());
undeformed_ys[ilevel].erase(ysEnd, undeformed_ys[ilevel].end());
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[ilevel].begin(), undeformed_ys[ilevel].begin())),
thrust::make_zip_iterator(thrust::make_tuple(
undeformed_xs[ilevel].end(), undeformed_ys[ilevel].end())),
scale2DFunctor(prevLevel, ilevel));
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::makeLevels ilevel = %d , sector = %d\n", ilevel,
sector);
#endif
#if DEBUG_CUDA_POLYGON_POINTS
if (ilevel == firstLevel) {
printf(" cudaPolygon::makeLevels prevLevel = %d\n", prevLevel);
thrust::host_vector<float> h_xs = undeformed_xs[prevLevel];
thrust::host_vector<float> h_ys = undeformed_ys[prevLevel];
for (int i = 0; i < h_xs.size(); ++i) {
printf("x[ %d ] = %f , y[ %d ] = %f \n", i, h_xs[i], i, h_ys[i]);
}
}
printf(" cudaPolygon::makeLevels ilevel = %d\n", ilevel);
thrust::host_vector<float> h_xs = undeformed_xs[ilevel];
thrust::host_vector<float> h_ys = undeformed_ys[ilevel];
for (int i = 0; i < h_xs.size(); ++i) {
printf("x[ %d ] = %f , y[ %d ] = %f \n", i, h_xs[i], i, h_ys[i]);
}
#endif
prevLevel = ilevel;
}
allocateGlobalABChi();
}
void cudaPolygon::allocateGlobalABChi() {
/*
* Allocate device global memory to perform global reduction. One per block.
   * Big enough for the first pyramid step (start)
*/
int numberOfPoints = getNumberOfPoints(start);
int numberOfBlocks =
(numberOfPoints + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
int globalSize =
sizeof(float) * numberOfBlocks *
(1 + numberOfModelParameters * (1 + numberOfModelParameters));
if (globalABChi)
deallocateGlobalABChi();
cudaError_t err = cudaMalloc((void **)&globalABChi, globalSize);
if (err != cudaSuccess) {
printf("Failed to allocate global globalABCHI (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void cudaPolygon::deallocateGlobalABChi() {
if (!globalABChi)
return;
  // Free the per-block globalABChi buffer used for the global reduction
cudaError_t err = cudaFree(globalABChi);
if (err != cudaSuccess) {
printf("Failed to free device globalABCHI (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void cudaPolygon::makeUndCenter0() {
xy_center[0].resize(2);
xy_center[0][0] =
thrust::reduce(undeformed_xs[0].begin(), undeformed_xs[0].end()) /
(float)undeformed_xs[0].size();
xy_center[0][1] =
thrust::reduce(undeformed_ys[0].begin(), undeformed_ys[0].end()) /
(float)undeformed_ys[0].size();
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::makeUndCenter0\n");
#endif
}
void cudaPolygon::makeAllUndCenters() {
int prevLevel = 0;
int firstLevel = (start == 0 ? step : start);
for (int ilevel = firstLevel; ilevel <= stop; ilevel += step) {
xy_center[ilevel] = xy_center[prevLevel];
thrust::transform(xy_center[ilevel].begin(), xy_center[ilevel].end(),
xy_center[ilevel].begin(),
scale1DFunctor(prevLevel, ilevel));
#if DEBUG_CUDA_POLYGON
printf("cudaPolygon::makeAllUndCenters ilevel = %d , sector = %d\n", ilevel,
sector);
#endif
#if DEBUG_CUDA_POLYGON_POINTS
if (ilevel == firstLevel) {
printf(" cudaPolygon::makeAllUndCenters prevLevel = %d\n", prevLevel);
thrust::host_vector<float> h_xs = xy_center[prevLevel];
printf("x_center = %f , y_center = %f \n", h_xs[0], h_xs[1]);
}
printf(" cudaPolygon::makeAllUndCenters ilevel = %d\n", ilevel);
thrust::host_vector<float> h_xs = xy_center[ilevel];
    printf("x_center = %f , y_center = %f \n", h_xs[0], h_xs[1]);
#endif
prevLevel = ilevel;
}
}
void cudaPolygonAnnular::cleanAnnularRectangle0(float r, float dr, float a,
float da, float cx, float cy,
int as) {
ZipIt zipEnd = thrust::remove_if(
thrust::cuda::par.on(domainSelectionStream),
thrust::make_zip_iterator(thrust::make_tuple(undeformed_xs[0].begin(),
undeformed_ys[0].begin())),
thrust::make_zip_iterator(
thrust::make_tuple(undeformed_xs[0].end(), undeformed_ys[0].end())),
removeAnnularFunctor(r, dr, a, da, cx, cy, as));
TupleIt tupleEnd = zipEnd.get_iterator_tuple();
VIt xsEnd = thrust::get<0>(tupleEnd);
VIt ysEnd = thrust::get<1>(tupleEnd);
undeformed_xs[0].erase(xsEnd, undeformed_xs[0].end());
undeformed_ys[0].erase(ysEnd, undeformed_ys[0].end());
}
void cudaPolygonBlob::cleanBlobRectangle0(v_points blobContour) {
LineEquationsDevice lineEquations = makeLineEquations(blobContour);
v_pairs contours = blobContour;
ZipIt zipEnd = thrust::remove_if(
thrust::cuda::par.on(domainSelectionStream),
thrust::make_zip_iterator(thrust::make_tuple(undeformed_xs[0].begin(),
undeformed_ys[0].begin())),
thrust::make_zip_iterator(
thrust::make_tuple(undeformed_xs[0].end(), undeformed_ys[0].end())),
removeBlobFunctor(contours.data(), lineEquations.data(),
lineEquations.size()));
TupleIt tupleEnd = zipEnd.get_iterator_tuple();
VIt xsEnd = thrust::get<0>(tupleEnd);
VIt ysEnd = thrust::get<1>(tupleEnd);
undeformed_xs[0].erase(xsEnd, undeformed_xs[0].end());
undeformed_ys[0].erase(ysEnd, undeformed_ys[0].end());
}
LineEquationsDevice cudaPolygonBlob::makeLineEquations(v_points blobContour) {
LineEquationsHost lineEquationsH(blobContour.size());
for (unsigned int i = 0; i < blobContour.size(); ++i) {
float v1x1, v1y1, v1x2, v1y2;
v1x1 = blobContour[i].first;
v1y1 = blobContour[i].second;
if (i != blobContour.size() - 1) {
v1x2 = blobContour[i + 1].first;
v1y2 = blobContour[i + 1].second;
} else {
v1x2 = blobContour[0].first;
v1y2 = blobContour[0].second;
}
thrust::get<0>(lineEquationsH[i]) = v1y2 - v1y1;
thrust::get<1>(lineEquationsH[i]) = v1x1 - v1x2;
thrust::get<2>(lineEquationsH[i]) = (v1x2 * v1y1) - (v1x1 * v1y2);
}
LineEquationsDevice lineEquationsD = lineEquationsH;
return lineEquationsD;
}
|
6f766438dfc82eeb565fc7b998c8832ab80e5126.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "math.h"
#include <string>
// Image IO
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <sstream>
// Output generated from Teg
#include "tegpixel.h"
#include "renderpixel.h"
// End Temporary placeholder.
#define ALPHA 0.001
__global__
void pt_loss_derivative(int w, int h,
int* tids,
int* pids,
int num_jobs,
float *vertices,
int *indices,
float *tcolors, float* pcolors,
float *d_vertices, float *d_tcolors)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= num_jobs)
return;
auto tri_id = tids[idx];
auto pixel_id = pids[idx];
auto index1 = indices[tri_id * 3 + 0];
auto index2 = indices[tri_id * 3 + 1];
auto index3 = indices[tri_id * 3 + 2];
// Run generated teg function.
auto outvals = tegpixel(
vertices[index1 * 2 + 0],
vertices[index1 * 2 + 1],
vertices[index2 * 2 + 0],
vertices[index2 * 2 + 1],
vertices[index3 * 2 + 0],
vertices[index3 * 2 + 1],
floorf(pixel_id / h),
floorf(pixel_id / h) + 1,
(float)(pixel_id % h),
(float)(pixel_id % h + 1),
pcolors[pixel_id * 3 + 0],
pcolors[pixel_id * 3 + 1],
pcolors[pixel_id * 3 + 2],
tcolors[tri_id * 3 + 0],
tcolors[tri_id * 3 + 1],
tcolors[tri_id * 3 + 2]
);
/*auto temp_outvals = tegpixel(
0, -2,
0, 2,
1, 0,
-1, 1,
-1, 1,
1, 1, 1,
0.5, 0.5, 0.5
);
if (idx == 0){
printf("%f, %f, %f, %f, %f, %f\n",
temp_outvals.o[0], temp_outvals.o[1], temp_outvals.o[2],
temp_outvals.o[3], temp_outvals.o[4], temp_outvals.o[5]);
}*/
/*
if (index3 == 366 && outvals.o[0] != 0.f){
printf("\
Hello from block %d, thread %d\n\
Tri_id %d, Pix_id %d\n\
Pix-color %f, %f, %f\n\
T-colors %f, %f, %f\n\
Out-vals %f, %f, %f\n\
0: %f, %f\n\
1: %f, %f\n\
2: %f, %f\n\
x: %f, %f\n\
y: %f, %f\n\
idxs: %d, %d, %d\n",
blockIdx.x, threadIdx.x,
tri_id, pixel_id,
pcolors[pixel_id * 3 + 0], pcolors[pixel_id * 3 + 1], pcolors[pixel_id * 3 + 2],
tcolors[tri_id * 3 + 0], tcolors[tri_id * 3 + 1], tcolors[tri_id * 3 + 2],
outvals.o[0], outvals.o[1], outvals.o[2],
vertices[index1 * 2 + 0], vertices[index1 * 2 + 1],
vertices[index2 * 2 + 0], vertices[index2 * 2 + 1],
vertices[index3 * 2 + 0], vertices[index3 * 2 + 1],
floorf(pixel_id / h),
floorf(pixel_id / h) + 1,
(float)(pixel_id % h),
(float)(pixel_id % h + 1),
index1, index2, index3);
}*/
// Accumulate derivatives.
// TODO: There needs to be an easier way to accumulate derivatives..
atomicAdd(&d_vertices[index1 * 2 + 0], outvals[0]);
atomicAdd(&d_vertices[index2 * 2 + 0], outvals[1]);
atomicAdd(&d_vertices[index3 * 2 + 0], outvals[2]);
atomicAdd(&d_vertices[index1 * 2 + 1], outvals[3]);
atomicAdd(&d_vertices[index2 * 2 + 1], outvals[4]);
atomicAdd(&d_vertices[index3 * 2 + 1], outvals[5]);
//if (index3 == 366) {
// printf("pt_loss_derivative for %d: %f\n", index3, d_vertices[index3 * 2 + 0]);
//}
atomicAdd(&d_tcolors[tri_id * 3 + 0], outvals[13]);
atomicAdd(&d_tcolors[tri_id * 3 + 1], outvals[14]);
atomicAdd(&d_tcolors[tri_id * 3 + 2], outvals[15]);
}
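// Accumulation note: many (triangle, pixel) jobs touch the same vertex, so the
// derivative scatter above must use atomicAdd. outvals[0..5] carry the x and
// then y derivatives of the three vertices and outvals[13..15] the triangle
// colour derivatives; the skipped slots presumably correspond to derivatives
// with respect to the remaining tegpixel arguments (pixel bounds and pixel
// colour), which are not optimised here.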
__global__
void render_triangles(int w, int h,
int* tids,
int* pids,
int num_jobs,
float *vertices,
int *indices,
float *tcolors,
float *image)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= num_jobs) return;
auto tri_id = tids[idx];
auto pixel_id = pids[idx];
auto index1 = indices[tri_id * 3 + 0];
auto index2 = indices[tri_id * 3 + 1];
auto index3 = indices[tri_id * 3 + 2];
// Run generated teg function.
auto outvals = renderpixel(
vertices[index1 * 2 + 0],
vertices[index1 * 2 + 1],
vertices[index2 * 2 + 0],
vertices[index2 * 2 + 1],
vertices[index3 * 2 + 0],
vertices[index3 * 2 + 1],
floorf(pixel_id / h),
floorf(pixel_id / h) + 1,
(float)(pixel_id % h),
(float)(pixel_id % h + 1),
tcolors[tri_id * 3 + 0],
tcolors[tri_id * 3 + 1],
tcolors[tri_id * 3 + 2]
);
// Accumulate image.
atomicAdd(&image[pixel_id * 3 + 0], outvals[0]);
atomicAdd(&image[pixel_id * 3 + 1], outvals[1]);
atomicAdd(&image[pixel_id * 3 + 2], outvals[2]);
}
__global__
void update_values(int nx, int ny,
float *vertices, float *tcolors,
float *d_vertices, float *d_tcolors,
float alpha)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int x = idx / (ny + 1);
int y = idx % (ny + 1);
if (x > 0 && y > 0 && x < nx && y < ny){
vertices[idx * 2 + 0] = vertices[idx * 2 + 0] - alpha * 100 * d_vertices[idx * 2 + 0];
vertices[idx * 2 + 1] = vertices[idx * 2 + 1] - alpha * 100 * d_vertices[idx * 2 + 1];
}
x = idx / (ny);
y = idx % (ny);
if (x >= 0 && y >= 0 && x < nx && y < ny){
tcolors[idx * 6 + 0] = tcolors[idx * 6 + 0] - alpha * d_tcolors[idx * 6 + 0];
tcolors[idx * 6 + 1] = tcolors[idx * 6 + 1] - alpha * d_tcolors[idx * 6 + 1];
tcolors[idx * 6 + 2] = tcolors[idx * 6 + 2] - alpha * d_tcolors[idx * 6 + 2];
tcolors[idx * 6 + 3] = tcolors[idx * 6 + 3] - alpha * d_tcolors[idx * 6 + 3];
tcolors[idx * 6 + 4] = tcolors[idx * 6 + 4] - alpha * d_tcolors[idx * 6 + 4];
tcolors[idx * 6 + 5] = tcolors[idx * 6 + 5] - alpha * d_tcolors[idx * 6 + 5];
}
}
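// Update sketch (assuming alpha is the ALPHA constant defined above, 0.001):
// this is a plain gradient-descent step in which vertex positions move by
// 0.1 * gradient per iteration (the extra factor of 100 presumably compensates
// for pixel-scale coordinates) while per-triangle colours move by
// 0.001 * gradient, and vertices on the grid border (x == 0, y == 0, x == nx
// or y == ny) are kept fixed.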
__global__
void set_zero(float* data) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
data[idx] = 0.f;
}
__host__
void build_initial_triangles(float* vertices, int* indices,
float* tcolors,
int nx, int ny,
int image_width, int image_height) {
float tri_width = (float)(image_width) / nx;
float tri_height = (float)(image_height) / ny;
for(int i = 0; i < nx + 1; i++) {
for(int j = 0; j < ny + 1; j++) {
vertices[(i * (ny + 1) + j) * 2 + 0] = tri_width * i;
vertices[(i * (ny + 1) + j) * 2 + 1] = tri_height * j;
}
}
for(int i = 0; i < nx; i++) {
for(int j = 0; j < ny; j++) {
indices[(i * ny + j) * 6 + 0] = ((i + 0) * (ny + 1) + j + 0);
indices[(i * ny + j) * 6 + 1] = ((i + 0) * (ny + 1) + j + 1);
indices[(i * ny + j) * 6 + 2] = ((i + 1) * (ny + 1) + j + 1);
indices[(i * ny + j) * 6 + 3] = ((i + 0) * (ny + 1) + j + 0);
indices[(i * ny + j) * 6 + 4] = ((i + 1) * (ny + 1) + j + 1);
indices[(i * ny + j) * 6 + 5] = ((i + 1) * (ny + 1) + j + 0);
}
}
for(int i = 0; i < nx; i++)
for(int j = 0; j < ny; j++) {
tcolors[((i * ny) + j) * 6 + 0] = 0.f;
tcolors[((i * ny) + j) * 6 + 1] = 0.f;
tcolors[((i * ny) + j) * 6 + 2] = 0.f;
tcolors[((i * ny) + j) * 6 + 3] = 0.f;
tcolors[((i * ny) + j) * 6 + 4] = 0.f;
tcolors[((i * ny) + j) * 6 + 5] = 0.f;
}
}
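// Host-side binning: for each triangle, walk the pixels of its padded,
// image-clamped bounding box and emit one (triangle, pixel) job per pair.
// Returns the number of jobs written into tids/pids.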
int generate_jobs( int image_width, int image_height,
int nx, int ny,
float* vertices, int* indices,
int* tids, int* pids ) {
int job_count = 0;
for (int i = 0; i < nx; i++) {
for (int j = 0; j < ny; j++) {
for(int t = 0; t < 2; t++) {
int idx = (i * ny + j) * 2 + t;
int i0 = indices[idx * 3 + 0];
int i1 = indices[idx * 3 + 1];
int i2 = indices[idx * 3 + 2];
float vx0 = vertices[i0 * 2 + 0];
float vy0 = vertices[i0 * 2 + 1];
float vx1 = vertices[i1 * 2 + 0];
float vy1 = vertices[i1 * 2 + 1];
float vx2 = vertices[i2 * 2 + 0];
float vy2 = vertices[i2 * 2 + 1];
int max_x = (int)(::ceil( ::max(vx0, ::max(vx1, vx2)))) + 1;
int max_y = (int)(::ceil( ::max(vy0, ::max(vy1, vy2)))) + 1;
int min_x = (int)(::floor( ::min(vx0, ::min(vx1, vx2)))) - 1;
int min_y = (int)(::floor( ::min(vy0, ::min(vy1, vy2)))) - 1;
if (min_x < 0) min_x = 0;
if (min_y < 0) min_y = 0;
if (max_x >= image_width) max_x = image_width - 1;
if (max_y >= image_height) max_y = image_height - 1;
for (int tx = min_x; tx < max_x; tx++) {
for (int ty = min_y; ty < max_y; ty++) {
tids[job_count] = idx;
pids[job_count] = tx * image_height + ty;
//if(job_count % 100000 == 0) std::cout << job_count << " " << idx << " " << i0 << " " << i1 << " " << i2 << std::endl;
job_count ++;
}
}
}
}
}
return job_count;
}
float sgn(float x){
return (x > 0) ? 1: -1;
}
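// Host-side regularizer: accumulates an area-based penalty gradient (scaled by
// -1/area) into d_vertices, discouraging triangles from collapsing toward zero area.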
int compute_triangle_regularization( int image_width, int image_height,
int nx, int ny,
float* vertices, int* indices,
float* d_vertices) {
int job_count = 0;
for (int i = 0; i < nx; i++) {
for (int j = 0; j < ny; j++) {
for(int t = 0; t < 2; t++) {
int idx = (i * ny + j) * 2 + t;
int i0 = indices[idx * 3 + 0];
int i1 = indices[idx * 3 + 1];
int i2 = indices[idx * 3 + 2];
float vx0 = vertices[i0 * 2 + 0];
float vy0 = vertices[i0 * 2 + 1];
float vx1 = vertices[i1 * 2 + 0];
float vy1 = vertices[i1 * 2 + 1];
float vx2 = vertices[i2 * 2 + 0];
float vy2 = vertices[i2 * 2 + 1];
float cx = (vx0 + vx1 + vx2) / 3;
float cy = (vy0 + vy1 + vy2) / 3;
// Cross-product for area.
float area = ((vx0 - vx1)*(vy0 - vy2) - (vx0 - vx2)*(vy0 - vy1)) / 8.f;
/*
d_vertices[i0 * 2 + 0] += (-1.0/area) * sgn(vx0 - cx);
d_vertices[i0 * 2 + 1] += (-1.0/area) * sgn(vy0 - cy);
d_vertices[i1 * 2 + 0] += (-1.0/area) * sgn(vx1 - cx);
d_vertices[i1 * 2 + 1] += (-1.0/area) * sgn(vy1 - cy);
d_vertices[i2 * 2 + 0] += (-1.0/area) * sgn(vx2 - cx);
d_vertices[i2 * 2 + 1] += (-1.0/area) * sgn(vy2 - cy);
*/
d_vertices[i0 * 2 + 0] += (-1.0/area) * (vy1 - vy2);
d_vertices[i0 * 2 + 1] += (-1.0/area) * (vx2 - vx1);
d_vertices[i1 * 2 + 0] += (-1.0/area) * (vy2 - vy0);
d_vertices[i1 * 2 + 1] += (-1.0/area) * (vx0 - vx2);
d_vertices[i2 * 2 + 0] += (-1.0/area) * (vy0 - vy1);
d_vertices[i2 * 2 + 1] += (-1.0/area) * (vx1 - vx0);
}
}
}
return job_count;
}
std::string type2str(int type) {
std::string r;
uchar depth = type & CV_MAT_DEPTH_MASK;
uchar chans = 1 + (type >> CV_CN_SHIFT);
switch ( depth ) {
case CV_8U: r = "8U"; break;
case CV_8S: r = "8S"; break;
case CV_16U: r = "16U"; break;
case CV_16S: r = "16S"; break;
case CV_32S: r = "32S"; break;
case CV_32F: r = "32F"; break;
case CV_64F: r = "64F"; break;
default: r = "User"; break;
}
r += "C";
r += (chans+'0');
return r;
}
int main(int argc, char** argv)
{
int nx = 30;
int ny = 30;
// Load an image.
cv::Mat image;
image = cv::imread(argv[1], cv::IMREAD_COLOR);
if( !image.data ) {
std::cout << "Could not open or find the image" << std::endl;
return -1;
}
std::cout << "Fitting " << image.rows << "x" << image.cols << " image" << std::endl;
auto pcolor_num = image.rows * image.cols;
auto pcolor_sz = pcolor_num * sizeof(float) * 3;
auto tcolor_num = nx * ny * 2;
auto tcolor_sz = tcolor_num * sizeof(float) * 3;
auto vertices_num = (nx + 1) * (ny + 1);
auto vertices_sz = vertices_num * sizeof(float) * 2;
auto indices_num = nx * ny * 2;
auto indices_sz = indices_num * sizeof(int) * 3;
auto image_pcs = image.rows * image.cols * 3;
auto image_sz = image_pcs * sizeof(float);
/*
float* pcolors = (float*) malloc(pcolor_sz);
float* tcolors = (float*) malloc(tcolor_sz);
float* vertices = (float*) malloc(vertices_sz);
int* indices = (int*) malloc(indices_sz);
float* d_vertices = (float*) malloc(vertices_sz);
float* d_tcolors = (float*) malloc(tcolor_sz);
float* triangle_image = (float*) malloc(image_sz);
char* triangle_bimage = (char*) malloc(image_pcs * 1);
*/
float* pcolors;
float* tcolors;
float* vertices;
int* indices;
float* d_vertices;
float* d_tcolors;
float* triangle_image;
char* triangle_bimage = (char*) malloc(image_pcs * 1);
    // Generous upper bound on the number of (triangle, pixel) jobs per iteration;
    // the assert after generate_jobs() verifies it is never exceeded.
    int max_jobs = image.rows * image.cols * 40;
int* tids;
int* pids;
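    // Everything is allocated in unified (managed) memory so the host-side job
    // generation and the GPU kernels can access the same buffers directly.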
hipMallocManaged(&pcolors, pcolor_sz);
hipMallocManaged(&tcolors, tcolor_sz);
hipMallocManaged(&vertices, vertices_sz);
hipMallocManaged(&indices, indices_sz);
hipMallocManaged(&tids, max_jobs * sizeof(int));
hipMallocManaged(&pids, max_jobs * sizeof(int));
hipMallocManaged(&triangle_image, image_sz);
hipMallocManaged(&d_tcolors, tcolor_sz);
hipMallocManaged(&d_vertices, vertices_sz);
build_initial_triangles(vertices, indices, tcolors, nx, ny, image.rows, image.cols);
hipDeviceSynchronize();
//int num_jobs = generate_jobs(image.rows, image.cols, nx, ny, vertices, indices, tids, pids);
//std::cout<< "Generated " << num_jobs << " jobs. " << "(Max:" << max_jobs << ")" << std::endl;
// Bound max jobs with some constant multiple of image size.
std::cout << type2str(image.type()) << std::endl;
// Load image data.
for(int i = 0; i < image.rows; i++)
for(int j = 0; j < image.cols; j++){
//int idx = (image.cols * i + j) * 3;
cv::Vec3b v = image.at<cv::Vec3b>(i, j);
pcolors[(image.cols * i + j) * 3 + 0] = ((float)v[0]) / 255.0;//*(image.data + idx + 0);
pcolors[(image.cols * i + j) * 3 + 1] = ((float)v[1]) / 255.0;//*(image.data + idx + 1);
pcolors[(image.cols * i + j) * 3 + 2] = ((float)v[2]) / 255.0;//*(image.data + idx + 2);
}
//x = (float*) malloc(N*sizeof(float));
//y = (float*) malloc(N*sizeof(float));
int num_jobs = 0;
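    // Optimization loop: zero the gradients, rebuild the (triangle, pixel) job
    // list from the current geometry, accumulate derivatives on the GPU, take a
    // gradient step, then render and save the current approximation.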
for (int iter = 0; iter < 150; iter ++){
printf("Iteration %d", iter);
// Zero buffers.
hipLaunchKernelGGL(( set_zero), dim3(((tcolor_num * 3) / 256)), dim3(256), 0, 0, d_tcolors);
hipLaunchKernelGGL(( set_zero), dim3(((vertices_num * 2) / 256)), dim3(256), 0, 0, d_vertices);
hipLaunchKernelGGL(( set_zero), dim3((image_pcs / 256)), dim3(256), 0, 0, triangle_image);
num_jobs = generate_jobs(image.rows, image.cols, nx, ny, vertices, indices, tids, pids);
printf("jobs: %d\n", num_jobs);
assert(num_jobs <= max_jobs);
hipDeviceSynchronize();
// Compute derivatives.
hipLaunchKernelGGL(( pt_loss_derivative), dim3((num_jobs / 256) + 1), dim3(256), 0, 0,
image.rows, image.cols,
tids,
pids,
num_jobs,
vertices,
indices,
tcolors, pcolors,
d_vertices, d_tcolors);
hipDeviceSynchronize();
compute_triangle_regularization(image.rows, image.cols, nx, ny, vertices, indices, d_vertices);
// Update values.
hipLaunchKernelGGL(( update_values), dim3((nx * ny) / 256 + 1), dim3(256) , 0, 0,
nx, ny, vertices, tcolors, d_vertices, d_tcolors, ALPHA
);
hipDeviceSynchronize();
// Render triangles to image.
hipLaunchKernelGGL(( render_triangles), dim3((num_jobs / 256) + 1), dim3(256), 0, 0,
image.rows, image.cols,
tids,
pids,
num_jobs,
vertices,
indices,
tcolors,
triangle_image);
hipDeviceSynchronize();
for(int idx = 0; idx < image_pcs; idx ++){
int _val = (int)(triangle_image[idx] * 256);
triangle_bimage[idx] = (char) ((_val < 0) ? 0 : (_val > 255 ? 255 : _val));
}
std::stringstream ss;
ss << "iter-" << iter << ".png";
cv::imwrite(ss.str(), cv::Mat(image.rows, image.cols, CV_8UC3, triangle_bimage));
}
/*for (int i = 0; i < 50; i++)
for (int j = 0; j < 50; j++){
float f0 = d_tcolors[(i * 50 + j) * 3 + 0];
float f1 = d_tcolors[(i * 50 + j) * 3 + 1];
float f2 = d_tcolors[(i * 50 + j) * 3 + 2];
if (f0 != 0 || f1 != 0 || f2 != 0)
std::cout << f0 << ", " << f1 << ", " << f2 << std::endl;
}
for (int i = 0; i < 50; i++)
for (int j = 0; j < 50; j++){
float f0 = d_vertices[(i * 50 + j) * 2 + 0];
float f1 = d_vertices[(i * 50 + j) * 2 + 1];
if (f0 != 0 || f1 != 0)
std::cout << f0 << ", " << f1 << std::endl;
}*/
    hipFree(pcolors);
    hipFree(tcolors);
    hipFree(vertices);
    hipFree(indices);
    hipFree(tids);
    hipFree(pids);
    hipFree(triangle_image);
    hipFree(d_tcolors);
    hipFree(d_vertices);
    free(triangle_bimage);
} | 6f766438dfc82eeb565fc7b998c8832ab80e5126.cu | #include <stdio.h>
#include "math.h"
#include <string>
// Image IO
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <sstream>
// Output generated from Teg
#include "tegpixel.h"
#include "renderpixel.h"
// End Temporary placeholder.
#define ALPHA 0.001
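// Backward pass: each (triangle, pixel) job evaluates the Teg-generated
// derivative of the per-pixel loss against the target image and atomically
// accumulates it into the vertex-position and triangle-color gradient buffers.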
__global__
void pt_loss_derivative(int w, int h,
int* tids,
int* pids,
int num_jobs,
float *vertices,
int *indices,
float *tcolors, float* pcolors,
float *d_vertices, float *d_tcolors)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= num_jobs)
return;
auto tri_id = tids[idx];
auto pixel_id = pids[idx];
auto index1 = indices[tri_id * 3 + 0];
auto index2 = indices[tri_id * 3 + 1];
auto index3 = indices[tri_id * 3 + 2];
// Run generated teg function.
auto outvals = tegpixel(
vertices[index1 * 2 + 0],
vertices[index1 * 2 + 1],
vertices[index2 * 2 + 0],
vertices[index2 * 2 + 1],
vertices[index3 * 2 + 0],
vertices[index3 * 2 + 1],
floorf(pixel_id / h),
floorf(pixel_id / h) + 1,
(float)(pixel_id % h),
(float)(pixel_id % h + 1),
pcolors[pixel_id * 3 + 0],
pcolors[pixel_id * 3 + 1],
pcolors[pixel_id * 3 + 2],
tcolors[tri_id * 3 + 0],
tcolors[tri_id * 3 + 1],
tcolors[tri_id * 3 + 2]
);
/*auto temp_outvals = tegpixel(
0, -2,
0, 2,
1, 0,
-1, 1,
-1, 1,
1, 1, 1,
0.5, 0.5, 0.5
);
if (idx == 0){
printf("%f, %f, %f, %f, %f, %f\n",
temp_outvals.o[0], temp_outvals.o[1], temp_outvals.o[2],
temp_outvals.o[3], temp_outvals.o[4], temp_outvals.o[5]);
}*/
/*
if (index3 == 366 && outvals.o[0] != 0.f){
printf("\
Hello from block %d, thread %d\n\
Tri_id %d, Pix_id %d\n\
Pix-color %f, %f, %f\n\
T-colors %f, %f, %f\n\
Out-vals %f, %f, %f\n\
0: %f, %f\n\
1: %f, %f\n\
2: %f, %f\n\
x: %f, %f\n\
y: %f, %f\n\
idxs: %d, %d, %d\n",
blockIdx.x, threadIdx.x,
tri_id, pixel_id,
pcolors[pixel_id * 3 + 0], pcolors[pixel_id * 3 + 1], pcolors[pixel_id * 3 + 2],
tcolors[tri_id * 3 + 0], tcolors[tri_id * 3 + 1], tcolors[tri_id * 3 + 2],
outvals.o[0], outvals.o[1], outvals.o[2],
vertices[index1 * 2 + 0], vertices[index1 * 2 + 1],
vertices[index2 * 2 + 0], vertices[index2 * 2 + 1],
vertices[index3 * 2 + 0], vertices[index3 * 2 + 1],
floorf(pixel_id / h),
floorf(pixel_id / h) + 1,
(float)(pixel_id % h),
(float)(pixel_id % h + 1),
index1, index2, index3);
}*/
// Accumulate derivatives.
// TODO: There needs to be an easier way to accumulate derivatives..
atomicAdd(&d_vertices[index1 * 2 + 0], outvals[0]);
atomicAdd(&d_vertices[index2 * 2 + 0], outvals[1]);
atomicAdd(&d_vertices[index3 * 2 + 0], outvals[2]);
atomicAdd(&d_vertices[index1 * 2 + 1], outvals[3]);
atomicAdd(&d_vertices[index2 * 2 + 1], outvals[4]);
atomicAdd(&d_vertices[index3 * 2 + 1], outvals[5]);
//if (index3 == 366) {
// printf("pt_loss_derivative for %d: %f\n", index3, d_vertices[index3 * 2 + 0]);
//}
atomicAdd(&d_tcolors[tri_id * 3 + 0], outvals[13]);
atomicAdd(&d_tcolors[tri_id * 3 + 1], outvals[14]);
atomicAdd(&d_tcolors[tri_id * 3 + 2], outvals[15]);
}
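// Forward rasterization pass: each (triangle, pixel) job integrates the
// triangle's color over the pixel footprint via the Teg-generated
// renderpixel() routine and atomically accumulates it into the output image.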
__global__
void render_triangles(int w, int h,
int* tids,
int* pids,
int num_jobs,
float *vertices,
int *indices,
float *tcolors,
float *image)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= num_jobs) return;
auto tri_id = tids[idx];
auto pixel_id = pids[idx];
auto index1 = indices[tri_id * 3 + 0];
auto index2 = indices[tri_id * 3 + 1];
auto index3 = indices[tri_id * 3 + 2];
// Run generated teg function.
auto outvals = renderpixel(
vertices[index1 * 2 + 0],
vertices[index1 * 2 + 1],
vertices[index2 * 2 + 0],
vertices[index2 * 2 + 1],
vertices[index3 * 2 + 0],
vertices[index3 * 2 + 1],
floorf(pixel_id / h),
floorf(pixel_id / h) + 1,
(float)(pixel_id % h),
(float)(pixel_id % h + 1),
tcolors[tri_id * 3 + 0],
tcolors[tri_id * 3 + 1],
tcolors[tri_id * 3 + 2]
);
// Accumulate image.
atomicAdd(&image[pixel_id * 3 + 0], outvals[0]);
atomicAdd(&image[pixel_id * 3 + 1], outvals[1]);
atomicAdd(&image[pixel_id * 3 + 2], outvals[2]);
}
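// Gradient-descent update: interior vertices move against their accumulated
// gradient (boundary vertices stay pinned) with a 100x larger step than the
// per-triangle colors, which are all updated.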
__global__
void update_values(int nx, int ny,
float *vertices, float *tcolors,
float *d_vertices, float *d_tcolors,
float alpha)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int x = idx / (ny + 1);
int y = idx % (ny + 1);
if (x > 0 && y > 0 && x < nx && y < ny){
vertices[idx * 2 + 0] = vertices[idx * 2 + 0] - alpha * 100 * d_vertices[idx * 2 + 0];
vertices[idx * 2 + 1] = vertices[idx * 2 + 1] - alpha * 100 * d_vertices[idx * 2 + 1];
}
x = idx / (ny);
y = idx % (ny);
if (x >= 0 && y >= 0 && x < nx && y < ny){
tcolors[idx * 6 + 0] = tcolors[idx * 6 + 0] - alpha * d_tcolors[idx * 6 + 0];
tcolors[idx * 6 + 1] = tcolors[idx * 6 + 1] - alpha * d_tcolors[idx * 6 + 1];
tcolors[idx * 6 + 2] = tcolors[idx * 6 + 2] - alpha * d_tcolors[idx * 6 + 2];
tcolors[idx * 6 + 3] = tcolors[idx * 6 + 3] - alpha * d_tcolors[idx * 6 + 3];
tcolors[idx * 6 + 4] = tcolors[idx * 6 + 4] - alpha * d_tcolors[idx * 6 + 4];
tcolors[idx * 6 + 5] = tcolors[idx * 6 + 5] - alpha * d_tcolors[idx * 6 + 5];
}
}
__global__
void set_zero(float* data, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // The launch rounds the grid up, so guard the tail of the buffer.
    if (idx < n) data[idx] = 0.f;
}
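// Lay a regular (nx+1) x (ny+1) vertex grid over the image and split every
// grid cell into two triangles, with all triangle colors initialized to black.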
__host__
void build_initial_triangles(float* vertices, int* indices,
float* tcolors,
int nx, int ny,
int image_width, int image_height) {
float tri_width = (float)(image_width) / nx;
float tri_height = (float)(image_height) / ny;
for(int i = 0; i < nx + 1; i++) {
for(int j = 0; j < ny + 1; j++) {
vertices[(i * (ny + 1) + j) * 2 + 0] = tri_width * i;
vertices[(i * (ny + 1) + j) * 2 + 1] = tri_height * j;
}
}
for(int i = 0; i < nx; i++) {
for(int j = 0; j < ny; j++) {
indices[(i * ny + j) * 6 + 0] = ((i + 0) * (ny + 1) + j + 0);
indices[(i * ny + j) * 6 + 1] = ((i + 0) * (ny + 1) + j + 1);
indices[(i * ny + j) * 6 + 2] = ((i + 1) * (ny + 1) + j + 1);
indices[(i * ny + j) * 6 + 3] = ((i + 0) * (ny + 1) + j + 0);
indices[(i * ny + j) * 6 + 4] = ((i + 1) * (ny + 1) + j + 1);
indices[(i * ny + j) * 6 + 5] = ((i + 1) * (ny + 1) + j + 0);
}
}
for(int i = 0; i < nx; i++)
for(int j = 0; j < ny; j++) {
tcolors[((i * ny) + j) * 6 + 0] = 0.f;
tcolors[((i * ny) + j) * 6 + 1] = 0.f;
tcolors[((i * ny) + j) * 6 + 2] = 0.f;
tcolors[((i * ny) + j) * 6 + 3] = 0.f;
tcolors[((i * ny) + j) * 6 + 4] = 0.f;
tcolors[((i * ny) + j) * 6 + 5] = 0.f;
}
}
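// Host-side binning: for each triangle, walk the pixels of its padded,
// image-clamped bounding box and emit one (triangle, pixel) job per pair.
// Returns the number of jobs written into tids/pids.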
int generate_jobs( int image_width, int image_height,
int nx, int ny,
float* vertices, int* indices,
int* tids, int* pids ) {
int job_count = 0;
for (int i = 0; i < nx; i++) {
for (int j = 0; j < ny; j++) {
for(int t = 0; t < 2; t++) {
int idx = (i * ny + j) * 2 + t;
int i0 = indices[idx * 3 + 0];
int i1 = indices[idx * 3 + 1];
int i2 = indices[idx * 3 + 2];
float vx0 = vertices[i0 * 2 + 0];
float vy0 = vertices[i0 * 2 + 1];
float vx1 = vertices[i1 * 2 + 0];
float vy1 = vertices[i1 * 2 + 1];
float vx2 = vertices[i2 * 2 + 0];
float vy2 = vertices[i2 * 2 + 1];
int max_x = (int)(std::ceil( std::max(vx0, std::max(vx1, vx2)))) + 1;
int max_y = (int)(std::ceil( std::max(vy0, std::max(vy1, vy2)))) + 1;
int min_x = (int)(std::floor( std::min(vx0, std::min(vx1, vx2)))) - 1;
int min_y = (int)(std::floor( std::min(vy0, std::min(vy1, vy2)))) - 1;
if (min_x < 0) min_x = 0;
if (min_y < 0) min_y = 0;
if (max_x >= image_width) max_x = image_width - 1;
if (max_y >= image_height) max_y = image_height - 1;
for (int tx = min_x; tx < max_x; tx++) {
for (int ty = min_y; ty < max_y; ty++) {
tids[job_count] = idx;
pids[job_count] = tx * image_height + ty;
//if(job_count % 100000 == 0) std::cout << job_count << " " << idx << " " << i0 << " " << i1 << " " << i2 << std::endl;
job_count ++;
}
}
}
}
}
return job_count;
}
float sgn(float x){
return (x > 0) ? 1: -1;
}
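// Host-side regularizer: accumulates an area-based penalty gradient (scaled by
// -1/area) into d_vertices, discouraging triangles from collapsing toward zero area.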
int compute_triangle_regularization( int image_width, int image_height,
int nx, int ny,
float* vertices, int* indices,
float* d_vertices) {
int job_count = 0;
for (int i = 0; i < nx; i++) {
for (int j = 0; j < ny; j++) {
for(int t = 0; t < 2; t++) {
int idx = (i * ny + j) * 2 + t;
int i0 = indices[idx * 3 + 0];
int i1 = indices[idx * 3 + 1];
int i2 = indices[idx * 3 + 2];
float vx0 = vertices[i0 * 2 + 0];
float vy0 = vertices[i0 * 2 + 1];
float vx1 = vertices[i1 * 2 + 0];
float vy1 = vertices[i1 * 2 + 1];
float vx2 = vertices[i2 * 2 + 0];
float vy2 = vertices[i2 * 2 + 1];
float cx = (vx0 + vx1 + vx2) / 3;
float cy = (vy0 + vy1 + vy2) / 3;
// Cross-product for area.
float area = ((vx0 - vx1)*(vy0 - vy2) - (vx0 - vx2)*(vy0 - vy1)) / 8.f;
/*
d_vertices[i0 * 2 + 0] += (-1.0/area) * sgn(vx0 - cx);
d_vertices[i0 * 2 + 1] += (-1.0/area) * sgn(vy0 - cy);
d_vertices[i1 * 2 + 0] += (-1.0/area) * sgn(vx1 - cx);
d_vertices[i1 * 2 + 1] += (-1.0/area) * sgn(vy1 - cy);
d_vertices[i2 * 2 + 0] += (-1.0/area) * sgn(vx2 - cx);
d_vertices[i2 * 2 + 1] += (-1.0/area) * sgn(vy2 - cy);
*/
d_vertices[i0 * 2 + 0] += (-1.0/area) * (vy1 - vy2);
d_vertices[i0 * 2 + 1] += (-1.0/area) * (vx2 - vx1);
d_vertices[i1 * 2 + 0] += (-1.0/area) * (vy2 - vy0);
d_vertices[i1 * 2 + 1] += (-1.0/area) * (vx0 - vx2);
d_vertices[i2 * 2 + 0] += (-1.0/area) * (vy0 - vy1);
d_vertices[i2 * 2 + 1] += (-1.0/area) * (vx1 - vx0);
}
}
}
return job_count;
}
std::string type2str(int type) {
std::string r;
uchar depth = type & CV_MAT_DEPTH_MASK;
uchar chans = 1 + (type >> CV_CN_SHIFT);
switch ( depth ) {
case CV_8U: r = "8U"; break;
case CV_8S: r = "8S"; break;
case CV_16U: r = "16U"; break;
case CV_16S: r = "16S"; break;
case CV_32S: r = "32S"; break;
case CV_32F: r = "32F"; break;
case CV_64F: r = "64F"; break;
default: r = "User"; break;
}
r += "C";
r += (chans+'0');
return r;
}
int main(int argc, char** argv)
{
int nx = 30;
int ny = 30;
// Load an image.
cv::Mat image;
image = cv::imread(argv[1], cv::IMREAD_COLOR);
if( !image.data ) {
std::cout << "Could not open or find the image" << std::endl;
return -1;
}
std::cout << "Fitting " << image.rows << "x" << image.cols << " image" << std::endl;
auto pcolor_num = image.rows * image.cols;
auto pcolor_sz = pcolor_num * sizeof(float) * 3;
auto tcolor_num = nx * ny * 2;
auto tcolor_sz = tcolor_num * sizeof(float) * 3;
auto vertices_num = (nx + 1) * (ny + 1);
auto vertices_sz = vertices_num * sizeof(float) * 2;
auto indices_num = nx * ny * 2;
auto indices_sz = indices_num * sizeof(int) * 3;
auto image_pcs = image.rows * image.cols * 3;
auto image_sz = image_pcs * sizeof(float);
/*
float* pcolors = (float*) malloc(pcolor_sz);
float* tcolors = (float*) malloc(tcolor_sz);
float* vertices = (float*) malloc(vertices_sz);
int* indices = (int*) malloc(indices_sz);
float* d_vertices = (float*) malloc(vertices_sz);
float* d_tcolors = (float*) malloc(tcolor_sz);
float* triangle_image = (float*) malloc(image_sz);
char* triangle_bimage = (char*) malloc(image_pcs * 1);
*/
float* pcolors;
float* tcolors;
float* vertices;
int* indices;
float* d_vertices;
float* d_tcolors;
float* triangle_image;
char* triangle_bimage = (char*) malloc(image_pcs * 1);
    // Generous upper bound on the number of (triangle, pixel) jobs per iteration;
    // the assert after generate_jobs() verifies it is never exceeded.
    int max_jobs = image.rows * image.cols * 40;
int* tids;
int* pids;
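    // Everything is allocated in unified (managed) memory so the host-side job
    // generation and the GPU kernels can access the same buffers directly.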
cudaMallocManaged(&pcolors, pcolor_sz);
cudaMallocManaged(&tcolors, tcolor_sz);
cudaMallocManaged(&vertices, vertices_sz);
cudaMallocManaged(&indices, indices_sz);
cudaMallocManaged(&tids, max_jobs * sizeof(int));
cudaMallocManaged(&pids, max_jobs * sizeof(int));
cudaMallocManaged(&triangle_image, image_sz);
cudaMallocManaged(&d_tcolors, tcolor_sz);
cudaMallocManaged(&d_vertices, vertices_sz);
build_initial_triangles(vertices, indices, tcolors, nx, ny, image.rows, image.cols);
cudaDeviceSynchronize();
//int num_jobs = generate_jobs(image.rows, image.cols, nx, ny, vertices, indices, tids, pids);
//std::cout<< "Generated " << num_jobs << " jobs. " << "(Max:" << max_jobs << ")" << std::endl;
// Bound max jobs with some constant multiple of image size.
std::cout << type2str(image.type()) << std::endl;
// Load image data.
for(int i = 0; i < image.rows; i++)
for(int j = 0; j < image.cols; j++){
//int idx = (image.cols * i + j) * 3;
cv::Vec3b v = image.at<cv::Vec3b>(i, j);
pcolors[(image.cols * i + j) * 3 + 0] = ((float)v[0]) / 255.0;//*(image.data + idx + 0);
pcolors[(image.cols * i + j) * 3 + 1] = ((float)v[1]) / 255.0;//*(image.data + idx + 1);
pcolors[(image.cols * i + j) * 3 + 2] = ((float)v[2]) / 255.0;//*(image.data + idx + 2);
}
//x = (float*) malloc(N*sizeof(float));
//y = (float*) malloc(N*sizeof(float));
int num_jobs = 0;
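    // Optimization loop: zero the gradients, rebuild the (triangle, pixel) job
    // list from the current geometry, accumulate derivatives on the GPU, take a
    // gradient step, then render and save the current approximation.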
for (int iter = 0; iter < 150; iter ++){
printf("Iteration %d", iter);
// Zero buffers.
set_zero<<<((tcolor_num * 3) / 256), 256>>>(d_tcolors);
set_zero<<<((vertices_num * 2) / 256), 256>>>(d_vertices);
set_zero<<<(image_pcs / 256), 256>>>(triangle_image);
num_jobs = generate_jobs(image.rows, image.cols, nx, ny, vertices, indices, tids, pids);
printf("jobs: %d\n", num_jobs);
assert(num_jobs <= max_jobs);
cudaDeviceSynchronize();
// Compute derivatives.
pt_loss_derivative<<<(num_jobs / 256) + 1, 256>>>(
image.rows, image.cols,
tids,
pids,
num_jobs,
vertices,
indices,
tcolors, pcolors,
d_vertices, d_tcolors);
cudaDeviceSynchronize();
compute_triangle_regularization(image.rows, image.cols, nx, ny, vertices, indices, d_vertices);
// Update values.
update_values<<< (nx * ny) / 256 + 1, 256 >>>(
nx, ny, vertices, tcolors, d_vertices, d_tcolors, ALPHA
);
cudaDeviceSynchronize();
// Render triangles to image.
render_triangles<<<(num_jobs / 256) + 1, 256>>>(
image.rows, image.cols,
tids,
pids,
num_jobs,
vertices,
indices,
tcolors,
triangle_image);
cudaDeviceSynchronize();
for(int idx = 0; idx < image_pcs; idx ++){
int _val = (int)(triangle_image[idx] * 256);
triangle_bimage[idx] = (char) ((_val < 0) ? 0 : (_val > 255 ? 255 : _val));
}
std::stringstream ss;
ss << "iter-" << iter << ".png";
cv::imwrite(ss.str(), cv::Mat(image.rows, image.cols, CV_8UC3, triangle_bimage));
}
/*for (int i = 0; i < 50; i++)
for (int j = 0; j < 50; j++){
float f0 = d_tcolors[(i * 50 + j) * 3 + 0];
float f1 = d_tcolors[(i * 50 + j) * 3 + 1];
float f2 = d_tcolors[(i * 50 + j) * 3 + 2];
if (f0 != 0 || f1 != 0 || f2 != 0)
std::cout << f0 << ", " << f1 << ", " << f2 << std::endl;
}
for (int i = 0; i < 50; i++)
for (int j = 0; j < 50; j++){
float f0 = d_vertices[(i * 50 + j) * 2 + 0];
float f1 = d_vertices[(i * 50 + j) * 2 + 1];
if (f0 != 0 || f1 != 0)
std::cout << f0 << ", " << f1 << std::endl;
}*/
    cudaFree(pcolors);
    cudaFree(tcolors);
    cudaFree(vertices);
    cudaFree(indices);
    cudaFree(tids);
    cudaFree(pids);
    cudaFree(triangle_image);
    cudaFree(d_tcolors);
    cudaFree(d_vertices);
    free(triangle_bimage);
} |
db02cb1fc959666925fd3a1edf87723c695187df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_xvel_plus_2_front [3][2];
static int dims_update_halo_kernel2_xvel_plus_2_front_h [3][2] = {0};
//user function
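// Copies the x-velocity value from two cells inside the domain onto the front
// (+z) halo layer for whichever velocity fields are enabled in `fields`.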
__device__
inline void update_halo_kernel2_xvel_plus_2_front_gpu(ACC<double> &xvel0,
ACC<double> &xvel1,
const int* fields)
{
if(fields[FIELD_XVEL0] == 1) xvel0(0,0,0) = xvel0(0,0,-2);
if(fields[FIELD_XVEL1] == 1) xvel1(0,0,0) = xvel1(0,0,-2);
}
__global__ void ops_update_halo_kernel2_xvel_plus_2_front(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_xvel_plus_2_front[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_xvel_plus_2_front[0][0] * dims_update_halo_kernel2_xvel_plus_2_front[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_xvel_plus_2_front[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_xvel_plus_2_front[1][0] * dims_update_halo_kernel2_xvel_plus_2_front[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_xvel_plus_2_front[0][0], dims_update_halo_kernel2_xvel_plus_2_front[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_xvel_plus_2_front[1][0], dims_update_halo_kernel2_xvel_plus_2_front[1][1], arg1);
update_halo_kernel2_xvel_plus_2_front_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_front(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_plus_2_front_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,34)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(34,"update_halo_kernel2_xvel_plus_2_front");
OPS_kernels[34].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_xvel_plus_2_front_h[0][0] || ydim0 != dims_update_halo_kernel2_xvel_plus_2_front_h[0][1] || xdim1 != dims_update_halo_kernel2_xvel_plus_2_front_h[1][0] || ydim1 != dims_update_halo_kernel2_xvel_plus_2_front_h[1][1]) {
dims_update_halo_kernel2_xvel_plus_2_front_h[0][0] = xdim0;
dims_update_halo_kernel2_xvel_plus_2_front_h[0][1] = ydim0;
dims_update_halo_kernel2_xvel_plus_2_front_h[1][0] = xdim1;
dims_update_halo_kernel2_xvel_plus_2_front_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_xvel_plus_2_front, dims_update_halo_kernel2_xvel_plus_2_front_h, sizeof(dims_update_halo_kernel2_xvel_plus_2_front)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[34].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_plus_2_front), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[34].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[34].mpi_time += t2-t1;
OPS_kernels[34].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[34].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_front(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 34;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 34;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_plus_2_front_execute;
if (OPS_diags > 1) {
ops_timing_realloc(34,"update_halo_kernel2_xvel_plus_2_front");
}
ops_enqueue_kernel(desc);
}
#endif
| db02cb1fc959666925fd3a1edf87723c695187df.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_xvel_plus_2_front [3][2];
static int dims_update_halo_kernel2_xvel_plus_2_front_h [3][2] = {0};
//user function
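// Copies the x-velocity value from two cells inside the domain onto the front
// (+z) halo layer for whichever velocity fields are enabled in `fields`.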
__device__
inline void update_halo_kernel2_xvel_plus_2_front_gpu(ACC<double> &xvel0,
ACC<double> &xvel1,
const int* fields)
{
if(fields[FIELD_XVEL0] == 1) xvel0(0,0,0) = xvel0(0,0,-2);
if(fields[FIELD_XVEL1] == 1) xvel1(0,0,0) = xvel1(0,0,-2);
}
__global__ void ops_update_halo_kernel2_xvel_plus_2_front(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_xvel_plus_2_front[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_xvel_plus_2_front[0][0] * dims_update_halo_kernel2_xvel_plus_2_front[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_xvel_plus_2_front[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_xvel_plus_2_front[1][0] * dims_update_halo_kernel2_xvel_plus_2_front[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_xvel_plus_2_front[0][0], dims_update_halo_kernel2_xvel_plus_2_front[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_xvel_plus_2_front[1][0], dims_update_halo_kernel2_xvel_plus_2_front[1][1], arg1);
update_halo_kernel2_xvel_plus_2_front_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_front(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_plus_2_front_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,34)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(34,"update_halo_kernel2_xvel_plus_2_front");
OPS_kernels[34].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_xvel_plus_2_front_h[0][0] || ydim0 != dims_update_halo_kernel2_xvel_plus_2_front_h[0][1] || xdim1 != dims_update_halo_kernel2_xvel_plus_2_front_h[1][0] || ydim1 != dims_update_halo_kernel2_xvel_plus_2_front_h[1][1]) {
dims_update_halo_kernel2_xvel_plus_2_front_h[0][0] = xdim0;
dims_update_halo_kernel2_xvel_plus_2_front_h[0][1] = ydim0;
dims_update_halo_kernel2_xvel_plus_2_front_h[1][0] = xdim1;
dims_update_halo_kernel2_xvel_plus_2_front_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_xvel_plus_2_front, dims_update_halo_kernel2_xvel_plus_2_front_h, sizeof(dims_update_halo_kernel2_xvel_plus_2_front)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[34].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_xvel_plus_2_front<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[34].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[34].mpi_time += t2-t1;
OPS_kernels[34].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[34].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_front(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 34;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 34;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_plus_2_front_execute;
if (OPS_diags > 1) {
ops_timing_realloc(34,"update_halo_kernel2_xvel_plus_2_front");
}
ops_enqueue_kernel(desc);
}
#endif
|
1d7467b86d5fc27774b12d903668d8ec39a9df08.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "avro.hpp"
#include "avro_gpu.hpp"
#include <io/comp/gpuinflate.hpp>
#include <io/utilities/column_buffer.hpp>
#include <io/utilities/hostdevice_vector.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/io/datasource.hpp>
#include <cudf/io/detail/avro.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/equal.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <thrust/tabulate.h>
#include <nvcomp/snappy.h>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
using cudf::device_span;
namespace cudf {
namespace io {
namespace detail {
namespace avro {
// Import functionality that's independent of legacy code
using namespace cudf::io::avro;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates Avro data kind to cuDF type enum
*/
type_id to_type_id(avro::schema_entry const* col)
{
switch (col->kind) {
case avro::type_boolean: return type_id::BOOL8;
case avro::type_int: return type_id::INT32;
case avro::type_long: return type_id::INT64;
case avro::type_float: return type_id::FLOAT32;
case avro::type_double: return type_id::FLOAT64;
case avro::type_bytes:
case avro::type_string: return type_id::STRING;
case avro::type_enum: return (!col->symbols.empty()) ? type_id::STRING : type_id::INT32;
default: return type_id::EMPTY;
}
}
} // namespace
/**
* @brief A helper wrapper for Avro file metadata. Provides some additional
* convenience methods for initializing and accessing the metadata and schema
*/
class metadata : public file_metadata {
public:
explicit metadata(datasource* const src) : source(src) {}
/**
* @brief Initializes the parser and filters down to a subset of rows
*
* @param[in,out] row_start Starting row of the selection
* @param[in,out] row_count Total number of rows selected
*/
void init_and_select_rows(int& row_start, int& row_count)
{
auto const buffer = source->host_read(0, source->size());
avro::container pod(buffer->data(), buffer->size());
CUDF_EXPECTS(pod.parse(this, row_count, row_start), "Cannot parse metadata");
row_start = skip_rows;
row_count = num_rows;
}
/**
* @brief Filters and reduces down to a selection of columns
*
* @param[in] use_names List of column names to select
*
* @return List of column names
*/
auto select_columns(std::vector<std::string> use_names)
{
std::vector<std::pair<int, std::string>> selection;
auto const num_avro_columns = static_cast<int>(columns.size());
if (!use_names.empty()) {
int index = 0;
for (auto const& use_name : use_names) {
for (int i = 0; i < num_avro_columns; ++i, ++index) {
if (index >= num_avro_columns) { index = 0; }
if (columns[index].name == use_name &&
type_id::EMPTY != to_type_id(&schema[columns[index].schema_data_idx])) {
selection.emplace_back(index, columns[index].name);
index++;
break;
}
}
}
CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns");
} else {
for (int i = 0; i < num_avro_columns; ++i) {
// Exclude array columns (unsupported)
bool column_in_array = false;
for (int parent_idx = schema[columns[i].schema_data_idx].parent_idx; parent_idx > 0;
parent_idx = schema[parent_idx].parent_idx) {
if (schema[parent_idx].kind == avro::type_array) {
column_in_array = true;
break;
}
}
if (!column_in_array) {
auto col_type = to_type_id(&schema[columns[i].schema_data_idx]);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unsupported data type");
selection.emplace_back(i, columns[i].name);
}
}
}
return selection;
}
private:
datasource* const source;
};
rmm::device_buffer decompress_data(datasource& source,
metadata& meta,
rmm::device_buffer const& comp_block_data,
rmm::cuda_stream_view stream)
{
if (meta.codec == "deflate") {
auto inflate_in = hostdevice_vector<device_span<uint8_t const>>(meta.block_list.size(), stream);
auto inflate_out = hostdevice_vector<device_span<uint8_t>>(meta.block_list.size(), stream);
auto inflate_stats = hostdevice_vector<compression_result>(meta.block_list.size(), stream);
thrust::fill(rmm::exec_policy(stream),
inflate_stats.d_begin(),
inflate_stats.d_end(),
compression_result{0, compression_status::FAILURE});
// Guess an initial maximum uncompressed block size. We estimate the compression factor is two
// and round up to the next multiple of 4096 bytes.
uint32_t const initial_blk_len = meta.max_block_size * 2 + (meta.max_block_size * 2) % 4096;
size_t const uncomp_size = initial_blk_len * meta.block_list.size();
rmm::device_buffer decomp_block_data(uncomp_size, stream);
auto const base_offset = meta.block_list[0].offset;
for (size_t i = 0, dst_pos = 0; i < meta.block_list.size(); i++) {
auto const src_pos = meta.block_list[i].offset - base_offset;
inflate_in[i] = {static_cast<uint8_t const*>(comp_block_data.data()) + src_pos,
meta.block_list[i].size};
inflate_out[i] = {static_cast<uint8_t*>(decomp_block_data.data()) + dst_pos, initial_blk_len};
// Update blocks offsets & sizes to refer to uncompressed data
meta.block_list[i].offset = dst_pos;
meta.block_list[i].size = static_cast<uint32_t>(inflate_out[i].size());
dst_pos += meta.block_list[i].size;
}
inflate_in.host_to_device(stream);
for (int loop_cnt = 0; loop_cnt < 2; loop_cnt++) {
inflate_out.host_to_device(stream);
gpuinflate(inflate_in, inflate_out, inflate_stats, gzip_header_included::NO, stream);
inflate_stats.device_to_host(stream, true);
// Check if larger output is required, as it's not known ahead of time
if (loop_cnt == 0) {
std::vector<size_t> actual_uncomp_sizes;
actual_uncomp_sizes.reserve(inflate_out.size());
std::transform(inflate_out.begin(),
inflate_out.end(),
inflate_stats.begin(),
std::back_inserter(actual_uncomp_sizes),
[](auto const& inf_out, auto const& inf_stats) {
// If error status is OUTPUT_OVERFLOW, the `bytes_written` field
// actually contains the uncompressed data size
return inf_stats.status == compression_status::OUTPUT_OVERFLOW
? ::max(inf_out.size(), inf_stats.bytes_written)
: inf_out.size();
});
auto const total_actual_uncomp_size =
std::accumulate(actual_uncomp_sizes.cbegin(), actual_uncomp_sizes.cend(), 0ul);
if (total_actual_uncomp_size > uncomp_size) {
decomp_block_data.resize(total_actual_uncomp_size, stream);
for (size_t i = 0; i < meta.block_list.size(); ++i) {
meta.block_list[i].offset =
i > 0 ? (meta.block_list[i - 1].size + meta.block_list[i - 1].offset) : 0;
meta.block_list[i].size = static_cast<uint32_t>(actual_uncomp_sizes[i]);
inflate_out[i] = {
static_cast<uint8_t*>(decomp_block_data.data()) + meta.block_list[i].offset,
meta.block_list[i].size};
}
} else {
break;
}
}
}
return decomp_block_data;
} else if (meta.codec == "snappy") {
size_t const num_blocks = meta.block_list.size();
// comp_block_data contains contents of the avro file starting from the first block, excluding
// file header. meta.block_list[i].offset refers to offset of block i in the file, including
// file header.
// Find ptrs to each compressed block in comp_block_data by removing header offset.
hostdevice_vector<void const*> compressed_data_ptrs(num_blocks, stream);
std::transform(meta.block_list.begin(),
meta.block_list.end(),
compressed_data_ptrs.host_ptr(),
[&](auto const& block) {
return static_cast<std::byte const*>(comp_block_data.data()) +
(block.offset - meta.block_list[0].offset);
});
compressed_data_ptrs.host_to_device(stream);
hostdevice_vector<size_t> compressed_data_sizes(num_blocks, stream);
std::transform(meta.block_list.begin(),
meta.block_list.end(),
compressed_data_sizes.host_ptr(),
[](auto const& block) { return block.size; });
compressed_data_sizes.host_to_device(stream);
hostdevice_vector<size_t> uncompressed_data_sizes(num_blocks, stream);
nvcompStatus_t status =
nvcompBatchedSnappyGetDecompressSizeAsync(compressed_data_ptrs.device_ptr(),
compressed_data_sizes.device_ptr(),
uncompressed_data_sizes.device_ptr(),
num_blocks,
stream.value());
CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess,
"Unable to get uncompressed sizes for snappy compressed blocks");
uncompressed_data_sizes.device_to_host(stream, true);
size_t const uncompressed_data_size =
std::reduce(uncompressed_data_sizes.begin(), uncompressed_data_sizes.end());
size_t const max_uncomp_block_size = std::reduce(
uncompressed_data_sizes.begin(), uncompressed_data_sizes.end(), 0, thrust::maximum<size_t>());
size_t temp_size;
status =
nvcompBatchedSnappyDecompressGetTempSize(num_blocks, max_uncomp_block_size, &temp_size);
CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess,
"Unable to get scratch size for snappy decompression");
rmm::device_buffer scratch(temp_size, stream);
rmm::device_buffer decomp_block_data(uncompressed_data_size, stream);
rmm::device_uvector<void*> uncompressed_data_ptrs(num_blocks, stream);
hostdevice_vector<size_t> uncompressed_data_offsets(num_blocks, stream);
std::exclusive_scan(uncompressed_data_sizes.begin(),
uncompressed_data_sizes.end(),
uncompressed_data_offsets.begin(),
0);
uncompressed_data_offsets.host_to_device(stream);
thrust::tabulate(rmm::exec_policy(stream),
uncompressed_data_ptrs.begin(),
uncompressed_data_ptrs.end(),
[off = uncompressed_data_offsets.device_ptr(),
data = static_cast<std::byte*>(decomp_block_data.data())] __device__(int i) {
return data + off[i];
});
rmm::device_uvector<size_t> actual_uncompressed_data_sizes(num_blocks, stream);
rmm::device_uvector<nvcompStatus_t> statuses(num_blocks, stream);
status = nvcompBatchedSnappyDecompressAsync(compressed_data_ptrs.device_ptr(),
compressed_data_sizes.device_ptr(),
uncompressed_data_sizes.device_ptr(),
actual_uncompressed_data_sizes.data(),
num_blocks,
scratch.data(),
scratch.size(),
uncompressed_data_ptrs.data(),
statuses.data(),
stream);
CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess, "unable to perform snappy decompression");
CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream),
uncompressed_data_sizes.d_begin(),
uncompressed_data_sizes.d_end(),
actual_uncompressed_data_sizes.begin()),
"Mismatch in expected and actual decompressed size during snappy decompression");
CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream),
statuses.begin(),
statuses.end(),
thrust::make_constant_iterator(nvcompStatus_t::nvcompSuccess)),
"Error during snappy decompression");
// Update blocks offsets & sizes to refer to uncompressed data
for (size_t i = 0; i < num_blocks; i++) {
meta.block_list[i].offset = uncompressed_data_offsets[i];
meta.block_list[i].size = uncompressed_data_sizes[i];
}
return decomp_block_data;
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
}
std::vector<column_buffer> decode_data(metadata& meta,
rmm::device_buffer const& block_data,
std::vector<std::pair<uint32_t, uint32_t>> const& dict,
device_span<string_index_pair const> global_dictionary,
size_t num_rows,
std::vector<std::pair<int, std::string>> const& selection,
std::vector<data_type> const& column_types,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto out_buffers = std::vector<column_buffer>();
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selection[i].first;
bool is_nullable = (meta.columns[col_idx].schema_null_idx >= 0);
out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, mr);
}
// Build gpu schema
auto schema_desc = hostdevice_vector<gpu::schemadesc_s>(meta.schema.size(), stream);
uint32_t min_row_data_size = 0;
int skip_field_cnt = 0;
for (size_t i = 0; i < meta.schema.size(); i++) {
type_kind_e kind = meta.schema[i].kind;
if (skip_field_cnt != 0) {
// Exclude union and array members from min_row_data_size
skip_field_cnt += meta.schema[i].num_children - 1;
} else {
switch (kind) {
case type_union:
case type_array:
skip_field_cnt = meta.schema[i].num_children;
// fall through
case type_boolean:
case type_int:
case type_long:
case type_bytes:
case type_string:
case type_enum: min_row_data_size += 1; break;
case type_float: min_row_data_size += 4; break;
case type_double: min_row_data_size += 8; break;
default: break;
}
}
if (kind == type_enum && !meta.schema[i].symbols.size()) { kind = type_int; }
schema_desc[i].kind = kind;
schema_desc[i].count =
(kind == type_enum) ? 0 : static_cast<uint32_t>(meta.schema[i].num_children);
schema_desc[i].dataptr = nullptr;
CUDF_EXPECTS(kind != type_union || meta.schema[i].num_children < 2 ||
(meta.schema[i].num_children == 2 &&
(meta.schema[i + 1].kind == type_null || meta.schema[i + 2].kind == type_null)),
"Union with non-null type not currently supported");
}
std::vector<void*> valid_alias(out_buffers.size(), nullptr);
for (size_t i = 0; i < out_buffers.size(); i++) {
auto const col_idx = selection[i].first;
int schema_data_idx = meta.columns[col_idx].schema_data_idx;
int schema_null_idx = meta.columns[col_idx].schema_null_idx;
schema_desc[schema_data_idx].dataptr = out_buffers[i].data();
if (schema_null_idx >= 0) {
if (!schema_desc[schema_null_idx].dataptr) {
schema_desc[schema_null_idx].dataptr = out_buffers[i].null_mask();
} else {
valid_alias[i] = schema_desc[schema_null_idx].dataptr;
}
}
if (meta.schema[schema_data_idx].kind == type_enum) {
schema_desc[schema_data_idx].count = dict[i].first;
}
if (out_buffers[i].null_mask_size()) {
cudf::detail::set_null_mask(out_buffers[i].null_mask(), 0, num_rows, true, stream);
}
}
auto block_list = cudf::detail::make_device_uvector_async(meta.block_list, stream);
schema_desc.host_to_device(stream);
gpu::DecodeAvroColumnData(block_list,
schema_desc.device_ptr(),
global_dictionary,
static_cast<uint8_t const*>(block_data.data()),
static_cast<uint32_t>(schema_desc.size()),
meta.num_rows,
meta.skip_rows,
min_row_data_size,
stream);
// Copy valid bits that are shared between columns
for (size_t i = 0; i < out_buffers.size(); i++) {
if (valid_alias[i] != nullptr) {
CUDF_CUDA_TRY(hipMemcpyAsync(out_buffers[i].null_mask(),
valid_alias[i],
out_buffers[i].null_mask_size(),
hipMemcpyHostToDevice,
stream.value()));
}
}
schema_desc.device_to_host(stream, true);
for (size_t i = 0; i < out_buffers.size(); i++) {
auto const col_idx = selection[i].first;
auto const schema_null_idx = meta.columns[col_idx].schema_null_idx;
out_buffers[i].null_count() = (schema_null_idx >= 0) ? schema_desc[schema_null_idx].count : 0;
}
return out_buffers;
}
table_with_metadata read_avro(std::unique_ptr<cudf::io::datasource>&& source,
avro_reader_options const& options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto skip_rows = options.get_skip_rows();
auto num_rows = options.get_num_rows();
num_rows = (num_rows != 0) ? num_rows : -1;
std::vector<std::unique_ptr<column>> out_columns;
table_metadata metadata_out;
// Open the source Avro dataset metadata
auto meta = metadata(source.get());
// Select and read partial metadata / schema within the subset of rows
meta.init_and_select_rows(skip_rows, num_rows);
// Select only columns required by the options
auto selected_columns = meta.select_columns(options.get_columns());
if (selected_columns.size() != 0) {
// Get a list of column data types
std::vector<data_type> column_types;
for (auto const& col : selected_columns) {
auto& col_schema = meta.schema[meta.columns[col.first].schema_data_idx];
auto col_type = to_type_id(&col_schema);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
}
if (meta.total_data_size > 0) {
rmm::device_buffer block_data;
if (source->is_device_read_preferred(meta.total_data_size)) {
block_data = rmm::device_buffer{meta.total_data_size, stream};
auto read_bytes = source->device_read(meta.block_list[0].offset,
meta.total_data_size,
static_cast<uint8_t*>(block_data.data()),
stream);
block_data.resize(read_bytes, stream);
} else {
auto const buffer = source->host_read(meta.block_list[0].offset, meta.total_data_size);
block_data = rmm::device_buffer{buffer->data(), buffer->size(), stream};
}
if (meta.codec != "" && meta.codec != "null") {
auto decomp_block_data = decompress_data(*source, meta, block_data, stream);
block_data = std::move(decomp_block_data);
} else {
auto dst_ofs = meta.block_list[0].offset;
for (size_t i = 0; i < meta.block_list.size(); i++) {
meta.block_list[i].offset -= dst_ofs;
}
}
size_t total_dictionary_entries = 0;
size_t dictionary_data_size = 0;
auto dict = std::vector<std::pair<uint32_t, uint32_t>>(column_types.size());
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selected_columns[i].first;
auto& col_schema = meta.schema[meta.columns[col_idx].schema_data_idx];
dict[i].first = static_cast<uint32_t>(total_dictionary_entries);
dict[i].second = static_cast<uint32_t>(col_schema.symbols.size());
total_dictionary_entries += dict[i].second;
for (auto const& sym : col_schema.symbols) {
dictionary_data_size += sym.length();
}
}
auto d_global_dict = rmm::device_uvector<string_index_pair>(0, stream);
auto d_global_dict_data = rmm::device_uvector<char>(0, stream);
if (total_dictionary_entries > 0) {
auto h_global_dict = std::vector<string_index_pair>(total_dictionary_entries);
auto h_global_dict_data = std::vector<char>(dictionary_data_size);
size_t dict_pos = 0;
for (size_t i = 0; i < column_types.size(); ++i) {
auto const col_idx = selected_columns[i].first;
auto const& col_schema = meta.schema[meta.columns[col_idx].schema_data_idx];
auto const col_dict_entries = &(h_global_dict[dict[i].first]);
for (size_t j = 0; j < dict[i].second; j++) {
auto const& symbols = col_schema.symbols[j];
auto const data_dst = h_global_dict_data.data() + dict_pos;
auto const len = symbols.length();
col_dict_entries[j].first = data_dst;
col_dict_entries[j].second = len;
std::copy(symbols.c_str(), symbols.c_str() + len, data_dst);
dict_pos += len;
}
}
d_global_dict = cudf::detail::make_device_uvector_async(h_global_dict, stream);
d_global_dict_data = cudf::detail::make_device_uvector_async(h_global_dict_data, stream);
stream.synchronize();
}
auto out_buffers = decode_data(meta,
block_data,
dict,
d_global_dict,
num_rows,
selected_columns,
column_types,
stream,
mr);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_column(out_buffers[i], nullptr, std::nullopt, stream, mr));
}
} else {
// Create empty columns
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_empty_column(column_types[i]));
}
}
}
// Return column names (must match order of returned columns)
metadata_out.column_names.resize(selected_columns.size());
for (size_t i = 0; i < selected_columns.size(); i++) {
metadata_out.column_names[i] = selected_columns[i].second;
}
// Return user metadata
metadata_out.user_data = meta.user_data;
metadata_out.per_file_user_data = {{meta.user_data.begin(), meta.user_data.end()}};
return {std::make_unique<table>(std::move(out_columns)), std::move(metadata_out)};
}
} // namespace avro
} // namespace detail
} // namespace io
} // namespace cudf
| 1d7467b86d5fc27774b12d903668d8ec39a9df08.cu | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "avro.hpp"
#include "avro_gpu.hpp"
#include <io/comp/gpuinflate.hpp>
#include <io/utilities/column_buffer.hpp>
#include <io/utilities/hostdevice_vector.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/io/datasource.hpp>
#include <cudf/io/detail/avro.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/equal.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <thrust/tabulate.h>
#include <nvcomp/snappy.h>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
using cudf::device_span;
namespace cudf {
namespace io {
namespace detail {
namespace avro {
// Import functionality that's independent of legacy code
using namespace cudf::io::avro;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates Avro data kind to cuDF type enum
*/
type_id to_type_id(avro::schema_entry const* col)
{
switch (col->kind) {
case avro::type_boolean: return type_id::BOOL8;
case avro::type_int: return type_id::INT32;
case avro::type_long: return type_id::INT64;
case avro::type_float: return type_id::FLOAT32;
case avro::type_double: return type_id::FLOAT64;
case avro::type_bytes:
case avro::type_string: return type_id::STRING;
case avro::type_enum: return (!col->symbols.empty()) ? type_id::STRING : type_id::INT32;
default: return type_id::EMPTY;
}
}
} // namespace
/**
* @brief A helper wrapper for Avro file metadata. Provides some additional
* convenience methods for initializing and accessing the metadata and schema
*/
class metadata : public file_metadata {
public:
explicit metadata(datasource* const src) : source(src) {}
/**
* @brief Initializes the parser and filters down to a subset of rows
*
* @param[in,out] row_start Starting row of the selection
* @param[in,out] row_count Total number of rows selected
*/
void init_and_select_rows(int& row_start, int& row_count)
{
auto const buffer = source->host_read(0, source->size());
avro::container pod(buffer->data(), buffer->size());
CUDF_EXPECTS(pod.parse(this, row_count, row_start), "Cannot parse metadata");
row_start = skip_rows;
row_count = num_rows;
}
/**
* @brief Filters and reduces down to a selection of columns
*
* @param[in] use_names List of column names to select
*
* @return List of column names
*/
auto select_columns(std::vector<std::string> use_names)
{
std::vector<std::pair<int, std::string>> selection;
auto const num_avro_columns = static_cast<int>(columns.size());
if (!use_names.empty()) {
int index = 0;
for (auto const& use_name : use_names) {
for (int i = 0; i < num_avro_columns; ++i, ++index) {
if (index >= num_avro_columns) { index = 0; }
if (columns[index].name == use_name &&
type_id::EMPTY != to_type_id(&schema[columns[index].schema_data_idx])) {
selection.emplace_back(index, columns[index].name);
index++;
break;
}
}
}
CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns");
} else {
for (int i = 0; i < num_avro_columns; ++i) {
// Exclude array columns (unsupported)
bool column_in_array = false;
for (int parent_idx = schema[columns[i].schema_data_idx].parent_idx; parent_idx > 0;
parent_idx = schema[parent_idx].parent_idx) {
if (schema[parent_idx].kind == avro::type_array) {
column_in_array = true;
break;
}
}
if (!column_in_array) {
auto col_type = to_type_id(&schema[columns[i].schema_data_idx]);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unsupported data type");
selection.emplace_back(i, columns[i].name);
}
}
}
return selection;
}
private:
datasource* const source;
};
rmm::device_buffer decompress_data(datasource& source,
metadata& meta,
rmm::device_buffer const& comp_block_data,
rmm::cuda_stream_view stream)
{
if (meta.codec == "deflate") {
auto inflate_in = hostdevice_vector<device_span<uint8_t const>>(meta.block_list.size(), stream);
auto inflate_out = hostdevice_vector<device_span<uint8_t>>(meta.block_list.size(), stream);
auto inflate_stats = hostdevice_vector<compression_result>(meta.block_list.size(), stream);
thrust::fill(rmm::exec_policy(stream),
inflate_stats.d_begin(),
inflate_stats.d_end(),
compression_result{0, compression_status::FAILURE});
// Guess an initial maximum uncompressed block size. We estimate the compression factor is two
// and round up to the next multiple of 4096 bytes.
uint32_t const initial_blk_len = meta.max_block_size * 2 + (meta.max_block_size * 2) % 4096;
size_t const uncomp_size = initial_blk_len * meta.block_list.size();
rmm::device_buffer decomp_block_data(uncomp_size, stream);
auto const base_offset = meta.block_list[0].offset;
for (size_t i = 0, dst_pos = 0; i < meta.block_list.size(); i++) {
auto const src_pos = meta.block_list[i].offset - base_offset;
inflate_in[i] = {static_cast<uint8_t const*>(comp_block_data.data()) + src_pos,
meta.block_list[i].size};
inflate_out[i] = {static_cast<uint8_t*>(decomp_block_data.data()) + dst_pos, initial_blk_len};
// Update block offsets & sizes to refer to uncompressed data
meta.block_list[i].offset = dst_pos;
meta.block_list[i].size = static_cast<uint32_t>(inflate_out[i].size());
dst_pos += meta.block_list[i].size;
}
inflate_in.host_to_device(stream);
for (int loop_cnt = 0; loop_cnt < 2; loop_cnt++) {
inflate_out.host_to_device(stream);
gpuinflate(inflate_in, inflate_out, inflate_stats, gzip_header_included::NO, stream);
inflate_stats.device_to_host(stream, true);
// Check if larger output is required, as it's not known ahead of time
if (loop_cnt == 0) {
std::vector<size_t> actual_uncomp_sizes;
actual_uncomp_sizes.reserve(inflate_out.size());
std::transform(inflate_out.begin(),
inflate_out.end(),
inflate_stats.begin(),
std::back_inserter(actual_uncomp_sizes),
[](auto const& inf_out, auto const& inf_stats) {
// If error status is OUTPUT_OVERFLOW, the `bytes_written` field
// actually contains the uncompressed data size
return inf_stats.status == compression_status::OUTPUT_OVERFLOW
? std::max(inf_out.size(), inf_stats.bytes_written)
: inf_out.size();
});
auto const total_actual_uncomp_size =
std::accumulate(actual_uncomp_sizes.cbegin(), actual_uncomp_sizes.cend(), 0ul);
if (total_actual_uncomp_size > uncomp_size) {
decomp_block_data.resize(total_actual_uncomp_size, stream);
for (size_t i = 0; i < meta.block_list.size(); ++i) {
meta.block_list[i].offset =
i > 0 ? (meta.block_list[i - 1].size + meta.block_list[i - 1].offset) : 0;
meta.block_list[i].size = static_cast<uint32_t>(actual_uncomp_sizes[i]);
inflate_out[i] = {
static_cast<uint8_t*>(decomp_block_data.data()) + meta.block_list[i].offset,
meta.block_list[i].size};
}
} else {
break;
}
}
}
return decomp_block_data;
} else if (meta.codec == "snappy") {
size_t const num_blocks = meta.block_list.size();
// comp_block_data contains contents of the avro file starting from the first block, excluding
// file header. meta.block_list[i].offset refers to offset of block i in the file, including
// file header.
// Find ptrs to each compressed block in comp_block_data by removing header offset.
hostdevice_vector<void const*> compressed_data_ptrs(num_blocks, stream);
std::transform(meta.block_list.begin(),
meta.block_list.end(),
compressed_data_ptrs.host_ptr(),
[&](auto const& block) {
return static_cast<std::byte const*>(comp_block_data.data()) +
(block.offset - meta.block_list[0].offset);
});
compressed_data_ptrs.host_to_device(stream);
hostdevice_vector<size_t> compressed_data_sizes(num_blocks, stream);
std::transform(meta.block_list.begin(),
meta.block_list.end(),
compressed_data_sizes.host_ptr(),
[](auto const& block) { return block.size; });
compressed_data_sizes.host_to_device(stream);
hostdevice_vector<size_t> uncompressed_data_sizes(num_blocks, stream);
nvcompStatus_t status =
nvcompBatchedSnappyGetDecompressSizeAsync(compressed_data_ptrs.device_ptr(),
compressed_data_sizes.device_ptr(),
uncompressed_data_sizes.device_ptr(),
num_blocks,
stream.value());
CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess,
"Unable to get uncompressed sizes for snappy compressed blocks");
uncompressed_data_sizes.device_to_host(stream, true);
size_t const uncompressed_data_size =
std::reduce(uncompressed_data_sizes.begin(), uncompressed_data_sizes.end());
size_t const max_uncomp_block_size = std::reduce(
uncompressed_data_sizes.begin(), uncompressed_data_sizes.end(), 0, thrust::maximum<size_t>());
size_t temp_size;
status =
nvcompBatchedSnappyDecompressGetTempSize(num_blocks, max_uncomp_block_size, &temp_size);
CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess,
"Unable to get scratch size for snappy decompression");
rmm::device_buffer scratch(temp_size, stream);
rmm::device_buffer decomp_block_data(uncompressed_data_size, stream);
rmm::device_uvector<void*> uncompressed_data_ptrs(num_blocks, stream);
hostdevice_vector<size_t> uncompressed_data_offsets(num_blocks, stream);
std::exclusive_scan(uncompressed_data_sizes.begin(),
uncompressed_data_sizes.end(),
uncompressed_data_offsets.begin(),
0);
uncompressed_data_offsets.host_to_device(stream);
thrust::tabulate(rmm::exec_policy(stream),
uncompressed_data_ptrs.begin(),
uncompressed_data_ptrs.end(),
[off = uncompressed_data_offsets.device_ptr(),
data = static_cast<std::byte*>(decomp_block_data.data())] __device__(int i) {
return data + off[i];
});
rmm::device_uvector<size_t> actual_uncompressed_data_sizes(num_blocks, stream);
rmm::device_uvector<nvcompStatus_t> statuses(num_blocks, stream);
status = nvcompBatchedSnappyDecompressAsync(compressed_data_ptrs.device_ptr(),
compressed_data_sizes.device_ptr(),
uncompressed_data_sizes.device_ptr(),
actual_uncompressed_data_sizes.data(),
num_blocks,
scratch.data(),
scratch.size(),
uncompressed_data_ptrs.data(),
statuses.data(),
stream);
CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess, "unable to perform snappy decompression");
CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream),
uncompressed_data_sizes.d_begin(),
uncompressed_data_sizes.d_end(),
actual_uncompressed_data_sizes.begin()),
"Mismatch in expected and actual decompressed size during snappy decompression");
CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream),
statuses.begin(),
statuses.end(),
thrust::make_constant_iterator(nvcompStatus_t::nvcompSuccess)),
"Error during snappy decompression");
// Update block offsets & sizes to refer to uncompressed data
for (size_t i = 0; i < num_blocks; i++) {
meta.block_list[i].offset = uncompressed_data_offsets[i];
meta.block_list[i].size = uncompressed_data_sizes[i];
}
return decomp_block_data;
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
}
std::vector<column_buffer> decode_data(metadata& meta,
rmm::device_buffer const& block_data,
std::vector<std::pair<uint32_t, uint32_t>> const& dict,
device_span<string_index_pair const> global_dictionary,
size_t num_rows,
std::vector<std::pair<int, std::string>> const& selection,
std::vector<data_type> const& column_types,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto out_buffers = std::vector<column_buffer>();
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selection[i].first;
bool is_nullable = (meta.columns[col_idx].schema_null_idx >= 0);
out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, mr);
}
// Build gpu schema
auto schema_desc = hostdevice_vector<gpu::schemadesc_s>(meta.schema.size(), stream);
uint32_t min_row_data_size = 0;
int skip_field_cnt = 0;
for (size_t i = 0; i < meta.schema.size(); i++) {
type_kind_e kind = meta.schema[i].kind;
if (skip_field_cnt != 0) {
// Exclude union and array members from min_row_data_size
skip_field_cnt += meta.schema[i].num_children - 1;
} else {
switch (kind) {
case type_union:
case type_array:
skip_field_cnt = meta.schema[i].num_children;
// fall through
case type_boolean:
case type_int:
case type_long:
case type_bytes:
case type_string:
case type_enum: min_row_data_size += 1; break;
case type_float: min_row_data_size += 4; break;
case type_double: min_row_data_size += 8; break;
default: break;
}
}
if (kind == type_enum && !meta.schema[i].symbols.size()) { kind = type_int; }
schema_desc[i].kind = kind;
schema_desc[i].count =
(kind == type_enum) ? 0 : static_cast<uint32_t>(meta.schema[i].num_children);
schema_desc[i].dataptr = nullptr;
CUDF_EXPECTS(kind != type_union || meta.schema[i].num_children < 2 ||
(meta.schema[i].num_children == 2 &&
(meta.schema[i + 1].kind == type_null || meta.schema[i + 2].kind == type_null)),
"Union with non-null type not currently supported");
}
std::vector<void*> valid_alias(out_buffers.size(), nullptr);
for (size_t i = 0; i < out_buffers.size(); i++) {
auto const col_idx = selection[i].first;
int schema_data_idx = meta.columns[col_idx].schema_data_idx;
int schema_null_idx = meta.columns[col_idx].schema_null_idx;
schema_desc[schema_data_idx].dataptr = out_buffers[i].data();
if (schema_null_idx >= 0) {
if (!schema_desc[schema_null_idx].dataptr) {
schema_desc[schema_null_idx].dataptr = out_buffers[i].null_mask();
} else {
valid_alias[i] = schema_desc[schema_null_idx].dataptr;
}
}
if (meta.schema[schema_data_idx].kind == type_enum) {
schema_desc[schema_data_idx].count = dict[i].first;
}
if (out_buffers[i].null_mask_size()) {
cudf::detail::set_null_mask(out_buffers[i].null_mask(), 0, num_rows, true, stream);
}
}
auto block_list = cudf::detail::make_device_uvector_async(meta.block_list, stream);
schema_desc.host_to_device(stream);
gpu::DecodeAvroColumnData(block_list,
schema_desc.device_ptr(),
global_dictionary,
static_cast<uint8_t const*>(block_data.data()),
static_cast<uint32_t>(schema_desc.size()),
meta.num_rows,
meta.skip_rows,
min_row_data_size,
stream);
// Copy valid bits that are shared between columns
for (size_t i = 0; i < out_buffers.size(); i++) {
if (valid_alias[i] != nullptr) {
CUDF_CUDA_TRY(cudaMemcpyAsync(out_buffers[i].null_mask(),
valid_alias[i],
out_buffers[i].null_mask_size(),
cudaMemcpyHostToDevice,
stream.value()));
}
}
schema_desc.device_to_host(stream, true);
for (size_t i = 0; i < out_buffers.size(); i++) {
auto const col_idx = selection[i].first;
auto const schema_null_idx = meta.columns[col_idx].schema_null_idx;
out_buffers[i].null_count() = (schema_null_idx >= 0) ? schema_desc[schema_null_idx].count : 0;
}
return out_buffers;
}
table_with_metadata read_avro(std::unique_ptr<cudf::io::datasource>&& source,
avro_reader_options const& options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto skip_rows = options.get_skip_rows();
auto num_rows = options.get_num_rows();
num_rows = (num_rows != 0) ? num_rows : -1;
std::vector<std::unique_ptr<column>> out_columns;
table_metadata metadata_out;
// Open the source Avro dataset metadata
auto meta = metadata(source.get());
// Select and read partial metadata / schema within the subset of rows
meta.init_and_select_rows(skip_rows, num_rows);
// Select only columns required by the options
auto selected_columns = meta.select_columns(options.get_columns());
if (selected_columns.size() != 0) {
// Get a list of column data types
std::vector<data_type> column_types;
for (auto const& col : selected_columns) {
auto& col_schema = meta.schema[meta.columns[col.first].schema_data_idx];
auto col_type = to_type_id(&col_schema);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
}
if (meta.total_data_size > 0) {
rmm::device_buffer block_data;
if (source->is_device_read_preferred(meta.total_data_size)) {
block_data = rmm::device_buffer{meta.total_data_size, stream};
auto read_bytes = source->device_read(meta.block_list[0].offset,
meta.total_data_size,
static_cast<uint8_t*>(block_data.data()),
stream);
block_data.resize(read_bytes, stream);
} else {
auto const buffer = source->host_read(meta.block_list[0].offset, meta.total_data_size);
block_data = rmm::device_buffer{buffer->data(), buffer->size(), stream};
}
if (meta.codec != "" && meta.codec != "null") {
auto decomp_block_data = decompress_data(*source, meta, block_data, stream);
block_data = std::move(decomp_block_data);
} else {
auto dst_ofs = meta.block_list[0].offset;
for (size_t i = 0; i < meta.block_list.size(); i++) {
meta.block_list[i].offset -= dst_ofs;
}
}
size_t total_dictionary_entries = 0;
size_t dictionary_data_size = 0;
auto dict = std::vector<std::pair<uint32_t, uint32_t>>(column_types.size());
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selected_columns[i].first;
auto& col_schema = meta.schema[meta.columns[col_idx].schema_data_idx];
dict[i].first = static_cast<uint32_t>(total_dictionary_entries);
dict[i].second = static_cast<uint32_t>(col_schema.symbols.size());
total_dictionary_entries += dict[i].second;
for (auto const& sym : col_schema.symbols) {
dictionary_data_size += sym.length();
}
}
auto d_global_dict = rmm::device_uvector<string_index_pair>(0, stream);
auto d_global_dict_data = rmm::device_uvector<char>(0, stream);
if (total_dictionary_entries > 0) {
auto h_global_dict = std::vector<string_index_pair>(total_dictionary_entries);
auto h_global_dict_data = std::vector<char>(dictionary_data_size);
size_t dict_pos = 0;
for (size_t i = 0; i < column_types.size(); ++i) {
auto const col_idx = selected_columns[i].first;
auto const& col_schema = meta.schema[meta.columns[col_idx].schema_data_idx];
auto const col_dict_entries = &(h_global_dict[dict[i].first]);
for (size_t j = 0; j < dict[i].second; j++) {
auto const& symbols = col_schema.symbols[j];
auto const data_dst = h_global_dict_data.data() + dict_pos;
auto const len = symbols.length();
col_dict_entries[j].first = data_dst;
col_dict_entries[j].second = len;
std::copy(symbols.c_str(), symbols.c_str() + len, data_dst);
dict_pos += len;
}
}
d_global_dict = cudf::detail::make_device_uvector_async(h_global_dict, stream);
d_global_dict_data = cudf::detail::make_device_uvector_async(h_global_dict_data, stream);
stream.synchronize();
}
auto out_buffers = decode_data(meta,
block_data,
dict,
d_global_dict,
num_rows,
selected_columns,
column_types,
stream,
mr);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_column(out_buffers[i], nullptr, std::nullopt, stream, mr));
}
} else {
// Create empty columns
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_empty_column(column_types[i]));
}
}
}
// Return column names (must match order of returned columns)
metadata_out.column_names.resize(selected_columns.size());
for (size_t i = 0; i < selected_columns.size(); i++) {
metadata_out.column_names[i] = selected_columns[i].second;
}
// Return user metadata
metadata_out.user_data = meta.user_data;
metadata_out.per_file_user_data = {{meta.user_data.begin(), meta.user_data.end()}};
return {std::make_unique<table>(std::move(out_columns)), std::move(metadata_out)};
}
} // namespace avro
} // namespace detail
} // namespace io
} // namespace cudf
|
e0530347c665cb00859b305625efe9693d0c571a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <time.h>
/*
* Monte Carlo Pi Estimation Algorithm in CUDA
*
* This Project uses Cuda and thread
* topology to estimate Pi.
*
* Author: Clayton Glenn
*/
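/*
 * Background on the estimator used below: with N points drawn uniformly
 * from the unit square, the fraction M/N that satisfies x*x + y*y <= 1
 * approaches pi/4, so pi is estimated as 4*M/N (see the final printf in
 * main). The statistical error of the estimate shrinks roughly like
 * 1/sqrt(N), so doubling the point count only modestly improves accuracy.
 */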
#define MAX_THREAD 16
#define MIN_THREAD 8
#define MAX_N 20
#define MIN_N 8
#define BLOCK_SIZE 256
#define DEBUG 0
/** Kernel Function
 * First finds the global Thread ID within the grid of GPU Threads,
 * then strides across the random points assigned to that thread
 * (indices i*t + tid) and increments its per-thread counter for every
 * point that falls inside the unit quarter circle.
 **/
__global__
void monte(int *flags, float *x_vals, float *y_vals, int t, int n) {
//Get Thread id
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Loop N/Threads times plus one
for(int i = 0; i < (n/t + 1); i++){
// If looped id count is less than n, grab rand x
// and y and check within unit. Increment if so
if((i*t+tid) < n){
if((pow(x_vals[(i*t+tid)], 2) + pow(y_vals[(i*t+tid)],2)) <= 1) flags[(tid)]++;
}
}
}
/**
* Helper Function
 * Prints a string to standard error showing help
* for valid arguments in the executable
**/
void printerror(){
fprintf(stderr, "Invalid Arguments\n");
fprintf(stderr, "Correct Form: ./monte [# threads] [# points]\n");
exit(0);
}
/**
 * Main Program
 * Parses the thread and point exponents from the command line,
 * generates 2^N random (x, y) points in the unit square, launches the
 * Monte Carlo kernel, and prints the resulting estimate of Pi along
 * with the kernel time.
 **/
int main(int argc, char **argv) {
// Initialize thread and point counts to their minimum defaults
int N = MIN_THREAD;
int THREADS = MIN_THREAD;
int BLOCKS = 256;
// Check for immediate errors in args
if (argc < 3 || argc > 3) printerror();
// Get Thread Count Per Block
THREADS = strtol(argv[1], NULL, 10);
THREADS = ((int)pow(2, THREADS));
if(THREADS < BLOCKS) BLOCKS = 1;
else THREADS = THREADS / BLOCKS;
// Get N Coordinates
N = strtol(argv[2], NULL, 10);
N = (int)pow(2, N);
// Print N and the thread count for reference
printf("(Threads: %d) (N: %d)\n", THREADS * BLOCKS, N);
//Set Array of Size Thread
int flags[BLOCKS*THREADS];
float randx[N];
float randy[N];
srand( time( NULL ) );
for(int i = 0; i < N; i++){
if(i < BLOCKS*THREADS)flags[i] = 0;
randx[i] = ( float )rand()/RAND_MAX;
randy[i] = ( float )rand()/RAND_MAX;
}
// Init all other variables
int *dev_flags;
float *dev_randx;
float *dev_randy;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float final_time = 0.0;
// Allocate memory in the GPU for the int array
hipMalloc(&dev_randx, N*sizeof(float));
hipMalloc(&dev_randy, N*sizeof(float));
hipMalloc(&dev_flags, BLOCKS*THREADS*sizeof(int));
// Copy the Memory from the array to the array pointers
hipMemcpy(dev_flags, flags, BLOCKS*THREADS*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_randx, randx, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_randy, randy, N*sizeof(float), hipMemcpyHostToDevice);
// Total Time Record
hipEventRecord(start);
hipLaunchKernelGGL(( monte), dim3(BLOCKS), dim3(THREADS), 0, 0, dev_flags, dev_randx, dev_randy, BLOCKS*THREADS, N);
hipEventRecord(stop);
// Copy the results from GPU to the CPU
hipMemcpy(flags, dev_flags, BLOCKS*THREADS*sizeof(int), hipMemcpyDeviceToHost);
// Count total successes for each thread
int success = 0;
for(int i = 0; i < BLOCKS*THREADS; i++){
if(flags[i] > 0) success += flags[i];
}
// Print Successes, failures, and estimation
//printf("Success: %d\n", success);
//printf("Failure: %d\n", (N - success));
printf("Estimation of Pi: %1.6f\n", ((float)success/N)*4);
hipEventSynchronize(stop);
hipEventElapsedTime(&final_time, start, stop);
printf("Time in Kernel: %1.10f\n\n", final_time/1000);
hipFree(dev_flags);
hipFree(dev_randx);
hipFree(dev_randy);
}
| e0530347c665cb00859b305625efe9693d0c571a.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <time.h>
/*
* Monte Carlo Pi Estimation Algorithm in CUDA
*
* This Project uses Cuda and thread
* topology to estimate Pi.
*
* Author: Clayton Glenn
*/
#define MAX_THREAD 16
#define MIN_THREAD 8
#define MAX_N 20
#define MIN_N 8
#define BLOCK_SIZE 256
#define DEBUG 0
/** Kernel Function
 * First finds the global Thread ID within the grid of GPU Threads,
 * then strides across the random points assigned to that thread
 * (indices i*t + tid) and increments its per-thread counter for every
 * point that falls inside the unit quarter circle.
 **/
__global__
void monte(int *flags, float *x_vals, float *y_vals, int t, int n) {
//Get Thread id
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Loop N/Threads times plus one
for(int i = 0; i < (n/t + 1); i++){
// If looped id count is less than n, grab rand x
// and y and check within unit. Increment if so
if((i*t+tid) < n){
if((pow(x_vals[(i*t+tid)], 2) + pow(y_vals[(i*t+tid)],2)) <= 1) flags[(tid)]++;
}
}
}
/**
* Helper Function
 * Prints a string to standard error showing help
* for valid arguments in the executable
**/
void printerror(){
fprintf(stderr, "Invalid Arguments\n");
fprintf(stderr, "Correct Form: ./monte [# threads] [# points]\n");
exit(0);
}
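/*
 * Example invocation (hypothetical values): "./monte 10 20" runs with
 * 2^10 total threads and 2^20 random points, since both command-line
 * arguments are interpreted as powers of two in main below.
 */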
/**
 * Main Program
 * Parses the thread and point exponents from the command line,
 * generates 2^N random (x, y) points in the unit square, launches the
 * Monte Carlo kernel, and prints the resulting estimate of Pi along
 * with the kernel time.
 **/
int main(int argc, char **argv) {
// Initialize thread and point counts to their minimum defaults
int N = MIN_THREAD;
int THREADS = MIN_THREAD;
int BLOCKS = 256;
// Check for immediate errors in args
if (argc < 3 || argc > 3) printerror();
// Get Thread Count Per Block
THREADS = strtol(argv[1], NULL, 10);
THREADS = ((int)pow(2, THREADS));
if(THREADS < BLOCKS) BLOCKS = 1;
else THREADS = THREADS / BLOCKS;
// Get N Coordinates
N = strtol(argv[2], NULL, 10);
N = (int)pow(2, N);
// Print N and the thread count for reference
printf("(Threads: %d) (N: %d)\n", THREADS * BLOCKS, N);
//Set Array of Size Thread
int flags[BLOCKS*THREADS];
float randx[N];
float randy[N];
srand( time( NULL ) );
for(int i = 0; i < N; i++){
if(i < BLOCKS*THREADS)flags[i] = 0;
randx[i] = ( float )rand()/RAND_MAX;
randy[i] = ( float )rand()/RAND_MAX;
}
// Init all other variables
int *dev_flags;
float *dev_randx;
float *dev_randy;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float final_time = 0.0;
// Allocate memory in the GPU for the int array
cudaMalloc(&dev_randx, N*sizeof(float));
cudaMalloc(&dev_randy, N*sizeof(float));
cudaMalloc(&dev_flags, BLOCKS*THREADS*sizeof(int));
// Copy the Memory from the array to the array pointers
cudaMemcpy(dev_flags, flags, BLOCKS*THREADS*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_randx, randx, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_randy, randy, N*sizeof(float), cudaMemcpyHostToDevice);
// Total Time Record
cudaEventRecord(start);
monte<<<BLOCKS, THREADS>>>(dev_flags, dev_randx, dev_randy, BLOCKS*THREADS, N);
cudaEventRecord(stop);
// Copy the results from GPU to the CPU
cudaMemcpy(flags, dev_flags, BLOCKS*THREADS*sizeof(int), cudaMemcpyDeviceToHost);
// Count total successes for each thread
int success = 0;
for(int i = 0; i < BLOCKS*THREADS; i++){
if(flags[i] > 0) success += flags[i];
}
// Print Successes, failures, and estimation
//printf("Success: %d\n", success);
//printf("Failure: %d\n", (N - success));
printf("Estimation of Pi: %1.6f\n", ((float)success/N)*4);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&final_time, start, stop);
printf("Time in Kernel: %1.10f\n\n", final_time/1000);
cudaFree(dev_flags);
cudaFree(dev_randx);
cudaFree(dev_randy);
}
|
af6957af2789a3ab1b41dc7a995568a667e89237.hip | // !!! This is a file automatically generated by hipify!!!
#include "custom_cuda_layers.h"
#ifndef __HIP_PLATFORM_HCC__
#include <hip/hip_runtime_api.h>
#endif
namespace cg = cooperative_groups;
namespace cg = cooperative_groups;
__global__ void apply_rotary_pos_emb(float* mixed_query,
float* key_layer,
unsigned rotary_dim,
unsigned seq_len,
unsigned seq_offset,
unsigned num_heads,
unsigned head_size,
unsigned total_count)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int id = threadIdx.x;
int gid = id >> 5;
int lane = id & 0x1f;
unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid;
unsigned offset = head_id * head_size;
unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset;
if (head_id < total_count) {
while (lane < rotary_dim) {
float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim;
inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id;
float q = mixed_query[offset + lane];
float k = key_layer[offset + lane];
float rotary_sign = (lane % 2 == 1 ? -1.0 : 1.0);
float q_rot = (q * rotary_sign);
float k_rot = (k * rotary_sign);
q_rot = g.shfl_xor(q_rot, 1);
k_rot = g.shfl_xor(k_rot, 1);
q = q * cosf(inv_freq) + q_rot * sinf(inv_freq);
k = k * cosf(inv_freq) + k_rot * sinf(inv_freq);
mixed_query[offset + lane] = q;
key_layer[offset + lane] = k;
lane += WARP_SIZE;
}
}
}
__global__ void apply_rotary_pos_emb(__half* mixed_query,
__half* key_layer,
unsigned rotary_dim,
unsigned seq_len,
unsigned seq_offset,
unsigned num_heads,
unsigned head_size,
unsigned total_count)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int id = threadIdx.x;
int gid = id >> 5;
int lane = id & 0x1f;
unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid;
unsigned offset = head_id * head_size;
unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset;
if (head_id < total_count) {
while (lane < rotary_dim) {
float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim;
inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id;
float q = (float)mixed_query[offset + lane];
float k = (float)key_layer[offset + lane];
float rotary_sign = (lane % 2 == 1 ? -1.0 : 1.0);
float q_rot = (q * rotary_sign);
float k_rot = (k * rotary_sign);
q_rot = g.shfl_xor(q_rot, 1);
k_rot = g.shfl_xor(k_rot, 1);
q = q * cosf(inv_freq) + q_rot * sinf(inv_freq);
k = k * cosf(inv_freq) + k_rot * sinf(inv_freq);
mixed_query[offset + lane] = (__half)q;
key_layer[offset + lane] = (__half)k;
lane += WARP_SIZE;
}
}
#endif
}
__global__ void apply_rotary_pos_emb1(float* mixed_query,
float* key_layer,
unsigned rotary_dim,
unsigned seq_len,
unsigned seq_offset,
unsigned num_heads,
unsigned head_size,
unsigned total_count)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int id = threadIdx.x;
int gid = id >> 5;
int lane = id & 0x1f;
unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid;
unsigned offset = head_id * head_size;
unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset;
if (head_id < total_count) {
while (lane < rotary_dim) {
float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim;
inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id;
float q = mixed_query[offset + lane];
float k = key_layer[offset + lane];
float rotary_sign = (lane % 2 == 1 ? -1.0 : 1.0);
float q_rot = (q * rotary_sign);
float k_rot = (k * rotary_sign);
q_rot = g.shfl_xor(q_rot, 1);
k_rot = g.shfl_xor(k_rot, 1);
q = q * cosf(inv_freq) + q_rot * sinf(inv_freq);
k = k * cosf(inv_freq) + k_rot * sinf(inv_freq);
mixed_query[offset + lane] = q;
key_layer[offset + lane] = k;
lane += WARP_SIZE;
}
}
}
__global__ void apply_rotary_pos_emb1(__half* mixed_query,
__half* key_layer,
unsigned rotary_dim,
unsigned seq_len,
unsigned seq_offset,
unsigned num_heads,
unsigned head_size,
unsigned total_count)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int id = threadIdx.x;
int gid = id >> 5;
int lane = id & 0x1f;
unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid;
unsigned seq_index = head_id % seq_len;
unsigned offset = head_id * head_size;
unsigned k_offset = (seq_index + (head_id / seq_len) * MAX_OUT_TOKES) * head_size;
constexpr unsigned mask[32] = {
0x1 | 0x1000, 0x2 | 0x2000, 0x4 | 0x4000, 0x8 | 0x8000, 0x10 | 0x10000,
0x20 | 0x20000, 0x40 | 0x40000, 0x80 | 0x80000, 0x100 | 0x100000, 0x200 | 0x200000,
0x400 | 0x400000, 0x800 | 0x800000, 0x1000 | 0x1, 0x2000 | 0x2, 0x4000 | 0x4,
0x8000 | 0x8, 0x10000 | 0x10, 0x20000 | 0x20, 0x40000 | 0x40, 0x80000 | 0x80,
0x100000 | 0x100, 0x200000 | 0x200, 0x400000 | 0x400, 0x800000 | 0x800, 0x1000000,
0x2000000, 0x4000000, 0x8000000, 0x10000000, 0x20000000,
0x40000000, 0x80000000};
unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset;
unsigned half_dim = rotary_dim >> 1;
if (head_id < total_count) {
while (lane < rotary_dim) {
float inv_freq = (float)((lane % half_dim) * 2) / (float)rotary_dim;
inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id;
float q = (float)mixed_query[offset + lane];
float k = (float)key_layer[k_offset + lane];
float rotary_sign = (lane > (half_dim - 1) ? -1.0 : 1.0);
float q_rot = (q * rotary_sign);
float k_rot = (k * rotary_sign);
auto q_rot_tmp = lane < half_dim ? __shfl_sync(mask[lane], q_rot, lane + half_dim)
: __shfl_sync(mask[lane], q_rot, lane - half_dim);
auto k_rot_tmp = lane < half_dim ? __shfl_sync(mask[lane], k_rot, lane + half_dim)
: __shfl_sync(mask[lane], k_rot, lane - half_dim);
q = q * cosf(inv_freq) + q_rot_tmp * sinf(inv_freq);
k = k * cosf(inv_freq) + k_rot_tmp * sinf(inv_freq);
mixed_query[offset + lane] = (__half)q;
key_layer[k_offset + lane] = (__half)k;
lane += WARP_SIZE;
}
}
#endif
}
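/*
 * Note on the rotation layouts implemented above (as read from the kernels;
 * naming of the two styles differs between frameworks):
 *  - apply_rotary_pos_emb rotates adjacent element pairs (2i, 2i+1),
 *    exchanging partners via shfl_xor with mask 1 ("rotate every two").
 *  - the __half overload of apply_rotary_pos_emb1 instead pairs element i
 *    with element i + rotary_dim/2 ("rotate half"); the float overload
 *    currently uses the same adjacent-pair scheme as apply_rotary_pos_emb.
 * In every case a pair (q, q_rot) is rotated by the angle
 *   theta_i = seq_id / 10000^(2i / rotary_dim):
 *   q' = q*cos(theta_i) + q_rot*sin(theta_i), and likewise for k.
 */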
template <typename T>
void launch_apply_rotary_pos_emb(T* mixed_query,
T* key_layer,
unsigned head_size,
unsigned seq_len,
unsigned rotary_dim,
unsigned offset,
unsigned num_heads,
unsigned batch,
bool rotate_half,
bool rotate_every_two,
hipStream_t stream)
{
int total_count = batch * num_heads * seq_len;
dim3 block_dims(1024);
dim3 grid_dims((total_count - 1) / MAX_WARP_NUM + 1); // (batch_size);
if (rotate_every_two)
hipLaunchKernelGGL(( apply_rotary_pos_emb), dim3(grid_dims), dim3(block_dims), 0, stream,
mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count);
else if (rotate_half)
hipLaunchKernelGGL(( apply_rotary_pos_emb1), dim3(grid_dims), dim3(block_dims), 0, stream,
mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count);
}
template void launch_apply_rotary_pos_emb<float>(float*,
float*,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
bool,
bool,
hipStream_t);
template void launch_apply_rotary_pos_emb<__half>(__half*,
__half*,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
bool,
bool,
hipStream_t);
/*
__global__ void apply_rotary_pos_emb(float* mixed_query,
float* key_layer,
unsigned rotary_dim,
unsigned seq_len,
unsigned seq_offset,
unsigned num_heads,
unsigned head_size,
unsigned total_count)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int id = threadIdx.x;
int gid = id >> 5;
int lane = id & 0x1f;
unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid;
unsigned offset = head_id * head_size;
unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset;
if (head_id < total_count) {
while (lane < rotary_dim) {
float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim;
inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id;
float q = mixed_query[offset + lane];
float k = key_layer[offset + lane];
float rotary_sign = (lane % 2 == 1 ? -1.0 : 1.0);
float q_rot = (q * rotary_sign);
float k_rot = (k * rotary_sign);
q_rot = g.shfl_xor(q_rot, 1);
k_rot = g.shfl_xor(k_rot, 1);
q = q * cosf(inv_freq) + q_rot * sinf(inv_freq);
k = k * cosf(inv_freq) + k_rot * sinf(inv_freq);
mixed_query[offset + lane] = q;
key_layer[offset + lane] = k;
lane += WARP_SIZE;
}
}
}
__global__ void apply_rotary_pos_emb(__half* mixed_query,
__half* key_layer,
unsigned rotary_dim,
unsigned seq_len,
unsigned seq_offset,
unsigned num_heads,
unsigned head_size,
unsigned total_count)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int id = threadIdx.x;
int gid = id >> 5;
int lane = id & 0x1f;
unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid;
unsigned offset = head_id * head_size;
constexpr unsigned mask[32] = {0x1 | 0x1000, 0x2 | 0x2000, 0x4 | 0x4000, 0x8 | 0x8000,
0x10 | 0x10000, 0x20 | 0x20000, 0x40 | 0x40000, 0x80 | 0x80000,
0x100 | 0x100000, 0x200 | 0x200000, 0x400 | 0x400000, 0x800 | 0x800000,
0x1000 | 0x1, 0x2000 | 0x2, 0x4000 | 0x4, 0x8000 | 0x8,
0x10000 | 0x10, 0x20000 | 0x20, 0x40000 | 0x40, 0x80000 | 0x80,
0x100000 | 0x100, 0x200000 | 0x200, 0x400000 | 0x400, 0x800000 | 0x800,
0x1000000, 0x2000000, 0x4000000, 0x8000000,
0x10000000, 0x20000000, 0x40000000, 0x80000000};
unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset;
if (head_id < total_count) {
while (lane < rotary_dim) {
//float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim;
float inv_freq = (float)((lane % (rotary_dim >> 1)) * 2) / (float)rotary_dim;
inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id;
float q = (float)mixed_query[offset + lane];
float k = (float)key_layer[offset + lane];
float rotary_sign = (lane > 11 ? -1.0 : 1.0);
float q_rot = (q * rotary_sign);
float k_rot = (k * rotary_sign);
auto q_rot_tmp = lane < 12 ? __shfl_sync(mask[lane], q_rot, lane + 12) : __shfl_sync(mask[lane],
q_rot, lane - 12);//g.shfl_xor(q_rot, 12); auto k_rot_tmp = lane < 12 ? __shfl_sync(mask[lane],
k_rot, lane + 12) : __shfl_sync(mask[lane], k_rot, lane - 12);//g.shfl_xor(k_rot, 12); q = q *
cosf(inv_freq) + q_rot_tmp * sinf(inv_freq); k = k * cosf(inv_freq) + k_rot_tmp * sinf(inv_freq);
mixed_query[offset + lane] = (__half)q;
key_layer[offset + lane] = (__half)k;
lane += WARP_SIZE;
}
}
#endif
}
template <typename T>
void launch_apply_rotary_pos_emb(T* mixed_query,
T* key_layer,
unsigned head_size,
unsigned seq_len,
unsigned rotary_dim,
unsigned offset,
unsigned num_heads,
unsigned batch,
hipStream_t stream)
{
int total_count = batch * num_heads * seq_len;
dim3 block_dims(1024);
dim3 grid_dims((total_count - 1) / MAX_WARP_NUM + 1); // (batch_size);
apply_rotary_pos_emb<<<grid_dims, block_dims, 0, stream>>>(
mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count);
}
template void launch_apply_rotary_pos_emb<float>(float*,
float*,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
hipStream_t);
template void launch_apply_rotary_pos_emb<__half>(__half*,
__half*,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
hipStream_t);
*/
| af6957af2789a3ab1b41dc7a995568a667e89237.cu | #include "custom_cuda_layers.h"
#ifndef __HIP_PLATFORM_HCC__
#include <cuda_profiler_api.h>
#endif
namespace cg = cooperative_groups;
namespace cg = cooperative_groups;
__global__ void apply_rotary_pos_emb(float* mixed_query,
float* key_layer,
unsigned rotary_dim,
unsigned seq_len,
unsigned seq_offset,
unsigned num_heads,
unsigned head_size,
unsigned total_count)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int id = threadIdx.x;
int gid = id >> 5;
int lane = id & 0x1f;
unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid;
unsigned offset = head_id * head_size;
unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset;
if (head_id < total_count) {
while (lane < rotary_dim) {
float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim;
inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id;
float q = mixed_query[offset + lane];
float k = key_layer[offset + lane];
float rotary_sign = (lane % 2 == 1 ? -1.0 : 1.0);
float q_rot = (q * rotary_sign);
float k_rot = (k * rotary_sign);
q_rot = g.shfl_xor(q_rot, 1);
k_rot = g.shfl_xor(k_rot, 1);
q = q * cosf(inv_freq) + q_rot * sinf(inv_freq);
k = k * cosf(inv_freq) + k_rot * sinf(inv_freq);
mixed_query[offset + lane] = q;
key_layer[offset + lane] = k;
lane += WARP_SIZE;
}
}
}
__global__ void apply_rotary_pos_emb(__half* mixed_query,
__half* key_layer,
unsigned rotary_dim,
unsigned seq_len,
unsigned seq_offset,
unsigned num_heads,
unsigned head_size,
unsigned total_count)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int id = threadIdx.x;
int gid = id >> 5;
int lane = id & 0x1f;
unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid;
unsigned offset = head_id * head_size;
unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset;
if (head_id < total_count) {
while (lane < rotary_dim) {
float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim;
inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id;
float q = (float)mixed_query[offset + lane];
float k = (float)key_layer[offset + lane];
float rotary_sign = (lane % 2 == 1 ? -1.0 : 1.0);
float q_rot = (q * rotary_sign);
float k_rot = (k * rotary_sign);
q_rot = g.shfl_xor(q_rot, 1);
k_rot = g.shfl_xor(k_rot, 1);
q = q * cosf(inv_freq) + q_rot * sinf(inv_freq);
k = k * cosf(inv_freq) + k_rot * sinf(inv_freq);
mixed_query[offset + lane] = (__half)q;
key_layer[offset + lane] = (__half)k;
lane += WARP_SIZE;
}
}
#endif
}
__global__ void apply_rotary_pos_emb1(float* mixed_query,
float* key_layer,
unsigned rotary_dim,
unsigned seq_len,
unsigned seq_offset,
unsigned num_heads,
unsigned head_size,
unsigned total_count)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int id = threadIdx.x;
int gid = id >> 5;
int lane = id & 0x1f;
unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid;
unsigned offset = head_id * head_size;
unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset;
if (head_id < total_count) {
while (lane < rotary_dim) {
float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim;
inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id;
float q = mixed_query[offset + lane];
float k = key_layer[offset + lane];
float rotary_sign = (lane % 2 == 1 ? -1.0 : 1.0);
float q_rot = (q * rotary_sign);
float k_rot = (k * rotary_sign);
q_rot = g.shfl_xor(q_rot, 1);
k_rot = g.shfl_xor(k_rot, 1);
q = q * cosf(inv_freq) + q_rot * sinf(inv_freq);
k = k * cosf(inv_freq) + k_rot * sinf(inv_freq);
mixed_query[offset + lane] = q;
key_layer[offset + lane] = k;
lane += WARP_SIZE;
}
}
}
__global__ void apply_rotary_pos_emb1(__half* mixed_query,
__half* key_layer,
unsigned rotary_dim,
unsigned seq_len,
unsigned seq_offset,
unsigned num_heads,
unsigned head_size,
unsigned total_count)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int id = threadIdx.x;
int gid = id >> 5;
int lane = id & 0x1f;
unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid;
unsigned seq_index = head_id % seq_len;
unsigned offset = head_id * head_size;
unsigned k_offset = (seq_index + (head_id / seq_len) * MAX_OUT_TOKES) * head_size;
constexpr unsigned mask[32] = {
0x1 | 0x1000, 0x2 | 0x2000, 0x4 | 0x4000, 0x8 | 0x8000, 0x10 | 0x10000,
0x20 | 0x20000, 0x40 | 0x40000, 0x80 | 0x80000, 0x100 | 0x100000, 0x200 | 0x200000,
0x400 | 0x400000, 0x800 | 0x800000, 0x1000 | 0x1, 0x2000 | 0x2, 0x4000 | 0x4,
0x8000 | 0x8, 0x10000 | 0x10, 0x20000 | 0x20, 0x40000 | 0x40, 0x80000 | 0x80,
0x100000 | 0x100, 0x200000 | 0x200, 0x400000 | 0x400, 0x800000 | 0x800, 0x1000000,
0x2000000, 0x4000000, 0x8000000, 0x10000000, 0x20000000,
0x40000000, 0x80000000};
unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset;
unsigned half_dim = rotary_dim >> 1;
if (head_id < total_count) {
while (lane < rotary_dim) {
float inv_freq = (float)((lane % half_dim) * 2) / (float)rotary_dim;
inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id;
float q = (float)mixed_query[offset + lane];
float k = (float)key_layer[k_offset + lane];
float rotary_sign = (lane > (half_dim - 1) ? -1.0 : 1.0);
float q_rot = (q * rotary_sign);
float k_rot = (k * rotary_sign);
auto q_rot_tmp = lane < half_dim ? __shfl_sync(mask[lane], q_rot, lane + half_dim)
: __shfl_sync(mask[lane], q_rot, lane - half_dim);
auto k_rot_tmp = lane < half_dim ? __shfl_sync(mask[lane], k_rot, lane + half_dim)
: __shfl_sync(mask[lane], k_rot, lane - half_dim);
q = q * cosf(inv_freq) + q_rot_tmp * sinf(inv_freq);
k = k * cosf(inv_freq) + k_rot_tmp * sinf(inv_freq);
mixed_query[offset + lane] = (__half)q;
key_layer[k_offset + lane] = (__half)k;
lane += WARP_SIZE;
}
}
#endif
}
template <typename T>
void launch_apply_rotary_pos_emb(T* mixed_query,
T* key_layer,
unsigned head_size,
unsigned seq_len,
unsigned rotary_dim,
unsigned offset,
unsigned num_heads,
unsigned batch,
bool rotate_half,
bool rotate_every_two,
cudaStream_t stream)
{
int total_count = batch * num_heads * seq_len;
dim3 block_dims(1024);
dim3 grid_dims((total_count - 1) / MAX_WARP_NUM + 1); // (batch_size);
if (rotate_every_two)
apply_rotary_pos_emb<<<grid_dims, block_dims, 0, stream>>>(
mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count);
else if (rotate_half)
apply_rotary_pos_emb1<<<grid_dims, block_dims, 0, stream>>>(
mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count);
}
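/*
 * Launch geometry (derived from the launcher above): one warp handles one
 * (batch, head, sequence-position) row, so total_count = batch * num_heads
 * * seq_len warps are required. Each 1024-thread block indexes its warps as
 * blockIdx.x * MAX_WARP_NUM + warp_id, and the grid is sized as
 * ceil(total_count / MAX_WARP_NUM); this assumes MAX_WARP_NUM matches the
 * 32 warps available in a 1024-thread block.
 */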
template void launch_apply_rotary_pos_emb<float>(float*,
float*,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
bool,
bool,
cudaStream_t);
template void launch_apply_rotary_pos_emb<__half>(__half*,
__half*,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
bool,
bool,
cudaStream_t);
/*
__global__ void apply_rotary_pos_emb(float* mixed_query,
float* key_layer,
unsigned rotary_dim,
unsigned seq_len,
unsigned seq_offset,
unsigned num_heads,
unsigned head_size,
unsigned total_count)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int id = threadIdx.x;
int gid = id >> 5;
int lane = id & 0x1f;
unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid;
unsigned offset = head_id * head_size;
unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset;
if (head_id < total_count) {
while (lane < rotary_dim) {
float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim;
inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id;
float q = mixed_query[offset + lane];
float k = key_layer[offset + lane];
float rotary_sign = (lane % 2 == 1 ? -1.0 : 1.0);
float q_rot = (q * rotary_sign);
float k_rot = (k * rotary_sign);
q_rot = g.shfl_xor(q_rot, 1);
k_rot = g.shfl_xor(k_rot, 1);
q = q * cosf(inv_freq) + q_rot * sinf(inv_freq);
k = k * cosf(inv_freq) + k_rot * sinf(inv_freq);
mixed_query[offset + lane] = q;
key_layer[offset + lane] = k;
lane += WARP_SIZE;
}
}
}
__global__ void apply_rotary_pos_emb(__half* mixed_query,
__half* key_layer,
unsigned rotary_dim,
unsigned seq_len,
unsigned seq_offset,
unsigned num_heads,
unsigned head_size,
unsigned total_count)
{
#if __CUDA_ARCH__ >= 700
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int id = threadIdx.x;
int gid = id >> 5;
int lane = id & 0x1f;
unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid;
unsigned offset = head_id * head_size;
constexpr unsigned mask[32] = {0x1 | 0x1000, 0x2 | 0x2000, 0x4 | 0x4000, 0x8 | 0x8000,
0x10 | 0x10000, 0x20 | 0x20000, 0x40 | 0x40000, 0x80 | 0x80000,
0x100 | 0x100000, 0x200 | 0x200000, 0x400 | 0x400000, 0x800 | 0x800000,
0x1000 | 0x1, 0x2000 | 0x2, 0x4000 | 0x4, 0x8000 | 0x8,
0x10000 | 0x10, 0x20000 | 0x20, 0x40000 | 0x40, 0x80000 | 0x80,
0x100000 | 0x100, 0x200000 | 0x200, 0x400000 | 0x400, 0x800000 | 0x800,
0x1000000, 0x2000000, 0x4000000, 0x8000000,
0x10000000, 0x20000000, 0x40000000, 0x80000000};
unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset;
if (head_id < total_count) {
while (lane < rotary_dim) {
//float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim;
float inv_freq = (float)((lane % (rotary_dim >> 1)) * 2) / (float)rotary_dim;
inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id;
float q = (float)mixed_query[offset + lane];
float k = (float)key_layer[offset + lane];
float rotary_sign = (lane > 11 ? -1.0 : 1.0);
float q_rot = (q * rotary_sign);
float k_rot = (k * rotary_sign);
auto q_rot_tmp = lane < 12 ? __shfl_sync(mask[lane], q_rot, lane + 12) : __shfl_sync(mask[lane],
q_rot, lane - 12);//g.shfl_xor(q_rot, 12); auto k_rot_tmp = lane < 12 ? __shfl_sync(mask[lane],
k_rot, lane + 12) : __shfl_sync(mask[lane], k_rot, lane - 12);//g.shfl_xor(k_rot, 12); q = q *
cosf(inv_freq) + q_rot_tmp * sinf(inv_freq); k = k * cosf(inv_freq) + k_rot_tmp * sinf(inv_freq);
mixed_query[offset + lane] = (__half)q;
key_layer[offset + lane] = (__half)k;
lane += WARP_SIZE;
}
}
#endif
}
template <typename T>
void launch_apply_rotary_pos_emb(T* mixed_query,
T* key_layer,
unsigned head_size,
unsigned seq_len,
unsigned rotary_dim,
unsigned offset,
unsigned num_heads,
unsigned batch,
cudaStream_t stream)
{
int total_count = batch * num_heads * seq_len;
dim3 block_dims(1024);
dim3 grid_dims((total_count - 1) / MAX_WARP_NUM + 1); // (batch_size);
apply_rotary_pos_emb<<<grid_dims, block_dims, 0, stream>>>(
mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count);
}
template void launch_apply_rotary_pos_emb<float>(float*,
float*,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
cudaStream_t);
template void launch_apply_rotary_pos_emb<__half>(__half*,
__half*,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
unsigned,
cudaStream_t);
*/
|
c0ea75180f108a9955e012355e2b29ec812c98a0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <poisson.hpp>
#include <poisson.cuh>
#include <assertions.hpp>
#include <grid.hpp>
#include <solver.hpp>
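// convergence_test below runs the solver on a sequence of grids, halving h
// each time, and reports the observed order of accuracy log2(err_prev / err).
// For a second-order discretization (assumed here, not verified from this
// file alone) the reported rate should approach 2 as the grid is refined.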
template <typename S, typename P, typename T=double>
void convergence_test(const int num_grids, SolverOptions opts) {
T rate = 0.0;
T err1 = 0.0;
T modes = 1.0;
int l = 2;
T h = 1.0;
printf("MMS convergence test\n");
{
S tmp;
printf("Solver: %s \n", tmp.name());
}
printf("Grid Size \t Iterations \t Time (ms) \t Residual \t Error \t\t Rate \n");
for (int i = 0; i < num_grids; ++i) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
P problem(l, h, modes);
S solver(problem);
hipEventRecord(start);
SolverOutput out = solve(solver, problem, opts);
hipEventRecord(stop);
hipEventSynchronize(stop);
float elapsed = 0;
hipEventElapsedTime(&elapsed, start, stop);
rate = log2(err1 / out.error);
int n = (1 << l) + 1;
printf("%4d x %-4d \t %-7d \t %-5.5f \t %-5.5g \t %-5.5g \t %-5.5f \n",
n, n,
out.iterations, elapsed, out.residual, out.error, rate);
err1 = out.error;
l++;
h /= 2;
}
}
int main(int argc, char **argv) {
using Number = double;
SolverOptions opts;
opts.verbose = 1;
opts.info = 10;
opts.max_iterations = 1e4;
opts.eps = 1e-8;
opts.mms = 1;
int l = 4;
int n = (1 << l) + 1;
double h = 1.0 / (n - 1);
double modes = 1.0;
using Problem = Poisson<Number>;
{
using Problem = Poisson<Number>;
Problem problem(l, h, modes);
using Smoother=GaussSeidelRedBlack;
Smoother solver;
auto out = solve(solver, problem, opts);
printf("Iterations: %d, Residual: %g \n", out.iterations, out.residual);
}
{
Problem problem(l, h, modes);
using Smoother=GaussSeidelRedBlack;
using MG=Multigrid<Smoother, Problem, Number>;
MG mg(problem);
auto out = solve(mg, problem, opts);
printf("Iterations: %d, Residual: %g \n", out.iterations, out.residual);
}
{
using CUDAProblem = CUDAPoisson<L1NORM, Number>;
CUDAProblem problem(l, h, modes);
using CUDASmoother = CUDAGaussSeidelRedBlack;
CUDASmoother solver;
auto out = solve(solver, problem, opts);
printf("Iterations: %d, Residual: %g \n", out.iterations, out.residual);
}
{
using CUDAProblem = CUDAPoisson<L1NORM, Number>;
using CUDASmoother = CUDAGaussSeidelRedBlack;
using CUDAMG = CUDAMultigrid<CUDASmoother, CUDAProblem, Number>;
CUDAProblem problem(l, h, modes);
CUDAMG solver(problem);
auto out = solve(solver, problem, opts);
printf("Iterations: %d, Residual: %g \n", out.iterations, out.residual);
}
{
using Smoother=GaussSeidelRedBlack;
using MG=Multigrid<Smoother, Problem, Number>;
opts.verbose = 0;
int num_refinements = 12;
convergence_test<MG, Problem>(num_refinements, opts);
}
{
using CUDAProblem = CUDAPoisson<L1NORM, Number>;
using CUDASmoother = CUDAGaussSeidelRedBlack;
using CUDAMG = CUDAMultigrid<CUDASmoother, CUDAProblem, Number>;
opts.verbose = 0;
int num_refinements = 12;
convergence_test<CUDAMG, CUDAProblem>(num_refinements, opts);
}
}
| c0ea75180f108a9955e012355e2b29ec812c98a0.cu | #include <stdio.h>
#include <poisson.hpp>
#include <poisson.cuh>
#include <assertions.hpp>
#include <grid.hpp>
#include <solver.hpp>
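// Timing note: convergence_test below brackets solve() with CUDA events and
// reports cudaEventElapsedTime, which returns milliseconds; the
// cudaEventSynchronize call is what guarantees the stop event has completed
// before the elapsed time is read.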
template <typename S, typename P, typename T=double>
void convergence_test(const int num_grids, SolverOptions opts) {
T rate = 0.0;
T err1 = 0.0;
T modes = 1.0;
int l = 2;
T h = 1.0;
printf("MMS convergence test\n");
{
S tmp;
printf("Solver: %s \n", tmp.name());
}
printf("Grid Size \t Iterations \t Time (ms) \t Residual \t Error \t\t Rate \n");
for (int i = 0; i < num_grids; ++i) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
P problem(l, h, modes);
S solver(problem);
cudaEventRecord(start);
SolverOutput out = solve(solver, problem, opts);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float elapsed = 0;
cudaEventElapsedTime(&elapsed, start, stop);
rate = log2(err1 / out.error);
int n = (1 << l) + 1;
printf("%4d x %-4d \t %-7d \t %-5.5f \t %-5.5g \t %-5.5g \t %-5.5f \n",
n, n,
out.iterations, elapsed, out.residual, out.error, rate);
err1 = out.error;
l++;
h /= 2;
}
}
int main(int argc, char **argv) {
using Number = double;
SolverOptions opts;
opts.verbose = 1;
opts.info = 10;
opts.max_iterations = 1e4;
opts.eps = 1e-8;
opts.mms = 1;
int l = 4;
int n = (1 << l) + 1;
double h = 1.0 / (n - 1);
double modes = 1.0;
using Problem = Poisson<Number>;
{
using Problem = Poisson<Number>;
Problem problem(l, h, modes);
using Smoother=GaussSeidelRedBlack;
Smoother solver;
auto out = solve(solver, problem, opts);
printf("Iterations: %d, Residual: %g \n", out.iterations, out.residual);
}
{
Problem problem(l, h, modes);
using Smoother=GaussSeidelRedBlack;
using MG=Multigrid<Smoother, Problem, Number>;
MG mg(problem);
auto out = solve(mg, problem, opts);
printf("Iterations: %d, Residual: %g \n", out.iterations, out.residual);
}
{
using CUDAProblem = CUDAPoisson<L1NORM, Number>;
CUDAProblem problem(l, h, modes);
using CUDASmoother = CUDAGaussSeidelRedBlack;
CUDASmoother solver;
auto out = solve(solver, problem, opts);
printf("Iterations: %d, Residual: %g \n", out.iterations, out.residual);
}
{
using CUDAProblem = CUDAPoisson<L1NORM, Number>;
using CUDASmoother = CUDAGaussSeidelRedBlack;
using CUDAMG = CUDAMultigrid<CUDASmoother, CUDAProblem, Number>;
CUDAProblem problem(l, h, modes);
CUDAMG solver(problem);
auto out = solve(solver, problem, opts);
printf("Iterations: %d, Residual: %g \n", out.iterations, out.residual);
}
{
using Smoother=GaussSeidelRedBlack;
using MG=Multigrid<Smoother, Problem, Number>;
opts.verbose = 0;
int num_refinements = 12;
convergence_test<MG, Problem>(num_refinements, opts);
}
{
using CUDAProblem = CUDAPoisson<L1NORM, Number>;
using CUDASmoother = CUDAGaussSeidelRedBlack;
using CUDAMG = CUDAMultigrid<CUDASmoother, CUDAProblem, Number>;
opts.verbose = 0;
int num_refinements = 12;
convergence_test<CUDAMG, CUDAProblem>(num_refinements, opts);
}
}
|
bab14b7613e0d8005a13f742310fdfe30a83b854.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathPointwise.cu"
#else
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(real* out, real* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(real* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \
if (self_ == src) { \
if (!THC_pointwiseApply1(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(hipGetLastError()); \
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
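// Each invocation below expands to a THCTensor_(NAME) function that applies
// CFUNC element-wise: in place when self_ == src, otherwise into a freshly
// resized destination tensor. For example,
// IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log, THCNumerics<real>::log, Real)
// generates THCTensor_(log).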
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<real>::log, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<real>::lgamma, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<real>::log1p, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<real>::exp, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<real>::expm1, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<real>::cos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<real>::sin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<real>::sqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<real>::rsqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics<real>::ceil, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<real>::floor, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<real>::trunc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<real>::acos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<real>::cosh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<real>::asin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<real>::sinh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<real>::tan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<real>::atan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<real>::tanh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<real>::erf, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(erfinv, THCNumerics<real>::erfinv,Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<real>::round, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<real>::frac, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<real>::cinv, Real)
#endif
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( neg, THCNumerics<real>::neg, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<real>::abs, Real)
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorSignOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorSignOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, real min_value,
real max_value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorClampOp<real>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorClampOp<real>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(cross)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));
int i;
int nd = THCTensor_(nDimension)(state, x);
ptrdiff_t nelem = THCTensor_(nElement)(state, x);
THArgCheck(nd == THCTensor_(nDimension)(state, y), 1, "tensors must have same number of dimensions");
for (i = 0; i < nd; i++) {
THArgCheck(THCTensor_(size)(state, x, i) == THCTensor_(size)(state, y, i), 1, "dimension %i of x and y does not match", i);
if (dimension < 0 && THCTensor_(size)(state, x, i) == 3) {
dimension = i;
}
}
THArgCheck(dimension >= 0 && dimension < nd, 3, "dimension %d out of range", dimension+1);
THArgCheck(THCTensor_(size)(state, x, dimension) == 3, 3,
"dimension %d does not have size 3", dimension+1);
THCTensor_(resizeAs)(state, self, x);
int64_t sx = THCTensor_(stride)(state, x, dimension);
int64_t sy = THCTensor_(stride)(state, y, dimension);
int64_t so = THCTensor_(stride)(state, self, dimension);
THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
if (!THC_pointwiseApply3(state, nself, nx, ny, TensorCrossOp<real>(sx, sy, so))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCTensor_(free)(state, nx);
THCTensor_(free)(state, ny);
THCTensor_(free)(state, nself);
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(atan2)(THCState *state, THCTensor *self_, THCTensor *tx, THCTensor *ty)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, tx, ty));
THArgCheck(THCTensor_(nElement)(state, tx) ==
THCTensor_(nElement)(state, ty), 3, "sizes do not match");
THCTensor_(resizeAs)(state, self_, tx);
if (!THC_pointwiseApply3(state, self_, tx, ty, TensorATan2Op<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorSigmoidOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorSigmoidOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(digamma)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
if (!THC_pointwiseApply2(state, self_, src, TensorDigammaOp<real, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(polygamma)(THCState* state, THCTensor* self_, int64_t n, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
switch (n) {
case 0:
if (!THC_pointwiseApply2(state, self_, src, TensorDigammaOp<real, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
case 1:
if (!THC_pointwiseApply2(state, self_, src, TensorTrigammaOp<real, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
default:
THError("polygamma(n,x) is not implemented for n>=2");
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(lerp)(THCState *state, THCTensor *result, THCTensor *a, THCTensor *b, real w)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, result, a, b));
THArgCheck(THCTensor_(nElement)(state, a) ==
THCTensor_(nElement)(state, b), 3, "sizes do not match");
THCTensor_(resizeAs)(state, result, a);
if (!THC_pointwiseApply3(state, result, a, b, TensorLerpOp<real>(w))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
#endif
THC_API void
THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
if (value == ScalarConvert<int, real>::to(1)) {
// self += src2
if (!THC_pointwiseApply2(state, self_, src2, TensorAddOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self += value * src2
if (!THC_pointwiseApply2(state, self_, src2, TensorCAddOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
if (value == ScalarConvert<int, real>::to(1)) {
// self = src1 + src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self = src1 + value * src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCAddOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
if (value == ScalarConvert<int, real>::to(1)) {
// self -= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorSubOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self += -value * src2
if (!THC_pointwiseApply2(state, self_, src2,
TensorCAddOp<real>(
ScalarNegate<real>::to(value)))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
if (value == ScalarConvert<int, real>::to(1)) {
// self = src1 - src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorSubOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self = src1 - value * src2
if (!THC_pointwiseApply3(state, self_, src1, src2,
TensorCAddOp<real>(
ScalarNegate<real>::to(value)))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self *= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorMulOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 * src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorMulOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self = pow(self, src2)
if (!THC_pointwiseApply2(state, self_, src2, TensorCPowOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = pow(src1, src2)
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCPowOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, real value) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(1))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(2))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(3))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-1))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-2))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(1))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(2))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(3))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-1))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-2))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(tpow)(THCState *state, THCTensor *self_, real value, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorTPowOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorTPowOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self /= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorDivOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 / src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorDivOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("clshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self <<= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorLShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 << src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorLShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
THC_API void
THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("crshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self >>= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorRShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 >> src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorRShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
THC_API void
THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorMaxOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorMaxOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorMinOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorMinOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorCRemainderOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorCRemainderOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorCFmodOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorCFmodOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1(state, self, TensorMaxValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2(state, self, src, TensorMaxValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1(state, self, TensorMinValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2(state, self, src, TensorMinValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(addcmul)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCTensor_(resizeAs)(state, self_, t);
THCTensor_(copy)(state, self_, t);
}
else
{
THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1),
1, "sizes do not match");
}
THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCMulOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(addcdiv)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCTensor_(resizeAs)(state, self_, t);
THCTensor_(copy)(state, self_, t);
}
else
{
THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1),
1, "sizes do not match");
}
THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCDivOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitand is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self &= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorBitAndOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 & src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitAndOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
THC_API void
THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self |= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorBitOrOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 | src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitOrOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
THC_API void
THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self ^= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorBitXorOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 ^ src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitXorOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
#endif
| bab14b7613e0d8005a13f742310fdfe30a83b854.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathPointwise.cu"
#else
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(real* out, real* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(real* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \
if (self_ == src) { \
if (!THC_pointwiseApply1(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(cudaGetLastError()); \
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<real>::log, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<real>::lgamma, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<real>::log1p, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<real>::exp, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<real>::expm1, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<real>::cos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<real>::sin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<real>::sqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<real>::rsqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics<real>::ceil, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<real>::floor, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<real>::trunc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<real>::acos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<real>::cosh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<real>::asin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<real>::sinh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<real>::tan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<real>::atan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<real>::tanh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<real>::erf, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(erfinv, THCNumerics<real>::erfinv,Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<real>::round, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<real>::frac, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<real>::cinv, Real)
#endif
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( neg, THCNumerics<real>::neg, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<real>::abs, Real)
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorSignOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorSignOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, real min_value,
real max_value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorClampOp<real>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorClampOp<real>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(cross)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));
int i;
int nd = THCTensor_(nDimension)(state, x);
ptrdiff_t nelem = THCTensor_(nElement)(state, x);
THArgCheck(nd == THCTensor_(nDimension)(state, y), 1, "tensors must have same number of dimensions");
for (i = 0; i < nd; i++) {
THArgCheck(THCTensor_(size)(state, x, i) == THCTensor_(size)(state, y, i), 1, "dimension %i of x and y does not match", i);
if (dimension < 0 && THCTensor_(size)(state, x, i) == 3) {
dimension = i;
}
}
THArgCheck(dimension >= 0 && dimension < nd, 3, "dimension %d out of range", dimension+1);
THArgCheck(THCTensor_(size)(state, x, dimension) == 3, 3,
"dimension %d does not have size 3", dimension+1);
THCTensor_(resizeAs)(state, self, x);
int64_t sx = THCTensor_(stride)(state, x, dimension);
int64_t sy = THCTensor_(stride)(state, y, dimension);
int64_t so = THCTensor_(stride)(state, self, dimension);
THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
if (!THC_pointwiseApply3(state, nself, nx, ny, TensorCrossOp<real>(sx, sy, so))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCTensor_(free)(state, nx);
THCTensor_(free)(state, ny);
THCTensor_(free)(state, nself);
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(atan2)(THCState *state, THCTensor *self_, THCTensor *tx, THCTensor *ty)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, tx, ty));
THArgCheck(THCTensor_(nElement)(state, tx) ==
THCTensor_(nElement)(state, ty), 3, "sizes do not match");
THCTensor_(resizeAs)(state, self_, tx);
if (!THC_pointwiseApply3(state, self_, tx, ty, TensorATan2Op<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorSigmoidOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorSigmoidOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(digamma)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
if (!THC_pointwiseApply2(state, self_, src, TensorDigammaOp<real, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(polygamma)(THCState* state, THCTensor* self_, int64_t n, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
switch (n) {
case 0:
if (!THC_pointwiseApply2(state, self_, src, TensorDigammaOp<real, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
case 1:
if (!THC_pointwiseApply2(state, self_, src, TensorTrigammaOp<real, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
default:
THError("polygamma(n,x) is not implemented for n>=2");
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(lerp)(THCState *state, THCTensor *result, THCTensor *a, THCTensor *b, real w)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, result, a, b));
THArgCheck(THCTensor_(nElement)(state, a) ==
THCTensor_(nElement)(state, b), 3, "sizes do not match");
THCTensor_(resizeAs)(state, result, a);
if (!THC_pointwiseApply3(state, result, a, b, TensorLerpOp<real>(w))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
#endif
THC_API void
THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
if (value == ScalarConvert<int, real>::to(1)) {
// self += src2
if (!THC_pointwiseApply2(state, self_, src2, TensorAddOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self += value * src2
if (!THC_pointwiseApply2(state, self_, src2, TensorCAddOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
if (value == ScalarConvert<int, real>::to(1)) {
// self = src1 + src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self = src1 + value * src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCAddOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
if (value == ScalarConvert<int, real>::to(1)) {
// self -= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorSubOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self += -value * src2
if (!THC_pointwiseApply2(state, self_, src2,
TensorCAddOp<real>(
ScalarNegate<real>::to(value)))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
if (value == ScalarConvert<int, real>::to(1)) {
// self = src1 - src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorSubOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self = src1 - value * src2
if (!THC_pointwiseApply3(state, self_, src1, src2,
TensorCAddOp<real>(
ScalarNegate<real>::to(value)))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self *= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorMulOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 * src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorMulOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self = pow(self, src2)
if (!THC_pointwiseApply2(state, self_, src2, TensorCPowOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = pow(src1, src2)
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCPowOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, real value) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(1))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(2))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(3))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-1))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-2))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(1))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(2))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(3))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-1))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-2))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(tpow)(THCState *state, THCTensor *self_, real value, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorTPowOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorTPowOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self /= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorDivOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 / src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorDivOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("clshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self <<= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorLShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 << src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorLShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
THC_API void
THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("crshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self >>= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorRShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 >> src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorRShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
THC_API void
THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorMaxOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorMaxOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorMinOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorMinOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorCRemainderOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorCRemainderOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorCFmodOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorCFmodOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1(state, self, TensorMaxValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2(state, self, src, TensorMaxValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1(state, self, TensorMinValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2(state, self, src, TensorMinValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(addcmul)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCTensor_(resizeAs)(state, self_, t);
THCTensor_(copy)(state, self_, t);
}
else
{
THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1),
1, "sizes do not match");
}
THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCMulOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(addcdiv)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCTensor_(resizeAs)(state, self_, t);
THCTensor_(copy)(state, self_, t);
}
else
{
THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1),
1, "sizes do not match");
}
THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCDivOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitand is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self &= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorBitAndOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 & src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitAndOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
THC_API void
THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self |= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorBitOrOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 | src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitOrOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
THC_API void
THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self ^= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorBitXorOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 ^ src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitXorOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
#endif
|
26e1cb0bdbdeebe5430fb529862ade65d89031d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/magma_zdiagcheck.cu, normal z -> d, Thu Oct 8 23:05:48 2020
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// kernel
__global__ void
zdiagcheck_kernel(
int num_rows,
int num_cols,
magmaDouble_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magma_int_t * dinfo )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
int localinfo = 1;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
// check whether there exists a nonzero diagonal entry
for( j=start; j<end; j++){
if( (dcolind[j] == row) && (dval[j] != MAGMA_D_ZERO) ){
localinfo = 0;
}
}
        // if no nonzero diagonal entry was found, record the error code
if( localinfo == 1 ){
dinfo[0] = -3009;
}
}
}
/**
Purpose
-------
    This routine checks whether a CSR matrix has a zero on the diagonal,
    which can mean either a missing diagonal entry or an explicit zero.
Arguments
---------
@param[in]
dA magma_d_matrix
matrix in CSR format
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_ddiagcheck(
magma_d_matrix dA,
magma_queue_t queue )
{
magma_int_t info = 0;
magma_int_t *hinfo = NULL;
magma_int_t * dinfo = NULL;
dim3 grid( magma_ceildiv( dA.num_rows, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
CHECK( magma_imalloc( &dinfo, 1 ) );
CHECK( magma_imalloc_cpu( &hinfo, 1 ) );
hinfo[0] = 0;
magma_isetvector( 1, hinfo, 1, dinfo, 1, queue );
hipLaunchKernelGGL(( zdiagcheck_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
dA.num_rows, dA.num_cols, dA.dval, dA.drow, dA.dcol, dinfo );
info = hinfo[0];
magma_igetvector( 1, dinfo, 1, hinfo, 1, queue );
info = hinfo[0];
cleanup:
magma_free( dinfo );
magma_free_cpu( hinfo );
return info;
}
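/* Editorial usage sketch (not part of the original MAGMA source): a minimal
   illustration of how magma_ddiagcheck might be called once a CSR matrix dA
   has been assembled on the device. The wrapper name and the printed message
   are assumptions made for illustration; only magma_ddiagcheck and the -3009
   error code written by zdiagcheck_kernel come from the code above. */
extern "C" magma_int_t
example_ddiagcheck_usage(
    magma_d_matrix dA,
    magma_queue_t queue )
{
    magma_int_t info = magma_ddiagcheck( dA, queue );
    if ( info == -3009 ) {
        // at least one row has a missing or explicitly zero diagonal entry
        printf( "%% zero diagonal entry detected\n" );
    }
    return info;
}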
| 26e1cb0bdbdeebe5430fb529862ade65d89031d8.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/magma_zdiagcheck.cu, normal z -> d, Thu Oct 8 23:05:48 2020
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// kernel
__global__ void
zdiagcheck_kernel(
int num_rows,
int num_cols,
magmaDouble_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magma_int_t * dinfo )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
int localinfo = 1;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
// check whether there exists a nonzero diagonal entry
for( j=start; j<end; j++){
if( (dcolind[j] == row) && (dval[j] != MAGMA_D_ZERO) ){
localinfo = 0;
}
}
        // if no nonzero diagonal entry was found, record the error code
if( localinfo == 1 ){
dinfo[0] = -3009;
}
}
}
/**
Purpose
-------
    This routine checks whether a CSR matrix has a zero on the diagonal,
    which can mean either a missing diagonal entry or an explicit zero.
Arguments
---------
@param[in]
dA magma_d_matrix
matrix in CSR format
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_ddiagcheck(
magma_d_matrix dA,
magma_queue_t queue )
{
magma_int_t info = 0;
magma_int_t *hinfo = NULL;
magma_int_t * dinfo = NULL;
dim3 grid( magma_ceildiv( dA.num_rows, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
CHECK( magma_imalloc( &dinfo, 1 ) );
CHECK( magma_imalloc_cpu( &hinfo, 1 ) );
hinfo[0] = 0;
magma_isetvector( 1, hinfo, 1, dinfo, 1, queue );
zdiagcheck_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( dA.num_rows, dA.num_cols, dA.dval, dA.drow, dA.dcol, dinfo );
info = hinfo[0];
magma_igetvector( 1, dinfo, 1, hinfo, 1, queue );
info = hinfo[0];
cleanup:
magma_free( dinfo );
magma_free_cpu( hinfo );
return info;
}
|
a59e8c8dbc7a606822664e81a88992944ce6d108.hip | // !!! This is a file automatically generated by hipify!!!
/*
This version of the program calculates the X-X exchange constant. Coordinates: (z_e1, z_h1, z_e2, z_h2) + (r_e1h1, r_e2h2, xi)^(2d), with xi = R1 - R2.
In this version, the parameters dZ, sizeRho, sizeZe, and sizeZh are retrieved automatically from the text files produced by the 'run_one_direct_calc' program.
The paths must be rewritten for each machine's folder structure and project location.
It is recommended that the wavefunction (wf) file names contain only the width of the QW (e.g. "WQW=Xnm.bin").
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hiprand/hiprand.h"
#include "hiprand/hiprand_kernel.h"
#include "constants_types.h"
#include "fileFunctions.h"
#include "excitonWavefunctionFunctions.h"
#include "gpu_functions.h"
#include "gpuReductionSum.h"
int main()
{
time_t tic;
time_t toc;
time_t seed;
FILE* filenameFile; // file with filenames to process
filenameFile = fopen("filenames.txt", "r");
int bufferLength = 255;
char* buffer = new char[bufferLength];
char* X_wf_params_filename = new char[bufferLength];
char* X_wf_filename = new char[bufferLength];
int counter = 0;
while (fgets(buffer, bufferLength, filenameFile)) {
if (strcmp(buffer, "end")) {
counter++;
}
}
printf("Calculations to process: %d\n\n", counter);
rewind(filenameFile);
printFile("filenames.txt", filenameFile);
printf("==================================================================================================\n");
while (fgets(buffer, bufferLength, filenameFile)) {
if (strcmp(buffer, "end")) { // returns 0 when buffer == "end"
/* create filenames for *.txt and *.bin files */
strncpy(X_wf_params_filename, buffer, strlen(buffer) - 1); // a filename shouldn't end with \n
X_wf_params_filename[strlen(buffer) - 1] = '\0'; // strncpy doesn't append 'filename' with a proper end-symbol, do it manually
strcat(X_wf_params_filename, ".txt");
strncpy(X_wf_filename, buffer, strlen(buffer) - 1);
X_wf_filename[strlen(buffer) - 1] = '\0';
strcat(X_wf_filename, ".bin");
/* load X wf parameters -------------------------------------------------------------------------------------------------------------------------------- */
printf(" Exciton wavefunction parameters *.txt file:\n\t'%s'\n", X_wf_params_filename);
wf_parameter_struct* X_wf_params;
wf_parameter_struct* gpu_X_wf_params;
X_wf_params = new wf_parameter_struct();
if (loadExcitonWfParams(X_wf_params_filename, X_wf_params) == 0) {// loaded exc. wf parameters into cpu memory
delete X_wf_params;
printf("\nSkipping to next file...\n==================================================================================================\n");
continue;
}
gpuErrchk(hipMalloc((void**)&gpu_X_wf_params, sizeof(wf_parameter_struct)));
gpuErrchk(hipMemcpy(gpu_X_wf_params, X_wf_params, sizeof(wf_parameter_struct), hipMemcpyHostToDevice)); // copied the params into gpu memory as well
/* load X wavefunction --------------------------------------------------------------------------------------------------------------------------------- */
double* wf, * gpu_wf;
wf = new double[X_wf_params->sizeRho * X_wf_params->sizeZe * X_wf_params->sizeZh + 1];
unsigned long long file_size;
printf(" Exciton wavefunction *.bin file:\n\t'%s'\n", X_wf_filename);
load(X_wf_filename, wf, &file_size, X_wf_params);
normalize(wf, numThrowsNorm, X_wf_params);
checkNormalizationMC(wf, numThrowsNorm / 10, X_wf_params); // check normalization using MC integration in (rho, phi, ze, zh) coordinates
// copy the normalized wave function to device memory
gpuErrchk(hipMalloc((void**)&gpu_wf, (X_wf_params->sizeRho * X_wf_params->sizeZe * X_wf_params->sizeZh + 1) * sizeof(double)));
gpuErrchk(hipMemcpy(gpu_wf, wf, (X_wf_params->sizeRho * X_wf_params->sizeZe * X_wf_params->sizeZh + 1) * sizeof(double), hipMemcpyHostToDevice));
delete[] wf;
/* done loading X wf -------------------------------------------------------------------------------------------------------------------------------------- */
int blockSize = 512;
int numBlocks = (N + blockSize - 1) / blockSize;
for (int num_q = 0; num_q < 19; num_q++) { // loop for q dependency
hiprandState_t* states;
gpuErrchk(hipMalloc((void**)&states, N * sizeof(hiprandState_t))); // space for random states
double cpu_f; // variable for sum of integrand function values inside a run on cpu
double cpu_f2; // var for the sum of it's squares (to calculate error later) on cpu
double intValue = 0.0; // vars to accumulate final values across all runs
double intError = 0.0;
double temp_res = 0; // vars for storing int. estimates in real-time
double temp_err = 0;
double* gpu_f; // array for integrand function values at N random points on gpu
double* gpu_f2; // array for it's squares (to calculate error later) on gpu
gpuErrchk(hipMalloc((void**)&gpu_f, numPoints * sizeof(double)));
gpuErrchk(hipMalloc((void**)&gpu_f2, numPoints * sizeof(double)));
double* gpu_f_out;
double* gpu_f2_out;
gpuErrchk(hipMalloc((void**)&gpu_f_out, numPoints * sizeof(double)));
gpuErrchk(hipMalloc((void**)&gpu_f2_out, numPoints * sizeof(double)));
//printf("\n Rx = %e meV\n\n", Rx / e * 1e3);
//printf("\n a0_hh = %e m", a0_hh);
//printf("\n lambda_2d = %e m\n\n", lambda_2d);
printf("--------------------------------------------------------------------------------------------\n");
printf(" Calc parameters:\n\tnumPointsNorm = %.3e\n\tnumPoints = %.3e, numRun = %.1e, max total points = %.3e, \n\ttol = %.1e\tq = %3.1f * a0_hh = %.3e\n", (double)numThrowsNorm, (double)numPoints, (double)numRun, (double)numPoints * numRun, tol, q[num_q] * a0_hh, q[num_q]);
printf("--------------------------------------------------------------------------------------------\n");
printf(" Calculation controls:\n");
printf(" \t'p' -- pause\n");
printf(" \t'n' -- skip to next filename\n");
printf(" \t'b' -- break\n");
printf(" ______________________________________________________________________\n");
printf(" J_X-X & error, mueV*mum^2 | total points | elapsed time\n");
char filename[] = "integral__.dat";
FILE* F = fopen(filename, "a");
if (F == NULL)
printf("Failed opening file \"%s\"! \n", filename);
fprintf(F, "\n============================================================================================\n");
fprintf(F, "'%s'\n", X_wf_params_filename);
fprintf(F, " Calc parameters:\n\tnumPointsNorm = %.3e\n\tnumPoints = %.3e, numRun = %.1e, max total points = %.3e, \n\ttol = %.1e\tq = %3.1f * a0_hh = %.3e\n", (double)numThrowsNorm, (double)numPoints, (double)numRun, (double)numPoints * numRun, tol, q[num_q] * a0_hh, q[num_q]);
//fprintf(F, "--------------------------------------------------------------------------------------------\n");
fprintf(F, " ______________________________________________________________________\n");
fprintf(F, " J_X-X & error, mueV*mum^2 | total points | elapsed time\n");
fclose(F);
tic = clock();
seed = tic + time(0);
initRand << <numBlocks, blockSize >> > (seed, 0, states); // invoke the GPU to initialize all of the random states
gpuErrchk(hipDeviceSynchronize());
printf("\t ");
long long int runCounter;
for (runCounter = 0; runCounter < numRun; runCounter++) {
// initRand << <numBlocks, blockSize >> > (time(0)+clock(), 0, states); // invoke the GPU to initialize all of the random states
// gpuErrchk(hipDeviceSynchronize());
// calculate exciton coulomb energy for testing:
//intMC_J_xx_exch << <numBlocks, blockSize >> > (states, gpu_f, gpu_f2, gpu_wf, gpu_X_wf_params, X_wf_params->L, dim, q[num_q]); // accumulate func and func^2 evaluations in gpu_f and gpu_f2
intMC_J_xx_dir << <numBlocks, blockSize >> > (states, gpu_f, gpu_f2, gpu_wf, gpu_X_wf_params, X_wf_params->L, dim, q[num_q]); // accumulate func and func^2 evaluations in gpu_f and gpu_f2
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
sumGPUDouble(gpu_f, gpu_f_out, numPoints);
sumGPUDouble(gpu_f2, gpu_f2_out, numPoints);
/* copy back */
gpuErrchk(hipMemcpy(&cpu_f, gpu_f_out, sizeof(double), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(&cpu_f2, gpu_f2_out, sizeof(double), hipMemcpyDeviceToHost));
intValue += cpu_f;
intError += cpu_f2;
// real-time output
if (runCounter % 100 == 99) { // we lose speed if we printf on every run
for (int bCount = 0; bCount < (150); bCount++) // erase old line
printf("\b");
temp_res = X_wf_params->V_MC * intValue / ((runCounter + 1) * numPoints);
temp_err = 3 * X_wf_params->V_MC / sqrt((runCounter + 1) * numPoints) * sqrt(intError / ((runCounter + 1) * numPoints) - intValue * intValue / ((runCounter + 1) * numPoints) / ((runCounter + 1) * numPoints));
printf("\t%13e\t%12e", temp_res, temp_err);
printf("\t %9e", (double)(runCounter + 1) * numPoints);
toc = clock();
printf("\t %7e s", double(toc - tic) / CLOCKS_PER_SEC);
//printf("\tJ_ex = %13e pm %12e mueV ", X_wf_params->V_MC_Ex * intValue / ((runCounter + 1) * numPoints), 3 * X_wf_params->V_MC_Ex / sqrt((runCounter + 1) * numPoints) * sqrt(intError / ((runCounter + 1) * numPoints) - intValue * intValue / ((runCounter + 1) * numPoints) / ((runCounter + 1) * numPoints)));
if (temp_err < tol) {
printf("\n--------------------------------------------------------------------------------------------\n");
printf("\n\tDesired tolerance reached: temp_err < %.4f\n\n", tol);
printf("=============================================================================================================================\n\n\n");
break; // skip to end of this calculation
}
// keyboard control
if (_kbhit()) {
char kb = _getch(); // consume the char from the buffer, otherwise _kbhit remains != 0
if (kb == 'p') {
char filename[] = "integral__.dat";
FILE* F = fopen(filename, "a");
if (F == NULL)
printf("Failed opening file \"%s\"! \n", filename);
fprintf(F, " ______________________________________________________________________\n");
fprintf(F, " J_X-X & error, mueV*mum^2 | total points | elapsed time\n");
fprintf(F, "\t%13e\t %12e", temp_res, temp_err);
fprintf(F, "\t %9e", (double)(runCounter + 1) * numPoints);
fprintf(F, "\t %7e s\n", double(toc - tic) / CLOCKS_PER_SEC);
fprintf(F, "--------------------------------------------------------------------------------------------\n");
fclose(F);
printf("\n\n Program paused: intermediate results appended to file \"%s\".\n", filename);
printf(" To continue, press any key.\n\n");
_getch(); // wait for a second key press to continue calculation
printf(" ______________________________________________________________________\n");
printf(" J_X-X & error, mueV*mum^2 | total points | elapsed time\n");
}
else if (kb == 'n') {
printf("\n=============================================================================================================================\n\n");
printf(" Skipping to next calculation...\n\n");
printf("=============================================================================================================================\n\n\n");
break;// skip to end of this calculation
}
else if (kb == 'b') {
printf("\n=============================================================================================================================\n\n");
printf(" Program stopped.\n\n");
printf("=============================================================================================================================\n\n\n");
exit(10);
}
}
}
}
F = fopen(filename, "a");
if (F == NULL)
printf("Failed opening file \"%s\"! \n", filename);
fprintf(F, "Final value:\n");
//fprintf(F, " ______________________________________________________________________\n");
//fprintf(F, " J_X-X & error, mueV*mum^2 | total points | elapsed time\n");
fprintf(F, "\t%13e\t %12e", temp_res, temp_err);
fprintf(F, "\t %9e", (double)(runCounter + 1) * numPoints);
fprintf(F, "\t %7e s\n", double(toc - tic) / CLOCKS_PER_SEC);
fprintf(F, "====================================================================================================\n");
fclose(F);
gpuErrchk(hipFree(states));
gpuErrchk(hipFree(gpu_f));
gpuErrchk(hipFree(gpu_f2));
gpuErrchk(hipFree(gpu_f_out));
gpuErrchk(hipFree(gpu_f2_out));
}
delete[] X_wf_params;
gpuErrchk(hipFree(gpu_wf));
gpuErrchk(hipFree(gpu_X_wf_params));
}
}
printf("\n\n\n\t\tAll calculations processed.\n\n");
fclose(filenameFile);
delete[] buffer;
delete[] X_wf_params_filename;
delete[] X_wf_filename;
return 0;
} | a59e8c8dbc7a606822664e81a88992944ce6d108.cu | /*
This version of the program calculates the X-X exchange constant. Coordinates: (z_e1, z_h1, z_e2, z_h2) + (r_e1h1, r_e2h2, xi)^(2d), xi = R1 - R2
In this version, the parameters dZ, sizeRho, sizeZe, sizeZh are retrieved from text files (made by the 'run_one_direct_calc' program) automatically
The paths must be rewritten for each machine's folder structure and project location
It is recommended that the wf file names contain only the width of the QW (e.g. "WQW=Xnm.bin")
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand.h"
#include "curand_kernel.h"
#include "constants_types.h"
#include "fileFunctions.h"
#include "excitonWavefunctionFunctions.h"
#include "gpu_functions.h"
#include "gpuReductionSum.h"
int main()
{
time_t tic;
time_t toc;
time_t seed;
FILE* filenameFile; // file with filenames to process
filenameFile = fopen("filenames.txt", "r");
int bufferLength = 255;
char* buffer = new char[bufferLength];
char* X_wf_params_filename = new char[bufferLength];
char* X_wf_filename = new char[bufferLength];
int counter = 0;
while (fgets(buffer, bufferLength, filenameFile)) {
if (strcmp(buffer, "end")) {
counter++;
}
}
printf("Calculations to process: %d\n\n", counter);
rewind(filenameFile);
printFile("filenames.txt", filenameFile);
printf("==================================================================================================\n");
while (fgets(buffer, bufferLength, filenameFile)) {
if (strcmp(buffer, "end")) { // returns 0 when buffer == "end"
/* create filenames for *.txt and *.bin files */
strncpy(X_wf_params_filename, buffer, strlen(buffer) - 1); // a filename shouldn't end with \n
X_wf_params_filename[strlen(buffer) - 1] = '\0'; // strncpy doesn't append 'filename' with a proper end-symbol, do it manually
strcat(X_wf_params_filename, ".txt");
strncpy(X_wf_filename, buffer, strlen(buffer) - 1);
X_wf_filename[strlen(buffer) - 1] = '\0';
strcat(X_wf_filename, ".bin");
/* load X wf parameters -------------------------------------------------------------------------------------------------------------------------------- */
printf(" Exciton wavefunction parameters *.txt file:\n\t'%s'\n", X_wf_params_filename);
wf_parameter_struct* X_wf_params;
wf_parameter_struct* gpu_X_wf_params;
X_wf_params = new wf_parameter_struct();
if (loadExcitonWfParams(X_wf_params_filename, X_wf_params) == 0) {// loaded exc. wf parameters into cpu memory
delete X_wf_params;
printf("\nSkipping to next file...\n==================================================================================================\n");
continue;
}
gpuErrchk(cudaMalloc((void**)&gpu_X_wf_params, sizeof(wf_parameter_struct)));
gpuErrchk(cudaMemcpy(gpu_X_wf_params, X_wf_params, sizeof(wf_parameter_struct), cudaMemcpyHostToDevice)); // copied the params into gpu memory as well
/* load X wavefunction --------------------------------------------------------------------------------------------------------------------------------- */
double* wf, * gpu_wf;
wf = new double[X_wf_params->sizeRho * X_wf_params->sizeZe * X_wf_params->sizeZh + 1];
unsigned long long file_size;
printf(" Exciton wavefunction *.bin file:\n\t'%s'\n", X_wf_filename);
load(X_wf_filename, wf, &file_size, X_wf_params);
normalize(wf, numThrowsNorm, X_wf_params);
checkNormalizationMC(wf, numThrowsNorm / 10, X_wf_params); // check normalization using MC integration in (rho, phi, ze, zh) coordinates
// copy the normalized wave function to device memory
gpuErrchk(cudaMalloc((void**)&gpu_wf, (X_wf_params->sizeRho * X_wf_params->sizeZe * X_wf_params->sizeZh + 1) * sizeof(double)));
gpuErrchk(cudaMemcpy(gpu_wf, wf, (X_wf_params->sizeRho * X_wf_params->sizeZe * X_wf_params->sizeZh + 1) * sizeof(double), cudaMemcpyHostToDevice));
delete[] wf;
/* done loading X wf -------------------------------------------------------------------------------------------------------------------------------------- */
int blockSize = 512;
int numBlocks = (N + blockSize - 1) / blockSize;
for (int num_q = 0; num_q < 19; num_q++) { // loop for q dependency
curandState_t* states;
gpuErrchk(cudaMalloc((void**)&states, N * sizeof(curandState_t))); // space for random states
double cpu_f; // variable for sum of integrand function values inside a run on cpu
double cpu_f2; // var for the sum of its squares (to calculate error later) on cpu
double intValue = 0.0; // vars to accumulate final values across all runs
double intError = 0.0;
double temp_res = 0; // vars for storing int. estimates in real-time
double temp_err = 0;
double* gpu_f; // array for integrand function values at N random points on gpu
double* gpu_f2; // array for its squares (to calculate error later) on gpu
gpuErrchk(cudaMalloc((void**)&gpu_f, numPoints * sizeof(double)));
gpuErrchk(cudaMalloc((void**)&gpu_f2, numPoints * sizeof(double)));
double* gpu_f_out;
double* gpu_f2_out;
gpuErrchk(cudaMalloc((void**)&gpu_f_out, numPoints * sizeof(double)));
gpuErrchk(cudaMalloc((void**)&gpu_f2_out, numPoints * sizeof(double)));
//printf("\n Rx = %e meV\n\n", Rx / e * 1e3);
//printf("\n a0_hh = %e m", a0_hh);
//printf("\n lambda_2d = %e m\n\n", lambda_2d);
printf("--------------------------------------------------------------------------------------------\n");
printf(" Calc parameters:\n\tnumPointsNorm = %.3e\n\tnumPoints = %.3e, numRun = %.1e, max total points = %.3e, \n\ttol = %.1e\tq = %3.1f * a0_hh = %.3e\n", (double)numThrowsNorm, (double)numPoints, (double)numRun, (double)numPoints * numRun, tol, q[num_q] * a0_hh, q[num_q]);
printf("--------------------------------------------------------------------------------------------\n");
printf(" Calculation controls:\n");
printf(" \t'p' -- pause\n");
printf(" \t'n' -- skip to next filename\n");
printf(" \t'b' -- break\n");
printf(" ______________________________________________________________________\n");
printf(" J_X-X & error, mueV*mum^2 | total points | elapsed time\n");
char filename[] = "integral__.dat";
FILE* F = fopen(filename, "a");
if (F == NULL)
printf("Failed opening file \"%s\"! \n", filename);
fprintf(F, "\n============================================================================================\n");
fprintf(F, "'%s'\n", X_wf_params_filename);
fprintf(F, " Calc parameters:\n\tnumPointsNorm = %.3e\n\tnumPoints = %.3e, numRun = %.1e, max total points = %.3e, \n\ttol = %.1e\tq = %3.1f * a0_hh = %.3e\n", (double)numThrowsNorm, (double)numPoints, (double)numRun, (double)numPoints * numRun, tol, q[num_q] * a0_hh, q[num_q]);
//fprintf(F, "--------------------------------------------------------------------------------------------\n");
fprintf(F, " ______________________________________________________________________\n");
fprintf(F, " J_X-X & error, mueV*mum^2 | total points | elapsed time\n");
fclose(F);
tic = clock();
seed = tic + time(0);
initRand << <numBlocks, blockSize >> > (seed, 0, states); // invoke the GPU to initialize all of the random states
gpuErrchk(cudaDeviceSynchronize());
printf("\t ");
long long int runCounter;
for (runCounter = 0; runCounter < numRun; runCounter++) {
// initRand << <numBlocks, blockSize >> > (time(0)+clock(), 0, states); // invoke the GPU to initialize all of the random states
// gpuErrchk(cudaDeviceSynchronize());
// calculate exciton coulomb energy for testing:
//intMC_J_xx_exch << <numBlocks, blockSize >> > (states, gpu_f, gpu_f2, gpu_wf, gpu_X_wf_params, X_wf_params->L, dim, q[num_q]); // accumulate func and func^2 evaluations in gpu_f and gpu_f2
intMC_J_xx_dir << <numBlocks, blockSize >> > (states, gpu_f, gpu_f2, gpu_wf, gpu_X_wf_params, X_wf_params->L, dim, q[num_q]); // accumulate func and func^2 evaluations in gpu_f and gpu_f2
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
sumGPUDouble(gpu_f, gpu_f_out, numPoints);
sumGPUDouble(gpu_f2, gpu_f2_out, numPoints);
/* copy back */
gpuErrchk(cudaMemcpy(&cpu_f, gpu_f_out, sizeof(double), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(&cpu_f2, gpu_f2_out, sizeof(double), cudaMemcpyDeviceToHost));
intValue += cpu_f;
intError += cpu_f2;
// real-time output
if (runCounter % 100 == 99) { // we lose speed if we printf on every run
for (int bCount = 0; bCount < (150); bCount++) // erase old line
printf("\b");
temp_res = X_wf_params->V_MC * intValue / ((runCounter + 1) * numPoints);
temp_err = 3 * X_wf_params->V_MC / sqrt((runCounter + 1) * numPoints) * sqrt(intError / ((runCounter + 1) * numPoints) - intValue * intValue / ((runCounter + 1) * numPoints) / ((runCounter + 1) * numPoints));
printf("\t%13e\t%12e", temp_res, temp_err);
printf("\t %9e", (double)(runCounter + 1) * numPoints);
toc = clock();
printf("\t %7e s", double(toc - tic) / CLOCKS_PER_SEC);
//printf("\tJ_ex = %13e pm %12e mueV ", X_wf_params->V_MC_Ex * intValue / ((runCounter + 1) * numPoints), 3 * X_wf_params->V_MC_Ex / sqrt((runCounter + 1) * numPoints) * sqrt(intError / ((runCounter + 1) * numPoints) - intValue * intValue / ((runCounter + 1) * numPoints) / ((runCounter + 1) * numPoints)));
if (temp_err < tol) {
printf("\n--------------------------------------------------------------------------------------------\n");
printf("\n\tDesired tolerance reached: temp_err < %.4f\n\n", tol);
printf("=============================================================================================================================\n\n\n");
break; // skip to end of this calculation
}
// keyboard control
if (_kbhit()) {
char kb = _getch(); // consume the char from the buffer, otherwise _kbhit remains != 0
if (kb == 'p') {
char filename[] = "integral__.dat";
FILE* F = fopen(filename, "a");
if (F == NULL)
printf("Failed opening file \"%s\"! \n", filename);
fprintf(F, " ______________________________________________________________________\n");
fprintf(F, " J_X-X & error, mueV*mum^2 | total points | elapsed time\n");
fprintf(F, "\t%13e\t %12e", temp_res, temp_err);
fprintf(F, "\t %9e", (double)(runCounter + 1) * numPoints);
fprintf(F, "\t %7e s\n", double(toc - tic) / CLOCKS_PER_SEC);
fprintf(F, "--------------------------------------------------------------------------------------------\n");
fclose(F);
printf("\n\n Program paused: intermediate results appended to file \"%s\".\n", filename);
printf(" To continue, press any key.\n\n");
_getch(); // wait for a second key press to continue calculation
printf(" ______________________________________________________________________\n");
printf(" J_X-X & error, mueV*mum^2 | total points | elapsed time\n");
}
else if (kb == 'n') {
printf("\n=============================================================================================================================\n\n");
printf(" Skipping to next calculation...\n\n");
printf("=============================================================================================================================\n\n\n");
break;// skip to end of this calculation
}
else if (kb == 'b') {
printf("\n=============================================================================================================================\n\n");
printf(" Program stopped.\n\n");
printf("=============================================================================================================================\n\n\n");
exit(10);
}
}
}
}
F = fopen(filename, "a");
if (F == NULL)
printf("Failed opening file \"%s\"! \n", filename);
fprintf(F, "Final value:\n");
//fprintf(F, " ______________________________________________________________________\n");
//fprintf(F, " J_X-X & error, mueV*mum^2 | total points | elapsed time\n");
fprintf(F, "\t%13e\t %12e", temp_res, temp_err);
fprintf(F, "\t %9e", (double)(runCounter + 1) * numPoints);
fprintf(F, "\t %7e s\n", double(toc - tic) / CLOCKS_PER_SEC);
fprintf(F, "====================================================================================================\n");
fclose(F);
gpuErrchk(cudaFree(states));
gpuErrchk(cudaFree(gpu_f));
gpuErrchk(cudaFree(gpu_f2));
gpuErrchk(cudaFree(gpu_f_out));
gpuErrchk(cudaFree(gpu_f2_out));
}
delete[] X_wf_params;
gpuErrchk(cudaFree(gpu_wf));
gpuErrchk(cudaFree(gpu_X_wf_params));
}
}
printf("\n\n\n\t\tAll calculations processed.\n\n");
fclose(filenameFile);
delete[] buffer;
delete[] X_wf_params_filename;
delete[] X_wf_filename;
return 0;
} |
68b9ff30aca04d8f5e3359fd1e7fc173a8328b18.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define m1 0x5555555555555555
#define m2 0x3333333333333333
#define m4 0x0f0f0f0f0f0f0f0f
#define h01 0x0101010101010101
#define BLOCK_SIZE 256
// reference implementation
int popcount_ref(unsigned long x)
{
int count;
for (count=0; x; count++)
x &= x - 1;
return count;
}
// CUDA kernels
__global__ void
pc1 (const unsigned long* data, int* r, const int length)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i >= length) return;
unsigned long x = data[i];
x -= (x >> 1) & m1; //put count of each 2 bits into those 2 bits
x = (x & m2) + ((x >> 2) & m2); //put count of each 4 bits into those 4 bits
x = (x + (x >> 4)) & m4; //put count of each 8 bits into those 8 bits
x += x >> 8; //put count of each 16 bits into their lowest 8 bits
x += x >> 16; //put count of each 32 bits into their lowest 8 bits
x += x >> 32; //put count of each 64 bits into their lowest 8 bits
r[i] = x & 0x7f;
}
__global__ void
pc2 (const unsigned long* data, int* r, const int length)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i >= length) return;
unsigned long x = data[i];
x -= (x >> 1) & m1; //put count of each 2 bits into those 2 bits
x = (x & m2) + ((x >> 2) & m2); //put count of each 4 bits into those 4 bits
x = (x + (x >> 4)) & m4; //put count of each 8 bits into those 8 bits
r[i] = (x * h01) >> 56; //returns left 8 bits of x + (x<<8) + (x<<16) + (x<<24) + ...
}
__global__ void
pc3 (const unsigned long* data, int* r, const int length)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i >= length) return;
char count;
unsigned long x = data[i];
for (count=0; x; count++) x &= x - 1;
r[i] = count;
}
__global__ void
pc4 (const unsigned long* data, int* r, const int length)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i >= length) return;
unsigned long x = data[i];
char cnt = 0;
for (char b = 0; b < 64; b++) // 'b' avoids shadowing the thread index 'i' above
{
cnt = cnt + (x & 0x1);
x = x >> 1;
}
r[i] = cnt;
}
__global__ void
pc5 (const unsigned long* data, int* r, const int length)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i >= length) return;
unsigned long x = data[i];
const unsigned char a[256] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8};
const unsigned char b[256] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8};
const unsigned char c[256] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8};
const unsigned char d[256] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8};
unsigned char i1 = a[(x & 0xFF)];
unsigned char i2 = a[(x >> 8) & 0xFF];
unsigned char i3 = b[(x >> 16) & 0xFF];
unsigned char i4 = b[(x >> 24) & 0xFF];
unsigned char i5 = c[(x >> 32) & 0xFF];
unsigned char i6 = c[(x >> 40) & 0xFF];
unsigned char i7 = d[(x >> 48) & 0xFF];
unsigned char i8 = d[(x >> 56) & 0xFF];
r[i] = (i1+i2)+(i3+i4)+(i5+i6)+(i7+i8);
}
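// Illustrative sketch (not part of the original kernels; the helper name is
// hypothetical and it is never called by the benchmark): the four 256-entry
// tables hard-coded in pc5 are copies of the same table -- the popcount of
// every possible byte value. Such a table could be generated on the host like this.
void make_byte_popcount_table(unsigned char table[256])
{
  for (int v = 0; v < 256; v++) {
    int c = 0;
    for (int b = v; b; b >>= 1) c += b & 1; // count the set bits of this byte value
    table[v] = (unsigned char)c;
  }
}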
void checkResults(const unsigned long *d, const int *r, const int length)
{
int error = 0;
for (int i=0;i<length;i++)
if (popcount_ref(d[i]) != r[i]) {
error = 1;
break;
}
if (error)
printf("Fail\n");
else
printf("Success\n");
}
int main(int argc, char* argv[])
{
unsigned long length = atol(argv[1]);
unsigned long *data = NULL;
int* result = NULL;
posix_memalign((void**)&data, 1024, length*sizeof(unsigned long));
posix_memalign((void**)&result, 1024, length*sizeof(int));
// initialize input
srand(2);
for (int i = 0; i < length; i++) {
unsigned long t = (unsigned long)rand() << 32;
data[i] = t | rand();
}
// run each popcount implementation 100 times
unsigned long* d_data;
hipMalloc((void**)&d_data, sizeof(unsigned long)*length);
hipMemcpy(d_data, data, sizeof(unsigned long)*length, hipMemcpyHostToDevice);
int* d_result;
hipMalloc((void**)&d_result, sizeof(int)*length);
dim3 grids ((length+BLOCK_SIZE-1)/BLOCK_SIZE);
dim3 threads (BLOCK_SIZE);
for (int n = 0; n < 100; n++) {
hipLaunchKernelGGL(( pc1), dim3(grids), dim3(threads), 0, 0, d_data, d_result, length);
}
hipMemcpy(result, d_result, sizeof(int)*length, hipMemcpyDeviceToHost);
checkResults(data, result, length);
//========================================================================================
for (int n = 0; n < 100; n++) {
hipLaunchKernelGGL(( pc2), dim3(grids), dim3(threads), 0, 0, d_data, d_result, length);
}
hipMemcpy(result, d_result, sizeof(int)*length, hipMemcpyDeviceToHost);
checkResults(data, result, length);
//========================================================================================
for (int n = 0; n < 100; n++) {
hipLaunchKernelGGL(( pc3), dim3(grids), dim3(threads), 0, 0, d_data, d_result, length);
}
hipMemcpy(result, d_result, sizeof(int)*length, hipMemcpyDeviceToHost);
checkResults(data, result, length);
//========================================================================================
for (int n = 0; n < 100; n++) {
hipLaunchKernelGGL(( pc4), dim3(grids), dim3(threads), 0, 0, d_data, d_result, length);
}
hipMemcpy(result, d_result, sizeof(int)*length, hipMemcpyDeviceToHost);
checkResults(data, result, length);
//========================================================================================
for (int n = 0; n < 100; n++) {
hipLaunchKernelGGL(( pc5), dim3(grids), dim3(threads), 0, 0, d_data, d_result, length);
}
hipMemcpy(result, d_result, sizeof(int)*length, hipMemcpyDeviceToHost);
checkResults(data, result, length);
//========================================================================================
hipFree(d_data);
hipFree(d_result);
free(data);
free(result);
return 0;
}
| 68b9ff30aca04d8f5e3359fd1e7fc173a8328b18.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define m1 0x5555555555555555
#define m2 0x3333333333333333
#define m4 0x0f0f0f0f0f0f0f0f
#define h01 0x0101010101010101
#define BLOCK_SIZE 256
// reference implementation
int popcount_ref(unsigned long x)
{
int count;
for (count=0; x; count++)
x &= x - 1;
return count;
}
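// Worked example (illustrative only; this helper is not in the original source
// and is never called by the benchmark): x = 0b101100 loses one set bit per
// "x &= x - 1" iteration, 101100 -> 101000 -> 100000 -> 0, so popcount_ref
// returns 3. A quick host-side sanity check of the reference on known values:
int popcount_ref_selftest(void)
{
  return popcount_ref(0x0UL) == 0 &&
         popcount_ref(0xFUL) == 4 &&
         popcount_ref(0x8000000000000000UL) == 1 &&
         popcount_ref(0x5555555555555555UL) == 32;
}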
// CUDA kernels
__global__ void
pc1 (const unsigned long* data, int* r, const int length)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i >= length) return;
unsigned long x = data[i];
x -= (x >> 1) & m1; //put count of each 2 bits into those 2 bits
x = (x & m2) + ((x >> 2) & m2); //put count of each 4 bits into those 4 bits
x = (x + (x >> 4)) & m4; //put count of each 8 bits into those 8 bits
x += x >> 8; //put count of each 16 bits into their lowest 8 bits
x += x >> 16; //put count of each 32 bits into their lowest 8 bits
x += x >> 32; //put count of each 64 bits into their lowest 8 bits
r[i] = x & 0x7f;
}
__global__ void
pc2 (const unsigned long* data, int* r, const int length)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i >= length) return;
unsigned long x = data[i];
x -= (x >> 1) & m1; //put count of each 2 bits into those 2 bits
x = (x & m2) + ((x >> 2) & m2); //put count of each 4 bits into those 4 bits
x = (x + (x >> 4)) & m4; //put count of each 8 bits into those 8 bits
r[i] = (x * h01) >> 56; //returns left 8 bits of x + (x<<8) + (x<<16) + (x<<24) + ...
}
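// Host-side restatement of the SWAR reduction used by pc2 (illustrative sketch;
// the function name is hypothetical and it is not called by the benchmark):
// after the three masked adds every byte of x holds the popcount of the
// corresponding input byte, and the multiply by h01 adds all eight byte counts
// into the most significant byte, which the >> 56 extracts.
int popcount_swar_host(unsigned long x)
{
  x -= (x >> 1) & m1;
  x = (x & m2) + ((x >> 2) & m2);
  x = (x + (x >> 4)) & m4;
  return (int)((x * h01) >> 56); // top byte now holds the total count
}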
__global__ void
pc3 (const unsigned long* data, int* r, const int length)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i >= length) return;
char count;
unsigned long x = data[i];
for (count=0; x; count++) x &= x - 1;
r[i] = count;
}
__global__ void
pc4 (const unsigned long* data, int* r, const int length)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i >= length) return;
unsigned long x = data[i];
char cnt = 0;
for (char b = 0; b < 64; b++) // 'b' avoids shadowing the thread index 'i' above
{
cnt = cnt + (x & 0x1);
x = x >> 1;
}
r[i] = cnt;
}
__global__ void
pc5 (const unsigned long* data, int* r, const int length)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i >= length) return;
unsigned long x = data[i];
const unsigned char a[256] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8};
const unsigned char b[256] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8};
const unsigned char c[256] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8};
const unsigned char d[256] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8};
unsigned char i1 = a[(x & 0xFF)];
unsigned char i2 = a[(x >> 8) & 0xFF];
unsigned char i3 = b[(x >> 16) & 0xFF];
unsigned char i4 = b[(x >> 24) & 0xFF];
unsigned char i5 = c[(x >> 32) & 0xFF];
unsigned char i6 = c[(x >> 40) & 0xFF];
unsigned char i7 = d[(x >> 48) & 0xFF];
unsigned char i8 = d[(x >> 56) & 0xFF];
r[i] = (i1+i2)+(i3+i4)+(i5+i6)+(i7+i8);
}
void checkResults(const unsigned long *d, const int *r, const int length)
{
int error = 0;
for (int i=0;i<length;i++)
if (popcount_ref(d[i]) != r[i]) {
error = 1;
break;
}
if (error)
printf("Fail\n");
else
printf("Success\n");
}
int main(int argc, char* argv[])
{
unsigned long length = atol(argv[1]);
unsigned long *data = NULL;
int* result = NULL;
posix_memalign((void**)&data, 1024, length*sizeof(unsigned long));
posix_memalign((void**)&result, 1024, length*sizeof(int));
// initialize input
srand(2);
for (int i = 0; i < length; i++) {
unsigned long t = (unsigned long)rand() << 32;
data[i] = t | rand();
}
// run each popcount implementation 100 times
unsigned long* d_data;
cudaMalloc((void**)&d_data, sizeof(unsigned long)*length);
cudaMemcpy(d_data, data, sizeof(unsigned long)*length, cudaMemcpyHostToDevice);
int* d_result;
cudaMalloc((void**)&d_result, sizeof(int)*length);
dim3 grids ((length+BLOCK_SIZE-1)/BLOCK_SIZE);
dim3 threads (BLOCK_SIZE);
for (int n = 0; n < 100; n++) {
pc1<<<grids, threads>>>(d_data, d_result, length);
}
cudaMemcpy(result, d_result, sizeof(int)*length, cudaMemcpyDeviceToHost);
checkResults(data, result, length);
//========================================================================================
for (int n = 0; n < 100; n++) {
pc2<<<grids, threads>>>(d_data, d_result, length);
}
cudaMemcpy(result, d_result, sizeof(int)*length, cudaMemcpyDeviceToHost);
checkResults(data, result, length);
//========================================================================================
for (int n = 0; n < 100; n++) {
pc3<<<grids, threads>>>(d_data, d_result, length);
}
cudaMemcpy(result, d_result, sizeof(int)*length, cudaMemcpyDeviceToHost);
checkResults(data, result, length);
//========================================================================================
for (int n = 0; n < 100; n++) {
pc4<<<grids, threads>>>(d_data, d_result, length);
}
cudaMemcpy(result, d_result, sizeof(int)*length, cudaMemcpyDeviceToHost);
checkResults(data, result, length);
//========================================================================================
for (int n = 0; n < 100; n++) {
pc5<<<grids, threads>>>(d_data, d_result, length);
}
cudaMemcpy(result, d_result, sizeof(int)*length, cudaMemcpyDeviceToHost);
checkResults(data, result, length);
//========================================================================================
cudaFree(d_data);
cudaFree(d_result);
free(data);
free(result);
return 0;
}
|
e1dfa92565ff34cf85beda98655ff4b272e1fd49.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/optimizers/lars_momentum_op.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void MomentumLarsKernel(const T* p, const T* g, const T* v,
const T* learning_rate, const T mu,
const int64_t num, const T lars_coeff,
const T lars_weight_decay, const T* p_norm,
const T* g_norm, T* p_out, T* v_out) {
T lr = learning_rate[0];
T local_lr = learning_rate[0];
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num;
i += blockDim.x * gridDim.x) {
if (p_norm[0] > 0 && g_norm[0] > 0) {
local_lr = lr * lars_coeff * p_norm[0] /
(g_norm[0] + lars_weight_decay * p_norm[0]);
}
T v_new = v[i] * mu + local_lr * (g[i] + lars_weight_decay * p[i]);
v_out[i] = v_new;
p_out[i] = p[i] - v_new;
}
}
template <typename DeviceContext, typename T>
class LarsMomentumOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto param_out = ctx.Output<framework::LoDTensor>("ParamOut");
auto velocity_out = ctx.Output<framework::LoDTensor>("VelocityOut");
auto param = ctx.Input<framework::LoDTensor>("Param");
auto velocity = ctx.Input<framework::LoDTensor>("Velocity");
auto grad = ctx.Input<framework::LoDTensor>("Grad");
auto learning_rate = ctx.Input<framework::LoDTensor>("LearningRate");
T* p_out = param_out->mutable_data<T>(ctx.GetPlace());
T* v_out = velocity_out->mutable_data<T>(ctx.GetPlace());
T mu = static_cast<T>(ctx.Attr<float>("mu"));
T lars_coeff = ctx.Attr<float>("lars_coeff");
T lars_weight_decay = ctx.Attr<float>("lars_weight_decay");
auto* p = param->data<T>();
auto* v = velocity->data<T>();
auto* g = grad->data<T>();
auto* lr = learning_rate->data<T>();
int block = 512;
int grid = (param->numel() + block - 1) / block;
auto eigen_p = framework::EigenVector<T>::Flatten(*param);
auto eigen_g = framework::EigenVector<T>::Flatten(*grad);
// calculate norms using Eigen and launch the kernel.
framework::Tensor p_norm_t, g_norm_t;
p_norm_t.Resize({1});
g_norm_t.Resize({1});
auto* p_norm_data = p_norm_t.mutable_data<T>(ctx.GetPlace());
auto* g_norm_data = g_norm_t.mutable_data<T>(ctx.GetPlace());
auto ep_norm = framework::EigenScalar<T>::From(p_norm_t);
auto eg_norm = framework::EigenScalar<T>::From(g_norm_t);
auto* place = ctx.template device_context<DeviceContext>().eigen_device();
ep_norm.device(*place) = eigen_p.square().sum().sqrt();
eg_norm.device(*place) = eigen_g.square().sum().sqrt();
hipLaunchKernelGGL(( MomentumLarsKernel), dim3(grid), dim3(block), 0, ctx.cuda_device_context().stream(),
p, g, v, lr, mu, param->numel(), lars_coeff, lars_weight_decay,
p_norm_data, g_norm_data, p_out, v_out);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
lars_momentum,
ops::LarsMomentumOpCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::LarsMomentumOpCUDAKernel<paddle::platform::CUDADeviceContext, double>);
| e1dfa92565ff34cf85beda98655ff4b272e1fd49.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/optimizers/lars_momentum_op.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void MomentumLarsKernel(const T* p, const T* g, const T* v,
const T* learning_rate, const T mu,
const int64_t num, const T lars_coeff,
const T lars_weight_decay, const T* p_norm,
const T* g_norm, T* p_out, T* v_out) {
T lr = learning_rate[0];
T local_lr = learning_rate[0];
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num;
i += blockDim.x * gridDim.x) {
if (p_norm[0] > 0 && g_norm[0] > 0) {
local_lr = lr * lars_coeff * p_norm[0] /
(g_norm[0] + lars_weight_decay * p_norm[0]);
}
T v_new = v[i] * mu + local_lr * (g[i] + lars_weight_decay * p[i]);
v_out[i] = v_new;
p_out[i] = p[i] - v_new;
}
}
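// Reference sketch (illustrative only; this helper is not part of the operator
// and its name is hypothetical): the LARS update applied by MomentumLarsKernel,
// written out for a single element on the host. local_lr falls back to the
// global lr when either norm is zero.
template <typename T>
void LarsMomentumReference(T* p, T* v, T g, T lr, T mu, T lars_coeff,
                           T lars_weight_decay, T p_norm, T g_norm) {
  T local_lr = lr;
  if (p_norm > 0 && g_norm > 0) {
    // local_lr = lr * coeff * ||p|| / (||g|| + weight_decay * ||p||)
    local_lr = lr * lars_coeff * p_norm / (g_norm + lars_weight_decay * p_norm);
  }
  T v_new = (*v) * mu + local_lr * (g + lars_weight_decay * (*p));
  *v = v_new;       // velocity update
  *p = *p - v_new;  // parameter update
}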
template <typename DeviceContext, typename T>
class LarsMomentumOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto param_out = ctx.Output<framework::LoDTensor>("ParamOut");
auto velocity_out = ctx.Output<framework::LoDTensor>("VelocityOut");
auto param = ctx.Input<framework::LoDTensor>("Param");
auto velocity = ctx.Input<framework::LoDTensor>("Velocity");
auto grad = ctx.Input<framework::LoDTensor>("Grad");
auto learning_rate = ctx.Input<framework::LoDTensor>("LearningRate");
T* p_out = param_out->mutable_data<T>(ctx.GetPlace());
T* v_out = velocity_out->mutable_data<T>(ctx.GetPlace());
T mu = static_cast<T>(ctx.Attr<float>("mu"));
T lars_coeff = ctx.Attr<float>("lars_coeff");
T lars_weight_decay = ctx.Attr<float>("lars_weight_decay");
auto* p = param->data<T>();
auto* v = velocity->data<T>();
auto* g = grad->data<T>();
auto* lr = learning_rate->data<T>();
int block = 512;
int grid = (param->numel() + block - 1) / block;
auto eigen_p = framework::EigenVector<T>::Flatten(*param);
auto eigen_g = framework::EigenVector<T>::Flatten(*grad);
// calculate norms using Eigen and launch the kernel.
framework::Tensor p_norm_t, g_norm_t;
p_norm_t.Resize({1});
g_norm_t.Resize({1});
auto* p_norm_data = p_norm_t.mutable_data<T>(ctx.GetPlace());
auto* g_norm_data = g_norm_t.mutable_data<T>(ctx.GetPlace());
auto ep_norm = framework::EigenScalar<T>::From(p_norm_t);
auto eg_norm = framework::EigenScalar<T>::From(g_norm_t);
auto* place = ctx.template device_context<DeviceContext>().eigen_device();
ep_norm.device(*place) = eigen_p.square().sum().sqrt();
eg_norm.device(*place) = eigen_g.square().sum().sqrt();
MomentumLarsKernel<<<grid, block, 0, ctx.cuda_device_context().stream()>>>(
p, g, v, lr, mu, param->numel(), lars_coeff, lars_weight_decay,
p_norm_data, g_norm_data, p_out, v_out);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
lars_momentum,
ops::LarsMomentumOpCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::LarsMomentumOpCUDAKernel<paddle::platform::CUDADeviceContext, double>);
|
1b0c354eb8584fb54898d43ffd520a26bb8aae1b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* testScanBlock.cu
*
* Microdemo to test block scan algorithms. These are built on top of
* the warp scan algorithms in the warp directory.
*
* Build with: nvcc -I ..\chLib <options> testScanBlock.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdlib.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <chAssert.h>
#include <chError.h>
#include "scanWarp.cuh"
#include "scanWarp2.cuh"
#include "scanWarpShuffle_hip.cuh"
#include "scanBlock.cuh"
//#include "scanBlockShuffle.cuh"
#include "scanZeroPad.cuh"
#define min(a,b) ((a)<(b)?(a):(b))
enum ScanType {
Inclusive, Exclusive
};
template<int period>
void
ScanExclusiveCPUPeriodic( int *out, const int *in, size_t N )
{
for ( size_t i = 0; i < N; i += period ) {
int sum = 0;
for ( size_t j = 0; j < period; j++ ) {
int next = in[i+j]; // in case we are doing this in place
out[i+j] = sum;
sum += next;
}
}
}
template<int period>
void
ScanInclusiveCPUPeriodic( int *out, const int *in, size_t N )
{
for ( size_t i = 0; i < N; i += period ) {
int sum = 0;
for ( size_t j = 0; j < period; j++ ) {
sum += in[i+j];
out[i+j] = sum;
}
}
}
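// Illustrative sketch (not part of the test harness; the function name is
// hypothetical and it is never called): for one 4-element period of input
// {3, 1, 4, 1} the inclusive scan is {3, 4, 8, 9} and the exclusive scan is
// {0, 3, 4, 8} -- the same sums shifted right by one with a leading identity.
void
ScanPeriodicExample()
{
    int in[4] = { 3, 1, 4, 1 };
    int incl[4], excl[4];
    ScanInclusiveCPUPeriodic<4>( incl, in, 4 ); // {3, 4, 8, 9}
    ScanExclusiveCPUPeriodic<4>( excl, in, 4 ); // {0, 3, 4, 8}
    (void) incl; (void) excl; // silence unused-variable warnings
}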
template<ScanType scantype>
void
ScanCPU32( int *out, const int *in, size_t N )
{
switch ( scantype ) {
case Exclusive: return ScanExclusiveCPUPeriodic<32>( out, in, N );
case Inclusive: return ScanInclusiveCPUPeriodic<32>( out, in, N );
}
}
template<ScanType scantype>
void
ScanCPUBlock( int *out, const int *in, size_t N, int numThreads )
{
switch ( numThreads ) {
case 256:
switch ( scantype ) {
case Exclusive: return ScanExclusiveCPUPeriodic<256>( out, in, N );
case Inclusive: return ScanInclusiveCPUPeriodic<256>( out, in, N );
}
case 512:
switch ( scantype ) {
case Exclusive: return ScanExclusiveCPUPeriodic<512>( out, in, N );
case Inclusive: return ScanInclusiveCPUPeriodic<512>( out, in, N );
}
case 1024:
switch ( scantype ) {
case Exclusive: return ScanExclusiveCPUPeriodic<1024>( out, in, N );
case Inclusive: return ScanInclusiveCPUPeriodic<1024>( out, in, N );
}
default: return;
}
}
void
RandomArray( int *out, size_t N, int modulus )
{
for ( size_t i = 0; i < N; i++ ) {
out[i] = rand() % modulus;
}
}
template<ScanType scantype>
__global__ void
ScanGPUWarp( int *out, const int *in, size_t N )
{
extern __shared__ int sPartials[];
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
sPartials[threadIdx.x] = in[i+threadIdx.x];
__syncthreads();
if ( scantype == Inclusive ) {
out[i+threadIdx.x] = scanWarp<int,false>( sPartials+threadIdx.x );
}
else {
out[i+threadIdx.x] = scanWarpExclusive<int,false>( sPartials+threadIdx.x );
}
}
}
template<ScanType scantype>
void
ScanGPU(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
hipLaunchKernelGGL(( ScanGPUWarp<scantype>), dim3(cBlocks), dim3(cThreads), cThreads*sizeof(int), 0,
out, in, N );
}
template<ScanType scantype>
__global__ void
ScanGPUBlock( int *out, const int *in, size_t N )
{
extern __shared__ int sPartials[];
const int tid = threadIdx.x;
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
sPartials[tid] = in[i+tid];
__syncthreads();
int myValue = scanBlock<int,false>( sPartials+tid, scanWarp<int,false> );
if ( scantype==Exclusive) {
__syncthreads();
myValue = (tid) ? sPartials[tid-1] : 0;
}
out[i+threadIdx.x] = myValue;
}
}
template<ScanType scantype>
void
ScanGPUBlock(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
hipLaunchKernelGGL(( ScanGPUBlock<scantype>), dim3(cBlocks), dim3(cThreads), cThreads*sizeof(int), 0,
out, in, N );
}
template<ScanType scantype, int logBlockSize>
__global__ void
ScanGPUBlockShuffle( int *out, const int *in, size_t N )
{
const int tid = threadIdx.x;
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
int myValue = in[i+tid];
if ( scantype == Exclusive ) {
myValue = exclusive_scan_block<logBlockSize>( myValue, tid );
}
else {
myValue = inclusive_scan_block<logBlockSize>( myValue, tid );
}
out[i+threadIdx.x] = myValue;
}
}
template<ScanType scantype>
void
ScanGPUBlockShuffle(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
/*if ( scantype == Inclusive )*/ {
switch( cThreads ) {
case 128: return hipLaunchKernelGGL(( ScanGPUBlockShuffle<scantype, 7>), dim3(cBlocks),dim3(cThreads), 0, 0, out, in, N );
case 256: return hipLaunchKernelGGL(( ScanGPUBlockShuffle<scantype, 8>), dim3(cBlocks),dim3(cThreads), 0, 0, out, in, N );
case 512: return hipLaunchKernelGGL(( ScanGPUBlockShuffle<scantype, 9>), dim3(cBlocks),dim3(cThreads), 0, 0, out, in, N );
case 1024: return hipLaunchKernelGGL(( ScanGPUBlockShuffle<scantype,10>), dim3(cBlocks),dim3(cThreads), 0, 0, out, in, N );
}
#if 0
hipLaunchKernelGGL(( ScanGPUBlockShuffle<scantype>), dim3(cBlocks), dim3(cThreads), 0, 0,
out, in, N );
#endif
}
hipLaunchKernelGGL(( ScanGPUBlock<scantype>), dim3(cBlocks), dim3(cThreads), cThreads*sizeof(int), 0,
out, in, N );
}
__global__ void
ScanInclusiveGPUWarp_0( int *out, const int *in, size_t N )
{
extern __shared__ int sPartials[];
const int sIndex = scanSharedIndex<true>( threadIdx.x );
sPartials[sIndex-16] = 0;
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
sPartials[sIndex] = in[i+threadIdx.x];
out[i+threadIdx.x] = scanWarp<int,true>( sPartials+sIndex );
}
}
void
ScanInclusiveGPU_0(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
hipLaunchKernelGGL(( ScanInclusiveGPUWarp_0), dim3(cBlocks),
dim3(cThreads),
scanSharedMemory<int,true>(cThreads), 0,
out, in, N );
}
__global__ void
ScanExclusiveGPUWarp_0( int *out, const int *in, size_t N )
{
extern __shared__ int sPartials[];
const int sIndex = scanSharedIndex<true>( threadIdx.x );
sPartials[sIndex-16] = 0;
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
sPartials[sIndex] = in[i+threadIdx.x];
out[i+threadIdx.x] = scanWarpExclusive<int,true>( sPartials+sIndex );
}
}
void
ScanExclusiveGPU_0(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
hipLaunchKernelGGL(( ScanExclusiveGPUWarp_0), dim3(cBlocks),
dim3(cThreads),
scanSharedMemory<int,true>(cThreads), 0,
out, in, N );
}
__global__ void
ScanInclusiveGPUWarp2( int *out, const int *in, size_t N )
{
extern __shared__ int sPartials[];
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
sPartials[threadIdx.x] = in[i+threadIdx.x];
__syncthreads();
out[i+threadIdx.x] = scanWarp2<int,false>( sPartials+threadIdx.x );
}
}
void
ScanInclusiveGPU2(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
hipLaunchKernelGGL(( ScanInclusiveGPUWarp2), dim3(cBlocks), dim3(cThreads), cThreads*sizeof(int), 0,
out, in, N );
}
__global__ void
ScanExclusiveGPUWarp2( int *out, const int *in, size_t N )
{
extern __shared__ int sPartials[];
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
sPartials[threadIdx.x] = in[i+threadIdx.x];
__syncthreads();
out[i+threadIdx.x] = scanWarpExclusive2<int,false>( sPartials+threadIdx.x );
}
}
void
ScanExclusiveGPU2(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
hipLaunchKernelGGL(( ScanExclusiveGPUWarp2), dim3(cBlocks), dim3(cThreads), cThreads*sizeof(int), 0,
out, in, N );
}
template<ScanType scantype>
__global__ void
ScanGPUWarpShuffle( int *out, const int *in, size_t N )
{
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
if ( scantype == Inclusive ) {
out[i+threadIdx.x] = inclusive_scan_warp_shfl<5>( in[i+threadIdx.x] );
}
else {
out[i+threadIdx.x] = exclusive_scan_warp_shfl<5>( in[i+threadIdx.x] );
}
}
}
template<ScanType scantype>
void
ScanGPUShuffle(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
hipLaunchKernelGGL(( ScanGPUWarpShuffle<scantype>), dim3(cBlocks), dim3(cThreads), 0, 0, out, in, N );
}
template<class T>
bool
TestScanBlock(
float *pMelementspersecond,
const char *szScanFunction,
void (*pfnScanCPU)(T *, const T *, size_t, int),
void (*pfnScanGPU)(T *, const T *, size_t, int),
size_t N,
int numThreads )
{
bool ret = false;
hipError_t status;
int *inGPU = 0;
int *outGPU = 0;
int *inCPU = (T *) malloc( N*sizeof(T) );
int *outCPU = (int *) malloc( N*sizeof(T) );
int *hostGPU = (int *) malloc( N*sizeof(T) );
hipEvent_t evStart = 0, evStop = 0;
if ( 0==inCPU || 0==outCPU || 0==hostGPU )
goto Error;
printf( "Testing %s (%d threads/block)\n", szScanFunction, numThreads );
cuda(EventCreate( &evStart ) );
cuda(EventCreate( &evStop ) );
cuda(Malloc( &inGPU, N*sizeof(T) ) );
cuda(Malloc( &outGPU, N*sizeof(T) ) );
cuda(Memset( inGPU, 0, N*sizeof(T) ) );
cuda(Memset( outGPU, 0, N*sizeof(T) ) );
cuda(Memset( outGPU, 0, N*sizeof(T) ) );
RandomArray( inCPU, N, 256 );
for ( int i = 0; i < N; i++ ) {
inCPU[i] = i;
}
pfnScanCPU( outCPU, inCPU, N, numThreads );
cuda(Memcpy( inGPU, inCPU, N*sizeof(T), hipMemcpyHostToDevice ) );
cuda(EventRecord( evStart, 0 ) );
pfnScanGPU( outGPU, inGPU, N, numThreads );
cuda(EventRecord( evStop, 0 ) );
cuda(Memcpy( hostGPU, outGPU, N*sizeof(T), hipMemcpyDeviceToHost ) );
for ( size_t i = 0; i < N; i++ ) {
if ( hostGPU[i] != outCPU[i] ) {
printf( "Scan failed\n" );
#ifdef _WIN32
__debugbreak();//_asm int 3
#else
assert(0);
#endif
goto Error;
}
}
{
float ms;
cuda(EventElapsedTime( &ms, evStart, evStop ) );
double Melements = N/1e6;
*pMelementspersecond = 1000.0f*Melements/ms;
}
ret = true;
Error:
hipEventDestroy( evStart );
hipEventDestroy( evStop );
hipFree( outGPU );
hipFree( inGPU );
free( inCPU );
free( outCPU );
free( hostGPU );
return ret;
}
int
main( int argc, char *argv[] )
{
hipError_t status;
int maxThreads;
int numInts = 32*1048576;
cuda(SetDevice( 0 ) );
cuda(SetDeviceFlags( hipDeviceMapHost ) );
{
hipDeviceProp_t prop;
hipGetDeviceProperties( &prop, 0 );
maxThreads = prop.maxThreadsPerBlock;
}
#define SCAN_TEST_VECTOR( CPUFunction, GPUFunction, N, numThreads ) do { \
float fMelementsPerSecond; \
srand(0); \
bool bSuccess = TestScanBlock<int>( &fMelementsPerSecond, #GPUFunction, CPUFunction, GPUFunction, N, numThreads ); \
if ( ! bSuccess ) { \
printf( "%s failed: N=%d, numThreads=%d\n", #GPUFunction, N, numThreads ); \
exit(1); \
} \
if ( fMelementsPerSecond > maxElementsPerSecond ) { \
maxElementsPerSecond = fMelementsPerSecond; \
} \
\
} while (0)
printf( "Problem size: %d integers\n", numInts );
for ( int numThreads = 256; numThreads <= maxThreads; numThreads *= 2 ) {
float maxElementsPerSecond = 0.0f;
#if 0
SCAN_TEST_VECTOR( ScanCPUBlock<Exclusive>, ScanGPUBlock<Exclusive>, numInts, numThreads );
printf( "GPU: %.2f Melements/s\n", maxElementsPerSecond );
maxElementsPerSecond = 0.0f;
SCAN_TEST_VECTOR( ScanCPUBlock<Exclusive>, ScanExclusiveGPU_0, numInts, numThreads );
printf( "GPU: %.2f Melements/s\n", maxElementsPerSecond );
maxElementsPerSecond = 0.0f;
SCAN_TEST_VECTOR( ScanCPUBlock<Exclusive>, ScanExclusiveGPU2, numInts, numThreads );
printf( "GPU2: %.2f Melements/s\n", maxElementsPerSecond );
#endif
maxElementsPerSecond = 0.0f;
SCAN_TEST_VECTOR( ScanCPUBlock<Exclusive>, ScanGPUBlockShuffle<Exclusive>, numInts, numThreads );
printf( "Shuffle: %.2f Melements/s\n", maxElementsPerSecond );
}
for ( int numThreads = 256; numThreads <= maxThreads; numThreads *= 2 ) {
float maxElementsPerSecond = 0.0f;
SCAN_TEST_VECTOR( ScanCPUBlock<Inclusive>, ScanGPUBlock<Inclusive>, numInts, numThreads );
printf( "GPU: %.2f Melements/s\n", maxElementsPerSecond );
#if 0
maxElementsPerSecond = 0.0f;
SCAN_TEST_VECTOR( ScanCPU32<Inclusive>, ScanInclusiveGPU_0, numInts, numThreads );
printf( "GPU: %.2f Melements/s\n", maxElementsPerSecond );
maxElementsPerSecond = 0.0f;
SCAN_TEST_VECTOR( ScanCPU32<Inclusive>, ScanInclusiveGPU2, numInts, numThreads );
printf( "GPU2: %.2f Melements/s\n", maxElementsPerSecond );
#endif
maxElementsPerSecond = 0.0f;
SCAN_TEST_VECTOR( ScanCPUBlock<Inclusive>, ScanGPUBlockShuffle<Inclusive>, numInts, numThreads );
printf( "Shuffle: %.2f Melements/s\n", maxElementsPerSecond );
}
return 0;
Error:
return 1;
}
| 1b0c354eb8584fb54898d43ffd520a26bb8aae1b.cu | /*
*
* testScanBlock.cu
*
* Microdemo to test block scan algorithms. These are built on top of
* the warp scan algorithms in the warp directory.
*
* Build with: nvcc -I ..\chLib <options> testScanBlock.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdlib.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <chAssert.h>
#include <chError.h>
#include "scanWarp.cuh"
#include "scanWarp2.cuh"
#include "scanWarpShuffle.cuh"
#include "scanBlock.cuh"
//#include "scanBlockShuffle.cuh"
#include "scanZeroPad.cuh"
#define min(a,b) ((a)<(b)?(a):(b))
enum ScanType {
Inclusive, Exclusive
};
template<int period>
void
ScanExclusiveCPUPeriodic( int *out, const int *in, size_t N )
{
for ( size_t i = 0; i < N; i += period ) {
int sum = 0;
for ( size_t j = 0; j < period; j++ ) {
int next = in[i+j]; // in case we are doing this in place
out[i+j] = sum;
sum += next;
}
}
}
template<int period>
void
ScanInclusiveCPUPeriodic( int *out, const int *in, size_t N )
{
for ( size_t i = 0; i < N; i += period ) {
int sum = 0;
for ( size_t j = 0; j < period; j++ ) {
sum += in[i+j];
out[i+j] = sum;
}
}
}
template<ScanType scantype>
void
ScanCPU32( int *out, const int *in, size_t N )
{
switch ( scantype ) {
case Exclusive: return ScanExclusiveCPUPeriodic<32>( out, in, N );
case Inclusive: return ScanInclusiveCPUPeriodic<32>( out, in, N );
}
}
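// Illustrative sketch (not used by the harness; the function name is
// hypothetical): ScanCPU32 restarts its scan every 32 elements, mirroring the
// independent per-warp scans the GPU kernels are checked against. With an
// all-ones input, the exclusive result climbs to 31 at the end of a period and
// drops back to 0 at element 32.
bool
CheckWarpPeriodRestart()
{
    int in[64], out[64];
    for ( int i = 0; i < 64; i++ ) in[i] = 1;
    ScanCPU32<Exclusive>( out, in, 64 );
    return out[0] == 0 && out[31] == 31 && out[32] == 0; // new period at element 32
}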
template<ScanType scantype>
void
ScanCPUBlock( int *out, const int *in, size_t N, int numThreads )
{
switch ( numThreads ) {
case 256:
switch ( scantype ) {
case Exclusive: return ScanExclusiveCPUPeriodic<256>( out, in, N );
case Inclusive: return ScanInclusiveCPUPeriodic<256>( out, in, N );
}
case 512:
switch ( scantype ) {
case Exclusive: return ScanExclusiveCPUPeriodic<512>( out, in, N );
case Inclusive: return ScanInclusiveCPUPeriodic<512>( out, in, N );
}
case 1024:
switch ( scantype ) {
case Exclusive: return ScanExclusiveCPUPeriodic<1024>( out, in, N );
case Inclusive: return ScanInclusiveCPUPeriodic<1024>( out, in, N );
}
default: return;
}
}
void
RandomArray( int *out, size_t N, int modulus )
{
for ( size_t i = 0; i < N; i++ ) {
out[i] = rand() % modulus;
}
}
template<ScanType scantype>
__global__ void
ScanGPUWarp( int *out, const int *in, size_t N )
{
extern __shared__ int sPartials[];
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
sPartials[threadIdx.x] = in[i+threadIdx.x];
__syncthreads();
if ( scantype == Inclusive ) {
out[i+threadIdx.x] = scanWarp<int,false>( sPartials+threadIdx.x );
}
else {
out[i+threadIdx.x] = scanWarpExclusive<int,false>( sPartials+threadIdx.x );
}
}
}
template<ScanType scantype>
void
ScanGPU(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
ScanGPUWarp<scantype><<<cBlocks, cThreads, cThreads*sizeof(int)>>>(
out, in, N );
}
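/*
 * Block-wide scan: each thread loads one element into shared memory and
 * scanBlock() produces the inclusive result. The Exclusive variant is then
 * obtained by shifting one slot to the left, so thread tid reads its left
 * neighbour's inclusive sum and thread 0 takes the identity 0.
 */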
template<ScanType scantype>
__global__ void
ScanGPUBlock( int *out, const int *in, size_t N )
{
extern __shared__ int sPartials[];
const int tid = threadIdx.x;
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
sPartials[tid] = in[i+tid];
__syncthreads();
int myValue = scanBlock<int,false>( sPartials+tid, scanWarp<int,false> );
if ( scantype==Exclusive) {
__syncthreads();
myValue = (tid) ? sPartials[tid-1] : 0;
}
out[i+threadIdx.x] = myValue;
}
}
template<ScanType scantype>
void
ScanGPUBlock(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
ScanGPUBlock<scantype><<<cBlocks, cThreads, cThreads*sizeof(int)>>>(
out, in, N );
}
template<ScanType scantype, int logBlockSize>
__global__ void
ScanGPUBlockShuffle( int *out, const int *in, size_t N )
{
const int tid = threadIdx.x;
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
int myValue = in[i+tid];
if ( scantype == Exclusive ) {
myValue = exclusive_scan_block<logBlockSize>( myValue, tid );
}
else {
myValue = inclusive_scan_block<logBlockSize>( myValue, tid );
}
out[i+threadIdx.x] = myValue;
}
}
template<ScanType scantype>
void
ScanGPUBlockShuffle(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
/*if ( scantype == Inclusive )*/ {
switch( cThreads ) {
case 128: return ScanGPUBlockShuffle<scantype, 7><<<cBlocks,cThreads>>>( out, in, N );
case 256: return ScanGPUBlockShuffle<scantype, 8><<<cBlocks,cThreads>>>( out, in, N );
case 512: return ScanGPUBlockShuffle<scantype, 9><<<cBlocks,cThreads>>>( out, in, N );
case 1024: return ScanGPUBlockShuffle<scantype,10><<<cBlocks,cThreads>>>( out, in, N );
}
#if 0
ScanGPUBlockShuffle<scantype><<<cBlocks, cThreads>>>(
out, in, N );
#endif
}
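// Block sizes not handled by the shuffle dispatch above (which returns for
// 128/256/512/1024 threads) fall back to the shared-memory block scan.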
ScanGPUBlock<scantype><<<cBlocks, cThreads, cThreads*sizeof(int)>>>(
out, in, N );
}
__global__ void
ScanInclusiveGPUWarp_0( int *out, const int *in, size_t N )
{
extern __shared__ int sPartials[];
const int sIndex = scanSharedIndex<true>( threadIdx.x );
sPartials[sIndex-16] = 0;
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
sPartials[sIndex] = in[i+threadIdx.x];
out[i+threadIdx.x] = scanWarp<int,true>( sPartials+sIndex );
}
}
void
ScanInclusiveGPU_0(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
ScanInclusiveGPUWarp_0<<<cBlocks,
cThreads,
scanSharedMemory<int,true>(cThreads)>>>(
out, in, N );
}
__global__ void
ScanExclusiveGPUWarp_0( int *out, const int *in, size_t N )
{
extern __shared__ int sPartials[];
const int sIndex = scanSharedIndex<true>( threadIdx.x );
sPartials[sIndex-16] = 0;
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
sPartials[sIndex] = in[i+threadIdx.x];
out[i+threadIdx.x] = scanWarpExclusive<int,true>( sPartials+sIndex );
}
}
void
ScanExclusiveGPU_0(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
ScanExclusiveGPUWarp_0<<<cBlocks,
cThreads,
scanSharedMemory<int,true>(cThreads)>>>(
out, in, N );
}
__global__ void
ScanInclusiveGPUWarp2( int *out, const int *in, size_t N )
{
extern __shared__ int sPartials[];
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
sPartials[threadIdx.x] = in[i+threadIdx.x];
__syncthreads();
out[i+threadIdx.x] = scanWarp2<int,false>( sPartials+threadIdx.x );
}
}
void
ScanInclusiveGPU2(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
ScanInclusiveGPUWarp2<<<cBlocks, cThreads, cThreads*sizeof(int)>>>(
out, in, N );
}
__global__ void
ScanExclusiveGPUWarp2( int *out, const int *in, size_t N )
{
extern __shared__ int sPartials[];
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
sPartials[threadIdx.x] = in[i+threadIdx.x];
__syncthreads();
out[i+threadIdx.x] = scanWarpExclusive2<int,false>( sPartials+threadIdx.x );
}
}
void
ScanExclusiveGPU2(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
ScanExclusiveGPUWarp2<<<cBlocks, cThreads, cThreads*sizeof(int)>>>(
out, in, N );
}
template<ScanType scantype>
__global__ void
ScanGPUWarpShuffle( int *out, const int *in, size_t N )
{
for ( size_t i = blockIdx.x*blockDim.x;
i < N;
i += blockDim.x ) {
if ( scantype == Inclusive ) {
out[i+threadIdx.x] = inclusive_scan_warp_shfl<5>( in[i+threadIdx.x] );
}
else {
out[i+threadIdx.x] = exclusive_scan_warp_shfl<5>( in[i+threadIdx.x] );
}
}
}
template<ScanType scantype>
void
ScanGPUShuffle(
int *out,
const int *in,
size_t N,
int cThreads )
{
int cBlocks = (int) (N/150);
if ( cBlocks > 150 ) {
cBlocks = 150;
}
ScanGPUWarpShuffle<scantype><<<cBlocks, cThreads>>>( out, in, N );
}
template<class T>
bool
TestScanBlock(
float *pMelementspersecond,
const char *szScanFunction,
void (*pfnScanCPU)(T *, const T *, size_t, int),
void (*pfnScanGPU)(T *, const T *, size_t, int),
size_t N,
int numThreads )
{
bool ret = false;
cudaError_t status;
int *inGPU = 0;
int *outGPU = 0;
int *inCPU = (T *) malloc( N*sizeof(T) );
int *outCPU = (int *) malloc( N*sizeof(T) );
int *hostGPU = (int *) malloc( N*sizeof(T) );
cudaEvent_t evStart = 0, evStop = 0;
if ( 0==inCPU || 0==outCPU || 0==hostGPU )
goto Error;
printf( "Testing %s (%d threads/block)\n", szScanFunction, numThreads );
cuda(EventCreate( &evStart ) );
cuda(EventCreate( &evStop ) );
cuda(Malloc( &inGPU, N*sizeof(T) ) );
cuda(Malloc( &outGPU, N*sizeof(T) ) );
cuda(Memset( inGPU, 0, N*sizeof(T) ) );
cuda(Memset( outGPU, 0, N*sizeof(T) ) );
cuda(Memset( outGPU, 0, N*sizeof(T) ) );
RandomArray( inCPU, N, 256 );
for ( int i = 0; i < N; i++ ) {
inCPU[i] = i;
}
pfnScanCPU( outCPU, inCPU, N, numThreads );
cuda(Memcpy( inGPU, inCPU, N*sizeof(T), cudaMemcpyHostToDevice ) );
cuda(EventRecord( evStart, 0 ) );
pfnScanGPU( outGPU, inGPU, N, numThreads );
cuda(EventRecord( evStop, 0 ) );
cuda(Memcpy( hostGPU, outGPU, N*sizeof(T), cudaMemcpyDeviceToHost ) );
for ( size_t i = 0; i < N; i++ ) {
if ( hostGPU[i] != outCPU[i] ) {
printf( "Scan failed\n" );
#ifdef _WIN32
__debugbreak();//_asm int 3
#else
assert(0);
#endif
goto Error;
}
}
{
float ms;
cuda(EventElapsedTime( &ms, evStart, evStop ) );
double Melements = N/1e6;
*pMelementspersecond = 1000.0f*Melements/ms;
}
ret = true;
Error:
cudaEventDestroy( evStart );
cudaEventDestroy( evStop );
cudaFree( outGPU );
cudaFree( inGPU );
free( inCPU );
free( outCPU );
free( hostGPU );
return ret;
}
int
main( int argc, char *argv[] )
{
cudaError_t status;
int maxThreads;
int numInts = 32*1048576;
cuda(SetDevice( 0 ) );
cuda(SetDeviceFlags( cudaDeviceMapHost ) );
{
cudaDeviceProp prop;
cudaGetDeviceProperties( &prop, 0 );
maxThreads = prop.maxThreadsPerBlock;
}
#define SCAN_TEST_VECTOR( CPUFunction, GPUFunction, N, numThreads ) do { \
float fMelementsPerSecond; \
srand(0); \
bool bSuccess = TestScanBlock<int>( &fMelementsPerSecond, #GPUFunction, CPUFunction, GPUFunction, N, numThreads ); \
if ( ! bSuccess ) { \
printf( "%s failed: N=%d, numThreads=%d\n", #GPUFunction, N, numThreads ); \
exit(1); \
} \
if ( fMelementsPerSecond > maxElementsPerSecond ) { \
maxElementsPerSecond = fMelementsPerSecond; \
} \
\
} while (0)
printf( "Problem size: %d integers\n", numInts );
for ( int numThreads = 256; numThreads <= maxThreads; numThreads *= 2 ) {
float maxElementsPerSecond = 0.0f;
#if 0
SCAN_TEST_VECTOR( ScanCPUBlock<Exclusive>, ScanGPUBlock<Exclusive>, numInts, numThreads );
printf( "GPU: %.2f Melements/s\n", maxElementsPerSecond );
maxElementsPerSecond = 0.0f;
SCAN_TEST_VECTOR( ScanCPUBlock<Exclusive>, ScanExclusiveGPU_0, numInts, numThreads );
printf( "GPU: %.2f Melements/s\n", maxElementsPerSecond );
maxElementsPerSecond = 0.0f;
SCAN_TEST_VECTOR( ScanCPUBlock<Exclusive>, ScanExclusiveGPU2, numInts, numThreads );
printf( "GPU2: %.2f Melements/s\n", maxElementsPerSecond );
#endif
maxElementsPerSecond = 0.0f;
SCAN_TEST_VECTOR( ScanCPUBlock<Exclusive>, ScanGPUBlockShuffle<Exclusive>, numInts, numThreads );
printf( "Shuffle: %.2f Melements/s\n", maxElementsPerSecond );
}
for ( int numThreads = 256; numThreads <= maxThreads; numThreads *= 2 ) {
float maxElementsPerSecond = 0.0f;
SCAN_TEST_VECTOR( ScanCPUBlock<Inclusive>, ScanGPUBlock<Inclusive>, numInts, numThreads );
printf( "GPU: %.2f Melements/s\n", maxElementsPerSecond );
#if 0
maxElementsPerSecond = 0.0f;
SCAN_TEST_VECTOR( ScanCPU32<Inclusive>, ScanInclusiveGPU_0, numInts, numThreads );
printf( "GPU: %.2f Melements/s\n", maxElementsPerSecond );
maxElementsPerSecond = 0.0f;
SCAN_TEST_VECTOR( ScanCPU32<Inclusive>, ScanInclusiveGPU2, numInts, numThreads );
printf( "GPU2: %.2f Melements/s\n", maxElementsPerSecond );
#endif
maxElementsPerSecond = 0.0f;
SCAN_TEST_VECTOR( ScanCPUBlock<Inclusive>, ScanGPUBlockShuffle<Inclusive>, numInts, numThreads );
printf( "Shuffle: %.2f Melements/s\n", maxElementsPerSecond );
}
return 0;
Error:
return 1;
}
|
732df9878fc33fc57f5f706475534110dedfe7e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "GLImageCudas.h"
#include "ColorTools_Device.h"
__global__ static void kernelAnimationHSB(uchar4* ptrDevPixels, int w, int h, float t);
__device__ static float color(int w, int h, float x, float y, float t);
__device__ static float d(int w, int h, float x, float y);
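// d() measures a pixel's distance from the image centre and color() turns it
// into an animated ripple: concentric cosine rings whose phase advances with
// t and whose amplitude fades with distance, mapped to a grey value around
// 128 +/- 127 that is written to all three colour channels.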
void useKernelAnimationHSB(uchar4* ptrDevPixels, int w, int h, float t){
dim3 blockPerGrid = dim3(32, 32, 1);
dim3 threadPerBlock = dim3(16, 16, 1);
hipLaunchKernelGGL(( kernelAnimationHSB), dim3(blockPerGrid),dim3(threadPerBlock), 0, 0, ptrDevPixels, w, h, t);
}
__global__ static void kernelAnimationHSB(uchar4* ptrDevPixels, int w, int h, float t){
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int nbThreadY = gridDim.y * blockDim.y;
int nbThreadX = gridDim.x * blockDim.x;
int nbThreadCuda = nbThreadY * nbThreadX;
int tid = j + (i * nbThreadX);
int pixelI;
int pixelJ;
while(tid < (w * h)){
pixelI = tid / w;
pixelJ = tid - w * pixelI;
float c = color(w, h, pixelI, pixelJ, t);
ptrDevPixels[tid].x = c;
ptrDevPixels[tid].y = c;
ptrDevPixels[tid].z = c;
ptrDevPixels[tid].w = 255;
tid += nbThreadCuda;
}
}
__device__ static float color(int w, int h, float x, float y, float t){
return 128 + 127 * ((cos(d(w, h, x,y) / (float)10 -(t / (float)7))) / (d(w, h, x, y) / 10 + 1));
}
__device__ static float d(int w, int h, float x, float y){
float fx = x - (w / 2);
float fy = y - (h / 2);
return sqrt(fx * fx + fy * fy);
}
| 732df9878fc33fc57f5f706475534110dedfe7e6.cu | #include <iostream>
#include "GLImageCudas.h"
#include "ColorTools_Device.h"
__global__ static void kernelAnimationHSB(uchar4* ptrDevPixels, int w, int h, float t);
__device__ static float color(int w, int h, float x, float y, float t);
__device__ static float d(int w, int h, float x, float y);
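// d() measures a pixel's distance from the image centre and color() turns it
// into an animated ripple: concentric cosine rings whose phase advances with
// t and whose amplitude fades with distance, mapped to a grey value around
// 128 +/- 127 that is written to all three colour channels.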
void useKernelAnimationHSB(uchar4* ptrDevPixels, int w, int h, float t){
dim3 blockPerGrid = dim3(32, 32, 1);
dim3 threadPerBlock = dim3(16, 16, 1);
kernelAnimationHSB<<<blockPerGrid,threadPerBlock>>>(ptrDevPixels, w, h, t);
}
__global__ static void kernelAnimationHSB(uchar4* ptrDevPixels, int w, int h, float t){
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int nbThreadY = gridDim.y * blockDim.y;
int nbThreadX = gridDim.x * blockDim.x;
int nbThreadCuda = nbThreadY * nbThreadX;
int tid = j + (i * nbThreadX);
int pixelI;
int pixelJ;
while(tid < (w * h)){
pixelI = tid / w;
pixelJ = tid - w * pixelI;
float c = color(w, h, pixelI, pixelJ, t);
ptrDevPixels[tid].x = c;
ptrDevPixels[tid].y = c;
ptrDevPixels[tid].z = c;
ptrDevPixels[tid].w = 255;
tid += nbThreadCuda;
}
}
__device__ static float color(int w, int h, float x, float y, float t){
return 128 + 127 * ((cos(d(w, h, x,y) / (float)10 -(t / (float)7))) / (d(w, h, x, y) / 10 + 1));
}
__device__ static float d(int w, int h, float x, float y){
float fx = x - (w / 2);
float fy = y - (h / 2);
return sqrt(fx * fx + fy * fy);
}
|
e11611888475e9a7167693a9c546945a78712fb1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7) {
comp = (+1.7403E-37f / +1.0513E-3f);
if (comp <= ceilf(+1.7902E19f)) {
float tmp_1 = (var_2 + fabsf(-1.8896E18f - var_3 + -1.1632E-36f));
comp += tmp_1 * var_4 - (+1.3793E-44f + var_5);
}
for (int i=0; i < var_1; ++i) {
comp += +1.6315E-24f - +1.1241E34f;
float tmp_2 = -1.9265E4f;
comp += tmp_2 - fabsf(+1.2871E-36f / var_6 - var_7);
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8);
hipDeviceSynchronize();
return 0;
}
| e11611888475e9a7167693a9c546945a78712fb1.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7) {
comp = (+1.7403E-37f / +1.0513E-3f);
if (comp <= ceilf(+1.7902E19f)) {
float tmp_1 = (var_2 + fabsf(-1.8896E18f - var_3 + -1.1632E-36f));
comp += tmp_1 * var_4 - (+1.3793E-44f + var_5);
}
for (int i=0; i < var_1; ++i) {
comp += +1.6315E-24f - +1.1241E34f;
float tmp_2 = -1.9265E4f;
comp += tmp_2 - fabsf(+1.2871E-36f / var_6 - var_7);
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8);
cudaDeviceSynchronize();
return 0;
}
|
d8fde661fed05bab0d687c3b950324cb53aa8c92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
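//
// Worked example: a pure green pixel (R=0, G=255, B=0) gives
// I = .299*0 + .587*255 + .114*0 = 149.685, which is stored as 149 once the
// float is converted to an unsigned char, while a neutral grey pixel keeps
// (almost exactly) its value because the three weights sum to 1.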
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
int idx = numCols*(blockIdx.y) + threadIdx.x;
uchar4 rgba = rgbaImage[idx];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[idx] = channelSum;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
const dim3 blockSize(numCols, 1, 1);
const dim3 gridSize( 1, numRows, 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| d8fde661fed05bab0d687c3b950324cb53aa8c92.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
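//
// Worked example: a pure green pixel (R=0, G=255, B=0) gives
// I = .299*0 + .587*255 + .114*0 = 149.685, which is stored as 149 once the
// float is converted to an unsigned char, while a neutral grey pixel keeps
// (almost exactly) its value because the three weights sum to 1.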
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
int idx = numCols*(blockIdx.y) + threadIdx.x;
uchar4 rgba = rgbaImage[idx];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[idx] = channelSum;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
const dim3 blockSize(numCols, 1, 1);
const dim3 gridSize( 1, numRows, 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
cb9aba6713cc94d23beff4aef713e0533da1556c.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef IMP_CU_GAUSS_IMPL_CU
#define IMP_CU_GAUSS_IMPL_CU
#include <imp/cu_imgproc/cu_image_filter.cuh>
#include <cstdint>
#include <hip/hip_runtime.h>
#include <imp/core/types.hpp>
#include <imp/core/roi.hpp>
#include <imp/cu_core/cu_image_gpu.cuh>
#include <imp/cu_core/cu_utils.hpp>
#include <imp/cu_core/cu_texture.cuh>
namespace imp {
namespace cu {
//-----------------------------------------------------------------------------
/** Perform a convolution with a Gaussian smoothing kernel
* @param dst pointer to output image (linear memory)
* @param stride length of image row [pixels]
* @param xoff x-coordinate offset where to start the region [pixels]
* @param yoff y-coordinate offset where to start the region [pixels]
* @param width width of region [pixels]
* @param height height of region [pixels]
* @param kernel_size length of the smoothing kernel [pixels]
* @param horizontal defines the direction of convolution
*/
template<typename Pixel>
__global__ void k_gauss(Pixel* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height,
Texture2D src_tex, int kernel_size, float c0,
float c1, bool horizontal=true)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const size_t out_idx = y*stride+x;
if(x>=0 && y>= 0 && x<width && y<height)
{
x += xoff;
y += yoff;
float sum = 0.0f;
int half_kernel_elements = (kernel_size - 1) / 2;
Pixel texel_c, texel;
tex2DFetch(texel_c, src_tex, x, y);
if (horizontal)
{
// convolve horizontally
float g2 = c1 * c1;
sum = c0 * texel_c;
float sum_coeff = c0;
for (int i = 1; i <= half_kernel_elements; i++)
{
c0 *= c1;
c1 *= g2;
int cur_x = max(0, min(width-1, x+i));
tex2DFetch(texel, src_tex, cur_x, y);
sum += c0 * texel;
cur_x = max(0, min(width-1, x-i));
tex2DFetch(texel, src_tex, cur_x, y);
sum += c0 * texel;
sum_coeff += 2.0f*c0;
}
dst[out_idx] = sum/sum_coeff;
}
else
{
// convolve vertically
float g2 = c1 * c1;
sum = c0 * texel_c;
float sum_coeff = c0;
for (int j = 1; j <= half_kernel_elements; j++)
{
c0 *= c1;
c1 *= g2;
float cur_y = max(0, min(height-1, y+j));
tex2DFetch(texel, src_tex, x, cur_y);
sum += c0 * texel;
cur_y = max(0, min(height-1, y-j));
tex2DFetch(texel, src_tex, x, cur_y);
sum += c0 * texel;
sum_coeff += 2.0f*c0;
}
dst[out_idx] = sum/sum_coeff;
}
}
}
//-----------------------------------------------------------------------------
template<typename Pixel, imp::PixelType pixel_type>
void filterGauss(ImageGpu<Pixel, pixel_type>& dst,
const Texture2D& src_tex,
float sigma, int kernel_size,
ImageGpuPtr<Pixel, pixel_type> tmp_img)
// hipStream_t stream);
{
Roi2u roi = dst.roi();
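// A kernel_size of 0 means "derive it from sigma": e.g. sigma = 2.0 gives
// max(5, ceil(2.0*3)*2 + 1) = 13 taps; the parity check below then forces an
// odd size so the kernel has a well-defined centre tap.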
if (kernel_size == 0)
kernel_size = max(5, static_cast<int>(::ceil(sigma*3)*2 + 1));
if (kernel_size % 2 == 0)
++kernel_size;
// temporary variable for filtering (separable kernel!)
if (!tmp_img || dst.roi().size() != tmp_img->size())
{
tmp_img.reset(new ImageGpu<Pixel, pixel_type>(roi.size()));
}
// fragmentation
Fragmentation<> frag(roi);
float c0 = 1.0f / (std::sqrt(2.0f * M_PI)*sigma);
float c1 = ::exp(-0.5f / (sigma * sigma));
// Convolve horizontally
hipLaunchKernelGGL(k_gauss, dim3(frag.dimGrid), dim3(frag.dimBlock), 0, 0,
tmp_img->data(), tmp_img->stride(),
roi.x(), roi.y(), tmp_img->width(), tmp_img->height(),
src_tex, /*sigma, */kernel_size, c0, c1, false);
IMP_CUDA_CHECK();
std::shared_ptr<Texture2D> tmp_tex =
tmp_img->genTexture(false,(tmp_img->bitDepth()<32) ? hipFilterModePoint
: hipFilterModeLinear);
IMP_CUDA_CHECK();
// Convolve vertically
hipLaunchKernelGGL(k_gauss, dim3(frag.dimGrid), dim3(frag.dimBlock), 0, 0,
dst.data(), dst.stride(),
roi.x(), roi.y(), roi.width(), roi.height(),
*tmp_tex, /*sigma, */kernel_size, c0, c1, true);
IMP_CUDA_CHECK();
}
//-----------------------------------------------------------------------------
template<typename Pixel, imp::PixelType pixel_type>
void filterGauss(ImageGpu<Pixel, pixel_type>& dst,
const ImageGpu<Pixel, pixel_type>& src,
float sigma, int kernel_size,
ImageGpuPtr<Pixel, pixel_type> tmp_img)
// hipStream_t stream);
{
std::shared_ptr<Texture2D> src_tex =
src.genTexture(false,(src.bitDepth()<32) ? hipFilterModePoint
: hipFilterModeLinear);
imp::Roi2u roi = src.roi();
if (dst.roi().size() != roi.size())
dst.setRoi(roi);
imp::cu::filterGauss(dst, *src_tex, sigma, kernel_size, tmp_img);
IMP_CUDA_CHECK();
}
//==============================================================================
//
// template instantiations for all our ima ge types
//
template void filterGauss(ImageGpu8uC1& dst, const ImageGpu8uC1& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu8uC1> tmp_imp);
template void filterGauss(ImageGpu8uC2& dst, const ImageGpu8uC2& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu8uC2> tmp_imp);
template void filterGauss(ImageGpu8uC4& dst, const ImageGpu8uC4& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu8uC4> tmp_imp);
template void filterGauss(ImageGpu16uC1& dst, const ImageGpu16uC1& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu16uC1> tmp_imp);
template void filterGauss(ImageGpu16uC2& dst, const ImageGpu16uC2& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu16uC2> tmp_imp);
template void filterGauss(ImageGpu16uC4& dst, const ImageGpu16uC4& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu16uC4> tmp_imp);
template void filterGauss(ImageGpu32sC1& dst, const ImageGpu32sC1& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu32sC1> tmp_imp);
template void filterGauss(ImageGpu32sC2& dst, const ImageGpu32sC2& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu32sC2> tmp_imp);
template void filterGauss(ImageGpu32sC4& dst, const ImageGpu32sC4& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu32sC4> tmp_imp);
template void filterGauss(ImageGpu32fC1& dst, const ImageGpu32fC1& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu32fC1> tmp_imp);
template void filterGauss(ImageGpu32fC2& dst, const ImageGpu32fC2& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu32fC2> tmp_imp);
template void filterGauss(ImageGpu32fC4& dst, const ImageGpu32fC4& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu32fC4> tmp_imp);
} // namespace cu
} // namespace imp
#endif // IMP_CU_GAUSS_IMPL_CU
| cb9aba6713cc94d23beff4aef713e0533da1556c.cu | #ifndef IMP_CU_GAUSS_IMPL_CU
#define IMP_CU_GAUSS_IMPL_CU
#include <imp/cu_imgproc/cu_image_filter.cuh>
#include <cstdint>
#include <cuda_runtime.h>
#include <imp/core/types.hpp>
#include <imp/core/roi.hpp>
#include <imp/cu_core/cu_image_gpu.cuh>
#include <imp/cu_core/cu_utils.hpp>
#include <imp/cu_core/cu_texture.cuh>
namespace imp {
namespace cu {
//-----------------------------------------------------------------------------
/** Perform a convolution with a Gaussian smoothing kernel
* @param dst pointer to output image (linear memory)
* @param stride length of image row [pixels]
* @param xoff x-coordinate offset where to start the region [pixels]
* @param yoff y-coordinate offset where to start the region [pixels]
* @param width width of region [pixels]
* @param height height of region [pixels]
* @param kernel_size length of the smoothing kernel [pixels]
* @param horizontal defines the direction of convolution
*/
template<typename Pixel>
__global__ void k_gauss(Pixel* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height,
Texture2D src_tex, int kernel_size, float c0,
float c1, bool horizontal=true)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const size_t out_idx = y*stride+x;
if(x>=0 && y>= 0 && x<width && y<height)
{
x += xoff;
y += yoff;
float sum = 0.0f;
int half_kernel_elements = (kernel_size - 1) / 2;
Pixel texel_c, texel;
tex2DFetch(texel_c, src_tex, x, y);
if (horizontal)
{
// convolve horizontally
float g2 = c1 * c1;
sum = c0 * texel_c;
float sum_coeff = c0;
for (int i = 1; i <= half_kernel_elements; i++)
{
c0 *= c1;
c1 *= g2;
int cur_x = max(0, min(width-1, x+i));
tex2DFetch(texel, src_tex, cur_x, y);
sum += c0 * texel;
cur_x = max(0, min(width-1, x-i));
tex2DFetch(texel, src_tex, cur_x, y);
sum += c0 * texel;
sum_coeff += 2.0f*c0;
}
dst[out_idx] = sum/sum_coeff;
}
else
{
// convolve vertically
float g2 = c1 * c1;
sum = c0 * texel_c;
float sum_coeff = c0;
for (int j = 1; j <= half_kernel_elements; j++)
{
c0 *= c1;
c1 *= g2;
float cur_y = max(0, min(height-1, y+j));
tex2DFetch(texel, src_tex, x, cur_y);
sum += c0 * texel;
cur_y = max(0, min(height-1, y-j));
tex2DFetch(texel, src_tex, x, cur_y);
sum += c0 * texel;
sum_coeff += 2.0f*c0;
}
dst[out_idx] = sum/sum_coeff;
}
}
}
//-----------------------------------------------------------------------------
template<typename Pixel, imp::PixelType pixel_type>
void filterGauss(ImageGpu<Pixel, pixel_type>& dst,
const Texture2D& src_tex,
float sigma, int kernel_size,
ImageGpuPtr<Pixel, pixel_type> tmp_img)
// cudaStream_t stream);
{
Roi2u roi = dst.roi();
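// A kernel_size of 0 means "derive it from sigma": e.g. sigma = 2.0 gives
// max(5, ceil(2.0*3)*2 + 1) = 13 taps; the parity check below then forces an
// odd size so the kernel has a well-defined centre tap.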
if (kernel_size == 0)
kernel_size = max(5, static_cast<int>(std::ceil(sigma*3)*2 + 1));
if (kernel_size % 2 == 0)
++kernel_size;
// temporary variable for filtering (separable kernel!)
if (!tmp_img || dst.roi().size() != tmp_img->size())
{
tmp_img.reset(new ImageGpu<Pixel, pixel_type>(roi.size()));
}
// fragmentation
Fragmentation<> frag(roi);
float c0 = 1.0f / (std::sqrt(2.0f * M_PI)*sigma);
float c1 = std::exp(-0.5f / (sigma * sigma));
// Convolve horizontally
k_gauss
<<<
frag.dimGrid, frag.dimBlock//, 0, stream
>>> (tmp_img->data(), tmp_img->stride(),
roi.x(), roi.y(), tmp_img->width(), tmp_img->height(),
src_tex, /*sigma, */kernel_size, c0, c1, false);
IMP_CUDA_CHECK();
std::shared_ptr<Texture2D> tmp_tex =
tmp_img->genTexture(false,(tmp_img->bitDepth()<32) ? cudaFilterModePoint
: cudaFilterModeLinear);
IMP_CUDA_CHECK();
// Convolve vertically
k_gauss
<<<
frag.dimGrid, frag.dimBlock//, 0, stream
>>> (dst.data(), dst.stride(),
roi.x(), roi.y(), roi.width(), roi.height(),
*tmp_tex, /*sigma, */kernel_size, c0, c1, true);
IMP_CUDA_CHECK();
}
//-----------------------------------------------------------------------------
template<typename Pixel, imp::PixelType pixel_type>
void filterGauss(ImageGpu<Pixel, pixel_type>& dst,
const ImageGpu<Pixel, pixel_type>& src,
float sigma, int kernel_size,
ImageGpuPtr<Pixel, pixel_type> tmp_img)
// cudaStream_t stream);
{
std::shared_ptr<Texture2D> src_tex =
src.genTexture(false,(src.bitDepth()<32) ? cudaFilterModePoint
: cudaFilterModeLinear);
imp::Roi2u roi = src.roi();
if (dst.roi().size() != roi.size())
dst.setRoi(roi);
imp::cu::filterGauss(dst, *src_tex, sigma, kernel_size, tmp_img);
IMP_CUDA_CHECK();
}
//==============================================================================
//
// template instantiations for all our ima ge types
//
template void filterGauss(ImageGpu8uC1& dst, const ImageGpu8uC1& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu8uC1> tmp_imp);
template void filterGauss(ImageGpu8uC2& dst, const ImageGpu8uC2& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu8uC2> tmp_imp);
template void filterGauss(ImageGpu8uC4& dst, const ImageGpu8uC4& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu8uC4> tmp_imp);
template void filterGauss(ImageGpu16uC1& dst, const ImageGpu16uC1& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu16uC1> tmp_imp);
template void filterGauss(ImageGpu16uC2& dst, const ImageGpu16uC2& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu16uC2> tmp_imp);
template void filterGauss(ImageGpu16uC4& dst, const ImageGpu16uC4& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu16uC4> tmp_imp);
template void filterGauss(ImageGpu32sC1& dst, const ImageGpu32sC1& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu32sC1> tmp_imp);
template void filterGauss(ImageGpu32sC2& dst, const ImageGpu32sC2& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu32sC2> tmp_imp);
template void filterGauss(ImageGpu32sC4& dst, const ImageGpu32sC4& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu32sC4> tmp_imp);
template void filterGauss(ImageGpu32fC1& dst, const ImageGpu32fC1& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu32fC1> tmp_imp);
template void filterGauss(ImageGpu32fC2& dst, const ImageGpu32fC2& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu32fC2> tmp_imp);
template void filterGauss(ImageGpu32fC4& dst, const ImageGpu32fC4& src, float sigma, int kernel_size, std::shared_ptr<ImageGpu32fC4> tmp_imp);
} // namespace cu
} // namespace imp
#endif // IMP_CU_GAUSS_IMPL_CU
|
2f1d1d2f5a459ae7b05135392b385ab341debb46.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <fstream>
#include <iostream>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <device_launch_parameters.h>
#define Infinity 65536
#define index(x,y,z) (z+y*x)
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall(hipError_t err, const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
if (hipSuccess != err)
{
fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString(err));
exit(-1);
}
#endif
return;
}
inline void __cudaCheckError(const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString(err));
exit(-1);
}
err = hipDeviceSynchronize();
if (hipSuccess != err)
{
fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, hipGetErrorString(err));
exit(-1);
}
#endif
return;
}
using namespace std;
const int n_iterations = 300;
const double initial_pheromone = 1.0;
const double evap_rate = 0.5;
const double ALFA = 1;
const double BETA = 2;
const double GAMA = 2;
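// ALFA, BETA and GAMA weight the edge-selection probability built in
// aco_cuda(): prob = pheromone^ALFA * (1/distance)^BETA * (1/cost)^GAMA,
// so stronger pheromone trails, shorter edges and cheaper edges all make an
// edge more likely to be chosen.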
int *load_adjacency_matrix(char const *filename, int &n_cities);
int calculate_tourcost(int *distances, int *costs,int *path, int n_cities);
int calculate_tourcost_single(int *distances,int *path, int n_cities);
int *optimal_solution(int *tours, int *distances,int *costs, int n_ants, int n_cities);
void evaporate(double *pheromones, int n_cities);
void pheromone_update(double *pheromones, int *distances, int *costs,int*min_path, int n_cities);
int *aco_cuda(int *distances, int *costs,int n_cities, int n_ants);
__global__ void cuda_evaporate(double *pheromones, int n_cities, double evap_rate);
__global__ void cuda_pheromone_update(double *pheromones, int *distances, int*costs,int *path, int n_cities, double amount);
__global__ void cuda_path_traverse(int *tours, int *visited, double *choiceinfo, double *probs, int n_cities);
int main()
{
srand((unsigned)time(NULL));
char const *inputfile, *outputfile,*cinputfile;
inputfile = "distance.txt";
outputfile = "Output.txt";
cinputfile = "cost.txt";
int n_cities;
int *distances;
int *costs;
distances = load_adjacency_matrix(inputfile, n_cities);
costs= load_adjacency_matrix(cinputfile, n_cities);
int *solution = aco_cuda(distances, costs, n_cities, n_cities);
int cost = calculate_tourcost_single(distances, solution, n_cities);
int cost2 = calculate_tourcost_single(costs, solution, n_cities);
ofstream output;
output.open(outputfile);
output << "Total cost of traversal: " << cost <<"\t"<<"\nTotal expenditure: "<<cost2<<endl;
output << "Best Solution Path:\n";
for (int i = 0; i < n_cities; i++)
output << solution[i] << endl;
output << solution[0] << endl;
cout << "CUDA ACO is complete" << endl;
return 0;
}
__global__ void cuda_evaporate(double *pheromones, int n_cities, double evap_rate)
{
int edge_id = threadIdx.x + blockIdx.x*blockDim.x;
pheromones[edge_id] *= evap_rate;
}
__global__ void cuda_pheromone_update(double *pheromones, int *distances,int *cost, int *path, int n_cities, double amount)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int first = path[id];
int second = path[id + 1];
pheromones[index(n_cities, first, second)] += amount;
pheromones[index(n_cities, second, first)] += amount;
}
__global__ void cuda_path_traverse(int *tours, int *visited, double *choiceinfo, double *probs, int n_cities)
{
int line_id = blockDim.x*blockIdx.x + threadIdx.x;
for (int step = 1; step < n_cities; step++)
{
int current = tours[index(n_cities, line_id, step - 1)];
double total_prob = 0.0;
for (int i = 0; i < n_cities; i++)
{
if (visited[index(n_cities, line_id, i)] == 1)
probs[index(n_cities, line_id, i)] = 0.0;
else {
double current_prob = choiceinfo[index(n_cities, current, i)];
probs[index(n_cities, line_id, i)] = current_prob;
total_prob += current_prob;
}
}
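// Roulette-wheel selection: draw a random number scaled by total_prob and
// walk the per-city probabilities until the running sum reaches it; the city
// where that happens becomes the ant's next stop.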
double random;
hiprandState_t state;
hiprand_init((unsigned long long) clock(), 0, 0, &state);
random = hiprand_uniform(&state);
random *= total_prob;
int next;
double sum = probs[index(n_cities, line_id, 0)];
for (next = 0; sum < random; next++)
{
sum += probs[index(n_cities, line_id, next + 1)];
}
tours[index(n_cities, line_id, step)] = next;
visited[index(n_cities, line_id, next)] = 1;
}
}
int *load_adjacency_matrix(char const *filename, int &n_cities)
{
ifstream adj_matrix;
adj_matrix.open(filename);
adj_matrix >> n_cities;
int* distances = (int *)malloc(n_cities*n_cities * sizeof(int));
for (int i = 0; i < n_cities; i++)
for (int j = 0; j < n_cities; j++)
adj_matrix >> distances[index(n_cities, i, j)];
return distances;
}
int calculate_tourcost(int *distances, int *costs,int *path, int n_cities)
{
int cost = 0;
for (int i = 0; i < (n_cities - 1); i++)
{
cost += distances[index(n_cities, path[i], path[i + 1])];
cost += costs[index(n_cities, path[i], path[i + 1])];
}
cost += distances[index(n_cities, path[n_cities - 1], path[0])];
cost += costs[index(n_cities, path[n_cities - 1], path[0])];
return cost;
}
int calculate_tourcost_single(int *distances, int *path, int n_cities)
{
int cost = 0;
for (int i = 0; i < (n_cities - 1); i++)
{
cost += distances[index(n_cities, path[i], path[i + 1])];
}
cost += distances[index(n_cities, path[n_cities-1], path[0])];
return cost;
}
int *optimal_solution(int *tours, int *distances,int *costs, int n_ants, int n_cities)
{
int *best_tour = &tours[0];
for (int tour = 0; tour < n_ants; tour++)
if (calculate_tourcost(distances,costs, &tours[index(n_cities, tour, 0)], n_cities) < calculate_tourcost(distances,costs, best_tour, n_cities))
best_tour = &tours[index(n_cities, tour, 0)];
return best_tour;
}
void evaporate(double *pheromones, int n_cities)
{
int size = n_cities * n_cities * sizeof(double);
double *pheromones_device;
CudaSafeCall(hipMalloc((void**)&pheromones_device, size));
hipMemcpy(pheromones_device, pheromones, size, hipMemcpyHostToDevice);
cuda_evaporate << < n_cities, n_cities >> > (pheromones_device, n_cities, evap_rate);
CudaCheckError();
hipMemcpy(pheromones, pheromones_device, size, hipMemcpyDeviceToHost);
hipFree(pheromones_device);
}
void pheromone_update(double *pheromones, int *distances,int *costs, int *path, int n_cities)
{
double amount = (double)(((double)(1.0f / (double)calculate_tourcost_single(distances, path, n_cities))) + ((double)(1.0f / (double)calculate_tourcost_single(costs, path, n_cities))));
int size_path = n_cities * sizeof(int);
int size_int = n_cities * n_cities * sizeof(int);
int size_double = n_cities * n_cities * sizeof(double);
int *path_device;
int *distances_device;
int *costs_device;
double *pheromones_device;
CudaSafeCall(hipMalloc((void**)&path_device, size_path));
CudaSafeCall(hipMalloc((void**)&distances_device, size_int));
CudaSafeCall(hipMalloc((void**)&costs_device, size_int));
CudaSafeCall(hipMalloc((void**)&pheromones_device, size_double));
hipMemcpy(path_device, path, size_path, hipMemcpyHostToDevice);
hipMemcpy(distances_device, distances, size_int, hipMemcpyHostToDevice);
hipMemcpy(costs_device, costs, size_int, hipMemcpyHostToDevice);
hipMemcpy(pheromones_device, pheromones, size_double, hipMemcpyHostToDevice);
cuda_pheromone_update << < 1, n_cities - 1 >> > (pheromones_device, distances_device,costs_device, path_device, n_cities, amount);
CudaCheckError();
hipMemcpy(distances, distances_device, size_int, hipMemcpyDeviceToHost);
hipMemcpy(costs, costs_device, size_int, hipMemcpyDeviceToHost);
hipMemcpy(pheromones, pheromones_device, size_double, hipMemcpyDeviceToHost);
hipFree(path_device);
hipFree(distances_device);
hipFree(pheromones_device);
}
int *aco_cuda(int *distances, int *costs, int n_cities, int n_ants)
{
int ph_size = n_cities * n_cities * sizeof(double);
int tours_size = n_ants * n_cities * sizeof(int);
int dist_size = n_cities * n_cities * sizeof(int);
double *pheromones = (double*)malloc(ph_size);
int *tours = (int*)malloc(tours_size);
int *visited = (int*)malloc(tours_size);
double *choiceinfo = (double*)malloc(ph_size);
int *distances_device;
int *costs_device;
int *tours_device;
int *visited_device;
double *choiceinfo_device;
double *probs;
CudaSafeCall(hipMalloc((void**)&distances_device, dist_size));
CudaSafeCall(hipMalloc((void**)&costs_device, dist_size));
CudaSafeCall(hipMalloc((void**)&tours_device, tours_size));
CudaSafeCall(hipMalloc((void**)&visited_device, tours_size));
CudaSafeCall(hipMalloc((void**)&choiceinfo_device, ph_size));
CudaSafeCall(hipMalloc((void**)&probs, ph_size));
hipMemcpy(distances_device, distances, dist_size, hipMemcpyHostToDevice);
hipMemcpy(costs_device, costs, dist_size, hipMemcpyHostToDevice);
for (int i = 0; i < n_cities; i++)
for (int j = 0; j < n_cities; j++)
pheromones[index(n_cities, i, j)] = initial_pheromone;
for (int iteration = 0; iteration < n_iterations; iteration++)
{
for (int i = 0; i < n_ants; i++)
for (int j = 0; j < n_cities; j++)
tours[index(n_cities, i, j)] = Infinity;
for (int i = 0; i < n_ants; i++)
for (int j = 0; j < n_cities; j++)
visited[index(n_cities, i, j)] = 0;
for (int i = 0; i < n_cities; i++)
{
for (int j = 0; j < n_cities; j++)
{
double edge_pherom = pheromones[index(n_cities, i, j)];
double edge_weight = distances[index(n_cities, i, j)];
double cost_weight = costs[index(n_cities, i, j)];
double prob = 0.0f;
if (edge_weight != 0.0f)
{
prob = pow(edge_pherom, ALFA)*pow(1 / edge_weight, BETA)*pow(1 / cost_weight, GAMA);
}
else
{
prob = pow(edge_pherom, ALFA)*pow(Infinity, BETA);
}
choiceinfo[index(n_cities, i, j)] = prob;
}
}
hipMemcpy(choiceinfo_device, choiceinfo, ph_size, hipMemcpyHostToDevice);
for (int ant = 0; ant < n_ants; ant++)
{
int step = 0;
int init = rand() % n_cities;
tours[index(n_cities, ant, step)] = init;
visited[index(n_cities, ant, init)] = 1;
}
hipMemcpy(visited_device, visited, tours_size, hipMemcpyHostToDevice);
hipMemcpy(tours_device, tours, tours_size, hipMemcpyHostToDevice);
cuda_path_traverse << < 1,n_cities >> > (tours_device, visited_device, choiceinfo_device, probs, n_cities);
CudaCheckError();
hipMemcpy(tours, tours_device, tours_size, hipMemcpyDeviceToHost);
hipMemcpy(visited, visited_device, tours_size, hipMemcpyDeviceToHost);
evaporate(pheromones, n_cities);
int *best = optimal_solution(tours, distances, costs, n_ants, n_cities);
pheromone_update(pheromones, distances,costs, best, n_cities);
}
hipFree(distances_device);
hipFree(tours_device);
hipFree(visited_device);
hipFree(choiceinfo_device);
hipFree(probs);
int *best = optimal_solution(tours, distances,costs, n_ants, n_cities);
return best;
} | 2f1d1d2f5a459ae7b05135392b385ab341debb46.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <fstream>
#include <iostream>
#include <curand.h>
#include <curand_kernel.h>
#include <device_launch_parameters.h>
#define Infinity 65536
#define index(x,y,z) (z+y*x)
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall(cudaError err, const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
if (cudaSuccess != err)
{
fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString(err));
exit(-1);
}
#endif
return;
}
inline void __cudaCheckError(const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString(err));
exit(-1);
}
err = cudaDeviceSynchronize();
if (cudaSuccess != err)
{
fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString(err));
exit(-1);
}
#endif
return;
}
using namespace std;
const int n_iterations = 300;
const double initial_pheromone = 1.0;
const double evap_rate = 0.5;
const double ALFA = 1;
const double BETA = 2;
const double GAMA = 2;
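// ALFA, BETA and GAMA weight the edge-selection probability built in
// aco_cuda(): prob = pheromone^ALFA * (1/distance)^BETA * (1/cost)^GAMA,
// so stronger pheromone trails, shorter edges and cheaper edges all make an
// edge more likely to be chosen.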
int *load_adjacency_matrix(char const *filename, int &n_cities);
int calculate_tourcost(int *distances, int *costs,int *path, int n_cities);
int calculate_tourcost_single(int *distances,int *path, int n_cities);
int *optimal_solution(int *tours, int *distances,int *costs, int n_ants, int n_cities);
void evaporate(double *pheromones, int n_cities);
void pheromone_update(double *pheromones, int *distances, int *costs,int*min_path, int n_cities);
int *aco_cuda(int *distances, int *costs,int n_cities, int n_ants);
__global__ void cuda_evaporate(double *pheromones, int n_cities, double evap_rate);
__global__ void cuda_pheromone_update(double *pheromones, int *distances, int*costs,int *path, int n_cities, double amount);
__global__ void cuda_path_traverse(int *tours, int *visited, double *choiceinfo, double *probs, int n_cities);
int main()
{
srand((unsigned)time(NULL));
char const *inputfile, *outputfile,*cinputfile;
inputfile = "distance.txt";
outputfile = "Output.txt";
cinputfile = "cost.txt";
int n_cities;
int *distances;
int *costs;
distances = load_adjacency_matrix(inputfile, n_cities);
costs= load_adjacency_matrix(cinputfile, n_cities);
int *solution = aco_cuda(distances, costs, n_cities, n_cities);
int cost = calculate_tourcost_single(distances, solution, n_cities);
int cost2 = calculate_tourcost_single(costs, solution, n_cities);
ofstream output;
output.open(outputfile);
output << "Total cost of traversal: " << cost <<"\t"<<"\nTotal expenditure: "<<cost2<<endl;
output << "Best Solution Path:\n";
for (int i = 0; i < n_cities; i++)
output << solution[i] << endl;
output << solution[0] << endl;
cout << "CUDA ACO is complete" << endl;
return 0;
}
__global__ void cuda_evaporate(double *pheromones, int n_cities, double evap_rate)
{
int edge_id = threadIdx.x + blockIdx.x*blockDim.x;
pheromones[edge_id] *= evap_rate;
}
__global__ void cuda_pheromone_update(double *pheromones, int *distances,int *cost, int *path, int n_cities, double amount)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int first = path[id];
int second = path[id + 1];
pheromones[index(n_cities, first, second)] += amount;
pheromones[index(n_cities, second, first)] += amount;
}
__global__ void cuda_path_traverse(int *tours, int *visited, double *choiceinfo, double *probs, int n_cities)
{
int line_id = blockDim.x*blockIdx.x + threadIdx.x;
for (int step = 1; step < n_cities; step++)
{
int current = tours[index(n_cities, line_id, step - 1)];
double total_prob = 0.0;
for (int i = 0; i < n_cities; i++)
{
if (visited[index(n_cities, line_id, i)] == 1)
probs[index(n_cities, line_id, i)] = 0.0;
else {
double current_prob = choiceinfo[index(n_cities, current, i)];
probs[index(n_cities, line_id, i)] = current_prob;
total_prob += current_prob;
}
}
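// Roulette-wheel selection: draw a random number scaled by total_prob and
// walk the per-city probabilities until the running sum reaches it; the city
// where that happens becomes the ant's next stop.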
double random;
curandState_t state;
curand_init((unsigned long long) clock(), 0, 0, &state);
random = curand_uniform(&state);
random *= total_prob;
int next;
double sum = probs[index(n_cities, line_id, 0)];
for (next = 0; sum < random; next++)
{
sum += probs[index(n_cities, line_id, next + 1)];
}
tours[index(n_cities, line_id, step)] = next;
visited[index(n_cities, line_id, next)] = 1;
}
}
int *load_adjacency_matrix(char const *filename, int &n_cities)
{
ifstream adj_matrix;
adj_matrix.open(filename);
adj_matrix >> n_cities;
int* distances = (int *)malloc(n_cities*n_cities * sizeof(int));
for (int i = 0; i < n_cities; i++)
for (int j = 0; j < n_cities; j++)
adj_matrix >> distances[index(n_cities, i, j)];
return distances;
}
int calculate_tourcost(int *distances, int *costs,int *path, int n_cities)
{
int cost = 0;
for (int i = 0; i < (n_cities - 1); i++)
{
cost += distances[index(n_cities, path[i], path[i + 1])];
cost += costs[index(n_cities, path[i], path[i + 1])];
}
cost += distances[index(n_cities, path[n_cities - 1], path[0])];
cost += costs[index(n_cities, path[n_cities - 1], path[0])];
return cost;
}
int calculate_tourcost_single(int *distances, int *path, int n_cities)
{
int cost = 0;
for (int i = 0; i < (n_cities - 1); i++)
{
cost += distances[index(n_cities, path[i], path[i + 1])];
}
cost += distances[index(n_cities, path[n_cities-1], path[0])];
return cost;
}
int *optimal_solution(int *tours, int *distances,int *costs, int n_ants, int n_cities)
{
int *best_tour = &tours[0];
for (int tour = 0; tour < n_ants; tour++)
if (calculate_tourcost(distances,costs, &tours[index(n_cities, tour, 0)], n_cities) < calculate_tourcost(distances,costs, best_tour, n_cities))
best_tour = &tours[index(n_cities, tour, 0)];
return best_tour;
}
void evaporate(double *pheromones, int n_cities)
{
int size = n_cities * n_cities * sizeof(double);
double *pheromones_device;
CudaSafeCall(cudaMalloc((void**)&pheromones_device, size));
cudaMemcpy(pheromones_device, pheromones, size, cudaMemcpyHostToDevice);
cuda_evaporate << < n_cities, n_cities >> > (pheromones_device, n_cities, evap_rate);
CudaCheckError();
cudaMemcpy(pheromones, pheromones_device, size, cudaMemcpyDeviceToHost);
cudaFree(pheromones_device);
}
void pheromone_update(double *pheromones, int *distances,int *costs, int *path, int n_cities)
{
double amount = (double)(((double)(1.0f / (double)calculate_tourcost_single(distances, path, n_cities))) + ((double)(1.0f / (double)calculate_tourcost_single(costs, path, n_cities))));
int size_path = n_cities * sizeof(int);
int size_int = n_cities * n_cities * sizeof(int);
int size_double = n_cities * n_cities * sizeof(double);
int *path_device;
int *distances_device;
int *costs_device;
double *pheromones_device;
CudaSafeCall(cudaMalloc((void**)&path_device, size_path));
CudaSafeCall(cudaMalloc((void**)&distances_device, size_int));
CudaSafeCall(cudaMalloc((void**)&costs_device, size_int));
CudaSafeCall(cudaMalloc((void**)&pheromones_device, size_double));
cudaMemcpy(path_device, path, size_path, cudaMemcpyHostToDevice);
cudaMemcpy(distances_device, distances, size_int, cudaMemcpyHostToDevice);
cudaMemcpy(costs_device, costs, size_int, cudaMemcpyHostToDevice);
cudaMemcpy(pheromones_device, pheromones, size_double, cudaMemcpyHostToDevice);
cuda_pheromone_update << < 1, n_cities - 1 >> > (pheromones_device, distances_device,costs_device, path_device, n_cities, amount);
CudaCheckError();
cudaMemcpy(distances, distances_device, size_int, cudaMemcpyDeviceToHost);
cudaMemcpy(costs, costs_device, size_int, cudaMemcpyDeviceToHost);
cudaMemcpy(pheromones, pheromones_device, size_double, cudaMemcpyDeviceToHost);
cudaFree(path_device);
cudaFree(distances_device);
cudaFree(pheromones_device);
}
int *aco_cuda(int *distances, int *costs, int n_cities, int n_ants)
{
int ph_size = n_cities * n_cities * sizeof(double);
int tours_size = n_ants * n_cities * sizeof(int);
int dist_size = n_cities * n_cities * sizeof(int);
double *pheromones = (double*)malloc(ph_size);
int *tours = (int*)malloc(tours_size);
int *visited = (int*)malloc(tours_size);
double *choiceinfo = (double*)malloc(ph_size);
int *distances_device;
int *costs_device;
int *tours_device;
int *visited_device;
double *choiceinfo_device;
double *probs;
CudaSafeCall(cudaMalloc((void**)&distances_device, dist_size));
CudaSafeCall(cudaMalloc((void**)&costs_device, dist_size));
CudaSafeCall(cudaMalloc((void**)&tours_device, tours_size));
CudaSafeCall(cudaMalloc((void**)&visited_device, tours_size));
CudaSafeCall(cudaMalloc((void**)&choiceinfo_device, ph_size));
CudaSafeCall(cudaMalloc((void**)&probs, ph_size));
cudaMemcpy(distances_device, distances, dist_size, cudaMemcpyHostToDevice);
cudaMemcpy(costs_device, costs, dist_size, cudaMemcpyHostToDevice);
for (int i = 0; i < n_cities; i++)
for (int j = 0; j < n_cities; j++)
pheromones[index(n_cities, i, j)] = initial_pheromone;
for (int iteration = 0; iteration < n_iterations; iteration++)
{
for (int i = 0; i < n_ants; i++)
for (int j = 0; j < n_cities; j++)
tours[index(n_cities, i, j)] = Infinity;
for (int i = 0; i < n_ants; i++)
for (int j = 0; j < n_cities; j++)
visited[index(n_cities, i, j)] = 0;
for (int i = 0; i < n_cities; i++)
{
for (int j = 0; j < n_cities; j++)
{
double edge_pherom = pheromones[index(n_cities, i, j)];
double edge_weight = distances[index(n_cities, i, j)];
double cost_weight = costs[index(n_cities, i, j)];
double prob = 0.0f;
if (edge_weight != 0.0f)
{
prob = pow(edge_pherom, ALFA)*pow(1 / edge_weight, BETA)*pow(1 / cost_weight, GAMA);
}
else
{
prob = pow(edge_pherom, ALFA)*pow(Infinity, BETA);
}
choiceinfo[index(n_cities, i, j)] = prob;
}
}
cudaMemcpy(choiceinfo_device, choiceinfo, ph_size, cudaMemcpyHostToDevice);
for (int ant = 0; ant < n_ants; ant++)
{
int step = 0;
int init = rand() % n_cities;
tours[index(n_cities, ant, step)] = init;
visited[index(n_cities, ant, init)] = 1;
}
cudaMemcpy(visited_device, visited, tours_size, cudaMemcpyHostToDevice);
cudaMemcpy(tours_device, tours, tours_size, cudaMemcpyHostToDevice);
cuda_path_traverse << < 1,n_cities >> > (tours_device, visited_device, choiceinfo_device, probs, n_cities);
CudaCheckError();
cudaMemcpy(tours, tours_device, tours_size, cudaMemcpyDeviceToHost);
cudaMemcpy(visited, visited_device, tours_size, cudaMemcpyDeviceToHost);
evaporate(pheromones, n_cities);
int *best = optimal_solution(tours, distances, costs, n_ants, n_cities);
pheromone_update(pheromones, distances,costs, best, n_cities);
}
cudaFree(distances_device);
cudaFree(tours_device);
cudaFree(visited_device);
cudaFree(choiceinfo_device);
cudaFree(probs);
int *best = optimal_solution(tours, distances,costs, n_ants, n_cities);
return best;
} |
b5a3a3e55d18c21403cf068e2e49ad31f69a68c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Arman Pazouki, Milad Rakhsha
// =============================================================================
//
// Base class for processing proximity in fsi system.//
// =============================================================================
#include <thrust/sort.h>
#include "chrono_fsi/physics/ChCollisionSystemFsi.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
#include "chrono_fsi/utils/ChUtilsDevice.cuh"
namespace chrono {
namespace fsi {
/**
* @brief calcHashD
* @details
* 1. Get particle index. Determine by the block and thread we are
* in.
* 2. From x,y,z position determine which bin it is in.
* 3. Calculate hash from bin index.
* 4. Store hash and particle index associated with it.
*
* @param gridMarkerHashD
* @param gridMarkerIndexD
* @param posRad
* @param numAllMarkers
*/
/// calcHashD :
/// 1. Get particle index.Determine by the block and thread we are in.
/// 2. From x, y, z position determine which bin it is in.
/// 3. Calculate hash from bin index.
/// 4. Store hash and particle index associated with it.
__global__ void calcHashD(
uint* gridMarkerHashD, ///< gridMarkerHash Store marker hash here
uint* gridMarkerIndexD, ///< gridMarkerIndex Store marker index here
Real4* posRad, ///< posRad Vector containing the positions of all particles, including boundary particles
const size_t numAllMarkers, ///< Total number of markers (fluid + boundary)
volatile bool* isErrorD) {
/* Calculate the index of where the particle is stored in posRad. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers)
return;
Real3 p = mR3(posRad[index]);
if (!(isfinite(p.x) && isfinite(p.y) && isfinite(p.z))) {
printf(
"Error! particle position is NAN: thrown from "
"SDKCollisionSystem.cu, calcHashD !\n");
*isErrorD = true;
return;
}
/* Check particle is inside the domain. */
Real3 boxCorner = paramsD.worldOrigin - mR3(40 * paramsD.HSML);
if (p.x < boxCorner.x || p.y < boxCorner.y || p.z < boxCorner.z) {
printf(
"Out of Min Boundary, point %f %f %f, boundary min: %f %f %f. "
"Thrown from SDKCollisionSystem.cu, calcHashD !\n",
p.x, p.y, p.z, boxCorner.x, boxCorner.y, boxCorner.z);
*isErrorD = true;
return;
}
boxCorner = paramsD.worldOrigin + paramsD.boxDims + mR3(40 * paramsD.HSML);
if (p.x > boxCorner.x || p.y > boxCorner.y || p.z > boxCorner.z) {
printf(
"Out of max Boundary, point %f %f %f, boundary max: %f %f %f. "
"Thrown from SDKCollisionSystem.cu, calcHashD !\n",
p.x, p.y, p.z, boxCorner.x, boxCorner.y, boxCorner.z);
*isErrorD = true;
return;
}
/* Get x,y,z bin index in grid */
int3 gridPos = calcGridPos(p);
/* Calculate a hash from the bin index */
uint hash = calcGridHash(gridPos);
/* Store grid hash */
gridMarkerHashD[index] = hash;
/* Store particle index associated to the hash we stored in gridMarkerHashD */
gridMarkerIndexD[index] = index;
}
/**
* @brief reorderDataAndFindCellStartD
* @details See SDKCollisionSystem.cuh for more info
*/
__global__ void reorderDataAndFindCellStartD(uint* cellStartD, // output: cell start index
uint* cellEndD, // output: cell end index
Real4* sortedPosRadD, // output: sorted positions
Real3* sortedVelMasD, // output: sorted velocities
Real4* sortedRhoPreMuD,
Real3* sortedTauXxYyZzD, // output: sorted shear stress xxyyzz
                                             Real3* sortedTauXyXzYzD,  // output: sorted shear stress xy,xz,yz
                                             Real3* tauXxYyZzD,        // input: original shear stress xx,yy,zz
                                             Real3* tauXyXzYzD,        // input: original shear stress xy,xz,yz
uint* gridMarkerHashD, // input: sorted grid hashes
uint* gridMarkerIndexD, // input: sorted particle indices
uint* mapOriginalToSorted, // mapOriginalToSorted[originalIndex] =
// originalIndex
                                             Real4* posRadD,  // input: original (unsorted) position array
                                             Real3* velMasD,  // input: original (unsorted) velocity array
Real4* rhoPresMuD,
const size_t numAllMarkers) {
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
/* Get the particle index the current thread is supposed to be looking at. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
uint hash;
/* handle case when no. of particles not multiple of block size */
if (index < numAllMarkers) {
hash = gridMarkerHashD[index];
/* Load hash data into shared memory so that we can look at neighboring
* particle's hash
* value without loading two hash values per thread
*/
sharedHash[threadIdx.x + 1] = hash;
if (index > 0 && threadIdx.x == 0) {
/* first thread in block must load neighbor particle hash */
sharedHash[0] = gridMarkerHashD[index - 1];
}
}
__syncthreads();
if (index < numAllMarkers) {
/* If this particle has a different cell index to the previous particle then
* it must be
* the first particle in the cell, so store the index of this particle in
* the cell. As it
* isn't the first particle, it must also be the cell end of the previous
* particle's cell
*/
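        /* Worked example (illustrative): if the sorted hashes are [4, 4, 4, 7, 7, 9],
         * the thread for index 0 sets cellStartD[4] = 0, the thread for index 3 sets
         * cellStartD[7] = 3 and cellEndD[4] = 3, the thread for index 5 sets
         * cellStartD[9] = 5 and cellEndD[7] = 5, and the last-particle branch below
         * sets cellEndD[9] = 6.
         */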
if (index == 0 || hash != sharedHash[threadIdx.x]) {
cellStartD[hash] = index;
if (index > 0)
cellEndD[sharedHash[threadIdx.x]] = index;
}
if (index == numAllMarkers - 1) {
cellEndD[hash] = index + 1;
}
/* Now use the sorted index to reorder the pos and vel data */
uint originalIndex = gridMarkerIndexD[index]; // map sorted to original
mapOriginalToSorted[index] = index; // will be sorted outside. Alternatively, you could have
// mapOriginalToSorted[originalIndex] = index; without need to sort. But
// that
// is not thread safe
Real3 posRad = mR3(posRadD[originalIndex]); // macro does either global read or
// texture fetch
Real3 velMas = velMasD[originalIndex]; // see particles_kernel.cuh
Real4 rhoPreMu = rhoPresMuD[originalIndex];
Real3 tauXxYyZz = tauXxYyZzD[originalIndex];
Real3 tauXyXzYz = tauXyXzYzD[originalIndex];
if (!(isfinite(posRad.x) && isfinite(posRad.y) && isfinite(posRad.z))) {
printf(
"Error! particle position is NAN: thrown from "
"SDKCollisionSystem.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(velMas.x) && isfinite(velMas.y) && isfinite(velMas.z))) {
printf(
"Error! particle velocity is NAN: thrown from "
"SDKCollisionSystem.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(rhoPreMu.x) && isfinite(rhoPreMu.y) && isfinite(rhoPreMu.z) && isfinite(rhoPreMu.w))) {
printf(
"Error! particle rhoPreMu is NAN: thrown from "
"SDKCollisionSystem.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(tauXxYyZz.x) && isfinite(tauXxYyZz.y) && isfinite(tauXxYyZz.z))) {
printf(
"Error! particle tauXxYyZz is NAN: thrown from "
"SDKCollisionSystem.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(tauXyXzYz.x) && isfinite(tauXyXzYz.y) && isfinite(tauXyXzYz.z))) {
printf(
"Error! particle tauXyXzYz is NAN: thrown from "
"SDKCollisionSystem.cu, reorderDataAndFindCellStartD !\n");
}
sortedPosRadD[index] = mR4(posRad, posRadD[originalIndex].w);
sortedVelMasD[index] = velMas;
sortedRhoPreMuD[index] = rhoPreMu;
sortedTauXxYyZzD[index] = tauXxYyZz;
sortedTauXyXzYzD[index] = tauXyXzYz;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
ChCollisionSystemFsi::ChCollisionSystemFsi(std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD,
std::shared_ptr<ProximityDataD> otherMarkersProximityD,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<NumberOfObjects> otherNumObjects)
: sortedSphMarkersD(otherSortedSphMarkersD),
markersProximityD(otherMarkersProximityD),
paramsH(otherParamsH),
numObjectsH(otherNumObjects) {
// printf("ChCollisionSystemFsi::ChCollisionSystemFsi constructor\n");
sphMarkersD = NULL;
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChCollisionSystemFsi::Finalize() {
hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(NumberOfObjects));
}
//--------------------------------------------------------------------------------------------------------------------------------
ChCollisionSystemFsi::~ChCollisionSystemFsi() {
// TODO
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChCollisionSystemFsi::calcHash() {
if (!(markersProximityD->gridMarkerHashD.size() == numObjectsH->numAllMarkers &&
markersProximityD->gridMarkerIndexD.size() == numObjectsH->numAllMarkers)) {
printf(
"mError! calcHash!, gridMarkerHashD.size() %zu "
"gridMarkerIndexD.size() %zu numObjectsH->numAllMarkers %zu \n",
markersProximityD->gridMarkerHashD.size(), markersProximityD->gridMarkerIndexD.size(),
numObjectsH->numAllMarkers);
throw std::runtime_error("Error! size error, calcHash!");
}
// printf("Neighbor search on numObjectsH->numAllMarkers=%d makers\n", numObjectsH->numAllMarkers);
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
hipMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
//------------------------------------------------------------------------
/* Is there a need to optimize the number of threads used at once? */
uint numThreads, numBlocks;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
/* Execute Kernel */
hipLaunchKernelGGL(( calcHashD), dim3(numBlocks), dim3(numThreads), 0, 0, U1CAST(markersProximityD->gridMarkerHashD),
U1CAST(markersProximityD->gridMarkerIndexD), mR4CAST(sphMarkersD->posRadD),
numObjectsH->numAllMarkers, isErrorD);
/* Check for errors in kernel execution */
hipDeviceSynchronize();
cudaCheckError();
//------------------------------------------------------------------------
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed in calcHashD!\n");
}
hipFree(isErrorD);
free(isErrorH);
}
void ChCollisionSystemFsi::ResetCellSize(int s) {
markersProximityD->cellStartD.resize(s);
markersProximityD->cellEndD.resize(s);
}
void ChCollisionSystemFsi::reorderDataAndFindCellStart() {
int3 cellsDim = paramsH->gridSize;
int numCells = cellsDim.x * cellsDim.y * cellsDim.z;
if (!(markersProximityD->cellStartD.size() == numCells && markersProximityD->cellEndD.size() == numCells)) {
throw std::runtime_error("Error! size error, reorderDataAndFindCellStart!\n");
}
thrust::fill(markersProximityD->cellStartD.begin(), markersProximityD->cellStartD.end(), 0);
thrust::fill(markersProximityD->cellEndD.begin(), markersProximityD->cellEndD.end(), 0);
uint numThreads, numBlocks;
computeGridSize((uint)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); //?$ 256 is blockSize
uint smemSize = sizeof(uint) * (numThreads + 1);
hipLaunchKernelGGL(( reorderDataAndFindCellStartD), dim3(numBlocks), dim3(numThreads), smemSize, 0,
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), mR4CAST(sortedSphMarkersD->posRadD),
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD),
mR3CAST(sortedSphMarkersD->tauXxYyZzD), mR3CAST(sortedSphMarkersD->tauXyXzYzD),
mR3CAST(sphMarkersD->tauXxYyZzD), mR3CAST(sphMarkersD->tauXyXzYzD),
U1CAST(markersProximityD->gridMarkerHashD), U1CAST(markersProximityD->gridMarkerIndexD),
U1CAST(markersProximityD->mapOriginalToSorted), mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD),
mR4CAST(sphMarkersD->rhoPresMuD), numObjectsH->numAllMarkers);
hipDeviceSynchronize();
cudaCheckError();
// unroll sorted index to have the location of original particles in the
// sorted arrays
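    // Worked example (illustrative): if gridMarkerIndexD (sorted -> original) is [2, 0, 1],
    // mapOriginalToSorted starts as [0, 1, 2]; sorting it with gridMarkerIndexD as keys
    // yields [1, 2, 0], i.e. original particle 0 now lives at sorted slot 1, particle 1
    // at slot 2, and particle 2 at slot 0.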
thrust::device_vector<uint> dummyIndex = markersProximityD->gridMarkerIndexD;
thrust::sort_by_key(dummyIndex.begin(), dummyIndex.end(), markersProximityD->mapOriginalToSorted.begin());
dummyIndex.clear();
}
void ChCollisionSystemFsi::ArrangeData(std::shared_ptr<SphMarkerDataD> otherSphMarkersD) {
sphMarkersD = otherSphMarkersD;
int3 cellsDim = paramsH->gridSize;
int numCells = cellsDim.x * cellsDim.y * cellsDim.z;
ResetCellSize(numCells);
calcHash();
thrust::sort_by_key(markersProximityD->gridMarkerHashD.begin(), markersProximityD->gridMarkerHashD.end(),
markersProximityD->gridMarkerIndexD.begin());
reorderDataAndFindCellStart();
}
} // end namespace fsi
} // end namespace chrono
| b5a3a3e55d18c21403cf068e2e49ad31f69a68c3.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Arman Pazouki, Milad Rakhsha
// =============================================================================
//
// Base class for processing proximity in fsi system.//
// =============================================================================
#include <thrust/sort.h>
#include "chrono_fsi/physics/ChCollisionSystemFsi.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
#include "chrono_fsi/utils/ChUtilsDevice.cuh"
namespace chrono {
namespace fsi {
/**
* @brief calcHashD
* @details
* 1. Get particle index. Determine by the block and thread we are
* in.
* 2. From x,y,z position determine which bin it is in.
* 3. Calculate hash from bin index.
* 4. Store hash and particle index associated with it.
*
* @param gridMarkerHashD
* @param gridMarkerIndexD
* @param posRad
* @param numAllMarkers
*/
/// calcHashD :
/// 1. Get particle index.Determine by the block and thread we are in.
/// 2. From x, y, z position determine which bin it is in.
/// 3. Calculate hash from bin index.
/// 4. Store hash and particle index associated with it.
__global__ void calcHashD(
uint* gridMarkerHashD, ///< gridMarkerHash Store marker hash here
uint* gridMarkerIndexD, ///< gridMarkerIndex Store marker index here
Real4* posRad, ///< posRad Vector containing the positions of all particles, including boundary particles
const size_t numAllMarkers, ///< Total number of markers (fluid + boundary)
volatile bool* isErrorD) {
/* Calculate the index of where the particle is stored in posRad. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers)
return;
Real3 p = mR3(posRad[index]);
if (!(isfinite(p.x) && isfinite(p.y) && isfinite(p.z))) {
printf(
"Error! particle position is NAN: thrown from "
"SDKCollisionSystem.cu, calcHashD !\n");
*isErrorD = true;
return;
}
/* Check particle is inside the domain. */
Real3 boxCorner = paramsD.worldOrigin - mR3(40 * paramsD.HSML);
if (p.x < boxCorner.x || p.y < boxCorner.y || p.z < boxCorner.z) {
printf(
"Out of Min Boundary, point %f %f %f, boundary min: %f %f %f. "
"Thrown from SDKCollisionSystem.cu, calcHashD !\n",
p.x, p.y, p.z, boxCorner.x, boxCorner.y, boxCorner.z);
*isErrorD = true;
return;
}
boxCorner = paramsD.worldOrigin + paramsD.boxDims + mR3(40 * paramsD.HSML);
if (p.x > boxCorner.x || p.y > boxCorner.y || p.z > boxCorner.z) {
printf(
"Out of max Boundary, point %f %f %f, boundary max: %f %f %f. "
"Thrown from SDKCollisionSystem.cu, calcHashD !\n",
p.x, p.y, p.z, boxCorner.x, boxCorner.y, boxCorner.z);
*isErrorD = true;
return;
}
/* Get x,y,z bin index in grid */
int3 gridPos = calcGridPos(p);
/* Calculate a hash from the bin index */
uint hash = calcGridHash(gridPos);
/* Store grid hash */
gridMarkerHashD[index] = hash;
/* Store particle index associated to the hash we stored in gridMarkerHashD */
gridMarkerIndexD[index] = index;
}
/**
* @brief reorderDataAndFindCellStartD
* @details See SDKCollisionSystem.cuh for more info
*/
__global__ void reorderDataAndFindCellStartD(uint* cellStartD, // output: cell start index
uint* cellEndD, // output: cell end index
Real4* sortedPosRadD, // output: sorted positions
Real3* sortedVelMasD, // output: sorted velocities
Real4* sortedRhoPreMuD,
Real3* sortedTauXxYyZzD, // output: sorted shear stress xxyyzz
                                             Real3* sortedTauXyXzYzD,  // output: sorted shear stress xy,xz,yz
                                             Real3* tauXxYyZzD,        // input: original shear stress xx,yy,zz
                                             Real3* tauXyXzYzD,        // input: original shear stress xy,xz,yz
uint* gridMarkerHashD, // input: sorted grid hashes
uint* gridMarkerIndexD, // input: sorted particle indices
uint* mapOriginalToSorted, // mapOriginalToSorted[originalIndex] =
// originalIndex
                                             Real4* posRadD,  // input: original (unsorted) position array
                                             Real3* velMasD,  // input: original (unsorted) velocity array
Real4* rhoPresMuD,
const size_t numAllMarkers) {
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
/* Get the particle index the current thread is supposed to be looking at. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
uint hash;
/* handle case when no. of particles not multiple of block size */
if (index < numAllMarkers) {
hash = gridMarkerHashD[index];
/* Load hash data into shared memory so that we can look at neighboring
* particle's hash
* value without loading two hash values per thread
*/
sharedHash[threadIdx.x + 1] = hash;
if (index > 0 && threadIdx.x == 0) {
/* first thread in block must load neighbor particle hash */
sharedHash[0] = gridMarkerHashD[index - 1];
}
}
__syncthreads();
if (index < numAllMarkers) {
/* If this particle has a different cell index to the previous particle then
* it must be
* the first particle in the cell, so store the index of this particle in
* the cell. As it
* isn't the first particle, it must also be the cell end of the previous
* particle's cell
*/
if (index == 0 || hash != sharedHash[threadIdx.x]) {
cellStartD[hash] = index;
if (index > 0)
cellEndD[sharedHash[threadIdx.x]] = index;
}
if (index == numAllMarkers - 1) {
cellEndD[hash] = index + 1;
}
/* Now use the sorted index to reorder the pos and vel data */
uint originalIndex = gridMarkerIndexD[index]; // map sorted to original
mapOriginalToSorted[index] = index; // will be sorted outside. Alternatively, you could have
// mapOriginalToSorted[originalIndex] = index; without need to sort. But
// that
// is not thread safe
Real3 posRad = mR3(posRadD[originalIndex]); // macro does either global read or
// texture fetch
Real3 velMas = velMasD[originalIndex]; // see particles_kernel.cuh
Real4 rhoPreMu = rhoPresMuD[originalIndex];
Real3 tauXxYyZz = tauXxYyZzD[originalIndex];
Real3 tauXyXzYz = tauXyXzYzD[originalIndex];
if (!(isfinite(posRad.x) && isfinite(posRad.y) && isfinite(posRad.z))) {
printf(
"Error! particle position is NAN: thrown from "
"SDKCollisionSystem.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(velMas.x) && isfinite(velMas.y) && isfinite(velMas.z))) {
printf(
"Error! particle velocity is NAN: thrown from "
"SDKCollisionSystem.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(rhoPreMu.x) && isfinite(rhoPreMu.y) && isfinite(rhoPreMu.z) && isfinite(rhoPreMu.w))) {
printf(
"Error! particle rhoPreMu is NAN: thrown from "
"SDKCollisionSystem.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(tauXxYyZz.x) && isfinite(tauXxYyZz.y) && isfinite(tauXxYyZz.z))) {
printf(
"Error! particle tauXxYyZz is NAN: thrown from "
"SDKCollisionSystem.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(tauXyXzYz.x) && isfinite(tauXyXzYz.y) && isfinite(tauXyXzYz.z))) {
printf(
"Error! particle tauXyXzYz is NAN: thrown from "
"SDKCollisionSystem.cu, reorderDataAndFindCellStartD !\n");
}
sortedPosRadD[index] = mR4(posRad, posRadD[originalIndex].w);
sortedVelMasD[index] = velMas;
sortedRhoPreMuD[index] = rhoPreMu;
sortedTauXxYyZzD[index] = tauXxYyZz;
sortedTauXyXzYzD[index] = tauXyXzYz;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
ChCollisionSystemFsi::ChCollisionSystemFsi(std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD,
std::shared_ptr<ProximityDataD> otherMarkersProximityD,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<NumberOfObjects> otherNumObjects)
: sortedSphMarkersD(otherSortedSphMarkersD),
markersProximityD(otherMarkersProximityD),
paramsH(otherParamsH),
numObjectsH(otherNumObjects) {
// printf("ChCollisionSystemFsi::ChCollisionSystemFsi constructor\n");
sphMarkersD = NULL;
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChCollisionSystemFsi::Finalize() {
cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(NumberOfObjects));
}
//--------------------------------------------------------------------------------------------------------------------------------
ChCollisionSystemFsi::~ChCollisionSystemFsi() {
// TODO
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChCollisionSystemFsi::calcHash() {
if (!(markersProximityD->gridMarkerHashD.size() == numObjectsH->numAllMarkers &&
markersProximityD->gridMarkerIndexD.size() == numObjectsH->numAllMarkers)) {
printf(
"mError! calcHash!, gridMarkerHashD.size() %zu "
"gridMarkerIndexD.size() %zu numObjectsH->numAllMarkers %zu \n",
markersProximityD->gridMarkerHashD.size(), markersProximityD->gridMarkerIndexD.size(),
numObjectsH->numAllMarkers);
throw std::runtime_error("Error! size error, calcHash!");
}
// printf("Neighbor search on numObjectsH->numAllMarkers=%d makers\n", numObjectsH->numAllMarkers);
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
//------------------------------------------------------------------------
/* Is there a need to optimize the number of threads used at once? */
uint numThreads, numBlocks;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
/* Execute Kernel */
calcHashD<<<numBlocks, numThreads>>>(U1CAST(markersProximityD->gridMarkerHashD),
U1CAST(markersProximityD->gridMarkerIndexD), mR4CAST(sphMarkersD->posRadD),
numObjectsH->numAllMarkers, isErrorD);
/* Check for errors in kernel execution */
cudaDeviceSynchronize();
cudaCheckError();
//------------------------------------------------------------------------
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed in calcHashD!\n");
}
cudaFree(isErrorD);
free(isErrorH);
}
void ChCollisionSystemFsi::ResetCellSize(int s) {
markersProximityD->cellStartD.resize(s);
markersProximityD->cellEndD.resize(s);
}
void ChCollisionSystemFsi::reorderDataAndFindCellStart() {
int3 cellsDim = paramsH->gridSize;
int numCells = cellsDim.x * cellsDim.y * cellsDim.z;
if (!(markersProximityD->cellStartD.size() == numCells && markersProximityD->cellEndD.size() == numCells)) {
throw std::runtime_error("Error! size error, reorderDataAndFindCellStart!\n");
}
thrust::fill(markersProximityD->cellStartD.begin(), markersProximityD->cellStartD.end(), 0);
thrust::fill(markersProximityD->cellEndD.begin(), markersProximityD->cellEndD.end(), 0);
uint numThreads, numBlocks;
computeGridSize((uint)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); //?$ 256 is blockSize
uint smemSize = sizeof(uint) * (numThreads + 1);
reorderDataAndFindCellStartD<<<numBlocks, numThreads, smemSize>>>(
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), mR4CAST(sortedSphMarkersD->posRadD),
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD),
mR3CAST(sortedSphMarkersD->tauXxYyZzD), mR3CAST(sortedSphMarkersD->tauXyXzYzD),
mR3CAST(sphMarkersD->tauXxYyZzD), mR3CAST(sphMarkersD->tauXyXzYzD),
U1CAST(markersProximityD->gridMarkerHashD), U1CAST(markersProximityD->gridMarkerIndexD),
U1CAST(markersProximityD->mapOriginalToSorted), mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD),
mR4CAST(sphMarkersD->rhoPresMuD), numObjectsH->numAllMarkers);
cudaDeviceSynchronize();
cudaCheckError();
// unroll sorted index to have the location of original particles in the
// sorted arrays
thrust::device_vector<uint> dummyIndex = markersProximityD->gridMarkerIndexD;
thrust::sort_by_key(dummyIndex.begin(), dummyIndex.end(), markersProximityD->mapOriginalToSorted.begin());
dummyIndex.clear();
}
void ChCollisionSystemFsi::ArrangeData(std::shared_ptr<SphMarkerDataD> otherSphMarkersD) {
sphMarkersD = otherSphMarkersD;
int3 cellsDim = paramsH->gridSize;
int numCells = cellsDim.x * cellsDim.y * cellsDim.z;
ResetCellSize(numCells);
calcHash();
thrust::sort_by_key(markersProximityD->gridMarkerHashD.begin(), markersProximityD->gridMarkerHashD.end(),
markersProximityD->gridMarkerIndexD.begin());
reorderDataAndFindCellStart();
}
} // end namespace fsi
} // end namespace chrono
|
109468a1268058349e1d800eb683fbdbb5cd3e6f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
int numberOfSMs;
hipGetDevice(&deviceId);
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
float *h_c;
hipMalloc(&a, size);
hipMalloc(&b, size);
hipMalloc(&c, size);
hipHostMalloc(&h_c, size);
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
hipError_t addVectorsErr;
hipError_t asyncErr;
/*
 * Create 3 streams to initialize the 3 data vectors in parallel.
*/
hipStream_t stream1, stream2, stream3;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
hipStreamCreate(&stream3);
/*
* Give each `initWith` launch its own non-standard stream.
*/
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, stream1, 3, a, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, stream2, 4, b, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, stream3, 0, c, N);
hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);
hipMemcpy(h_c, c, size, hipMemcpyDeviceToHost);
addVectorsErr = hipGetLastError();
if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
checkElementsAre(7, h_c, N);
/*
* Destroy streams when they are no longer needed.
*/
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
hipStreamDestroy(stream3);
hipFree(a);
hipFree(b);
hipFree(c);
hipHostFree(h_c);
}
| 109468a1268058349e1d800eb683fbdbb5cd3e6f.cu | #include <stdio.h>
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
int numberOfSMs;
cudaGetDevice(&deviceId);
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
float *h_c;
cudaMalloc(&a, size);
cudaMalloc(&b, size);
cudaMalloc(&c, size);
cudaMallocHost(&h_c, size);
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
cudaError_t addVectorsErr;
cudaError_t asyncErr;
/*
 * Create 3 streams to initialize the 3 data vectors in parallel.
*/
cudaStream_t stream1, stream2, stream3;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
/*
* Give each `initWith` launch its own non-standard stream.
*/
initWith<<<numberOfBlocks, threadsPerBlock, 0, stream1>>>(3, a, N);
initWith<<<numberOfBlocks, threadsPerBlock, 0, stream2>>>(4, b, N);
initWith<<<numberOfBlocks, threadsPerBlock, 0, stream3>>>(0, c, N);
addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
cudaMemcpy(h_c, c, size, cudaMemcpyDeviceToHost);
addVectorsErr = cudaGetLastError();
if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
checkElementsAre(7, h_c, N);
/*
* Destroy streams when they are no longer needed.
*/
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
cudaStreamDestroy(stream3);
cudaFree(a);
cudaFree(b);
cudaFree(c);
cudaFreeHost(h_c);
}
|
8bd0574e6f267fc4797e65f300f298dbf3b8713c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////
/// INCLUDES
////////////////////////////////////////////////////////////
#include "TensorCUDA.hpp"
#include <stdio.h>
#include "Macros.hpp"
#include "ensure.hpp"
#include <memory>
////////////////////////////////////////////////////////////
/// NAMESPACE AI
////////////////////////////////////////////////////////////
namespace ai
{
////////////////////////////////////////////////////////////
/// ERROR HANDLING
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
void HandleError(hipError_t err, const char* file, int line)
{
if (err != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__))
////////////////////////////////////////////////////////////
/// UTIL
////////////////////////////////////////////////////////////
//Get nearest lower power of two
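//  e.g. low_pow2(300) == 256, low_pow2(256) == 256, low_pow2(1) == 1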
unsigned int low_pow2 (unsigned int x)
{
x = x | (x >> 1);
x = x | (x >> 2);
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >> 16);
return x - (x >> 1);
}
//Get nearest higher power of two
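//  e.g. high_pow2(300) == 512, high_pow2(256) == 256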
unsigned long high_pow2(unsigned long v)
{
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
return v;
}
////////////////////////////////////////////////////////////
/// KERNELS
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
__global__ void knl_tensor_fill(float* t, float val, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
t[tid] = val;
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_fill_random(float* t, float mean, float dev, unsigned int seed, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tseed = seed * (tid + 1);
while (tid < size) {
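            // Marsaglia xorshift32 update (shifts 13, 17, 5), used as a cheap per-thread PRNG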
tseed ^= tseed << 13;
tseed ^= tseed >> 17;
tseed ^= tseed << 5;
t[tid] = mean - dev + ((float)tseed / UINT_MAX) * dev * 2.f;
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_scale(float* t, float factor, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
t[tid] *= factor;
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_diff(float* t1, float* t2, float* tout, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
tout[tid] = t1[tid] - t2[tid];
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_add(float* t1, float* t2, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
t1[tid] += t2[tid];
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_copy(float* t1, float* t2, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
t1[tid] = t2[tid];
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
/// TENSOR GPU
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA()
{
_data = NULL;
_size = 0;
_depth = _height = _width = 0;
_owner = false;
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA(const TensorCUDA<T>& t)
{
point(t);
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA(int width)
{
_width = width;
_height = 1;
_depth = 1;
_size = _width * _depth * _height;
_owner = true;
HANDLE_ERROR( hipMalloc( &_data, _size * sizeof(T) ));
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA(int width, int height)
{
_width = width;
_height = height;
_depth = 1;
_size = _width * _depth * _height;
_owner = true;
HANDLE_ERROR( hipMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA(int width, int height, int depth)
{
_width = width;
_height = height;
_depth = depth;
_size = _width * _depth * _height;
_owner = true;
HANDLE_ERROR( hipMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::~TensorCUDA()
{
clear();
}
////////////////////////////////////////////////////////////
template <typename T>
void TensorCUDA<T>::load(ai::IOData& data, std::string dataname)
{
clear();
IOData* node_width = data.findNode(dataname + "_width");
IOData* node_height = data.findNode(dataname + "_height");
IOData* node_depth = data.findNode(dataname + "_depth");
IOData* node_data = data.findNode(dataname + "_data");
ensure(node_width != NULL);
ensure(node_height != NULL);
ensure(node_depth != NULL);
ensure(node_data != NULL);
node_width->get(_width);
node_height->get(_height);
node_depth->get(_depth);
_size = _width * _height * _depth;
std::unique_ptr<T> tmp = std::unique_ptr<T>(new T[_size]);
node_data->get(reinterpret_cast<char*>(&tmp.get()[0]));
HANDLE_ERROR( hipMalloc( &_data, _size * sizeof(T)) );
copyToDevice(&tmp.get()[0], _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::load(std::ifstream& file)
{
clear();
file.read(reinterpret_cast<char*>(&_size), sizeof(_size));
file.read(reinterpret_cast<char*>(&_width), sizeof(_width));
file.read(reinterpret_cast<char*>(&_height), sizeof(_height));
file.read(reinterpret_cast<char*>(&_depth), sizeof(_depth));
_owner = true;
std::unique_ptr<T> tmp = std::unique_ptr<T>(new T[_size]);
file.read(reinterpret_cast<char*>(&tmp.get()[0]), sizeof(T) * _size);
HANDLE_ERROR( hipMalloc( &_data, _size * sizeof(T)) );
copyToDevice(&tmp.get()[0], _size);
}
////////////////////////////////////////////////////////////
template <typename T>
void TensorCUDA<T>::save(ai::IOData& data, std::string dataname)
{
std::unique_ptr<T> tmp_safe = std::unique_ptr<T>(new T[_size]);
T* tmp = tmp_safe.get();
copyToHost(&tmp[0], _size);
data.pushNode(dataname + "_width", _width);
data.pushNode(dataname + "_height", _height);
data.pushNode(dataname + "_depth", _depth);
data.pushNode(dataname + "_data", reinterpret_cast<char*>(&tmp[0]), sizeof(T) * _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::save(std::ofstream& file)
{
std::unique_ptr<T> tmp_safe = std::unique_ptr<T>(new T[_size]);
T* tmp = tmp_safe.get();
copyToHost(&tmp[0], _size);
file.write(reinterpret_cast<char*>(&_size), sizeof(_size));
file.write(reinterpret_cast<char*>(&_width), sizeof(_width));
file.write(reinterpret_cast<char*>(&_height), sizeof(_height));
file.write(reinterpret_cast<char*>(&_depth), sizeof(_depth));
file.write(reinterpret_cast<char*>(&tmp[0]), sizeof(T) * _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::setshape(const int width)
{
ensure(width > 0);
clear();
_width = width;
_height = 1;
_depth = 1;
_size = _width * _height * _depth;
_owner = true;
HANDLE_ERROR( hipMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::setshape(const int width, const int height)
{
ensure(width > 0 && height > 0);
clear();
_width = width;
_height = height;
_depth = 1;
_size = _width * _height * _depth;
_owner = true;
HANDLE_ERROR( hipMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::setshape(const int width, const int height, const int depth)
{
ensure(width > 0 && height > 0 && depth > 0);
clear();
_width = width;
_height = height;
_depth = depth;
_size = _width * _height * _depth;
_owner = true;
HANDLE_ERROR( hipMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::setshape(Tensor<T>& host_tensor)
{
clear();
_width = host_tensor.width();
_height = host_tensor.height();
_depth = host_tensor.depth();
_size = _width * _height * _depth;
_owner = true;
HANDLE_ERROR( hipMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::point(const TensorCUDA& t)
{
clear();
_data = t._data;
_size = t._width * t._height * t._depth;
_width = t._width;
_height = t._height;
_depth = t._depth;
_owner = false;
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::point(const TensorCUDA& t, const unsigned int offset_d)
{
clear();
_data = &t._data[offset_d * t._width * t._height];
_size = t._width * t._height;
_width = t._width;
_height = t._height;
_depth = 1;
_owner = false;
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::point(const TensorCUDA& t, const unsigned int offset_d, const unsigned int offset_y)
{
clear();
_data = &t._data[offset_d * t._width * t._height + offset_y * t._width];
_size = t._width;
_width = t._width;
_height = 1;
_depth = 1;
_owner = false;
}
//////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::clear()
{
if (_data != NULL && _size != 0 && _owner == true)
HANDLE_ERROR( hipFree(_data) );
_data = NULL;
_size = 0;
_width = 0;
_height = 0;
_depth = 0;
_owner = false;
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::fill(T val)
{
std::unique_ptr<T> tmp_safe = std::unique_ptr<T>(new T[_size]);
T* temp = tmp_safe.get();
for (int i = 0; i < _size; i++) temp[i] = val;
copyToDevice(temp, _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::fill(float mean, float dev)
{
std::unique_ptr<T> tmp_safe = std::unique_ptr<T>(new T[_size]);
T* temp = tmp_safe.get();
for (int i = 0; i < _size; i++)
temp[i] = (T)( mean - dev + ((double)rand() / RAND_MAX) * dev * 2.f);
copyToDevice(temp, _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::copyToHost(T *arr, int size) const
{
ensure(size <= _size);
HANDLE_ERROR( hipMemcpy( arr, _data, size * sizeof(T), hipMemcpyDeviceToHost));
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::copyToDevice(const T *arr, int size)
{
ensure(size <= _size);
HANDLE_ERROR( hipMemcpy(_data, arr, size * sizeof(T), hipMemcpyHostToDevice));
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::copy(const TensorCUDA<T>& tensor)
{
    if (width() != tensor.width() || height() != tensor.height() || depth() != tensor.depth())
setshape((int)tensor.width(), (int)tensor.height(), (int)tensor.depth());
HANDLE_ERROR( hipMemcpy(_data, tensor.pointer(), tensor.size() * sizeof(T), hipMemcpyDeviceToDevice));
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T> TensorCUDA<T>::ptr(const int d)
{
TensorCUDA<T> t;
t.point(*this, d);
return t;
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T> TensorCUDA<T>::ptr(const int d, const int y)
{
TensorCUDA<T> t;
t.point(*this, d, y);
return t;
}
////////////////////////////////////////////////////////////
/// TYPE SPECIFIC FUNCTIONS
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
void TensorCUDA_float_fill(TensorCUDA_float& t, float val)
{
ensure(t.pointer() != NULL && t.size() != 0);
int _threads = min(low_pow2(t.size()), CUDA_MAX_THREADS);
int _blocks = min(_threads / t.size() + 1, CUDA_MAX_CORES);
hipLaunchKernelGGL(( knl_tensor_fill), dim3(_blocks), dim3(_threads), 0, 0, t.pointer(), val, t.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_fill(TensorCUDA_float& t, float mean, float dev)
{
ensure(t.pointer() != NULL && t.size() != 0);
int _threads = min(low_pow2(t.size()), CUDA_MAX_THREADS);
int _blocks = min(_threads / t.size() + 1, CUDA_MAX_CORES);
hipLaunchKernelGGL(( knl_tensor_fill_random), dim3(_blocks), dim3(_threads), 0, 0, t.pointer(), mean, dev, rand(), t.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_scale(TensorCUDA_float& t, float factor)
{
ensure(t.pointer() != NULL && t.size() != 0);
int _threads = min(low_pow2(t.size()), CUDA_MAX_THREADS);
int _blocks = min(_threads / t.size() + 1, CUDA_MAX_CORES);
hipLaunchKernelGGL(( knl_tensor_scale), dim3(_blocks), dim3(_threads), 0, 0, t.pointer(), factor, t.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_diff(TensorCUDA_float& t1, TensorCUDA_float& t2, TensorCUDA_float& tout)
{
ensure(tout.pointer() != NULL && t1.pointer() != NULL && t2.pointer() != NULL &&
(tout.size() == t1.size() && tout.size() == t2.size()));
int _threads = min(low_pow2(tout.size()), CUDA_MAX_THREADS);
int _blocks = min(_threads / tout.size() + 1, CUDA_MAX_CORES);
hipLaunchKernelGGL(( knl_tensor_diff), dim3(_blocks), dim3(_threads), 0, 0, t1.pointer(), t2.pointer(), tout.pointer(), tout.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_sum(TensorCUDA_float& t, TensorCUDA_float& tout)
{
ensure(tout.pointer() != NULL && t.pointer() != NULL && tout.size() == t.size());
int _threads = min(low_pow2(tout.size()), CUDA_MAX_THREADS);
int _blocks = min(_threads / tout.size() + 1, CUDA_MAX_CORES);
hipLaunchKernelGGL(( knl_tensor_add), dim3(_blocks), dim3(_threads), 0, 0, tout.pointer(), t.pointer(), tout.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_copy(TensorCUDA_float& t, TensorCUDA_float& tout)
{
ensure(tout.pointer() != NULL && t.pointer() != NULL && tout.size() == t.size());
int _threads = min(low_pow2(tout.size()), CUDA_MAX_THREADS);
int _blocks = min(_threads / tout.size() + 1, CUDA_MAX_CORES);
hipLaunchKernelGGL(( knl_tensor_copy), dim3(_blocks), dim3(_threads), 0, 0, tout.pointer(), t.pointer(), tout.size());
}
//Specialization
template < > void TensorCUDA<float*>::fill(float mean, float dev) { }
//Explicit instantiations
template class TensorCUDA<float>;
template class TensorCUDA<float*>;
template class TensorCUDA<int>;
////////////////////////////////////////////////////////////
} //namespace ai
| 8bd0574e6f267fc4797e65f300f298dbf3b8713c.cu | ////////////////////////////////////////////////////////////
/// INCLUDES
////////////////////////////////////////////////////////////
#include "TensorCUDA.hpp"
#include <stdio.h>
#include "Macros.hpp"
#include "ensure.hpp"
#include <memory>
////////////////////////////////////////////////////////////
/// NAMESPACE AI
////////////////////////////////////////////////////////////
namespace ai
{
////////////////////////////////////////////////////////////
/// ERROR HANDLING
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
void HandleError(cudaError_t err, const char* file, int line)
{
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__))
////////////////////////////////////////////////////////////
/// UTIL
////////////////////////////////////////////////////////////
//Get nearest lower power of two
unsigned int low_pow2 (unsigned int x)
{
x = x | (x >> 1);
x = x | (x >> 2);
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >> 16);
return x - (x >> 1);
}
//Get nearest higher power of two
unsigned long high_pow2(unsigned long v)
{
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
return v;
}
////////////////////////////////////////////////////////////
/// KERNELS
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
__global__ void knl_tensor_fill(float* t, float val, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
t[tid] = val;
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_fill_random(float* t, float mean, float dev, unsigned int seed, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tseed = seed * (tid + 1);
while (tid < size) {
tseed ^= tseed << 13;
tseed ^= tseed >> 17;
tseed ^= tseed << 5;
t[tid] = mean - dev + ((float)tseed / UINT_MAX) * dev * 2.f;
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_scale(float* t, float factor, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
t[tid] *= factor;
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_diff(float* t1, float* t2, float* tout, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
tout[tid] = t1[tid] - t2[tid];
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_add(float* t1, float* t2, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
t1[tid] += t2[tid];
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_copy(float* t1, float* t2, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
t1[tid] = t2[tid];
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
/// TENSOR GPU
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA()
{
_data = NULL;
_size = 0;
_depth = _height = _width = 0;
_owner = false;
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA(const TensorCUDA<T>& t)
{
point(t);
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA(int width)
{
_width = width;
_height = 1;
_depth = 1;
_size = _width * _depth * _height;
_owner = true;
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T) ));
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA(int width, int height)
{
_width = width;
_height = height;
_depth = 1;
_size = _width * _depth * _height;
_owner = true;
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA(int width, int height, int depth)
{
_width = width;
_height = height;
_depth = depth;
_size = _width * _depth * _height;
_owner = true;
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::~TensorCUDA()
{
clear();
}
////////////////////////////////////////////////////////////
template <typename T>
void TensorCUDA<T>::load(ai::IOData& data, std::string dataname)
{
clear();
IOData* node_width = data.findNode(dataname + "_width");
IOData* node_height = data.findNode(dataname + "_height");
IOData* node_depth = data.findNode(dataname + "_depth");
IOData* node_data = data.findNode(dataname + "_data");
ensure(node_width != NULL);
ensure(node_height != NULL);
ensure(node_depth != NULL);
ensure(node_data != NULL);
node_width->get(_width);
node_height->get(_height);
node_depth->get(_depth);
_size = _width * _height * _depth;
std::unique_ptr<T> tmp = std::unique_ptr<T>(new T[_size]);
node_data->get(reinterpret_cast<char*>(&tmp.get()[0]));
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
copyToDevice(&tmp.get()[0], _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::load(std::ifstream& file)
{
clear();
file.read(reinterpret_cast<char*>(&_size), sizeof(_size));
file.read(reinterpret_cast<char*>(&_width), sizeof(_width));
file.read(reinterpret_cast<char*>(&_height), sizeof(_height));
file.read(reinterpret_cast<char*>(&_depth), sizeof(_depth));
_owner = true;
std::unique_ptr<T> tmp = std::unique_ptr<T>(new T[_size]);
file.read(reinterpret_cast<char*>(&tmp.get()[0]), sizeof(T) * _size);
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
copyToDevice(&tmp.get()[0], _size);
}
////////////////////////////////////////////////////////////
template <typename T>
void TensorCUDA<T>::save(ai::IOData& data, std::string dataname)
{
std::unique_ptr<T> tmp_safe = std::unique_ptr<T>(new T[_size]);
T* tmp = tmp_safe.get();
copyToHost(&tmp[0], _size);
data.pushNode(dataname + "_width", _width);
data.pushNode(dataname + "_height", _height);
data.pushNode(dataname + "_depth", _depth);
data.pushNode(dataname + "_data", reinterpret_cast<char*>(&tmp[0]), sizeof(T) * _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::save(std::ofstream& file)
{
std::unique_ptr<T> tmp_safe = std::unique_ptr<T>(new T[_size]);
T* tmp = tmp_safe.get();
copyToHost(&tmp[0], _size);
file.write(reinterpret_cast<char*>(&_size), sizeof(_size));
file.write(reinterpret_cast<char*>(&_width), sizeof(_width));
file.write(reinterpret_cast<char*>(&_height), sizeof(_height));
file.write(reinterpret_cast<char*>(&_depth), sizeof(_depth));
file.write(reinterpret_cast<char*>(&tmp[0]), sizeof(T) * _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::setshape(const int width)
{
ensure(width > 0);
clear();
_width = width;
_height = 1;
_depth = 1;
_size = _width * _height * _depth;
_owner = true;
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::setshape(const int width, const int height)
{
ensure(width > 0 && height > 0);
clear();
_width = width;
_height = height;
_depth = 1;
_size = _width * _height * _depth;
_owner = true;
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::setshape(const int width, const int height, const int depth)
{
ensure(width > 0 && height > 0 && depth > 0);
clear();
_width = width;
_height = height;
_depth = depth;
_size = _width * _height * _depth;
_owner = true;
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::setshape(Tensor<T>& host_tensor)
{
clear();
_width = host_tensor.width();
_height = host_tensor.height();
_depth = host_tensor.depth();
_size = _width * _height * _depth;
_owner = true;
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::point(const TensorCUDA& t)
{
clear();
_data = t._data;
_size = t._width * t._height * t._depth;
_width = t._width;
_height = t._height;
_depth = t._depth;
_owner = false;
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::point(const TensorCUDA& t, const unsigned int offset_d)
{
clear();
_data = &t._data[offset_d * t._width * t._height];
_size = t._width * t._height;
_width = t._width;
_height = t._height;
_depth = 1;
_owner = false;
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::point(const TensorCUDA& t, const unsigned int offset_d, const unsigned int offset_y)
{
clear();
_data = &t._data[offset_d * t._width * t._height + offset_y * t._width];
_size = t._width;
_width = t._width;
_height = 1;
_depth = 1;
_owner = false;
}
//////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::clear()
{
if (_data != NULL && _size != 0 && _owner == true)
HANDLE_ERROR( cudaFree(_data) );
_data = NULL;
_size = 0;
_width = 0;
_height = 0;
_depth = 0;
_owner = false;
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::fill(T val)
{
std::unique_ptr<T> tmp_safe = std::unique_ptr<T>(new T[_size]);
T* temp = tmp_safe.get();
for (int i = 0; i < _size; i++) temp[i] = val;
copyToDevice(temp, _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::fill(float mean, float dev)
{
std::unique_ptr<T> tmp_safe = std::unique_ptr<T>(new T[_size]);
T* temp = tmp_safe.get();
for (int i = 0; i < _size; i++)
temp[i] = (T)( mean - dev + ((double)rand() / RAND_MAX) * dev * 2.f);
copyToDevice(temp, _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::copyToHost(T *arr, int size) const
{
ensure(size <= _size);
HANDLE_ERROR( cudaMemcpy( arr, _data, size * sizeof(T), cudaMemcpyDeviceToHost));
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::copyToDevice(const T *arr, int size)
{
ensure(size <= _size);
HANDLE_ERROR( cudaMemcpy(_data, arr, size * sizeof(T), cudaMemcpyHostToDevice));
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::copy(const TensorCUDA<T>& tensor)
{
    if (width() != tensor.width() || height() != tensor.height() || depth() != tensor.depth())
setshape((int)tensor.width(), (int)tensor.height(), (int)tensor.depth());
HANDLE_ERROR( cudaMemcpy(_data, tensor.pointer(), tensor.size() * sizeof(T), cudaMemcpyDeviceToDevice));
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T> TensorCUDA<T>::ptr(const int d)
{
TensorCUDA<T> t;
t.point(*this, d);
return t;
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T> TensorCUDA<T>::ptr(const int d, const int y)
{
TensorCUDA<T> t;
t.point(*this, d, y);
return t;
}
////////////////////////////////////////////////////////////
/// TYPE SPECIFIC FUNCTIONS
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
void TensorCUDA_float_fill(TensorCUDA_float& t, float val)
{
ensure(t.pointer() != NULL && t.size() != 0);
int _threads = min(low_pow2(t.size()), CUDA_MAX_THREADS);
int _blocks = min(_threads / t.size() + 1, CUDA_MAX_CORES);
knl_tensor_fill<<<_blocks, _threads>>>(t.pointer(), val, t.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_fill(TensorCUDA_float& t, float mean, float dev)
{
ensure(t.pointer() != NULL && t.size() != 0);
int _threads = min(low_pow2(t.size()), CUDA_MAX_THREADS);
int _blocks = min(_threads / t.size() + 1, CUDA_MAX_CORES);
knl_tensor_fill_random<<<_blocks, _threads>>>(t.pointer(), mean, dev, rand(), t.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_scale(TensorCUDA_float& t, float factor)
{
ensure(t.pointer() != NULL && t.size() != 0);
int _threads = min(low_pow2(t.size()), CUDA_MAX_THREADS);
int _blocks = min(_threads / t.size() + 1, CUDA_MAX_CORES);
knl_tensor_scale<<<_blocks, _threads>>>(t.pointer(), factor, t.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_diff(TensorCUDA_float& t1, TensorCUDA_float& t2, TensorCUDA_float& tout)
{
ensure(tout.pointer() != NULL && t1.pointer() != NULL && t2.pointer() != NULL &&
(tout.size() == t1.size() && tout.size() == t2.size()));
int _threads = min(low_pow2(tout.size()), CUDA_MAX_THREADS);
int _blocks = min(_threads / tout.size() + 1, CUDA_MAX_CORES);
knl_tensor_diff<<<_blocks, _threads>>>(t1.pointer(), t2.pointer(), tout.pointer(), tout.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_sum(TensorCUDA_float& t, TensorCUDA_float& tout)
{
ensure(tout.pointer() != NULL && t.pointer() != NULL && tout.size() == t.size());
int _threads = min(low_pow2(tout.size()), CUDA_MAX_THREADS);
int _blocks = min(_threads / tout.size() + 1, CUDA_MAX_CORES);
knl_tensor_add<<<_blocks, _threads>>>(tout.pointer(), t.pointer(), tout.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_copy(TensorCUDA_float& t, TensorCUDA_float& tout)
{
ensure(tout.pointer() != NULL && t.pointer() != NULL && tout.size() == t.size());
int _threads = min(low_pow2(tout.size()), CUDA_MAX_THREADS);
int _blocks = min(_threads / tout.size() + 1, CUDA_MAX_CORES);
knl_tensor_copy<<<_blocks, _threads>>>(tout.pointer(), t.pointer(), tout.size());
}
//Specialization
template < > void TensorCUDA<float*>::fill(float mean, float dev) { }
//Explicit instantiations
template class TensorCUDA<float>;
template class TensorCUDA<float*>;
template class TensorCUDA<int>;
////////////////////////////////////////////////////////////
} //namespace ai
|
fe687574511374158d4f63d95177521a398b8dfa.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <iostream>
#include <list>
#include "tbb/concurrent_queue.h"
#include "math.h"
#include "timestamp.hpp"
#include "AdaptativeUtils.hpp"
__device__ void csr_stream(float* partialSums, float* vals, int* cols, int* rowPtrs, float* vec, float* out, unsigned long* rowBlocks, float alpha, float beta, const unsigned int BLOCKSIZE , const unsigned int ROWS_FOR_VECTOR, const unsigned int BLOCK_MULTIPLIER, const unsigned int Bid, const unsigned Tid, unsigned int row, unsigned int stop_row, unsigned int wg, int WG_SIZE){
float temp_sum = 0.;
// int WG_SIZE = 256;
const unsigned int numThreadsForRed = wg;
const unsigned int col = rowPtrs[row] + Tid;
if (Bid != (gridDim.x - 1))
{
for(int i = 0; i < BLOCKSIZE; i += WG_SIZE)
partialSums[Tid + i] = alpha * vals[col + i] * vec[cols[col + i]];
}
else
{
// This is required so that we stay in bounds for vals[] and cols[].
// Otherwise, if the matrix's endpoints don't line up with BLOCKSIZE,
// we will buffer overflow. On today's dGPUs, this doesn't cause problems.
// The values are within a dGPU's page, which is zeroed out on allocation.
// However, this may change in the future (e.g. with shared virtual memory.)
// This causes a minor performance loss because this is the last workgroup
// to be launched, and this loop can't be unrolled.
const unsigned int max_to_load = rowPtrs[stop_row] - rowPtrs[row];
for(int i = 0; i < max_to_load; i += WG_SIZE)
partialSums[Tid + i] = alpha * vals[col + i] * vec[cols[col + i]];
}
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
if(numThreadsForRed > 1)
{
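// For a power-of-two numThreadsForRed, 31 - __clz(numThreadsForRed) == log2(numThreadsForRed),
// so the shift below assigns each consecutive team of numThreadsForRed threads to one row:
// with numThreadsForRed == 8, threads 0-7 reduce `row`, threads 8-15 reduce `row + 1`, etc.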
const unsigned int local_row = row + (Tid >> (31 - __clz(numThreadsForRed)));
const unsigned int local_first_val = rowPtrs[local_row] - rowPtrs[row];
const unsigned int local_last_val = rowPtrs[local_row + 1] - rowPtrs[row];
const unsigned int threadInBlock = Tid & (numThreadsForRed - 1);
if(local_row < stop_row)
{
// This is dangerous -- will infinite loop if your last value is within
// numThreadsForRed of MAX_UINT. Noticeable performance gain to avoid a
// long induction variable here, though.
for(unsigned int local_cur_val = local_first_val + threadInBlock; local_cur_val < local_last_val; local_cur_val += numThreadsForRed)
temp_sum += partialSums[local_cur_val] ; //temp_sum = two_sum(partialSums[local_cur_val], temp_sum, &sumk_e);
}
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
partialSums[Tid] = temp_sum;
// Step one of this two-stage reduction is done. Now each row has {numThreadsForRed}
// values sitting in the local memory. This means that, roughly, the beginning of
// LDS is full up to {workgroup size} entries.
// Now we perform a parallel reduction that sums together the answers for each
// row in parallel, leaving us an answer in 'temp_sum' for each row.
for (unsigned long i = (WG_SIZE >> 1); i > 0; i >>= 1)
{
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
if ( numThreadsForRed > i ){
temp_sum += partialSums[Tid + i];
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
partialSums[Tid] = temp_sum;
}
}
if (threadInBlock == 0 && local_row < stop_row)
{
// All of our write-outs check to see if the output vector should first be zeroed.
// If so, just do a write rather than a read-write. Measured to be a slight (~5%)
// performance improvement.
if (beta != 0.)
// temp_sum = std::fma(beta, out[local_row], temp_sum);
out[local_row] = beta* out[local_row] + temp_sum;
else
out[local_row] = temp_sum;
}
}else{
// In this case, we want to have each thread perform the reduction for a single row.
// Essentially, this looks like performing CSR-Scalar, except it is computed out of local memory.
// However, this reduction is also much faster than CSR-Scalar, because local memory
// is designed for scatter-gather operations.
// We need a while loop because there may be more rows than threads in the WG.
unsigned int local_row = row + Tid;
while(local_row < stop_row)
{
int local_first_val = (rowPtrs[local_row] - rowPtrs[row]);
int local_last_val = rowPtrs[local_row + 1] - rowPtrs[row];
temp_sum = 0.;
for (int local_cur_val = local_first_val; local_cur_val < local_last_val; local_cur_val++)
temp_sum += partialSums[local_cur_val];
// After you've done the reduction into the temp_sum register,
// put that into the output for each row.
if (beta != 0.)
out[local_row] = beta* out[local_row] + temp_sum;
// temp_sum = two_fma(beta, out[local_row], temp_sum, &sumk_e);
else
out[local_row] = temp_sum;
//out[local_row] = temp_sum + sumk_e;
local_row += WG_SIZE;
}
}
}
__device__ void csr_vector(float* partialSums, float* vals, int* cols, int* rowPtrs, float* vec, float* out,
unsigned long* rowBlocks, float alpha, float beta, const unsigned int BLOCKSIZE , const unsigned int ROWS_FOR_VECTOR, const unsigned int BLOCK_MULTIPLIER, const unsigned int Bid, const unsigned Tid, unsigned int row, unsigned int stop_row, unsigned int wg, int WG_SIZE ){
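// Overview: CSR-Vector case. The whole block works on a single row at a time: each thread
// accumulates a strided partial dot product over that row's non-zeros, and the tree
// reduction over partialSums below combines the partials into the row's final value.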
float temp_sum = 0.;
while (row < stop_row){
temp_sum = 0.;
// Load in a bunch of partial results into your register space, rather than LDS (no contention)
// Then dump the partially reduced answers into the LDS for inter-work-item reduction.
// Using a long induction variable to make sure unsigned int overflow doesn't break things.
unsigned int vecStart = rowPtrs[row];
unsigned int vecEnd = rowPtrs[row+1];
for (long j = vecStart + Tid; j < vecEnd; j+=WG_SIZE)
{
const unsigned int col = cols[(unsigned int)j];
temp_sum += alpha*vals[(unsigned int)j]*vec[col];
}
partialSums[Tid] = temp_sum;
// Reduce partial sums
for (unsigned long i = (WG_SIZE >> 1); i > 0; i >>= 1)
{
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
//temp_sum = sum2_reduce(temp_sum, &new_error, partialSums, lid, lid, WG_SIZE, i);
if ( WG_SIZE > i ){
temp_sum += partialSums[Tid + i];
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
partialSums[Tid] = temp_sum;
}
}
if (Tid == 0UL)
{
if (beta != 0.)
out[row] = beta* out[row] + temp_sum;
// temp_sum = two_fma(beta, out[local_row], temp_sum, &sumk_e);
else
out[row] = temp_sum;
}
row++;
}
}
__device__ void csr_vectorL(float* partialSums, float* vals, int* cols, int* rowPtrs, float* vec, float* out,
unsigned long* rowBlocks, float alpha, float beta, const unsigned int BLOCKSIZE , const unsigned int ROWS_FOR_VECTOR, const unsigned int BLOCK_MULTIPLIER, const unsigned int Bid, const unsigned Tid, unsigned int row, unsigned int stop_row, unsigned int wg, unsigned int vecStart, unsigned int vecEnd, int WG_SIZE ){
// In CSR-LongRows, we have more than one workgroup calculating this row.
// The output values for those types of rows are stored using atomic_add, because
// more than one parallel workgroup's value makes up the final answer.
// Unfortunately, this makes it difficult to do y=Ax, rather than y=Ax+y, because
// the values still left in y will be added in using the atomic_add.
//
// Our solution is to have the first workgroup in one of these long-rows cases
// properly initialize the output vector. All the other workgroups working on this
// row will spin-loop until that workgroup finishes its work.
// First, figure out which workgroup you are in the row. Bottom 24 bits.
// You can use that to find the global ID for the first workgroup calculating
// this long row.
float temp_sum = 0.;
const unsigned int first_wg_in_row = Bid - (rowBlocks[Bid] & ((1UL << 24) - 1UL)); // WGBITS = 24
const unsigned int compare_value = rowBlocks[Bid] & (1UL << 24);
// Bit 24 in the first workgroup is the flag that everyone waits on.
if(Bid == first_wg_in_row && Tid == 0UL)
{
// The first workgroup handles the output initialization.
volatile float out_val = out[row];
temp_sum = (beta - 1.) * out_val;
atomicXor( (unsigned int*) &rowBlocks[first_wg_in_row], (unsigned int) (1UL << 24)); // Release other workgroups.
}
// For every other workgroup, bit 24 holds the value they wait on.
// If your bit 24 == first_wg's bit 24, you spin loop.
// The first workgroup will eventually flip this bit, and you can move forward.
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
while(Bid != first_wg_in_row &&
Tid == 0U &&
((atomicMax((unsigned int*) &rowBlocks[first_wg_in_row],(unsigned int) 0UL) & (1UL << 24)) == compare_value)); //WGBITS = 24
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
// After you've passed the barrier, update your local flag to make sure that
// the next time through, you know what to wait on.
if (Bid != first_wg_in_row && Tid == 0UL)
rowBlocks[Bid] ^= (1UL << 24); // WGBITS = 24
// All but the final workgroup in a long-row collaboration have the same start_row
// and stop_row. They only run for one iteration.
// Load in a bunch of partial results into your register space, rather than LDS (no contention)
// Then dump the partially reduced answers into the LDS for inter-work-item reduction.
// unsigned int vecStart = wg*(unsigned int)(BLOCK_MULTIPLIER*BLOCKSIZE) + rowPtrs[row];
// unsigned int vecEnd = (rowPtrs[row + 1] > vecStart + BLOCK_MULTIPLIER*BLOCKSIZE) ? vecStart + BLOCK_MULTIPLIER*BLOCKSIZE : rowPtrs[row+1];
const unsigned int col = vecStart + Tid;
if (row == stop_row) // inner thread, we can hardcode/unroll this loop
{
// Don't put BLOCK_MULTIPLIER*BLOCKSIZE as the stop point, because
// some GPU compilers will *aggressively* unroll this loop.
// That increases register pressure and reduces occupancy.
for (int j = 0; j < (int)(vecEnd - col); j += WG_SIZE)
{
temp_sum += alpha*vals[col + j]*vec[cols[col + j]];
}
}
else
{
for(int j = 0; j < (int)(vecEnd - col); j += WG_SIZE)
temp_sum += alpha*vals[col + j]*vec[cols[col + j]];
}
partialSums[Tid] = temp_sum;
// Reduce partial sums
for (unsigned long i = (WG_SIZE >> 1); i > 0; i >>= 1)
{
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
// temp_sum = sum2_reduce(temp_sum, &new_error, partialSums, lid, lid, WG_SIZE, i);
if ( WG_SIZE > i ){
temp_sum += partialSums[Tid + i];
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
partialSums[Tid] = temp_sum;
}
}
if (Tid == 0UL)
{
atomicAdd(&out[row], temp_sum);
}
}
__global__ void csr_adaptative(float* vals, int* cols, int* rowPtrs, float* vec, float* out,
unsigned long* rowBlocks, float* d_alpha, float* d_beta, unsigned int* d_blkSize,
unsigned int* d_blkMultiple, unsigned int* d_rowForVector, int rowBlockSize){
const unsigned int blkSize = *d_blkSize;
const unsigned int blkMultiple = *d_blkMultiple;
const unsigned int rowForVector = *d_rowForVector;
extern __shared__ float partialSums[];
int Bid = blockIdx.x ;
int Tid = threadIdx.x;
const float alpha = *d_alpha;
const float beta = *d_beta;
//test_GPU[0] = rowBlockSize;
int WGSIZE = blockDim.x;
if (Bid < rowBlockSize) {
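// Each rowBlocks entry packs the block descriptor computed ahead of time (presumably on the
// host): bits 63..32 hold the first row of the block (OWBITS = 32), bits 23..0 hold the
// workgroup ordinal within a long row (WGBITS = 24), and bit 24 is the synchronization flag
// used by csr_vectorL. For example, an entry equal to (42UL << 32) | 3UL describes the
// fourth workgroup cooperating on long row 42.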
unsigned int row = ((rowBlocks[Bid] >> 32) & ((1UL << 32) - 1UL)); // OWBITS = 32
unsigned int stop_row = ((rowBlocks[Bid + 1] >> 32) & ((1UL << 32) - 1UL));
unsigned int num_rows = stop_row - row;
unsigned int wg = rowBlocks[Bid] & ((1 << 24) - 1); // WGBITS = 24
unsigned int vecStart = wg*(unsigned int)(blkSize*blkMultiple) + rowPtrs[row];
unsigned int vecEnd = (rowPtrs[row + 1] > vecStart + blkSize*blkMultiple) ? vecStart + blkSize*blkMultiple : rowPtrs[row+1];
/*-------- if (num_rows == 0 || (num_rows == 1 && wg)) // CSR-LongRows case
{
num_rows = rowForVector;
stop_row = (wg ? row : (row + 1));
wg = 0;
// tab[Bid] = 15;
} ------*/
// if(row <= stop_row){
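// Dispatch between the three CSR-Adaptive strategies: many rows in this block -> CSR-Stream;
// at least one row and wg == 0 -> CSR-Vector (the block handles one row at a time);
// otherwise -> CSR-LongRows, where several blocks cooperate on one long row over the
// vecStart/vecEnd slice computed above.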
if (num_rows > rowForVector) //CSR-Stream case
{
csr_stream(partialSums, vals, cols, rowPtrs, vec, out, rowBlocks, alpha, beta, blkSize, rowForVector, blkMultiple, Bid, Tid, row, stop_row, wg, WGSIZE );
//test_GPU[0] += 10;
}else if (num_rows >= 1 && !wg){ // CSR-Vector case.
csr_vector(partialSums, vals, cols, rowPtrs, vec, out, rowBlocks, alpha, beta, blkSize, rowForVector, blkMultiple, Bid, Tid, row, stop_row, wg, WGSIZE);
//test_GPU[1] += 10;
}else{ //CSR-LongRows
csr_vectorL(partialSums, vals, cols, rowPtrs, vec, out, rowBlocks, alpha, beta, blkSize, rowForVector, blkMultiple, Bid, Tid, row, stop_row, wg, vecStart, vecEnd, WGSIZE);
//test_GPU[2] += 10;
}
// }
}else{
unsigned int row = ((rowBlocks[Bid] >> 32) & ((1UL << 32) - 1UL)); // OWBITS = 32
unsigned int stop_row = ((rowBlocks[Bid + 1] >> 32) & ((1UL << 32) - 1UL));
csr_stream(partialSums, vals, cols, rowPtrs, vec, out, rowBlocks, alpha, beta, blkSize, rowForVector, blkMultiple, Bid, Tid, row, stop_row, 24, WGSIZE);
//test_GPU[0] += 10;
}
//test_GPU[0] += 3310;
}
__global__ void csr_adaptativeT(int *a){
int index = blockIdx.x*blockDim.x + threadIdx.x;
//a[index] = __clz(512) ;
}
| fe687574511374158d4f63d95177521a398b8dfa.cu | #include <cuda_runtime_api.h>
#include <iostream>
#include <list>
#include "tbb/concurrent_queue.h"
#include "math.h"
#include "timestamp.hpp"
#include "AdaptativeUtils.hpp"
__device__ void csr_stream(float* partialSums, float* vals, int* cols, int* rowPtrs, float* vec, float* out, unsigned long* rowBlocks, float alpha, float beta, const unsigned int BLOCKSIZE , const unsigned int ROWS_FOR_VECTOR, const unsigned int BLOCK_MULTIPLIER, const unsigned int Bid, const unsigned Tid, unsigned int row, unsigned int stop_row, unsigned int wg, int WG_SIZE){
float temp_sum = 0.;
// int WG_SIZE = 256;
const unsigned int numThreadsForRed = wg;
const unsigned int col = rowPtrs[row] + Tid;
if (Bid != (gridDim.x - 1))
{
for(int i = 0; i < BLOCKSIZE; i += WG_SIZE)
partialSums[Tid + i] = alpha * vals[col + i] * vec[cols[col + i]];
}
else
{
// This is required so that we stay in bounds for vals[] and cols[].
// Otherwise, if the matrix's endpoints don't line up with BLOCKSIZE,
// we will buffer overflow. On today's dGPUs, this doesn't cause problems.
// The values are within a dGPU's page, which is zeroed out on allocation.
// However, this may change in the future (e.g. with shared virtual memory.)
// This causes a minor performance loss because this is the last workgroup
// to be launched, and this loop can't be unrolled.
const unsigned int max_to_load = rowPtrs[stop_row] - rowPtrs[row];
for(int i = 0; i < max_to_load; i += WG_SIZE)
partialSums[Tid + i] = alpha * vals[col + i] * vec[cols[col + i]];
}
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
if(numThreadsForRed > 1)
{
const unsigned int local_row = row + (Tid >> (31 - __clz(numThreadsForRed)));
const unsigned int local_first_val = rowPtrs[local_row] - rowPtrs[row];
const unsigned int local_last_val = rowPtrs[local_row + 1] - rowPtrs[row];
const unsigned int threadInBlock = Tid & (numThreadsForRed - 1);
if(local_row < stop_row)
{
// This is dangerous -- will infinite loop if your last value is within
// numThreadsForRed of MAX_UINT. Noticeable performance gain to avoid a
// long induction variable here, though.
for(unsigned int local_cur_val = local_first_val + threadInBlock; local_cur_val < local_last_val; local_cur_val += numThreadsForRed)
temp_sum += partialSums[local_cur_val] ; //temp_sum = two_sum(partialSums[local_cur_val], temp_sum, &sumk_e);
}
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
partialSums[Tid] = temp_sum;
// Step one of this two-stage reduction is done. Now each row has {numThreadsForRed}
// values sitting in the local memory. This means that, roughly, the beginning of
// LDS is full up to {workgroup size} entries.
// Now we perform a parallel reduction that sums together the answers for each
// row in parallel, leaving us an answer in 'temp_sum' for each row.
for (unsigned long i = (WG_SIZE >> 1); i > 0; i >>= 1)
{
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
if ( numThreadsForRed > i ){
temp_sum += partialSums[Tid + i];
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
partialSums[Tid] = temp_sum;
}
}
if (threadInBlock == 0 && local_row < stop_row)
{
// All of our write-outs check to see if the output vector should first be zeroed.
// If so, just do a write rather than a read-write. Measured to be a slight (~5%)
// performance improvement.
if (beta != 0.)
// temp_sum = std::fma(beta, out[local_row], temp_sum);
out[local_row] = beta* out[local_row] + temp_sum;
else
out[local_row] = temp_sum;
}
}else{
// In this case, we want to have each thread perform the reduction for a single row.
// Essentially, this looks like performing CSR-Scalar, except it is computed out of local memory.
// However, this reduction is also much faster than CSR-Scalar, because local memory
// is designed for scatter-gather operations.
// We need a while loop because there may be more rows than threads in the WG.
unsigned int local_row = row + Tid;
while(local_row < stop_row)
{
int local_first_val = (rowPtrs[local_row] - rowPtrs[row]);
int local_last_val = rowPtrs[local_row + 1] - rowPtrs[row];
temp_sum = 0.;
for (int local_cur_val = local_first_val; local_cur_val < local_last_val; local_cur_val++)
temp_sum += partialSums[local_cur_val];
// After you've done the reduction into the temp_sum register,
// put that into the output for each row.
if (beta != 0.)
out[local_row] = beta* out[local_row] + temp_sum;
// temp_sum = two_fma(beta, out[local_row], temp_sum, &sumk_e);
else
out[local_row] = temp_sum;
//out[local_row] = temp_sum + sumk_e;
local_row += WG_SIZE;
}
}
}
__device__ void csr_vector(float* partialSums, float* vals, int* cols, int* rowPtrs, float* vec, float* out,
unsigned long* rowBlocks, float alpha, float beta, const unsigned int BLOCKSIZE , const unsigned int ROWS_FOR_VECTOR, const unsigned int BLOCK_MULTIPLIER, const unsigned int Bid, const unsigned Tid, unsigned int row, unsigned int stop_row, unsigned int wg, int WG_SIZE ){
float temp_sum = 0.;
while (row < stop_row){
temp_sum = 0.;
// Load in a bunch of partial results into your register space, rather than LDS (no contention)
// Then dump the partially reduced answers into the LDS for inter-work-item reduction.
// Using a long induction variable to make sure unsigned int overflow doesn't break things.
unsigned int vecStart = rowPtrs[row];
unsigned int vecEnd = rowPtrs[row+1];
for (long j = vecStart + Tid; j < vecEnd; j+=WG_SIZE)
{
const unsigned int col = cols[(unsigned int)j];
temp_sum += alpha*vals[(unsigned int)j]*vec[col];
}
partialSums[Tid] = temp_sum;
// Reduce partial sums
for (unsigned long i = (WG_SIZE >> 1); i > 0; i >>= 1)
{
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
//temp_sum = sum2_reduce(temp_sum, &new_error, partialSums, lid, lid, WG_SIZE, i);
if ( WG_SIZE > i ){
temp_sum += partialSums[Tid + i];
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
partialSums[Tid] = temp_sum;
}
}
if (Tid == 0UL)
{
if (beta != 0.)
out[row] = beta* out[row] + temp_sum;
// temp_sum = two_fma(beta, out[local_row], temp_sum, &sumk_e);
else
out[row] = temp_sum;
}
row++;
}
}
__device__ void csr_vectorL(float* partialSums, float* vals, int* cols, int* rowPtrs, float* vec, float* out,
unsigned long* rowBlocks, float alpha, float beta, const unsigned int BLOCKSIZE , const unsigned int ROWS_FOR_VECTOR, const unsigned int BLOCK_MULTIPLIER, const unsigned int Bid, const unsigned Tid, unsigned int row, unsigned int stop_row, unsigned int wg, unsigned int vecStart, unsigned int vecEnd, int WG_SIZE ){
// In CSR-LongRows, we have more than one workgroup calculating this row.
// The output values for those types of rows are stored using atomic_add, because
// more than one parallel workgroup's value makes up the final answer.
// Unfortunately, this makes it difficult to do y=Ax, rather than y=Ax+y, because
// the values still left in y will be added in using the atomic_add.
//
// Our solution is to have the first workgroup in one of these long-rows cases
// properly initialize the output vector. All the other workgroups working on this
// row will spin-loop until that workgroup finishes its work.
// First, figure out which workgroup you are in the row. Bottom 24 bits.
// You can use that to find the global ID for the first workgroup calculating
// this long row.
float temp_sum = 0.;
const unsigned int first_wg_in_row = Bid - (rowBlocks[Bid] & ((1UL << 24) - 1UL)); // WGBITS = 24
const unsigned int compare_value = rowBlocks[Bid] & (1UL << 24);
// Bit 24 in the first workgroup is the flag that everyone waits on.
if(Bid == first_wg_in_row && Tid == 0UL)
{
// The first workgroup handles the output initialization.
volatile float out_val = out[row];
temp_sum = (beta - 1.) * out_val;
atomicXor( (unsigned int*) &rowBlocks[first_wg_in_row], (unsigned int) (1UL << 24)); // Release other workgroups.
}
// For every other workgroup, bit 24 holds the value they wait on.
// If your bit 24 == first_wg's bit 24, you spin loop.
// The first workgroup will eventually flip this bit, and you can move forward.
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
while(Bid != first_wg_in_row &&
Tid == 0U &&
((atomicMax((unsigned int*) &rowBlocks[first_wg_in_row],(unsigned int) 0UL) & (1UL << 24)) == compare_value)); //WGBITS = 24
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
// After you've passed the barrier, update your local flag to make sure that
// the next time through, you know what to wait on.
if (Bid != first_wg_in_row && Tid == 0UL)
rowBlocks[Bid] ^= (1UL << 24); // WGBITS = 24
// All but the final workgroup in a long-row collaboration have the same start_row
// and stop_row. They only run for one iteration.
// Load in a bunch of partial results into your register space, rather than LDS (no contention)
// Then dump the partially reduced answers into the LDS for inter-work-item reduction.
// unsigned int vecStart = wg*(unsigned int)(BLOCK_MULTIPLIER*BLOCKSIZE) + rowPtrs[row];
// unsigned int vecEnd = (rowPtrs[row + 1] > vecStart + BLOCK_MULTIPLIER*BLOCKSIZE) ? vecStart + BLOCK_MULTIPLIER*BLOCKSIZE : rowPtrs[row+1];
const unsigned int col = vecStart + Tid;
if (row == stop_row) // inner thread, we can hardcode/unroll this loop
{
// Don't put BLOCK_MULTIPLIER*BLOCKSIZE as the stop point, because
// some GPU compilers will *aggressively* unroll this loop.
// That increases register pressure and reduces occupancy.
for (int j = 0; j < (int)(vecEnd - col); j += WG_SIZE)
{
temp_sum += alpha*vals[col + j]*vec[cols[col + j]];
}
}
else
{
for(int j = 0; j < (int)(vecEnd - col); j += WG_SIZE)
temp_sum += alpha*vals[col + j]*vec[cols[col + j]];
}
partialSums[Tid] = temp_sum;
// Reduce partial sums
for (unsigned long i = (WG_SIZE >> 1); i > 0; i >>= 1)
{
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
// temp_sum = sum2_reduce(temp_sum, &new_error, partialSums, lid, lid, WG_SIZE, i);
if ( WG_SIZE > i ){
temp_sum += partialSums[Tid + i];
__syncthreads(); // barrier(CLK_LOCAL_MEM_FENCE);
partialSums[Tid] = temp_sum;
}
}
if (Tid == 0UL)
{
atomicAdd(&out[row], temp_sum);
}
}
__global__ void csr_adaptative(float* vals, int* cols, int* rowPtrs, float* vec, float* out,
unsigned long* rowBlocks, float* d_alpha, float* d_beta, unsigned int* d_blkSize,
unsigned int* d_blkMultiple, unsigned int* d_rowForVector, int rowBlockSize){
const unsigned int blkSize = *d_blkSize;
const unsigned int blkMultiple = *d_blkMultiple;
const unsigned int rowForVector = *d_rowForVector;
extern __shared__ float partialSums[];
int Bid = blockIdx.x ;
int Tid = threadIdx.x;
const float alpha = *d_alpha;
const float beta = *d_beta;
//test_GPU[0] = rowBlockSize;
int WGSIZE = blockDim.x;
if (Bid < rowBlockSize) {
unsigned int row = ((rowBlocks[Bid] >> 32) & ((1UL << 32) - 1UL)); // OWBITS = 32
unsigned int stop_row = ((rowBlocks[Bid + 1] >> 32) & ((1UL << 32) - 1UL));
unsigned int num_rows = stop_row - row;
unsigned int wg = rowBlocks[Bid] & ((1 << 24) - 1); // WGBITS = 24
unsigned int vecStart = wg*(unsigned int)(blkSize*blkMultiple) + rowPtrs[row];
unsigned int vecEnd = (rowPtrs[row + 1] > vecStart + blkSize*blkMultiple) ? vecStart + blkSize*blkMultiple : rowPtrs[row+1];
/*-------- if (num_rows == 0 || (num_rows == 1 && wg)) // CSR-LongRows case
{
num_rows = rowForVector;
stop_row = (wg ? row : (row + 1));
wg = 0;
// tab[Bid] = 15;
} ------*/
// if(row <= stop_row){
if (num_rows > rowForVector) //CSR-Stream case
{
csr_stream(partialSums, vals, cols, rowPtrs, vec, out, rowBlocks, alpha, beta, blkSize, rowForVector, blkMultiple, Bid, Tid, row, stop_row, wg, WGSIZE );
//test_GPU[0] += 10;
}else if (num_rows >= 1 && !wg){ // CSR-Vector case.
csr_vector(partialSums, vals, cols, rowPtrs, vec, out, rowBlocks, alpha, beta, blkSize, rowForVector, blkMultiple, Bid, Tid, row, stop_row, wg, WGSIZE);
//test_GPU[1] += 10;
}else{ //CSR-LongRows
csr_vectorL(partialSums, vals, cols, rowPtrs, vec, out, rowBlocks, alpha, beta, blkSize, rowForVector, blkMultiple, Bid, Tid, row, stop_row, wg, vecStart, vecEnd, WGSIZE);
//test_GPU[2] += 10;
}
// }
}else{
unsigned int row = ((rowBlocks[Bid] >> 32) & ((1UL << 32) - 1UL)); // OWBITS = 32
unsigned int stop_row = ((rowBlocks[Bid + 1] >> 32) & ((1UL << 32) - 1UL));
csr_stream(partialSums, vals, cols, rowPtrs, vec, out, rowBlocks, alpha, beta, blkSize, rowForVector, blkMultiple, Bid, Tid, row, stop_row, 24, WGSIZE);
//test_GPU[0] += 10;
}
//test_GPU[0] += 3310;
}
__global__ void csr_adaptativeT(int *a){
int index = blockIdx.x*blockDim.x + threadIdx.x;
//a[index] = __clz(512) ;
}
|
9f6ba9563177f7c3c7bf4810e7b3cf787c3406ad.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathPairwise.hip"
#else
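// Note: this generic file is compiled once per scalar type by being #include-d from the
// corresponding THCGenerate*Type header; THCTensor_(NAME) and scalar_t then expand to the
// type-specific symbols (roughly, THCTensor_(add) becomes a per-type function such as
// THCudaDoubleTensor_add when scalar_t is double).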
void THCTensor_(add)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorAddConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorAddConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(sub)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSubConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorSubConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(add_scaled)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value, scalar_t alpha)
{
THCTensor_(add)(state, self_, src_, value * alpha);
}
void THCTensor_(sub_scaled)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value, scalar_t alpha)
{
THCTensor_(sub)(state, self_, src_, value * alpha);
}
void THCTensor_(mul)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(div)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
THArgCheck(value != ScalarConvert<int, scalar_t>::to(0), 3, "divide by zero");
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorDivConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorDivConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(lshift)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCTensor_(mul)(state, self_, src_, pow(2, value));
#elif defined(THC_REAL_IS_HALF)
return THError("lshift not supported for torch.CudaHalfTensor");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorLShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorLShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(rshift)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCTensor_(mul)(state, self_, src_, pow(2, -value));
#elif defined(THC_REAL_IS_HALF)
return THError("rshift not supported for torch.CudaHalfTensor");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorRShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorRShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(fmod)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(remainder)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorRemainderOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorRemainderOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(tril)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
THArgCheck(!src_->is_empty() && src_->dim() == 2, 1, "expected a matrix");
if (self_ != src_)
THCTensor_(resizeAs)(state, self_, src_);
int64_t stride0 = self_->stride(0);
int64_t stride1 = self_->stride(1);
scalar_t *start = THCTensor_(data)(state, self_);
TensorTriOp<scalar_t, 0> op(start, stride0, stride1, k);
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(triu)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
THArgCheck(!src_->is_empty() && src_->dim() == 2, 1, "expected a matrix");
if (self_ != src_)
THCTensor_(resizeAs)(state, self_, src_);
int64_t stride0 = self_->stride(0);
int64_t stride1 = self_->stride(1);
scalar_t *start = THCTensor_(data)(state, self_);
TensorTriOp<scalar_t, 1> op(start, stride0, stride1, k);
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
int THCTensor_(equal)(THCState *state, THCTensor *self_, THCTensor *src_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (!THCTensor_(isSameSizeAs(state, self_, src_))) {
return 0;
}
// This is not as efficient as TH, but the basic idea: create a buffer that stores
// 1 if the two tensors are equal at a position, otherwise 0. If the minimum value
// in this buffer is 1, the two tensors are equal, otherwise they are not
THCudaByteTensor *buf = THCudaByteTensor_newWithSize(state, self_->sizes(), {});
if (!THC_pointwiseApply3<uint8_t, scalar_t, scalar_t>(state, buf, self_, src_, TensorEQOp<scalar_t, unsigned char>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
unsigned char min = THCudaByteTensor_minall(state, buf);
THCudaByteTensor_free(state, buf);
return min != 0;
}
void THCTensor_(bitand)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitand only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitAndConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitAndConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(bitor)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitor only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitOrConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitOrConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(bitxor)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitxor only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitXorConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitXorConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
#endif
| 9f6ba9563177f7c3c7bf4810e7b3cf787c3406ad.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathPairwise.cu"
#else
void THCTensor_(add)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorAddConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorAddConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(sub)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSubConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorSubConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(add_scaled)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value, scalar_t alpha)
{
THCTensor_(add)(state, self_, src_, value * alpha);
}
void THCTensor_(sub_scaled)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value, scalar_t alpha)
{
THCTensor_(sub)(state, self_, src_, value * alpha);
}
void THCTensor_(mul)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(div)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
THArgCheck(value != ScalarConvert<int, scalar_t>::to(0), 3, "divide by zero");
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorDivConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorDivConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(lshift)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCTensor_(mul)(state, self_, src_, pow(2, value));
#elif defined(THC_REAL_IS_HALF)
return THError("lshift not supported for torch.CudaHalfTensor");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorLShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorLShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(rshift)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCTensor_(mul)(state, self_, src_, pow(2, -value));
#elif defined(THC_REAL_IS_HALF)
return THError("rshift not supported for torch.CudaHalfTensor");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorRShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorRShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(fmod)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(remainder)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorRemainderOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorRemainderOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(tril)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
THArgCheck(!src_->is_empty() && src_->dim() == 2, 1, "expected a matrix");
if (self_ != src_)
THCTensor_(resizeAs)(state, self_, src_);
int64_t stride0 = self_->stride(0);
int64_t stride1 = self_->stride(1);
scalar_t *start = THCTensor_(data)(state, self_);
TensorTriOp<scalar_t, 0> op(start, stride0, stride1, k);
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(triu)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
THArgCheck(!src_->is_empty() && src_->dim() == 2, 1, "expected a matrix");
if (self_ != src_)
THCTensor_(resizeAs)(state, self_, src_);
int64_t stride0 = self_->stride(0);
int64_t stride1 = self_->stride(1);
scalar_t *start = THCTensor_(data)(state, self_);
TensorTriOp<scalar_t, 1> op(start, stride0, stride1, k);
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
int THCTensor_(equal)(THCState *state, THCTensor *self_, THCTensor *src_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (!THCTensor_(isSameSizeAs(state, self_, src_))) {
return 0;
}
// This is not as efficient as TH, but the basic idea: create a buffer that stores
// 1 if the two tensors are equal at a position, otherwise 0. If the minimum value
// in this buffer is 1, the two tensors are equal, otherwise they are not
THCudaByteTensor *buf = THCudaByteTensor_newWithSize(state, self_->sizes(), {});
if (!THC_pointwiseApply3<uint8_t, scalar_t, scalar_t>(state, buf, self_, src_, TensorEQOp<scalar_t, unsigned char>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
unsigned char min = THCudaByteTensor_minall(state, buf);
THCudaByteTensor_free(state, buf);
return min != 0;
}
void THCTensor_(bitand)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitand only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitAndConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitAndConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(bitor)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitor only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitOrConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitOrConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(bitxor)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitxor only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitXorConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitXorConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
#endif
|
6ef4e191e943b8e54e984a9cee3ef7237484584f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <torch/extension.h>
typedef torch::PackedTensorAccessor32<int32_t, 2, torch::RestrictPtrTraits> int_2d;
typedef torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> float_2d;
typedef torch::PackedTensorAccessor32<float, 3, torch::RestrictPtrTraits> float_3d;
__global__ void weighted_sum_kernel(
const float_3d x,
const int_2d group,
const float_2d weights,
float_3d y
) {
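// Shapes (inferred from the accessors): x is (B, N, D), group is (B, N) with values in
// [0, C), weights is (B, C) and y is (B, C, D). One thread handles one (batch, item) pair
// and accumulates weights[b][c] * x[b][n] into y[b][c], where c = group[b][n]; atomicAdd is
// required because many items of a batch can map to the same group.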
int B = x.size(0);
int N = x.size(1);
int D = x.size(2);
int C = y.size(1);
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int b_idx = idx / N;
int n_idx = idx % N;
if (b_idx >= B) return;
int c_idx = group[b_idx][n_idx];
if (c_idx < 0 || c_idx >= C) return;
float w = weights[b_idx][c_idx];
for (int d_idx = 0; d_idx < D; d_idx++) {
atomicAdd(&y[b_idx][c_idx][d_idx], x[b_idx][n_idx][d_idx] * w);
}
}
void weighted_sum(
const torch::Tensor x,
const torch::Tensor group,
const torch::Tensor weights,
torch::Tensor y
) {
int B = x.size(0);
int N = x.size(1);
int D = x.size(2);
int C = y.size(1);
const int threads = 1024;
int blocks = (B*N - 1) / threads + 1;
hipLaunchKernelGGL(( weighted_sum_kernel), dim3(blocks), dim3(threads), 0, 0,
x.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
group.packed_accessor32<int32_t, 2, torch::RestrictPtrTraits>(),
weights.packed_accessor32<float, 2, torch::RestrictPtrTraits>(),
y.packed_accessor32<float, 3, torch::RestrictPtrTraits>()
);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("_weighted_sum", &weighted_sum);
}
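// Hypothetical usage from C++ (the local variable names and sizes are illustrative; the
// shapes follow the accessors above):
// auto x = torch::randn({2, 1024, 32}, torch::device(torch::kCUDA).dtype(torch::kFloat32));
// auto group = torch::randint(0, 8, {2, 1024}, torch::device(torch::kCUDA).dtype(torch::kInt32));
// auto w = torch::rand({2, 8}, torch::device(torch::kCUDA).dtype(torch::kFloat32));
// auto y = torch::zeros({2, 8, 32}, torch::device(torch::kCUDA).dtype(torch::kFloat32));
// weighted_sum(x, group, w, y); // accumulates w[b][c] * x[b][n] into y[b][c] for c = group[b][n]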
| 6ef4e191e943b8e54e984a9cee3ef7237484584f.cu | #include <torch/extension.h>
typedef torch::PackedTensorAccessor32<int32_t, 2, torch::RestrictPtrTraits> int_2d;
typedef torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> float_2d;
typedef torch::PackedTensorAccessor32<float, 3, torch::RestrictPtrTraits> float_3d;
__global__ void weighted_sum_kernel(
const float_3d x,
const int_2d group,
const float_2d weights,
float_3d y
) {
int B = x.size(0);
int N = x.size(1);
int D = x.size(2);
int C = y.size(1);
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int b_idx = idx / N;
int n_idx = idx % N;
if (b_idx >= B) return;
int c_idx = group[b_idx][n_idx];
if (c_idx < 0 || c_idx >= C) return;
float w = weights[b_idx][c_idx];
for (int d_idx = 0; d_idx < D; d_idx++) {
atomicAdd(&y[b_idx][c_idx][d_idx], x[b_idx][n_idx][d_idx] * w);
}
}
void weighted_sum(
const torch::Tensor x,
const torch::Tensor group,
const torch::Tensor weights,
torch::Tensor y
) {
int B = x.size(0);
int N = x.size(1);
int D = x.size(2);
int C = y.size(1);
const int threads = 1024;
int blocks = (B*N - 1) / threads + 1;
weighted_sum_kernel<<<blocks, threads>>>(
x.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
group.packed_accessor32<int32_t, 2, torch::RestrictPtrTraits>(),
weights.packed_accessor32<float, 2, torch::RestrictPtrTraits>(),
y.packed_accessor32<float, 3, torch::RestrictPtrTraits>()
);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("_weighted_sum", &weighted_sum);
}
|
d1d92354a2cd4eafc17c9d76d28c6f4f02bcaf62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// New CUDA kernel launch sequence does not require explicit specification of
// size/offset for each argument, so only the old way is tested.
//
// RUN: %clang_cc1 --std=c++11 -triple x86_64-unknown-linux-gnu -emit-llvm \
// RUN: -target-sdk-version=8.0 -o - %s \
// RUN: | FileCheck -check-prefixes=HOST-OLD,CHECK %s
// RUN: %clang_cc1 --std=c++11 -fcuda-is-device -triple nvptx64-nvidia-cuda \
// RUN: -emit-llvm -o - %s | FileCheck -check-prefixes=DEVICE,CHECK %s
#include "Inputs/cuda.h"
struct U {
short x;
} __attribute__((packed));
struct S {
int *ptr;
char a;
U u;
};
// Clang should generate a packed LLVM struct for S (denoted by the <>s),
// otherwise this test isn't interesting.
// CHECK: %struct.S = type <{ i32*, i8, %struct.U, [5 x i8] }>
static_assert(alignof(S) == 8, "Unexpected alignment.");
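// Note: with the packed layout shown in the CHECK line above, sizeof(S) = 8 (int*) + 1 (char)
// + 2 (packed U) + 5 (tail padding) = 16, which is why the marshalled S argument below is 16
// bytes wide and the trailing int* lands at offset 8 + 16 = 24.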
// HOST-LABEL: @_Z6kernelc1SPi
// Marshalled kernel args should be:
// 1. offset 0, width 1
// 2. offset 8 (because alignof(S) == 8), width 16
// 3. offset 24, width 8
// HOST-OLD: call i32 @hipSetupArgument({{[^,]*}}, i64 1, i64 0)
// HOST-OLD: call i32 @hipSetupArgument({{[^,]*}}, i64 16, i64 8)
// HOST-OLD: call i32 @hipSetupArgument({{[^,]*}}, i64 8, i64 24)
// DEVICE-LABEL: @_Z6kernelc1SPi
// DEVICE-SAME: i8{{[^,]*}}, %struct.S* byval(%struct.S) align 8{{[^,]*}}, i32*
__global__ void kernel(char a, S s, int *b) {}
| d1d92354a2cd4eafc17c9d76d28c6f4f02bcaf62.cu | // New CUDA kernel launch sequence does not require explicit specification of
// size/offset for each argument, so only the old way is tested.
//
// RUN: %clang_cc1 --std=c++11 -triple x86_64-unknown-linux-gnu -emit-llvm \
// RUN: -target-sdk-version=8.0 -o - %s \
// RUN: | FileCheck -check-prefixes=HOST-OLD,CHECK %s
// RUN: %clang_cc1 --std=c++11 -fcuda-is-device -triple nvptx64-nvidia-cuda \
// RUN: -emit-llvm -o - %s | FileCheck -check-prefixes=DEVICE,CHECK %s
#include "Inputs/cuda.h"
struct U {
short x;
} __attribute__((packed));
struct S {
int *ptr;
char a;
U u;
};
// Clang should generate a packed LLVM struct for S (denoted by the <>s),
// otherwise this test isn't interesting.
// CHECK: %struct.S = type <{ i32*, i8, %struct.U, [5 x i8] }>
static_assert(alignof(S) == 8, "Unexpected alignment.");
// HOST-LABEL: @_Z6kernelc1SPi
// Marshalled kernel args should be:
// 1. offset 0, width 1
// 2. offset 8 (because alignof(S) == 8), width 16
// 3. offset 24, width 8
// HOST-OLD: call i32 @cudaSetupArgument({{[^,]*}}, i64 1, i64 0)
// HOST-OLD: call i32 @cudaSetupArgument({{[^,]*}}, i64 16, i64 8)
// HOST-OLD: call i32 @cudaSetupArgument({{[^,]*}}, i64 8, i64 24)
// DEVICE-LABEL: @_Z6kernelc1SPi
// DEVICE-SAME: i8{{[^,]*}}, %struct.S* byval(%struct.S) align 8{{[^,]*}}, i32*
__global__ void kernel(char a, S s, int *b) {}
|
61a7f33ffc6d4c2aaca9db8526436b819daa0cd9.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math.h>
#include "image_function_cuda.cuh"
#include "../parameter_validation.h"
#include "../image_function_helper.h"
#include "cuda_types.cuh"
#include "cuda_helper.cuh"
namespace
{
struct FunctionRegistrator
{
Image_Function_Helper::FunctionTableHolder table;
FunctionRegistrator()
{
table.AbsoluteDifference = &Image_Function_Cuda::AbsoluteDifference;
table.BitwiseAnd = &Image_Function_Cuda::BitwiseAnd;
table.BitwiseOr = &Image_Function_Cuda::BitwiseOr;
table.BitwiseXor = &Image_Function_Cuda::BitwiseXor;
table.ConvertToGrayScale = &Image_Function_Cuda::ConvertToGrayScale;
table.ConvertToRgb = &Image_Function_Cuda::ConvertToRgb;
table.Copy = &Image_Function_Cuda::Copy;
table.ExtractChannel = &Image_Function_Cuda::ExtractChannel;
table.Fill = &Image_Function_Cuda::Fill;
table.GammaCorrection = &Image_Function_Cuda::GammaCorrection;
table.Histogram = &Image_Function_Cuda::Histogram;
table.Invert = &Image_Function_Cuda::Invert;
table.LookupTable = &Image_Function_Cuda::LookupTable;
table.Maximum = &Image_Function_Cuda::Maximum;
table.Minimum = &Image_Function_Cuda::Minimum;
table.Subtract = &Image_Function_Cuda::Subtract;
table.Threshold = &Image_Function_Cuda::Threshold;
table.Threshold2 = &Image_Function_Cuda::Threshold;
ImageTypeManager::instance().setFunctionTable( PenguinV_Image::ImageCuda().type(), table );
ImageTypeManager::instance().setConvertFunction( Image_Function_Cuda::ConvertToCuda, PenguinV_Image::Image(), PenguinV_Image::ImageCuda() );
ImageTypeManager::instance().setConvertFunction( Image_Function_Cuda::ConvertFromCuda, PenguinV_Image::ImageCuda(), PenguinV_Image::Image() );
}
};
const FunctionRegistrator functionRegistrator;
// The list of CUDA device functions on device side
__global__ void absoluteDifferenceCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2,
uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * in1X = in1 + y * rowSizeIn1 + x;
const uint8_t * in2X = in2 + y * rowSizeIn2 + x;
uint8_t * outX = out + y * rowSizeOut + x;
(*outX) = ((*in1X) > ( *in2X )) ? ((*in1X) - (*in2X)) : ((*in2X) - (*in1X));
}
}
__global__ void bitwiseAndCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2,
uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint32_t idIn1 = y * rowSizeIn1 + x;
const uint32_t idIn2 = y * rowSizeIn2 + x;
const uint32_t idOut = y * rowSizeOut + x;
out[idOut] = in1[idIn1] & in2[idIn2];
}
}
__global__ void bitwiseOrCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2,
uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint32_t idIn1 = y * rowSizeIn1 + x;
const uint32_t idIn2 = y * rowSizeIn2 + x;
const uint32_t idOut = y * rowSizeOut + x;
out[idOut] = in1[idIn1] | in2[idIn2];
}
}
__global__ void bitwiseXorCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2,
uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint32_t idIn1 = y * rowSizeIn1 + x;
const uint32_t idIn2 = y * rowSizeIn2 + x;
const uint32_t idOut = y * rowSizeOut + x;
out[idOut] = in1[idIn1] ^ in2[idIn2];
}
}
__global__ void convertToGrayScaleCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t colorCount, uint8_t * out, uint32_t rowSizeOut,
uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * data = in + y * rowSizeIn + x * colorCount;
const uint8_t * dataEnd = data + colorCount;
uint32_t sum = 0;
for ( ; data != dataEnd; ++data )
{
sum += (*data);
}
const uint32_t id = y * rowSizeOut + x;
out[id] = static_cast<uint8_t>(sum / colorCount);
}
}
__global__ void convertToRgbCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint8_t colorCount,
uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * dataIn = in + y * rowSizeIn + x;
uint8_t * dataOut = out + y * rowSizeOut + x * colorCount;
const uint8_t * dataOutEnd = dataOut + colorCount;
for ( ; dataOut != dataOutEnd; ++dataOut )
{
(*dataOut) = (*dataIn);
}
}
}
__global__ void copyCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
out[y * rowSizeOut + x] = in[y * rowSizeIn + x];
}
}
__global__ void extractChannelCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t colorCount, uint8_t * out, uint32_t rowSizeOut,
uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height )
out[y * rowSizeOut + x] = in[y * rowSizeIn + x * colorCount];
}
__global__ void fillCuda( uint8_t * data, uint32_t rowSize, uint32_t width, uint32_t height, uint8_t value )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height )
data[y * rowSize + x] = value;
}
__global__ void flipCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height,
bool horizontal, bool vertical )
{
const uint32_t inX = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t inY = blockDim.y * blockIdx.y + threadIdx.y;
if ( inX < width && inY < height ) {
const uint32_t outX = horizontal ? (width - 1 - inX) : inX;
const uint32_t outY = vertical ? (height - 1 - inY) : inY;
out[outY * rowSizeOut + outX] = in[inY * rowSizeIn + inX];
}
}
__global__ void histogramCuda( const uint8_t * data, uint32_t rowSize, uint32_t width, uint32_t height, uint32_t * histogram )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint32_t id = y * rowSize + x;
atomicAdd( &histogram[data[id]], 1 );
}
}
__global__ void invertCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
out[y * rowSizeOut + x] = ~in[y * rowSizeIn + x];
}
}
__global__ void lookupTableCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut,
uint32_t width, uint32_t height, uint8_t * table )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
out[y * rowSizeOut + x] = table[in[y * rowSizeIn + x]];
}
}
__global__ void maximumCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2,
uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * in1X = in1 + y * rowSizeIn1 + x;
const uint8_t * in2X = in2 + y * rowSizeIn2 + x;
uint8_t * outX = out + y * rowSizeOut + x;
(*outX) = ((*in1X) > ( *in2X )) ? (*in1X) : (*in2X);
}
}
__global__ void minimumCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2,
uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * in1X = in1 + y * rowSizeIn1 + x;
const uint8_t * in2X = in2 + y * rowSizeIn2 + x;
uint8_t * outX = out + y * rowSizeOut + x;
(*outX) = ((*in1X) < (*in2X)) ? (*in1X) : (*in2X);
}
}
__global__ void rotateCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut,
float inXStart, float inYStart, uint32_t width, uint32_t height,
float cosAngle, float sinAngle )
{
uint32_t outX = blockDim.x * blockIdx.x + threadIdx.x;
uint32_t outY = blockDim.y * blockIdx.y + threadIdx.y;
// Only do something if this thread is for a valid pixel in the output
if ( outX < width && outY < height ) {
// Both input coordinates are shifted using the cosAngle, sinAngle, outX, and outY. The shift
// comes from inverse rotating the horizontal and vertical iterations over the output.
// Note that inverse rotation by X axis is [cos(angle), -sin(angle)],
// and the inverse rotation by Y axis is [sin(angle), cos(angle)].
const float exactInX = inXStart + cosAngle * outX + sinAngle * outY;
const float exactInY = inYStart - sinAngle * outX + cosAngle * outY;
const int32_t inX = static_cast<int32_t>(exactInX);
const int32_t inY = static_cast<int32_t>(exactInY);
// Shift to the output pixel
out = out + outY * rowSizeOut + outX;
// Note that we take a weighted average with the adjacent pixels (inX + 1, inY + 1), so those pixels must also lie inside the image
if ( inX < 0 || inX >= width - 1 || inY < 0 || inY >= height - 1 ) {
*out = 0; // We do not actually know what is beyond the image, so set value to 0
}
else {
// Shift to the input pixel
in = in + inY * rowSizeIn + inX;
// Now we use bilinear interpolation to find the pixel intensity value. That is, we take a
// weighted average of the pixels (inX, inY), (inX + 1, inY), (inX, inY + 1), and (inX + 1, inY + 1),
// weighted by the fractional parts of the exact input coordinates.
// We add an offset of 0.5 so that conversion to integer is done using rounding.
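// For instance (illustrative numbers): with exactInX = 10.25 and exactInY = 20.75 the weights are
// 0.75 * 0.25 = 0.1875 for (10, 20), 0.25 * 0.25 = 0.0625 for (11, 20),
// 0.75 * 0.75 = 0.5625 for (10, 21) and 0.25 * 0.75 = 0.1875 for (11, 21); they sum to 1.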
const float probX = exactInX - inX;
const float probY = exactInY - inY;
const float mean = *in * (1 - probX) * (1 - probY) +
*(in + 1) * probX * (1 - probY) +
*(in + rowSizeIn) * (1 - probX) * probY +
*(in + rowSizeIn + 1) * probX * probY +
0.5f;
*out = static_cast<uint8_t>(mean);
}
}
}
__global__ void subtractCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2,
uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * in1X = in1 + y * rowSizeIn1 + x;
const uint8_t * in2X = in2 + y * rowSizeIn2 + x;
uint8_t * outX = out + y * rowSizeOut + x;
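// Saturating subtraction: results below zero are clamped to 0 instead of wrapping around.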
(*outX) = ((*in1X) > ( *in2X )) ? ((*in1X) - (*in2X)) : 0;
}
}
__global__ void thresholdCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height,
uint8_t threshold )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
out[y * rowSizeOut + x] = (in[y * rowSizeIn + x] < threshold) ? 0 : 255;
}
}
__global__ void thresholdCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height,
uint8_t minThreshold, uint8_t maxThreshold )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint32_t idIn = y * rowSizeIn + x;
out[y * rowSizeOut + x] = ((in[idIn] < minThreshold) || (in[idIn] > maxThreshold)) ? 0 : 255;
}
}
}
namespace Image_Function_Cuda
{
Image AbsoluteDifference( const Image & in1, const Image & in2 )
{
return Image_Function_Helper::AbsoluteDifference( AbsoluteDifference, in1, in2 );
}
void AbsoluteDifference( const Image & in1, const Image & in2, Image & out )
{
Image_Function_Helper::AbsoluteDifference( AbsoluteDifference, in1, in2, out );
}
Image AbsoluteDifference( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
uint32_t width, uint32_t height )
{
return Image_Function_Helper::AbsoluteDifference( AbsoluteDifference, in1, startX1, startY1, in2, startX2, startY2, width, height );
}
void AbsoluteDifference( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in1, in2, out );
width = width * colorCount;
const uint32_t rowSizeIn1 = in1.rowSize();
const uint32_t rowSizeIn2 = in2.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount;
const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
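// launchKernel2D (from the multiCuda helper headers) is assumed to derive a 2D block/grid
// configuration covering width x height and to forward the remaining arguments to the kernel.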
launchKernel2D( absoluteDifferenceCuda, width, height,
in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height );
}
Image BitwiseAnd( const Image & in1, const Image & in2 )
{
return Image_Function_Helper::BitwiseAnd( BitwiseAnd, in1, in2 );
}
void BitwiseAnd( const Image & in1, const Image & in2, Image & out )
{
Image_Function_Helper::BitwiseAnd( BitwiseAnd, in1, in2, out );
}
Image BitwiseAnd( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
uint32_t width, uint32_t height )
{
return Image_Function_Helper::BitwiseAnd( BitwiseAnd, in1, startX1, startY1, in2, startX2, startY2, width, height );
}
void BitwiseAnd( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in1, in2, out );
width = width * colorCount;
const uint32_t rowSizeIn1 = in1.rowSize();
const uint32_t rowSizeIn2 = in2.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount;
const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( bitwiseAndCuda, width, height,
in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height );
}
Image BitwiseOr( const Image & in1, const Image & in2 )
{
return Image_Function_Helper::BitwiseOr( BitwiseOr, in1, in2 );
}
void BitwiseOr( const Image & in1, const Image & in2, Image & out )
{
Image_Function_Helper::BitwiseOr( BitwiseOr, in1, in2, out );
}
Image BitwiseOr( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
uint32_t width, uint32_t height )
{
return Image_Function_Helper::BitwiseOr( BitwiseOr, in1, startX1, startY1, in2, startX2, startY2, width, height );
}
void BitwiseOr( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in1, in2, out );
width = width * colorCount;
const uint32_t rowSizeIn1 = in1.rowSize();
const uint32_t rowSizeIn2 = in2.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount;
const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( bitwiseOrCuda, width, height,
in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height );
}
Image BitwiseXor( const Image & in1, const Image & in2 )
{
return Image_Function_Helper::BitwiseXor( BitwiseXor, in1, in2 );
}
void BitwiseXor( const Image & in1, const Image & in2, Image & out )
{
Image_Function_Helper::BitwiseXor( BitwiseXor, in1, in2, out );
}
Image BitwiseXor( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
uint32_t width, uint32_t height )
{
return Image_Function_Helper::BitwiseXor( BitwiseXor, in1, startX1, startY1, in2, startX2, startY2, width, height );
}
void BitwiseXor( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in1, in2, out );
width = width * colorCount;
const uint32_t rowSizeIn1 = in1.rowSize();
const uint32_t rowSizeIn2 = in2.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount;
const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( bitwiseXorCuda, width, height,
in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height );
}
Image ConvertToCuda( const Image & in )
{
Image out = ImageCuda().generate( in.width(), in.height(), in.colorCount() );
ConvertToCuda( in, out );
return out;
}
void ConvertToCuda( const Image & in, Image & out )
{
Image_Function::ParameterValidation( in );
Image_Function::ParameterValidation( out );
if ( in.width() != out.width() || in.height() != out.height() ||
in.colorCount() != out.colorCount() )
throw imageException( "Bad input parameters in image function" );
if ( in.alignment() == 1u || (in.rowSize() == in.width() * in.colorCount()) )
{
const uint32_t size = in.rowSize() * in.height();
if ( !multiCuda::cudaSafeCheck( hipMemcpy( out.data(), in.data(), size * sizeof( uint8_t ), hipMemcpyHostToDevice ) ) )
throw imageException( "Cannot copy a memory to CUDA device" );
}
else
{
if ( !multiCuda::cudaSafeCheck( hipMemcpy2D( out.data(), out.rowSize(), in.data(), in.rowSize(),
in.colorCount() * in.width(), in.height(), hipMemcpyHostToDevice ) ) )
throw imageException( "Cannot copy a memory to CUDA device" );
}
}
Image ConvertFromCuda( const Image & in )
{
Image out( in.width(), in.height(), in.colorCount(), 1u );
ConvertFromCuda( in, out );
return out;
}
void ConvertFromCuda(const Image & in, Image & out )
{
Image_Function::ParameterValidation( in );
Image_Function::ParameterValidation( out );
if ( in.width() != out.width() || in.height() != out.height() ||
in.colorCount() != out.colorCount() )
throw imageException( "Bad input parameters in image function" );
if ( out.alignment() == 1u || (out.rowSize() == out.width() * out.colorCount()) )
{
const uint32_t size = in.rowSize() * in.height();
if ( !multiCuda::cudaSafeCheck( hipMemcpy( out.data(), in.data(), size, hipMemcpyDeviceToHost ) ) )
throw imageException( "Cannot copy a memory from CUDA device" );
}
else
{
if ( !multiCuda::cudaSafeCheck( hipMemcpy2D( out.data(), out.rowSize(), in.data(), in.rowSize(),
in.colorCount() * in.width(), in.height(), hipMemcpyDeviceToHost ) ) )
throw imageException( "Cannot copy a memory to CUDA device" );
}
}
Image ConvertToGrayScale( const Image & in )
{
return Image_Function_Helper::ConvertToGrayScale( ConvertToGrayScale, in );
}
void ConvertToGrayScale( const Image & in, Image & out )
{
Image_Function_Helper::ConvertToGrayScale( ConvertToGrayScale, in, out );
}
Image ConvertToGrayScale( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height )
{
return Image_Function_Helper::ConvertToGrayScale( ConvertToGrayScale, in, startXIn, startYIn, width, height );
}
void ConvertToGrayScale( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
Image_Function::VerifyGrayScaleImage( out );
if ( in.colorCount() == PenguinV_Image::GRAY_SCALE ) {
Copy( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
return;
}
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t colorCount = in.colorCount();
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut;
launchKernel2D( convertToGrayScaleCuda, width, height,
inY, rowSizeIn, colorCount, outY, rowSizeOut, width, height );
}
Image ConvertToRgb( const Image & in )
{
return Image_Function_Helper::ConvertToRgb( ConvertToRgb, in );
}
void ConvertToRgb( const Image & in, Image & out )
{
Image_Function_Helper::ConvertToRgb( ConvertToRgb, in, out );
}
Image ConvertToRgb( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height )
{
return Image_Function_Helper::ConvertToRgb( ConvertToRgb, in, startXIn, startYIn, width, height );
}
void ConvertToRgb( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
Image_Function::VerifyRGBImage ( out );
if ( in.colorCount() == PenguinV_Image::RGB ) {
Copy( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
return;
}
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t colorCount = out.colorCount();
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( convertToRgbCuda, width, height,
inY, rowSizeIn, outY, rowSizeOut, colorCount, width, height );
}
void Copy( const Image & in, Image & out )
{
Image_Function::ParameterValidation( in, out );
out = in;
}
Image Copy( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height )
{
return Image_Function_Helper::Copy( Copy, in, startXIn, startYIn, width, height );
}
void Copy( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in, out );
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
width = width * colorCount;
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( copyCuda, width, height,
inY, rowSizeIn, outY, rowSizeOut, width, height );
}
Image ExtractChannel( const Image & in, uint8_t channelId )
{
return Image_Function_Helper::ExtractChannel( ExtractChannel, in, channelId );
}
void ExtractChannel( const Image & in, Image & out, uint8_t channelId )
{
Image_Function_Helper::ExtractChannel( ExtractChannel, in, out, channelId );
}
Image ExtractChannel( const Image & in, uint32_t x, uint32_t y, uint32_t width, uint32_t height, uint8_t channelId )
{
return Image_Function_Helper::ExtractChannel( ExtractChannel, in, x, y, width, height, channelId );
}
void ExtractChannel( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut,
uint32_t startYOut, uint32_t width, uint32_t height, uint8_t channelId )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
Image_Function::VerifyGrayScaleImage( out );
if ( channelId >= in.colorCount() )
throw imageException( "Channel ID for color image is greater than channel count in input image" );
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t colorCount = in.colorCount();
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount + channelId;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut;
launchKernel2D( extractChannelCuda, width, height,
inY, rowSizeIn, colorCount, outY, rowSizeOut, width, height );
}
void Fill( Image & image, uint8_t value )
{
image.fill( value );
}
void Fill( Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height, uint8_t value )
{
Image_Function::ParameterValidation( image, x, y, width, height );
Image_Function::VerifyGrayScaleImage( image );
const uint32_t rowSize = image.rowSize();
uint8_t * imageY = image.data() + y * rowSize + x;
launchKernel2D( fillCuda, width, height,
imageY, rowSize, width, height, value );
}
Image Flip( const Image & in, bool horizontal, bool vertical )
{
Image_Function::ParameterValidation( in );
Image out = in.generate( in.width(), in.height(), in.colorCount(), in.alignment() );
Flip( in, out, horizontal, vertical );
return out;
}
void Flip( const Image & in, Image & out, bool horizontal, bool vertical )
{
Image_Function::ParameterValidation( in, out );
Image_Function::VerifyGrayScaleImage( in, out );
if ( !horizontal && !vertical ) {
Copy( in, out );
}
else {
launchKernel2D( flipCuda, out.width(), out.height(),
in.data(), in.rowSize(), out.data(), out.rowSize(), out.width(), out.height(), horizontal, vertical );
}
}
Image GammaCorrection( const Image & in, double a, double gamma )
{
return Image_Function_Helper::GammaCorrection( GammaCorrection, in, a, gamma );
}
void GammaCorrection( const Image & in, Image & out, double a, double gamma )
{
Image_Function_Helper::GammaCorrection( GammaCorrection, in, out, a, gamma );
}
Image GammaCorrection( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, double a, double gamma )
{
return Image_Function_Helper::GammaCorrection( GammaCorrection, in, startXIn, startYIn, width, height, a, gamma );
}
void GammaCorrection( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height, double a, double gamma )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
Image_Function::VerifyGrayScaleImage( in, out );
if ( a < 0 || gamma < 0 )
throw imageException( "Gamma correction parameters are invalid" );
// We precalculate all values and store them in lookup table
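// Each entry follows value[i] = a * (i / 255)^gamma * 255 (rounded, clamped to 255); for example,
// with a = 1 and gamma = 0.5 (illustrative values), intensity 64 maps to roughly 128.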
std::vector < uint8_t > value( 256, 255u );
for ( uint16_t i = 0; i < 256; ++i ) {
double data = a * pow( i / 255.0, gamma ) * 255 + 0.5;
if ( data < 256 )
value[i] = static_cast<uint8_t>(data);
}
LookupTable( in, startXIn, startYIn, out, startXOut, startYOut, width, height, value );
}
uint8_t GetThreshold( const std::vector < uint32_t > & histogram )
{
return Image_Function_Helper::GetThreshold( histogram );
}
std::vector < uint32_t > Histogram( const Image & image )
{
return Image_Function_Helper::Histogram( Histogram, image );
}
void Histogram( const Image & image, std::vector < uint32_t > & histogram )
{
Image_Function_Helper::Histogram( Histogram, image, histogram );
}
std::vector < uint32_t > Histogram( const Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height )
{
return Image_Function_Helper::Histogram( Histogram, image, x, y, width, height );
}
void Histogram( const Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height, std::vector < uint32_t > & histogram )
{
Image_Function::ParameterValidation( image, x, y, width, height );
Image_Function::VerifyGrayScaleImage( image );
histogram.resize( 256u );
std::fill( histogram.begin(), histogram.end(), 0u );
const uint32_t rowSize = image.rowSize();
const uint8_t * imageY = image.data() + y * rowSize + x;
multiCuda::Array< uint32_t > tableCuda( histogram );
launchKernel2D( histogramCuda, width, height,
imageY, rowSize, width, height, tableCuda.data() );
histogram = tableCuda.get();
}
Image Invert( const Image & in )
{
return Image_Function_Helper::Invert( Invert, in );
}
void Invert( const Image & in, Image & out )
{
Image_Function_Helper::Invert( Invert, in, out );
}
Image Invert( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height )
{
return Image_Function_Helper::Invert( Invert, in, startXIn, startYIn, width, height );
}
void Invert( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in, out );
width = width * colorCount;
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( invertCuda, width, height,
inY, rowSizeIn, outY, rowSizeOut, width, height );
}
Image LookupTable( const Image & in, const std::vector < uint8_t > & table )
{
return Image_Function_Helper::LookupTable( LookupTable, in, table );
}
void LookupTable( const Image & in, Image & out, const std::vector < uint8_t > & table )
{
Image_Function_Helper::LookupTable( LookupTable, in, out, table );
}
Image LookupTable( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height,
const std::vector < uint8_t > & table )
{
return Image_Function_Helper::LookupTable( LookupTable, in, startXIn, startYIn, width, height, table );
}
void LookupTable( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height, const std::vector < uint8_t > & table )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
Image_Function::VerifyGrayScaleImage( in, out );
if ( table.size() != 256u )
throw imageException( "Lookup table size is not equal to 256" );
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut;
multiCuda::Array< uint8_t > tableCuda( table );
launchKernel2D( lookupTableCuda, width, height,
inY, rowSizeIn, outY, rowSizeOut, width, height, tableCuda.data() );
}
Image Maximum( const Image & in1, const Image & in2 )
{
return Image_Function_Helper::Maximum( Maximum, in1, in2 );
}
void Maximum( const Image & in1, const Image & in2, Image & out )
{
Image_Function_Helper::Maximum( Maximum, in1, in2, out );
}
Image Maximum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
uint32_t width, uint32_t height )
{
return Image_Function_Helper::Maximum( Maximum, in1, startX1, startY1, in2, startX2, startY2, width, height );
}
void Maximum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in1, in2, out );
width = width * colorCount;
const uint32_t rowSizeIn1 = in1.rowSize();
const uint32_t rowSizeIn2 = in2.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount;
const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( maximumCuda, width, height,
in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height );
}
Image Minimum( const Image & in1, const Image & in2 )
{
return Image_Function_Helper::Minimum( Minimum, in1, in2 );
}
void Minimum( const Image & in1, const Image & in2, Image & out )
{
Image_Function_Helper::Minimum( Minimum, in1, in2, out );
}
Image Minimum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
uint32_t width, uint32_t height )
{
return Image_Function_Helper::Minimum( Minimum, in1, startX1, startY1, in2, startX2, startY2, width, height );
}
void Minimum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in1, in2, out );
width = width * colorCount;
const uint32_t rowSizeIn1 = in1.rowSize();
const uint32_t rowSizeIn2 = in2.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount;
const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( minimumCuda, width, height,
in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height );
}
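// A usage sketch (illustrative, not part of the original API documentation): rotate a gray-scale
// image by 30 degrees about its center, assuming in and out are equally sized CUDA images and the
// angle is given in radians:
// const float cx = in.width() / 2.0f, cy = in.height() / 2.0f;
// Rotate( in, cx, cy, out, cx, cy, 30.0f * 3.14159265f / 180.0f );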
void Rotate( const Image & in, float centerXIn, float centerYIn, Image & out, float centerXOut, float centerYOut, float angle )
{
Image_Function::ParameterValidation( in, out );
Image_Function::VerifyGrayScaleImage( in, out );
const float cosAngle = cos( angle );
const float sinAngle = sin( angle );
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint32_t width = in.width();
const uint32_t height = in.height();
uint8_t const * inMem = in.data();
uint8_t * outMem = out.data();
// We iterate over the output array in the usual manner; we iterate over the
// input using inverse rotation of this shift. Doing so, we start the input
// iteration at the following positions:
const float inXStart = -( cosAngle * centerXOut + sinAngle * centerYOut) + centerXIn;
const float inYStart = -(-sinAngle * centerXOut + cosAngle * centerYOut) + centerYIn;
launchKernel2D( rotateCuda, width, height,
inMem, rowSizeIn, outMem, rowSizeOut,
inXStart, inYStart, width, height,
cosAngle, sinAngle );
}
Image Subtract( const Image & in1, const Image & in2 )
{
return Image_Function_Helper::Subtract( Subtract, in1, in2 );
}
void Subtract( const Image & in1, const Image & in2, Image & out )
{
Image_Function_Helper::Subtract( Subtract, in1, in2, out );
}
Image Subtract( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
uint32_t width, uint32_t height )
{
return Image_Function_Helper::Subtract( Subtract, in1, startX1, startY1, in2, startX2, startY2, width, height );
}
void Subtract( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in1, in2, out );
width = width * colorCount;
const uint32_t rowSizeIn1 = in1.rowSize();
const uint32_t rowSizeIn2 = in2.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount;
const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( subtractCuda, width, height,
in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height );
}
Image Threshold( const Image & in, uint8_t threshold )
{
return Image_Function_Helper::Threshold( Threshold, in, threshold );
}
void Threshold( const Image & in, Image & out, uint8_t threshold )
{
Image_Function_Helper::Threshold( Threshold, in, out, threshold );
}
Image Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, uint8_t threshold )
{
return Image_Function_Helper::Threshold( Threshold, in, startXIn, startYIn, width, height, threshold );
}
void Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height, uint8_t threshold )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
Image_Function::VerifyGrayScaleImage( in, out );
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut;
launchKernel2D( thresholdCuda, width, height,
inY, rowSizeIn, outY, rowSizeOut, width, height, threshold );
}
Image Threshold( const Image & in, uint8_t minThreshold, uint8_t maxThreshold )
{
return Image_Function_Helper::Threshold( Threshold, in, minThreshold, maxThreshold );
}
void Threshold( const Image & in, Image & out, uint8_t minThreshold, uint8_t maxThreshold )
{
Image_Function_Helper::Threshold( Threshold, in, out, minThreshold, maxThreshold );
}
Image Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, uint8_t minThreshold,
uint8_t maxThreshold )
{
return Image_Function_Helper::Threshold( Threshold, in, startXIn, startYIn, width, height, minThreshold, maxThreshold );
}
void Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height, uint8_t minThreshold, uint8_t maxThreshold )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
Image_Function::VerifyGrayScaleImage( in, out );
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut;
launchKernel2D( thresholdCuda, width, height,
inY, rowSizeIn, outY, rowSizeOut, width, height, minThreshold, maxThreshold );
}
}
| 61a7f33ffc6d4c2aaca9db8526436b819daa0cd9.cu | #include <cuda_runtime.h>
#include <math.h>
#include "image_function_cuda.cuh"
#include "../parameter_validation.h"
#include "../image_function_helper.h"
#include "cuda_types.cuh"
#include "cuda_helper.cuh"
namespace
{
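// Registers the CUDA implementations in the generic function table at static-initialisation time,
// presumably so that operations on PenguinV_Image::ImageCuda images dispatch to these functions.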
struct FunctionRegistrator
{
Image_Function_Helper::FunctionTableHolder table;
FunctionRegistrator()
{
table.AbsoluteDifference = &Image_Function_Cuda::AbsoluteDifference;
table.BitwiseAnd = &Image_Function_Cuda::BitwiseAnd;
table.BitwiseOr = &Image_Function_Cuda::BitwiseOr;
table.BitwiseXor = &Image_Function_Cuda::BitwiseXor;
table.ConvertToGrayScale = &Image_Function_Cuda::ConvertToGrayScale;
table.ConvertToRgb = &Image_Function_Cuda::ConvertToRgb;
table.Copy = &Image_Function_Cuda::Copy;
table.ExtractChannel = &Image_Function_Cuda::ExtractChannel;
table.Fill = &Image_Function_Cuda::Fill;
table.GammaCorrection = &Image_Function_Cuda::GammaCorrection;
table.Histogram = &Image_Function_Cuda::Histogram;
table.Invert = &Image_Function_Cuda::Invert;
table.LookupTable = &Image_Function_Cuda::LookupTable;
table.Maximum = &Image_Function_Cuda::Maximum;
table.Minimum = &Image_Function_Cuda::Minimum;
table.Subtract = &Image_Function_Cuda::Subtract;
table.Threshold = &Image_Function_Cuda::Threshold;
table.Threshold2 = &Image_Function_Cuda::Threshold;
ImageTypeManager::instance().setFunctionTable( PenguinV_Image::ImageCuda().type(), table );
ImageTypeManager::instance().setConvertFunction( Image_Function_Cuda::ConvertToCuda, PenguinV_Image::Image(), PenguinV_Image::ImageCuda() );
ImageTypeManager::instance().setConvertFunction( Image_Function_Cuda::ConvertFromCuda, PenguinV_Image::ImageCuda(), PenguinV_Image::Image() );
}
};
const FunctionRegistrator functionRegistrator;
// The list of CUDA device functions on device side
__global__ void absoluteDifferenceCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2,
uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * in1X = in1 + y * rowSizeIn1 + x;
const uint8_t * in2X = in2 + y * rowSizeIn2 + x;
uint8_t * outX = out + y * rowSizeOut + x;
(*outX) = ((*in1X) > ( *in2X )) ? ((*in1X) - (*in2X)) : ((*in2X) - (*in1X));
}
}
__global__ void bitwiseAndCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2,
uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint32_t idIn1 = y * rowSizeIn1 + x;
const uint32_t idIn2 = y * rowSizeIn2 + x;
const uint32_t idOut = y * rowSizeOut + x;
out[idOut] = in1[idIn1] & in2[idIn2];
}
}
__global__ void bitwiseOrCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2,
uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint32_t idIn1 = y * rowSizeIn1 + x;
const uint32_t idIn2 = y * rowSizeIn2 + x;
const uint32_t idOut = y * rowSizeOut + x;
out[idOut] = in1[idIn1] | in2[idIn2];
}
}
__global__ void bitwiseXorCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2,
uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint32_t idIn1 = y * rowSizeIn1 + x;
const uint32_t idIn2 = y * rowSizeIn2 + x;
const uint32_t idOut = y * rowSizeOut + x;
out[idOut] = in1[idIn1] ^ in2[idIn2];
}
}
__global__ void convertToGrayScaleCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t colorCount, uint8_t * out, uint32_t rowSizeOut,
uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * data = in + y * rowSizeIn + x * colorCount;
const uint8_t * dataEnd = data + colorCount;
uint32_t sum = 0;
for ( ; data != dataEnd; ++data )
{
sum += (*data);
}
const uint32_t id = y * rowSizeOut + x;
out[id] = static_cast<uint8_t>(sum / colorCount);
}
}
__global__ void convertToRgbCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint8_t colorCount,
uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * dataIn = in + y * rowSizeIn + x;
uint8_t * dataOut = out + y * rowSizeOut + x * colorCount;
const uint8_t * dataOutEnd = dataOut + colorCount;
for ( ; dataOut != dataOutEnd; ++dataOut )
{
(*dataOut) = (*dataIn);
}
}
}
__global__ void copyCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
out[y * rowSizeOut + x] = in[y * rowSizeIn + x];
}
}
__global__ void extractChannelCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t colorCount, uint8_t * out, uint32_t rowSizeOut,
uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height )
out[y * rowSizeOut + x] = in[y * rowSizeIn + x * colorCount];
}
__global__ void fillCuda( uint8_t * data, uint32_t rowSize, uint32_t width, uint32_t height, uint8_t value )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height )
data[y * rowSize + x] = value;
}
__global__ void flipCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height,
bool horizontal, bool vertical )
{
const uint32_t inX = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t inY = blockDim.y * blockIdx.y + threadIdx.y;
if ( inX < width && inY < height ) {
const uint32_t outX = horizontal ? (width - 1 - inX) : inX;
const uint32_t outY = vertical ? (height - 1 - inY) : inY;
out[outY * rowSizeOut + outX] = in[inY * rowSizeIn + inX];
}
}
__global__ void histogramCuda( const uint8_t * data, uint32_t rowSize, uint32_t width, uint32_t height, uint32_t * histogram )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint32_t id = y * rowSize + x;
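// atomicAdd serialises concurrent increments to the same histogram bin, so counts stay exact
// even when many threads see the same intensity value.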
atomicAdd( &histogram[data[id]], 1 );
}
}
__global__ void invertCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
out[y * rowSizeOut + x] = ~in[y * rowSizeIn + x];
}
}
__global__ void lookupTableCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut,
uint32_t width, uint32_t height, uint8_t * table )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
out[y * rowSizeOut + x] = table[in[y * rowSizeIn + x]];
}
}
__global__ void maximumCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2,
uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * in1X = in1 + y * rowSizeIn1 + x;
const uint8_t * in2X = in2 + y * rowSizeIn2 + x;
uint8_t * outX = out + y * rowSizeOut + x;
(*outX) = ((*in1X) > ( *in2X )) ? (*in1X) : (*in2X);
}
}
__global__ void minimumCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2,
uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * in1X = in1 + y * rowSizeIn1 + x;
const uint8_t * in2X = in2 + y * rowSizeIn2 + x;
uint8_t * outX = out + y * rowSizeOut + x;
(*outX) = ((*in1X) < (*in2X)) ? (*in1X) : (*in2X);
}
}
__global__ void rotateCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut,
float inXStart, float inYStart, uint32_t width, uint32_t height,
float cosAngle, float sinAngle )
{
uint32_t outX = blockDim.x * blockIdx.x + threadIdx.x;
uint32_t outY = blockDim.y * blockIdx.y + threadIdx.y;
// Only do something if this thread is for a valid pixel in the output
if ( outX < width && outY < height ) {
// Both input coordinates are shifted using the cosAngle, sinAngle, outX, and outY. The shift
// comes from inverse rotating the horizontal and vertical iterations over the output.
// Note that inverse rotation by X axis is [cos(angle), -sin(angle)],
// and the inverse rotation by Y axis is [sin(angle), cos(angle)].
const float exactInX = inXStart + cosAngle * outX + sinAngle * outY;
const float exactInY = inYStart - sinAngle * outX + cosAngle * outY;
const int32_t inX = static_cast<int32_t>(exactInX);
const int32_t inY = static_cast<int32_t>(exactInY);
// Shift to the output pixel
out = out + outY * rowSizeOut + outX;
// Note that we take a weighted average with the adjacent pixels (inX + 1, inY + 1), so those pixels must also lie inside the image
if ( inX < 0 || inX >= width - 1 || inY < 0 || inY >= height - 1 ) {
*out = 0; // We do not actually know what is beyond the image, so set value to 0
}
else {
// Shift to the input pixel
in = in + inY * rowSizeIn + inX;
// Now we use bilinear interpolation to find the pixel intensity value. That is, we take a
// weighted average of the pixels (inX, inY), (inX + 1, inY), (inX, inY + 1), and (inX + 1, inY + 1),
// weighted by the fractional parts of the exact input coordinates.
// We add an offset of 0.5 so that conversion to integer is done using rounding.
const float probX = exactInX - inX;
const float probY = exactInY - inY;
const float mean = *in * (1 - probX) * (1 - probY) +
*(in + 1) * probX * (1 - probY) +
*(in + rowSizeIn) * (1 - probX) * probY +
*(in + rowSizeIn + 1) * probX * probY +
0.5f;
*out = static_cast<uint8_t>(mean);
}
}
}
__global__ void subtractCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2,
uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * in1X = in1 + y * rowSizeIn1 + x;
const uint8_t * in2X = in2 + y * rowSizeIn2 + x;
uint8_t * outX = out + y * rowSizeOut + x;
(*outX) = ((*in1X) > ( *in2X )) ? ((*in1X) - (*in2X)) : 0;
}
}
__global__ void thresholdCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height,
uint8_t threshold )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
out[y * rowSizeOut + x] = (in[y * rowSizeIn + x] < threshold) ? 0 : 255;
}
}
__global__ void thresholdCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height,
uint8_t minThreshold, uint8_t maxThreshold )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint32_t idIn = y * rowSizeIn + x;
out[y * rowSizeOut + x] = ((in[idIn] < minThreshold) || (in[idIn] > maxThreshold)) ? 0 : 255;
}
}
}
namespace Image_Function_Cuda
{
Image AbsoluteDifference( const Image & in1, const Image & in2 )
{
return Image_Function_Helper::AbsoluteDifference( AbsoluteDifference, in1, in2 );
}
void AbsoluteDifference( const Image & in1, const Image & in2, Image & out )
{
Image_Function_Helper::AbsoluteDifference( AbsoluteDifference, in1, in2, out );
}
Image AbsoluteDifference( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
uint32_t width, uint32_t height )
{
return Image_Function_Helper::AbsoluteDifference( AbsoluteDifference, in1, startX1, startY1, in2, startX2, startY2, width, height );
}
void AbsoluteDifference( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in1, in2, out );
width = width * colorCount;
const uint32_t rowSizeIn1 = in1.rowSize();
const uint32_t rowSizeIn2 = in2.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount;
const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( absoluteDifferenceCuda, width, height,
in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height );
}
Image BitwiseAnd( const Image & in1, const Image & in2 )
{
return Image_Function_Helper::BitwiseAnd( BitwiseAnd, in1, in2 );
}
void BitwiseAnd( const Image & in1, const Image & in2, Image & out )
{
Image_Function_Helper::BitwiseAnd( BitwiseAnd, in1, in2, out );
}
Image BitwiseAnd( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
uint32_t width, uint32_t height )
{
return Image_Function_Helper::BitwiseAnd( BitwiseAnd, in1, startX1, startY1, in2, startX2, startY2, width, height );
}
void BitwiseAnd( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in1, in2, out );
width = width * colorCount;
const uint32_t rowSizeIn1 = in1.rowSize();
const uint32_t rowSizeIn2 = in2.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount;
const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( bitwiseAndCuda, width, height,
in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height );
}
Image BitwiseOr( const Image & in1, const Image & in2 )
{
return Image_Function_Helper::BitwiseOr( BitwiseOr, in1, in2 );
}
void BitwiseOr( const Image & in1, const Image & in2, Image & out )
{
Image_Function_Helper::BitwiseOr( BitwiseOr, in1, in2, out );
}
Image BitwiseOr( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
uint32_t width, uint32_t height )
{
return Image_Function_Helper::BitwiseOr( BitwiseOr, in1, startX1, startY1, in2, startX2, startY2, width, height );
}
void BitwiseOr( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in1, in2, out );
width = width * colorCount;
const uint32_t rowSizeIn1 = in1.rowSize();
const uint32_t rowSizeIn2 = in2.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount;
const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( bitwiseOrCuda, width, height,
in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height );
}
Image BitwiseXor( const Image & in1, const Image & in2 )
{
return Image_Function_Helper::BitwiseXor( BitwiseXor, in1, in2 );
}
void BitwiseXor( const Image & in1, const Image & in2, Image & out )
{
Image_Function_Helper::BitwiseXor( BitwiseXor, in1, in2, out );
}
Image BitwiseXor( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
uint32_t width, uint32_t height )
{
return Image_Function_Helper::BitwiseXor( BitwiseXor, in1, startX1, startY1, in2, startX2, startY2, width, height );
}
void BitwiseXor( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in1, in2, out );
width = width * colorCount;
const uint32_t rowSizeIn1 = in1.rowSize();
const uint32_t rowSizeIn2 = in2.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount;
const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( bitwiseXorCuda, width, height,
in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height );
}
Image ConvertToCuda( const Image & in )
{
Image out = ImageCuda().generate( in.width(), in.height(), in.colorCount() );
ConvertToCuda( in, out );
return out;
}
void ConvertToCuda( const Image & in, Image & out )
{
Image_Function::ParameterValidation( in );
Image_Function::ParameterValidation( out );
if ( in.width() != out.width() || in.height() != out.height() ||
in.colorCount() != out.colorCount() )
throw imageException( "Bad input parameters in image function" );
if ( in.alignment() == 1u || (in.rowSize() == in.width() * in.colorCount()) )
{
const uint32_t size = in.rowSize() * in.height();
if ( !multiCuda::cudaSafeCheck( cudaMemcpy( out.data(), in.data(), size * sizeof( uint8_t ), cudaMemcpyHostToDevice ) ) )
throw imageException( "Cannot copy a memory to CUDA device" );
}
else
{
if ( !multiCuda::cudaSafeCheck( cudaMemcpy2D( out.data(), out.rowSize(), in.data(), in.rowSize(),
in.colorCount() * in.width(), in.height(), cudaMemcpyHostToDevice ) ) )
throw imageException( "Cannot copy a memory to CUDA device" );
}
}
Image ConvertFromCuda( const Image & in )
{
Image out( in.width(), in.height(), in.colorCount(), 1u );
ConvertFromCuda( in, out );
return out;
}
void ConvertFromCuda(const Image & in, Image & out )
{
Image_Function::ParameterValidation( in );
Image_Function::ParameterValidation( out );
if ( in.width() != out.width() || in.height() != out.height() ||
in.colorCount() != out.colorCount() )
throw imageException( "Bad input parameters in image function" );
if ( out.alignment() == 1u || (out.rowSize() == out.width() * out.colorCount()) )
{
const uint32_t size = in.rowSize() * in.height();
if ( !multiCuda::cudaSafeCheck( cudaMemcpy( out.data(), in.data(), size, cudaMemcpyDeviceToHost ) ) )
throw imageException( "Cannot copy a memory from CUDA device" );
}
else
{
if ( !multiCuda::cudaSafeCheck( cudaMemcpy2D( out.data(), out.rowSize(), in.data(), in.rowSize(),
in.colorCount() * in.width(), in.height(), cudaMemcpyDeviceToHost ) ) )
throw imageException( "Cannot copy a memory to CUDA device" );
}
}
Image ConvertToGrayScale( const Image & in )
{
return Image_Function_Helper::ConvertToGrayScale( ConvertToGrayScale, in );
}
void ConvertToGrayScale( const Image & in, Image & out )
{
Image_Function_Helper::ConvertToGrayScale( ConvertToGrayScale, in, out );
}
Image ConvertToGrayScale( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height )
{
return Image_Function_Helper::ConvertToGrayScale( ConvertToGrayScale, in, startXIn, startYIn, width, height );
}
void ConvertToGrayScale( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
Image_Function::VerifyGrayScaleImage( out );
if ( in.colorCount() == PenguinV_Image::GRAY_SCALE ) {
Copy( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
return;
}
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t colorCount = in.colorCount();
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut;
launchKernel2D( convertToGrayScaleCuda, width, height,
inY, rowSizeIn, colorCount, outY, rowSizeOut, width, height );
}
Image ConvertToRgb( const Image & in )
{
return Image_Function_Helper::ConvertToRgb( ConvertToRgb, in );
}
void ConvertToRgb( const Image & in, Image & out )
{
Image_Function_Helper::ConvertToRgb( ConvertToRgb, in, out );
}
Image ConvertToRgb( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height )
{
return Image_Function_Helper::ConvertToRgb( ConvertToRgb, in, startXIn, startYIn, width, height );
}
void ConvertToRgb( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
Image_Function::VerifyRGBImage ( out );
if ( in.colorCount() == PenguinV_Image::RGB ) {
Copy( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
return;
}
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t colorCount = out.colorCount();
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( convertToRgbCuda, width, height,
inY, rowSizeIn, outY, rowSizeOut, colorCount, width, height );
}
void Copy( const Image & in, Image & out )
{
Image_Function::ParameterValidation( in, out );
out = in;
}
Image Copy( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height )
{
return Image_Function_Helper::Copy( Copy, in, startXIn, startYIn, width, height );
}
void Copy( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in, out );
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
width = width * colorCount;
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( copyCuda, width, height,
inY, rowSizeIn, outY, rowSizeOut, width, height );
}
Image ExtractChannel( const Image & in, uint8_t channelId )
{
return Image_Function_Helper::ExtractChannel( ExtractChannel, in, channelId );
}
void ExtractChannel( const Image & in, Image & out, uint8_t channelId )
{
Image_Function_Helper::ExtractChannel( ExtractChannel, in, out, channelId );
}
Image ExtractChannel( const Image & in, uint32_t x, uint32_t y, uint32_t width, uint32_t height, uint8_t channelId )
{
return Image_Function_Helper::ExtractChannel( ExtractChannel, in, x, y, width, height, channelId );
}
void ExtractChannel( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut,
uint32_t startYOut, uint32_t width, uint32_t height, uint8_t channelId )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
Image_Function::VerifyGrayScaleImage( out );
if ( channelId >= in.colorCount() )
throw imageException( "Channel ID for color image is greater than channel count in input image" );
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t colorCount = in.colorCount();
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount + channelId;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut;
launchKernel2D( extractChannelCuda, width, height,
inY, rowSizeIn, colorCount, outY, rowSizeOut, width, height );
}
void Fill( Image & image, uint8_t value )
{
image.fill( value );
}
void Fill( Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height, uint8_t value )
{
Image_Function::ParameterValidation( image, x, y, width, height );
Image_Function::VerifyGrayScaleImage( image );
const uint32_t rowSize = image.rowSize();
uint8_t * imageY = image.data() + y * rowSize + x;
launchKernel2D( fillCuda, width, height,
imageY, rowSize, width, height, value );
}
Image Flip( const Image & in, bool horizontal, bool vertical )
{
Image_Function::ParameterValidation( in );
Image out = in.generate( in.width(), in.height(), in.colorCount(), in.alignment() );
Flip( in, out, horizontal, vertical );
return out;
}
void Flip( const Image & in, Image & out, bool horizontal, bool vertical )
{
Image_Function::ParameterValidation( in, out );
Image_Function::VerifyGrayScaleImage( in, out );
if ( !horizontal && !vertical ) {
Copy( in, out );
}
else {
launchKernel2D( flipCuda, out.width(), out.height(),
in.data(), in.rowSize(), out.data(), out.rowSize(), out.width(), out.height(), horizontal, vertical );
}
}
Image GammaCorrection( const Image & in, double a, double gamma )
{
return Image_Function_Helper::GammaCorrection( GammaCorrection, in, a, gamma );
}
void GammaCorrection( const Image & in, Image & out, double a, double gamma )
{
Image_Function_Helper::GammaCorrection( GammaCorrection, in, out, a, gamma );
}
Image GammaCorrection( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, double a, double gamma )
{
return Image_Function_Helper::GammaCorrection( GammaCorrection, in, startXIn, startYIn, width, height, a, gamma );
}
void GammaCorrection( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height, double a, double gamma )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
Image_Function::VerifyGrayScaleImage( in, out );
if ( a < 0 || gamma < 0 )
throw imageException( "Gamma correction parameters are invalid" );
// We precalculate all values and store them in a lookup table
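// value[i] = a * (i / 255)^gamma * 255 (rounded); any result of 256 or more saturates to the default 255.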
std::vector < uint8_t > value( 256, 255u );
for ( uint16_t i = 0; i < 256; ++i ) {
double data = a * pow( i / 255.0, gamma ) * 255 + 0.5;
if ( data < 256 )
value[i] = static_cast<uint8_t>(data);
}
LookupTable( in, startXIn, startYIn, out, startXOut, startYOut, width, height, value );
}
uint8_t GetThreshold( const std::vector < uint32_t > & histogram )
{
return Image_Function_Helper::GetThreshold( histogram );
}
std::vector < uint32_t > Histogram( const Image & image )
{
return Image_Function_Helper::Histogram( Histogram, image );
}
void Histogram( const Image & image, std::vector < uint32_t > & histogram )
{
Image_Function_Helper::Histogram( Histogram, image, histogram );
}
std::vector < uint32_t > Histogram( const Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height )
{
return Image_Function_Helper::Histogram( Histogram, image, x, y, width, height );
}
void Histogram( const Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height, std::vector < uint32_t > & histogram )
{
Image_Function::ParameterValidation( image, x, y, width, height );
Image_Function::VerifyGrayScaleImage( image );
histogram.resize( 256u );
std::fill( histogram.begin(), histogram.end(), 0u );
const uint32_t rowSize = image.rowSize();
const uint8_t * imageY = image.data() + y * rowSize + x;
multiCuda::Array< uint32_t > tableCuda( histogram );
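// tableCuda mirrors the 256-bin histogram in device memory; the kernel accumulates into it and get() copies the counts back to the host vector.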
launchKernel2D( histogramCuda, width, height,
imageY, rowSize, width, height, tableCuda.data() );
histogram = tableCuda.get();
}
Image Invert( const Image & in )
{
return Image_Function_Helper::Invert( Invert, in );
}
void Invert( const Image & in, Image & out )
{
Image_Function_Helper::Invert( Invert, in, out );
}
Image Invert( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height )
{
return Image_Function_Helper::Invert( Invert, in, startXIn, startYIn, width, height );
}
void Invert( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in, out );
width = width * colorCount;
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( invertCuda, width, height,
inY, rowSizeIn, outY, rowSizeOut, width, height );
}
Image LookupTable( const Image & in, const std::vector < uint8_t > & table )
{
return Image_Function_Helper::LookupTable( LookupTable, in, table );
}
void LookupTable( const Image & in, Image & out, const std::vector < uint8_t > & table )
{
Image_Function_Helper::LookupTable( LookupTable, in, out, table );
}
Image LookupTable( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height,
const std::vector < uint8_t > & table )
{
return Image_Function_Helper::LookupTable( LookupTable, in, startXIn, startYIn, width, height, table );
}
void LookupTable( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height, const std::vector < uint8_t > & table )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
Image_Function::VerifyGrayScaleImage( in, out );
if ( table.size() != 256u )
throw imageException( "Lookup table size is not equal to 256" );
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut;
multiCuda::Array< uint8_t > tableCuda( table );
launchKernel2D( lookupTableCuda, width, height,
inY, rowSizeIn, outY, rowSizeOut, width, height, tableCuda.data() );
}
Image Maximum( const Image & in1, const Image & in2 )
{
return Image_Function_Helper::Maximum( Maximum, in1, in2 );
}
void Maximum( const Image & in1, const Image & in2, Image & out )
{
Image_Function_Helper::Maximum( Maximum, in1, in2, out );
}
Image Maximum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
uint32_t width, uint32_t height )
{
return Image_Function_Helper::Maximum( Maximum, in1, startX1, startY1, in2, startX2, startY2, width, height );
}
void Maximum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in1, in2, out );
width = width * colorCount;
const uint32_t rowSizeIn1 = in1.rowSize();
const uint32_t rowSizeIn2 = in2.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount;
const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( maximumCuda, width, height,
in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height );
}
Image Minimum( const Image & in1, const Image & in2 )
{
return Image_Function_Helper::Minimum( Minimum, in1, in2 );
}
void Minimum( const Image & in1, const Image & in2, Image & out )
{
Image_Function_Helper::Minimum( Minimum, in1, in2, out );
}
Image Minimum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
uint32_t width, uint32_t height )
{
return Image_Function_Helper::Minimum( Minimum, in1, startX1, startY1, in2, startX2, startY2, width, height );
}
void Minimum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in1, in2, out );
width = width * colorCount;
const uint32_t rowSizeIn1 = in1.rowSize();
const uint32_t rowSizeIn2 = in2.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount;
const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( minimumCuda, width, height,
in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height );
}
void Rotate( const Image & in, float centerXIn, float centerYIn, Image & out, float centerXOut, float centerYOut, float angle )
{
Image_Function::ParameterValidation( in, out );
Image_Function::VerifyGrayScaleImage( in, out );
const float cosAngle = cos( angle );
const float sinAngle = sin( angle );
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint32_t width = in.width();
const uint32_t height = in.height();
uint8_t const * inMem = in.data();
uint8_t * outMem = out.data();
// We iterate over the output array in the usual manner; we iterate over the
// input using inverse rotation of this shift. Doing so, we start the input
// iteration at the following positions:
const float inXStart = -( cosAngle * centerXOut + sinAngle * centerYOut) + centerXIn;
const float inYStart = -(-sinAngle * centerXOut + cosAngle * centerYOut) + centerYIn;
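// inXStart/inYStart are the input-space coordinates that correspond to output pixel (0, 0);
// the kernel presumably steps through the input with (cosAngle, sinAngle) increments per output column/row (assumption: rotateCuda is not shown here).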
launchKernel2D( rotateCuda, width, height,
inMem, rowSizeIn, outMem, rowSizeOut,
inXStart, inYStart, width, height,
cosAngle, sinAngle );
}
Image Subtract( const Image & in1, const Image & in2 )
{
return Image_Function_Helper::Subtract( Subtract, in1, in2 );
}
void Subtract( const Image & in1, const Image & in2, Image & out )
{
Image_Function_Helper::Subtract( Subtract, in1, in2, out );
}
Image Subtract( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
uint32_t width, uint32_t height )
{
return Image_Function_Helper::Subtract( Subtract, in1, startX1, startY1, in2, startX2, startY2, width, height );
}
void Subtract( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2,
Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height )
{
Image_Function::ParameterValidation( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height );
const uint8_t colorCount = Image_Function::CommonColorCount( in1, in2, out );
width = width * colorCount;
const uint32_t rowSizeIn1 = in1.rowSize();
const uint32_t rowSizeIn2 = in2.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount;
const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount;
launchKernel2D( subtractCuda, width, height,
in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height );
}
Image Threshold( const Image & in, uint8_t threshold )
{
return Image_Function_Helper::Threshold( Threshold, in, threshold );
}
void Threshold( const Image & in, Image & out, uint8_t threshold )
{
Image_Function_Helper::Threshold( Threshold, in, out, threshold );
}
Image Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, uint8_t threshold )
{
return Image_Function_Helper::Threshold( Threshold, in, startXIn, startYIn, width, height, threshold );
}
void Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height, uint8_t threshold )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
Image_Function::VerifyGrayScaleImage( in, out );
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut;
launchKernel2D( thresholdCuda, width, height,
inY, rowSizeIn, outY, rowSizeOut, width, height, threshold );
}
Image Threshold( const Image & in, uint8_t minThreshold, uint8_t maxThreshold )
{
return Image_Function_Helper::Threshold( Threshold, in, minThreshold, maxThreshold );
}
void Threshold( const Image & in, Image & out, uint8_t minThreshold, uint8_t maxThreshold )
{
Image_Function_Helper::Threshold( Threshold, in, out, minThreshold, maxThreshold );
}
Image Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, uint8_t minThreshold,
uint8_t maxThreshold )
{
return Image_Function_Helper::Threshold( Threshold, in, startXIn, startYIn, width, height, minThreshold, maxThreshold );
}
void Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut,
uint32_t width, uint32_t height, uint8_t minThreshold, uint8_t maxThreshold )
{
Image_Function::ParameterValidation( in, startXIn, startYIn, out, startXOut, startYOut, width, height );
Image_Function::VerifyGrayScaleImage( in, out );
const uint32_t rowSizeIn = in.rowSize();
const uint32_t rowSizeOut = out.rowSize();
const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn;
uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut;
launchKernel2D( thresholdCuda, width, height,
inY, rowSizeIn, outY, rowSizeOut, width, height, minThreshold, maxThreshold );
}
}
|
f7bd7ad9258eedad028754c08f96f97c8fffaacc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "GaussianNBVarKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *d_data = NULL;
hipMalloc(&d_data, XSIZE*YSIZE);
const int *d_labels = NULL;
hipMalloc(&d_labels, XSIZE*YSIZE);
const float *feature_means_ = NULL;
hipMalloc(&feature_means_, XSIZE*YSIZE);
float *feature_vars_ = NULL;
hipMalloc(&feature_vars_, XSIZE*YSIZE);
const int *class_count_ = NULL;
hipMalloc(&class_count_, XSIZE*YSIZE);
const unsigned int n_samples_ = 1;
const unsigned int n_classes_ = 1;
const unsigned int n_features_ = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(GaussianNBVarKernel, dim3(gridBlock),dim3(threadBlock), 0, 0, d_data,d_labels,feature_means_,feature_vars_,class_count_,n_samples_,n_classes_,n_features_);
hipDeviceSynchronize();
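// Warm-up: 10 untimed launches before the measurement loop.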
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(GaussianNBVarKernel, dim3(gridBlock),dim3(threadBlock), 0, 0, d_data,d_labels,feature_means_,feature_vars_,class_count_,n_samples_,n_classes_,n_features_);
}
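// Timed region: 1000 launches; note that no device synchronization happens before 'end' is taken.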
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(GaussianNBVarKernel, dim3(gridBlock),dim3(threadBlock), 0, 0, d_data,d_labels,feature_means_,feature_vars_,class_count_,n_samples_,n_classes_,n_features_);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f7bd7ad9258eedad028754c08f96f97c8fffaacc.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "GaussianNBVarKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *d_data = NULL;
cudaMalloc(&d_data, XSIZE*YSIZE);
const int *d_labels = NULL;
cudaMalloc(&d_labels, XSIZE*YSIZE);
const float *feature_means_ = NULL;
cudaMalloc(&feature_means_, XSIZE*YSIZE);
float *feature_vars_ = NULL;
cudaMalloc(&feature_vars_, XSIZE*YSIZE);
const int *class_count_ = NULL;
cudaMalloc(&class_count_, XSIZE*YSIZE);
const unsigned int n_samples_ = 1;
const unsigned int n_classes_ = 1;
const unsigned int n_features_ = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
GaussianNBVarKernel<<<gridBlock,threadBlock>>>(d_data,d_labels,feature_means_,feature_vars_,class_count_,n_samples_,n_classes_,n_features_);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
GaussianNBVarKernel<<<gridBlock,threadBlock>>>(d_data,d_labels,feature_means_,feature_vars_,class_count_,n_samples_,n_classes_,n_features_);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
GaussianNBVarKernel<<<gridBlock,threadBlock>>>(d_data,d_labels,feature_means_,feature_vars_,class_count_,n_samples_,n_classes_,n_features_);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
164b3901d449ac53fe07bc38f4e416ff4bd8f9e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "RG.cuh"
/******* Kernel definitions ******/
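// VoteSphereProcess: each thread bins its pixel's normal into a discretized sphere
// (dimElev x dimAzimut cells of 'step' degrees) and accumulates the per-cell normal sum and count with atomics.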
__device__ __forceinline__ void VoteSphereProcess(float *NMap, int *Sphere, float *AVG_NMLE, int dimAzimut, int dimElev, float step, int n, int m) {
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
float nmle [3];
nmle [0] = NMap[3*idx];
nmle [1] = NMap[3*idx+1];
nmle [2] = NMap[3*idx+2];
int idx_sphere;
if (nmle [0] == 0.0 && nmle [1] == 0.0 && nmle [2] == 0.0)
return;
float alpha = (acos(nmle[2]) / PI) * 180.0; // [0;180]
float beta = (acos(nmle[0]/sin(alpha)) / PI) * 180.0; // [0;360]
if (nmle[1] < 0.0)
beta += 180.0;
idx_sphere = int(alpha/step)*dimAzimut + int(beta/step);
atomicAdd(&AVG_NMLE[3*idx_sphere], nmle [0]);
atomicAdd(&AVG_NMLE[3*idx_sphere+1], nmle [1]);
atomicAdd(&AVG_NMLE[3*idx_sphere+2], nmle [2]);
atomicAdd(&Sphere[idx_sphere], 1);
}
__global__ void VoteSphereKernel(float *NMap, int *Sphere, float *AVG_NMLE, int dimAzimut, int dimElev, float step, int n, int m) {
VoteSphereProcess(NMap, Sphere, AVG_NMLE, dimAzimut, dimElev, step, n, m);
}
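// VoteDistanceProcess: for pixels already assigned to normal cluster ref_i, vote the signed offset pt.nmle
// into 1D bins of width 'epsilon' covering [-10, 10), accumulating the offset sum and count per bin.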
__device__ __forceinline__ void VoteDistanceProcess(float *VMap, int *Indices, float *nmle, int *Space_count, float *Space, int dim, float epsilon, int ref_i, int n, int m) {
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
int idx_pt = Indices[idx];
if (idx_pt != ref_i)
return;
float pt [3];
pt [0] = VMap[3*idx];
pt [1] = VMap[3*idx+1];
pt [2] = VMap[3*idx+2];
float scal = pt[0]*nmle[0] + pt[1]*nmle[1] + pt[2]*nmle[2];
if (scal < -10.0 || scal >= 10.0)
return;
int idx_space = int((scal+10.0)/epsilon);
atomicAdd(&Space[idx_space], scal);
atomicAdd(&Space_count[idx_space], 1);
}
__global__ void VoteDistanceKernel(float *VMap, int *Indices, float *nmle, int *Space_count, float *Space, int dim, float epsilon, int ref_i, int n, int m) {
VoteDistanceProcess(VMap, Indices, nmle, Space_count, Space, dim, epsilon, ref_i, n, m);
}
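// GetIndicesProcess: assign each pixel to the candidate plane normal with the largest |dot| against its own
// normal, or -1 when the best agreement is below cos(alpha) or the normal is zero.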
__device__ __forceinline__ void GetIndicesProcess(float *NMap, int *Indices_dev, float *Plan_dev, float alpha, int nbPlan, int n, int m) {
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
float nmle [3];
nmle [0] = NMap[3*idx];
nmle [1] = NMap[3*idx+1];
nmle [2] = NMap[3*idx+2];
int idx_sphere;
if (nmle [0] == 0.0 && nmle [1] == 0.0 && nmle [2] == 0.0) {
Indices_dev[idx] = -1;
return;
}
float min_error = -2.0;
int k = 0;
int idx_plan = -1;
float nmletmp [3];
float error_alpha;
for (int l = 0; l < nbPlan; l++) {
nmletmp [0] = Plan_dev[3*l];
nmletmp [1] = Plan_dev[3*l+1];
nmletmp [2] = Plan_dev[3*l+2];
error_alpha = fabs(nmle[0]*nmletmp[0] + nmle[1]*nmletmp[1] + nmle[2]*nmletmp[2]);
if (error_alpha > min_error) {
min_error = error_alpha;
idx_plan = k;
}
k++;
}
if (min_error > cos(alpha)) {
Indices_dev[idx] = idx_plan;
} else {
Indices_dev[idx] = -1;
}
}
__global__ void GetIndicesKernel(float *NMap, int *Indices_dev, float *Plan_dev, float alpha, int nbPlan, int n, int m) {
GetIndicesProcess(NMap, Indices_dev, Plan_dev, alpha, nbPlan, n, m);
}
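// GetIndicesDistancesProcess: within normal cluster ref_i, pick the sub-plane whose offset error |pt.n - d| is
// smallest; pixels farther than 'epsilon' from every sub-plane are marked -1.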
__device__ __forceinline__ void GetIndicesDistancesProcess(float *VMap, int *Indices_Final_dev, int *Indices_dev, float *Equations_dev, float epsilon, int nb_subPlan, int shift_eq, int ref_i, int n, int m) {
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
int idx_pt = Indices_dev[idx];
if (idx_pt != ref_i)
return;
float pt [3];
pt [0] = VMap[3*idx];
pt [1] = VMap[3*idx+1];
pt [2] = VMap[3*idx+2];
float min_error = 2.0;
int k = 0;
int idx_dist = -1;
float equa [4];
float error_dist;
for (int l = 0; l < nb_subPlan; l++) {
equa [0] = Equations_dev[4*l];
equa [1] = Equations_dev[4*l+1];
equa [2] = Equations_dev[4*l+2];
equa [3] = Equations_dev[4*l+3];
error_dist = fabs(pt[0]*equa[0] + pt[1]*equa[1] + pt[2]*equa[2] - equa[3]);
if (error_dist < min_error) {
min_error = error_dist;
idx_dist = k;
}
k++;
}
if (min_error < epsilon) {
Indices_Final_dev[idx] = idx_dist+shift_eq;
} else {
Indices_Final_dev[idx] = -1;
}
}
__global__ void GetIndicesDistancesKernel(float *VMap, int *Indices_Final_dev, int *Indices_dev, float *Equations_dev, float epsilon, int nb_subPlan, int shift_eq, int ref_i, int n, int m) {
GetIndicesDistancesProcess(VMap, Indices_Final_dev, Indices_dev, Equations_dev, epsilon, nb_subPlan, shift_eq, ref_i, n, m);
}
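// ProjectionProcess: find the closest plane passing the distance (epsilon) and normal-agreement (alpha) tests,
// project the point into that plane's (e1, e2) basis, grow the plane's bounding box atomically,
// and store (a, b, offset, rgb) in Projected_dev.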
__device__ __forceinline__ void ProjectionProcess(float *Projected_dev, int *BBox, float *VMap, float *NMap, float *RGB, int *Indices_dev, float *Equations_dev, int nbPlans, float res, float epsilon, float alpha, int n, int m) {
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
/*int idx_pt = Indices_dev[idx];
if (idx_pt < 0)
return;*/
float pt [3];
pt [0] = VMap[3*idx];
pt [1] = VMap[3*idx+1];
pt [2] = VMap[3*idx+2];
float npt [3];
npt [0] = NMap[3*idx];
npt [1] = NMap[3*idx+1];
npt [2] = NMap[3*idx+2];
if (npt [0] == 0.0 && npt [1] == 0.0 && npt [2] == 0.0)
return;
float min_val_dist = 1.0e10;
int idx_pt = -1;
float error_dist, error_alpha, a, b, scal, d;
float proj [3];
float nml[3], e1[3], e2[3];
for (int count = 0; count < nbPlans; count++) {
nml[0] = Equations_dev[10*count]; nml[1] = Equations_dev[10*count+1]; nml[2] = Equations_dev[10*count+2];
e1[0] = Equations_dev[10*count+3]; e1[1] = Equations_dev[10*count+4]; e1[2] = Equations_dev[10*count+5];
e2[0] = Equations_dev[10*count+6]; e2[1] = Equations_dev[10*count+7]; e2[2] = Equations_dev[10*count+8];
d = Equations_dev[10*count+9];
error_dist = (pt[0])*nml[0] + (pt[1])*nml[1] + (pt[2])*nml[2] - d;
error_alpha = (npt[0]*nml[0] + npt[1]*nml[1] + npt[2]*nml[2]);
if (fabs(error_dist) > epsilon || fabs(error_alpha) < alpha)
continue;
if (fabs(error_dist) < min_val_dist) {
min_val_dist = fabs(error_dist);
idx_pt = count;
}
}
if (idx_pt == -1)
return;
float color [3];
color [0] = RGB[4*idx];
color [1] = RGB[4*idx+1];
color [2] = RGB[4*idx+2];
nml[0] = Equations_dev[10*idx_pt]; nml[1] = Equations_dev[10*idx_pt+1]; nml[2] = Equations_dev[10*idx_pt+2];
e1[0] = Equations_dev[10*idx_pt+3]; e1[1] = Equations_dev[10*idx_pt+4]; e1[2] = Equations_dev[10*idx_pt+5];
e2[0] = Equations_dev[10*idx_pt+6]; e2[1] = Equations_dev[10*idx_pt+7]; e2[2] = Equations_dev[10*idx_pt+8];
d = Equations_dev[10*idx_pt+9];
scal = (pt [0])*nml[0]+(pt [1])*nml[1]+(pt [2])*nml[2] - d;
proj[0] = (pt [0]) - scal*nml[0];
proj[1] = (pt [1]) - scal*nml[1];
proj[2] = (pt [2]) - scal*nml[2];
a = proj[0]*e1[0] + proj[1]*e1[1] + proj[2]*e1[2];
b = proj[0]*e2[0] + proj[1]*e2[1] + proj[2]*e2[2];
atomicMin(&BBox[4*idx_pt], int(a/res));
atomicMax(&BBox[4*idx_pt+1], int(a/res));
atomicMin(&BBox[4*idx_pt+2], int(b/res));
atomicMax(&BBox[4*idx_pt+3], int(b/res));
Projected_dev[6*idx] = a/res;
Projected_dev[6*idx+1] = b/res;
Projected_dev[6*idx+2] = scal;
Projected_dev[6*idx+3] = color [0];
Projected_dev[6*idx+4] = color [1];
Projected_dev[6*idx+5] = color [2];
Indices_dev[idx] = idx_pt;
}
__global__ void ProjectionKernel(float *Projected_dev, int *BBox, float *VMap, float *NMap, float *RGB, int *Indices_dev, float *Equations_dev, int nbPlans, float res, float epsilon, float alpha, int n, int m) {
ProjectionProcess(Projected_dev, BBox, VMap, NMap, RGB, Indices_dev, Equations_dev, nbPlans, res, epsilon, alpha, n, m);
}
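// SegmentProcess: one region-growing pass -- an unlabeled pixel inherits the label of a 3x3 neighbour at
// similar depth when its transformed position and normal satisfy that plane's epsilon/alpha thresholds.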
__device__ __forceinline__ void SegmentProcess(unsigned char *Label, float *VMap, float *NMap, float *pose, float *Equations, int nbPlans, float epsilon, float alpha, int n, int m) {
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
if (Label[idx] > 0)
return;
int s = 1;
int lb = max(0, i-s);
int ub = min(n, i+s+1);
int lr = max(0, j-s);
int ur = min(m, j+s+1);
float depth = VMap[3*idx+2];
float thresh_depth = 0.003;
float pt_l [3];
pt_l [0] = VMap[3*idx];
pt_l [1] = VMap[3*idx+1];
pt_l [2] = VMap[3*idx+2];
float pt [3];
pt [0] = pose[0]*pt_l[0] + pose[4]*pt_l[1] + pose[8]*pt_l[2] + pose[12];
pt [1] = pose[1]*pt_l[0] + pose[5]*pt_l[1] + pose[9]*pt_l[2] + pose[13];
pt [2] = pose[2]*pt_l[0] + pose[6]*pt_l[1] + pose[10]*pt_l[2] + pose[14];
float npt_l [3];
npt_l [0] = NMap[3*idx];
npt_l [1] = NMap[3*idx+1];
npt_l [2] = NMap[3*idx+2];
float npt [3];
npt [0] = pose[0]*npt_l[0] + pose[4]*npt_l[1] + pose[8]*npt_l[2];
npt [1] = pose[1]*npt_l[0] + pose[5]*npt_l[1] + pose[9]*npt_l[2];
npt [2] = pose[2]*npt_l[0] + pose[6]*npt_l[1] + pose[10]*npt_l[2];
if (npt [0] == 0.0 && npt [1] == 0.0 && npt [2] == 0.0)
return;
float error_dist, error_alpha, a, b, scal, d;
float nml[3], e1[3], e2[3];
for (int ki = lb; ki < ub; ki++) {
for (int kj = lr; kj < ur; kj++) {
if (Label[ki*m + kj] > 0 && fabs(VMap[3*(ki*m + kj)+2]-depth) < thresh_depth) {
int count = int(Label[ki*m + kj])-1;
nml[0] = Equations[10*count]; nml[1] = Equations[10*count+1]; nml[2] = Equations[10*count+2];
e1[0] = Equations[10*count+3]; e1[1] = Equations[10*count+4]; e1[2] = Equations[10*count+5];
e2[0] = Equations[10*count+6]; e2[1] = Equations[10*count+7]; e2[2] = Equations[10*count+8];
d = Equations[10*count+9];
error_dist = (pt[0])*nml[0] + (pt[1])*nml[1] + (pt[2])*nml[2] - d;
error_alpha = (npt[0]*nml[0] + npt[1]*nml[1] + npt[2]*nml[2]);
if (fabs(error_dist) > epsilon || error_alpha < alpha)
continue;
Label[idx] = Label[ki*m + kj];
return;
}
}
}
return;
}
__global__ void SegmentKernel(unsigned char *Label, float *VMap, float *NMap, float *pose, float *Equations, int nbPlans, float epsilon, float alpha, int n, int m) {
SegmentProcess(Label, VMap, NMap, pose, Equations, nbPlans, epsilon, alpha, n, m);
}
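// InitFragsProcess: map each projected point of plane 'currj' (or of any plane when currj == -1) to a cell of
// the per-plane bump image; where the mask still holds 10, write the quantized sub-pixel shift and offset into
// TheBumps, the color into TheRGBs, and set the mask to 11.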
__device__ __forceinline__ void InitFragsProcess(float *Projected, int *Indices, int *Size, float *center, unsigned short *TheBumps, unsigned char *TheRGBs, unsigned char *TheMasks, float *equation, int currj, float res, int n, int m) {
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
int idx_pt = Indices[idx];
if (currj != -1 && idx_pt != currj /* < 0*/)
return;
// The local origin transformed into the global coordinate system
float origin [3];
origin [0] = equation [0];
origin [1] = equation [1];
origin [2] = equation [2];
// The viewing direction of the local camera in the global coordinate system
float view_dir [3];
view_dir [0] = equation [3];
view_dir [1] = equation [4];
view_dir [2] = equation [5];
// The normal of the plane
float nmle [3];
nmle [0] = equation [6];
nmle [1] = equation [7];
nmle [2] = equation [8];
float rgb [3];
rgb [0] = Projected[6*idx+3];
rgb [1] = Projected[6*idx+4];
rgb [2] = Projected[6*idx+5];
float scal, a, b, alpha, theta, d, x, y;
a = Projected[6*idx];
b = Projected[6*idx+1];
scal = Projected[6*idx+2];
x = a*res;
y = b*res;
d = equation [15] + scal;
float pt [3];
pt [0] = x*equation [9] + y*equation [12] + d*nmle[0];
pt [1] = x*equation [10] + y*equation [13] + d*nmle[1];
pt [2] = x*equation [11] + y*equation [14] + d*nmle[2];
// The vector from the point to the origin
float vect [3];
vect [0] = origin [0] - pt [0];
vect [1] = origin [1] - pt [1];
vect [2] = origin [2] - pt [2];
float nrm = sqrt(vect [0]*vect [0] + vect [1]*vect [1] + vect [2]*vect [2]);
vect [0] = vect [0]/nrm;
vect [1] = vect [1]/nrm;
vect [2] = vect [2]/nrm;
// Dot product between nmle and vector
theta = nmle[0]*vect[0] + nmle[1]*vect[1] + nmle[2]*vect[2];
alpha = view_dir[0]*vect[0] + view_dir[1]*vect[1] + view_dir[2]*vect[2];
bool lockval = (theta > 0.8) && (alpha > 0.4);
int idxBump [2];
idxBump [0] = int(a -center[0]/res);
idxBump [1] = int(b -center[1]/res);
float shift [2];
shift[0] = (a -center[0]/res) - float(idxBump [0]);
shift[1] = (b -center[1]/res) - float(idxBump [1]);
if (idxBump [0] < 0 || idxBump [0] > Size[0]-1 || idxBump [1] < 0 || idxBump [1] > Size[1]-1)
return;
int old_mask = TheMasks[idxBump [0]*Size[1] + idxBump [1]]; ///atomicExch(&TheMasks[idxBump [0]*Size[1] + idxBump [1]], 11);
__syncthreads ();
if (old_mask == 10) {
TheBumps[3*(idxBump [0]*Size[1] + idxBump [1])] = static_cast<unsigned short>(shift[0]*60000.0);
TheBumps[3*(idxBump [0]*Size[1] + idxBump [1])+1] = static_cast<unsigned short>(shift[1]*60000.0);
TheBumps[3*(idxBump [0]*Size[1] + idxBump [1])+2] = static_cast<unsigned short>(((scal+15.0)/*/0.4*/)*2000.0);
TheRGBs[3*(idxBump [0]*Size[1] + idxBump [1])] = static_cast<unsigned char>(rgb[0]*255.0);
TheRGBs[3*(idxBump [0]*Size[1] + idxBump [1])+1] = static_cast<unsigned char>(rgb[1]*255.0);
TheRGBs[3*(idxBump [0]*Size[1] + idxBump [1])+2] = static_cast<unsigned char>(rgb[2]*255.0);
TheMasks[idxBump [0]*Size[1] + idxBump [1]] = 11;
}
}
__global__ void InitFragsKernel(float *Projected, int *Indices, int *Size, float *center, unsigned short *TheBumps, unsigned char *TheRGBs, unsigned char *TheMasks, float *equation, int currj, float res, int n, int m) {
InitFragsProcess(Projected, Indices, Size, center, TheBumps, TheRGBs, TheMasks, equation, currj, res, n, m);
}
__device__ __forceinline__ void InitValProcess(unsigned char *InOut, unsigned char val, int n, int m) {
// two-dimensional thread index, matching the matrix layout
unsigned int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
unsigned int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
unsigned int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
InOut[idx] = val;
}
__global__ void InitValKernel(unsigned char *InOut, unsigned char val, int n, int m) {
InitValProcess(InOut, val, n, m);
}
__device__ __forceinline__ void ImDilateProcess(bool *res, bool *Input, int n, int m, int size) {
// two-dimensional thread index, matching the matrix layout
unsigned int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
unsigned int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
unsigned int idx = i*m + j;
if (i > n-size-1 || j > m-size-1 || i < size || j < size)
return;
res[idx] = false;
for (int k = -size; k < size+1; k++) {
for (int l = -size; l < size+1; l++) {
if (Input[(i+k)*m + j+l]) {
res[idx] = true;
return;
}
}
}
}
__global__ void ImDilateKernel(bool *res, bool *Input, int n, int m, int size) {
ImDilateProcess(res, Input, n, m, size);
}
__device__ __forceinline__ void ImDilateProcess(bool *res, unsigned char *Input, int n, int m, int size) {
// two-dimensional thread index, matching the matrix layout
unsigned int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
unsigned int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
unsigned int idx = i*m + j;
if (i > n-size-1 || j > m-size-1 || i < size || j < size)
return;
res[idx] = false;
for (int k = -size; k < size+1; k++) {
for (int l = -size; l < size+1; l++) {
if (Input[(i+k)*m + j+l] > 10) {
res[idx] = true;
return;
}
}
}
}
__global__ void ImDilateKernel(bool *res, unsigned char *Input, int n, int m, int size) {
ImDilateProcess(res, Input, n, m, size);
}
__device__ __forceinline__ void ImErodeProcess(bool *res, bool *Input, int n, int m, int size) {
// two-dimensional thread index, matching the matrix layout
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
res[idx] = true;
for (int k = -size; k < size+1; k++) {
for (int l = -size; l < size+1; l++) {
if ((i+k) > n-1 || j+l > m-1 || (i+k) < 0 || j+l < 0)
continue;
if (!Input[(i+k)*m + j+l] ) {
res[idx] = false;
return;
}
}
}
}
__global__ void ImErodeKernel(bool *res, bool *Input, int n, int m, int size) {
ImErodeProcess(res, Input, n, m, size);
}
__device__ __forceinline__ void VotePlanProcess(unsigned char *Label, float *VMap, float* centers, float *count, float *pose, int n, int m) {
// two-dimensional thread index, matching the matrix layout
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
if (Label[idx] == 0)
return;
float pt_l [3];
pt_l [0] = VMap[3*idx];
pt_l [1] = VMap[3*idx+1];
pt_l [2] = VMap[3*idx+2];
float pt [3];
pt [0] = pose[0]*pt_l[0] + pose[4]*pt_l[1] + pose[8]*pt_l[2] + pose[12];
pt [1] = pose[1]*pt_l[0] + pose[5]*pt_l[1] + pose[9]*pt_l[2] + pose[13];
pt [2] = pose[2]*pt_l[0] + pose[6]*pt_l[1] + pose[10]*pt_l[2] + pose[14];
atomicAdd(¢ers[3*(Label[idx]-1)], pt [0]);
atomicAdd(¢ers[3*(Label[idx]-1)+1], pt [1]);
atomicAdd(¢ers[3*(Label[idx]-1)+2], pt [2]);
atomicAdd(&count[Label[idx]-1], 1.0);
}
__global__ void VotePlanKernel(unsigned char *Label, float *VMap, float* centers, float *count, float *pose, int n, int m) {
VotePlanProcess(Label, VMap, centers, count, pose, n, m);
}
__device__ __forceinline__ void AffectPlanProcess(unsigned char *Label, int *Buff, int n, int m) {
// two-dimensional thread index, matching the matrix layout
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
if (Label[idx] == 0)
return;
Label[idx] = static_cast<unsigned char>(Buff[Label[idx]-1]);
}
__global__ void AffectPlanKernel(unsigned char *Label, int *Buff, int n, int m) {
AffectPlanProcess(Label, Buff, n, m);
}
__global__ void AffectKernel(float **Tab, float *in, int indx){
Tab[indx] = in;
}
__global__ void AffectCharKernel(unsigned char **Tab, unsigned char *in, int indx){
Tab[indx] = in;
}
__global__ void AffectShortKernel(unsigned short **Tab, unsigned short *in, int indx){
Tab[indx] = in;
}
///**** Function definitions ****/
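// DetectPlans_cu pipeline: (1) vote pixel normals on a discretized sphere (bins of 'alpha' degrees),
// (2) merge nearby direction clusters through an equivalence matrix closed with Floyd-Warshall,
// (3) label pixels by direction, (4) per direction, vote and merge plane offsets in bins of 'epsilon',
// (5) relabel pixels by the final plane equations. Returns one malloc'ed float[4] = {nx, ny, nz, d} per plane.
//
// Typical call (sketch; the map names and threshold values below are illustrative assumptions):
// int *indices_dev = NULL;
// checkCudaErrors( hipMalloc((void **) &indices_dev, n*m*sizeof(int)) );
// std::vector<float *> planes = DetectPlans_cu(vmap_dev, nmap_dev, indices_dev, 0.01f, 30.0f, n, m);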
vector<float *> DetectPlans_cu(float *VMap, float *NMap, int *Indices_Final_dev, float epsilon, float alpha, int n, int m) {
int dimAzimut = int(360.0/alpha);
int dimElev = int(180.0/alpha);
int *Sphere = (int *) malloc(dimAzimut*dimElev*sizeof(int)); // 360/30 * 180/30
float *Avg_NMLE = (float *) malloc(3*dimAzimut*dimElev*sizeof(float)); // 3*(360/30 * 180/30)
int *Sphere_dev;
float *Avg_NMLE_dev;
checkCudaErrors( hipMalloc((void **) &Sphere_dev, dimAzimut*dimElev*sizeof(int)) );
checkCudaErrors( hipMemset(Sphere_dev, 0, dimAzimut*dimElev*sizeof(int)) );
checkCudaErrors( hipMalloc((void **) &Avg_NMLE_dev, 3*dimAzimut*dimElev*sizeof(float)) );
checkCudaErrors( hipMemset(Avg_NMLE_dev, 0, 3*dimAzimut*dimElev*sizeof(float)) );
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x); // !! do not swap n and m !!
dimGrid.y = divUp (m, dimBlock.y);
hipLaunchKernelGGL(( VoteSphereKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, NMap, Sphere_dev, Avg_NMLE_dev, dimAzimut, dimElev, alpha, n, m);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors( hipMemcpy(Sphere, Sphere_dev, dimAzimut*dimElev*sizeof(int), hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(Avg_NMLE, Avg_NMLE_dev, 3*dimAzimut*dimElev*sizeof(float), hipMemcpyDeviceToHost) );
int count_pt;
vector<float *> NMLES_buff;
vector<int> NMLES_count_buff;
for (int i = 0; i < dimAzimut*dimElev; i++) {
count_pt = Sphere[i];
if (count_pt > 1000) {
float *nmletmp = (float *) malloc(3*sizeof(float));
nmletmp[0] = Avg_NMLE[3*i]/float(count_pt);
nmletmp[1] = Avg_NMLE[3*i+1]/float(count_pt);
nmletmp[2] = Avg_NMLE[3*i+2]/float(count_pt);
float norm = sqrt(nmletmp[0]*nmletmp[0] + nmletmp[1]*nmletmp[1] + nmletmp[2]*nmletmp[2]);
nmletmp[0] = nmletmp[0]/norm;
nmletmp[1] = nmletmp[1]/norm;
nmletmp[2] = nmletmp[2]/norm;
NMLES_buff.push_back(nmletmp);
NMLES_count_buff.push_back(count_pt);
}
}
int nbPlan = NMLES_buff.size();
///// Merge close enough clusters
// build equivalence matrix;
bool *equivalences = (bool *) malloc(nbPlan*nbPlan);
memset(equivalences, 0, nbPlan*nbPlan*sizeof(bool));
float error_alpha;
for (int i = 0; i < nbPlan; i++) {
float *nmlecurr = NMLES_buff[i];
equivalences[nbPlan*i + i] = true;
for (int j = i+1; j < nbPlan; j++) {
float *nmletmp = NMLES_buff[j];
error_alpha = fabs(nmlecurr[0]*nmletmp[0] + nmlecurr[1]*nmletmp[1] + nmlecurr[2]*nmletmp[2]);
if (error_alpha > cos(10.0*PI/180.0)) {
equivalences[nbPlan*i + j] = true;
equivalences[nbPlan*j + i] = true;
}
}
}
// Transitive closure by Floyd-Warshall algorithm
for (int i = 0; i < nbPlan; i++) {
for (int j = 0; j < nbPlan; j++) {
if (equivalences[nbPlan*i + j]) {
for (int k = 0; k < nbPlan; k++) {
equivalences[nbPlan*i + k] = equivalences[nbPlan*i + k] || equivalences[nbPlan*j + k];
}
}
}
}
vector<float *> NMLES;
vector<int> NMLES_count;
for (int i = 0; i < nbPlan; i++) {
if (equivalences[nbPlan*i + i]) {
float *nmlecurr = (float *) malloc(3*sizeof(float));
nmlecurr[0] = nmlecurr[1] = nmlecurr[2] = 0.0;
//int count_nmle = 0;
int count = 0;
for (int j = 0; j < nbPlan; j++) {
if (equivalences[nbPlan*j + i]) {
float *nmletmp = NMLES_buff[j];
nmlecurr[0] = nmlecurr[0] + float(NMLES_count_buff[j])*nmletmp[0];
nmlecurr[1] = nmlecurr[1] + float(NMLES_count_buff[j])*nmletmp[1];
nmlecurr[2] = nmlecurr[2] + float(NMLES_count_buff[j])*nmletmp[2];
//count_nmle ++;
equivalences[nbPlan*j + j] = false;
count += NMLES_count_buff[j];
}
}
if (count < 3000) {
free(nmlecurr);
continue;
}
nmlecurr[0] = nmlecurr[0]/float(count);
nmlecurr[1] = nmlecurr[1]/float(count);
nmlecurr[2] = nmlecurr[2]/float(count);
float norm = sqrt(nmlecurr[0]*nmlecurr[0] + nmlecurr[1]*nmlecurr[1] + nmlecurr[2]*nmlecurr[2]);
nmlecurr[0] = nmlecurr[0]/norm;
nmlecurr[1] = nmlecurr[1]/norm;
nmlecurr[2] = nmlecurr[2]/norm;
NMLES.push_back(nmlecurr);
NMLES_count.push_back(count);
}
}
for (vector<float *>::iterator it = NMLES_buff.begin(); it != NMLES_buff.end(); it++)
free((*it));
NMLES_buff.clear();
NMLES_count_buff.clear();
nbPlan = NMLES.size();
int *Indices_dev;
checkCudaErrors( hipMalloc((void **) &Indices_dev,n*m*sizeof(int)) );
checkCudaErrors( hipMemset(Indices_dev, 0, n*m*sizeof(int)) );
float *Plan = (float *) malloc(3*nbPlan*sizeof(float));
int nbPt = 0;
for (int i = 0; i < nbPlan; i++) {
Plan[3*i] = NMLES[i][0];
Plan[3*i+1] = NMLES[i][1];
Plan[3*i+2] = NMLES[i][2];
nbPt += NMLES_count[i];
}
float *Plan_dev;
checkCudaErrors( hipMalloc((void **) &Plan_dev,3*nbPlan*sizeof(float)) );
checkCudaErrors( hipMemcpy(Plan_dev, Plan, 3*nbPlan*sizeof(float), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( GetIndicesKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, NMap, Indices_dev, Plan_dev, 0.8f*alpha*PI/180.0f, nbPlan, n, m);
checkCudaErrors( hipDeviceSynchronize() );
//////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////// Cluster in distance //////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////
int dim = 2*(int(10.0/epsilon)+1);
float *Space = (float *) malloc (dim*sizeof(float));
int *Space_count = (int *) malloc (dim*sizeof(int));
int *Space_count_dev;
float *Space_dev;
float *curr_nmle_dev;
checkCudaErrors( hipMalloc((void **) &Space_count_dev, dim*sizeof(int)) );
checkCudaErrors( hipMalloc((void **) &Space_dev, dim*sizeof(float)) );
checkCudaErrors( hipMalloc((void **) &curr_nmle_dev, 3*sizeof(float)) );
vector<float *> EQUA_buff;
int shift_eq = 0;
for (int i = 0; i < nbPlan; i++) {
checkCudaErrors( hipMemcpy(curr_nmle_dev, NMLES[i], 3*sizeof(float), hipMemcpyHostToDevice) );
checkCudaErrors( hipMemset(Space_count_dev, 0, dim*sizeof(int)) );
checkCudaErrors( hipMemset(Space_dev, 0, dim*sizeof(float)) );
hipLaunchKernelGGL(( VoteDistanceKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, VMap, Indices_dev, curr_nmle_dev, Space_count_dev, Space_dev, dim, epsilon, i, n, m);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors( hipMemcpy(Space_count, Space_count_dev, dim*sizeof(int), hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(Space, Space_dev, dim*sizeof(float), hipMemcpyDeviceToHost) );
vector<float *> EQUA_tmp;
vector<int> EQUA_count;
for (int j = 0; j < dim; j++) {
count_pt = Space_count [j];
if (count_pt > 1000) {
float *equa = (float *) malloc(4*sizeof(float));
equa[0] = NMLES[i][0];
equa[1] = NMLES[i][1];
equa[2] = NMLES[i][2];
equa[3] = Space[j]/float(count_pt);
EQUA_tmp.push_back(equa);
EQUA_count.push_back(count_pt);
}
}
int nb_subPlan = EQUA_tmp.size();
///// Merge close enough clusters
// build equivalence matrix;
bool *equivalences_dist = (bool *) malloc(nb_subPlan*nb_subPlan);
memset(equivalences_dist, 0, nb_subPlan*nb_subPlan*sizeof(bool));
float error_alpha;
for (int l = 0; l < nb_subPlan; l++) {
float *equa = EQUA_tmp[l];
equivalences_dist[nb_subPlan*l + l] = true;
for (int j = l+1; j < nb_subPlan; j++) {
float *equatmp = EQUA_tmp[j];
error_alpha = fabs(equa[3] - equatmp[3]);
if (error_alpha < 4.0*epsilon) {
equivalences_dist[nb_subPlan*l + j] = true;
equivalences_dist[nb_subPlan*j + l] = true;
}
}
}
// Transitive closure by Floyd-Warshall algorithm
for (int l = 0; l < nb_subPlan; l++) {
for (int j = 0; j < nb_subPlan; j++) {
if (equivalences_dist[nb_subPlan*l + j]) {
for (int k = 0; k < nb_subPlan; k++) {
equivalences_dist[nb_subPlan*l + k] = equivalences_dist[nb_subPlan*l + k] || equivalences_dist[nb_subPlan*j + k];
}
}
}
}
vector<float *> EQUA_LOC;
for (int l = 0; l < nb_subPlan; l++) {
if (equivalences_dist[nb_subPlan*l + l]) {
float *equacurr = (float *) malloc(4*sizeof(float));
equacurr[0] = NMLES[i][0];
equacurr[1] = NMLES[i][1];
equacurr[2] = NMLES[i][2];
equacurr[3] = 0.0; //NMLES[i][3]; //0.0;
//int count_nmle = 0;
int count = 0;
for (int j = 0; j < nb_subPlan; j++) {
if (equivalences_dist[nb_subPlan*j + l]) {
float *equatmp = EQUA_tmp[j];
equacurr[3] = equacurr[3] + float(EQUA_count[j])*equatmp[3];
//count_nmle ++;
equivalences_dist[nb_subPlan*j + j] = false;
count += EQUA_count[j];
}
}
if (count < 1000) {
free(equacurr);
continue;
}
equacurr[3] = equacurr[3]/float(count);
EQUA_LOC.push_back(equacurr);
EQUA_buff.push_back(equacurr);
}
}
for (vector<float *>::iterator it = EQUA_tmp.begin(); it != EQUA_tmp.end(); it++) {
free((*it));
}
EQUA_tmp.clear();
EQUA_count.clear();
free(equivalences_dist);
nb_subPlan = EQUA_LOC.size();
float *Equations = (float *) malloc(4*nb_subPlan*sizeof(float));
for (int j = 0; j < nb_subPlan; j++) {
Equations[4*j] = EQUA_LOC[j][0];
Equations[4*j+1] = EQUA_LOC[j][1];
Equations[4*j+2] = EQUA_LOC[j][2];
Equations[4*j+3] = EQUA_LOC[j][3];
}
float *Equations_dev;
checkCudaErrors( hipMalloc((void **) &Equations_dev,4*nb_subPlan*sizeof(float)) );
checkCudaErrors( hipMemcpy(Equations_dev, Equations, 4*nb_subPlan*sizeof(float), hipMemcpyHostToDevice) );
hipDeviceSynchronize();
hipLaunchKernelGGL(( GetIndicesDistancesKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, VMap, Indices_Final_dev, Indices_dev, Equations_dev, 5.0*epsilon, nb_subPlan, shift_eq, i, n, m);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors( hipFree(Equations_dev) );
free(Equations);
shift_eq += nb_subPlan;
EQUA_LOC.clear();
}
nbPlan = EQUA_buff.size();
for (vector<float *>::iterator it = NMLES.begin(); it != NMLES.end(); it++)
free(*it);
NMLES_buff.clear();
checkCudaErrors( hipFree(Plan_dev) );
checkCudaErrors( hipFree(Indices_dev) );
checkCudaErrors( hipFree(Sphere_dev) );
checkCudaErrors( hipFree(Avg_NMLE_dev) );
checkCudaErrors( hipFree(Space_count_dev) );
checkCudaErrors( hipFree(Space_dev) );
checkCudaErrors( hipFree(curr_nmle_dev) );
free(Sphere);
free(Avg_NMLE);
free(Plan);
free(equivalences);
free(Space);
free(Space_count);
return EQUA_buff;
}
void Project_on_primitives_cu(float *Projected_dev, int *BBox, float *VMap, float *NMap, float *RGB, int *Indices_Final_dev, float *Equations_dev, int nbPlans, float res, float epsilon, float alpha, int n, int m) {
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x); // !! do not swap n and m !!
dimGrid.y = divUp (m, dimBlock.y);
hipLaunchKernelGGL(( ProjectionKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Projected_dev, BBox, VMap, NMap, RGB, Indices_Final_dev, Equations_dev, nbPlans, res, epsilon, alpha, n, m);
checkCudaErrors( hipDeviceSynchronize() );
return;
}
void Segment_cu(unsigned char *Label, float *VMap, float *NMap, float *pose, float *Equations, int nbPlans, float epsilon, float alpha, int n, int m) {
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x); // !! do not swap n and m !!
dimGrid.y = divUp (m, dimBlock.y);
float *pose_dev;
checkCudaErrors( hipMalloc((void **) &pose_dev, 16*sizeof(float)) );
checkCudaErrors( hipMemcpy(pose_dev, pose, 16 * sizeof(float), hipMemcpyHostToDevice) );
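// Three successive passes let labels propagate a few pixels outward from the already-segmented regions.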
for (int i = 0; i < 3; i++)
hipLaunchKernelGGL(( SegmentKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Label, VMap, NMap, pose_dev, Equations, nbPlans, epsilon, alpha, n, m);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors( hipFree(pose_dev) );
}
void InitFrags_cu(float *Projected_dev, int *Indices, int *Size, float *center, unsigned short *TheBumps_dev, unsigned char *TheRGBs_dev, unsigned char *TheMasks_dev, float *equation, int j, float res, int n, int m) {
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x); // !! do not swap n and m !!
dimGrid.y = divUp (m, dimBlock.y);
hipLaunchKernelGGL(( InitFragsKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Projected_dev, Indices, Size, center, TheBumps_dev, TheRGBs_dev, TheMasks_dev, equation, j, res, n, m);
checkCudaErrors( hipDeviceSynchronize() );
return;
}
void init_val(unsigned char *InOutput, unsigned char val, int n, int m) {
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x);
dimGrid.y = divUp (m, dimBlock.y);
hipLaunchKernelGGL(( InitValKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, InOutput, val, n, m);
checkCudaErrors( hipDeviceSynchronize() );
return;
}
bool *ImDilate(bool *Im, int n, int m, int size) {
bool *res_dev;
checkCudaErrors( hipMalloc((void **) &res_dev, n*m*sizeof(bool)) );
checkCudaErrors( hipMemset(res_dev,0,n*m*sizeof(bool)) );
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x);
dimGrid.y = divUp (m, dimBlock.y);
hipLaunchKernelGGL(( ImDilateKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, res_dev, Im, n, m, size);
checkCudaErrors( hipDeviceSynchronize() );
return res_dev;
}
bool *ImDilate(unsigned char *Im, int n, int m, int size) {
bool *res_dev;
checkCudaErrors( hipMalloc((void **) &res_dev, n*m*sizeof(bool)) );
checkCudaErrors( hipMemset(res_dev,0,n*m*sizeof(bool)) );
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x);
dimGrid.y = divUp (m, dimBlock.y);
hipLaunchKernelGGL(( ImDilateKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, res_dev, Im, n, m, size);
checkCudaErrors( hipDeviceSynchronize() );
return res_dev;
}
bool *ImErode(bool *Im, int n, int m, int size) {
bool *res_dev;
checkCudaErrors( hipMalloc((void **) &res_dev, n*m*sizeof(bool)) );
checkCudaErrors( hipMemset(res_dev,0,n*m*sizeof(bool)) );
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x);
dimGrid.y = divUp (m, dimBlock.y);
hipLaunchKernelGGL(( ImErodeKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, res_dev, Im, n, m, size);
checkCudaErrors( hipDeviceSynchronize() );
return res_dev;
}
void VotePlan_cu(unsigned char *Label, float *VMap, float *centers, float *count, float *pose, int n, int m) {
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x);
dimGrid.y = divUp (m, dimBlock.y);
float *pose_dev;
checkCudaErrors( hipMalloc((void **) &pose_dev, 16*sizeof(float)) );
checkCudaErrors( hipMemcpy(pose_dev, pose, 16 * sizeof(float), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( VotePlanKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Label, VMap, centers, count, pose_dev, n, m);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors( hipFree(pose_dev) );
}
void AffectPlan_cu(unsigned char *Label, int *Buff, int n, int m) {
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x);
dimGrid.y = divUp (m, dimBlock.y);
hipLaunchKernelGGL(( AffectPlanKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Label, Buff, n, m);
checkCudaErrors( hipDeviceSynchronize() );
}
void Affect(float **Tab, float *in, int indx) {
dim3 dimBlock(1, 1);
dim3 dimGrid (1, 1, 1);
hipLaunchKernelGGL(( AffectKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Tab, in, indx);
checkCudaErrors( hipDeviceSynchronize() );
}
void AffectChar(unsigned char **Tab, unsigned char *in, int indx) {
dim3 dimBlock(1, 1);
dim3 dimGrid (1, 1, 1);
hipLaunchKernelGGL(( AffectCharKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Tab, in, indx);
checkCudaErrors( hipDeviceSynchronize() );
}
void AffectShort(unsigned short **Tab, unsigned short *in, int indx) {
dim3 dimBlock(1, 1);
dim3 dimGrid (1, 1, 1);
hipLaunchKernelGGL(( AffectShortKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Tab, in, indx);
checkCudaErrors( hipDeviceSynchronize() );
} | 164b3901d449ac53fe07bc38f4e416ff4bd8f9e3.cu | #include "RG.cuh"
/******* Kernel definitions ******/
__device__ __forceinline__ void VoteSphereProcess(float *NMap, int *Sphere, float *AVG_NMLE, int dimAzimut, int dimElev, float step, int n, int m) {
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
float nmle [3];
nmle [0] = NMap[3*idx];
nmle [1] = NMap[3*idx+1];
nmle [2] = NMap[3*idx+2];
int idx_sphere;
if (nmle [0] == 0.0 && nmle [1] == 0.0 && nmle [2] == 0.0)
return;
float alpha = (acos(nmle[2]) / PI) * 180.0; // [0;180]
float beta = (acos(nmle[0]/sin(alpha)) / PI) * 180.0; // [0;360]
if (nmle[1] < 0.0)
beta += 180.0;
idx_sphere = int(alpha/step)*dimAzimut + int(beta/step);
atomicAdd(&AVG_NMLE[3*idx_sphere], nmle [0]);
atomicAdd(&AVG_NMLE[3*idx_sphere+1], nmle [1]);
atomicAdd(&AVG_NMLE[3*idx_sphere+2], nmle [2]);
atomicAdd(&Sphere[idx_sphere], 1);
}
__global__ void VoteSphereKernel(float *NMap, int *Sphere, float *AVG_NMLE, int dimAzimut, int dimElev, float step, int n, int m) {
VoteSphereProcess(NMap, Sphere, AVG_NMLE, dimAzimut, dimElev, step, n, m);
}
__device__ __forceinline__ void VoteDistanceProcess(float *VMap, int *Indices, float *nmle, int *Space_count, float *Space, int dim, float epsilon, int ref_i, int n, int m) {
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
int idx_pt = Indices[idx];
if (idx_pt != ref_i)
return;
float pt [3];
pt [0] = VMap[3*idx];
pt [1] = VMap[3*idx+1];
pt [2] = VMap[3*idx+2];
float scal = pt[0]*nmle[0] + pt[1]*nmle[1] + pt[2]*nmle[2];
if (scal < -10.0 || scal >= 10.0)
return;
int idx_space = int((scal+10.0)/epsilon);
atomicAdd(&Space[idx_space], scal);
atomicAdd(&Space_count[idx_space], 1);
}
__global__ void VoteDistanceKernel(float *VMap, int *Indices, float *nmle, int *Space_count, float *Space, int dim, float epsilon, int ref_i, int n, int m) {
VoteDistanceProcess(VMap, Indices, nmle, Space_count, Space, dim, epsilon, ref_i, n, m);
}
__device__ __forceinline__ void GetIndicesProcess(float *NMap, int *Indices_dev, float *Plan_dev, float alpha, int nbPlan, int n, int m) {
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
float nmle [3];
nmle [0] = NMap[3*idx];
nmle [1] = NMap[3*idx+1];
nmle [2] = NMap[3*idx+2];
int idx_sphere;
if (nmle [0] == 0.0 && nmle [1] == 0.0 && nmle [2] == 0.0) {
Indices_dev[idx] = -1;
return;
}
float min_error = -2.0;
int k = 0;
int idx_plan = -1;
float nmletmp [3];
float error_alpha;
for (int l = 0; l < nbPlan; l++) {
nmletmp [0] = Plan_dev[3*l];
nmletmp [1] = Plan_dev[3*l+1];
nmletmp [2] = Plan_dev[3*l+2];
error_alpha = fabs(nmle[0]*nmletmp[0] + nmle[1]*nmletmp[1] + nmle[2]*nmletmp[2]);
if (error_alpha > min_error) {
min_error = error_alpha;
idx_plan = k;
}
k++;
}
if (min_error > cos(alpha)) {
Indices_dev[idx] = idx_plan;
} else {
Indices_dev[idx] = -1;
}
}
__global__ void GetIndicesKernel(float *NMap, int *Indices_dev, float *Plan_dev, float alpha, int nbPlan, int n, int m) {
GetIndicesProcess(NMap, Indices_dev, Plan_dev, alpha, nbPlan, n, m);
}
__device__ __forceinline__ void GetIndicesDistancesProcess(float *VMap, int *Indices_Final_dev, int *Indices_dev, float *Equations_dev, float epsilon, int nb_subPlan, int shift_eq, int ref_i, int n, int m) {
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
int idx_pt = Indices_dev[idx];
if (idx_pt != ref_i)
return;
float pt [3];
pt [0] = VMap[3*idx];
pt [1] = VMap[3*idx+1];
pt [2] = VMap[3*idx+2];
float min_error = 2.0;
int k = 0;
int idx_dist = -1;
float equa [4];
float error_dist;
for (int l = 0; l < nb_subPlan; l++) {
equa [0] = Equations_dev[4*l];
equa [1] = Equations_dev[4*l+1];
equa [2] = Equations_dev[4*l+2];
equa [3] = Equations_dev[4*l+3];
error_dist = fabs(pt[0]*equa[0] + pt[1]*equa[1] + pt[2]*equa[2] - equa[3]);
if (error_dist < min_error) {
min_error = error_dist;
idx_dist = k;
}
k++;
}
if (min_error < epsilon) {
Indices_Final_dev[idx] = idx_dist+shift_eq;
} else {
Indices_Final_dev[idx] = -1;
}
}
__global__ void GetIndicesDistancesKernel(float *VMap, int *Indices_Final_dev, int *Indices_dev, float *Equations_dev, float epsilon, int nb_subPlan, int shift_eq, int ref_i, int n, int m) {
GetIndicesDistancesProcess(VMap, Indices_Final_dev, Indices_dev, Equations_dev, epsilon, nb_subPlan, shift_eq, ref_i, n, m);
}
__device__ __forceinline__ void ProjectionProcess(float *Projected_dev, int *BBox, float *VMap, float *NMap, float *RGB, int *Indices_dev, float *Equations_dev, int nbPlans, float res, float epsilon, float alpha, int n, int m) {
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
/*int idx_pt = Indices_dev[idx];
if (idx_pt < 0)
return;*/
float pt [3];
pt [0] = VMap[3*idx];
pt [1] = VMap[3*idx+1];
pt [2] = VMap[3*idx+2];
float npt [3];
npt [0] = NMap[3*idx];
npt [1] = NMap[3*idx+1];
npt [2] = NMap[3*idx+2];
if (npt [0] == 0.0 && npt [1] == 0.0 && npt [2] == 0.0)
return;
float min_val_dist = 1.0e10;
int idx_pt = -1;
float error_dist, error_alpha, a, b, scal, d;
float proj [3];
float nml[3], e1[3], e2[3];
for (int count = 0; count < nbPlans; count++) {
nml[0] = Equations_dev[10*count]; nml[1] = Equations_dev[10*count+1]; nml[2] = Equations_dev[10*count+2];
e1[0] = Equations_dev[10*count+3]; e1[1] = Equations_dev[10*count+4]; e1[2] = Equations_dev[10*count+5];
e2[0] = Equations_dev[10*count+6]; e2[1] = Equations_dev[10*count+7]; e2[2] = Equations_dev[10*count+8];
d = Equations_dev[10*count+9];
error_dist = (pt[0])*nml[0] + (pt[1])*nml[1] + (pt[2])*nml[2] - d;
error_alpha = (npt[0]*nml[0] + npt[1]*nml[1] + npt[2]*nml[2]);
if (fabs(error_dist) > epsilon || fabs(error_alpha) < alpha)
continue;
if (fabs(error_dist) < min_val_dist) {
min_val_dist = fabs(error_dist);
idx_pt = count;
}
}
if (idx_pt == -1)
return;
float color [3];
color [0] = RGB[4*idx];
color [1] = RGB[4*idx+1];
color [2] = RGB[4*idx+2];
nml[0] = Equations_dev[10*idx_pt]; nml[1] = Equations_dev[10*idx_pt+1]; nml[2] = Equations_dev[10*idx_pt+2];
e1[0] = Equations_dev[10*idx_pt+3]; e1[1] = Equations_dev[10*idx_pt+4]; e1[2] = Equations_dev[10*idx_pt+5];
e2[0] = Equations_dev[10*idx_pt+6]; e2[1] = Equations_dev[10*idx_pt+7]; e2[2] = Equations_dev[10*idx_pt+8];
d = Equations_dev[10*idx_pt+9];
scal = (pt [0])*nml[0]+(pt [1])*nml[1]+(pt [2])*nml[2] - d;
proj[0] = (pt [0]) - scal*nml[0];
proj[1] = (pt [1]) - scal*nml[1];
proj[2] = (pt [2]) - scal*nml[2];
a = proj[0]*e1[0] + proj[1]*e1[1] + proj[2]*e1[2];
b = proj[0]*e2[0] + proj[1]*e2[1] + proj[2]*e2[2];
atomicMin(&BBox[4*idx_pt], int(a/res));
atomicMax(&BBox[4*idx_pt+1], int(a/res));
atomicMin(&BBox[4*idx_pt+2], int(b/res));
atomicMax(&BBox[4*idx_pt+3], int(b/res));
Projected_dev[6*idx] = a/res;
Projected_dev[6*idx+1] = b/res;
Projected_dev[6*idx+2] = scal;
Projected_dev[6*idx+3] = color [0];
Projected_dev[6*idx+4] = color [1];
Projected_dev[6*idx+5] = color [2];
Indices_dev[idx] = idx_pt;
}
__global__ void ProjectionKernel(float *Projected_dev, int *BBox, float *VMap, float *NMap, float *RGB, int *Indices_dev, float *Equations_dev, int nbPlans, float res, float epsilon, float alpha, int n, int m) {
ProjectionProcess(Projected_dev, BBox, VMap, NMap, RGB, Indices_dev, Equations_dev, nbPlans, res, epsilon, alpha, n, m);
}
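// Region-growing pass: an unlabeled pixel inherits the label of a neighboring labeled pixel of similar depth
// when it lies within epsilon of that plane and its world-space normal agrees with the plane normal by more than alpha.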
__device__ __forceinline__ void SegmentProcess(unsigned char *Label, float *VMap, float *NMap, float *pose, float *Equations, int nbPlans, float epsilon, float alpha, int n, int m) {
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
if (Label[idx] > 0)
return;
int s = 1;
int lb = max(0, i-s);
int ub = min(n, i+s+1);
int lr = max(0, j-s);
int ur = min(m, j+s+1);
float depth = VMap[3*idx+2];
float thresh_depth = 0.003;
float pt_l [3];
pt_l [0] = VMap[3*idx];
pt_l [1] = VMap[3*idx+1];
pt_l [2] = VMap[3*idx+2];
float pt [3];
pt [0] = pose[0]*pt_l[0] + pose[4]*pt_l[1] + pose[8]*pt_l[2] + pose[12];
pt [1] = pose[1]*pt_l[0] + pose[5]*pt_l[1] + pose[9]*pt_l[2] + pose[13];
pt [2] = pose[2]*pt_l[0] + pose[6]*pt_l[1] + pose[10]*pt_l[2] + pose[14];
float npt_l [3];
npt_l [0] = NMap[3*idx];
npt_l [1] = NMap[3*idx+1];
npt_l [2] = NMap[3*idx+2];
float npt [3];
npt [0] = pose[0]*npt_l[0] + pose[4]*npt_l[1] + pose[8]*npt_l[2];
npt [1] = pose[1]*npt_l[0] + pose[5]*npt_l[1] + pose[9]*npt_l[2];
npt [2] = pose[2]*npt_l[0] + pose[6]*npt_l[1] + pose[10]*npt_l[2];
if (npt [0] == 0.0 && npt [1] == 0.0 && npt [2] == 0.0)
return;
float error_dist, error_alpha, a, b, scal, d;
float nml[3], e1[3], e2[3];
for (int ki = lb; ki < ub; ki++) {
for (int kj = lr; kj < ur; kj++) {
if (Label[ki*m + kj] > 0 && fabs(VMap[3*(ki*m + kj)+2]-depth) < thresh_depth) {
int count = int(Label[ki*m + kj])-1;
nml[0] = Equations[10*count]; nml[1] = Equations[10*count+1]; nml[2] = Equations[10*count+2];
e1[0] = Equations[10*count+3]; e1[1] = Equations[10*count+4]; e1[2] = Equations[10*count+5];
e2[0] = Equations[10*count+6]; e2[1] = Equations[10*count+7]; e2[2] = Equations[10*count+8];
d = Equations[10*count+9];
error_dist = (pt[0])*nml[0] + (pt[1])*nml[1] + (pt[2])*nml[2] - d;
error_alpha = (npt[0]*nml[0] + npt[1]*nml[1] + npt[2]*nml[2]);
if (fabs(error_dist) > epsilon || error_alpha < alpha)
continue;
Label[idx] = Label[ki*m + kj];
return;
}
}
}
return;
}
__global__ void SegmentKernel(unsigned char *Label, float *VMap, float *NMap, float *pose, float *Equations, int nbPlans, float epsilon, float alpha, int n, int m) {
SegmentProcess(Label, VMap, NMap, pose, Equations, nbPlans, epsilon, alpha, n, m);
}
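// Splat projected points into the per-plane bump / RGB / mask images; only texels whose mask was previously 10
// are written (sub-texel shift, quantized depth, color) and then marked 11.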
__device__ __forceinline__ void InitFragsProcess(float *Projected, int *Indices, int *Size, float *center, unsigned short *TheBumps, unsigned char *TheRGBs, unsigned char *TheMasks, float *equation, int currj, float res, int n, int m) {
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
int idx_pt = Indices[idx];
if (currj != -1 && idx_pt != currj /* < 0*/)
return;
// The local origin transformed into the global coordinate system
float origin [3];
origin [0] = equation [0];
origin [1] = equation [1];
origin [2] = equation [2];
// The viewing direction of the local camera in the global coordinate system
float view_dir [3];
view_dir [0] = equation [3];
view_dir [1] = equation [4];
view_dir [2] = equation [5];
// The normal of the plane
float nmle [3];
nmle [0] = equation [6];
nmle [1] = equation [7];
nmle [2] = equation [8];
float rgb [3];
rgb [0] = Projected[6*idx+3];
rgb [1] = Projected[6*idx+4];
rgb [2] = Projected[6*idx+5];
float scal, a, b, alpha, theta, d, x, y;
a = Projected[6*idx];
b = Projected[6*idx+1];
scal = Projected[6*idx+2];
x = a*res;
y = b*res;
d = equation [15] + scal;
float pt [3];
pt [0] = x*equation [9] + y*equation [12] + d*nmle[0];
pt [1] = x*equation [10] + y*equation [13] + d*nmle[1];
pt [2] = x*equation [11] + y*equation [14] + d*nmle[2];
// The vector from the point to the origin
float vect [3];
vect [0] = origin [0] - pt [0];
vect [1] = origin [1] - pt [1];
vect [2] = origin [2] - pt [2];
float nrm = sqrt(vect [0]*vect [0] + vect [1]*vect [1] + vect [2]*vect [2]);
vect [0] = vect [0]/nrm;
vect [1] = vect [1]/nrm;
vect [2] = vect [2]/nrm;
// Dot product between nmle and vector
theta = nmle[0]*vect[0] + nmle[1]*vect[1] + nmle[2]*vect[2];
alpha = view_dir[0]*vect[0] + view_dir[1]*vect[1] + view_dir[2]*vect[2];
bool lockval = (theta > 0.8) && (alpha > 0.4);
int idxBump [2];
idxBump [0] = int(a -center[0]/res);
idxBump [1] = int(b -center[1]/res);
float shift [2];
shift[0] = (a -center[0]/res) - float(idxBump [0]);
shift[1] = (b -center[1]/res) - float(idxBump [1]);
if (idxBump [0] < 0 || idxBump [0] > Size[0]-1 || idxBump [1] < 0 || idxBump [1] > Size[1]-1)
return;
int old_mask = TheMasks[idxBump [0]*Size[1] + idxBump [1]]; ///atomicExch(&TheMasks[idxBump [0]*Size[1] + idxBump [1]], 11);
__syncthreads ();
if (old_mask == 10) {
TheBumps[3*(idxBump [0]*Size[1] + idxBump [1])] = (unsigned short)(shift[0]*60000.0);
TheBumps[3*(idxBump [0]*Size[1] + idxBump [1])+1] = (unsigned short)(shift[1]*60000.0);
TheBumps[3*(idxBump [0]*Size[1] + idxBump [1])+2] = (unsigned short)(((scal+15.0)/*/0.4*/)*2000.0);
TheRGBs[3*(idxBump [0]*Size[1] + idxBump [1])] = (unsigned char)(rgb[0]*255.0);
TheRGBs[3*(idxBump [0]*Size[1] + idxBump [1])+1] = (unsigned char)(rgb[1]*255.0);
TheRGBs[3*(idxBump [0]*Size[1] + idxBump [1])+2] = (unsigned char)(rgb[2]*255.0);
TheMasks[idxBump [0]*Size[1] + idxBump [1]] = 11;
}
}
__global__ void InitFragsKernel(float *Projected, int *Indices, int *Size, float *center, unsigned short *TheBumps, unsigned char *TheRGBs, unsigned char *TheMasks, float *equation, int currj, float res, int n, int m) {
InitFragsProcess(Projected, Indices, Size, center, TheBumps, TheRGBs, TheMasks, equation, currj, res, n, m);
}
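// Fill a 2D unsigned char buffer with a constant value.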
__device__ __forceinline__ void InitValProcess(unsigned char *InOut, unsigned char val, int n, int m) {
// two-dimensional thread index, matching the matrix layout
unsigned int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
unsigned int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
unsigned int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
InOut[idx] = val;
}
__global__ void InitValKernel(unsigned char *InOut, unsigned char val, int n, int m) {
InitValProcess(InOut, val, n, m);
}
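// Binary dilation with a (2*size+1) x (2*size+1) square window; two overloads follow (bool input, and
// unsigned char input where values > 10 count as foreground). A border band of width size keeps its initial value.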
__device__ __forceinline__ void ImDilateProcess(bool *res, bool *Input, int n, int m, int size) {
// two-dimensional thread index, matching the matrix layout
unsigned int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
unsigned int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
unsigned int idx = i*m + j;
if (i > n-size-1 || j > m-size-1 || i < size || j < size)
return;
res[idx] = false;
for (int k = -size; k < size+1; k++) {
for (int l = -size; l < size+1; l++) {
if (Input[(i+k)*m + j+l]) {
res[idx] = true;
return;
}
}
}
}
__global__ void ImDilateKernel(bool *res, bool *Input, int n, int m, int size) {
ImDilateProcess(res, Input, n, m, size);
}
__device__ __forceinline__ void ImDilateProcess(bool *res, unsigned char *Input, int n, int m, int size) {
// two-dimensional thread index, matching the matrix layout
unsigned int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
unsigned int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
unsigned int idx = i*m + j;
if (i > n-size-1 || j > m-size-1 || i < size || j < size)
return;
res[idx] = false;
for (int k = -size; k < size+1; k++) {
for (int l = -size; l < size+1; l++) {
if (Input[(i+k)*m + j+l] > 10) {
res[idx] = true;
return;
}
}
}
}
__global__ void ImDilateKernel(bool *res, unsigned char *Input, int n, int m, int size) {
ImDilateProcess(res, Input, n, m, size);
}
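// Binary erosion with a (2*size+1) x (2*size+1) square window, clamped at the image border.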
__device__ __forceinline__ void ImErodeProcess(bool *res, bool *Input, int n, int m, int size) {
// two-dimensional thread index, matching the matrix layout
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
res[idx] = true;
for (int k = -size; k < size+1; k++) {
for (int l = -size; l < size+1; l++) {
if ((i+k) > n-1 || j+l > m-1 || (i+k) < 0 || j+l < 0)
continue;
if (!Input[(i+k)*m + j+l] ) {
res[idx] = false;
return;
}
}
}
}
__global__ void ImErodeKernel(bool *res, bool *Input, int n, int m, int size) {
ImErodeProcess(res, Input, n, m, size);
}
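// For every labeled pixel, transform the vertex into world coordinates with pose and atomically
// accumulate it into the per-label centroid sum and point count.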
__device__ __forceinline__ void VotePlanProcess(unsigned char *Label, float *VMap, float* centers, float *count, float *pose, int n, int m) {
// two-dimensional thread index, matching the matrix layout
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
if (Label[idx] == 0)
return;
float pt_l [3];
pt_l [0] = VMap[3*idx];
pt_l [1] = VMap[3*idx+1];
pt_l [2] = VMap[3*idx+2];
float pt [3];
pt [0] = pose[0]*pt_l[0] + pose[4]*pt_l[1] + pose[8]*pt_l[2] + pose[12];
pt [1] = pose[1]*pt_l[0] + pose[5]*pt_l[1] + pose[9]*pt_l[2] + pose[13];
pt [2] = pose[2]*pt_l[0] + pose[6]*pt_l[1] + pose[10]*pt_l[2] + pose[14];
atomicAdd(¢ers[3*(Label[idx]-1)], pt [0]);
atomicAdd(¢ers[3*(Label[idx]-1)+1], pt [1]);
atomicAdd(¢ers[3*(Label[idx]-1)+2], pt [2]);
atomicAdd(&count[Label[idx]-1], 1.0);
}
__global__ void VotePlanKernel(unsigned char *Label, float *VMap, float* centers, float *count, float *pose, int n, int m) {
VotePlanProcess(Label, VMap, centers, count, pose, n, m);
}
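// Relabel every labeled pixel through the lookup table Buff (old label -> new label).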
__device__ __forceinline__ void AffectPlanProcess(unsigned char *Label, int *Buff, int n, int m) {
// two-dimensional thread index, matching the matrix layout
int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X;
int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y;
int idx = i*m + j;
if (i > n-1 || j > m-1)
return;
if (Label[idx] == 0)
return;
Label[idx] = unsigned char(Buff[Label[idx]-1]);
}
__global__ void AffectPlanKernel(unsigned char *Label, int *Buff, int n, int m) {
AffectPlanProcess(Label, Buff, n, m);
}
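// Single-thread helper kernels that store a device pointer into a device-side array of pointers.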
__global__ void AffectKernel(float **Tab, float *in, int indx){
Tab[indx] = in;
}
__global__ void AffectCharKernel(unsigned char **Tab, unsigned char *in, int indx){
Tab[indx] = in;
}
__global__ void AffectShortKernel(unsigned short **Tab, unsigned short *in, int indx){
Tab[indx] = in;
}
///**** Function definitions ****/
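// Detect dominant planes: vote normals into a discretized sphere, merge similar normal clusters via transitive
// closure, then cluster each normal's points by signed distance. Returns one (nx, ny, nz, d) equation per plane
// and writes per-pixel plane indices into Indices_Final_dev.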
vector<float *> DetectPlans_cu(float *VMap, float *NMap, int *Indices_Final_dev, float epsilon, float alpha, int n, int m) {
int dimAzimut = int(360.0/alpha);
int dimElev = int(180.0/alpha);
int *Sphere = (int *) malloc(dimAzimut*dimElev*sizeof(int)); // 360/30 * 180/30
float *Avg_NMLE = (float *) malloc(3*dimAzimut*dimElev*sizeof(float)); // 3*(360/30 * 180/30)
int *Sphere_dev;
float *Avg_NMLE_dev;
checkCudaErrors( cudaMalloc((void **) &Sphere_dev, dimAzimut*dimElev*sizeof(int)) );
checkCudaErrors( cudaMemset(Sphere_dev, 0, dimAzimut*dimElev*sizeof(int)) );
checkCudaErrors( cudaMalloc((void **) &Avg_NMLE_dev, 3*dimAzimut*dimElev*sizeof(float)) );
checkCudaErrors( cudaMemset(Avg_NMLE_dev, 0, 3*dimAzimut*dimElev*sizeof(float)) );
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x); // !! do not swap n and m !!
dimGrid.y = divUp (m, dimBlock.y);
VoteSphereKernel<<<dimGrid, dimBlock>>>(NMap, Sphere_dev, Avg_NMLE_dev, dimAzimut, dimElev, alpha, n, m);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors( cudaMemcpy(Sphere, Sphere_dev, dimAzimut*dimElev*sizeof(int), cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(Avg_NMLE, Avg_NMLE_dev, 3*dimAzimut*dimElev*sizeof(float), cudaMemcpyDeviceToHost) );
int count_pt;
vector<float *> NMLES_buff;
vector<int> NMLES_count_buff;
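// Keep sphere bins with more than 1000 votes and turn each one into a normalized average normal.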
for (int i = 0; i < dimAzimut*dimElev; i++) {
count_pt = Sphere[i];
if (count_pt > 1000) {
float *nmletmp = (float *) malloc(3*sizeof(float));
nmletmp[0] = Avg_NMLE[3*i]/float(count_pt);
nmletmp[1] = Avg_NMLE[3*i+1]/float(count_pt);
nmletmp[2] = Avg_NMLE[3*i+2]/float(count_pt);
float norm = sqrt(nmletmp[0]*nmletmp[0] + nmletmp[1]*nmletmp[1] + nmletmp[2]*nmletmp[2]);
nmletmp[0] = nmletmp[0]/norm;
nmletmp[1] = nmletmp[1]/norm;
nmletmp[2] = nmletmp[2]/norm;
NMLES_buff.push_back(nmletmp);
NMLES_count_buff.push_back(count_pt);
}
}
int nbPlan = NMLES_buff.size();
///// Merge close enough clusters
// build equivalence matrix;
bool *equivalences = (bool *) malloc(nbPlan*nbPlan*sizeof(bool));
memset(equivalences, 0, nbPlan*nbPlan*sizeof(bool));
float error_alpha;
for (int i = 0; i < nbPlan; i++) {
float *nmlecurr = NMLES_buff[i];
equivalences[nbPlan*i + i] = true;
for (int j = i+1; j < nbPlan; j++) {
float *nmletmp = NMLES_buff[j];
error_alpha = fabs(nmlecurr[0]*nmletmp[0] + nmlecurr[1]*nmletmp[1] + nmlecurr[2]*nmletmp[2]);
if (error_alpha > cos(10.0*PI/180.0)) {
equivalences[nbPlan*i + j] = true;
equivalences[nbPlan*j + i] = true;
}
}
}
// Transitive closure by Floyd-Warshall algorithm
for (int i = 0; i < nbPlan; i++) {
for (int j = 0; j < nbPlan; j++) {
if (equivalences[nbPlan*i + j]) {
for (int k = 0; k < nbPlan; k++) {
equivalences[nbPlan*i + k] = equivalences[nbPlan*i + k] || equivalences[nbPlan*j + k];
}
}
}
}
vector<float *> NMLES;
vector<int> NMLES_count;
for (int i = 0; i < nbPlan; i++) {
if (equivalences[nbPlan*i + i]) {
float *nmlecurr = (float *) malloc(3*sizeof(float));
nmlecurr[0] = nmlecurr[1] = nmlecurr[2] = 0.0;
//int count_nmle = 0;
int count = 0;
for (int j = 0; j < nbPlan; j++) {
if (equivalences[nbPlan*j + i]) {
float *nmletmp = NMLES_buff[j];
nmlecurr[0] = nmlecurr[0] + float(NMLES_count_buff[j])*nmletmp[0];
nmlecurr[1] = nmlecurr[1] + float(NMLES_count_buff[j])*nmletmp[1];
nmlecurr[2] = nmlecurr[2] + float(NMLES_count_buff[j])*nmletmp[2];
//count_nmle ++;
equivalences[nbPlan*j + j] = false;
count += NMLES_count_buff[j];
}
}
if (count < 3000) {
free(nmlecurr);
continue;
}
nmlecurr[0] = nmlecurr[0]/float(count);
nmlecurr[1] = nmlecurr[1]/float(count);
nmlecurr[2] = nmlecurr[2]/float(count);
float norm = sqrt(nmlecurr[0]*nmlecurr[0] + nmlecurr[1]*nmlecurr[1] + nmlecurr[2]*nmlecurr[2]);
nmlecurr[0] = nmlecurr[0]/norm;
nmlecurr[1] = nmlecurr[1]/norm;
nmlecurr[2] = nmlecurr[2]/norm;
NMLES.push_back(nmlecurr);
NMLES_count.push_back(count);
}
}
for (vector<float *>::iterator it = NMLES_buff.begin(); it != NMLES_buff.end(); it++)
free((*it));
NMLES_buff.clear();
NMLES_count_buff.clear();
nbPlan = NMLES.size();
int *Indices_dev;
checkCudaErrors( cudaMalloc((void **) &Indices_dev,n*m*sizeof(int)) );
checkCudaErrors( cudaMemset(Indices_dev, 0, n*m*sizeof(int)) );
float *Plan = (float *) malloc(3*nbPlan*sizeof(float));
int nbPt = 0;
for (int i = 0; i < nbPlan; i++) {
Plan[3*i] = NMLES[i][0];
Plan[3*i+1] = NMLES[i][1];
Plan[3*i+2] = NMLES[i][2];
nbPt += NMLES_count[i];
}
float *Plan_dev;
checkCudaErrors( cudaMalloc((void **) &Plan_dev,3*nbPlan*sizeof(float)) );
checkCudaErrors( cudaMemcpy(Plan_dev, Plan, 3*nbPlan*sizeof(float), cudaMemcpyHostToDevice) );
GetIndicesKernel<<<dimGrid, dimBlock>>>(NMap, Indices_dev, Plan_dev, 0.8f*alpha*PI/180.0f, nbPlan, n, m);
checkCudaErrors( cudaDeviceSynchronize() );
//////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////// Cluster in distance //////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////
int dim = 2*(int(10.0/epsilon)+1);
float *Space = (float *) malloc (dim*sizeof(float));
int *Space_count = (int *) malloc (dim*sizeof(int));
int *Space_count_dev;
float *Space_dev;
float *curr_nmle_dev;
checkCudaErrors( cudaMalloc((void **) &Space_count_dev, dim*sizeof(int)) );
checkCudaErrors( cudaMalloc((void **) &Space_dev, dim*sizeof(float)) );
checkCudaErrors( cudaMalloc((void **) &curr_nmle_dev, 3*sizeof(float)) );
vector<float *> EQUA_buff;
int shift_eq = 0;
for (int i = 0; i < nbPlan; i++) {
checkCudaErrors( cudaMemcpy(curr_nmle_dev, NMLES[i], 3*sizeof(float), cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemset(Space_count_dev, 0, dim*sizeof(int)) );
checkCudaErrors( cudaMemset(Space_dev, 0, dim*sizeof(float)) );
VoteDistanceKernel<<<dimGrid, dimBlock>>>(VMap, Indices_dev, curr_nmle_dev, Space_count_dev, Space_dev, dim, epsilon, i, n, m);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors( cudaMemcpy(Space_count, Space_count_dev, dim*sizeof(int), cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(Space, Space_dev, dim*sizeof(float), cudaMemcpyDeviceToHost) );
vector<float *> EQUA_tmp;
vector<int> EQUA_count;
for (int j = 0; j < dim; j++) {
count_pt = Space_count [j];
if (count_pt > 1000) {
float *equa = (float *) malloc(4*sizeof(float));
equa[0] = NMLES[i][0];
equa[1] = NMLES[i][1];
equa[2] = NMLES[i][2];
equa[3] = Space[j]/float(count_pt);
EQUA_tmp.push_back(equa);
EQUA_count.push_back(count_pt);
}
}
int nb_subPlan = EQUA_tmp.size();
///// Merge close enough clusters
// build equivalence matrix;
bool *equivalences_dist = (bool *) malloc(nb_subPlan*nb_subPlan*sizeof(bool));
memset(equivalences_dist, 0, nb_subPlan*nb_subPlan*sizeof(bool));
float error_alpha;
for (int l = 0; l < nb_subPlan; l++) {
float *equa = EQUA_tmp[l];
equivalences_dist[nb_subPlan*l + l] = true;
for (int j = l+1; j < nb_subPlan; j++) {
float *equatmp = EQUA_tmp[j];
error_alpha = fabs(equa[3] - equatmp[3]);
if (error_alpha < 4.0*epsilon) {
equivalences_dist[nb_subPlan*l + j] = true;
equivalences_dist[nb_subPlan*j + l] = true;
}
}
}
// Transitive closure by Floyd-Warshall algorithm
for (int l = 0; l < nb_subPlan; l++) {
for (int j = 0; j < nb_subPlan; j++) {
if (equivalences_dist[nb_subPlan*l + j]) {
for (int k = 0; k < nb_subPlan; k++) {
equivalences_dist[nb_subPlan*l + k] = equivalences_dist[nb_subPlan*l + k] || equivalences_dist[nb_subPlan*j + k];
}
}
}
}
vector<float *> EQUA_LOC;
for (int l = 0; l < nb_subPlan; l++) {
if (equivalences_dist[nb_subPlan*l + l]) {
float *equacurr = (float *) malloc(4*sizeof(float));
equacurr[0] = NMLES[i][0];
equacurr[1] = NMLES[i][1];
equacurr[2] = NMLES[i][2];
equacurr[3] = 0.0; //NMLES[i][3]; //0.0;
//int count_nmle = 0;
int count = 0;
for (int j = 0; j < nb_subPlan; j++) {
if (equivalences_dist[nb_subPlan*j + l]) {
float *equatmp = EQUA_tmp[j];
equacurr[3] = equacurr[3] + float(EQUA_count[j])*equatmp[3];
//count_nmle ++;
equivalences_dist[nb_subPlan*j + j] = false;
count += EQUA_count[j];
}
}
if (count < 1000) {
free(equacurr);
continue;
}
equacurr[3] = equacurr[3]/float(count);
EQUA_LOC.push_back(equacurr);
EQUA_buff.push_back(equacurr);
}
}
for (vector<float *>::iterator it = EQUA_tmp.begin(); it != EQUA_tmp.end(); it++) {
free((*it));
}
EQUA_tmp.clear();
EQUA_count.clear();
free(equivalences_dist);
nb_subPlan = EQUA_LOC.size();
float *Equations = (float *) malloc(4*nb_subPlan*sizeof(float));
for (int j = 0; j < nb_subPlan; j++) {
Equations[4*j] = EQUA_LOC[j][0];
Equations[4*j+1] = EQUA_LOC[j][1];
Equations[4*j+2] = EQUA_LOC[j][2];
Equations[4*j+3] = EQUA_LOC[j][3];
}
float *Equations_dev;
checkCudaErrors( cudaMalloc((void **) &Equations_dev,4*nb_subPlan*sizeof(float)) );
checkCudaErrors( cudaMemcpy(Equations_dev, Equations, 4*nb_subPlan*sizeof(float), cudaMemcpyHostToDevice) );
cudaDeviceSynchronize();
GetIndicesDistancesKernel<<<dimGrid, dimBlock>>>(VMap, Indices_Final_dev, Indices_dev, Equations_dev, 5.0*epsilon, nb_subPlan, shift_eq, i, n, m);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors( cudaFree(Equations_dev) );
free(Equations);
shift_eq += nb_subPlan;
EQUA_LOC.clear();
}
nbPlan = EQUA_buff.size();
for (vector<float *>::iterator it = NMLES.begin(); it != NMLES.end(); it++)
free(*it);
NMLES.clear();
checkCudaErrors( cudaFree(Plan_dev) );
checkCudaErrors( cudaFree(Indices_dev) );
checkCudaErrors( cudaFree(Sphere_dev) );
checkCudaErrors( cudaFree(Avg_NMLE_dev) );
checkCudaErrors( cudaFree(Space_count_dev) );
checkCudaErrors( cudaFree(Space_dev) );
checkCudaErrors( cudaFree(curr_nmle_dev) );
free(Sphere);
free(Avg_NMLE);
free(Plan);
free(equivalences);
free(Space);
free(Space_count);
return EQUA_buff;
}
void Project_on_primitives_cu(float *Projected_dev, int *BBox, float *VMap, float *NMap, float *RGB, int *Indices_Final_dev, float *Equations_dev, int nbPlans, float res, float epsilon, float alpha, int n, int m) {
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x); // !! do not swap n and m !!
dimGrid.y = divUp (m, dimBlock.y);
ProjectionKernel<<<dimGrid, dimBlock>>>(Projected_dev, BBox, VMap, NMap, RGB, Indices_Final_dev, Equations_dev, nbPlans, res, epsilon, alpha, n, m);
checkCudaErrors( cudaDeviceSynchronize() );
return;
}
void Segment_cu(unsigned char *Label, float *VMap, float *NMap, float *pose, float *Equations, int nbPlans, float epsilon, float alpha, int n, int m) {
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x); // !! do not swap n and m !!
dimGrid.y = divUp (m, dimBlock.y);
float *pose_dev;
checkCudaErrors( cudaMalloc((void **) &pose_dev, 16*sizeof(float)) );
checkCudaErrors( cudaMemcpy(pose_dev, pose, 16 * sizeof(float), cudaMemcpyHostToDevice) );
for (int i = 0; i < 3; i++)
SegmentKernel<<<dimGrid, dimBlock>>>(Label, VMap, NMap, pose_dev, Equations, nbPlans, epsilon, alpha, n, m);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors( cudaFree(pose_dev) );
}
void InitFrags_cu(float *Projected_dev, int *Indices, int *Size, float *center, unsigned short *TheBumps_dev, unsigned char *TheRGBs_dev, unsigned char *TheMasks_dev, float *equation, int j, float res, int n, int m) {
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x); // !! do not swap n and m !!
dimGrid.y = divUp (m, dimBlock.y);
InitFragsKernel<<<dimGrid, dimBlock>>>(Projected_dev, Indices, Size, center, TheBumps_dev, TheRGBs_dev, TheMasks_dev, equation, j, res, n, m);
checkCudaErrors( cudaDeviceSynchronize() );
return;
}
void init_val(unsigned char *InOutput, unsigned char val, int n, int m) {
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x);
dimGrid.y = divUp (m, dimBlock.y);
InitValKernel<<<dimGrid, dimBlock>>>(InOutput, val, n, m);
checkCudaErrors( cudaDeviceSynchronize() );
return;
}
bool *ImDilate(bool *Im, int n, int m, int size) {
bool *res_dev;
checkCudaErrors( cudaMalloc((void **) &res_dev, n*m*sizeof(bool)) );
checkCudaErrors( cudaMemset(res_dev,0,n*m*sizeof(bool)) );
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x);
dimGrid.y = divUp (m, dimBlock.y);
ImDilateKernel<<<dimGrid, dimBlock>>>(res_dev, Im, n, m, size);
checkCudaErrors( cudaDeviceSynchronize() );
return res_dev;
}
bool *ImDilate(unsigned char *Im, int n, int m, int size) {
bool *res_dev;
checkCudaErrors( cudaMalloc((void **) &res_dev, n*m*sizeof(bool)) );
checkCudaErrors( cudaMemset(res_dev,0,n*m*sizeof(bool)) );
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x);
dimGrid.y = divUp (m, dimBlock.y);
ImDilateKernel<<<dimGrid, dimBlock>>>(res_dev, Im, n, m, size);
checkCudaErrors( cudaDeviceSynchronize() );
return res_dev;
}
bool *ImErode(bool *Im, int n, int m, int size) {
bool *res_dev;
checkCudaErrors( cudaMalloc((void **) &res_dev, n*m*sizeof(bool)) );
checkCudaErrors( cudaMemset(res_dev,0,n*m*sizeof(bool)) );
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x);
dimGrid.y = divUp (m, dimBlock.y);
ImErodeKernel<<<dimGrid, dimBlock>>>(res_dev, Im, n, m, size);
checkCudaErrors( cudaDeviceSynchronize() );
return res_dev;
}
void VotePlan_cu(unsigned char *Label, float *VMap, float *centers, float *count, float *pose, int n, int m) {
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x);
dimGrid.y = divUp (m, dimBlock.y);
float *pose_dev;
checkCudaErrors( cudaMalloc((void **) &pose_dev, 16*sizeof(float)) );
checkCudaErrors( cudaMemcpy(pose_dev, pose, 16 * sizeof(float), cudaMemcpyHostToDevice) );
VotePlanKernel<<<dimGrid, dimBlock>>>(Label, VMap, centers, count, pose_dev, n, m);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors( cudaFree(pose_dev) );
}
void AffectPlan_cu(unsigned char *Label, int *Buff, int n, int m) {
dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y);
dim3 dimGrid (1, 1, 1);
dimGrid.x = divUp (n, dimBlock.x);
dimGrid.y = divUp (m, dimBlock.y);
AffectPlanKernel<<<dimGrid, dimBlock>>>(Label, Buff, n, m);
checkCudaErrors( cudaDeviceSynchronize() );
}
void Affect(float **Tab, float *in, int indx) {
dim3 dimBlock(1, 1);
dim3 dimGrid (1, 1, 1);
AffectKernel<<<dimGrid, dimBlock>>>(Tab, in, indx);
checkCudaErrors( cudaDeviceSynchronize() );
}
void AffectChar(unsigned char **Tab, unsigned char *in, int indx) {
dim3 dimBlock(1, 1);
dim3 dimGrid (1, 1, 1);
AffectCharKernel<<<dimGrid, dimBlock>>>(Tab, in, indx);
checkCudaErrors( cudaDeviceSynchronize() );
}
void AffectShort(unsigned short **Tab, unsigned short *in, int indx) {
dim3 dimBlock(1, 1);
dim3 dimGrid (1, 1, 1);
AffectShortKernel<<<dimGrid, dimBlock>>>(Tab, in, indx);
checkCudaErrors( cudaDeviceSynchronize() );
} |
33213158e4b25bfbf863d29790f3d4c075ef50ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zswap.cu normal z -> d, Tue Sep 2 12:38:16 2014
*/
#include "common_magma.h"
#define BLOCK_SIZE 64
/*********************************************************
*
* SWAP BLAS: permute to set of N elements
*
********************************************************/
/*
* First version: line per line
*/
typedef struct {
double *A1;
double *A2;
int n, lda1, lda2;
} magmagpu_dswap_params_t;
__global__ void magmagpu_dswap( magmagpu_dswap_params_t params )
{
unsigned int x = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int offset1 = x*params.lda1;
unsigned int offset2 = x*params.lda2;
if( x < params.n )
{
double *A1 = params.A1 + offset1;
double *A2 = params.A2 + offset2;
double temp = *A1;
*A1 = *A2;
*A2 = temp;
}
}
extern "C" void
magmablas_dswap_q(
magma_int_t n, double *dA1T, magma_int_t lda1,
double *dA2T, magma_int_t lda2,
magma_queue_t queue )
{
int blocksize = 64;
dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
magmagpu_dswap_params_t params = { dA1T, dA2T, n, lda1, lda2 };
hipLaunchKernelGGL(( magmagpu_dswap), dim3(blocks), dim3(blocksize), 0, queue , params );
}
extern "C" void
magmablas_dswap(
magma_int_t n, double *dA1T, magma_int_t lda1,
double *dA2T, magma_int_t lda2)
{
magmablas_dswap_q( n, dA1T, lda1, dA2T, lda2, magma_stream );
}
| 33213158e4b25bfbf863d29790f3d4c075ef50ab.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zswap.cu normal z -> d, Tue Sep 2 12:38:16 2014
*/
#include "common_magma.h"
#define BLOCK_SIZE 64
/*********************************************************
*
* SWAP BLAS: permute to set of N elements
*
********************************************************/
/*
* First version: line per line
*/
typedef struct {
double *A1;
double *A2;
int n, lda1, lda2;
} magmagpu_dswap_params_t;
__global__ void magmagpu_dswap( magmagpu_dswap_params_t params )
{
unsigned int x = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int offset1 = x*params.lda1;
unsigned int offset2 = x*params.lda2;
if( x < params.n )
{
double *A1 = params.A1 + offset1;
double *A2 = params.A2 + offset2;
double temp = *A1;
*A1 = *A2;
*A2 = temp;
}
}
extern "C" void
magmablas_dswap_q(
magma_int_t n, double *dA1T, magma_int_t lda1,
double *dA2T, magma_int_t lda2,
magma_queue_t queue )
{
int blocksize = 64;
dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
magmagpu_dswap_params_t params = { dA1T, dA2T, n, lda1, lda2 };
magmagpu_dswap<<< blocks, blocksize, 0, queue >>>( params );
}
extern "C" void
magmablas_dswap(
magma_int_t n, double *dA1T, magma_int_t lda1,
double *dA2T, magma_int_t lda2)
{
magmablas_dswap_q( n, dA1T, lda1, dA2T, lda2, magma_stream );
}
|
a197b7df91d0441cd3161723d1254d490a021b3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zparic_kernels.cu, normal z -> s, Mon Jun 25 18:24:25 2018
*/
#include "magmasparse_internal.h"
#define PRECISION_s
__global__ void
magma_sparic_csr_kernel(
magma_int_t n,
magma_int_t nnz,
magma_index_t *Arowidx,
magma_index_t *Acolidx,
const float * __restrict__ A_val,
magma_index_t *rowptr,
magma_index_t *colidx,
float *val )
{
int i, j;
int k = (blockDim.x * blockIdx.x + threadIdx.x); // % nnz;
float zero = MAGMA_S_MAKE(0.0, 0.0);
float s, sp;
int il, iu, jl, ju;
if ( k < nnz ) {
i = Arowidx[k];
j = Acolidx[k];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = __ldg( A_val+k );
#else
s = A_val[k];
#endif
il = rowptr[i];
iu = rowptr[j];
while (il < rowptr[i+1] && iu < rowptr[j+1]) {
sp = zero;
jl = colidx[il];
ju = colidx[iu];
if (jl < ju)
il++;
else if (ju < jl)
iu++;
else {
// we are going to modify this u entry
sp = val[il] * val[iu];
s -= sp;
il++;
iu++;
}
}
s += sp; // undo the last operation (it must be the last)
// modify entry
if (i == j) // diagonal
val[il-1] = MAGMA_S_MAKE( sqrt( fabs( MAGMA_S_REAL(s) )), 0.0 );
else //sub-diagonal
val[il-1] = s / val[iu-1];
}
}// kernel
/**
Purpose
-------
This routine iteratively computes an incomplete LU factorization.
For reference, see:
E. Chow and A. Patel: "Fine-grained Parallel Incomplete LU Factorization",
SIAM Journal on Scientific Computing, 37, C169-C193 (2015).
This routine was used in the ISC 2015 paper:
E. Chow et al.: "Asynchronous Iterative Algorithm for Computing Incomplete
Factorizations on GPUs",
ISC High Performance 2015, LNCS 9137, pp. 1-16, 2015.
The input format of the initial guess matrix A is Magma_CSRCOO,
A_CSR is CSR or CSRCOO format.
Arguments
---------
@param[in]
A magma_s_matrix
input matrix A - initial guess (lower triangular)
@param[in,out]
A_CSR magma_s_matrix
input/output matrix containing the IC approximation
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sparic_csr(
magma_s_matrix A,
magma_s_matrix A_CSR,
magma_queue_t queue )
{
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv( A.nnz, blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
hipLaunchKernelGGL(( magma_sparic_csr_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
A.num_rows, A.nnz,
A.rowidx, A.col, A.val,
A_CSR.row, A_CSR.col, A_CSR.val );
return MAGMA_SUCCESS;
}
| a197b7df91d0441cd3161723d1254d490a021b3b.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zparic_kernels.cu, normal z -> s, Mon Jun 25 18:24:25 2018
*/
#include "magmasparse_internal.h"
#define PRECISION_s
__global__ void
magma_sparic_csr_kernel(
magma_int_t n,
magma_int_t nnz,
magma_index_t *Arowidx,
magma_index_t *Acolidx,
const float * __restrict__ A_val,
magma_index_t *rowptr,
magma_index_t *colidx,
float *val )
{
int i, j;
int k = (blockDim.x * blockIdx.x + threadIdx.x); // % nnz;
float zero = MAGMA_S_MAKE(0.0, 0.0);
float s, sp;
int il, iu, jl, ju;
if ( k < nnz ) {
i = Arowidx[k];
j = Acolidx[k];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = __ldg( A_val+k );
#else
s = A_val[k];
#endif
il = rowptr[i];
iu = rowptr[j];
while (il < rowptr[i+1] && iu < rowptr[j+1]) {
sp = zero;
jl = colidx[il];
ju = colidx[iu];
if (jl < ju)
il++;
else if (ju < jl)
iu++;
else {
// we are going to modify this u entry
sp = val[il] * val[iu];
s -= sp;
il++;
iu++;
}
}
s += sp; // undo the last operation (it must be the last)
// modify entry
if (i == j) // diagonal
val[il-1] = MAGMA_S_MAKE( sqrt( fabs( MAGMA_S_REAL(s) )), 0.0 );
else //sub-diagonal
val[il-1] = s / val[iu-1];
}
}// kernel
/**
Purpose
-------
This routine iteratively computes an incomplete LU factorization.
For reference, see:
E. Chow and A. Patel: "Fine-grained Parallel Incomplete LU Factorization",
SIAM Journal on Scientific Computing, 37, C169-C193 (2015).
This routine was used in the ISC 2015 paper:
E. Chow et al.: "Asynchronous Iterative Algorithm for Computing Incomplete
Factorizations on GPUs",
ISC High Performance 2015, LNCS 9137, pp. 1-16, 2015.
The input format of the initial guess matrix A is Magma_CSRCOO,
A_CSR is CSR or CSRCOO format.
Arguments
---------
@param[in]
A magma_s_matrix
input matrix A - initial guess (lower triangular)
@param[in,out]
A_CSR magma_s_matrix
input/output matrix containing the IC approximation
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sparic_csr(
magma_s_matrix A,
magma_s_matrix A_CSR,
magma_queue_t queue )
{
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv( A.nnz, blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
magma_sparic_csr_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( A.num_rows, A.nnz,
A.rowidx, A.col, A.val,
A_CSR.row, A_CSR.col, A_CSR.val );
return MAGMA_SUCCESS;
}
|
fe78e7c87dcf2500864b24490d067141222479cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/tabulate.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
template<typename Iterator, typename Function>
__global__
void tabulate_kernel(Iterator first, Iterator last, Function f)
{
thrust::tabulate(thrust::seq, first, last, f);
}
void TestTabulateDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
using namespace thrust::placeholders;
typedef typename Vector::value_type T;
Vector v(5);
hipLaunchKernelGGL(( tabulate_kernel), dim3(1),dim3(1), 0, 0, v.begin(), v.end(), thrust::identity<T>());
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
ASSERT_EQUAL(v[3], 3);
ASSERT_EQUAL(v[4], 4);
hipLaunchKernelGGL(( tabulate_kernel), dim3(1),dim3(1), 0, 0, v.begin(), v.end(), -_1);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], -1);
ASSERT_EQUAL(v[2], -2);
ASSERT_EQUAL(v[3], -3);
ASSERT_EQUAL(v[4], -4);
hipLaunchKernelGGL(( tabulate_kernel), dim3(1),dim3(1), 0, 0, v.begin(), v.end(), _1 * _1 * _1);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 8);
ASSERT_EQUAL(v[3], 27);
ASSERT_EQUAL(v[4], 64);
}
DECLARE_UNITTEST(TestTabulateDeviceSeq);
void TestTabulateCudaStreams()
{
using namespace thrust::placeholders;
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
Vector v(5);
hipStream_t s;
hipStreamCreate(&s);
thrust::tabulate(thrust::hip::par(s), v.begin(), v.end(), thrust::identity<T>());
hipStreamSynchronize(s);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
ASSERT_EQUAL(v[3], 3);
ASSERT_EQUAL(v[4], 4);
thrust::tabulate(thrust::hip::par(s), v.begin(), v.end(), -_1);
hipStreamSynchronize(s);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], -1);
ASSERT_EQUAL(v[2], -2);
ASSERT_EQUAL(v[3], -3);
ASSERT_EQUAL(v[4], -4);
thrust::tabulate(thrust::hip::par(s), v.begin(), v.end(), _1 * _1 * _1);
hipStreamSynchronize(s);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 8);
ASSERT_EQUAL(v[3], 27);
ASSERT_EQUAL(v[4], 64);
hipStreamSynchronize(s);
}
DECLARE_UNITTEST(TestTabulateCudaStreams);
| fe78e7c87dcf2500864b24490d067141222479cb.cu | #include <unittest/unittest.h>
#include <thrust/tabulate.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
template<typename Iterator, typename Function>
__global__
void tabulate_kernel(Iterator first, Iterator last, Function f)
{
thrust::tabulate(thrust::seq, first, last, f);
}
void TestTabulateDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
using namespace thrust::placeholders;
typedef typename Vector::value_type T;
Vector v(5);
tabulate_kernel<<<1,1>>>(v.begin(), v.end(), thrust::identity<T>());
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
ASSERT_EQUAL(v[3], 3);
ASSERT_EQUAL(v[4], 4);
tabulate_kernel<<<1,1>>>(v.begin(), v.end(), -_1);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], -1);
ASSERT_EQUAL(v[2], -2);
ASSERT_EQUAL(v[3], -3);
ASSERT_EQUAL(v[4], -4);
tabulate_kernel<<<1,1>>>(v.begin(), v.end(), _1 * _1 * _1);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 8);
ASSERT_EQUAL(v[3], 27);
ASSERT_EQUAL(v[4], 64);
}
DECLARE_UNITTEST(TestTabulateDeviceSeq);
void TestTabulateCudaStreams()
{
using namespace thrust::placeholders;
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
Vector v(5);
cudaStream_t s;
cudaStreamCreate(&s);
thrust::tabulate(thrust::cuda::par(s), v.begin(), v.end(), thrust::identity<T>());
cudaStreamSynchronize(s);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
ASSERT_EQUAL(v[3], 3);
ASSERT_EQUAL(v[4], 4);
thrust::tabulate(thrust::cuda::par(s), v.begin(), v.end(), -_1);
cudaStreamSynchronize(s);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], -1);
ASSERT_EQUAL(v[2], -2);
ASSERT_EQUAL(v[3], -3);
ASSERT_EQUAL(v[4], -4);
thrust::tabulate(thrust::cuda::par(s), v.begin(), v.end(), _1 * _1 * _1);
cudaStreamSynchronize(s);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 8);
ASSERT_EQUAL(v[3], 27);
ASSERT_EQUAL(v[4], 64);
cudaStreamSynchronize(s);
}
DECLARE_UNITTEST(TestTabulateCudaStreams);
|
04cb1d06349ac28c34af5ed691301539a66c9848.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "HOGEngineDevice.h"
#include "HOGUtils.h"
#include "HOGConvolution.h"
#include "HOGHistogram.h"
#include "HOGSVMSlider.h"
#include "HOGScale.h"
#include "HOGPadding.h"
int hWidth, hHeight;
int hWidthROI, hHeightROI;
int hPaddedWidth, hPaddedHeight;
int rPaddedWidth, rPaddedHeight;
int minX, minY, maxX, maxY;
int hNoHistogramBins, rNoHistogramBins;
int hPaddingSizeX, hPaddingSizeY;
int hCellSizeX, hCellSizeY, hBlockSizeX, hBlockSizeY, hWindowSizeX, hWindowSizeY;
int hNoOfCellsX, hNoOfCellsY, hNoOfBlocksX, hNoOfBlocksY;
int rNoOfCellsX, rNoOfCellsY, rNoOfBlocksX, rNoOfBlocksY;
int hNumberOfBlockPerWindowX, hNumberOfBlockPerWindowY;
int hNumberOfWindowsX, hNumberOfWindowsY;
int rNumberOfWindowsX, rNumberOfWindowsY;
float4 *paddedRegisteredImage;
float1 *paddedRegisteredGrayImage;
float1 *resizedPaddedImageF1;
float4 *resizedPaddedImageF4;
float2 *colorGradientsF2;
float1 *blockHistograms;
float1 *cellHistograms;
float1 *svmScores;
bool hUseGrayscale;
// uchar1* outputTest1;
uchar4* outputTest4;
float* hResult;
float scaleRatio;
float startScale;
float endScale;
int scaleCount;
int avSizeX, avSizeY, marginX, marginY;
extern uchar4* paddedRegisteredImageU4;
float1 *deviceImage;
__host__ void InitHOG(int width, int height,
int _avSizeX, int _avSizeY,
int _marginX, int _marginY,
int cellSizeX, int cellSizeY,
int blockSizeX, int blockSizeY,
int windowSizeX, int windowSizeY,
int noOfHistogramBins, float wtscale,
float svmBias, float* svmWeights, int svmWeightsCount,
bool useGrayscale)
{
hipSetDevice( gpuGetMaxGflopsDeviceId() );
int i;
int toaddxx = 0, toaddxy = 0, toaddyx = 0, toaddyy = 0;
hWidth = width; hHeight = height;
avSizeX = _avSizeX; avSizeY = _avSizeY; marginX = _marginX; marginY = _marginY;
if (avSizeX) { toaddxx = hWidth * marginX / avSizeX; toaddxy = hHeight * marginY / avSizeX; }
if (avSizeY) { toaddyx = hWidth * marginX / avSizeY; toaddyy = hHeight * marginY / avSizeY; }
hPaddingSizeX = max(toaddxx, toaddyx); hPaddingSizeY = max(toaddxy, toaddyy);
hPaddedWidth = hWidth + hPaddingSizeX*2;
hPaddedHeight = hHeight + hPaddingSizeY*2;
hUseGrayscale = useGrayscale;
hNoHistogramBins = noOfHistogramBins;
hCellSizeX = cellSizeX; hCellSizeY = cellSizeY; hBlockSizeX = blockSizeX; hBlockSizeY = blockSizeY;
hWindowSizeX = windowSizeX; hWindowSizeY = windowSizeY;
hNoOfCellsX = hPaddedWidth / cellSizeX;
hNoOfCellsY = hPaddedHeight / cellSizeY;
hNoOfBlocksX = hNoOfCellsX - blockSizeX + 1;
hNoOfBlocksY = hNoOfCellsY - blockSizeY + 1;
hNumberOfBlockPerWindowX = (windowSizeX - cellSizeX * blockSizeX) / cellSizeX + 1;
hNumberOfBlockPerWindowY = (windowSizeY - cellSizeY * blockSizeY) / cellSizeY + 1;
hNumberOfWindowsX = 0;
for (i=0; i<hNumberOfBlockPerWindowX; i++) hNumberOfWindowsX += (hNoOfBlocksX-i)/hNumberOfBlockPerWindowX;
hNumberOfWindowsY = 0;
for (i=0; i<hNumberOfBlockPerWindowY; i++) hNumberOfWindowsY += (hNoOfBlocksY-i)/hNumberOfBlockPerWindowY;
scaleRatio = 1.05f;
startScale = 1.0f;
endScale = min(hPaddedWidth / (float) hWindowSizeX, hPaddedHeight / (float) hWindowSizeY);
scaleCount = (int)floor(logf(endScale/startScale)/logf(scaleRatio)) + 1;
checkCudaErrors(hipMalloc((void**) &paddedRegisteredImage, sizeof(float4) * hPaddedWidth * hPaddedHeight));
checkCudaErrors(hipMalloc((void**) &paddedRegisteredGrayImage, sizeof(float1) * hPaddedWidth * hPaddedHeight));
if (useGrayscale)
checkCudaErrors(hipMalloc((void**) &resizedPaddedImageF1, sizeof(float1) * hPaddedWidth * hPaddedHeight));
else
checkCudaErrors(hipMalloc((void**) &resizedPaddedImageF4, sizeof(float4) * hPaddedWidth * hPaddedHeight));
checkCudaErrors(hipMalloc((void**) &colorGradientsF2, sizeof(float2) * hPaddedWidth * hPaddedHeight));
checkCudaErrors(hipMalloc((void**) &blockHistograms, sizeof(float1) * hNoOfBlocksX * hNoOfBlocksY * cellSizeX * cellSizeY * hNoHistogramBins));
checkCudaErrors(hipMalloc((void**) &cellHistograms, sizeof(float1) * hNoOfCellsX * hNoOfCellsY * hNoHistogramBins));
checkCudaErrors(hipMalloc((void**) &svmScores, sizeof(float1) * hNumberOfWindowsX * hNumberOfWindowsY * scaleCount));
InitConvolution(hPaddedWidth, hPaddedHeight, useGrayscale);
InitHistograms(cellSizeX, cellSizeY, blockSizeX, blockSizeY, noOfHistogramBins, wtscale);
InitSVM(svmBias, svmWeights, svmWeightsCount);
InitScale(hPaddedWidth, hPaddedHeight);
InitPadding(hPaddedWidth, hPaddedHeight);
rPaddedWidth = hPaddedWidth;
rPaddedHeight = hPaddedHeight;
// if (useGrayscale)
// checkCudaErrors(hipMalloc((void**) &outputTest1, sizeof(uchar1) * hPaddedWidth * hPaddedHeight));
// else
checkCudaErrors(hipMalloc((void**) &outputTest4, sizeof(uchar4) * hPaddedWidth * hPaddedHeight));
checkCudaErrors(hipHostMalloc((void**)&hResult, sizeof(float) * hNumberOfWindowsX * hNumberOfWindowsY * scaleCount));
}
__host__ void CloseHOG()
{
checkCudaErrors(hipFree(paddedRegisteredImage));
checkCudaErrors(hipFree(paddedRegisteredGrayImage));
if (hUseGrayscale)
checkCudaErrors(hipFree(resizedPaddedImageF1));
else
checkCudaErrors(hipFree(resizedPaddedImageF4));
checkCudaErrors(hipFree(colorGradientsF2));
checkCudaErrors(hipFree(blockHistograms));
checkCudaErrors(hipFree(cellHistograms));
checkCudaErrors(hipFree(svmScores));
CloseConvolution();
CloseHistogram();
CloseSVM();
CloseScale();
ClosePadding();
// if (hUseGrayscale)
// checkCudaErrors(hipFree(outputTest1));
// else
checkCudaErrors(hipFree(outputTest4));
checkCudaErrors(hipHostFree(hResult));
hipDeviceReset();
}
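// Run the multi-scale HOG + linear SVM pipeline: pad the input, then for each scale downscale, compute gradients,
// build and normalize block histograms, and evaluate the sliding-window SVM; scores accumulate in svmScores.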
__host__ void BeginHOGProcessing(unsigned char* hostImage, int minx, int miny, int maxx, int maxy, float minScale, float maxScale)
{
int i;
minX = minx; minY = miny; maxX = maxx; maxY = maxy;
if (hUseGrayscale) {
PadHostGrayImage((uchar4*)hostImage, paddedRegisteredGrayImage, minX, minY, maxX, maxY);
} else {
PadHostImage((uchar4*)hostImage, paddedRegisteredImage, minX, minY, maxX, maxY);
}
rPaddedWidth = hPaddedWidth; rPaddedHeight = hPaddedHeight;
scaleRatio = 1.05f;
startScale = (minScale < 0.0f) ? 1.0f : minScale;
endScale = (maxScale < 0.0f) ? min(hPaddedWidth / (float) hWindowSizeX, hPaddedHeight / (float) hWindowSizeY) : maxScale;
scaleCount = (int)floor(logf(endScale/startScale)/logf(scaleRatio)) + 1;
float currentScale = startScale;
ResetSVMScores(svmScores);
for (i=0; i<scaleCount; i++)
{
if (hUseGrayscale) {
DownscaleGrayImage(0, scaleCount, i, currentScale, hUseGrayscale, paddedRegisteredGrayImage, resizedPaddedImageF1);
} else {
DownscaleImage(0, scaleCount, i, currentScale, hUseGrayscale, paddedRegisteredImage, resizedPaddedImageF1, resizedPaddedImageF4);
}
SetConvolutionSize(rPaddedWidth, rPaddedHeight);
if (hUseGrayscale) ComputeColorGradients1to2(resizedPaddedImageF1, colorGradientsF2);
else ComputeColorGradients4to2(resizedPaddedImageF4, colorGradientsF2);
ComputeBlockHistogramsWithGauss(colorGradientsF2, blockHistograms, hNoHistogramBins,
hCellSizeX, hCellSizeY, hBlockSizeX, hBlockSizeY, hWindowSizeX, hWindowSizeY, rPaddedWidth, rPaddedHeight);
NormalizeBlockHistograms(blockHistograms, hNoHistogramBins, hCellSizeX, hCellSizeY, hBlockSizeX, hBlockSizeY, rPaddedWidth, rPaddedHeight);
LinearSVMEvaluation(svmScores, blockHistograms, hNoHistogramBins, hWindowSizeX, hWindowSizeY, hCellSizeX, hCellSizeY,
hBlockSizeX, hBlockSizeY, rNoOfBlocksX, rNoOfBlocksY, i, rPaddedWidth, rPaddedHeight);
currentScale *= scaleRatio;
}
}
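// Copy the per-window, per-scale SVM scores back into page-locked host memory.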
__host__ float* EndHOGProcessing()
{
hipDeviceSynchronize();
checkCudaErrors(hipMemcpy(hResult, svmScores, sizeof(float) * scaleCount * hNumberOfWindowsX * hNumberOfWindowsY, hipMemcpyDeviceToHost));
return hResult;
}
__host__ void CalculateHOGDescriptor(float *hostImage, int width, int height, int noOfHistogramBins,
int windowSizeX, int windowSizeY, int cellSizeX, int cellSizeY,
int blockSizeX, int blockSizeY, float *hostDesc, float *gradient)
{
hipSetDevice(gpuGetMaxGflopsDeviceId());
int noOfBlocksX;
int noOfBlocksY;
float1 *deviceImage;
float2 *colorGradientsF2;
float1 *blockHistograms;
int y;
noOfBlocksX = width / cellSizeX - blockSizeX + 1;
noOfBlocksY = height / cellSizeY - blockSizeY + 1;
checkCudaErrors(hipMalloc((void**)&deviceImage, sizeof(float1) * width * height));
checkCudaErrors(hipMalloc((void**)&colorGradientsF2, sizeof(float2) * width * height));
checkCudaErrors(hipMalloc((void**)&blockHistograms, sizeof(float1) * noOfBlocksX * noOfBlocksY * blockSizeX * blockSizeY * noOfHistogramBins));
checkCudaErrors(hipMalloc((void**) &outputTest4, sizeof(uchar4) * width * height));
InitConvolution(width, height, true);
InitHistograms(cellSizeX, cellSizeY, blockSizeX, blockSizeY, noOfHistogramBins, 2.0);
checkCudaErrors(hipMemcpy2D(deviceImage, sizeof(float1) * width, hostImage, sizeof(float1) * width, sizeof(float1) * width, height, hipMemcpyHostToDevice));
SetConvolutionSize(width, height);
ComputeColorGradients1to2(deviceImage, colorGradientsF2);
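// Replicate the second and next-to-last rows/columns into the image border so border gradients are defined.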
checkCudaErrors(hipMemcpy(colorGradientsF2, colorGradientsF2 + width, sizeof(float2) * width, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(colorGradientsF2 + width * (height - 1), colorGradientsF2 + width * (height - 2), sizeof(float2) * width, hipMemcpyDeviceToDevice));
for (y = 0; y < height; y++) {
checkCudaErrors(hipMemcpy(colorGradientsF2 + y * width, colorGradientsF2 + y * width + 1, sizeof(float2), hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(colorGradientsF2 + y * width + width - 1, colorGradientsF2 + y * width + width - 2, sizeof(float2), hipMemcpyDeviceToDevice));
}
ComputeBlockHistogramsWithGauss(colorGradientsF2, blockHistograms, noOfHistogramBins, cellSizeX, cellSizeY, blockSizeX, blockSizeY, windowSizeX, windowSizeY, width, height);
NormalizeBlockHistograms(blockHistograms, noOfHistogramBins, cellSizeX, cellSizeY, blockSizeX, blockSizeY, width, height);
Float2ToUchar4(colorGradientsF2, outputTest4, width, height, 0);
checkCudaErrors(hipMemcpy2D(gradient, width * sizeof(float1), outputTest4, width * sizeof(float1), width * sizeof(float1), height, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(hostDesc, blockHistograms, sizeof(float1) * noOfBlocksX * noOfBlocksY * blockSizeX * blockSizeY * noOfHistogramBins, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(deviceImage));
checkCudaErrors(hipFree(colorGradientsF2));
checkCudaErrors(hipFree(blockHistograms));
checkCudaErrors(hipFree(outputTest4));
CloseConvolution();
CloseHistogram();
hipDeviceReset();
hipDeviceSynchronize();
}
__host__ void InitHOGDescriptorCalculator(int width, int height, int noOfHistogramBins,
int windowSizeX, int windowSizeY, int cellSizeX, int cellSizeY,
int blockSizeX, int blockSizeY, float wtscale)
{
hWidth = width;
hHeight = height;
hNoHistogramBins = noOfHistogramBins;
hWindowSizeX = windowSizeX;
hWindowSizeY = windowSizeY;
hCellSizeX = cellSizeX;
hCellSizeY = cellSizeY;
hBlockSizeX = blockSizeX;
hBlockSizeY = blockSizeY;
hNoOfBlocksX = width / cellSizeX - blockSizeX + 1;
hNoOfBlocksY = height / cellSizeY - blockSizeY + 1;
checkCudaErrors(hipMalloc((void**)&deviceImage, sizeof(float1) * width * height));
checkCudaErrors(hipMalloc((void**)&colorGradientsF2, sizeof(float2) * width * height));
checkCudaErrors(hipMalloc((void**)&blockHistograms, sizeof(float1) * hNoOfBlocksX * hNoOfBlocksY * blockSizeX * blockSizeY * noOfHistogramBins));
checkCudaErrors(hipMalloc((void**) &outputTest4, sizeof(uchar4) * width * height));
InitConvolution(width, height, true);
InitHistograms(cellSizeX, cellSizeY, blockSizeX, blockSizeY, noOfHistogramBins, wtscale);
}
__host__ void HOGDescriptorCalculator(float *hostImage, float *hostDesc)
{
int y;
checkCudaErrors(hipMemcpy2D(deviceImage, sizeof(float1) * hWidth, hostImage, sizeof(float1) * hWidth, sizeof(float1) * hWidth, hHeight, hipMemcpyHostToDevice));
SetConvolutionSize(hWidth, hHeight);
ComputeColorGradients1to2(deviceImage, colorGradientsF2);
checkCudaErrors(hipMemcpy(colorGradientsF2, colorGradientsF2 + hWidth, sizeof(float2) * hWidth, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(colorGradientsF2 + hWidth * (hHeight - 1), colorGradientsF2 + hWidth * (hHeight - 2), sizeof(float2) * hWidth, hipMemcpyDeviceToDevice));
for (y = 0; y < hHeight; y++) {
checkCudaErrors(hipMemcpy(colorGradientsF2 + y * hWidth, colorGradientsF2 + y * hWidth + 1, sizeof(float2), hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(colorGradientsF2 + y * hWidth + hWidth - 1, colorGradientsF2 + y * hWidth + hWidth - 2, sizeof(float2), hipMemcpyDeviceToDevice));
}
ComputeBlockHistogramsWithGauss(colorGradientsF2, blockHistograms, hNoHistogramBins, hCellSizeX, hCellSizeY, hBlockSizeX, hBlockSizeY, hWindowSizeX, hWindowSizeY, hWidth, hHeight);
NormalizeBlockHistograms(blockHistograms, hNoHistogramBins, hCellSizeX, hCellSizeY, hBlockSizeX, hBlockSizeY, hWidth, hHeight);
checkCudaErrors(hipMemcpy(hostDesc, blockHistograms, sizeof(float1) * hNoOfBlocksX * hNoOfBlocksY * hBlockSizeX * hBlockSizeY * hNoHistogramBins, hipMemcpyDeviceToHost));
}
__host__ void HOGDescriptorCalculatorOG(float *hostImage, float *hostDesc, float *gradient)
{
HOGDescriptorCalculator(hostImage, hostDesc);
Float2ToUchar4(colorGradientsF2, outputTest4, hWidth, hHeight, 0);
checkCudaErrors(hipMemcpy2D(gradient, hWidth * sizeof(float1), outputTest4, hWidth * sizeof(float1), hWidth * sizeof(float1), hHeight, hipMemcpyDeviceToHost));
}
__host__ void FreeHOGDescriptorCalculator()
{
checkCudaErrors(hipFree(deviceImage));
checkCudaErrors(hipFree(colorGradientsF2));
checkCudaErrors(hipFree(blockHistograms));
checkCudaErrors(hipFree(outputTest4));
CloseConvolution();
CloseHistogram();
hipDeviceReset();
hipDeviceSynchronize();
}
__host__ void GetProcessedImage(unsigned char* hostImage, int imageType)
{
switch (imageType)
{
case 0:
Float4ToUchar4(resizedPaddedImageF4, outputTest4, rPaddedWidth, rPaddedHeight);
break;
case 1:
Float2ToUchar4(colorGradientsF2, outputTest4, rPaddedWidth, rPaddedHeight, 0);
break;
case 2:
Float2ToUchar4(colorGradientsF2, outputTest4, rPaddedWidth, rPaddedHeight, 1);
break;
case 3:
checkCudaErrors(hipMemcpy(hostImage, paddedRegisteredImageU4, sizeof(uchar4) * hPaddedWidth * hPaddedHeight, hipMemcpyDeviceToHost));
return;
case 4:
checkCudaErrors(hipMemcpy2D(((uchar4*)hostImage) + minX + minY * hWidth, hWidth * sizeof(uchar4),
paddedRegisteredImageU4 + hPaddingSizeX + hPaddingSizeY * hPaddedWidth, hPaddedWidth * sizeof(uchar4),
hWidthROI * sizeof(uchar4), hHeightROI, hipMemcpyDeviceToHost));
return;
}
checkCudaErrors(hipMemcpy2D(hostImage, hPaddedWidth * sizeof(uchar4), outputTest4, rPaddedWidth * sizeof(uchar4),
rPaddedWidth * sizeof(uchar4), rPaddedHeight, hipMemcpyDeviceToHost));
//checkCudaErrors(hipMemcpy(hostImage, paddedRegisteredImage, sizeof(uchar4) * hPaddedWidth * hPaddedHeight, hipMemcpyDeviceToHost));
}
__host__ void GetHOGDescriptor(float *hostDesc)
{
checkCudaErrors(hipMemcpy(hostDesc, blockHistograms, sizeof(float1) * hNoOfBlocksX * hNoOfBlocksY * hCellSizeX * hCellSizeY * hNoHistogramBins, hipMemcpyDeviceToHost));
}
__host__ void GetHOGParameters(float *cStartScale, float *cEndScale, float *cScaleRatio, int *cScaleCount,
int *cPaddingSizeX, int *cPaddingSizeY, int *cPaddedWidth, int *cPaddedHeight,
int *cNoOfCellsX, int *cNoOfCellsY, int *cNoOfBlocksX, int *cNoOfBlocksY,
int *cNumberOfWindowsX, int *cNumberOfWindowsY,
int *cNumberOfBlockPerWindowX, int *cNumberOfBlockPerWindowY)
{
*cStartScale = startScale;
*cEndScale = endScale;
*cScaleRatio = scaleRatio;
*cScaleCount = scaleCount;
*cPaddingSizeX = hPaddingSizeX;
*cPaddingSizeY = hPaddingSizeY;
*cPaddedWidth = hPaddedWidth;
*cPaddedHeight = hPaddedHeight;
*cNoOfCellsX = hNoOfCellsX;
*cNoOfCellsY = hNoOfCellsY;
*cNoOfBlocksX = hNoOfBlocksX;
*cNoOfBlocksY = hNoOfBlocksY;
*cNumberOfWindowsX = hNumberOfWindowsX;
*cNumberOfWindowsY = hNumberOfWindowsY;
*cNumberOfBlockPerWindowX = hNumberOfBlockPerWindowX;
*cNumberOfBlockPerWindowY = hNumberOfBlockPerWindowY;
}
hipArray *imageArray2 = 0;
texture<float4, 2, hipReadModeElementType> tex2;
hipChannelFormatDesc channelDescDownscale2;
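// Resample through the texture unit (tex2 is configured with linear filtering, so this is bilinear despite the name);
// a scale of 1.0 copies the source directly.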
__global__ void resizeFastBicubic3(float4 *outputFloat, float4* paddedRegisteredImage, int width, int height, float scale)
{
int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
int i = __umul24(y, width) + x;
float u = x*scale;
float v = y*scale;
if (x < width && y < height)
{
float4 cF;
if (scale == 1.0f)
cF = paddedRegisteredImage[x + y * width];
else
cF = tex2D(tex2, u, v);
outputFloat[i] = cF;
}
}
__host__ void DownscaleImage2(float scale, float4* paddedRegisteredImage,
float4* resizedPaddedImageF4, int width, int height,
int &rPaddedWidth, int &rPaddedHeight)
{
dim3 hThreadSize, hBlockSize;
hThreadSize = dim3(THREAD_SIZE_W, THREAD_SIZE_H);
rPaddedWidth = iDivUpF(width, scale);
rPaddedHeight = iDivUpF(height, scale);
hBlockSize = dim3(iDivUp(rPaddedWidth, hThreadSize.x), iDivUp(rPaddedHeight, hThreadSize.y));
checkCudaErrors(hipMemcpyToArray(imageArray2, 0, 0, paddedRegisteredImage, sizeof(float4) * width * height, hipMemcpyDeviceToDevice));
checkCudaErrors(hipBindTextureToArray(tex2, imageArray2, channelDescDownscale2));
checkCudaErrors(hipMemset(resizedPaddedImageF4, 0, width * height * sizeof(float4)));
hipLaunchKernelGGL(( resizeFastBicubic3), dim3(hBlockSize), dim3(hThreadSize), 0, 0, (float4*)resizedPaddedImageF4, (float4*)paddedRegisteredImage, rPaddedWidth, rPaddedHeight, scale);
checkCudaErrors(hipUnbindTexture(tex2));
}
__host__ float3* CUDAImageRescale(float3* src, int width, int height, int &rWidth, int &rHeight, float scale)
{
int i, j, offsetC, offsetL;
float4* srcH; float4* srcD;
float4* dstD; float4* dstH;
float3 val3; float4 val4;
channelDescDownscale2 = hipCreateChannelDesc<float4>();
tex2.filterMode = hipFilterModeLinear; tex2.normalized = false;
hipMalloc((void**)&srcD, sizeof(float4) * width * height);
hipMalloc((void**)&dstD, sizeof(float4) * width * height);
hipHostMalloc((void**)&srcH, sizeof(float4) * width * height);
hipHostMalloc((void**)&dstH, sizeof(float4) * width * height);
checkCudaErrors(hipMallocArray(&imageArray2, &channelDescDownscale2, width, height) );
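	// --- Repack the column-major float3 input into the row-major float4 staging buffer used for the texture upload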
for (i=0; i<width; i++)
{
for (j=0; j<height; j++)
{
offsetC = j + i * height;
offsetL = j * width + i;
val3 = src[offsetC];
			srcH[offsetL].x = val3.x;
			srcH[offsetL].y = val3.y;
			srcH[offsetL].z = val3.z;
			srcH[offsetL].w = 0.0f;	// pad the unused component so the float4 texture never samples uninitialised memory
}
}
hipMemcpy(srcD, srcH, sizeof(float4) * width * height, hipMemcpyHostToDevice);
DownscaleImage2(scale, srcD, dstD, width, height, rWidth, rHeight);
hipMemcpy(dstH, dstD, sizeof(float4) * rWidth * rHeight, hipMemcpyDeviceToHost);
float3* dst = (float3*) malloc (rWidth * rHeight * sizeof(float3));
for (i=0; i<rWidth; i++)
{
for (j=0; j<rHeight; j++)
{
offsetC = j + i * rHeight;
offsetL = j * rWidth + i;
val4 = dstH[offsetL];
dst[offsetC].x = val4.x;
dst[offsetC].y = val4.y;
dst[offsetC].z = val4.z;
}
}
checkCudaErrors(hipFreeArray(imageArray2));
hipFree(srcD);
hipFree(dstD);
hipHostFree(srcH);
hipHostFree(dstH);
return dst;
}
| 04cb1d06349ac28c34af5ed691301539a66c9848.cu | #include "HOGEngineDevice.h"
#include "HOGUtils.h"
#include "HOGConvolution.h"
#include "HOGHistogram.h"
#include "HOGSVMSlider.h"
#include "HOGScale.h"
#include "HOGPadding.h"
int hWidth, hHeight;
int hWidthROI, hHeightROI;
int hPaddedWidth, hPaddedHeight;
int rPaddedWidth, rPaddedHeight;
int minX, minY, maxX, maxY;
int hNoHistogramBins, rNoHistogramBins;
int hPaddingSizeX, hPaddingSizeY;
int hCellSizeX, hCellSizeY, hBlockSizeX, hBlockSizeY, hWindowSizeX, hWindowSizeY;
int hNoOfCellsX, hNoOfCellsY, hNoOfBlocksX, hNoOfBlocksY;
int rNoOfCellsX, rNoOfCellsY, rNoOfBlocksX, rNoOfBlocksY;
int hNumberOfBlockPerWindowX, hNumberOfBlockPerWindowY;
int hNumberOfWindowsX, hNumberOfWindowsY;
int rNumberOfWindowsX, rNumberOfWindowsY;
float4 *paddedRegisteredImage;
float1 *paddedRegisteredGrayImage;
float1 *resizedPaddedImageF1;
float4 *resizedPaddedImageF4;
float2 *colorGradientsF2;
float1 *blockHistograms;
float1 *cellHistograms;
float1 *svmScores;
bool hUseGrayscale;
// uchar1* outputTest1;
uchar4* outputTest4;
float* hResult;
float scaleRatio;
float startScale;
float endScale;
int scaleCount;
int avSizeX, avSizeY, marginX, marginY;
extern uchar4* paddedRegisteredImageU4;
float1 *deviceImage;
__host__ void InitHOG(int width, int height,
int _avSizeX, int _avSizeY,
int _marginX, int _marginY,
int cellSizeX, int cellSizeY,
int blockSizeX, int blockSizeY,
int windowSizeX, int windowSizeY,
int noOfHistogramBins, float wtscale,
float svmBias, float* svmWeights, int svmWeightsCount,
bool useGrayscale)
{
cudaSetDevice( gpuGetMaxGflopsDeviceId() );
int i;
int toaddxx = 0, toaddxy = 0, toaddyx = 0, toaddyy = 0;
hWidth = width; hHeight = height;
avSizeX = _avSizeX; avSizeY = _avSizeY; marginX = _marginX; marginY = _marginY;
if (avSizeX) { toaddxx = hWidth * marginX / avSizeX; toaddxy = hHeight * marginY / avSizeX; }
if (avSizeY) { toaddyx = hWidth * marginX / avSizeY; toaddyy = hHeight * marginY / avSizeY; }
hPaddingSizeX = max(toaddxx, toaddyx); hPaddingSizeY = max(toaddxy, toaddyy);
hPaddedWidth = hWidth + hPaddingSizeX*2;
hPaddedHeight = hHeight + hPaddingSizeY*2;
hUseGrayscale = useGrayscale;
hNoHistogramBins = noOfHistogramBins;
hCellSizeX = cellSizeX; hCellSizeY = cellSizeY; hBlockSizeX = blockSizeX; hBlockSizeY = blockSizeY;
hWindowSizeX = windowSizeX; hWindowSizeY = windowSizeY;
hNoOfCellsX = hPaddedWidth / cellSizeX;
hNoOfCellsY = hPaddedHeight / cellSizeY;
hNoOfBlocksX = hNoOfCellsX - blockSizeX + 1;
hNoOfBlocksY = hNoOfCellsY - blockSizeY + 1;
hNumberOfBlockPerWindowX = (windowSizeX - cellSizeX * blockSizeX) / cellSizeX + 1;
hNumberOfBlockPerWindowY = (windowSizeY - cellSizeY * blockSizeY) / cellSizeY + 1;
hNumberOfWindowsX = 0;
for (i=0; i<hNumberOfBlockPerWindowX; i++) hNumberOfWindowsX += (hNoOfBlocksX-i)/hNumberOfBlockPerWindowX;
hNumberOfWindowsY = 0;
for (i=0; i<hNumberOfBlockPerWindowY; i++) hNumberOfWindowsY += (hNoOfBlocksY-i)/hNumberOfBlockPerWindowY;
scaleRatio = 1.05f;
startScale = 1.0f;
endScale = min(hPaddedWidth / (float) hWindowSizeX, hPaddedHeight / (float) hWindowSizeY);
scaleCount = (int)floor(logf(endScale/startScale)/logf(scaleRatio)) + 1;
checkCudaErrors(cudaMalloc((void**) &paddedRegisteredImage, sizeof(float4) * hPaddedWidth * hPaddedHeight));
checkCudaErrors(cudaMalloc((void**) &paddedRegisteredGrayImage, sizeof(float1) * hPaddedWidth * hPaddedHeight));
if (useGrayscale)
checkCudaErrors(cudaMalloc((void**) &resizedPaddedImageF1, sizeof(float1) * hPaddedWidth * hPaddedHeight));
else
checkCudaErrors(cudaMalloc((void**) &resizedPaddedImageF4, sizeof(float4) * hPaddedWidth * hPaddedHeight));
checkCudaErrors(cudaMalloc((void**) &colorGradientsF2, sizeof(float2) * hPaddedWidth * hPaddedHeight));
checkCudaErrors(cudaMalloc((void**) &blockHistograms, sizeof(float1) * hNoOfBlocksX * hNoOfBlocksY * cellSizeX * cellSizeY * hNoHistogramBins));
checkCudaErrors(cudaMalloc((void**) &cellHistograms, sizeof(float1) * hNoOfCellsX * hNoOfCellsY * hNoHistogramBins));
checkCudaErrors(cudaMalloc((void**) &svmScores, sizeof(float1) * hNumberOfWindowsX * hNumberOfWindowsY * scaleCount));
InitConvolution(hPaddedWidth, hPaddedHeight, useGrayscale);
InitHistograms(cellSizeX, cellSizeY, blockSizeX, blockSizeY, noOfHistogramBins, wtscale);
InitSVM(svmBias, svmWeights, svmWeightsCount);
InitScale(hPaddedWidth, hPaddedHeight);
InitPadding(hPaddedWidth, hPaddedHeight);
rPaddedWidth = hPaddedWidth;
rPaddedHeight = hPaddedHeight;
// if (useGrayscale)
// checkCudaErrors(cudaMalloc((void**) &outputTest1, sizeof(uchar1) * hPaddedWidth * hPaddedHeight));
// else
checkCudaErrors(cudaMalloc((void**) &outputTest4, sizeof(uchar4) * hPaddedWidth * hPaddedHeight));
checkCudaErrors(cudaMallocHost((void**)&hResult, sizeof(float) * hNumberOfWindowsX * hNumberOfWindowsY * scaleCount));
}
__host__ void CloseHOG()
{
checkCudaErrors(cudaFree(paddedRegisteredImage));
checkCudaErrors(cudaFree(paddedRegisteredGrayImage));
if (hUseGrayscale)
checkCudaErrors(cudaFree(resizedPaddedImageF1));
else
checkCudaErrors(cudaFree(resizedPaddedImageF4));
checkCudaErrors(cudaFree(colorGradientsF2));
checkCudaErrors(cudaFree(blockHistograms));
checkCudaErrors(cudaFree(cellHistograms));
checkCudaErrors(cudaFree(svmScores));
CloseConvolution();
CloseHistogram();
CloseSVM();
CloseScale();
ClosePadding();
// if (hUseGrayscale)
// checkCudaErrors(cudaFree(outputTest1));
// else
checkCudaErrors(cudaFree(outputTest4));
checkCudaErrors(cudaFreeHost(hResult));
cudaThreadExit();
}
__host__ void BeginHOGProcessing(unsigned char* hostImage, int minx, int miny, int maxx, int maxy, float minScale, float maxScale)
{
int i;
minX = minx; minY = miny; maxX = maxx; maxY = maxy;
if (hUseGrayscale) {
PadHostGrayImage((uchar4*)hostImage, paddedRegisteredGrayImage, minX, minY, maxX, maxY);
} else {
PadHostImage((uchar4*)hostImage, paddedRegisteredImage, minX, minY, maxX, maxY);
}
rPaddedWidth = hPaddedWidth; rPaddedHeight = hPaddedHeight;
scaleRatio = 1.05f;
startScale = (minScale < 0.0f) ? 1.0f : minScale;
endScale = (maxScale < 0.0f) ? min(hPaddedWidth / (float) hWindowSizeX, hPaddedHeight / (float) hWindowSizeY) : maxScale;
scaleCount = (int)floor(logf(endScale/startScale)/logf(scaleRatio)) + 1;
float currentScale = startScale;
ResetSVMScores(svmScores);
for (i=0; i<scaleCount; i++)
{
if (hUseGrayscale) {
DownscaleGrayImage(0, scaleCount, i, currentScale, hUseGrayscale, paddedRegisteredGrayImage, resizedPaddedImageF1);
} else {
DownscaleImage(0, scaleCount, i, currentScale, hUseGrayscale, paddedRegisteredImage, resizedPaddedImageF1, resizedPaddedImageF4);
}
SetConvolutionSize(rPaddedWidth, rPaddedHeight);
if (hUseGrayscale) ComputeColorGradients1to2(resizedPaddedImageF1, colorGradientsF2);
else ComputeColorGradients4to2(resizedPaddedImageF4, colorGradientsF2);
ComputeBlockHistogramsWithGauss(colorGradientsF2, blockHistograms, hNoHistogramBins,
hCellSizeX, hCellSizeY, hBlockSizeX, hBlockSizeY, hWindowSizeX, hWindowSizeY, rPaddedWidth, rPaddedHeight);
NormalizeBlockHistograms(blockHistograms, hNoHistogramBins, hCellSizeX, hCellSizeY, hBlockSizeX, hBlockSizeY, rPaddedWidth, rPaddedHeight);
LinearSVMEvaluation(svmScores, blockHistograms, hNoHistogramBins, hWindowSizeX, hWindowSizeY, hCellSizeX, hCellSizeY,
hBlockSizeX, hBlockSizeY, rNoOfBlocksX, rNoOfBlocksY, i, rPaddedWidth, rPaddedHeight);
currentScale *= scaleRatio;
}
}
__host__ float* EndHOGProcessing()
{
cudaThreadSynchronize();
checkCudaErrors(cudaMemcpy(hResult, svmScores, sizeof(float) * scaleCount * hNumberOfWindowsX * hNumberOfWindowsY, cudaMemcpyDeviceToHost));
return hResult;
}
__host__ void CalculateHOGDescriptor(float *hostImage, int width, int height, int noOfHistogramBins,
int windowSizeX, int windowSizeY, int cellSizeX, int cellSizeY,
int blockSizeX, int blockSizeY, float *hostDesc, float *gradient)
{
cudaSetDevice(gpuGetMaxGflopsDeviceId());
int noOfBlocksX;
int noOfBlocksY;
float1 *deviceImage;
float2 *colorGradientsF2;
float1 *blockHistograms;
int y;
noOfBlocksX = width / cellSizeX - blockSizeX + 1;
noOfBlocksY = height / cellSizeY - blockSizeY + 1;
checkCudaErrors(cudaMalloc((void**)&deviceImage, sizeof(float1) * width * height));
checkCudaErrors(cudaMalloc((void**)&colorGradientsF2, sizeof(float2) * width * height));
checkCudaErrors(cudaMalloc((void**)&blockHistograms, sizeof(float1) * noOfBlocksX * noOfBlocksY * blockSizeX * blockSizeY * noOfHistogramBins));
checkCudaErrors(cudaMalloc((void**) &outputTest4, sizeof(uchar4) * width * height));
InitConvolution(width, height, true);
InitHistograms(cellSizeX, cellSizeY, blockSizeX, blockSizeY, noOfHistogramBins, 2.0);
checkCudaErrors(cudaMemcpy2D(deviceImage, sizeof(float1) * width, hostImage, sizeof(float1) * width, sizeof(float1) * width, height, cudaMemcpyHostToDevice));
SetConvolutionSize(width, height);
ComputeColorGradients1to2(deviceImage, colorGradientsF2);
checkCudaErrors(cudaMemcpy(colorGradientsF2, colorGradientsF2 + width, sizeof(float2) * width, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(colorGradientsF2 + width * (height - 1), colorGradientsF2 + width * (height - 2), sizeof(float2) * width, cudaMemcpyDeviceToDevice));
for (y = 0; y < height; y++) {
checkCudaErrors(cudaMemcpy(colorGradientsF2 + y * width, colorGradientsF2 + y * width + 1, sizeof(float2), cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(colorGradientsF2 + y * width + width - 1, colorGradientsF2 + y * width + width - 2, sizeof(float2), cudaMemcpyDeviceToDevice));
}
ComputeBlockHistogramsWithGauss(colorGradientsF2, blockHistograms, noOfHistogramBins, cellSizeX, cellSizeY, blockSizeX, blockSizeY, windowSizeX, windowSizeY, width, height);
NormalizeBlockHistograms(blockHistograms, noOfHistogramBins, cellSizeX, cellSizeY, blockSizeX, blockSizeY, width, height);
Float2ToUchar4(colorGradientsF2, outputTest4, width, height, 0);
checkCudaErrors(cudaMemcpy2D(gradient, width * sizeof(float1), outputTest4, width * sizeof(float1), width * sizeof(float1), height, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(hostDesc, blockHistograms, sizeof(float1) * noOfBlocksX * noOfBlocksY * blockSizeX * blockSizeY * noOfHistogramBins, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(deviceImage));
checkCudaErrors(cudaFree(colorGradientsF2));
checkCudaErrors(cudaFree(blockHistograms));
checkCudaErrors(cudaFree(outputTest4));
CloseConvolution();
CloseHistogram();
cudaThreadExit();
cudaThreadSynchronize();
}
__host__ void InitHOGDescriptorCalculator(int width, int height, int noOfHistogramBins,
int windowSizeX, int windowSizeY, int cellSizeX, int cellSizeY,
int blockSizeX, int blockSizeY, float wtscale)
{
hWidth = width;
hHeight = height;
hNoHistogramBins = noOfHistogramBins;
hWindowSizeX = windowSizeX;
hWindowSizeY = windowSizeY;
hCellSizeX = cellSizeX;
hCellSizeY = cellSizeY;
hBlockSizeX = blockSizeX;
hBlockSizeY = blockSizeY;
hNoOfBlocksX = width / cellSizeX - blockSizeX + 1;
hNoOfBlocksY = height / cellSizeY - blockSizeY + 1;
checkCudaErrors(cudaMalloc((void**)&deviceImage, sizeof(float1) * width * height));
checkCudaErrors(cudaMalloc((void**)&colorGradientsF2, sizeof(float2) * width * height));
checkCudaErrors(cudaMalloc((void**)&blockHistograms, sizeof(float1) * hNoOfBlocksX * hNoOfBlocksY * blockSizeX * blockSizeY * noOfHistogramBins));
checkCudaErrors(cudaMalloc((void**) &outputTest4, sizeof(uchar4) * width * height));
InitConvolution(width, height, true);
InitHistograms(cellSizeX, cellSizeY, blockSizeX, blockSizeY, noOfHistogramBins, wtscale);
}
__host__ void HOGDescriptorCalculator(float *hostImage, float *hostDesc)
{
int y;
checkCudaErrors(cudaMemcpy2D(deviceImage, sizeof(float1) * hWidth, hostImage, sizeof(float1) * hWidth, sizeof(float1) * hWidth, hHeight, cudaMemcpyHostToDevice));
SetConvolutionSize(hWidth, hHeight);
ComputeColorGradients1to2(deviceImage, colorGradientsF2);
checkCudaErrors(cudaMemcpy(colorGradientsF2, colorGradientsF2 + hWidth, sizeof(float2) * hWidth, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(colorGradientsF2 + hWidth * (hHeight - 1), colorGradientsF2 + hWidth * (hHeight - 2), sizeof(float2) * hWidth, cudaMemcpyDeviceToDevice));
for (y = 0; y < hHeight; y++) {
checkCudaErrors(cudaMemcpy(colorGradientsF2 + y * hWidth, colorGradientsF2 + y * hWidth + 1, sizeof(float2), cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(colorGradientsF2 + y * hWidth + hWidth - 1, colorGradientsF2 + y * hWidth + hWidth - 2, sizeof(float2), cudaMemcpyDeviceToDevice));
}
ComputeBlockHistogramsWithGauss(colorGradientsF2, blockHistograms, hNoHistogramBins, hCellSizeX, hCellSizeY, hBlockSizeX, hBlockSizeY, hWindowSizeX, hWindowSizeY, hWidth, hHeight);
NormalizeBlockHistograms(blockHistograms, hNoHistogramBins, hCellSizeX, hCellSizeY, hBlockSizeX, hBlockSizeY, hWidth, hHeight);
checkCudaErrors(cudaMemcpy(hostDesc, blockHistograms, sizeof(float1) * hNoOfBlocksX * hNoOfBlocksY * hBlockSizeX * hBlockSizeY * hNoHistogramBins, cudaMemcpyDeviceToHost));
}
__host__ void HOGDescriptorCalculatorOG(float *hostImage, float *hostDesc, float *gradient)
{
HOGDescriptorCalculator(hostImage, hostDesc);
Float2ToUchar4(colorGradientsF2, outputTest4, hWidth, hHeight, 0);
checkCudaErrors(cudaMemcpy2D(gradient, hWidth * sizeof(float1), outputTest4, hWidth * sizeof(float1), hWidth * sizeof(float1), hHeight, cudaMemcpyDeviceToHost));
}
__host__ void FreeHOGDescriptorCalculator()
{
checkCudaErrors(cudaFree(deviceImage));
checkCudaErrors(cudaFree(colorGradientsF2));
checkCudaErrors(cudaFree(blockHistograms));
checkCudaErrors(cudaFree(outputTest4));
CloseConvolution();
CloseHistogram();
	cudaThreadSynchronize();	// finish any outstanding work before tearing down the context
	cudaThreadExit();
}
__host__ void GetProcessedImage(unsigned char* hostImage, int imageType)
{
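	// imageType selects what is copied back to hostImage: 0 = resized padded image, 1/2 = the two
	// colour-gradient channels, 3 = the full padded registered image, 4 = only the registered region of interest.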
switch (imageType)
{
case 0:
Float4ToUchar4(resizedPaddedImageF4, outputTest4, rPaddedWidth, rPaddedHeight);
break;
case 1:
Float2ToUchar4(colorGradientsF2, outputTest4, rPaddedWidth, rPaddedHeight, 0);
break;
case 2:
Float2ToUchar4(colorGradientsF2, outputTest4, rPaddedWidth, rPaddedHeight, 1);
break;
case 3:
checkCudaErrors(cudaMemcpy(hostImage, paddedRegisteredImageU4, sizeof(uchar4) * hPaddedWidth * hPaddedHeight, cudaMemcpyDeviceToHost));
return;
case 4:
checkCudaErrors(cudaMemcpy2D(((uchar4*)hostImage) + minX + minY * hWidth, hWidth * sizeof(uchar4),
paddedRegisteredImageU4 + hPaddingSizeX + hPaddingSizeY * hPaddedWidth, hPaddedWidth * sizeof(uchar4),
hWidthROI * sizeof(uchar4), hHeightROI, cudaMemcpyDeviceToHost));
return;
}
checkCudaErrors(cudaMemcpy2D(hostImage, hPaddedWidth * sizeof(uchar4), outputTest4, rPaddedWidth * sizeof(uchar4),
rPaddedWidth * sizeof(uchar4), rPaddedHeight, cudaMemcpyDeviceToHost));
//checkCudaErrors(cudaMemcpy(hostImage, paddedRegisteredImage, sizeof(uchar4) * hPaddedWidth * hPaddedHeight, cudaMemcpyDeviceToHost));
}
__host__ void GetHOGDescriptor(float *hostDesc)
{
checkCudaErrors(cudaMemcpy(hostDesc, blockHistograms, sizeof(float1) * hNoOfBlocksX * hNoOfBlocksY * hCellSizeX * hCellSizeY * hNoHistogramBins, cudaMemcpyDeviceToHost));
}
__host__ void GetHOGParameters(float *cStartScale, float *cEndScale, float *cScaleRatio, int *cScaleCount,
int *cPaddingSizeX, int *cPaddingSizeY, int *cPaddedWidth, int *cPaddedHeight,
int *cNoOfCellsX, int *cNoOfCellsY, int *cNoOfBlocksX, int *cNoOfBlocksY,
int *cNumberOfWindowsX, int *cNumberOfWindowsY,
int *cNumberOfBlockPerWindowX, int *cNumberOfBlockPerWindowY)
{
*cStartScale = startScale;
*cEndScale = endScale;
*cScaleRatio = scaleRatio;
*cScaleCount = scaleCount;
*cPaddingSizeX = hPaddingSizeX;
*cPaddingSizeY = hPaddingSizeY;
*cPaddedWidth = hPaddedWidth;
*cPaddedHeight = hPaddedHeight;
*cNoOfCellsX = hNoOfCellsX;
*cNoOfCellsY = hNoOfCellsY;
*cNoOfBlocksX = hNoOfBlocksX;
*cNoOfBlocksY = hNoOfBlocksY;
*cNumberOfWindowsX = hNumberOfWindowsX;
*cNumberOfWindowsY = hNumberOfWindowsY;
*cNumberOfBlockPerWindowX = hNumberOfBlockPerWindowX;
*cNumberOfBlockPerWindowY = hNumberOfBlockPerWindowY;
}
cudaArray *imageArray2 = 0;
texture<float4, 2, cudaReadModeElementType> tex2;
cudaChannelFormatDesc channelDescDownscale2;
__global__ void resizeFastBicubic3(float4 *outputFloat, float4* paddedRegisteredImage, int width, int height, float scale)
{
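	// --- Despite the "bicubic" name, resampling is done by the texture unit's hardware linear filter
	//     (tex2 is configured with linear filtering in CUDAImageRescale); scale == 1.0 is a plain copy.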
int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
int i = __umul24(y, width) + x;
float u = x*scale;
float v = y*scale;
if (x < width && y < height)
{
float4 cF;
if (scale == 1.0f)
cF = paddedRegisteredImage[x + y * width];
else
cF = tex2D(tex2, u, v);
outputFloat[i] = cF;
}
}
__host__ void DownscaleImage2(float scale, float4* paddedRegisteredImage,
float4* resizedPaddedImageF4, int width, int height,
int &rPaddedWidth, int &rPaddedHeight)
{
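	// --- Stages the source image in a 2D array bound to the legacy texture reference tex2, launches the
	//     resampling kernel, and returns the rescaled dimensions through rPaddedWidth / rPaddedHeight.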
dim3 hThreadSize, hBlockSize;
hThreadSize = dim3(THREAD_SIZE_W, THREAD_SIZE_H);
rPaddedWidth = iDivUpF(width, scale);
rPaddedHeight = iDivUpF(height, scale);
hBlockSize = dim3(iDivUp(rPaddedWidth, hThreadSize.x), iDivUp(rPaddedHeight, hThreadSize.y));
checkCudaErrors(cudaMemcpyToArray(imageArray2, 0, 0, paddedRegisteredImage, sizeof(float4) * width * height, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaBindTextureToArray(tex2, imageArray2, channelDescDownscale2));
checkCudaErrors(cudaMemset(resizedPaddedImageF4, 0, width * height * sizeof(float4)));
resizeFastBicubic3<<<hBlockSize, hThreadSize>>>((float4*)resizedPaddedImageF4, (float4*)paddedRegisteredImage, rPaddedWidth, rPaddedHeight, scale);
checkCudaErrors(cudaUnbindTexture(tex2));
}
__host__ float3* CUDAImageRescale(float3* src, int width, int height, int &rWidth, int &rHeight, float scale)
{
int i, j, offsetC, offsetL;
float4* srcH; float4* srcD;
float4* dstD; float4* dstH;
float3 val3; float4 val4;
channelDescDownscale2 = cudaCreateChannelDesc<float4>();
tex2.filterMode = cudaFilterModeLinear; tex2.normalized = false;
cudaMalloc((void**)&srcD, sizeof(float4) * width * height);
cudaMalloc((void**)&dstD, sizeof(float4) * width * height);
cudaMallocHost((void**)&srcH, sizeof(float4) * width * height);
cudaMallocHost((void**)&dstH, sizeof(float4) * width * height);
checkCudaErrors(cudaMallocArray(&imageArray2, &channelDescDownscale2, width, height) );
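	// --- Repack the column-major float3 input into the row-major float4 staging buffer used for the texture upload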
for (i=0; i<width; i++)
{
for (j=0; j<height; j++)
{
offsetC = j + i * height;
offsetL = j * width + i;
val3 = src[offsetC];
			srcH[offsetL].x = val3.x;
			srcH[offsetL].y = val3.y;
			srcH[offsetL].z = val3.z;
			srcH[offsetL].w = 0.0f;	// pad the unused component so the float4 texture never samples uninitialised memory
}
}
cudaMemcpy(srcD, srcH, sizeof(float4) * width * height, cudaMemcpyHostToDevice);
DownscaleImage2(scale, srcD, dstD, width, height, rWidth, rHeight);
cudaMemcpy(dstH, dstD, sizeof(float4) * rWidth * rHeight, cudaMemcpyDeviceToHost);
float3* dst = (float3*) malloc (rWidth * rHeight * sizeof(float3));
for (i=0; i<rWidth; i++)
{
for (j=0; j<rHeight; j++)
{
offsetC = j + i * rHeight;
offsetL = j * rWidth + i;
val4 = dstH[offsetL];
dst[offsetC].x = val4.x;
dst[offsetC].y = val4.y;
dst[offsetC].z = val4.z;
}
}
checkCudaErrors(cudaFreeArray(imageArray2));
cudaFree(srcD);
cudaFree(dstD);
cudaFreeHost(srcH);
cudaFreeHost(dstH);
return dst;
}
|
75003c31c205adaf74117875c5eaa9d3ae5a4ad6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
//#include <math.h>
#include "hip/hip_runtime.h"
#include <hip/hip_runtime.h>
#if defined(__HIPCC__) && (TORCH_HIP_VERSION >= 7000)
#include <cusolverDn.h>
#endif
#include <rocblas.h>
#include <hipfft.h>
#include "Utilities.cuh"
#define DEBUG
/*******************/
/* iDivUp FUNCTION */
/*******************/
//extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
__host__ __device__ int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/********************/
/* CUDA ERROR CHECK */
/********************/
// --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) { exit(code); }
}
}
extern "C" void gpuErrchk(hipError_t ans) { gpuAssert((ans), __FILE__, __LINE__); }
/**************************/
/* CUSOLVE ERROR CHECKING */
/**************************/
#if (__CUDACC_VER__ >= 80000)
static const char *_cusolverGetErrorEnum(cusolverStatus_t error)
{
switch (error)
{
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
inline void __cusolveSafeCall(cusolverStatus_t err, const char *file, const int line)
{
if (CUSOLVER_STATUS_SUCCESS != err) {
		fprintf(stderr, "CUSOLVE error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err,
			_cusolverGetErrorEnum(err));
		hipDeviceReset(); assert(0);
}
}
extern "C" void cusolveSafeCall(cusolverStatus_t err) { __cusolveSafeCall(err, __FILE__, __LINE__); }
#endif
/*************************/
/* CUBLAS ERROR CHECKING */
/*************************/
static const char *_cublasGetErrorEnum(hipblasStatus_t error)
{
switch (error)
{
case HIPBLAS_STATUS_SUCCESS:
return "HIPBLAS_STATUS_SUCCESS";
case HIPBLAS_STATUS_NOT_INITIALIZED:
return "HIPBLAS_STATUS_NOT_INITIALIZED";
case HIPBLAS_STATUS_ALLOC_FAILED:
return "HIPBLAS_STATUS_ALLOC_FAILED";
case HIPBLAS_STATUS_INVALID_VALUE:
return "HIPBLAS_STATUS_INVALID_VALUE";
case HIPBLAS_STATUS_ARCH_MISMATCH:
return "HIPBLAS_STATUS_ARCH_MISMATCH";
case HIPBLAS_STATUS_MAPPING_ERROR:
return "HIPBLAS_STATUS_MAPPING_ERROR";
case HIPBLAS_STATUS_EXECUTION_FAILED:
return "HIPBLAS_STATUS_EXECUTION_FAILED";
case HIPBLAS_STATUS_INTERNAL_ERROR:
return "HIPBLAS_STATUS_INTERNAL_ERROR";
case HIPBLAS_STATUS_NOT_SUPPORTED:
return "HIPBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
inline void __cublasSafeCall(hipblasStatus_t err, const char *file, const int line)
{
if (HIPBLAS_STATUS_SUCCESS != err) {
		fprintf(stderr, "CUBLAS error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err,
			_cublasGetErrorEnum(err));
		hipDeviceReset(); assert(0);
}
}
extern "C" void cublasSafeCall(hipblasStatus_t err) { __cublasSafeCall(err, __FILE__, __LINE__); }
/************************/
/* CUFFT ERROR CHECKING */
/************************/
static const char *_cufftGetErrorEnum(hipfftResult error)
{
switch (error)
{
case HIPFFT_SUCCESS:
return "HIPFFT_SUCCESS";
case HIPFFT_INVALID_PLAN:
return "HIPFFT_INVALID_PLAN";
case HIPFFT_ALLOC_FAILED:
return "HIPFFT_ALLOC_FAILED";
case HIPFFT_INVALID_TYPE:
return "HIPFFT_INVALID_TYPE";
case HIPFFT_INVALID_VALUE:
return "HIPFFT_INVALID_VALUE";
case HIPFFT_INTERNAL_ERROR:
return "HIPFFT_INTERNAL_ERROR";
case HIPFFT_EXEC_FAILED:
return "HIPFFT_EXEC_FAILED";
case HIPFFT_SETUP_FAILED:
return "HIPFFT_SETUP_FAILED";
case HIPFFT_INVALID_SIZE:
return "HIPFFT_INVALID_SIZE";
case HIPFFT_UNALIGNED_DATA:
return "HIPFFT_UNALIGNED_DATA";
}
return "<unknown>";
}
// --- CUFFTSAFECALL
inline void __cufftSafeCall(hipfftResult err, const char *file, const int line)
{
if (HIPFFT_SUCCESS != err) {
fprintf(stderr, "CUFFT error in file '%s', line %d\n \nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err, _cufftGetErrorEnum(err));
hipDeviceReset(); assert(0);
}
}
extern "C" void cufftSafeCall(hipfftResult err) { __cufftSafeCall(err, __FILE__, __LINE__); }
/***************************/
/* CUSPARSE ERROR CHECKING */
/***************************/
static const char *_cusparseGetErrorEnum(hipsparseStatus_t error)
{
switch (error)
{
case HIPSPARSE_STATUS_SUCCESS:
return "HIPSPARSE_STATUS_SUCCESS";
case HIPSPARSE_STATUS_NOT_INITIALIZED:
return "HIPSPARSE_STATUS_NOT_INITIALIZED";
case HIPSPARSE_STATUS_ALLOC_FAILED:
return "HIPSPARSE_STATUS_ALLOC_FAILED";
case HIPSPARSE_STATUS_INVALID_VALUE:
return "HIPSPARSE_STATUS_INVALID_VALUE";
case HIPSPARSE_STATUS_ARCH_MISMATCH:
return "HIPSPARSE_STATUS_ARCH_MISMATCH";
case HIPSPARSE_STATUS_MAPPING_ERROR:
return "HIPSPARSE_STATUS_MAPPING_ERROR";
case HIPSPARSE_STATUS_EXECUTION_FAILED:
return "HIPSPARSE_STATUS_EXECUTION_FAILED";
case HIPSPARSE_STATUS_INTERNAL_ERROR:
return "HIPSPARSE_STATUS_INTERNAL_ERROR";
case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case HIPSPARSE_STATUS_ZERO_PIVOT:
return "HIPSPARSE_STATUS_ZERO_PIVOT";
}
return "<unknown>";
}
inline void __cusparseSafeCall(hipsparseStatus_t err, const char *file, const int line)
{
if (HIPSPARSE_STATUS_SUCCESS != err) {
		fprintf(stderr, "CUSPARSE error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err,
			_cusparseGetErrorEnum(err));
		hipDeviceReset(); assert(0);
}
}
extern "C" void cusparseSafeCall(hipsparseStatus_t err) { __cusparseSafeCall(err, __FILE__, __LINE__); }
/************************/
/* REVERSE ARRAY KERNEL */
/************************/
#define BLOCKSIZE_REVERSE 256
// --- Credit to http://www.drdobbs.com/parallel/cuda-supercomputing-for-the-masses-part/208801731?pgno=2
template <class T>
__global__ void reverseArrayKernel(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a)
{
// --- Credit to the simpleTemplates CUDA sample
SharedMemory<T> smem;
T* s_data = smem.getPointer();
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int id = threadIdx.x;
const int offset = blockDim.x * (blockIdx.x + 1);
// --- Load one element per thread from device memory and store it *in reversed order* into shared memory
if (tid < N) s_data[BLOCKSIZE_REVERSE - (id + 1)] = a * d_in[tid];
// --- Block until all threads in the block have written their data to shared memory
__syncthreads();
// --- Write the data from shared memory in forward order
if ((N - offset + id) >= 0) d_out[N - offset + id] = s_data[threadIdx.x];
}
/************************/
/* REVERSE ARRAY KERNEL */
/************************/
template <class T>
void reverseArray(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a) {
reverseArrayKernel << <iDivUp(N, BLOCKSIZE_REVERSE), BLOCKSIZE_REVERSE, BLOCKSIZE_REVERSE * sizeof(T) >> >(d_in, d_out, N, a);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void reverseArray<float>(const float * __restrict__, float * __restrict__, const int, const float);
template void reverseArray<double>(const double * __restrict__, double * __restrict__, const int, const double);
/********************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION KERNEL */
/********************************************************/
#define BLOCKSIZE_CART2POL 256
template <class T>
__global__ void Cartesian2PolarKernel(const T * __restrict__ d_x, const T * __restrict__ d_y, T * __restrict__ d_rho, T * __restrict__ d_theta,
const int N, const T a) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
d_rho[tid] = a * hypot(d_x[tid], d_y[tid]);
d_theta[tid] = atan2(d_y[tid], d_x[tid]);
}
}
/*******************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - GPU */
/*******************************************************/
//template <class T>
//thrust::pair<T *,T *> Cartesian2Polar(const T * __restrict__ d_x, const T * __restrict__ d_y, const int N, const T a) {
//
// T *d_rho; gpuErrchk(hipMalloc((void**)&d_rho, N * sizeof(T)));
// T *d_theta; gpuErrchk(hipMalloc((void**)&d_theta, N * sizeof(T)));
//
// Cartesian2PolarKernel<<<iDivUp(N, BLOCKSIZE_CART2POL), BLOCKSIZE_CART2POL>>>(d_x, d_y, d_rho, d_theta, N, a);
//#ifdef DEBUG
// gpuErrchk(hipPeekAtLastError());
// gpuErrchk(hipDeviceSynchronize());
//#endif
//
// return thrust::make_pair(d_rho, d_theta);
//}
//
//template thrust::pair<float *, float *> Cartesian2Polar<float> (const float *, const float *, const int, const float);
//template thrust::pair<double *, double *> Cartesian2Polar<double> (const double *, const double *, const int, const double);
/*******************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - CPU */
/*******************************************************/
//template <class T>
//thrust::pair<T *,T *> h_Cartesian2Polar(const T * __restrict__ h_x, const T * __restrict__ h_y, const int N, const T a) {
//
// T *h_rho = (T *)malloc(N * sizeof(T));
// T *h_theta = (T *)malloc(N * sizeof(T));
//
// for (int i = 0; i < N; i++) {
// h_rho[i] = a * hypot(h_x[i], h_y[i]);
// h_theta[i] = atan2(h_y[i], h_x[i]);
// }
//
// return thrust::make_pair(h_rho, h_theta);
//}
//
//template thrust::pair<float *, float *> h_Cartesian2Polar<float> (const float *, const float *, const int, const float);
//template thrust::pair<double *, double *> h_Cartesian2Polar<double> (const double *, const double *, const int, const double);
/*******************************/
/* COMPUTE L2 NORM OF A VECTOR */
/*******************************/
template<class T>
T h_l2_norm(T *v1, T *v2, const int N) {
T norm = (T)0;
for (int i = 0; i < N; ++i)
{
T d = v1[i] - v2[i];
norm = norm + d * d;
}
return sqrt(norm);
}
template float h_l2_norm<float> (float *, float *, const int);
template double h_l2_norm<double>(double *, double *, const int);
/*******************************/
/* LINEAR COMBINATION FUNCTION */
/*******************************/
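// --- Computes d_linear_combination = d_basis_functions_real * d_coeff with a single gemv call
//     (the basis matrix is column-major, N_sampling_points x N_basis_functions, one basis function per column)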
void linearCombination(const float * __restrict__ d_coeff, const float * __restrict__ d_basis_functions_real, float * __restrict__ d_linear_combination,
const int N_basis_functions, const int N_sampling_points, const hipblasHandle_t handle) {
float alpha = 1.f;
float beta = 0.f;
cublasSafeCall(hipblasSgemv(handle, HIPBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points,
d_coeff, 1, &beta, d_linear_combination, 1));
}
void linearCombination(const double * __restrict__ d_coeff, const double * __restrict__ d_basis_functions_real, double * __restrict__ d_linear_combination,
const int N_basis_functions, const int N_sampling_points, const hipblasHandle_t handle) {
double alpha = 1.;
double beta = 0.;
cublasSafeCall(hipblasDgemv(handle, HIPBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points,
d_coeff, 1, &beta, d_linear_combination, 1));
}
/******************************/
/* ADD A CONSTANT TO A VECTOR */
/******************************/
#define BLOCKSIZE_VECTORADDCONSTANT 256
template<class T>
__global__ void vectorAddConstantKernel(T * __restrict__ d_in, const T scalar, const int N) {
const int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N) d_in[tid] += scalar;
}
template<class T>
void vectorAddConstant(T * __restrict__ d_in, const T scalar, const int N) {
vectorAddConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORADDCONSTANT), BLOCKSIZE_VECTORADDCONSTANT >> >(d_in, scalar, N);
}
template void vectorAddConstant<float>(float * __restrict__, const float, const int);
template void vectorAddConstant<double>(double * __restrict__, const double, const int);
/*****************************************/
/* MULTIPLY A VECTOR BY A CONSTANT - GPU */
/*****************************************/
#define BLOCKSIZE_VECTORMULCONSTANT 256
template<class T>
__global__ void vectorMulConstantKernel(T * __restrict__ d_in, const T scalar, const int N) {
const int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N) d_in[tid] *= scalar;
}
template<class T>
void vectorMulConstant(T * __restrict__ d_in, const T scalar, const int N) {
vectorMulConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORMULCONSTANT), BLOCKSIZE_VECTORMULCONSTANT >> >(d_in, scalar, N);
}
template void vectorMulConstant<float>(float * __restrict__, const float, const int);
template void vectorMulConstant<double>(double * __restrict__, const double, const int);
/*****************************************/
/* MULTIPLY A VECTOR BY A CONSTANT - CPU */
/*****************************************/
template<class T>
void h_vectorMulConstant(T * __restrict__ h_in, const T scalar, const int N) {
for (int i = 0; i < N; i++) h_in[i] *= scalar;
}
template void h_vectorMulConstant<float>(float * __restrict__, const float, const int);
template void h_vectorMulConstant<double>(double * __restrict__, const double, const int);
/*****************************************************/
/* FUSED MULTIPLY ADD OPERATIONS FOR HOST AND DEVICE */
/*****************************************************/
template<class T>
__host__ __device__ T fma2(T x, T y, T z) { return x * y + z; }
template float fma2<float >(float, float, float);
template double fma2<double>(double, double, double);
/*******************/
/* MODULO FUNCTION */
/*******************/
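// --- Euclidean modulo: always returns a value in [0, _mod), using a bit-mask instead of % when _mod is a power of two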
__device__ int modulo(int val, int _mod)
{
int P;
if (val > 0) { (!(_mod & (_mod - 1)) ? P = val&(_mod - 1) : P = val % (_mod)); return P; }
else
{
(!(_mod & (_mod - 1)) ? P = (-val)&(_mod - 1) : P = (-val) % (_mod));
if (P > 0) return _mod - P;
else return 0;
}
}
/***************************************/
/* ATOMIC ADDITION FUNCTION ON DOUBLES */
/***************************************/
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
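// --- Software fallback for devices below compute capability 6.0, which lack a native double-precision
//     atomicAdd: emulated with a 64-bit atomicCAS retry loop.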
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
	unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
/*********************************/
/* ATOMIC MIN FUNCTION ON FLOATS */
/*********************************/
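// --- There is no built-in atomicMin for floats; emulate it by comparing in float space (fminf) and
//     committing the result through atomicCAS on the int bit pattern until no other thread interferes.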
__device__ float atomicMin(float* address, float val)
{
int* address_as_i = (int*)address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
| 75003c31c205adaf74117875c5eaa9d3ae5a4ad6.cu | #include <stdio.h>
#include <assert.h>
//#include <math.h>
#include "cuda_runtime.h"
#include <cuda.h>
#if defined(__CUDACC__) && (CUDA_VERSION >= 7000)
#include <cusolverDn.h>
#endif
#include <cublas_v2.h>
#include <cufft.h>
#include "Utilities.cuh"
#define DEBUG
/*******************/
/* iDivUp FUNCTION */
/*******************/
//extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
__host__ __device__ int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/********************/
/* CUDA ERROR CHECK */
/********************/
// --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) { exit(code); }
}
}
extern "C" void gpuErrchk(cudaError_t ans) { gpuAssert((ans), __FILE__, __LINE__); }
/**************************/
/* CUSOLVE ERROR CHECKING */
/**************************/
#if (__CUDACC_VER__ >= 80000)
static const char *_cusolverGetErrorEnum(cusolverStatus_t error)
{
switch (error)
{
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
inline void __cusolveSafeCall(cusolverStatus_t err, const char *file, const int line)
{
if (CUSOLVER_STATUS_SUCCESS != err) {
		fprintf(stderr, "CUSOLVE error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err,
			_cusolverGetErrorEnum(err));
		cudaDeviceReset(); assert(0);
}
}
extern "C" void cusolveSafeCall(cusolverStatus_t err) { __cusolveSafeCall(err, __FILE__, __LINE__); }
#endif
/*************************/
/* CUBLAS ERROR CHECKING */
/*************************/
static const char *_cublasGetErrorEnum(cublasStatus_t error)
{
switch (error)
{
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
inline void __cublasSafeCall(cublasStatus_t err, const char *file, const int line)
{
if (CUBLAS_STATUS_SUCCESS != err) {
		fprintf(stderr, "CUBLAS error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err,
			_cublasGetErrorEnum(err));
		cudaDeviceReset(); assert(0);
}
}
extern "C" void cublasSafeCall(cublasStatus_t err) { __cublasSafeCall(err, __FILE__, __LINE__); }
/************************/
/* CUFFT ERROR CHECKING */
/************************/
static const char *_cufftGetErrorEnum(cufftResult error)
{
switch (error)
{
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
}
return "<unknown>";
}
// --- CUFFTSAFECALL
inline void __cufftSafeCall(cufftResult err, const char *file, const int line)
{
if (CUFFT_SUCCESS != err) {
fprintf(stderr, "CUFFT error in file '%s', line %d\n \nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err, _cufftGetErrorEnum(err));
cudaDeviceReset(); assert(0);
}
}
extern "C" void cufftSafeCall(cufftResult err) { __cufftSafeCall(err, __FILE__, __LINE__); }
/***************************/
/* CUSPARSE ERROR CHECKING */
/***************************/
static const char *_cusparseGetErrorEnum(cusparseStatus_t error)
{
switch (error)
{
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSPARSE_STATUS_ZERO_PIVOT:
return "CUSPARSE_STATUS_ZERO_PIVOT";
}
return "<unknown>";
}
inline void __cusparseSafeCall(cusparseStatus_t err, const char *file, const int line)
{
if (CUSPARSE_STATUS_SUCCESS != err) {
		fprintf(stderr, "CUSPARSE error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err,
			_cusparseGetErrorEnum(err));
		cudaDeviceReset(); assert(0);
}
}
extern "C" void cusparseSafeCall(cusparseStatus_t err) { __cusparseSafeCall(err, __FILE__, __LINE__); }
/************************/
/* REVERSE ARRAY KERNEL */
/************************/
#define BLOCKSIZE_REVERSE 256
// --- Credit to http://www.drdobbs.com/parallel/cuda-supercomputing-for-the-masses-part/208801731?pgno=2
template <class T>
__global__ void reverseArrayKernel(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a)
{
// --- Credit to the simpleTemplates CUDA sample
SharedMemory<T> smem;
T* s_data = smem.getPointer();
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int id = threadIdx.x;
const int offset = blockDim.x * (blockIdx.x + 1);
// --- Load one element per thread from device memory and store it *in reversed order* into shared memory
if (tid < N) s_data[BLOCKSIZE_REVERSE - (id + 1)] = a * d_in[tid];
// --- Block until all threads in the block have written their data to shared memory
__syncthreads();
// --- Write the data from shared memory in forward order
if ((N - offset + id) >= 0) d_out[N - offset + id] = s_data[threadIdx.x];
}
/************************/
/* REVERSE ARRAY KERNEL */
/************************/
template <class T>
void reverseArray(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a) {
reverseArrayKernel << <iDivUp(N, BLOCKSIZE_REVERSE), BLOCKSIZE_REVERSE, BLOCKSIZE_REVERSE * sizeof(T) >> >(d_in, d_out, N, a);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void reverseArray<float>(const float * __restrict__, float * __restrict__, const int, const float);
template void reverseArray<double>(const double * __restrict__, double * __restrict__, const int, const double);
/********************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION KERNEL */
/********************************************************/
#define BLOCKSIZE_CART2POL 256
template <class T>
__global__ void Cartesian2PolarKernel(const T * __restrict__ d_x, const T * __restrict__ d_y, T * __restrict__ d_rho, T * __restrict__ d_theta,
const int N, const T a) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
d_rho[tid] = a * hypot(d_x[tid], d_y[tid]);
d_theta[tid] = atan2(d_y[tid], d_x[tid]);
}
}
/*******************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - GPU */
/*******************************************************/
//template <class T>
//thrust::pair<T *,T *> Cartesian2Polar(const T * __restrict__ d_x, const T * __restrict__ d_y, const int N, const T a) {
//
// T *d_rho; gpuErrchk(cudaMalloc((void**)&d_rho, N * sizeof(T)));
// T *d_theta; gpuErrchk(cudaMalloc((void**)&d_theta, N * sizeof(T)));
//
// Cartesian2PolarKernel<<<iDivUp(N, BLOCKSIZE_CART2POL), BLOCKSIZE_CART2POL>>>(d_x, d_y, d_rho, d_theta, N, a);
//#ifdef DEBUG
// gpuErrchk(cudaPeekAtLastError());
// gpuErrchk(cudaDeviceSynchronize());
//#endif
//
// return thrust::make_pair(d_rho, d_theta);
//}
//
//template thrust::pair<float *, float *> Cartesian2Polar<float> (const float *, const float *, const int, const float);
//template thrust::pair<double *, double *> Cartesian2Polar<double> (const double *, const double *, const int, const double);
/*******************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - CPU */
/*******************************************************/
//template <class T>
//thrust::pair<T *,T *> h_Cartesian2Polar(const T * __restrict__ h_x, const T * __restrict__ h_y, const int N, const T a) {
//
// T *h_rho = (T *)malloc(N * sizeof(T));
// T *h_theta = (T *)malloc(N * sizeof(T));
//
// for (int i = 0; i < N; i++) {
// h_rho[i] = a * hypot(h_x[i], h_y[i]);
// h_theta[i] = atan2(h_y[i], h_x[i]);
// }
//
// return thrust::make_pair(h_rho, h_theta);
//}
//
//template thrust::pair<float *, float *> h_Cartesian2Polar<float> (const float *, const float *, const int, const float);
//template thrust::pair<double *, double *> h_Cartesian2Polar<double> (const double *, const double *, const int, const double);
/*******************************/
/* COMPUTE L2 NORM OF A VECTOR */
/*******************************/
template<class T>
T h_l2_norm(T *v1, T *v2, const int N) {
T norm = (T)0;
for (int i = 0; i < N; ++i)
{
T d = v1[i] - v2[i];
norm = norm + d * d;
}
return sqrt(norm);
}
template float h_l2_norm<float> (float *, float *, const int);
template double h_l2_norm<double>(double *, double *, const int);
/*******************************/
/* LINEAR COMBINATION FUNCTION */
/*******************************/
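// --- Computes d_linear_combination = d_basis_functions_real * d_coeff with a single gemv call
//     (the basis matrix is column-major, N_sampling_points x N_basis_functions, one basis function per column)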
void linearCombination(const float * __restrict__ d_coeff, const float * __restrict__ d_basis_functions_real, float * __restrict__ d_linear_combination,
const int N_basis_functions, const int N_sampling_points, const cublasHandle_t handle) {
float alpha = 1.f;
float beta = 0.f;
cublasSafeCall(cublasSgemv(handle, CUBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points,
d_coeff, 1, &beta, d_linear_combination, 1));
}
void linearCombination(const double * __restrict__ d_coeff, const double * __restrict__ d_basis_functions_real, double * __restrict__ d_linear_combination,
const int N_basis_functions, const int N_sampling_points, const cublasHandle_t handle) {
double alpha = 1.;
double beta = 0.;
cublasSafeCall(cublasDgemv(handle, CUBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points,
d_coeff, 1, &beta, d_linear_combination, 1));
}
/******************************/
/* ADD A CONSTANT TO A VECTOR */
/******************************/
#define BLOCKSIZE_VECTORADDCONSTANT 256
template<class T>
__global__ void vectorAddConstantKernel(T * __restrict__ d_in, const T scalar, const int N) {
const int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N) d_in[tid] += scalar;
}
template<class T>
void vectorAddConstant(T * __restrict__ d_in, const T scalar, const int N) {
vectorAddConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORADDCONSTANT), BLOCKSIZE_VECTORADDCONSTANT >> >(d_in, scalar, N);
}
template void vectorAddConstant<float>(float * __restrict__, const float, const int);
template void vectorAddConstant<double>(double * __restrict__, const double, const int);
/*****************************************/
/* MULTIPLY A VECTOR BY A CONSTANT - GPU */
/*****************************************/
#define BLOCKSIZE_VECTORMULCONSTANT 256
template<class T>
__global__ void vectorMulConstantKernel(T * __restrict__ d_in, const T scalar, const int N) {
const int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N) d_in[tid] *= scalar;
}
template<class T>
void vectorMulConstant(T * __restrict__ d_in, const T scalar, const int N) {
vectorMulConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORMULCONSTANT), BLOCKSIZE_VECTORMULCONSTANT >> >(d_in, scalar, N);
}
template void vectorMulConstant<float>(float * __restrict__, const float, const int);
template void vectorMulConstant<double>(double * __restrict__, const double, const int);
/*****************************************/
/* MULTIPLY A VECTOR BY A CONSTANT - CPU */
/*****************************************/
template<class T>
void h_vectorMulConstant(T * __restrict__ h_in, const T scalar, const int N) {
for (int i = 0; i < N; i++) h_in[i] *= scalar;
}
template void h_vectorMulConstant<float>(float * __restrict__, const float, const int);
template void h_vectorMulConstant<double>(double * __restrict__, const double, const int);
/*****************************************************/
/* FUSED MULTIPLY ADD OPERATIONS FOR HOST AND DEVICE */
/*****************************************************/
template<class T>
__host__ __device__ T fma2(T x, T y, T z) { return x * y + z; }
template float fma2<float >(float, float, float);
template double fma2<double>(double, double, double);
/*******************/
/* MODULO FUNCTION */
/*******************/
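// --- Euclidean modulo: always returns a value in [0, _mod), using a bit-mask instead of % when _mod is a power of two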
__device__ int modulo(int val, int _mod)
{
int P;
if (val > 0) { (!(_mod & (_mod - 1)) ? P = val&(_mod - 1) : P = val % (_mod)); return P; }
else
{
(!(_mod & (_mod - 1)) ? P = (-val)&(_mod - 1) : P = (-val) % (_mod));
if (P > 0) return _mod - P;
else return 0;
}
}
/***************************************/
/* ATOMIC ADDITION FUNCTION ON DOUBLES */
/***************************************/
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
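// --- Software fallback for devices below compute capability 6.0, which lack a native double-precision
//     atomicAdd: emulated with a 64-bit atomicCAS retry loop.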
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
	unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
/*********************************/
/* ATOMIC MIN FUNCTION ON FLOATS */
/*********************************/
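// --- There is no built-in atomicMin for floats; emulate it by comparing in float space (fminf) and
//     committing the result through atomicCAS on the int bit pattern until no other thread interferes.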
__device__ float atomicMin(float* address, float val)
{
int* address_as_i = (int*)address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
|
ebff6e76b880f1d6d1da93f30875019dd39f1166.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include "stdio.h"
#include "hip/hip_runtime.h"
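// Dynamic-parallelism demo: the kernel relaunches itself from the device with half as many threads until
// iSize == 1. (Device-side launches need compute capability >= 3.5 and relocatable device code on CUDA;
// HIP support for this feature depends on the platform.)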
__global__ void nestedHelloWorld(int const iSize, int iDepth){
int tid = threadIdx.x;
printf("Recursion = %d: Hello World from thread %d block %d \n", iDepth, tid, blockIdx.x);
// condition to stop recursive execution
if (iSize == 1) return;
// reduce blocksize to half
int nthreads = iSize >> 1;
// thread 0 launches child grid recursively
if(tid == 0 && nthreads > 0){
hipLaunchKernelGGL(( nestedHelloWorld), dim3(1), dim3(nthreads), 0, 0, nthreads, ++iDepth);
printf("-------> nested execution depth: %d \n", iDepth);
}
}
int main(int argc, char **argv)
{
int size = 8;
int blocksize = 8; // initial block size
int igrid = 1;
if(argc > 1)
{
igrid = atoi(argv[1]);
size = igrid * blocksize;
}
dim3 block (blocksize, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("%s Execution Configuration: grid %d block %d\n", argv[0], grid.x,
block.x);
hipLaunchKernelGGL(( nestedHelloWorld), dim3(grid), dim3(block), 0, 0, block.x, 0);
CHECK(hipGetLastError());
CHECK(hipDeviceReset());
return 0;
} | ebff6e76b880f1d6d1da93f30875019dd39f1166.cu | #include "../common/common.h"
#include "stdio.h"
#include "cuda_runtime.h"
__global__ void nestedHelloWorld(int const iSize, int iDepth){
int tid = threadIdx.x;
printf("Recursion = %d: Hello World from thread %d block %d \n", iDepth, tid, blockIdx.x);
// condition to stop recursive execution
if (iSize == 1) return;
// reduce blocksize to half
int nthreads = iSize >> 1;
// thread 0 launches child grid recursively
if(tid == 0 && nthreads > 0){
nestedHelloWorld<<<1, nthreads>>>(nthreads, ++iDepth);
printf("-------> nested execution depth: %d \n", iDepth);
}
}
int main(int argc, char **argv)
{
int size = 8;
int blocksize = 8; // initial block size
int igrid = 1;
if(argc > 1)
{
igrid = atoi(argv[1]);
size = igrid * blocksize;
}
dim3 block (blocksize, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("%s Execution Configuration: grid %d block %d\n", argv[0], grid.x,
block.x);
nestedHelloWorld<<<grid, block>>>(block.x, 0);
CHECK(cudaGetLastError());
CHECK(cudaDeviceReset());
return 0;
} |
d6b8fd302c675894c051311369b0c4b932521015.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
void stupidfunction() {
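	// Scratch host allocation that is never used or freed; seemingly left in only as a throwaway test helper.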
float* a = (float*)malloc(100000 * sizeof(float));
}
int main(int argc, char* argv[]){
hipError_t error;
hipDeviceProp_t prop;
int count; //stores the number of CUDA compatible devices
	error = hipGetDeviceCount(&count);		//get the number of devices with compute capability >= 1.0
if(error != hipSuccess){ //if there is an error getting the device count
std::cout<<"ERROR calling hipGetDeviceCount()"<<std::endl; //display an error message
return error; //return the error
}
std::cout<<"Number of CUDA devices: "<<count<<std::endl;
std::cout<<"Device 0 Properties-------------------------"<<std::endl;
error = hipGetDeviceProperties(&prop, 0); //get the properties for the first CUDA device
if(error != hipSuccess){ //if there is an error getting the device properties
std::cout<<"ERROR calling hipGetDeviceProperties()"<<std::endl; //display an error message
return error; //return the error
}
stupidfunction();
std::cout<<"Name: "<<prop.name<<std::endl
		<<"Global Memory: "<<(double)prop.totalGlobalMem/1024/1024/1024<<" GB"<<std::endl
<<"Shared Memory/block: "<<(double)prop.sharedMemPerBlock/1024<<" Kb"<<std::endl
<<"Registers/block: "<<prop.regsPerBlock<<std::endl
<<"Warp Size: "<<prop.warpSize<<std::endl
<<"Max Threads/block: "<<prop.maxThreadsPerBlock<<std::endl
<<"Max Block Dimensions: ["
<<prop.maxThreadsDim[0]<<" x "
<<prop.maxThreadsDim[1]<<" x "
<<prop.maxThreadsDim[2]<<"]"<<std::endl
<<"Max Grid Dimensions: ["
<<prop.maxGridSize[0]<<" x "
<<prop.maxGridSize[1]<<" x "
<<prop.maxGridSize[2]<<"]"<<std::endl
<<"Constant Memory: "<<(double)prop.totalConstMem/1024<<" Kb"<<std::endl
<<"Compute Capability: "<<prop.major<<"."<<prop.minor<<std::endl
<<"Clock Rate: "<<(double)prop.clockRate/1000000<<" GHz"<<std::endl;
std::ofstream fout;
fout.open("output.txt");
fout<<"Name: "<<prop.name<<std::endl
		<<"Global Memory: "<<(double)prop.totalGlobalMem/1024/1024/1024<<" GB"<<std::endl
<<"Shared Memory/block: "<<(double)prop.sharedMemPerBlock/1024<<" Kb"<<std::endl
<<"Registers/block: "<<prop.regsPerBlock<<std::endl
<<"Warp Size: "<<prop.warpSize<<std::endl
<<"Max Threads/block: "<<prop.maxThreadsPerBlock<<std::endl
<<"Max Block Dimensions: ["
<<prop.maxThreadsDim[0]<<" x "
<<prop.maxThreadsDim[1]<<" x "
<<prop.maxThreadsDim[2]<<"]"<<std::endl
<<"Max Grid Dimensions: ["
<<prop.maxGridSize[0]<<" x "
<<prop.maxGridSize[1]<<" x "
<<prop.maxGridSize[2]<<"]"<<std::endl
<<"Constant Memory: "<<(double)prop.totalConstMem/1024<<" Kb"<<std::endl
<<"Compute Capability: "<<prop.major<<"."<<prop.minor<<std::endl
<<"Clock Rate: "<<(double)prop.clockRate/1000000<<" GHz"<<std::endl;
fout<<std::flush; fout.close();
} | d6b8fd302c675894c051311369b0c4b932521015.cu | #include <iostream>
#include <fstream>
void stupidfunction() {
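	// Scratch host allocation that is never used or freed; seemingly left in only as a throwaway test helper.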
float* a = (float*)malloc(100000 * sizeof(float));
}
int main(int argc, char* argv[]){
cudaError_t error;
cudaDeviceProp prop;
int count; //stores the number of CUDA compatible devices
	error = cudaGetDeviceCount(&count);		//get the number of devices with compute capability >= 1.0
if(error != cudaSuccess){ //if there is an error getting the device count
std::cout<<"ERROR calling cudaGetDeviceCount()"<<std::endl; //display an error message
return error; //return the error
}
std::cout<<"Number of CUDA devices: "<<count<<std::endl;
std::cout<<"Device 0 Properties-------------------------"<<std::endl;
error = cudaGetDeviceProperties(&prop, 0); //get the properties for the first CUDA device
if(error != cudaSuccess){ //if there is an error getting the device properties
std::cout<<"ERROR calling cudaGetDeviceProperties()"<<std::endl; //display an error message
return error; //return the error
}
stupidfunction();
std::cout<<"Name: "<<prop.name<<std::endl
<<"Global Memory: "<<(double)prop.totalGlobalMem/1024/1024/1024<<" GB"<<std::endl
<<"Shared Memory/block: "<<(double)prop.sharedMemPerBlock/1024<<" Kb"<<std::endl
<<"Registers/block: "<<prop.regsPerBlock<<std::endl
<<"Warp Size: "<<prop.warpSize<<std::endl
<<"Max Threads/block: "<<prop.maxThreadsPerBlock<<std::endl
<<"Max Block Dimensions: ["
<<prop.maxThreadsDim[0]<<" x "
<<prop.maxThreadsDim[1]<<" x "
<<prop.maxThreadsDim[2]<<"]"<<std::endl
<<"Max Grid Dimensions: ["
<<prop.maxGridSize[0]<<" x "
<<prop.maxGridSize[1]<<" x "
<<prop.maxGridSize[2]<<"]"<<std::endl
<<"Constant Memory: "<<(double)prop.totalConstMem/1024<<" Kb"<<std::endl
<<"Compute Capability: "<<prop.major<<"."<<prop.minor<<std::endl
<<"Clock Rate: "<<(double)prop.clockRate/1000000<<" GHz"<<std::endl;
std::ofstream fout;
fout.open("output.txt");
fout<<"Name: "<<prop.name<<std::endl
<<"Global Memory: "<<(double)prop.totalGlobalMem/1024/1024/1024<<" GB"<<std::endl
<<"Shared Memory/block: "<<(double)prop.sharedMemPerBlock/1024<<" Kb"<<std::endl
<<"Registers/block: "<<prop.regsPerBlock<<std::endl
<<"Warp Size: "<<prop.warpSize<<std::endl
<<"Max Threads/block: "<<prop.maxThreadsPerBlock<<std::endl
<<"Max Block Dimensions: ["
<<prop.maxThreadsDim[0]<<" x "
<<prop.maxThreadsDim[1]<<" x "
<<prop.maxThreadsDim[2]<<"]"<<std::endl
<<"Max Grid Dimensions: ["
<<prop.maxGridSize[0]<<" x "
<<prop.maxGridSize[1]<<" x "
<<prop.maxGridSize[2]<<"]"<<std::endl
<<"Constant Memory: "<<(double)prop.totalConstMem/1024<<" Kb"<<std::endl
<<"Compute Capability: "<<prop.major<<"."<<prop.minor<<std::endl
<<"Clock Rate: "<<(double)prop.clockRate/1000000<<" GHz"<<std::endl;
fout<<std::flush; fout.close();
} |
8d8ab66de1b020dae0aab01dfcb4fd12ac7f836e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
template <typename Ptr2D, typename T> __global__ void copyMakeBorder(const Ptr2D src, PtrStepSz<T> dst, int top, int left)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
dst.ptr(y)[x] = src(y - top, x - left);
}
template <template <typename> class B, typename T> struct CopyMakeBorderDispatcher
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& dst, int top, int left,
const typename VecTraits<T>::elem_type* borderValue, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
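            // Wrap src in a BorderReader so the kernel's out-of-range reads (x < left, y < top, or beyond the source extent) are resolved by the border policy B.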
B<T> brd(src.rows, src.cols, VecTraits<T>::make(borderValue));
BorderReader< PtrStep<T>, B<T> > brdSrc(src, brd);
hipLaunchKernelGGL(( copyMakeBorder), dim3(grid), dim3(block), 0, stream, brdSrc, dst, top, left);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
};
template <typename T, int cn> void copyMakeBorder_gpu(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode,
const T* borderValue, hipStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type vec_type;
typedef void (*caller_t)(const PtrStepSz<vec_type>& src, const PtrStepSz<vec_type>& dst, int top, int left, const T* borderValue, hipStream_t stream);
static const caller_t callers[5] =
{
CopyMakeBorderDispatcher<BrdReflect101, vec_type>::call,
CopyMakeBorderDispatcher<BrdReplicate, vec_type>::call,
CopyMakeBorderDispatcher<BrdConstant, vec_type>::call,
CopyMakeBorderDispatcher<BrdReflect, vec_type>::call,
CopyMakeBorderDispatcher<BrdWrap, vec_type>::call
};
callers[borderMode](PtrStepSz<vec_type>(src), PtrStepSz<vec_type>(dst), top, left, borderValue, stream);
}
template void copyMakeBorder_gpu<uchar, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<uchar, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<uchar, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<uchar, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<schar, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<schar, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<schar, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<schar, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<ushort, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<ushort, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<ushort, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<ushort, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<short, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<short, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<short, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<short, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<int, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<int, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<int, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<int, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<float, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<float, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<float, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<float, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, hipStream_t stream);
} // namespace imgproc
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ | 8d8ab66de1b020dae0aab01dfcb4fd12ac7f836e.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
template <typename Ptr2D, typename T> __global__ void copyMakeBorder(const Ptr2D src, PtrStepSz<T> dst, int top, int left)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
dst.ptr(y)[x] = src(y - top, x - left);
}
template <template <typename> class B, typename T> struct CopyMakeBorderDispatcher
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& dst, int top, int left,
const typename VecTraits<T>::elem_type* borderValue, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<T> brd(src.rows, src.cols, VecTraits<T>::make(borderValue));
BorderReader< PtrStep<T>, B<T> > brdSrc(src, brd);
copyMakeBorder<<<grid, block, 0, stream>>>(brdSrc, dst, top, left);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
};
template <typename T, int cn> void copyMakeBorder_gpu(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode,
const T* borderValue, cudaStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type vec_type;
typedef void (*caller_t)(const PtrStepSz<vec_type>& src, const PtrStepSz<vec_type>& dst, int top, int left, const T* borderValue, cudaStream_t stream);
static const caller_t callers[5] =
{
CopyMakeBorderDispatcher<BrdReflect101, vec_type>::call,
CopyMakeBorderDispatcher<BrdReplicate, vec_type>::call,
CopyMakeBorderDispatcher<BrdConstant, vec_type>::call,
CopyMakeBorderDispatcher<BrdReflect, vec_type>::call,
CopyMakeBorderDispatcher<BrdWrap, vec_type>::call
};
callers[borderMode](PtrStepSz<vec_type>(src), PtrStepSz<vec_type>(dst), top, left, borderValue, stream);
}
template void copyMakeBorder_gpu<uchar, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<uchar, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<uchar, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<uchar, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<ushort, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<ushort, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<ushort, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<ushort, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<short, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<short, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<short, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<short, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<float, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<float, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<float, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<float, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
} // namespace imgproc
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ |
b2b25ec1eec13d9953249ac7dbbfb73dc4520296.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
This file is inspired by
https://github.com/quiver-team/torch-quiver/blob/main/srcs/cpp/src/quiver/cuda/quiver_sample.cu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/set_operations.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#include <ostream>
#ifdef PADDLE_WITH_HIP
#include <hip/hip_runtime.h>
#include <hiprand_kernel.h>
#else
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#endif
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/graph_khop_sampler_imp.h"
#include "paddle/fluid/operators/graph_khop_sampler_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/place.h"
constexpr int WARP_SIZE = 32;
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
struct MaxFunctor {
T cap;
HOSTDEVICE explicit inline MaxFunctor(T cap) { this->cap = cap; }
HOSTDEVICE inline T operator()(T x) const {
if (x > cap) {
return cap;
}
return x;
}
};
template <typename T>
struct DegreeFunctor {
const T* dst_count;
HOSTDEVICE explicit inline DegreeFunctor(const T* x) { this->dst_count = x; }
HOSTDEVICE inline T operator()(T i) const {
return dst_count[i + 1] - dst_count[i];
}
};
template <typename T, int BLOCK_WARPS, int TILE_SIZE>
__global__ void GraphSampleNeighborsCUDAKernel(const uint64_t rand_seed,
int k,
const int64_t num_rows,
const T* in_rows,
const T* src,
const T* dst_count,
const T* src_eids,
T* outputs,
T* outputs_eids,
T* output_ptr,
T* output_idxs,
bool return_eids) {
assert(blockDim.x == WARP_SIZE);
assert(blockDim.y == BLOCK_WARPS);
int64_t out_row = blockIdx.x * TILE_SIZE + threadIdx.y;
const int64_t last_row =
min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows);
#ifdef PADDLE_WITH_HIP
hiprandState rng;
hiprand_init(rand_seed * gridDim.x + blockIdx.x,
threadIdx.y * WARP_SIZE + threadIdx.x,
0,
&rng);
#else
hiprandState_t rng;
hiprand_init(rand_seed * gridDim.x + blockIdx.x,
threadIdx.y * WARP_SIZE + threadIdx.x,
0,
&rng);
#endif
while (out_row < last_row) {
const int64_t row = in_rows[out_row];
const int64_t in_row_start = dst_count[row];
const int64_t deg = dst_count[row + 1] - in_row_start;
const int64_t out_row_start = output_ptr[out_row];
if (deg <= k) {
for (int idx = threadIdx.x; idx < deg; idx += WARP_SIZE) {
const T in_idx = in_row_start + idx;
outputs[out_row_start + idx] = src[in_idx];
if (return_eids) {
outputs_eids[out_row_start + idx] = src_eids[in_idx];
}
}
} else {
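      // deg > k: sample k of the deg neighbors reservoir-style. Slots start as indices 0..k-1; each later index tries to claim a random slot, and the atomic max resolves concurrent writers.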
for (int idx = threadIdx.x; idx < k; idx += WARP_SIZE) {
output_idxs[out_row_start + idx] = idx;
}
#ifdef PADDLE_WITH_CUDA
__syncwarp();
#endif
for (int idx = k + threadIdx.x; idx < deg; idx += WARP_SIZE) {
#ifdef PADDLE_WITH_HIP
const int num = hiprand(&rng) % (idx + 1);
#else
const int num = hiprand(&rng) % (idx + 1);
#endif
if (num < k) {
paddle::platform::CudaAtomicMax(output_idxs + out_row_start + num,
idx);
}
}
#ifdef PADDLE_WITH_CUDA
__syncwarp();
#endif
for (int idx = threadIdx.x; idx < k; idx += WARP_SIZE) {
const T perm_idx = output_idxs[out_row_start + idx] + in_row_start;
outputs[out_row_start + idx] = src[perm_idx];
if (return_eids) {
outputs_eids[out_row_start + idx] = src_eids[perm_idx];
}
}
}
out_row += BLOCK_WARPS;
}
}
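// Expands each unique dst node id into dst_sample_counts[out_row] copies, producing the dst column of the sampled edge list.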
template <typename T, int BLOCK_WARPS, int TILE_SIZE>
__global__ void GetDstEdgeCUDAKernel(const int64_t num_rows,
const T* in_rows,
const T* dst_sample_counts,
const T* dst_ptr,
T* outputs) {
assert(blockDim.x == WARP_SIZE);
assert(blockDim.y == BLOCK_WARPS);
int64_t out_row = blockIdx.x * TILE_SIZE + threadIdx.y;
const int64_t last_row =
min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows);
while (out_row < last_row) {
const int64_t row = in_rows[out_row];
const int64_t dst_sample_size = dst_sample_counts[out_row];
const int64_t out_row_start = dst_ptr[out_row];
for (int idx = threadIdx.x; idx < dst_sample_size; idx += WARP_SIZE) {
outputs[out_row_start + idx] = row;
}
#ifdef PADDLE_WITH_CUDA
__syncwarp();
#endif
out_row += BLOCK_WARPS;
}
}
template <typename T>
void SampleNeighbors(const framework::ExecutionContext& ctx,
const T* src,
const T* dst_count,
const T* src_eids,
thrust::device_vector<T>* inputs,
thrust::device_vector<T>* outputs,
thrust::device_vector<T>* output_counts,
thrust::device_vector<T>* outputs_eids,
int k,
bool is_first_layer,
bool is_last_layer,
bool return_eids) {
const size_t bs = inputs->size();
output_counts->resize(bs);
// 1. Get input nodes' degree.
thrust::transform(inputs->begin(),
inputs->end(),
output_counts->begin(),
DegreeFunctor<T>(dst_count));
// 2. Apply sample size k to get final sample size.
if (k >= 0) {
thrust::transform(output_counts->begin(),
output_counts->end(),
output_counts->begin(),
MaxFunctor<T>(k));
}
// 3. Get the number of total sample neighbors and some necessary datas.
T total_sample_num =
thrust::reduce(output_counts->begin(), output_counts->end());
if (is_first_layer) {
PADDLE_ENFORCE_GT(
total_sample_num,
0,
platform::errors::InvalidArgument(
"The input nodes `X` should have at least one neighbor, "
"but none of the input nodes have neighbors."));
}
outputs->resize(total_sample_num);
if (return_eids) {
outputs_eids->resize(total_sample_num);
}
thrust::device_vector<T> output_ptr;
thrust::device_vector<T> output_idxs;
output_ptr.resize(bs);
output_idxs.resize(total_sample_num);
thrust::exclusive_scan(
output_counts->begin(), output_counts->end(), output_ptr.begin(), 0);
// 4. Run graph sample kernel.
constexpr int BLOCK_WARPS = 128 / WARP_SIZE;
constexpr int TILE_SIZE = BLOCK_WARPS * 16;
const dim3 block(WARP_SIZE, BLOCK_WARPS);
const dim3 grid((bs + TILE_SIZE - 1) / TILE_SIZE);
hipLaunchKernelGGL(( GraphSampleNeighborsCUDAKernel<T, BLOCK_WARPS, TILE_SIZE>)
, dim3(grid),
dim3(block),
0,
reinterpret_cast<const phi::GPUContext&>(ctx.device_context())
.stream(), 0,
k,
bs,
thrust::raw_pointer_cast(inputs->data()),
src,
dst_count,
src_eids,
thrust::raw_pointer_cast(outputs->data()),
thrust::raw_pointer_cast(outputs_eids->data()),
thrust::raw_pointer_cast(output_ptr.data()),
thrust::raw_pointer_cast(output_idxs.data()),
return_eids);
// 5. Get inputs = outputs - inputs:
if (!is_last_layer) {
thrust::sort(inputs->begin(), inputs->end());
thrust::device_vector<T> outputs_sort(outputs->size());
thrust::copy(outputs->begin(), outputs->end(), outputs_sort.begin());
thrust::sort(outputs_sort.begin(), outputs_sort.end());
auto outputs_sort_end =
thrust::unique(outputs_sort.begin(), outputs_sort.end());
outputs_sort.resize(
thrust::distance(outputs_sort.begin(), outputs_sort_end));
thrust::device_vector<T> unique_outputs(outputs_sort.size());
auto unique_outputs_end = thrust::set_difference(outputs_sort.begin(),
outputs_sort.end(),
inputs->begin(),
inputs->end(),
unique_outputs.begin());
inputs->resize(
thrust::distance(unique_outputs.begin(), unique_outputs_end));
thrust::copy(unique_outputs.begin(), unique_outputs_end, inputs->begin());
}
}
template <typename T>
void FillHashTable(const framework::ExecutionContext& ctx,
const T* input,
int64_t num_input,
int64_t len_hashtable,
thrust::device_vector<T>* unique_items,
thrust::device_vector<T>* keys,
thrust::device_vector<T>* values,
thrust::device_vector<int64_t>* key_index) {
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
const auto& dev_ctx = ctx.cuda_device_context();
int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int grid_tmp = (num_input + block - 1) / block;
int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
// 1. Insert data into keys and values.
hipLaunchKernelGGL(( BuildHashTable<T>)
, dim3(grid),
dim3(block),
0,
reinterpret_cast<const phi::GPUContext&>(ctx.device_context())
.stream(), input,
num_input,
len_hashtable,
thrust::raw_pointer_cast(keys->data()),
thrust::raw_pointer_cast(key_index->data()));
// 2. Get item index count.
thrust::device_vector<int> item_count(num_input + 1, 0);
hipLaunchKernelGGL(( GetItemIndexCount<T>)
, dim3(grid),
dim3(block),
0,
reinterpret_cast<const phi::GPUContext&>(ctx.device_context())
.stream(), input,
thrust::raw_pointer_cast(item_count.data()),
num_input,
len_hashtable,
thrust::raw_pointer_cast(keys->data()),
thrust::raw_pointer_cast(key_index->data()));
thrust::exclusive_scan(
item_count.begin(), item_count.end(), item_count.begin());
size_t total_unique_items = item_count[num_input];
unique_items->resize(total_unique_items);
// 3. Get unique items.
hipLaunchKernelGGL(( FillUniqueItems<T>)
, dim3(grid),
dim3(block),
0,
reinterpret_cast<const phi::GPUContext&>(ctx.device_context())
.stream(), input,
num_input,
len_hashtable,
thrust::raw_pointer_cast(unique_items->data()),
thrust::raw_pointer_cast(item_count.data()),
thrust::raw_pointer_cast(keys->data()),
thrust::raw_pointer_cast(values->data()),
thrust::raw_pointer_cast(key_index->data()));
}
template <typename T>
void ReindexFunc(const framework::ExecutionContext& ctx,
thrust::device_vector<T>* inputs,
thrust::device_vector<T>* outputs,
thrust::device_vector<T>* subset,
thrust::device_vector<T>* orig_nodes,
thrust::device_vector<T>* reindex_nodes,
int bs) {
subset->resize(inputs->size() + outputs->size());
thrust::copy(inputs->begin(), inputs->end(), subset->begin());
thrust::copy(
outputs->begin(), outputs->end(), subset->begin() + inputs->size());
thrust::device_vector<T> unique_items;
unique_items.clear();
// Fill hash table.
int64_t num = subset->size();
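  // Hash table capacity: the smallest power of two greater than num (between 1x and 2x the key count).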
int64_t log_num = 1 << static_cast<size_t>(1 + std::log2(num >> 1));
int64_t size = log_num << 1;
thrust::device_vector<T> keys(size, -1);
thrust::device_vector<T> values(size, -1);
thrust::device_vector<int64_t> key_index(size, -1);
FillHashTable<T>(ctx,
thrust::raw_pointer_cast(subset->data()),
subset->size(),
size,
&unique_items,
&keys,
&values,
&key_index);
subset->resize(unique_items.size());
thrust::copy(unique_items.begin(), unique_items.end(), subset->begin());
// Fill outputs with reindex result.
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
const auto& dev_ctx = ctx.cuda_device_context();
int64_t max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int64_t grid_tmp = (outputs->size() + block - 1) / block;
int64_t grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
hipLaunchKernelGGL(( ReindexSrcOutput<T>)
, dim3(grid),
dim3(block),
0,
reinterpret_cast<const phi::GPUContext&>(ctx.device_context())
.stream(), thrust::raw_pointer_cast(outputs->data()),
outputs->size(),
size,
thrust::raw_pointer_cast(keys.data()),
thrust::raw_pointer_cast(values.data()));
int grid_ = (bs + block - 1) / block;
hipLaunchKernelGGL(( ReindexInputNodes<T>)
, dim3(grid_),
dim3(block),
0,
reinterpret_cast<const phi::GPUContext&>(ctx.device_context())
.stream(), thrust::raw_pointer_cast(orig_nodes->data()),
bs,
thrust::raw_pointer_cast(reindex_nodes->data()),
size,
thrust::raw_pointer_cast(keys.data()),
thrust::raw_pointer_cast(values.data()));
}
template <typename DeviceContext, typename T>
class GraphKhopSamplerOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
// 1. Get sample neighbors operators' inputs.
auto* src = ctx.Input<Tensor>("Row");
auto* dst_count = ctx.Input<Tensor>("Col_Ptr");
auto* vertices = ctx.Input<Tensor>("X");
std::vector<int> sample_sizes = ctx.Attr<std::vector<int>>("sample_sizes");
bool return_eids = ctx.Attr<bool>("return_eids");
const T* src_data = src->data<T>();
const T* dst_count_data = dst_count->data<T>();
const T* p_vertices = vertices->data<T>();
const int bs = vertices->dims()[0];
// 2. Get unique input nodes(X).
thrust::device_vector<T> inputs(bs);
thrust::copy(p_vertices, p_vertices + bs, inputs.begin());
auto unique_inputs_end = thrust::unique(inputs.begin(), inputs.end());
inputs.resize(thrust::distance(inputs.begin(), unique_inputs_end));
// 3. Sample neighbors. We should distinguish w/o "Src_Eids".
thrust::device_vector<T> outputs;
thrust::device_vector<T> output_counts;
thrust::device_vector<T> outputs_eids;
std::vector<thrust::device_vector<T>> dst_vec;
dst_vec.emplace_back(inputs);
std::vector<thrust::device_vector<T>> outputs_vec;
std::vector<thrust::device_vector<T>> output_counts_vec;
std::vector<thrust::device_vector<T>> outputs_eids_vec;
const size_t num_layers = sample_sizes.size();
bool is_last_layer = false, is_first_layer = true;
if (return_eids) {
auto* src_eids = ctx.Input<Tensor>("Eids");
const T* src_eids_data = src_eids->data<T>();
for (int i = 0; i < num_layers; i++) {
if (i == num_layers - 1) {
is_last_layer = true;
}
if (inputs.size() == 0) {
break;
}
if (i > 0) {
is_first_layer = false;
dst_vec.emplace_back(inputs);
}
SampleNeighbors<T>(ctx,
src_data,
dst_count_data,
src_eids_data,
&inputs,
&outputs,
&output_counts,
&outputs_eids,
sample_sizes[i],
is_first_layer,
is_last_layer,
return_eids);
outputs_vec.emplace_back(outputs);
output_counts_vec.emplace_back(output_counts);
outputs_eids_vec.emplace_back(outputs_eids);
}
} else {
for (int i = 0; i < num_layers; i++) {
if (i == num_layers - 1) {
is_last_layer = true;
}
if (inputs.size() == 0) {
break;
}
if (i > 0) {
is_first_layer = false;
dst_vec.emplace_back(inputs);
}
SampleNeighbors<T>(ctx,
src_data,
dst_count_data,
nullptr,
&inputs,
&outputs,
&output_counts,
&outputs_eids,
sample_sizes[i],
is_first_layer,
is_last_layer,
return_eids);
outputs_vec.emplace_back(outputs);
output_counts_vec.emplace_back(output_counts);
outputs_eids_vec.emplace_back(outputs_eids);
}
}
// 4. Concat intermediate sample results
// Including src_merge, unique_dst_merge and dst_sample_counts_merge.
thrust::device_vector<T> unique_dst_merge; // unique dst
thrust::device_vector<T> src_merge; // src
thrust::device_vector<T> dst_sample_counts_merge; // dst degree
int64_t unique_dst_size = 0, src_size = 0;
for (int i = 0; i < num_layers; i++) {
unique_dst_size += dst_vec[i].size();
src_size += outputs_vec[i].size();
}
unique_dst_merge.resize(unique_dst_size);
src_merge.resize(src_size);
dst_sample_counts_merge.resize(unique_dst_size);
auto unique_dst_merge_ptr = unique_dst_merge.begin();
auto src_merge_ptr = src_merge.begin();
auto dst_sample_counts_merge_ptr = dst_sample_counts_merge.begin();
for (int i = 0; i < num_layers; i++) {
if (i == 0) {
unique_dst_merge_ptr = thrust::copy(
dst_vec[i].begin(), dst_vec[i].end(), unique_dst_merge.begin());
src_merge_ptr = thrust::copy(
outputs_vec[i].begin(), outputs_vec[i].end(), src_merge.begin());
dst_sample_counts_merge_ptr =
thrust::copy(output_counts_vec[i].begin(),
output_counts_vec[i].end(),
dst_sample_counts_merge.begin());
} else {
unique_dst_merge_ptr = thrust::copy(
dst_vec[i].begin(), dst_vec[i].end(), unique_dst_merge_ptr);
src_merge_ptr = thrust::copy(
outputs_vec[i].begin(), outputs_vec[i].end(), src_merge_ptr);
dst_sample_counts_merge_ptr = thrust::copy(output_counts_vec[i].begin(),
output_counts_vec[i].end(),
dst_sample_counts_merge_ptr);
}
}
// 5. Return eids results.
if (return_eids) {
thrust::device_vector<T> eids_merge;
eids_merge.resize(src_size);
auto eids_merge_ptr = eids_merge.begin();
for (int i = 0; i < num_layers; i++) {
if (i == 0) {
eids_merge_ptr = thrust::copy(outputs_eids_vec[i].begin(),
outputs_eids_vec[i].end(),
eids_merge.begin());
} else {
eids_merge_ptr = thrust::copy(outputs_eids_vec[i].begin(),
outputs_eids_vec[i].end(),
eids_merge_ptr);
}
}
auto* out_eids = ctx.Output<Tensor>("Out_Eids");
out_eids->Resize({static_cast<int>(eids_merge.size())});
T* p_out_eids = out_eids->mutable_data<T>(ctx.GetPlace());
thrust::copy(eids_merge.begin(), eids_merge.end(), p_out_eids);
}
int64_t num_sample_edges = thrust::reduce(dst_sample_counts_merge.begin(),
dst_sample_counts_merge.end());
PADDLE_ENFORCE_EQ(
src_merge.size(),
num_sample_edges,
        platform::errors::PreconditionNotMet(
            "Number of sample edges mismatch, the sample kernel has an error."));
// 6. Get hashtable according to unique_dst_merge and src_merge.
// We can get unique items(subset) and reindex src nodes of sample edges.
// We also get Reindex_X for input nodes here.
thrust::device_vector<T> orig_nodes(bs);
thrust::copy(p_vertices, p_vertices + bs, orig_nodes.begin());
thrust::device_vector<T> reindex_nodes(bs);
thrust::device_vector<T> subset;
ReindexFunc<T>(ctx,
&unique_dst_merge,
&src_merge,
&subset,
&orig_nodes,
&reindex_nodes,
bs);
auto* reindex_x = ctx.Output<Tensor>("Reindex_X");
T* p_reindex_x = reindex_x->mutable_data<T>(ctx.GetPlace());
thrust::copy(reindex_nodes.begin(), reindex_nodes.end(), p_reindex_x);
auto* sample_index = ctx.Output<Tensor>("Sample_Index");
sample_index->Resize({static_cast<int>(subset.size())});
T* p_sample_index = sample_index->mutable_data<T>(ctx.GetPlace());
thrust::copy(subset.begin(), subset.end(), p_sample_index); // Done!
// 7. Reindex dst nodes of sample edges.
thrust::device_vector<T> dst_merge(src_size);
thrust::device_vector<T> unique_dst_merge_reindex(unique_dst_size);
thrust::sequence(unique_dst_merge_reindex.begin(),
unique_dst_merge_reindex.end());
thrust::device_vector<T> dst_ptr(unique_dst_size);
thrust::exclusive_scan(dst_sample_counts_merge.begin(),
dst_sample_counts_merge.end(),
dst_ptr.begin());
constexpr int BLOCK_WARPS = 128 / WARP_SIZE;
constexpr int TILE_SIZE = BLOCK_WARPS * 16;
const dim3 block(WARP_SIZE, BLOCK_WARPS);
const dim3 grid((unique_dst_size + TILE_SIZE - 1) / TILE_SIZE);
hipLaunchKernelGGL(( GetDstEdgeCUDAKernel<T, BLOCK_WARPS, TILE_SIZE>)
, dim3(grid),
dim3(block),
0,
reinterpret_cast<const phi::GPUContext&>(ctx.device_context())
.stream(),
unique_dst_size,
thrust::raw_pointer_cast(unique_dst_merge_reindex.data()),
thrust::raw_pointer_cast(dst_sample_counts_merge.data()),
thrust::raw_pointer_cast(dst_ptr.data()),
thrust::raw_pointer_cast(dst_merge.data()));
// 8. Give operator's outputs.
auto* out_src = ctx.Output<Tensor>("Out_Src");
auto* out_dst = ctx.Output<Tensor>("Out_Dst");
out_src->Resize({static_cast<int>(src_merge.size()), 1});
out_dst->Resize({static_cast<int>(src_merge.size()), 1});
T* p_out_src = out_src->mutable_data<T>(ctx.GetPlace());
T* p_out_dst = out_dst->mutable_data<T>(ctx.GetPlace());
const size_t& memset_bytes = src_merge.size() * sizeof(T);
thrust::copy(src_merge.begin(), src_merge.end(), p_out_src);
thrust::copy(dst_merge.begin(), dst_merge.end(), p_out_dst);
}
};
} // namespace operators
} // namespace paddle
using CUDA = phi::GPUContext;
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(graph_khop_sampler,
ops::GraphKhopSamplerOpCUDAKernel<CUDA, int32_t>,
ops::GraphKhopSamplerOpCUDAKernel<CUDA, int64_t>);
| b2b25ec1eec13d9953249ac7dbbfb73dc4520296.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
This file is inspired by
https://github.com/quiver-team/torch-quiver/blob/main/srcs/cpp/src/quiver/cuda/quiver_sample.cu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/set_operations.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#include <ostream>
#ifdef PADDLE_WITH_HIP
#include <hip/hip_runtime.h>
#include <hiprand_kernel.h>
#else
#include <cuda_runtime.h>
#include <curand_kernel.h>
#endif
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/graph_khop_sampler_imp.h"
#include "paddle/fluid/operators/graph_khop_sampler_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/place.h"
constexpr int WARP_SIZE = 32;
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
struct MaxFunctor {
T cap;
HOSTDEVICE explicit inline MaxFunctor(T cap) { this->cap = cap; }
HOSTDEVICE inline T operator()(T x) const {
if (x > cap) {
return cap;
}
return x;
}
};
template <typename T>
struct DegreeFunctor {
const T* dst_count;
HOSTDEVICE explicit inline DegreeFunctor(const T* x) { this->dst_count = x; }
HOSTDEVICE inline T operator()(T i) const {
return dst_count[i + 1] - dst_count[i];
}
};
template <typename T, int BLOCK_WARPS, int TILE_SIZE>
__global__ void GraphSampleNeighborsCUDAKernel(const uint64_t rand_seed,
int k,
const int64_t num_rows,
const T* in_rows,
const T* src,
const T* dst_count,
const T* src_eids,
T* outputs,
T* outputs_eids,
T* output_ptr,
T* output_idxs,
bool return_eids) {
assert(blockDim.x == WARP_SIZE);
assert(blockDim.y == BLOCK_WARPS);
int64_t out_row = blockIdx.x * TILE_SIZE + threadIdx.y;
const int64_t last_row =
min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows);
#ifdef PADDLE_WITH_HIP
hiprandState rng;
hiprand_init(rand_seed * gridDim.x + blockIdx.x,
threadIdx.y * WARP_SIZE + threadIdx.x,
0,
&rng);
#else
curandState rng;
curand_init(rand_seed * gridDim.x + blockIdx.x,
threadIdx.y * WARP_SIZE + threadIdx.x,
0,
&rng);
#endif
while (out_row < last_row) {
const int64_t row = in_rows[out_row];
const int64_t in_row_start = dst_count[row];
const int64_t deg = dst_count[row + 1] - in_row_start;
const int64_t out_row_start = output_ptr[out_row];
if (deg <= k) {
for (int idx = threadIdx.x; idx < deg; idx += WARP_SIZE) {
const T in_idx = in_row_start + idx;
outputs[out_row_start + idx] = src[in_idx];
if (return_eids) {
outputs_eids[out_row_start + idx] = src_eids[in_idx];
}
}
} else {
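      // deg > k: sample k of the deg neighbors reservoir-style. Slots start as indices 0..k-1; each later index tries to claim a random slot, and the atomic max resolves concurrent writers.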
for (int idx = threadIdx.x; idx < k; idx += WARP_SIZE) {
output_idxs[out_row_start + idx] = idx;
}
#ifdef PADDLE_WITH_CUDA
__syncwarp();
#endif
for (int idx = k + threadIdx.x; idx < deg; idx += WARP_SIZE) {
#ifdef PADDLE_WITH_HIP
const int num = hiprand(&rng) % (idx + 1);
#else
const int num = curand(&rng) % (idx + 1);
#endif
if (num < k) {
paddle::platform::CudaAtomicMax(output_idxs + out_row_start + num,
idx);
}
}
#ifdef PADDLE_WITH_CUDA
__syncwarp();
#endif
for (int idx = threadIdx.x; idx < k; idx += WARP_SIZE) {
const T perm_idx = output_idxs[out_row_start + idx] + in_row_start;
outputs[out_row_start + idx] = src[perm_idx];
if (return_eids) {
outputs_eids[out_row_start + idx] = src_eids[perm_idx];
}
}
}
out_row += BLOCK_WARPS;
}
}
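// Expands each unique dst node id into dst_sample_counts[out_row] copies, producing the dst column of the sampled edge list.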
template <typename T, int BLOCK_WARPS, int TILE_SIZE>
__global__ void GetDstEdgeCUDAKernel(const int64_t num_rows,
const T* in_rows,
const T* dst_sample_counts,
const T* dst_ptr,
T* outputs) {
assert(blockDim.x == WARP_SIZE);
assert(blockDim.y == BLOCK_WARPS);
int64_t out_row = blockIdx.x * TILE_SIZE + threadIdx.y;
const int64_t last_row =
min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows);
while (out_row < last_row) {
const int64_t row = in_rows[out_row];
const int64_t dst_sample_size = dst_sample_counts[out_row];
const int64_t out_row_start = dst_ptr[out_row];
for (int idx = threadIdx.x; idx < dst_sample_size; idx += WARP_SIZE) {
outputs[out_row_start + idx] = row;
}
#ifdef PADDLE_WITH_CUDA
__syncwarp();
#endif
out_row += BLOCK_WARPS;
}
}
template <typename T>
void SampleNeighbors(const framework::ExecutionContext& ctx,
const T* src,
const T* dst_count,
const T* src_eids,
thrust::device_vector<T>* inputs,
thrust::device_vector<T>* outputs,
thrust::device_vector<T>* output_counts,
thrust::device_vector<T>* outputs_eids,
int k,
bool is_first_layer,
bool is_last_layer,
bool return_eids) {
const size_t bs = inputs->size();
output_counts->resize(bs);
// 1. Get input nodes' degree.
thrust::transform(inputs->begin(),
inputs->end(),
output_counts->begin(),
DegreeFunctor<T>(dst_count));
// 2. Apply sample size k to get final sample size.
if (k >= 0) {
thrust::transform(output_counts->begin(),
output_counts->end(),
output_counts->begin(),
MaxFunctor<T>(k));
}
// 3. Get the number of total sample neighbors and some necessary datas.
T total_sample_num =
thrust::reduce(output_counts->begin(), output_counts->end());
if (is_first_layer) {
PADDLE_ENFORCE_GT(
total_sample_num,
0,
platform::errors::InvalidArgument(
"The input nodes `X` should have at least one neighbor, "
"but none of the input nodes have neighbors."));
}
outputs->resize(total_sample_num);
if (return_eids) {
outputs_eids->resize(total_sample_num);
}
thrust::device_vector<T> output_ptr;
thrust::device_vector<T> output_idxs;
output_ptr.resize(bs);
output_idxs.resize(total_sample_num);
thrust::exclusive_scan(
output_counts->begin(), output_counts->end(), output_ptr.begin(), 0);
// 4. Run graph sample kernel.
constexpr int BLOCK_WARPS = 128 / WARP_SIZE;
constexpr int TILE_SIZE = BLOCK_WARPS * 16;
const dim3 block(WARP_SIZE, BLOCK_WARPS);
const dim3 grid((bs + TILE_SIZE - 1) / TILE_SIZE);
GraphSampleNeighborsCUDAKernel<T, BLOCK_WARPS, TILE_SIZE>
<<<grid,
block,
0,
reinterpret_cast<const phi::GPUContext&>(ctx.device_context())
.stream()>>>(0,
k,
bs,
thrust::raw_pointer_cast(inputs->data()),
src,
dst_count,
src_eids,
thrust::raw_pointer_cast(outputs->data()),
thrust::raw_pointer_cast(outputs_eids->data()),
thrust::raw_pointer_cast(output_ptr.data()),
thrust::raw_pointer_cast(output_idxs.data()),
return_eids);
// 5. Get inputs = outputs - inputs:
if (!is_last_layer) {
thrust::sort(inputs->begin(), inputs->end());
thrust::device_vector<T> outputs_sort(outputs->size());
thrust::copy(outputs->begin(), outputs->end(), outputs_sort.begin());
thrust::sort(outputs_sort.begin(), outputs_sort.end());
auto outputs_sort_end =
thrust::unique(outputs_sort.begin(), outputs_sort.end());
outputs_sort.resize(
thrust::distance(outputs_sort.begin(), outputs_sort_end));
thrust::device_vector<T> unique_outputs(outputs_sort.size());
auto unique_outputs_end = thrust::set_difference(outputs_sort.begin(),
outputs_sort.end(),
inputs->begin(),
inputs->end(),
unique_outputs.begin());
inputs->resize(
thrust::distance(unique_outputs.begin(), unique_outputs_end));
thrust::copy(unique_outputs.begin(), unique_outputs_end, inputs->begin());
}
}
template <typename T>
void FillHashTable(const framework::ExecutionContext& ctx,
const T* input,
int64_t num_input,
int64_t len_hashtable,
thrust::device_vector<T>* unique_items,
thrust::device_vector<T>* keys,
thrust::device_vector<T>* values,
thrust::device_vector<int64_t>* key_index) {
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
const auto& dev_ctx = ctx.cuda_device_context();
int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int grid_tmp = (num_input + block - 1) / block;
int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
// 1. Insert data into keys and values.
BuildHashTable<T>
<<<grid,
block,
0,
reinterpret_cast<const phi::GPUContext&>(ctx.device_context())
.stream()>>>(input,
num_input,
len_hashtable,
thrust::raw_pointer_cast(keys->data()),
thrust::raw_pointer_cast(key_index->data()));
// 2. Get item index count.
thrust::device_vector<int> item_count(num_input + 1, 0);
GetItemIndexCount<T>
<<<grid,
block,
0,
reinterpret_cast<const phi::GPUContext&>(ctx.device_context())
.stream()>>>(input,
thrust::raw_pointer_cast(item_count.data()),
num_input,
len_hashtable,
thrust::raw_pointer_cast(keys->data()),
thrust::raw_pointer_cast(key_index->data()));
thrust::exclusive_scan(
item_count.begin(), item_count.end(), item_count.begin());
size_t total_unique_items = item_count[num_input];
unique_items->resize(total_unique_items);
// 3. Get unique items.
FillUniqueItems<T>
<<<grid,
block,
0,
reinterpret_cast<const phi::GPUContext&>(ctx.device_context())
.stream()>>>(input,
num_input,
len_hashtable,
thrust::raw_pointer_cast(unique_items->data()),
thrust::raw_pointer_cast(item_count.data()),
thrust::raw_pointer_cast(keys->data()),
thrust::raw_pointer_cast(values->data()),
thrust::raw_pointer_cast(key_index->data()));
}
template <typename T>
void ReindexFunc(const framework::ExecutionContext& ctx,
thrust::device_vector<T>* inputs,
thrust::device_vector<T>* outputs,
thrust::device_vector<T>* subset,
thrust::device_vector<T>* orig_nodes,
thrust::device_vector<T>* reindex_nodes,
int bs) {
subset->resize(inputs->size() + outputs->size());
thrust::copy(inputs->begin(), inputs->end(), subset->begin());
thrust::copy(
outputs->begin(), outputs->end(), subset->begin() + inputs->size());
thrust::device_vector<T> unique_items;
unique_items.clear();
// Fill hash table.
int64_t num = subset->size();
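  // Hash table capacity: the smallest power of two greater than num (between 1x and 2x the key count).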
int64_t log_num = 1 << static_cast<size_t>(1 + std::log2(num >> 1));
int64_t size = log_num << 1;
thrust::device_vector<T> keys(size, -1);
thrust::device_vector<T> values(size, -1);
thrust::device_vector<int64_t> key_index(size, -1);
FillHashTable<T>(ctx,
thrust::raw_pointer_cast(subset->data()),
subset->size(),
size,
&unique_items,
&keys,
&values,
&key_index);
subset->resize(unique_items.size());
thrust::copy(unique_items.begin(), unique_items.end(), subset->begin());
// Fill outputs with reindex result.
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
const auto& dev_ctx = ctx.cuda_device_context();
int64_t max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int64_t grid_tmp = (outputs->size() + block - 1) / block;
int64_t grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
ReindexSrcOutput<T>
<<<grid,
block,
0,
reinterpret_cast<const phi::GPUContext&>(ctx.device_context())
.stream()>>>(thrust::raw_pointer_cast(outputs->data()),
outputs->size(),
size,
thrust::raw_pointer_cast(keys.data()),
thrust::raw_pointer_cast(values.data()));
int grid_ = (bs + block - 1) / block;
ReindexInputNodes<T>
<<<grid_,
block,
0,
reinterpret_cast<const phi::GPUContext&>(ctx.device_context())
.stream()>>>(thrust::raw_pointer_cast(orig_nodes->data()),
bs,
thrust::raw_pointer_cast(reindex_nodes->data()),
size,
thrust::raw_pointer_cast(keys.data()),
thrust::raw_pointer_cast(values.data()));
}
template <typename DeviceContext, typename T>
class GraphKhopSamplerOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
// 1. Get sample neighbors operators' inputs.
auto* src = ctx.Input<Tensor>("Row");
auto* dst_count = ctx.Input<Tensor>("Col_Ptr");
auto* vertices = ctx.Input<Tensor>("X");
std::vector<int> sample_sizes = ctx.Attr<std::vector<int>>("sample_sizes");
bool return_eids = ctx.Attr<bool>("return_eids");
const T* src_data = src->data<T>();
const T* dst_count_data = dst_count->data<T>();
const T* p_vertices = vertices->data<T>();
const int bs = vertices->dims()[0];
// 2. Get unique input nodes(X).
thrust::device_vector<T> inputs(bs);
thrust::copy(p_vertices, p_vertices + bs, inputs.begin());
auto unique_inputs_end = thrust::unique(inputs.begin(), inputs.end());
inputs.resize(thrust::distance(inputs.begin(), unique_inputs_end));
// 3. Sample neighbors. We should distinguish w/o "Src_Eids".
thrust::device_vector<T> outputs;
thrust::device_vector<T> output_counts;
thrust::device_vector<T> outputs_eids;
std::vector<thrust::device_vector<T>> dst_vec;
dst_vec.emplace_back(inputs);
std::vector<thrust::device_vector<T>> outputs_vec;
std::vector<thrust::device_vector<T>> output_counts_vec;
std::vector<thrust::device_vector<T>> outputs_eids_vec;
const size_t num_layers = sample_sizes.size();
bool is_last_layer = false, is_first_layer = true;
if (return_eids) {
auto* src_eids = ctx.Input<Tensor>("Eids");
const T* src_eids_data = src_eids->data<T>();
for (int i = 0; i < num_layers; i++) {
if (i == num_layers - 1) {
is_last_layer = true;
}
if (inputs.size() == 0) {
break;
}
if (i > 0) {
is_first_layer = false;
dst_vec.emplace_back(inputs);
}
SampleNeighbors<T>(ctx,
src_data,
dst_count_data,
src_eids_data,
&inputs,
&outputs,
&output_counts,
&outputs_eids,
sample_sizes[i],
is_first_layer,
is_last_layer,
return_eids);
outputs_vec.emplace_back(outputs);
output_counts_vec.emplace_back(output_counts);
outputs_eids_vec.emplace_back(outputs_eids);
}
} else {
for (int i = 0; i < num_layers; i++) {
if (i == num_layers - 1) {
is_last_layer = true;
}
if (inputs.size() == 0) {
break;
}
if (i > 0) {
is_first_layer = false;
dst_vec.emplace_back(inputs);
}
SampleNeighbors<T>(ctx,
src_data,
dst_count_data,
nullptr,
&inputs,
&outputs,
&output_counts,
&outputs_eids,
sample_sizes[i],
is_first_layer,
is_last_layer,
return_eids);
outputs_vec.emplace_back(outputs);
output_counts_vec.emplace_back(output_counts);
outputs_eids_vec.emplace_back(outputs_eids);
}
}
// 4. Concat intermediate sample results
// Including src_merge, unique_dst_merge and dst_sample_counts_merge.
thrust::device_vector<T> unique_dst_merge; // unique dst
thrust::device_vector<T> src_merge; // src
thrust::device_vector<T> dst_sample_counts_merge; // dst degree
int64_t unique_dst_size = 0, src_size = 0;
for (int i = 0; i < num_layers; i++) {
unique_dst_size += dst_vec[i].size();
src_size += outputs_vec[i].size();
}
unique_dst_merge.resize(unique_dst_size);
src_merge.resize(src_size);
dst_sample_counts_merge.resize(unique_dst_size);
auto unique_dst_merge_ptr = unique_dst_merge.begin();
auto src_merge_ptr = src_merge.begin();
auto dst_sample_counts_merge_ptr = dst_sample_counts_merge.begin();
for (int i = 0; i < num_layers; i++) {
if (i == 0) {
unique_dst_merge_ptr = thrust::copy(
dst_vec[i].begin(), dst_vec[i].end(), unique_dst_merge.begin());
src_merge_ptr = thrust::copy(
outputs_vec[i].begin(), outputs_vec[i].end(), src_merge.begin());
dst_sample_counts_merge_ptr =
thrust::copy(output_counts_vec[i].begin(),
output_counts_vec[i].end(),
dst_sample_counts_merge.begin());
} else {
unique_dst_merge_ptr = thrust::copy(
dst_vec[i].begin(), dst_vec[i].end(), unique_dst_merge_ptr);
src_merge_ptr = thrust::copy(
outputs_vec[i].begin(), outputs_vec[i].end(), src_merge_ptr);
dst_sample_counts_merge_ptr = thrust::copy(output_counts_vec[i].begin(),
output_counts_vec[i].end(),
dst_sample_counts_merge_ptr);
}
}
// 5. Return eids results.
if (return_eids) {
thrust::device_vector<T> eids_merge;
eids_merge.resize(src_size);
auto eids_merge_ptr = eids_merge.begin();
for (int i = 0; i < num_layers; i++) {
if (i == 0) {
eids_merge_ptr = thrust::copy(outputs_eids_vec[i].begin(),
outputs_eids_vec[i].end(),
eids_merge.begin());
} else {
eids_merge_ptr = thrust::copy(outputs_eids_vec[i].begin(),
outputs_eids_vec[i].end(),
eids_merge_ptr);
}
}
auto* out_eids = ctx.Output<Tensor>("Out_Eids");
out_eids->Resize({static_cast<int>(eids_merge.size())});
T* p_out_eids = out_eids->mutable_data<T>(ctx.GetPlace());
thrust::copy(eids_merge.begin(), eids_merge.end(), p_out_eids);
}
int64_t num_sample_edges = thrust::reduce(dst_sample_counts_merge.begin(),
dst_sample_counts_merge.end());
PADDLE_ENFORCE_EQ(
src_merge.size(),
num_sample_edges,
        platform::errors::PreconditionNotMet(
            "Number of sample edges mismatch, the sample kernel has an error."));
// 6. Get hashtable according to unique_dst_merge and src_merge.
// We can get unique items(subset) and reindex src nodes of sample edges.
// We also get Reindex_X for input nodes here.
thrust::device_vector<T> orig_nodes(bs);
thrust::copy(p_vertices, p_vertices + bs, orig_nodes.begin());
thrust::device_vector<T> reindex_nodes(bs);
thrust::device_vector<T> subset;
ReindexFunc<T>(ctx,
&unique_dst_merge,
&src_merge,
&subset,
&orig_nodes,
&reindex_nodes,
bs);
auto* reindex_x = ctx.Output<Tensor>("Reindex_X");
T* p_reindex_x = reindex_x->mutable_data<T>(ctx.GetPlace());
thrust::copy(reindex_nodes.begin(), reindex_nodes.end(), p_reindex_x);
auto* sample_index = ctx.Output<Tensor>("Sample_Index");
sample_index->Resize({static_cast<int>(subset.size())});
T* p_sample_index = sample_index->mutable_data<T>(ctx.GetPlace());
thrust::copy(subset.begin(), subset.end(), p_sample_index); // Done!
// 7. Reindex dst nodes of sample edges.
thrust::device_vector<T> dst_merge(src_size);
thrust::device_vector<T> unique_dst_merge_reindex(unique_dst_size);
thrust::sequence(unique_dst_merge_reindex.begin(),
unique_dst_merge_reindex.end());
thrust::device_vector<T> dst_ptr(unique_dst_size);
thrust::exclusive_scan(dst_sample_counts_merge.begin(),
dst_sample_counts_merge.end(),
dst_ptr.begin());
constexpr int BLOCK_WARPS = 128 / WARP_SIZE;
constexpr int TILE_SIZE = BLOCK_WARPS * 16;
const dim3 block(WARP_SIZE, BLOCK_WARPS);
const dim3 grid((unique_dst_size + TILE_SIZE - 1) / TILE_SIZE);
GetDstEdgeCUDAKernel<T, BLOCK_WARPS, TILE_SIZE>
<<<grid,
block,
0,
reinterpret_cast<const phi::GPUContext&>(ctx.device_context())
.stream()>>>(
unique_dst_size,
thrust::raw_pointer_cast(unique_dst_merge_reindex.data()),
thrust::raw_pointer_cast(dst_sample_counts_merge.data()),
thrust::raw_pointer_cast(dst_ptr.data()),
thrust::raw_pointer_cast(dst_merge.data()));
// 8. Give operator's outputs.
auto* out_src = ctx.Output<Tensor>("Out_Src");
auto* out_dst = ctx.Output<Tensor>("Out_Dst");
out_src->Resize({static_cast<int>(src_merge.size()), 1});
out_dst->Resize({static_cast<int>(src_merge.size()), 1});
T* p_out_src = out_src->mutable_data<T>(ctx.GetPlace());
T* p_out_dst = out_dst->mutable_data<T>(ctx.GetPlace());
const size_t& memset_bytes = src_merge.size() * sizeof(T);
thrust::copy(src_merge.begin(), src_merge.end(), p_out_src);
thrust::copy(dst_merge.begin(), dst_merge.end(), p_out_dst);
}
};
} // namespace operators
} // namespace paddle
using CUDA = phi::GPUContext;
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(graph_khop_sampler,
ops::GraphKhopSamplerOpCUDAKernel<CUDA, int32_t>,
ops::GraphKhopSamplerOpCUDAKernel<CUDA, int64_t>);
|
ff0d68991a79091d14d929508fa25c3bf6bff5e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "IntegrationParamsGPU.h"
#include <hip/hip_runtime.h>
#include "cudaUtil.h"
__constant__ IntegrationParamsGPU c_integrationParams;
IntegrationParamsGPU g_integrationParams;
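// The parameter block is mirrored twice: in __constant__ memory for the GPU path
// and in the plain global g_integrationParams for CPU tracing; Upload() refreshes
// whichever copy the caller selects.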
void IntegrationParamsGPU::Upload(bool cpuTracing) const
{
if(cpuTracing)
{
memcpy(&g_integrationParams, this, sizeof(g_integrationParams));
}
else
{
cudaSafeCall(hipMemcpyToSymbolAsync(c_integrationParams, this, sizeof(*this), 0, hipMemcpyHostToDevice));
}
}
| ff0d68991a79091d14d929508fa25c3bf6bff5e8.cu | #include "IntegrationParamsGPU.h"
#include <cuda_runtime.h>
#include "cudaUtil.h"
__constant__ IntegrationParamsGPU c_integrationParams;
IntegrationParamsGPU g_integrationParams;
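// The parameter block is mirrored twice: in __constant__ memory for the GPU path
// and in the plain global g_integrationParams for CPU tracing; Upload() refreshes
// whichever copy the caller selects.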
void IntegrationParamsGPU::Upload(bool cpuTracing) const
{
if(cpuTracing)
{
memcpy(&g_integrationParams, this, sizeof(g_integrationParams));
}
else
{
cudaSafeCall(cudaMemcpyToSymbolAsync(c_integrationParams, this, sizeof(*this), 0, cudaMemcpyHostToDevice));
}
}
|
7f540874ab90c9400a2ff47b5b71d9f183bb4c78.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mvm_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *A_cuda = NULL;
hipMalloc(&A_cuda, XSIZE*YSIZE*sizeof(double));
double *X_cuda = NULL;
hipMalloc(&X_cuda, XSIZE*YSIZE*sizeof(double));
double *Y_cuda = NULL;
hipMalloc(&Y_cuda, XSIZE*YSIZE*sizeof(double));
int *m_locals_cuda = NULL;
hipMalloc(&m_locals_cuda, XSIZE*YSIZE*sizeof(int));
int *A_all_pos_cuda = NULL;
hipMalloc(&A_all_pos_cuda, XSIZE*YSIZE*sizeof(int));
int n = XSIZE*YSIZE;
int nthreads = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
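// Round the extents up to multiples of the block dimensions so the grid
// covers the whole matrix.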
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(mvm_gpu, dim3(gridBlock), dim3(threadBlock), 0, 0, A_cuda, X_cuda, Y_cuda, m_locals_cuda, A_all_pos_cuda, n, nthreads);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL(mvm_gpu, dim3(gridBlock), dim3(threadBlock), 0, 0, A_cuda, X_cuda, Y_cuda, m_locals_cuda, A_all_pos_cuda, n, nthreads);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL(mvm_gpu, dim3(gridBlock), dim3(threadBlock), 0, 0, A_cuda, X_cuda, Y_cuda, m_locals_cuda, A_all_pos_cuda, n, nthreads);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7f540874ab90c9400a2ff47b5b71d9f183bb4c78.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mvm_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *A_cuda = NULL;
cudaMalloc(&A_cuda, XSIZE*YSIZE*sizeof(double));
double *X_cuda = NULL;
cudaMalloc(&X_cuda, XSIZE*YSIZE*sizeof(double));
double *Y_cuda = NULL;
cudaMalloc(&Y_cuda, XSIZE*YSIZE*sizeof(double));
int *m_locals_cuda = NULL;
cudaMalloc(&m_locals_cuda, XSIZE*YSIZE*sizeof(int));
int *A_all_pos_cuda = NULL;
cudaMalloc(&A_all_pos_cuda, XSIZE*YSIZE*sizeof(int));
int n = XSIZE*YSIZE;
int nthreads = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
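// Round the extents up to multiples of the block dimensions so the grid
// covers the whole matrix.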
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mvm_gpu<<<gridBlock,threadBlock>>>(A_cuda,X_cuda,Y_cuda,m_locals_cuda,A_all_pos_cuda,n,nthreads);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mvm_gpu<<<gridBlock,threadBlock>>>(A_cuda,X_cuda,Y_cuda,m_locals_cuda,A_all_pos_cuda,n,nthreads);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mvm_gpu<<<gridBlock,threadBlock>>>(A_cuda,X_cuda,Y_cuda,m_locals_cuda,A_all_pos_cuda,n,nthreads);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2bec6a617dda54084639601f59aeb4edd92fff49.hip | // !!! This is a file automatically generated by hipify!!!
//=================================================================//
// CUDA BFS kernel
// Topological-Driven: one node per thread, thread_centric,
// no atomic instructions
// Reference:
// Sungpack Hong, et al. Accelerating CUDA graph algorithms
// at maximum warp
//=================================================================//
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
__global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < num_vertex )
{
d_graph_property[tid] = MY_INFINITY;
}
}
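// Level-synchronous expansion: one thread per vertex. A vertex on the current
// frontier (vplist[tid] == curr) labels its unvisited neighbours with curr+1
// and raises *changed so the host launches another level.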
__global__
void kernel(uint32_t * vplist, cudaGraph graph, unsigned curr, bool *changed) {
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
if (vplist[tid]==curr)
{
uint64_t start, end;
start = graph.get_firstedge_index(tid);
end = start + graph.get_vertex_degree(tid);
for (uint64_t i=start; i<end; i++)
{
uint64_t vid = graph.get_edge_dest(i);
if (vplist[vid]==MY_INFINITY)
{
*changed=true;
vplist[vid]=curr+1;
}
}
}
}
void cuda_BFS(uint64_t * vertexlist, uint64_t * degreelist,
uint64_t * edgelist, uint32_t * vproplist,
uint64_t vertex_cnt, uint64_t edge_cnt,
uint64_t root)
{
uint32_t * device_vpl = 0;
bool * device_over = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
hipGetDevice(&device);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If max thread is reached,
// split them into multiple blocks.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
// malloc of gpu side
cudaErrCheck( hipMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( hipMalloc((void**)&device_over, sizeof(bool)) );
hipEvent_t start_event, stop_event;
cudaErrCheck( hipEventCreate(&start_event) );
cudaErrCheck( hipEventCreate(&stop_event) );
// initialization
hipLaunchKernelGGL(( initialize), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, vertex_cnt);
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, degreelist, edgelist, vertex_cnt, edge_cnt);
uint32_t zeronum=0;
// memcpy from host to device
hipEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
cudaErrCheck( hipMemcpy(&(device_vpl[root]), &zeronum, sizeof(uint32_t),
hipMemcpyHostToDevice) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&h2d_copy_time, start_event, stop_event);
// BFS traversal
bool stop;
hipEventRecord(start_event, 0);
int curr=0;
do
{
// Each iteration processes
// one level of BFS traversal
stop = false;
cudaErrCheck( hipMemcpy(device_over, &stop, sizeof(bool), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( kernel), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, d_graph, curr, device_over);
cudaErrCheck( hipMemcpy(&stop, device_over, sizeof(bool), hipMemcpyDeviceToHost) );
curr++;
}while(stop);
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&kernel_time, start_event, stop_event);
hipEventRecord(start_event, 0);
cudaErrCheck( hipMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t),
hipMemcpyDeviceToHost) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&d2h_copy_time, start_event, stop_event);
printf("== iteration #: %d\n", curr);
printf("== host->device copy time: %f ms\n", h2d_copy_time);
printf("== device->host copy time: %f ms\n", d2h_copy_time);
printf("== kernel time: %f ms\n", kernel_time);
hipEventDestroy(start_event);
hipEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
cudaErrCheck( hipFree(device_vpl) );
}
| 2bec6a617dda54084639601f59aeb4edd92fff49.cu | //=================================================================//
// CUDA BFS kernel
// Topological-Driven: one node per thread, thread_centric,
// no atomic instructions
// Reference:
// Sungpack Hong, et al. Accelerating CUDA graph algorithms
// at maximum warp
//=================================================================//
#include <cuda.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
__global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < num_vertex )
{
d_graph_property[tid] = MY_INFINITY;
}
}
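// Level-synchronous expansion: one thread per vertex. A vertex on the current
// frontier (vplist[tid] == curr) labels its unvisited neighbours with curr+1
// and raises *changed so the host launches another level.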
__global__
void kernel(uint32_t * vplist, cudaGraph graph, unsigned curr, bool *changed) {
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
if (vplist[tid]==curr)
{
uint64_t start, end;
start = graph.get_firstedge_index(tid);
end = start + graph.get_vertex_degree(tid);
for (uint64_t i=start; i<end; i++)
{
uint64_t vid = graph.get_edge_dest(i);
if (vplist[vid]==MY_INFINITY)
{
*changed=true;
vplist[vid]=curr+1;
}
}
}
}
void cuda_BFS(uint64_t * vertexlist, uint64_t * degreelist,
uint64_t * edgelist, uint32_t * vproplist,
uint64_t vertex_cnt, uint64_t edge_cnt,
uint64_t root)
{
uint32_t * device_vpl = 0;
bool * device_over = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
cudaGetDevice(&device);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If max thread is reached,
// split them into multiple blocks.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
// malloc of gpu side
cudaErrCheck( cudaMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( cudaMalloc((void**)&device_over, sizeof(bool)) );
cudaEvent_t start_event, stop_event;
cudaErrCheck( cudaEventCreate(&start_event) );
cudaErrCheck( cudaEventCreate(&stop_event) );
// initialization
initialize<<<num_block, num_thread_per_block>>>(device_vpl, vertex_cnt);
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, degreelist, edgelist, vertex_cnt, edge_cnt);
uint32_t zeronum=0;
// memcpy from host to device
cudaEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
cudaErrCheck( cudaMemcpy(&(device_vpl[root]), &zeronum, sizeof(uint32_t),
cudaMemcpyHostToDevice) );
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&h2d_copy_time, start_event, stop_event);
// BFS traversal
bool stop;
cudaEventRecord(start_event, 0);
int curr=0;
do
{
// Each iteration processes
// one level of BFS traversal
stop = false;
cudaErrCheck( cudaMemcpy(device_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) );
kernel<<<num_block, num_thread_per_block>>>(device_vpl, d_graph, curr, device_over);
cudaErrCheck( cudaMemcpy(&stop, device_over, sizeof(bool), cudaMemcpyDeviceToHost) );
curr++;
}while(stop);
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&kernel_time, start_event, stop_event);
cudaEventRecord(start_event, 0);
cudaErrCheck( cudaMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t),
cudaMemcpyDeviceToHost) );
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&d2h_copy_time, start_event, stop_event);
printf("== iteration #: %d\n", curr);
printf("== host->device copy time: %f ms\n", h2d_copy_time);
printf("== device->host copy time: %f ms\n", d2h_copy_time);
printf("== kernel time: %f ms\n", kernel_time);
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
cudaErrCheck( cudaFree(device_vpl) );
}
|
7b865e77a714f52f090ad0bc937e73d221ecfbbd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#define PI 3.1415926535897932384626433832795029f
#define PIx2 6.2831853071795864769252867665590058f
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define K_ELEMS_PER_GRID 2048
#define KERNEL_PHI_MAG_THREADS_PER_BLOCK 512
#define KERNEL_Q_THREADS_PER_BLOCK 256
#define KERNEL_Q_K_ELEMS_PER_GRID 1024
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
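// One tile of k-space samples lives in __constant__ memory; computeQ_GPU below
// refills it (up to KERNEL_Q_K_ELEMS_PER_GRID entries) before each grid launch.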
__constant__ __device__ kValues ck[KERNEL_Q_K_ELEMS_PER_GRID];
__global__ void
ComputePhiMag_GPU(float* phiR, float* phiI, float* phiMag, int numK) {
int indexK = blockIdx.x*KERNEL_PHI_MAG_THREADS_PER_BLOCK + threadIdx.x;
if (indexK < numK) {
float real = phiR[indexK];
float imag = phiI[indexK];
phiMag[indexK] = real*real + imag*imag;
}
}
__global__ void
ComputeQ_GPU(int numK, int kGlobalIndex,
float* x, float* y, float* z, float* Qr , float* Qi)
{
  float sx, sy, sz, sQr, sQi;  // per-thread values; a single __shared__ copy would be raced on by every thread in the block
int xIndex = blockIdx.x*KERNEL_Q_THREADS_PER_BLOCK + threadIdx.x;
sx = x[xIndex];
sy = y[xIndex];
sz = z[xIndex];
sQr = Qr[xIndex];
sQi = Qi[xIndex];
int kIndex = 0;
if (numK % 4)
{
for (int j=0;j<numK%4;j++)
{
float expArg = PIx2 * (ck[j].Kx * sx + ck[j].Ky * sy + ck[j].Kz * sz);
sQr += ck[j].PhiMag * cos(expArg);
sQi += ck[j].PhiMag * sin(expArg);
kIndex++;
kGlobalIndex++;
}
}
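  // Main loop over the samples currently resident in constant memory,
  // manually unrolled by a factor of 4.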
for (; (kIndex < KERNEL_Q_K_ELEMS_PER_GRID) && (kGlobalIndex < numK);
kIndex += 4, kGlobalIndex += 4) {
float expArg = PIx2 * (ck[kIndex].Kx * sx +
ck[kIndex].Ky * sy +
ck[kIndex].Kz * sz);
sQr += ck[kIndex].PhiMag * cos(expArg);
sQi += ck[kIndex].PhiMag * sin(expArg);
int kIndex1 = kIndex + 1;
float expArg1 = PIx2 * (ck[kIndex1].Kx * sx +
ck[kIndex1].Ky * sy +
ck[kIndex1].Kz * sz);
sQr += ck[kIndex1].PhiMag * cos(expArg1);
sQi += ck[kIndex1].PhiMag * sin(expArg1);
int kIndex2 =kIndex+ 2;
float expArg2 = PIx2 * (ck[kIndex2].Kx * sx +
ck[kIndex2].Ky * sy +
ck[kIndex2].Kz * sz);
sQr += ck[kIndex2].PhiMag * cos(expArg2);
sQi += ck[kIndex2].PhiMag * sin(expArg2);
int kIndex3 =kIndex+ 3;
float expArg3 = PIx2 * (ck[kIndex3].Kx * sx +
ck[kIndex3].Ky * sy +
ck[kIndex3].Kz * sz);
sQr += ck[kIndex3].PhiMag * cos(expArg3);
sQi += ck[kIndex3].PhiMag * sin(expArg3);
}
Qr[xIndex] = sQr;
Qi[xIndex] = sQi;
}
void computePhiMag_GPU(int numK, float* phiR_d, float* phiI_d, float* phiMag_d)
{
int phiMagBlocks = (numK-1) / KERNEL_PHI_MAG_THREADS_PER_BLOCK+1;
dim3 DimPhiMagBlock(KERNEL_PHI_MAG_THREADS_PER_BLOCK, 1);
dim3 DimPhiMagGrid(phiMagBlocks, 1);
hipLaunchKernelGGL(( ComputePhiMag_GPU) , dim3(DimPhiMagGrid), dim3(DimPhiMagBlock) , 0, 0,
phiR_d, phiI_d, phiMag_d, numK);
}
void computeQ_GPU(int numK, int numX,
float* x_d, float* y_d, float* z_d,
kValues* kVals,
float* Qr_d, float* Qi_d)
{
int QGrids = (numK-1)/KERNEL_Q_K_ELEMS_PER_GRID+1;
int QBlocks =(numX-1)/KERNEL_Q_THREADS_PER_BLOCK+1;
dim3 DimQBlock(KERNEL_Q_THREADS_PER_BLOCK, 1);
dim3 DimQGrid(QBlocks, 1);
for (int QGrid = 0; QGrid < QGrids; QGrid++) {
int QGridBase = QGrid * KERNEL_Q_K_ELEMS_PER_GRID;
kValues* kValsTile = kVals + QGridBase;
int numElems = MIN(KERNEL_Q_K_ELEMS_PER_GRID, numK - QGridBase);
hipMemcpyToSymbol(ck, kValsTile, numElems * sizeof(kValues), 0);
hipLaunchKernelGGL(( ComputeQ_GPU) , dim3(DimQGrid), dim3(DimQBlock) , 0, 0,
numK, QGridBase, x_d, y_d, z_d, Qr_d, Qi_d);
}
}
void createDataStructsCPU(int numK, int numX, float** phiMag,
float** Qr, float** Qi)
{
*phiMag = (float* ) malloc(numK * sizeof(float));
*Qr = (float*) malloc(numX * sizeof (float));
*Qi = (float*) malloc(numX * sizeof (float));
}
| 7b865e77a714f52f090ad0bc937e73d221ecfbbd.cu | #include <cstdlib>
#define PI 3.1415926535897932384626433832795029f
#define PIx2 6.2831853071795864769252867665590058f
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define K_ELEMS_PER_GRID 2048
#define KERNEL_PHI_MAG_THREADS_PER_BLOCK 512
#define KERNEL_Q_THREADS_PER_BLOCK 256
#define KERNEL_Q_K_ELEMS_PER_GRID 1024
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
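// One tile of k-space samples lives in __constant__ memory; computeQ_GPU below
// refills it (up to KERNEL_Q_K_ELEMS_PER_GRID entries) before each grid launch.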
__constant__ __device__ kValues ck[KERNEL_Q_K_ELEMS_PER_GRID];
__global__ void
ComputePhiMag_GPU(float* phiR, float* phiI, float* phiMag, int numK) {
int indexK = blockIdx.x*KERNEL_PHI_MAG_THREADS_PER_BLOCK + threadIdx.x;
if (indexK < numK) {
float real = phiR[indexK];
float imag = phiI[indexK];
phiMag[indexK] = real*real + imag*imag;
}
}
__global__ void
ComputeQ_GPU(int numK, int kGlobalIndex,
float* x, float* y, float* z, float* Qr , float* Qi)
{
  float sx, sy, sz, sQr, sQi;  // per-thread values; a single __shared__ copy would be raced on by every thread in the block
int xIndex = blockIdx.x*KERNEL_Q_THREADS_PER_BLOCK + threadIdx.x;
sx = x[xIndex];
sy = y[xIndex];
sz = z[xIndex];
sQr = Qr[xIndex];
sQi = Qi[xIndex];
int kIndex = 0;
if (numK % 4)
{
for (int j=0;j<numK%4;j++)
{
float expArg = PIx2 * (ck[j].Kx * sx + ck[j].Ky * sy + ck[j].Kz * sz);
sQr += ck[j].PhiMag * cos(expArg);
sQi += ck[j].PhiMag * sin(expArg);
kIndex++;
kGlobalIndex++;
}
}
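  // Main loop over the samples currently resident in constant memory,
  // manually unrolled by a factor of 4.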
for (; (kIndex < KERNEL_Q_K_ELEMS_PER_GRID) && (kGlobalIndex < numK);
kIndex += 4, kGlobalIndex += 4) {
float expArg = PIx2 * (ck[kIndex].Kx * sx +
ck[kIndex].Ky * sy +
ck[kIndex].Kz * sz);
sQr += ck[kIndex].PhiMag * cos(expArg);
sQi += ck[kIndex].PhiMag * sin(expArg);
int kIndex1 = kIndex + 1;
float expArg1 = PIx2 * (ck[kIndex1].Kx * sx +
ck[kIndex1].Ky * sy +
ck[kIndex1].Kz * sz);
sQr += ck[kIndex1].PhiMag * cos(expArg1);
sQi += ck[kIndex1].PhiMag * sin(expArg1);
int kIndex2 =kIndex+ 2;
float expArg2 = PIx2 * (ck[kIndex2].Kx * sx +
ck[kIndex2].Ky * sy +
ck[kIndex2].Kz * sz);
sQr += ck[kIndex2].PhiMag * cos(expArg2);
sQi += ck[kIndex2].PhiMag * sin(expArg2);
int kIndex3 =kIndex+ 3;
float expArg3 = PIx2 * (ck[kIndex3].Kx * sx +
ck[kIndex3].Ky * sy +
ck[kIndex3].Kz * sz);
sQr += ck[kIndex3].PhiMag * cos(expArg3);
sQi += ck[kIndex3].PhiMag * sin(expArg3);
}
Qr[xIndex] = sQr;
Qi[xIndex] = sQi;
}
void computePhiMag_GPU(int numK, float* phiR_d, float* phiI_d, float* phiMag_d)
{
int phiMagBlocks = (numK-1) / KERNEL_PHI_MAG_THREADS_PER_BLOCK+1;
dim3 DimPhiMagBlock(KERNEL_PHI_MAG_THREADS_PER_BLOCK, 1);
dim3 DimPhiMagGrid(phiMagBlocks, 1);
ComputePhiMag_GPU <<< DimPhiMagGrid, DimPhiMagBlock >>>
(phiR_d, phiI_d, phiMag_d, numK);
}
void computeQ_GPU(int numK, int numX,
float* x_d, float* y_d, float* z_d,
kValues* kVals,
float* Qr_d, float* Qi_d)
{
int QGrids = (numK-1)/KERNEL_Q_K_ELEMS_PER_GRID+1;
int QBlocks =(numX-1)/KERNEL_Q_THREADS_PER_BLOCK+1;
dim3 DimQBlock(KERNEL_Q_THREADS_PER_BLOCK, 1);
dim3 DimQGrid(QBlocks, 1);
for (int QGrid = 0; QGrid < QGrids; QGrid++) {
int QGridBase = QGrid * KERNEL_Q_K_ELEMS_PER_GRID;
kValues* kValsTile = kVals + QGridBase;
int numElems = MIN(KERNEL_Q_K_ELEMS_PER_GRID, numK - QGridBase);
cudaMemcpyToSymbol(ck, kValsTile, numElems * sizeof(kValues), 0);
ComputeQ_GPU <<< DimQGrid, DimQBlock >>>
(numK, QGridBase, x_d, y_d, z_d, Qr_d, Qi_d);
}
}
void createDataStructsCPU(int numK, int numX, float** phiMag,
float** Qr, float** Qi)
{
*phiMag = (float* ) malloc(numK * sizeof(float));
*Qr = (float*) malloc(numX * sizeof (float));
*Qi = (float*) malloc(numX * sizeof (float));
}
|
0956d575e900dbec7bf6d86c003f1afd33a0705c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "CopyVectorKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *from = NULL;
hipMalloc(&from, XSIZE*YSIZE*sizeof(float));
int fromOffset = 1;
float *to = NULL;
hipMalloc(&to, XSIZE*YSIZE*sizeof(float));
int toOffset = 1;
int vectorSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(CopyVectorKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, from, fromOffset, to, toOffset, vectorSize);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL(CopyVectorKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, from, fromOffset, to, toOffset, vectorSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL(CopyVectorKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, from, fromOffset, to, toOffset, vectorSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0956d575e900dbec7bf6d86c003f1afd33a0705c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "CopyVectorKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *from = NULL;
cudaMalloc(&from, XSIZE*YSIZE*sizeof(float));
int fromOffset = 1;
float *to = NULL;
cudaMalloc(&to, XSIZE*YSIZE*sizeof(float));
int toOffset = 1;
int vectorSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
CopyVectorKernel<<<gridBlock,threadBlock>>>(from,fromOffset,to,toOffset,vectorSize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
CopyVectorKernel<<<gridBlock,threadBlock>>>(from,fromOffset,to,toOffset,vectorSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
CopyVectorKernel<<<gridBlock,threadBlock>>>(from,fromOffset,to,toOffset,vectorSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bdad80fd3dba60627dd7e387ea7395ba6e4049e6.hip | // !!! This is a file automatically generated by hipify!!!
#include"cuda_need.h"
void smooth1D_pre_data(char filePath[], int imgsize, float* memcpyHD_1D, float* memcpyDH_1D, float* kernel_1D, float* total_1D)
{
float *d_data;
float *d_out;
float timeDelay;
clock_t begintime, endtime;
clock_t totalbegintime, totalendtime;
float *data = new float[imgsize*imgsize];
readData(filePath, data, imgsize);
totalbegintime = clock();
//printf("\ncuda_smooth1D begin....\n");
hipMalloc((void**)&d_data, sizeof(float)*imgsize*imgsize);
hipMalloc((void**)&d_out, sizeof(float)*imgsize*imgsize);
begintime = clock();
hipMemcpy(d_data, data, sizeof(float)*imgsize*imgsize, hipMemcpyHostToDevice);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*memcpyHD_1D = *memcpyHD_1D + timeDelay;
// printf("in 1D memcpyHD time is :%.3fms\n", timeDelay);
begintime = clock();
dim3 dimBlock(32, 32);
dim3 dimGrid((imgsize + dimBlock.x - 1) / (dimBlock.x), (imgsize + dimBlock.y - 1) / (dimBlock.y));
smooth1D << <dimGrid, dimBlock >> >(d_data, d_out, imgsize, WINSIZE);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*kernel_1D = *kernel_1D + timeDelay;
// printf("in 1D kernel time :%.3fms\n", timeDelay);
begintime = clock();
hipMemcpy(data, d_out, sizeof(float)*imgsize*imgsize, hipMemcpyDeviceToHost);
endtime = clock();
hipDeviceSynchronize();
totalendtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*memcpyDH_1D = *memcpyDH_1D + timeDelay;
// printf("1D memcpyDH time is :%.3fms\n", timeDelay);
timeDelay = (double)(totalendtime - totalbegintime) * 1000 / CLOCKS_PER_SEC;
*total_1D = *total_1D + timeDelay;
// for(int i=0;i<10;i++)
// printf("%.3f ",data[i]);
// printf("\n");
// printf("in 1D total time is:%.3fms\n",timeDelay);
//printf("\n\n");
}
void smooth2D_pre_data(char filepath[], int imgsize, float* memcpyHD_2D, float* memcpyDH_2D, float* kernel_2D, float* total_2D)
{
float *d_data;
float *d_out;
float timeDelay;
size_t pitch;
clock_t begintime, endtime, totalbegintime, totalendtime;
float *data = new float[imgsize*imgsize];
readData(filepath, data, imgsize);
totalbegintime = clock();
// printf("cuda_smooth2D begin.....\n");
hipMallocPitch((void**)&d_data, &pitch, imgsize*sizeof(float), imgsize);
hipMallocPitch((void**)&d_out, &pitch, imgsize*sizeof(float), imgsize);
begintime = clock();
hipMemcpy2D(d_data, pitch, data, imgsize*sizeof(float), imgsize*sizeof(float), imgsize, hipMemcpyHostToDevice);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*memcpyHD_2D = *memcpyHD_2D + timeDelay;
//printf("in 2D memcpyHostToDevice time is :%.3fms\n", timeDelay);
begintime = clock();
// the gpu used maximum number of threads of per block:1024
dim3 dimBlock(32, 32);
//max of grid 2147483647
dim3 dimGrid((imgsize + dimBlock.x - 1) / (dimBlock.x), (imgsize + dimBlock.y - 1) / (dimBlock.y));
smooth_pitch << <dimGrid, dimBlock >> >(d_data, d_out, pitch, imgsize, WINSIZE);
//smooth1D << <dimGrid, dimBlock >> >(d_data, d_out, DATASIZE, WINSIZE);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*kernel_2D = *kernel_2D + timeDelay;
//printf("in 2D kernel function time :%.3fms\n", timeDelay);
begintime = clock();
hipMemcpy2D(data, imgsize*sizeof(float), d_out, pitch, imgsize*sizeof(float), imgsize, hipMemcpyDeviceToHost);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*memcpyDH_2D = *memcpyDH_2D + timeDelay;
totalendtime = clock();
// printf("in 2D memcpyDeviceToHost time is :%.3fms\n", timeDelay);
timeDelay = (double)(totalendtime - totalbegintime) * 1000 / CLOCKS_PER_SEC;
*total_2D = *total_2D + timeDelay;
// printf("in 2D cuda_smooth2D total time is :%.3fms\n", timeDelay);
//printf("\n\n");
}
void smooth_pre_data_cpu(char filePath[], int imgsize, float* total_cpu){
float **data;
int endtime, begintime;
clock_t start, end;
double timedelay;
float timeDelay;
data = (float **)malloc(sizeof(float *)*imgsize);
for (int i = 0; i < imgsize; i++)
data[i] = (float *)malloc(sizeof(float)*imgsize);
readData2D(filePath, data);
start = clock();
do_smooth(data, imgsize);
end = clock();
timeDelay = (double)(end - start) * 1000 / CLOCKS_PER_SEC;
*total_cpu = *total_cpu + timeDelay;
printf("\nsmooth_cpu starting....\n");
printf("do_smooth_cpu:%.3fms\n", timeDelay);
}
void do_smooth(float **data, int imgsize){
int sum, count;
int x, y;
for (int i = 0; i < imgsize; i++)
{
for (int j = 0; j < imgsize; j++){
sum = 0;
count = 0;
for (int m = 0; m < WINSIZE; m++){
for (int n = 0; n < WINSIZE; n++){
x = i + m;
y = j + n;
if (x < imgsize&&y < imgsize){
sum += data[x][y];
count++;
}
}
}
data[i][j] = sum / count;
}
}
}
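// mallocHost / mallocHostAll below time a zero-copy path: the output buffer
// (and, in mallocHostAll, the input too) is mapped, write-combined pinned host
// memory, so the kernel writes results straight to host memory without an
// explicit device-to-host copy.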
void mallocHost(char filepath[], int datasize, float *kernel_HOST, float *total_HOST){
float *data = new float[datasize*datasize];
float *host2dev;
float *d_data;
float *dev_host2dev;
float delay;
clock_t begintime, endtime, totalbegintime, totalendtime;
hipSetDeviceFlags(hipDeviceMapHost);
hipMalloc((void**)&d_data, sizeof(float)*datasize*datasize);
readData(filepath, data, datasize);
totalbegintime = clock();
hipMemcpy(d_data, data, sizeof(float)*datasize*datasize, hipMemcpyHostToDevice);
// hipHostMalloc((void**)&data, sizeof(float)*datasize*datasize, hipHostMallocMapped | hipHostMallocWriteCombined);
hipHostMalloc((void**)&host2dev, sizeof(float)*datasize*datasize, hipHostMallocMapped | hipHostMallocWriteCombined);
  // write-combined pinned memory (hipHostMallocWriteCombined) speeds up GPU reads of this buffer, but if the CPU also needs to read it, performance drops
hipHostGetDevicePointer(&dev_host2dev, host2dev, 0);
// hipHostGetDevicePointer(&d_data, data, 0);
begintime = clock();
dim3 dimBlock(32, 32);
dim3 dimGrid((datasize + dimBlock.x - 1) / (dimBlock.x), (datasize + dimBlock.y - 1) / (dimBlock.y));
smooth1D << <dimGrid, dimBlock >> >(d_data, dev_host2dev, datasize, WINSIZE);
hipDeviceSynchronize();
endtime = clock();
totalendtime = clock();
delay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*kernel_HOST = *kernel_HOST + delay;
printf("in function kernel_HOST:%.3f\n", delay);
delay = (double)(totalendtime - totalbegintime) * 1000 / CLOCKS_PER_SEC;
*total_HOST = *total_HOST + delay;
printf("in funtcion total_HOST:%.3f\n", delay);
//hipMemcpy(data, d_data, sizeof(float)*datasize*datasize, hipMemcpyDeviceToHost);
// for (int i = 0; i < datasize*datasize; i++){
// if (i%datasize == 0)
// printf("\n");
// printf("%.3f ", host2dev[i]);
// }
// printf("\n");
/* for (int i = 0; i <10; i++){
if (i%datasize == 0)
printf("\n");
printf("%.3f ", host2dev[i]);
}
printf("\n");*/
hipFree(d_data);
hipHostFree(dev_host2dev);
}
void mallocHostAll(char filepath[], int datasize, float *kernel_HOST, float *total_HOST){
float *data;
float *host2dev;
float *d_data;
float *dev_host2dev;
float delay;
clock_t begintime, endtime, totalbegintime, totalendtime;
hipSetDeviceFlags(hipDeviceMapHost);
hipHostMalloc((void**)&data, sizeof(float)*datasize*datasize, hipHostMallocMapped | hipHostMallocWriteCombined);
hipHostMalloc((void**)&host2dev, sizeof(float)*datasize*datasize, hipHostMallocMapped | hipHostMallocWriteCombined);
  // write-combined pinned memory (hipHostMallocWriteCombined) speeds up GPU reads of this buffer, but if the CPU also needs to read it, performance drops
readData(filepath, data, datasize);
totalbegintime = clock();
hipHostGetDevicePointer(&dev_host2dev, host2dev, 0);
hipHostGetDevicePointer(&d_data, data, 0);
begintime = clock();
dim3 dimBlock(32, 32);
dim3 dimGrid((datasize + dimBlock.x - 1) / (dimBlock.x), (datasize + dimBlock.y - 1) / (dimBlock.y));
smooth1D << <dimGrid, dimBlock >> >(d_data, dev_host2dev, datasize, WINSIZE);
hipDeviceSynchronize();
endtime = clock();
totalendtime = clock();
delay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*kernel_HOST = *kernel_HOST + delay;
printf("in function kernel_HOSTALL:%.3f\n", delay);
delay = (double)(totalendtime - totalbegintime) * 1000 / CLOCKS_PER_SEC;
*total_HOST = *total_HOST + delay;
printf("in funtcion total_HOSTALL:%.3f\n", delay);
//hipMemcpy(data, d_data, sizeof(float)*datasize*datasize, hipMemcpyDeviceToHost);
// for (int i = 0; i < datasize*datasize; i++){
// if (i%datasize == 0)
// printf("\n");
// printf("%.3f ", host2dev[i]);
// }
// printf("\n");
/* for (int i = 0; i <10; i++){
if (i%datasize == 0)
printf("\n");
printf("%.3f ", host2dev[i]);
}
printf("\n");*/
hipFree(d_data);
hipHostFree(dev_host2dev);
}
void mallocHostDefault(char filepath[], int datasize, float *memcpyDH_hostDefault,float *kernel_hostDefault, float *total_hostDefault){
float *data=new float[datasize*datasize];
float *d_data;
float *out;
float delay;
clock_t begintime, endtime, totalbegintime, totalendtime;
readData(filepath, data, datasize);
totalbegintime = clock();
hipMalloc((void**)&d_data, sizeof(float)*datasize*datasize);
begintime=clock();
hipMemcpy(d_data, data, sizeof(float)*datasize*datasize, hipMemcpyHostToDevice);
endtime=clock();
delay=(double)(endtime-begintime)*1000/CLOCKS_PER_SEC;
// printf("int function mallocHostDefault memcpyHD:%.3f\n",delay);
hipHostMalloc((void**)&out, sizeof(float)*datasize*datasize,hipHostMallocDefault);
begintime = clock();
dim3 dimBlock(32, 32);
dim3 dimGrid((datasize + dimBlock.x - 1) / (dimBlock.x), (datasize + dimBlock.y - 1) / (dimBlock.y));
smooth1D << <dimGrid, dimBlock >> >(d_data, out, datasize, WINSIZE);
hipDeviceSynchronize();
endtime = clock();
delay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*kernel_hostDefault = *kernel_hostDefault + delay;
// printf("in function mallocHostDefault kernel :%.3f\n", delay);
begintime = clock();
hipMemcpy(data, out, sizeof(float)*datasize*datasize, hipMemcpyDeviceToHost);
endtime = clock();
totalendtime = clock();
delay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*memcpyDH_hostDefault = *memcpyDH_hostDefault + delay;
//printf("in function mallocHostDefault memcpyDH:%.3f\n", delay);
delay = (double)(totalendtime - totalbegintime) * 1000 / CLOCKS_PER_SEC;
*total_hostDefault = *total_hostDefault + delay;
//printf("in function mallocHostDefault totaltime:%.3f\n", delay);
}
| bdad80fd3dba60627dd7e387ea7395ba6e4049e6.cu |
#include"cuda_need.h"
void smooth1D_pre_data(char filePath[], int imgsize, float* memcpyHD_1D, float* memcpyDH_1D, float* kernel_1D, float* total_1D)
{
float *d_data;
float *d_out;
float timeDelay;
clock_t begintime, endtime;
clock_t totalbegintime, totalendtime;
float *data = new float[imgsize*imgsize];
readData(filePath, data, imgsize);
totalbegintime = clock();
//printf("\ncuda_smooth1D begin....\n");
cudaMalloc((void**)&d_data, sizeof(float)*imgsize*imgsize);
cudaMalloc((void**)&d_out, sizeof(float)*imgsize*imgsize);
begintime = clock();
cudaMemcpy(d_data, data, sizeof(float)*imgsize*imgsize, cudaMemcpyHostToDevice);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*memcpyHD_1D = *memcpyHD_1D + timeDelay;
// printf("in 1D memcpyHD time is :%.3fms\n", timeDelay);
begintime = clock();
dim3 dimBlock(32, 32);
dim3 dimGrid((imgsize + dimBlock.x - 1) / (dimBlock.x), (imgsize + dimBlock.y - 1) / (dimBlock.y));
smooth1D << <dimGrid, dimBlock >> >(d_data, d_out, imgsize, WINSIZE);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*kernel_1D = *kernel_1D + timeDelay;
// printf("in 1D kernel time :%.3fms\n", timeDelay);
begintime = clock();
cudaMemcpy(data, d_out, sizeof(float)*imgsize*imgsize, cudaMemcpyDeviceToHost);
endtime = clock();
cudaThreadSynchronize();
totalendtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*memcpyDH_1D = *memcpyDH_1D + timeDelay;
// printf("1D memcpyDH time is :%.3fms\n", timeDelay);
timeDelay = (double)(totalendtime - totalbegintime) * 1000 / CLOCKS_PER_SEC;
*total_1D = *total_1D + timeDelay;
// for(int i=0;i<10;i++)
// printf("%.3f ",data[i]);
// printf("\n");
// printf("in 1D total time is:%.3fms\n",timeDelay);
//printf("\n\n");
}
void smooth2D_pre_data(char filepath[], int imgsize, float* memcpyHD_2D, float* memcpyDH_2D, float* kernel_2D, float* total_2D)
{
float *d_data;
float *d_out;
float timeDelay;
size_t pitch;
clock_t begintime, endtime, totalbegintime, totalendtime;
float *data = new float[imgsize*imgsize];
readData(filepath, data, imgsize);
totalbegintime = clock();
// printf("cuda_smooth2D begin.....\n");
cudaMallocPitch((void**)&d_data, &pitch, imgsize*sizeof(float), imgsize);
cudaMallocPitch((void**)&d_out, &pitch, imgsize*sizeof(float), imgsize);
begintime = clock();
cudaMemcpy2D(d_data, pitch, data, imgsize*sizeof(float), imgsize*sizeof(float), imgsize, cudaMemcpyHostToDevice);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*memcpyHD_2D = *memcpyHD_2D + timeDelay;
//printf("in 2D memcpyHostToDevice time is :%.3fms\n", timeDelay);
begintime = clock();
// the gpu used maximum number of threads of per block:1024
dim3 dimBlock(32, 32);
//max of grid 2147483647
dim3 dimGrid((imgsize + dimBlock.x - 1) / (dimBlock.x), (imgsize + dimBlock.y - 1) / (dimBlock.y));
smooth_pitch << <dimGrid, dimBlock >> >(d_data, d_out, pitch, imgsize, WINSIZE);
//smooth1D << <dimGrid, dimBlock >> >(d_data, d_out, DATASIZE, WINSIZE);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*kernel_2D = *kernel_2D + timeDelay;
//printf("in 2D kernel function time :%.3fms\n", timeDelay);
begintime = clock();
cudaMemcpy2D(data, imgsize*sizeof(float), d_out, pitch, imgsize*sizeof(float), imgsize, cudaMemcpyDeviceToHost);
endtime = clock();
timeDelay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*memcpyDH_2D = *memcpyDH_2D + timeDelay;
totalendtime = clock();
// printf("in 2D memcpyDeviceToHost time is :%.3fms\n", timeDelay);
timeDelay = (double)(totalendtime - totalbegintime) * 1000 / CLOCKS_PER_SEC;
*total_2D = *total_2D + timeDelay;
// printf("in 2D cuda_smooth2D total time is :%.3fms\n", timeDelay);
//printf("\n\n");
}
void smooth_pre_data_cpu(char filePath[], int imgsize, float* total_cpu){
float **data;
int endtime, begintime;
clock_t start, end;
double timedelay;
float timeDelay;
data = (float **)malloc(sizeof(float *)*imgsize);
for (int i = 0; i < imgsize; i++)
data[i] = (float *)malloc(sizeof(float)*imgsize);
readData2D(filePath, data);
start = clock();
do_smooth(data, imgsize);
end = clock();
timeDelay = (double)(end - start) * 1000 / CLOCKS_PER_SEC;
*total_cpu = *total_cpu + timeDelay;
printf("\nsmooth_cpu starting....\n");
printf("do_smooth_cpu:%.3fms\n", timeDelay);
}
void do_smooth(float **data, int imgsize){
int sum, count;
int x, y;
for (int i = 0; i < imgsize; i++)
{
for (int j = 0; j < imgsize; j++){
sum = 0;
count = 0;
for (int m = 0; m < WINSIZE; m++){
for (int n = 0; n < WINSIZE; n++){
x = i + m;
y = j + n;
if (x < imgsize&&y < imgsize){
sum += data[x][y];
count++;
}
}
}
data[i][j] = sum / count;
}
}
}
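// mallocHost / mallocHostAll below time a zero-copy path: the output buffer
// (and, in mallocHostAll, the input too) is mapped, write-combined pinned host
// memory, so the kernel writes results straight to host memory without an
// explicit device-to-host copy.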
void mallocHost(char filepath[], int datasize, float *kernel_HOST, float *total_HOST){
float *data = new float[datasize*datasize];
float *host2dev;
float *d_data;
float *dev_host2dev;
float delay;
clock_t begintime, endtime, totalbegintime, totalendtime;
cudaSetDeviceFlags(cudaDeviceMapHost);
cudaMalloc((void**)&d_data, sizeof(float)*datasize*datasize);
readData(filepath, data, datasize);
totalbegintime = clock();
cudaMemcpy(d_data, data, sizeof(float)*datasize*datasize, cudaMemcpyHostToDevice);
// cudaHostAlloc((void**)&data, sizeof(float)*datasize*datasize, cudaHostAllocMapped | cudaHostAllocWriteCombined);
cudaHostAlloc((void**)&host2dev, sizeof(float)*datasize*datasize, cudaHostAllocMapped | cudaHostAllocWriteCombined);
  // cudaHostAllocWriteCombined: write-combined pinned memory speeds up GPU reads of this buffer, but if the CPU also needs to read it, performance drops
cudaHostGetDevicePointer(&dev_host2dev, host2dev, 0);
// cudaHostGetDevicePointer(&d_data, data, 0);
begintime = clock();
dim3 dimBlock(32, 32);
dim3 dimGrid((datasize + dimBlock.x - 1) / (dimBlock.x), (datasize + dimBlock.y - 1) / (dimBlock.y));
smooth1D << <dimGrid, dimBlock >> >(d_data, dev_host2dev, datasize, WINSIZE);
cudaThreadSynchronize();
endtime = clock();
totalendtime = clock();
delay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*kernel_HOST = *kernel_HOST + delay;
printf("in function kernel_HOST:%.3f\n", delay);
delay = (double)(totalendtime - totalbegintime) * 1000 / CLOCKS_PER_SEC;
*total_HOST = *total_HOST + delay;
printf("in funtcion total_HOST:%.3f\n", delay);
//cudaMemcpy(data, d_data, sizeof(float)*datasize*datasize, cudaMemcpyDeviceToHost);
// for (int i = 0; i < datasize*datasize; i++){
// if (i%datasize == 0)
// printf("\n");
// printf("%.3f ", host2dev[i]);
// }
// printf("\n");
/* for (int i = 0; i <10; i++){
if (i%datasize == 0)
printf("\n");
printf("%.3f ", host2dev[i]);
}
printf("\n");*/
cudaFree(d_data);
cudaFreeHost(dev_host2dev);
}
void mallocHostAll(char filepath[], int datasize, float *kernel_HOST, float *total_HOST){
float *data;
float *host2dev;
float *d_data;
float *dev_host2dev;
float delay;
clock_t begintime, endtime, totalbegintime, totalendtime;
cudaSetDeviceFlags(cudaDeviceMapHost);
cudaHostAlloc((void**)&data, sizeof(float)*datasize*datasize, cudaHostAllocMapped | cudaHostAllocWriteCombined);
cudaHostAlloc((void**)&host2dev, sizeof(float)*datasize*datasize, cudaHostAllocMapped | cudaHostAllocWriteCombined);
  // cudaHostAllocWriteCombined: write-combined pinned memory speeds up GPU reads of this buffer, but if the CPU also needs to read it, performance drops
readData(filepath, data, datasize);
totalbegintime = clock();
cudaHostGetDevicePointer(&dev_host2dev, host2dev, 0);
cudaHostGetDevicePointer(&d_data, data, 0);
begintime = clock();
dim3 dimBlock(32, 32);
dim3 dimGrid((datasize + dimBlock.x - 1) / (dimBlock.x), (datasize + dimBlock.y - 1) / (dimBlock.y));
smooth1D << <dimGrid, dimBlock >> >(d_data, dev_host2dev, datasize, WINSIZE);
cudaThreadSynchronize();
endtime = clock();
totalendtime = clock();
delay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*kernel_HOST = *kernel_HOST + delay;
printf("in function kernel_HOSTALL:%.3f\n", delay);
delay = (double)(totalendtime - totalbegintime) * 1000 / CLOCKS_PER_SEC;
*total_HOST = *total_HOST + delay;
printf("in funtcion total_HOSTALL:%.3f\n", delay);
//cudaMemcpy(data, d_data, sizeof(float)*datasize*datasize, cudaMemcpyDeviceToHost);
// for (int i = 0; i < datasize*datasize; i++){
// if (i%datasize == 0)
// printf("\n");
// printf("%.3f ", host2dev[i]);
// }
// printf("\n");
/* for (int i = 0; i <10; i++){
if (i%datasize == 0)
printf("\n");
printf("%.3f ", host2dev[i]);
}
printf("\n");*/
cudaFree(d_data);
cudaFreeHost(dev_host2dev);
}
void mallocHostDefault(char filepath[], int datasize, float *memcpyDH_hostDefault,float *kernel_hostDefault, float *total_hostDefault){
float *data=new float[datasize*datasize];
float *d_data;
float *out;
float delay;
clock_t begintime, endtime, totalbegintime, totalendtime;
readData(filepath, data, datasize);
totalbegintime = clock();
cudaMalloc((void**)&d_data, sizeof(float)*datasize*datasize);
begintime=clock();
cudaMemcpy(d_data, data, sizeof(float)*datasize*datasize, cudaMemcpyHostToDevice);
endtime=clock();
delay=(double)(endtime-begintime)*1000/CLOCKS_PER_SEC;
// printf("int function mallocHostDefault memcpyHD:%.3f\n",delay);
cudaHostAlloc((void**)&out, sizeof(float)*datasize*datasize,cudaHostAllocDefault);
begintime = clock();
dim3 dimBlock(32, 32);
dim3 dimGrid((datasize + dimBlock.x - 1) / (dimBlock.x), (datasize + dimBlock.y - 1) / (dimBlock.y));
smooth1D << <dimGrid, dimBlock >> >(d_data, out, datasize, WINSIZE);
cudaThreadSynchronize();
endtime = clock();
delay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*kernel_hostDefault = *kernel_hostDefault + delay;
// printf("in function mallocHostDefault kernel :%.3f\n", delay);
begintime = clock();
cudaMemcpy(data, out, sizeof(float)*datasize*datasize, cudaMemcpyDeviceToHost);
endtime = clock();
totalendtime = clock();
delay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*memcpyDH_hostDefault = *memcpyDH_hostDefault + delay;
//printf("in function mallocHostDefault memcpyDH:%.3f\n", delay);
delay = (double)(totalendtime - totalbegintime) * 1000 / CLOCKS_PER_SEC;
*total_hostDefault = *total_hostDefault + delay;
//printf("in function mallocHostDefault totaltime:%.3f\n", delay);
}
|
8021d73c641ed05733feee475ef3fa5d21d03422.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mex_projection.cuh" // consists all required package and functions
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
// Macro for input and output
#define IN_IMG prhs[0]
#define GEO_PARA prhs[1]
#define OUT_PROJ plhs[0]
int nx, ny, nz, na, nb, numImg, numBytesImg, numSingleProj, numBytesSingleProj;
float da, db, ai, bi, SO, SD, angle;
// resolutions of volumes
if (mxGetField(GEO_PARA, 0, "nx") != NULL)
nx = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nx"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution nx.\n");
if (mxGetField(GEO_PARA, 0, "ny") != NULL)
ny = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "ny"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution ny.\n");
if (mxGetField(GEO_PARA, 0, "nz") != NULL)
nz = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nz"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution nz.\n");
numImg = nx * ny * nz; // size of image
numBytesImg = numImg * sizeof(float); // number of bytes in image
// detector plane resolutions
if (mxGetField(GEO_PARA, 0, "na") != NULL)
na = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "na"));
else if (mxGetField(GEO_PARA, 0, "nv") != NULL)
na = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nv"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid number of detector in plane, which is denoted as na or nu.\n");
if (mxGetField(GEO_PARA, 0, "nb") != NULL)
nb = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nb"));
else if (mxGetField(GEO_PARA, 0, "nu") != NULL)
nb = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nu"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid number of detector across plane, which is denoted as nb or nv.\n");
numSingleProj = na * nb;
numBytesSingleProj = numSingleProj * sizeof(float);
// detector resolution
if (mxGetField(GEO_PARA, 0, "da") != NULL)
da = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "da"));
else{
da = 1.0f;
mexPrintf("Automatically set detector cell size da to 1. \n");
mexPrintf("If don't want that default value, please set para.da manually.\n");
}
if (mxGetField(GEO_PARA, 0, "db") != NULL)
db = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "db"));
else{
db = 1.0f;
mexPrintf("Automatically set detectof cell size db to 1. \n");
mexPrintf("If don't want that default value, please set para.db manually.\n");
}
// detector plane offset from centered calibrations
if (mxGetField(GEO_PARA, 0, "ai") != NULL){
ai = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "ai"));
ai -= (float)na / 2 - 0.5f;
}
else{
mexPrintf("Automatically set detector offset ai to 0. \n");
mexPrintf("If don't want that default value, please set para.ai manually.\n");
ai = - (float)na / 2 + 0.5f;
}
if (mxGetField(GEO_PARA, 0, "bi") != NULL){
bi = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "bi"));
if (bi > -1)
bi -= (float)nb / 2 - 0.5f;
}
else{
mexPrintf("Automatically set detector offset bi to 0. \n");
mexPrintf("If don't want that default value, please set para.bi manually.\n");
bi = - (float)nb / 2 + 0.5f;
}
if (mxGetField(GEO_PARA, 0, "SO") != NULL)
SO = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SO"));
else if (mxGetField(GEO_PARA, 0, "SI") != NULL)
SO = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SI"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid distance between source and isocenter, which is denoted with para.SO or para.DI.\n");
if (mxGetField(GEO_PARA, 0, "SD") != NULL)
SD = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SD"));
else if (mxGetField(GEO_PARA, 0, "DI") != NULL)
SD = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "DI")) + SO;
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid distance between source and detector plane, which is denoted with para.SD or para.SI + para.DI.\n");
if (mxGetField(GEO_PARA, 0, "angle") != NULL)
angle = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "angle"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid projection angle, which is denoted with para.angle.\n");
float *d_img, *d_proj;
hipMalloc((void**)&d_img, nx * ny * nz * sizeof(float));
float *h_img;
h_img = (float*)mxGetData(IN_IMG);
hipMemcpy(d_img, h_img, nx * ny * nz * sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**)&d_proj, na * nb * sizeof(float));
const dim3 gridSize_singleProj((na + BLOCKWIDTH - 1) / BLOCKWIDTH, (nb + BLOCKHEIGHT - 1) / BLOCKHEIGHT, 1);
const dim3 blockSize(BLOCKWIDTH,BLOCKHEIGHT, BLOCKDEPTH);
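    // Grid covers the na x nb detector plane; kernel_projection (provided by
    // mex_projection.cuh) fills d_proj from d_img for the given angle and geometry.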
    hipLaunchKernelGGL(kernel_projection, dim3(gridSize_singleProj), dim3(blockSize), 0, 0, d_proj, d_img, angle, SO, SD, da, na, ai, db, nb, bi, nx, ny, nz);
hipDeviceSynchronize();
OUT_PROJ = mxCreateNumericMatrix(0, 0, mxSINGLE_CLASS, mxREAL);
const mwSize outDim[2] = {(mwSize)na, (mwSize)nb};
mxSetDimensions(OUT_PROJ, outDim, 2);
mxSetData(OUT_PROJ, mxMalloc(na * nb * sizeof(float)));
float *h_outproj = (float*)mxGetData(OUT_PROJ);
hipMemcpy(h_outproj, d_proj, numBytesSingleProj, hipMemcpyDeviceToHost);
hipFree(d_proj);
hipFree(d_img);
hipDeviceReset();
return;
}
| 8021d73c641ed05733feee475ef3fa5d21d03422.cu | #include "mex_projection.cuh" // consists all required package and functions
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
// Macro for input and output
#define IN_IMG prhs[0]
#define GEO_PARA prhs[1]
#define OUT_PROJ plhs[0]
int nx, ny, nz, na, nb, numImg, numBytesImg, numSingleProj, numBytesSingleProj;
float da, db, ai, bi, SO, SD, angle;
// resolutions of volumes
if (mxGetField(GEO_PARA, 0, "nx") != NULL)
nx = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nx"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution nx.\n");
if (mxGetField(GEO_PARA, 0, "ny") != NULL)
ny = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "ny"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution ny.\n");
if (mxGetField(GEO_PARA, 0, "nz") != NULL)
nz = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nz"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution nz.\n");
numImg = nx * ny * nz; // size of image
numBytesImg = numImg * sizeof(float); // number of bytes in image
// detector plane resolutions
if (mxGetField(GEO_PARA, 0, "na") != NULL)
na = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "na"));
else if (mxGetField(GEO_PARA, 0, "nv") != NULL)
na = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nv"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid number of detector in plane, which is denoted as na or nu.\n");
if (mxGetField(GEO_PARA, 0, "nb") != NULL)
nb = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nb"));
else if (mxGetField(GEO_PARA, 0, "nu") != NULL)
nb = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nu"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid number of detector across plane, which is denoted as nb or nv.\n");
numSingleProj = na * nb;
numBytesSingleProj = numSingleProj * sizeof(float);
// detector resolution
if (mxGetField(GEO_PARA, 0, "da") != NULL)
da = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "da"));
else{
da = 1.0f;
mexPrintf("Automatically set detector cell size da to 1. \n");
mexPrintf("If don't want that default value, please set para.da manually.\n");
}
if (mxGetField(GEO_PARA, 0, "db") != NULL)
db = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "db"));
else{
db = 1.0f;
mexPrintf("Automatically set detectof cell size db to 1. \n");
mexPrintf("If don't want that default value, please set para.db manually.\n");
}
// detector plane offset from centered calibrations
if (mxGetField(GEO_PARA, 0, "ai") != NULL){
ai = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "ai"));
ai -= (float)na / 2 - 0.5f;
}
else{
mexPrintf("Automatically set detector offset ai to 0. \n");
mexPrintf("If don't want that default value, please set para.ai manually.\n");
ai = - (float)na / 2 + 0.5f;
}
if (mxGetField(GEO_PARA, 0, "bi") != NULL){
bi = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "bi"));
if (bi > -1)
bi -= (float)nb / 2 - 0.5f;
}
else{
mexPrintf("Automatically set detector offset bi to 0. \n");
mexPrintf("If don't want that default value, please set para.bi manually.\n");
bi = - (float)nb / 2 + 0.5f;
}
if (mxGetField(GEO_PARA, 0, "SO") != NULL)
SO = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SO"));
else if (mxGetField(GEO_PARA, 0, "SI") != NULL)
SO = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SI"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid distance between source and isocenter, which is denoted with para.SO or para.DI.\n");
if (mxGetField(GEO_PARA, 0, "SD") != NULL)
SD = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "SD"));
else if (mxGetField(GEO_PARA, 0, "DI") != NULL)
SD = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "DI")) + SO;
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid distance between source and detector plane, which is denoted with para.SD or para.SI + para.DI.\n");
if (mxGetField(GEO_PARA, 0, "angle") != NULL)
angle = (float)mxGetScalar(mxGetField(GEO_PARA, 0, "angle"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid projection angle, which is denoted with para.angle.\n");
float *d_img, *d_proj;
cudaMalloc((void**)&d_img, nx * ny * nz * sizeof(float));
float *h_img;
h_img = (float*)mxGetData(IN_IMG);
cudaMemcpy(d_img, h_img, nx * ny * nz * sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_proj, na * nb * sizeof(float));
const dim3 gridSize_singleProj((na + BLOCKWIDTH - 1) / BLOCKWIDTH, (nb + BLOCKHEIGHT - 1) / BLOCKHEIGHT, 1);
const dim3 blockSize(BLOCKWIDTH,BLOCKHEIGHT, BLOCKDEPTH);
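    // Grid covers the na x nb detector plane; kernel_projection (provided by
    // mex_projection.cuh) fills d_proj from d_img for the given angle and geometry.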
kernel_projection<<<gridSize_singleProj, blockSize>>>(d_proj, d_img, angle, SO, SD, da, na, ai, db, nb, bi, nx, ny, nz);
cudaDeviceSynchronize();
OUT_PROJ = mxCreateNumericMatrix(0, 0, mxSINGLE_CLASS, mxREAL);
const mwSize outDim[2] = {(mwSize)na, (mwSize)nb};
mxSetDimensions(OUT_PROJ, outDim, 2);
mxSetData(OUT_PROJ, mxMalloc(na * nb * sizeof(float)));
float *h_outproj = (float*)mxGetData(OUT_PROJ);
cudaMemcpy(h_outproj, d_proj, numBytesSingleProj, cudaMemcpyDeviceToHost);
cudaFree(d_proj);
cudaFree(d_img);
cudaDeviceReset();
return;
}
|
1dc01742b291c8cb67f13c1986dbda695fd85474.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <cstring> // needed for memset
#include <float_vector.h>
#include <tune_quda.h>
#include <typeinfo>
#include <quda_internal.h>
#include <blas_quda.h>
#include <color_spinor_field.h>
#include <face_quda.h> // this is where the MPI / QMP depdendent code is
#define checkSpinor(a, b) \
{ \
if (a.Precision() != b.Precision()) \
errorQuda("precisions do not match: %d %d", a.Precision(), b.Precision()); \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %d %d", a.Length(), b.Length()); \
if (a.Stride() != b.Stride()) \
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \
}
#define checkLength(a, b) \
{ \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %d %d", a.Length(), b.Length()); \
if (a.Stride() != b.Stride()) \
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \
}
namespace quda {
#include <texture.h>
unsigned long long blas_flops;
unsigned long long blas_bytes;
void zeroCuda(cudaColorSpinorField &a) { a.zero(); }
static hipStream_t *blasStream;
static struct {
const char *vol_str;
const char *aux_str;
} blasStrings;
void initReduce();
void endReduce();
void initBlas()
{
blasStream = &streams[Nstream-1];
initReduce();
}
void endBlas(void)
{
endReduce();
}
hipStream_t* getBlasStream() { return blasStream; }
#include <blas_core.h>
#include <blas_mixed_core.h>
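// note: the four integer template arguments passed to blasCuda below appear to flag which of (x, y, z, w) are written back by the functor (see blas_core.h)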
/**
Functor to perform the operation y = a*x + b*y
*/
template <typename Float2, typename FloatN>
struct axpby {
const Float2 a;
const Float2 b;
axpby(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, const FloatN &z, const FloatN &w) { y = a.x*x + b.x*y; }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 3; } //! flops per element
};
void axpbyCuda(const double &a, cudaColorSpinorField &x, const double &b, cudaColorSpinorField &y) {
blasCuda<axpby,0,1,0,0>(make_double2(a, 0.0), make_double2(b, 0.0), make_double2(0.0, 0.0),
x, y, x, x);
}
/**
Functor to perform the operation y += x
*/
template <typename Float2, typename FloatN>
struct xpy {
xpy(const Float2 &a, const Float2 &b, const Float2 &c) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, const FloatN &z, const FloatN &w) { y += x ; }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 1; } //! flops per element
};
void xpyCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
blasCuda<xpy,0,1,0,0>(make_double2(1.0, 0.0), make_double2(1.0, 0.0), make_double2(0.0, 0.0),
x, y, x, x);
}
/**
Functor to perform the operation y += a*x
*/
template <typename Float2, typename FloatN>
struct axpy {
const Float2 a;
axpy(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, const FloatN &z, const FloatN &w) { y = a.x*x + y; }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
void axpyCuda(const double &a, cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.Precision() != y.Precision()) {
// call hacked mixed precision kernel
mixed::blasCuda<axpy,0,1,0,0>(make_double2(a,0.0), make_double2(1.0,0.0), make_double2(0.0,0.0),
x, y, x, x);
} else {
blasCuda<axpy,0,1,0,0>(make_double2(a, 0.0), make_double2(1.0, 0.0), make_double2(0.0, 0.0),
x, y, x, x);
}
}
/**
Functor to perform the operation y = x + a*y
*/
template <typename Float2, typename FloatN>
struct xpay {
const Float2 a;
xpay(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, const FloatN &z, const FloatN &w) { y = x + a.x*y; }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
void xpayCuda(cudaColorSpinorField &x, const double &a, cudaColorSpinorField &y) {
blasCuda<xpay,0,1,0,0>(make_double2(a,0.0), make_double2(0.0, 0.0), make_double2(0.0, 0.0),
x, y, x, x);
}
/**
Functor to perform the operation y -= x;
*/
template <typename Float2, typename FloatN>
struct mxpy {
mxpy(const Float2 &a, const Float2 &b, const Float2 &c) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, const FloatN &z, const FloatN &w) { y -= x; }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 1; } //! flops per element
};
void mxpyCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
blasCuda<mxpy,0,1,0,0>(make_double2(1.0, 0.0), make_double2(1.0, 0.0),
make_double2(0.0, 0.0), x, y, x, x);
}
/**
Functor to perform the operation x *= a
*/
template <typename Float2, typename FloatN>
struct ax {
const Float2 a;
ax(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; }
__device__ void operator()(FloatN &x, const FloatN &y, const FloatN &z, const FloatN &w) { x *= a.x; }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 1; } //! flops per element
};
void axCuda(const double &a, cudaColorSpinorField &x) {
blasCuda<ax,1,0,0,0>(make_double2(a, 0.0), make_double2(0.0, 0.0),
make_double2(0.0, 0.0), x, x, x, x);
}
/**
Functor to perform the operation y += a * x (complex-valued)
*/
__device__ void caxpy_(const float2 &a, const float4 &x, float4 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
y.z += a.x*x.z; y.z -= a.y*x.w;
y.w += a.y*x.z; y.w += a.x*x.w;
}
__device__ void caxpy_(const float2 &a, const float2 &x, float2 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
}
__device__ void caxpy_(const double2 &a, const double2 &x, double2 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
}
template <typename Float2, typename FloatN>
struct caxpy {
const Float2 a;
caxpy(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, const FloatN &z, const FloatN &w)
{ caxpy_(a, x, y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 4; } //! flops per element
};
void caxpyCuda(const Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y) {
blasCuda<caxpy,0,1,0,0>(make_double2(REAL(a), IMAG(a)),
make_double2(0.0, 0.0),
make_double2(0.0, 0.0), x, y, x, x);
}
/**
Functor to perform the operation y = a*x + b*y (complex-valued)
*/
__device__ void caxpby_(const float2 &a, const float4 &x, const float2 &b, float4 &y)
{ float4 yy;
yy.x = a.x*x.x; yy.x -= a.y*x.y; yy.x += b.x*y.x; yy.x -= b.y*y.y;
yy.y = a.y*x.x; yy.y += a.x*x.y; yy.y += b.y*y.x; yy.y += b.x*y.y;
yy.z = a.x*x.z; yy.z -= a.y*x.w; yy.z += b.x*y.z; yy.z -= b.y*y.w;
yy.w = a.y*x.z; yy.w += a.x*x.w; yy.w += b.y*y.z; yy.w += b.x*y.w;
y = yy; }
__device__ void caxpby_(const float2 &a, const float2 &x, const float2 &b, float2 &y)
{ float2 yy;
yy.x = a.x*x.x; yy.x -= a.y*x.y; yy.x += b.x*y.x; yy.x -= b.y*y.y;
yy.y = a.y*x.x; yy.y += a.x*x.y; yy.y += b.y*y.x; yy.y += b.x*y.y;
y = yy; }
__device__ void caxpby_(const double2 &a, const double2 &x, const double2 &b, double2 &y)
{ double2 yy;
yy.x = a.x*x.x; yy.x -= a.y*x.y; yy.x += b.x*y.x; yy.x -= b.y*y.y;
yy.y = a.y*x.x; yy.y += a.x*x.y; yy.y += b.y*y.x; yy.y += b.x*y.y;
y = yy; }
template <typename Float2, typename FloatN>
struct caxpby {
const Float2 a;
const Float2 b;
caxpby(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, const FloatN &z, const FloatN &w) { caxpby_(a, x, b, y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 7; } //! flops per element
};
void caxpbyCuda(const Complex &a, cudaColorSpinorField &x, const Complex &b, cudaColorSpinorField &y) {
blasCuda<caxpby,0,1,0,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)),
make_double2(0.0, 0.0), x, y, x, x);
}
/**
Functor to performs the operation z[i] = x[i] + a*y[i] + b*z[i]
*/
__device__ void cxpaypbz_(const float4 &x, const float2 &a, const float4 &y, const float2 &b, float4 &z) {
float4 zz;
zz.x = x.x + a.x*y.x; zz.x -= a.y*y.y; zz.x += b.x*z.x; zz.x -= b.y*z.y;
zz.y = x.y + a.y*y.x; zz.y += a.x*y.y; zz.y += b.y*z.x; zz.y += b.x*z.y;
zz.z = x.z + a.x*y.z; zz.z -= a.y*y.w; zz.z += b.x*z.z; zz.z -= b.y*z.w;
zz.w = x.w + a.y*y.z; zz.w += a.x*y.w; zz.w += b.y*z.z; zz.w += b.x*z.w;
z = zz;
}
__device__ void cxpaypbz_(const float2 &x, const float2 &a, const float2 &y, const float2 &b, float2 &z) {
float2 zz;
zz.x = x.x + a.x*y.x; zz.x -= a.y*y.y; zz.x += b.x*z.x; zz.x -= b.y*z.y;
zz.y = x.y + a.y*y.x; zz.y += a.x*y.y; zz.y += b.y*z.x; zz.y += b.x*z.y;
z = zz;
}
__device__ void cxpaypbz_(const double2 &x, const double2 &a, const double2 &y, const double2 &b, double2 &z) {
double2 zz;
zz.x = x.x + a.x*y.x; zz.x -= a.y*y.y; zz.x += b.x*z.x; zz.x -= b.y*z.y;
zz.y = x.y + a.y*y.x; zz.y += a.x*y.y; zz.y += b.y*z.x; zz.y += b.x*z.y;
z = zz;
}
template <typename Float2, typename FloatN>
struct cxpaypbz {
const Float2 a;
const Float2 b;
cxpaypbz(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(const FloatN &x, const FloatN &y, FloatN &z, FloatN &w)
{ cxpaypbz_(x, a, y, b, z); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 8; } //! flops per element
};
void cxpaypbzCuda(cudaColorSpinorField &x, const Complex &a, cudaColorSpinorField &y,
const Complex &b, cudaColorSpinorField &z) {
blasCuda<cxpaypbz,0,0,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)),
make_double2(0.0, 0.0), x, y, z, z);
}
/**
Functor performing the operations: y[i] = a*x[i] + y[i]; x[i] = b*z[i] + c*x[i]
*/
template <typename Float2, typename FloatN>
struct axpyBzpcx {
const Float2 a;
const Float2 b;
const Float2 c;
axpyBzpcx(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b), c(c) { ; }
__device__ void operator()(FloatN &x, FloatN &y, const FloatN &z, const FloatN &w)
{ y += a.x*x; x = b.x*z + c.x*x; }
static int streams() { return 5; } //! total number of input and output streams
static int flops() { return 10; } //! flops per element
};
void axpyBzpcxCuda(const double &a, cudaColorSpinorField& x, cudaColorSpinorField& y, const double &b,
cudaColorSpinorField& z, const double &c) {
if (x.Precision() != y.Precision()) {
// call hacked mixed precision kernel
mixed::blasCuda<axpyBzpcx,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0),
make_double2(c,0.0), x, y, z, x);
} else {
// swap arguments around
blasCuda<axpyBzpcx,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0),
make_double2(c,0.0), x, y, z, x);
}
}
/**
Functor performing the operations: y[i] = a*x[i] + y[i]; x[i] = z[i] + b*x[i]
*/
template <typename Float2, typename FloatN>
struct axpyZpbx {
const Float2 a;
const Float2 b;
axpyZpbx(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(FloatN &x, FloatN &y, const FloatN &z, const FloatN &w)
{ y += a.x*x; x = z + b.x*x; }
static int streams() { return 5; } //! total number of input and output streams
static int flops() { return 8; } //! flops per element
};
void axpyZpbxCuda(const double &a, cudaColorSpinorField& x, cudaColorSpinorField& y,
cudaColorSpinorField& z, const double &b) {
if (x.Precision() != y.Precision()) {
// call hacked mixed precision kernel
mixed::blasCuda<axpyZpbx,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0), make_double2(0.0,0.0),
x, y, z, x);
} else {
// swap arguments around
blasCuda<axpyZpbx,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0), make_double2(0.0,0.0),
x, y, z, x);
}
}
/**
Functor performing the operations z[i] = a*x[i] + b*y[i] + z[i] and y[i] -= b*w[i]
*/
template <typename Float2, typename FloatN>
struct caxpbypzYmbw {
const Float2 a;
const Float2 b;
caxpbypzYmbw(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, FloatN &z, const FloatN &w)
{ caxpy_(a, x, z); caxpy_(b, y, z); caxpy_(-b, w, y); }
static int streams() { return 6; } //! total number of input and output streams
static int flops() { return 12; } //! flops per element
};
void caxpbypzYmbwCuda(const Complex &a, cudaColorSpinorField &x, const Complex &b,
cudaColorSpinorField &y, cudaColorSpinorField &z, cudaColorSpinorField &w) {
blasCuda<caxpbypzYmbw,0,1,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b),IMAG(b)),
make_double2(0.0,0.0), x, y, z, w);
}
/**
Functor performing the operation y[i] += a*b*x[i], x[i] *= a
*/
template <typename Float2, typename FloatN>
struct cabxpyAx {
const Float2 a;
const Float2 b;
cabxpyAx(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(FloatN &x, FloatN &y, const FloatN &z, const FloatN &w)
{ x *= a.x; caxpy_(b, x, y); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 5; } //! flops per element
};
void cabxpyAxCuda(const double &a, const Complex &b,
cudaColorSpinorField &x, cudaColorSpinorField &y) {
// swap arguments around
blasCuda<cabxpyAx,1,1,0,0>(make_double2(a,0.0), make_double2(REAL(b),IMAG(b)),
make_double2(0.0,0.0), x, y, x, x);
}
/**
Functor performing the operation z[i] = a*x[i] + b*y[i] + z[i]
*/
template <typename Float2, typename FloatN>
struct caxpbypz {
const Float2 a;
const Float2 b;
caxpbypz(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(const FloatN &x, const FloatN &y, FloatN &z, const FloatN &w)
{ caxpy_(a, x, z); caxpy_(b, y, z); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 5; } //! flops per element
};
void caxpbypzCuda(const Complex &a, cudaColorSpinorField &x, const Complex &b,
cudaColorSpinorField &y, cudaColorSpinorField &z) {
blasCuda<caxpbypz,0,0,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b),IMAG(b)),
make_double2(0.0,0.0), x, y, z, z);
}
/**
Functor Performing the operation w[i] = a*x[i] + b*y[i] + c*z[i] + w[i]
*/
template <typename Float2, typename FloatN>
struct caxpbypczpw {
const Float2 a;
const Float2 b;
const Float2 c;
caxpbypczpw(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b), c(c) { ; }
__device__ void operator()(const FloatN &x, const FloatN &y, const FloatN &z, FloatN &w)
{ caxpy_(a, x, w); caxpy_(b, y, w); caxpy_(c, z, w); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 5; } //! flops per element
};
void caxpbypczpwCuda(const Complex &a, cudaColorSpinorField &x, const Complex &b,
cudaColorSpinorField &y, const Complex &c, cudaColorSpinorField &z,
cudaColorSpinorField &w) {
blasCuda<caxpbypczpw,0,0,0,1>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b),IMAG(b)),
make_double2(REAL(c),IMAG(c)), x, y, z, w);
}
/**
double caxpyXmazCuda(c a, V x, V y, V z){}
First performs the operation y[i] = a*x[i] + y[i]
Second performs the operation x[i] -= a*z[i]
*/
template <typename Float2, typename FloatN>
struct caxpyxmaz {
Float2 a;
caxpyxmaz(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; }
__device__ void operator()(FloatN &x, FloatN &y, const FloatN &z, const FloatN &w)
{ caxpy_(a, x, y); x-= a.x*z; }
static int streams() { return 5; } //! total number of input and output streams
static int flops() { return 8; } //! flops per element
};
void caxpyXmazCuda(const Complex &a, cudaColorSpinorField &x,
cudaColorSpinorField &y, cudaColorSpinorField &z) {
blasCuda<caxpyxmaz,1,1,0,0>(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0),
make_double2(0.0, 0.0), x, y, z, x);
}
/**
double tripleCGUpdate(d a, d b, V x, V y, V z, V w){}
First performs the operation y[i] = y[i] - a*x[i]
Second performs the operation z[i] = z[i] + a*w[i]
Third performs the operation w[i] = y[i] + b*w[i]
(the three lines above describe the variant that is kept commented out in the functor body)
First performs the operation y[i] = y[i] + a*w[i]
Second performs the operation z[i] = z[i] - a*x[i]
Third performs the operation w[i] = z[i] + b*w[i]
*/
template <typename Float2, typename FloatN>
struct tripleCGUpdate {
Float2 a, b;
tripleCGUpdate(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, FloatN &z, FloatN &w)
//{ y -= a.x*x; z += a.x*w; w = y + b.x*w; }
{ y += a.x*w; z -= a.x*x; w = z + b.x*w; }
static int streams() { return 7; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
void tripleCGUpdateCuda(const double &a, const double &b, cudaColorSpinorField &x,
cudaColorSpinorField &y, cudaColorSpinorField &z, cudaColorSpinorField &w) {
if (x.Precision() != y.Precision()) {
// call hacked mixed precision kernel
mixed::blasCuda<tripleCGUpdate,0,1,1,1>(make_double2(a,0.0), make_double2(b,0.0),
make_double2(0.0,0.0), x, y, z, w);
} else {
blasCuda<tripleCGUpdate,0,1,1,1>(make_double2(a, 0.0), make_double2(b, 0.0),
make_double2(0.0, 0.0), x, y, z, w);
}
}
} // namespace quda
| 1dc01742b291c8cb67f13c1986dbda695fd85474.cu | #include <stdlib.h>
#include <stdio.h>
#include <cstring> // needed for memset
#include <float_vector.h>
#include <tune_quda.h>
#include <typeinfo>
#include <quda_internal.h>
#include <blas_quda.h>
#include <color_spinor_field.h>
#include <face_quda.h> // this is where the MPI / QMP depdendent code is
#define checkSpinor(a, b) \
{ \
if (a.Precision() != b.Precision()) \
errorQuda("precisions do not match: %d %d", a.Precision(), b.Precision()); \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %d %d", a.Length(), b.Length()); \
if (a.Stride() != b.Stride()) \
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \
}
#define checkLength(a, b) \
{ \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %d %d", a.Length(), b.Length()); \
if (a.Stride() != b.Stride()) \
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \
}
namespace quda {
#include <texture.h>
unsigned long long blas_flops;
unsigned long long blas_bytes;
void zeroCuda(cudaColorSpinorField &a) { a.zero(); }
static cudaStream_t *blasStream;
static struct {
const char *vol_str;
const char *aux_str;
} blasStrings;
void initReduce();
void endReduce();
void initBlas()
{
blasStream = &streams[Nstream-1];
initReduce();
}
void endBlas(void)
{
endReduce();
}
cudaStream_t* getBlasStream() { return blasStream; }
#include <blas_core.h>
#include <blas_mixed_core.h>
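// note: the four integer template arguments passed to blasCuda below appear to flag which of (x, y, z, w) are written back by the functor (see blas_core.h)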
/**
Functor to perform the operation y = a*x + b*y
*/
template <typename Float2, typename FloatN>
struct axpby {
const Float2 a;
const Float2 b;
axpby(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, const FloatN &z, const FloatN &w) { y = a.x*x + b.x*y; }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 3; } //! flops per element
};
void axpbyCuda(const double &a, cudaColorSpinorField &x, const double &b, cudaColorSpinorField &y) {
blasCuda<axpby,0,1,0,0>(make_double2(a, 0.0), make_double2(b, 0.0), make_double2(0.0, 0.0),
x, y, x, x);
}
/**
Functor to perform the operation y += x
*/
template <typename Float2, typename FloatN>
struct xpy {
xpy(const Float2 &a, const Float2 &b, const Float2 &c) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, const FloatN &z, const FloatN &w) { y += x ; }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 1; } //! flops per element
};
void xpyCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
blasCuda<xpy,0,1,0,0>(make_double2(1.0, 0.0), make_double2(1.0, 0.0), make_double2(0.0, 0.0),
x, y, x, x);
}
/**
Functor to perform the operation y += a*x
*/
template <typename Float2, typename FloatN>
struct axpy {
const Float2 a;
axpy(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, const FloatN &z, const FloatN &w) { y = a.x*x + y; }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
void axpyCuda(const double &a, cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.Precision() != y.Precision()) {
// call hacked mixed precision kernel
mixed::blasCuda<axpy,0,1,0,0>(make_double2(a,0.0), make_double2(1.0,0.0), make_double2(0.0,0.0),
x, y, x, x);
} else {
blasCuda<axpy,0,1,0,0>(make_double2(a, 0.0), make_double2(1.0, 0.0), make_double2(0.0, 0.0),
x, y, x, x);
}
}
/**
Functor to perform the operation y = x + a*y
*/
template <typename Float2, typename FloatN>
struct xpay {
const Float2 a;
xpay(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, const FloatN &z, const FloatN &w) { y = x + a.x*y; }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
void xpayCuda(cudaColorSpinorField &x, const double &a, cudaColorSpinorField &y) {
blasCuda<xpay,0,1,0,0>(make_double2(a,0.0), make_double2(0.0, 0.0), make_double2(0.0, 0.0),
x, y, x, x);
}
/**
Functor to perform the operation y -= x;
*/
template <typename Float2, typename FloatN>
struct mxpy {
mxpy(const Float2 &a, const Float2 &b, const Float2 &c) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, const FloatN &z, const FloatN &w) { y -= x; }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 1; } //! flops per element
};
void mxpyCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
blasCuda<mxpy,0,1,0,0>(make_double2(1.0, 0.0), make_double2(1.0, 0.0),
make_double2(0.0, 0.0), x, y, x, x);
}
/**
Functor to perform the operation x *= a
*/
template <typename Float2, typename FloatN>
struct ax {
const Float2 a;
ax(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; }
__device__ void operator()(FloatN &x, const FloatN &y, const FloatN &z, const FloatN &w) { x *= a.x; }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 1; } //! flops per element
};
void axCuda(const double &a, cudaColorSpinorField &x) {
blasCuda<ax,1,0,0,0>(make_double2(a, 0.0), make_double2(0.0, 0.0),
make_double2(0.0, 0.0), x, x, x, x);
}
/**
Functor to perform the operation y += a * x (complex-valued)
*/
__device__ void caxpy_(const float2 &a, const float4 &x, float4 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
y.z += a.x*x.z; y.z -= a.y*x.w;
y.w += a.y*x.z; y.w += a.x*x.w;
}
__device__ void caxpy_(const float2 &a, const float2 &x, float2 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
}
__device__ void caxpy_(const double2 &a, const double2 &x, double2 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
}
template <typename Float2, typename FloatN>
struct caxpy {
const Float2 a;
caxpy(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, const FloatN &z, const FloatN &w)
{ caxpy_(a, x, y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 4; } //! flops per element
};
void caxpyCuda(const Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y) {
blasCuda<caxpy,0,1,0,0>(make_double2(REAL(a), IMAG(a)),
make_double2(0.0, 0.0),
make_double2(0.0, 0.0), x, y, x, x);
}
/**
Functor to perform the operation y = a*x + b*y (complex-valued)
*/
__device__ void caxpby_(const float2 &a, const float4 &x, const float2 &b, float4 &y)
{ float4 yy;
yy.x = a.x*x.x; yy.x -= a.y*x.y; yy.x += b.x*y.x; yy.x -= b.y*y.y;
yy.y = a.y*x.x; yy.y += a.x*x.y; yy.y += b.y*y.x; yy.y += b.x*y.y;
yy.z = a.x*x.z; yy.z -= a.y*x.w; yy.z += b.x*y.z; yy.z -= b.y*y.w;
yy.w = a.y*x.z; yy.w += a.x*x.w; yy.w += b.y*y.z; yy.w += b.x*y.w;
y = yy; }
__device__ void caxpby_(const float2 &a, const float2 &x, const float2 &b, float2 &y)
{ float2 yy;
yy.x = a.x*x.x; yy.x -= a.y*x.y; yy.x += b.x*y.x; yy.x -= b.y*y.y;
yy.y = a.y*x.x; yy.y += a.x*x.y; yy.y += b.y*y.x; yy.y += b.x*y.y;
y = yy; }
__device__ void caxpby_(const double2 &a, const double2 &x, const double2 &b, double2 &y)
{ double2 yy;
yy.x = a.x*x.x; yy.x -= a.y*x.y; yy.x += b.x*y.x; yy.x -= b.y*y.y;
yy.y = a.y*x.x; yy.y += a.x*x.y; yy.y += b.y*y.x; yy.y += b.x*y.y;
y = yy; }
template <typename Float2, typename FloatN>
struct caxpby {
const Float2 a;
const Float2 b;
caxpby(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, const FloatN &z, const FloatN &w) { caxpby_(a, x, b, y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 7; } //! flops per element
};
void caxpbyCuda(const Complex &a, cudaColorSpinorField &x, const Complex &b, cudaColorSpinorField &y) {
blasCuda<caxpby,0,1,0,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)),
make_double2(0.0, 0.0), x, y, x, x);
}
/**
Functor to performs the operation z[i] = x[i] + a*y[i] + b*z[i]
*/
__device__ void cxpaypbz_(const float4 &x, const float2 &a, const float4 &y, const float2 &b, float4 &z) {
float4 zz;
zz.x = x.x + a.x*y.x; zz.x -= a.y*y.y; zz.x += b.x*z.x; zz.x -= b.y*z.y;
zz.y = x.y + a.y*y.x; zz.y += a.x*y.y; zz.y += b.y*z.x; zz.y += b.x*z.y;
zz.z = x.z + a.x*y.z; zz.z -= a.y*y.w; zz.z += b.x*z.z; zz.z -= b.y*z.w;
zz.w = x.w + a.y*y.z; zz.w += a.x*y.w; zz.w += b.y*z.z; zz.w += b.x*z.w;
z = zz;
}
__device__ void cxpaypbz_(const float2 &x, const float2 &a, const float2 &y, const float2 &b, float2 &z) {
float2 zz;
zz.x = x.x + a.x*y.x; zz.x -= a.y*y.y; zz.x += b.x*z.x; zz.x -= b.y*z.y;
zz.y = x.y + a.y*y.x; zz.y += a.x*y.y; zz.y += b.y*z.x; zz.y += b.x*z.y;
z = zz;
}
__device__ void cxpaypbz_(const double2 &x, const double2 &a, const double2 &y, const double2 &b, double2 &z) {
double2 zz;
zz.x = x.x + a.x*y.x; zz.x -= a.y*y.y; zz.x += b.x*z.x; zz.x -= b.y*z.y;
zz.y = x.y + a.y*y.x; zz.y += a.x*y.y; zz.y += b.y*z.x; zz.y += b.x*z.y;
z = zz;
}
template <typename Float2, typename FloatN>
struct cxpaypbz {
const Float2 a;
const Float2 b;
cxpaypbz(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(const FloatN &x, const FloatN &y, FloatN &z, FloatN &w)
{ cxpaypbz_(x, a, y, b, z); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 8; } //! flops per element
};
void cxpaypbzCuda(cudaColorSpinorField &x, const Complex &a, cudaColorSpinorField &y,
const Complex &b, cudaColorSpinorField &z) {
blasCuda<cxpaypbz,0,0,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)),
make_double2(0.0, 0.0), x, y, z, z);
}
/**
Functor performing the operations: y[i] = a*x[i] + y[i]; x[i] = b*z[i] + c*x[i]
*/
template <typename Float2, typename FloatN>
struct axpyBzpcx {
const Float2 a;
const Float2 b;
const Float2 c;
axpyBzpcx(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b), c(c) { ; }
__device__ void operator()(FloatN &x, FloatN &y, const FloatN &z, const FloatN &w)
{ y += a.x*x; x = b.x*z + c.x*x; }
static int streams() { return 5; } //! total number of input and output streams
static int flops() { return 10; } //! flops per element
};
void axpyBzpcxCuda(const double &a, cudaColorSpinorField& x, cudaColorSpinorField& y, const double &b,
cudaColorSpinorField& z, const double &c) {
if (x.Precision() != y.Precision()) {
// call hacked mixed precision kernel
mixed::blasCuda<axpyBzpcx,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0),
make_double2(c,0.0), x, y, z, x);
} else {
// swap arguments around
blasCuda<axpyBzpcx,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0),
make_double2(c,0.0), x, y, z, x);
}
}
/**
Functor performing the operations: y[i] = a*x[i] + y[i]; x[i] = z[i] + b*x[i]
*/
template <typename Float2, typename FloatN>
struct axpyZpbx {
const Float2 a;
const Float2 b;
axpyZpbx(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(FloatN &x, FloatN &y, const FloatN &z, const FloatN &w)
{ y += a.x*x; x = z + b.x*x; }
static int streams() { return 5; } //! total number of input and output streams
static int flops() { return 8; } //! flops per element
};
void axpyZpbxCuda(const double &a, cudaColorSpinorField& x, cudaColorSpinorField& y,
cudaColorSpinorField& z, const double &b) {
if (x.Precision() != y.Precision()) {
// call hacked mixed precision kernel
mixed::blasCuda<axpyZpbx,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0), make_double2(0.0,0.0),
x, y, z, x);
} else {
// swap arguments around
blasCuda<axpyZpbx,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0), make_double2(0.0,0.0),
x, y, z, x);
}
}
/**
Functor performing the operations z[i] = a*x[i] + b*y[i] + z[i] and y[i] -= b*w[i]
*/
template <typename Float2, typename FloatN>
struct caxpbypzYmbw {
const Float2 a;
const Float2 b;
caxpbypzYmbw(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, FloatN &z, const FloatN &w)
{ caxpy_(a, x, z); caxpy_(b, y, z); caxpy_(-b, w, y); }
static int streams() { return 6; } //! total number of input and output streams
static int flops() { return 12; } //! flops per element
};
void caxpbypzYmbwCuda(const Complex &a, cudaColorSpinorField &x, const Complex &b,
cudaColorSpinorField &y, cudaColorSpinorField &z, cudaColorSpinorField &w) {
blasCuda<caxpbypzYmbw,0,1,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b),IMAG(b)),
make_double2(0.0,0.0), x, y, z, w);
}
/**
Functor performing the operation y[i] += a*b*x[i], x[i] *= a
*/
template <typename Float2, typename FloatN>
struct cabxpyAx {
const Float2 a;
const Float2 b;
cabxpyAx(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(FloatN &x, FloatN &y, const FloatN &z, const FloatN &w)
{ x *= a.x; caxpy_(b, x, y); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 5; } //! flops per element
};
void cabxpyAxCuda(const double &a, const Complex &b,
cudaColorSpinorField &x, cudaColorSpinorField &y) {
// swap arguments around
blasCuda<cabxpyAx,1,1,0,0>(make_double2(a,0.0), make_double2(REAL(b),IMAG(b)),
make_double2(0.0,0.0), x, y, x, x);
}
/**
Functor performing the operation z[i] = a*x[i] + b*y[i] + z[i]
*/
template <typename Float2, typename FloatN>
struct caxpbypz {
const Float2 a;
const Float2 b;
caxpbypz(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(const FloatN &x, const FloatN &y, FloatN &z, const FloatN &w)
{ caxpy_(a, x, z); caxpy_(b, y, z); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 5; } //! flops per element
};
void caxpbypzCuda(const Complex &a, cudaColorSpinorField &x, const Complex &b,
cudaColorSpinorField &y, cudaColorSpinorField &z) {
blasCuda<caxpbypz,0,0,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b),IMAG(b)),
make_double2(0.0,0.0), x, y, z, z);
}
/**
Functor Performing the operation w[i] = a*x[i] + b*y[i] + c*z[i] + w[i]
*/
template <typename Float2, typename FloatN>
struct caxpbypczpw {
const Float2 a;
const Float2 b;
const Float2 c;
caxpbypczpw(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b), c(c) { ; }
__device__ void operator()(const FloatN &x, const FloatN &y, const FloatN &z, FloatN &w)
{ caxpy_(a, x, w); caxpy_(b, y, w); caxpy_(c, z, w); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 5; } //! flops per element
};
void caxpbypczpwCuda(const Complex &a, cudaColorSpinorField &x, const Complex &b,
cudaColorSpinorField &y, const Complex &c, cudaColorSpinorField &z,
cudaColorSpinorField &w) {
blasCuda<caxpbypczpw,0,0,0,1>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b),IMAG(b)),
make_double2(REAL(c),IMAG(c)), x, y, z, w);
}
/**
double caxpyXmazCuda(c a, V x, V y, V z){}
First performs the operation y[i] = a*x[i] + y[i]
Second performs the operation x[i] -= a*z[i]
*/
template <typename Float2, typename FloatN>
struct caxpyxmaz {
Float2 a;
caxpyxmaz(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; }
__device__ void operator()(FloatN &x, FloatN &y, const FloatN &z, const FloatN &w)
{ caxpy_(a, x, y); x-= a.x*z; }
static int streams() { return 5; } //! total number of input and output streams
static int flops() { return 8; } //! flops per element
};
void caxpyXmazCuda(const Complex &a, cudaColorSpinorField &x,
cudaColorSpinorField &y, cudaColorSpinorField &z) {
blasCuda<caxpyxmaz,1,1,0,0>(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0),
make_double2(0.0, 0.0), x, y, z, x);
}
/**
double tripleCGUpdate(d a, d b, V x, V y, V z, V w){}
First performs the operation y[i] = y[i] - a*x[i]
Second performs the operation z[i] = z[i] + a*w[i]
Third performs the operation w[i] = y[i] + b*w[i]
(the three lines above describe the variant that is kept commented out in the functor body)
First performs the operation y[i] = y[i] + a*w[i]
Second performs the operation z[i] = z[i] - a*x[i]
Third performs the operation w[i] = z[i] + b*w[i]
*/
template <typename Float2, typename FloatN>
struct tripleCGUpdate {
Float2 a, b;
tripleCGUpdate(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; }
__device__ void operator()(const FloatN &x, FloatN &y, FloatN &z, FloatN &w)
//{ y -= a.x*x; z += a.x*w; w = y + b.x*w; }
{ y += a.x*w; z -= a.x*x; w = z + b.x*w; }
static int streams() { return 7; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
void tripleCGUpdateCuda(const double &a, const double &b, cudaColorSpinorField &x,
cudaColorSpinorField &y, cudaColorSpinorField &z, cudaColorSpinorField &w) {
if (x.Precision() != y.Precision()) {
// call hacked mixed precision kernel
mixed::blasCuda<tripleCGUpdate,0,1,1,1>(make_double2(a,0.0), make_double2(b,0.0),
make_double2(0.0,0.0), x, y, z, w);
} else {
blasCuda<tripleCGUpdate,0,1,1,1>(make_double2(a, 0.0), make_double2(b, 0.0),
make_double2(0.0, 0.0), x, y, z, w);
}
}
} // namespace quda
|
29f269a3603e02835f42b3c94e38f7bb47d8af50.hip | // !!! This is a file automatically generated by hipify!!!
#include"VehicleParticleFilter.cuh"
#define SIZEMIN 1.0f
#define SIZEMAX 5.0f
#define SEARCHDIM 50
PARTICLE_INITIALIZE_FUNC(Vehicle,initialState,randomOffset)
{
state.x=initialState.x+randomOffset.x;
state.y=initialState.y+randomOffset.y;
state.theta=initialState.theta+randomOffset.theta;
state.v=initialState.v+randomOffset.v;
state.width=initialState.width+randomOffset.width;
state.length=initialState.length+randomOffset.length;
state.wsigma=0;
state.lsigma=0;
}
PARTICLE_RANDOMNIZE_FUNC(Vehicle,randomOffset)
{
state.v+=randomOffset.v;
state.theta+=randomOffset.theta;
}
PARTICLE_UPDATE_FUNC(Vehicle,deltaMsec)
{
float msec=deltaMsec/1000.0;
float dis=msec*state.v;
state.x+=dis*cos(state.theta);
state.y+=dis*sin(state.theta);
}
PARTICLE_TRANSFORM_FUNC(Vehicle,transform)
{
transform.transformState2D(state.x,state.y,state.theta);
}
#define PI 3.14159265359
#define GETRECTCORNER(ctheta,stheta,cx,cy,x,y,corner,density,beamid) \
corner[0]=x*ctheta-y*stheta+cx; \
corner[1]=x*stheta+y*ctheta+cy; \
beamid[0]=(atan2(corner[1],corner[0])+PI)/density; \
corner[2]=x*ctheta+y*stheta+cx; \
corner[3]=x*stheta-y*ctheta+cy; \
beamid[1]=(atan2(corner[3],corner[2])+PI)/density; \
corner[4]=-x*ctheta+y*stheta+cx; \
corner[5]=-x*stheta-y*ctheta+cy; \
beamid[2]=(atan2(corner[5],corner[4])+PI)/density; \
corner[6]=-x*ctheta-y*stheta+cx; \
corner[7]=-x*stheta+y*ctheta+cy; \
beamid[3]=(atan2(corner[7],corner[6])+PI)/density; \
corner[8]=x*ctheta-y*stheta+cx; \
corner[9]=x*stheta+y*ctheta+cy; \
beamid[4]=(atan2(corner[9],corner[8])+PI)/density;
#define GETRECTEDGE(ox,oy,x,y,edgeid) \
if(ox>x) { \
if(oy>y) { edgeid[0]=0;edgeid[1]=3; } \
else if(oy<-y) { edgeid[0]=0;edgeid[1]=1; } \
else { edgeid[0]=0;edgeid[1]=-1; } } \
else if(ox<-x) { \
if(oy>y) { edgeid[0]=2;edgeid[1]=3; } \
else if(oy<-y) { edgeid[0]=2;edgeid[1]=1; } \
else { edgeid[0]=2;edgeid[1]=-1; } } \
else { \
if(oy>y) { edgeid[0]=3;edgeid[1]=-1; } \
else if(oy<-y) { edgeid[0]=1;edgeid[1]=-1; } \
else { edgeid[0]=-1;edgeid[1]=-1; } }
PARTICLE_MEASURE_FUNC(Vehicle,state,measureData)
{
float c=cos(state.theta);
float s=sin(state.theta);
float searchstep=SIZEMAX/(2*SEARCHDIM);
float S[SEARCHDIM][SEARCHDIM];
float corner[10];
int beamid[5];
int edgeid[2];
float density=2*PI/measureData.beamnum;
float thresh=0.05;
float ox=-c*state.x-s*state.y;
float oy=s*state.x-c*state.y;
float weight=-1;
int maxi,maxj;
float sumweight=0;
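// exhaustive grid search over candidate half-length (x) and half-width (y); S[i][j] scores how well an oriented rectangle of that size explains the scan beams hitting its visible edges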
for(int i=0;i<SEARCHDIM;i++)
{
float x=(i+1)*searchstep;
for(int j=0;j<SEARCHDIM;j++)
{
float y=(j+1)*searchstep;
GETRECTCORNER(c,s,state.x,state.y,x,y,corner,density,beamid);
GETRECTEDGE(ox,oy,x,y,edgeid);
S[i][j]=1.0f;
for(int k=0;k<2;k++)
{
if(edgeid[k]<0)
{
break;
}
int count=0;
float score=1;
int startid=beamid[edgeid[k]];
int endid=beamid[edgeid[k]+1];
if(startid>endid)
{
endid+=measureData.beamnum;
}
for(int id=startid;id<=endid;id++)
{
int tmpid=id%measureData.beamnum;
if(measureData.beams[tmpid]<=0)
{
continue;
}
float theta=id*density-PI;
float bx=measureData.beams[tmpid]*cos(theta);
float by=measureData.beams[tmpid]*sin(theta);
float beta=(corner[2*edgeid[k]+1]*corner[2*edgeid[k]+2]-corner[2*edgeid[k]]*corner[2*edgeid[k]+3])
/(by*(corner[2*edgeid[k]+2]-corner[2*edgeid[k]])-bx*(corner[2*edgeid[k]+3]-corner[2*edgeid[k]+1]));
float distance=(beta-1)*measureData.beams[tmpid];
if(distance>thresh)
{
score*=0.5+(expf(-powf(distance-thresh,2)/0.01))*9.5;
//score*=0.5f;
}
else if(distance<-thresh)
{
score*=0.1+(expf(-powf(distance+thresh,2)/0.01))*9.9;
//score*=0.1f;
}
else
{
score*=10+(expf(-powf(distance,2)/0.01))*10;
//score*=10.0f;
}
count++;
}
if(count>0)
{
S[i][j]*=powf(score,1.0f/count);
}
else
{
S[i][j]=0;
break;
}
}
sumweight+=S[i][j];
if(weight<S[i][j])
{
weight=S[i][j];
maxi=i;
maxj=j;
}
}
}
if(weight<=0||sumweight==0)
{
return 0;
}
weight/=sumweight;
float width=(maxj+1)*searchstep*2;
float length=(maxi+1)*searchstep*2;
float wsigma=0;
float wsum=0;
float lsigma=0;
float lsum=0;
for(int k=0;k<SEARCHDIM;k++)
{
float wdis=(maxj-k)*searchstep*2;
wsigma+=S[maxi][k]/sumweight*wdis*wdis;
wsum+=S[maxi][k]/sumweight;
float ldis=(maxi-k)*searchstep*2;
lsigma+=S[k][maxj]/sumweight*ldis*ldis;
lsum+=S[k][maxj]/sumweight;
}
wsigma/=wsum;
lsigma/=lsum;
if(state.lsigma*state.wsigma==0)
{
state.width=width;
state.length=length;
state.wsigma=wsigma;
state.lsigma=lsigma;
return weight;
}
float tmpwsigma=wsigma*state.wsigma/(wsigma+state.wsigma);
float tmplsigma=lsigma*state.lsigma/(lsigma+state.lsigma);
weight*=sqrt((tmpwsigma*tmplsigma)/(state.wsigma*state.lsigma));
float wdis=width-state.width;
float ldis=length-state.length;
weight*=expf(-(wdis*wdis)/(wsigma+state.wsigma)-(ldis*ldis)/(lsigma+state.lsigma));
state.width=width;
state.length=length;
state.wsigma=tmpwsigma;
state.lsigma=tmplsigma;
return weight;
}
PARTICLE_FILTER_MEASURE_FUNC(Vehicle,measureData)
{
MEASUREDATA_TYPE(Vehicle) virtualscan;
virtualscan.beamnum=measureData.beamnum;
size_t beamsize=virtualscan.beamnum*sizeof(double);
hipMalloc((void **)(&(virtualscan.beams)),beamsize);
hipMemcpy(virtualscan.beams,measureData.beams,beamsize,hipMemcpyHostToDevice);
Vehicle_Base::measureParticles(virtualscan);
hipDeviceSynchronize();
hipFree(virtualscan.beams);
}
PARTICLE_FILTER_INTERACT_FUNCS(Vehicle)
| 29f269a3603e02835f42b3c94e38f7bb47d8af50.cu | #include"VehicleParticleFilter.cuh"
#define SIZEMIN 1.0f
#define SIZEMAX 5.0f
#define SEARCHDIM 50
PARTICLE_INITIALIZE_FUNC(Vehicle,initialState,randomOffset)
{
state.x=initialState.x+randomOffset.x;
state.y=initialState.y+randomOffset.y;
state.theta=initialState.theta+randomOffset.theta;
state.v=initialState.v+randomOffset.v;
state.width=initialState.width+randomOffset.width;
state.length=initialState.length+randomOffset.length;
state.wsigma=0;
state.lsigma=0;
}
PARTICLE_RANDOMNIZE_FUNC(Vehicle,randomOffset)
{
state.v+=randomOffset.v;
state.theta+=randomOffset.theta;
}
PARTICLE_UPDATE_FUNC(Vehicle,deltaMsec)
{
float msec=deltaMsec/1000.0;
float dis=msec*state.v;
state.x+=dis*cos(state.theta);
state.y+=dis*sin(state.theta);
}
PARTICLE_TRANSFORM_FUNC(Vehicle,transform)
{
transform.transformState2D(state.x,state.y,state.theta);
}
#define PI 3.14159265359
#define GETRECTCORNER(ctheta,stheta,cx,cy,x,y,corner,density,beamid) \
corner[0]=x*ctheta-y*stheta+cx; \
corner[1]=x*stheta+y*ctheta+cy; \
beamid[0]=(atan2(corner[1],corner[0])+PI)/density; \
corner[2]=x*ctheta+y*stheta+cx; \
corner[3]=x*stheta-y*ctheta+cy; \
beamid[1]=(atan2(corner[3],corner[2])+PI)/density; \
corner[4]=-x*ctheta+y*stheta+cx; \
corner[5]=-x*stheta-y*ctheta+cy; \
beamid[2]=(atan2(corner[5],corner[4])+PI)/density; \
corner[6]=-x*ctheta-y*stheta+cx; \
corner[7]=-x*stheta+y*ctheta+cy; \
beamid[3]=(atan2(corner[7],corner[6])+PI)/density; \
corner[8]=x*ctheta-y*stheta+cx; \
corner[9]=x*stheta+y*ctheta+cy; \
beamid[4]=(atan2(corner[9],corner[8])+PI)/density;
#define GETRECTEDGE(ox,oy,x,y,edgeid) \
if(ox>x) { \
if(oy>y) { edgeid[0]=0;edgeid[1]=3; } \
else if(oy<-y) { edgeid[0]=0;edgeid[1]=1; } \
else { edgeid[0]=0;edgeid[1]=-1; } } \
else if(ox<-x) { \
if(oy>y) { edgeid[0]=2;edgeid[1]=3; } \
else if(oy<-y) { edgeid[0]=2;edgeid[1]=1; } \
else { edgeid[0]=2;edgeid[1]=-1; } } \
else { \
if(oy>y) { edgeid[0]=3;edgeid[1]=-1; } \
else if(oy<-y) { edgeid[0]=1;edgeid[1]=-1; } \
else { edgeid[0]=-1;edgeid[1]=-1; } }
PARTICLE_MEASURE_FUNC(Vehicle,state,measureData)
{
float c=cos(state.theta);
float s=sin(state.theta);
float searchstep=SIZEMAX/(2*SEARCHDIM);
float S[SEARCHDIM][SEARCHDIM];
float corner[10];
int beamid[5];
int edgeid[2];
float density=2*PI/measureData.beamnum;
float thresh=0.05;
float ox=-c*state.x-s*state.y;
float oy=s*state.x-c*state.y;
float weight=-1;
int maxi,maxj;
float sumweight=0;
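// exhaustive grid search over candidate half-length (x) and half-width (y); S[i][j] scores how well an oriented rectangle of that size explains the scan beams hitting its visible edges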
for(int i=0;i<SEARCHDIM;i++)
{
float x=(i+1)*searchstep;
for(int j=0;j<SEARCHDIM;j++)
{
float y=(j+1)*searchstep;
GETRECTCORNER(c,s,state.x,state.y,x,y,corner,density,beamid);
GETRECTEDGE(ox,oy,x,y,edgeid);
S[i][j]=1.0f;
for(int k=0;k<2;k++)
{
if(edgeid[k]<0)
{
break;
}
int count=0;
float score=1;
int startid=beamid[edgeid[k]];
int endid=beamid[edgeid[k]+1];
if(startid>endid)
{
endid+=measureData.beamnum;
}
for(int id=startid;id<=endid;id++)
{
int tmpid=id%measureData.beamnum;
if(measureData.beams[tmpid]<=0)
{
continue;
}
float theta=id*density-PI;
float bx=measureData.beams[tmpid]*cos(theta);
float by=measureData.beams[tmpid]*sin(theta);
float beta=(corner[2*edgeid[k]+1]*corner[2*edgeid[k]+2]-corner[2*edgeid[k]]*corner[2*edgeid[k]+3])
/(by*(corner[2*edgeid[k]+2]-corner[2*edgeid[k]])-bx*(corner[2*edgeid[k]+3]-corner[2*edgeid[k]+1]));
float distance=(beta-1)*measureData.beams[tmpid];
if(distance>thresh)
{
score*=0.5+(expf(-powf(distance-thresh,2)/0.01))*9.5;
//score*=0.5f;
}
else if(distance<-thresh)
{
score*=0.1+(expf(-powf(distance+thresh,2)/0.01))*9.9;
//score*=0.1f;
}
else
{
score*=10+(expf(-powf(distance,2)/0.01))*10;
//score*=10.0f;
}
count++;
}
if(count>0)
{
S[i][j]*=powf(score,1.0f/count);
}
else
{
S[i][j]=0;
break;
}
}
sumweight+=S[i][j];
if(weight<S[i][j])
{
weight=S[i][j];
maxi=i;
maxj=j;
}
}
}
if(weight<=0||sumweight==0)
{
return 0;
}
weight/=sumweight;
float width=(maxj+1)*searchstep*2;
float length=(maxi+1)*searchstep*2;
float wsigma=0;
float wsum=0;
float lsigma=0;
float lsum=0;
for(int k=0;k<SEARCHDIM;k++)
{
float wdis=(maxj-k)*searchstep*2;
wsigma+=S[maxi][k]/sumweight*wdis*wdis;
wsum+=S[maxi][k]/sumweight;
float ldis=(maxi-k)*searchstep*2;
lsigma+=S[k][maxj]/sumweight*ldis*ldis;
lsum+=S[k][maxj]/sumweight;
}
wsigma/=wsum;
lsigma/=lsum;
if(state.lsigma*state.wsigma==0)
{
state.width=width;
state.length=length;
state.wsigma=wsigma;
state.lsigma=lsigma;
return weight;
}
float tmpwsigma=wsigma*state.wsigma/(wsigma+state.wsigma);
float tmplsigma=lsigma*state.lsigma/(lsigma+state.lsigma);
weight*=sqrt((tmpwsigma*tmplsigma)/(state.wsigma*state.lsigma));
float wdis=width-state.width;
float ldis=length-state.length;
weight*=expf(-(wdis*wdis)/(wsigma+state.wsigma)-(ldis*ldis)/(lsigma+state.lsigma));
state.width=width;
state.length=length;
state.wsigma=tmpwsigma;
state.lsigma=tmplsigma;
return weight;
}
PARTICLE_FILTER_MEASURE_FUNC(Vehicle,measureData)
{
MEASUREDATA_TYPE(Vehicle) virtualscan;
virtualscan.beamnum=measureData.beamnum;
size_t beamsize=virtualscan.beamnum*sizeof(double);
cudaMalloc((void **)(&(virtualscan.beams)),beamsize);
cudaMemcpy(virtualscan.beams,measureData.beams,beamsize,cudaMemcpyHostToDevice);
Vehicle_Base::measureParticles(virtualscan);
cudaDeviceSynchronize();
cudaFree(virtualscan.beams);
}
PARTICLE_FILTER_INTERACT_FUNCS(Vehicle)
|
9997915d644afbb5cea0039c7212e2af65eb19f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipblas.h>
#include "stdio.h"
#include "gemv.h"
#ifndef CUDA_CHECK
#define CUDA_CHECK(code) \
{ \
hipError_t status = (code); \
if ((status) != hipSuccess) { \
fprintf(stderr, "CUDA error in file: %s, line: %d, %s\n", __FILE__, \
__LINE__, hipGetErrorString((status))); \
exit((status)); \
} \
}
#endif
static const int threadsPerBlock = 128;
static const int blocksPerGrid = 32;
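// baseline kernel: one thread per output row (grid-stride loop); each thread walks an entire row of A, so neighbouring threads touch addresses a full row apart (uncoalesced)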
__global__ void kernel_naive(const Matrix A, const Vector X, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < A.n_row; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
for (int j = 0; j < A.n_col; j++) {
temp += A.data[i * A.n_col + j] * X.data[j];
}
Y.data[i] = temp;
}
}
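// coalesced kernel: reads the transposed matrix, so for a fixed j consecutive threads load consecutive elements of A_trans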
__global__ void kernel_coalesce(const Matrix A_trans, const Vector X, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid > A_trans.n_col) {
return;
}
for (int i = tid; i < A_trans.n_col; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
for (int j = 0; j < A_trans.n_row; j++) {
temp += A_trans.data[i + j * A_trans.n_col] * X.data[j];
}
Y.data[i] = temp;
}
}
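// X is staged in __constant__ memory: 16384 floats = 64 KB, the full constant-memory budget, so X.length must not exceed 16384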
__constant__ float DATA_CONSTANT[16384];
__global__ void kernel_constant(const Matrix A_trans, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid > A_trans.n_col) {
return;
}
for (int i = tid; i < A_trans.n_col; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
for (int j = 0; j < A_trans.n_row; j++) {
temp += A_trans.data[i + j * A_trans.n_col] * DATA_CONSTANT[j];
}
Y.data[i] = temp;
}
}
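// same as kernel_constant, but the inner loop is split into chunks of threadsPerBlock (a compile-time constant) that the compiler can unroll, plus a tail loop for the remaining rows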
__global__ void kernel_constant_loop_unroll(const Matrix A_trans, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid > A_trans.n_col) {
return;
}
for (int i = tid; i < A_trans.n_col; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
for (int j = 0; j + threadsPerBlock <= A_trans.n_row; j += threadsPerBlock) {
// loop unroll
for (int k = 0; k < threadsPerBlock; ++k) {
temp += A_trans.data[i + (j + k) * A_trans.n_col] * DATA_CONSTANT[j + k];
}
}
for (int j = A_trans.n_row - A_trans.n_row % threadsPerBlock; j < A_trans.n_row; ++j) {
temp += A_trans.data[i + j * A_trans.n_col] * DATA_CONSTANT[j];
}
Y.data[i] = temp;
}
}
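// tiles X through shared memory: each block cooperatively loads threadsPerBlock elements of X, then every thread reuses the tile for its own column of A_trans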
__global__ void kernel_shared(const Matrix A_trans, const Vector X, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int cid = threadIdx.x;
__shared__ float cache[threadsPerBlock];
const int cache_last = A_trans.n_row - A_trans.n_row % threadsPerBlock;
// i: current col
for (int i = tid; i < A_trans.n_col; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
// j: current row for load cache
for (int j = cid; j < cache_last; j += threadsPerBlock) {
__syncthreads();
cache[cid] = X.data[j];
__syncthreads();
int begin = j - j % threadsPerBlock;
// k: current row for calculate
for (int k = 0; k < blockDim.x; ++k) {
temp += A_trans.data[(k + begin) * A_trans.n_col + i] * cache[k];
}
}
__syncthreads();
if (cache_last + cid < A_trans.n_row) {
cache[cid] = X.data[cache_last + cid];
}
__syncthreads();
for (int k = cache_last; k < A_trans.n_row; k++) {
temp += A_trans.data[k * A_trans.n_col + i] * cache[k - cache_last];
}
Y.data[i] = temp;
}
}
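// identical to kernel_shared except the inner loop bound is the compile-time constant threadsPerBlock rather than blockDim.x, letting the compiler fully unroll it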
__global__ void kernel_shared_loop_unroll(const Matrix A_trans, const Vector X, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int cid = threadIdx.x;
__shared__ float cache[threadsPerBlock];
const int cache_last = A_trans.n_row - A_trans.n_row % threadsPerBlock;
// i: current col
for (int i = tid; i < A_trans.n_col; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
// j: current row for load cache
for (int j = cid; j < cache_last; j += threadsPerBlock) {
__syncthreads();
cache[cid] = X.data[j];
__syncthreads();
int begin = j - j % threadsPerBlock;
// k: current row for calculate
for (int k = 0; k < threadsPerBlock; ++k) {
temp += A_trans.data[(k + begin) * A_trans.n_col + i] * cache[k];
}
}
__syncthreads();
if (cache_last + cid < A_trans.n_row) {
cache[cid] = X.data[cache_last + cid];
}
__syncthreads();
for (int k = cache_last; k < A_trans.n_row; k++) {
temp += A_trans.data[k * A_trans.n_col + i] * cache[k - cache_last];
}
Y.data[i] = temp;
}
}
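// double-buffered variant: the next tile of X is prefetched into a register while the current shared-memory tile is consumed, hiding global-load latency behind compute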
__global__ void kernel_shared_loop_unroll_prefetch(const Matrix A_trans, const Vector X, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int cid = threadIdx.x;
__shared__ float cache[threadsPerBlock];
const int cache_last = A_trans.n_row - A_trans.n_row % threadsPerBlock;
// i: current col
for (int i = tid; i < A_trans.n_col; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
// prefetch
__syncthreads();
cache[cid] = X.data[cid];
__syncthreads();
// j: current row for load cache
for (int j = cid + threadsPerBlock; j < cache_last; j += threadsPerBlock) {
// prefetch
float reg = X.data[j];
int begin = j - threadsPerBlock - j % threadsPerBlock;
// k: current row for calculate
for (int k = 0; k < threadsPerBlock; ++k) {
temp += A_trans.data[(k + begin) * A_trans.n_col + i] * cache[k];
}
__syncthreads();
cache[cid] = reg;
__syncthreads();
}
int begin = cache_last - threadsPerBlock;
// k: current row for calculate
for (int k = 0; k < threadsPerBlock; ++k) {
temp += A_trans.data[(k + begin) * A_trans.n_col + i] * cache[k];
}
__syncthreads();
if (cache_last + cid < A_trans.n_row) {
cache[cid] = X.data[cache_last + cid];
}
__syncthreads();
for (int k = cache_last; k < A_trans.n_row; k++) {
temp += A_trans.data[k * A_trans.n_col + i] * cache[k - cache_last];
}
Y.data[i] = temp;
}
}
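// warp-shuffle variant: each lane loads one element of X and broadcasts it to the rest of the warp with __shfl_sync, avoiding shared memory and block-wide synchronization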
__global__ void kernel_shuffle_loop_unroll(const Matrix A_trans, const Vector X, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int laneId = threadIdx.x % 32;
const int cache_last = A_trans.n_row - A_trans.n_row % 32;
// i: current col
for (int i = tid; i < A_trans.n_col; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
// j: current row for load cache
float shuffle_val = 0;
for (int j = laneId; j < cache_last; j += 32) {
shuffle_val = X.data[j];
int cache_offset = j / 32 * 32;
// k: current row for calculate
for (int k = 0; k < 32; k++) {
temp += A_trans.data[(cache_offset + k) * A_trans.n_col + i]
* __shfl_sync(0xffffffff, shuffle_val, k, 32);
}
}
if (cache_last + laneId < A_trans.n_row) {
shuffle_val = X.data[cache_last + laneId];
}
for (int k = cache_last; k < A_trans.n_row; k++) {
temp += A_trans.data[k * A_trans.n_col + i]
* __shfl_sync(0xffffffff, shuffle_val, k - cache_last, 32);
}
Y.data[i] = temp;
}
}
void gemv(const Matrix A, const Vector X, Vector Y) {
Matrix A_trans = transpose(A);
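// the coalesced kernels read the transposed matrix so that consecutive threads access consecutive addresses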
Matrix d_A;
Matrix d_A_trans;
Vector d_X;
Vector d_Y;
d_A.n_col = A.n_col;
d_A.n_row = A.n_row;
d_A_trans.n_col = A_trans.n_col;
d_A_trans.n_row = A_trans.n_row;
d_X.length = X.length;
d_Y.length = Y.length;
int size_A = d_A.n_col * d_A.n_row;
int size_X = d_X.length;
int size_Y = d_Y.length;
CUDA_CHECK(hipMalloc(&d_A.data, size_A * sizeof(float)));
CUDA_CHECK(hipMalloc(&d_A_trans.data, size_A * sizeof(float)));
CUDA_CHECK(hipMalloc(&d_X.data, size_X * sizeof(float)));
CUDA_CHECK(hipMalloc(&d_Y.data, size_Y * sizeof(float)));
CUDA_CHECK(hipMemcpy(d_A.data, A.data, size_A * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(d_A_trans.data, A_trans.data, size_A*sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(DATA_CONSTANT, X.data, size_X*sizeof(float)));
CUDA_CHECK(hipMemcpy(d_X.data, X.data, size_X * sizeof(float), hipMemcpyHostToDevice));
// invoke kernel
dim3 dims_block(threadsPerBlock);
dim3 dims_grid(blocksPerGrid);
const int n_rounds = 10;
// warm up
hipLaunchKernelGGL(( kernel_naive), dim3(dims_grid), dim3(dims_block), 0, 0, d_A, d_X, d_Y);
// naive
float elapsedTime_naive;
hipEvent_t start_naive, stop_naive;
CUDA_CHECK(hipEventCreate(&start_naive));
CUDA_CHECK(hipEventCreate(&stop_naive));
CUDA_CHECK(hipEventRecord(start_naive, 0));
for (int i = 0; i < n_rounds; ++i)
hipLaunchKernelGGL(( kernel_naive), dim3(dims_grid), dim3(dims_block), 0, 0, d_A, d_X, d_Y);
CUDA_CHECK(hipEventRecord(stop_naive, 0));
CUDA_CHECK(hipEventSynchronize(stop_naive));
CUDA_CHECK(hipEventElapsedTime(&elapsedTime_naive, start_naive, stop_naive));
CUDA_CHECK(hipEventDestroy(start_naive));
CUDA_CHECK(hipEventDestroy(stop_naive));
printf("Time of naive: %fms\n", elapsedTime_naive / n_rounds);
// coalesce
float elapsedTime_coalesce;
hipEvent_t start_coalesce, stop_coalesce;
CUDA_CHECK(hipEventCreate(&start_coalesce));
CUDA_CHECK(hipEventCreate(&stop_coalesce));
CUDA_CHECK(hipEventRecord(start_coalesce, 0));
for (int i = 0; i < n_rounds; ++i)
hipLaunchKernelGGL(( kernel_coalesce), dim3(dims_grid), dim3(dims_block), 0, 0, d_A_trans, d_X, d_Y);
CUDA_CHECK(hipEventRecord(stop_coalesce, 0));
CUDA_CHECK(hipEventSynchronize(stop_coalesce));
CUDA_CHECK(hipEventElapsedTime(&elapsedTime_coalesce, start_coalesce, stop_coalesce));
CUDA_CHECK(hipEventDestroy(start_coalesce));
CUDA_CHECK(hipEventDestroy(stop_coalesce));
printf("Time of coalesce: %fms\n", elapsedTime_coalesce / n_rounds);
// constant
float elapsedTime_constant;
hipEvent_t start_constant, stop_constant;
CUDA_CHECK(hipEventCreate(&start_constant));
CUDA_CHECK(hipEventCreate(&stop_constant));
CUDA_CHECK(hipEventRecord(start_constant, 0));
for (int i = 0; i < n_rounds; ++i)
hipLaunchKernelGGL(( kernel_constant), dim3(dims_grid), dim3(dims_block), 0, 0, d_A_trans, d_Y);
CUDA_CHECK(hipEventRecord(stop_constant, 0));
CUDA_CHECK(hipEventSynchronize(stop_constant));
CUDA_CHECK(hipEventElapsedTime(&elapsedTime_constant, start_constant, stop_constant));
CUDA_CHECK(hipEventDestroy(start_constant));
CUDA_CHECK(hipEventDestroy(stop_constant));
printf("Time of constant: %fms\n", elapsedTime_constant / n_rounds);
// constant_loop_unroll
float elapsedTime_constant_loop_unroll;
hipEvent_t start_constant_loop_unroll, stop_constant_loop_unroll;
CUDA_CHECK(hipEventCreate(&start_constant_loop_unroll));
CUDA_CHECK(hipEventCreate(&stop_constant_loop_unroll));
CUDA_CHECK(hipEventRecord(start_constant_loop_unroll, 0));
for (int i = 0; i < n_rounds; ++i)
hipLaunchKernelGGL(( kernel_constant_loop_unroll), dim3(dims_grid), dim3(dims_block), 0, 0, d_A_trans, d_Y);
CUDA_CHECK(hipEventRecord(stop_constant_loop_unroll, 0));
CUDA_CHECK(hipEventSynchronize(stop_constant_loop_unroll));
CUDA_CHECK(hipEventElapsedTime(&elapsedTime_constant_loop_unroll, start_constant_loop_unroll, stop_constant_loop_unroll));
CUDA_CHECK(hipEventDestroy(start_constant_loop_unroll));
CUDA_CHECK(hipEventDestroy(stop_constant_loop_unroll));
printf("Time of constant_loop_unroll: %fms\n", elapsedTime_constant_loop_unroll / n_rounds);
// shared
float elapsedTime_shared;
hipEvent_t start_shared, stop_shared;
CUDA_CHECK(hipEventCreate(&start_shared));
CUDA_CHECK(hipEventCreate(&stop_shared));
CUDA_CHECK(hipEventRecord(start_shared, 0));
for (int i = 0; i < n_rounds; ++i)
hipLaunchKernelGGL(( kernel_shared), dim3(dims_grid), dim3(dims_block), 0, 0, d_A_trans, d_X, d_Y);
CUDA_CHECK(hipEventRecord(stop_shared, 0));
CUDA_CHECK(hipEventSynchronize(stop_shared));
CUDA_CHECK(hipEventElapsedTime(&elapsedTime_shared, start_shared, stop_shared));
CUDA_CHECK(hipEventDestroy(start_shared));
CUDA_CHECK(hipEventDestroy(stop_shared));
printf("Time of shared: %fms\n", elapsedTime_shared / n_rounds);
// shared_loop_unroll
float elapsedTime_shared_loop_unroll;
hipEvent_t start_shared_loop_unroll, stop_shared_loop_unroll;
CUDA_CHECK(hipEventCreate(&start_shared_loop_unroll));
CUDA_CHECK(hipEventCreate(&stop_shared_loop_unroll));
CUDA_CHECK(hipEventRecord(start_shared_loop_unroll, 0));
for (int i = 0; i < n_rounds; ++i)
hipLaunchKernelGGL(( kernel_shared_loop_unroll), dim3(dims_grid), dim3(dims_block), 0, 0, d_A_trans, d_X, d_Y);
CUDA_CHECK(hipEventRecord(stop_shared_loop_unroll, 0));
CUDA_CHECK(hipEventSynchronize(stop_shared_loop_unroll));
CUDA_CHECK(hipEventElapsedTime(&elapsedTime_shared_loop_unroll, start_shared_loop_unroll, stop_shared_loop_unroll));
CUDA_CHECK(hipEventDestroy(start_shared_loop_unroll));
CUDA_CHECK(hipEventDestroy(stop_shared_loop_unroll));
printf("Time of shared_loop_unroll: %fms\n", elapsedTime_shared_loop_unroll / n_rounds);
// shared_loop_unroll_prefetch
float elapsedTime_shared_loop_unroll_prefetch;
hipEvent_t start_shared_loop_unroll_prefetch, stop_shared_loop_unroll_prefetch;
CUDA_CHECK(hipEventCreate(&start_shared_loop_unroll_prefetch));
CUDA_CHECK(hipEventCreate(&stop_shared_loop_unroll_prefetch));
CUDA_CHECK(hipEventRecord(start_shared_loop_unroll_prefetch, 0));
for (int i = 0; i < n_rounds; ++i)
hipLaunchKernelGGL(( kernel_shared_loop_unroll_prefetch), dim3(dims_grid), dim3(dims_block), 0, 0, d_A_trans, d_X, d_Y);
CUDA_CHECK(hipEventRecord(stop_shared_loop_unroll_prefetch, 0));
CUDA_CHECK(hipEventSynchronize(stop_shared_loop_unroll_prefetch));
CUDA_CHECK(hipEventElapsedTime(&elapsedTime_shared_loop_unroll_prefetch, start_shared_loop_unroll_prefetch, stop_shared_loop_unroll_prefetch));
CUDA_CHECK(hipEventDestroy(start_shared_loop_unroll_prefetch));
CUDA_CHECK(hipEventDestroy(stop_shared_loop_unroll_prefetch));
printf("Time of shared_loop_unroll_prefetch: %fms\n", elapsedTime_shared_loop_unroll_prefetch / n_rounds);
// shuffle_loop_unroll
float elapsedTime_shuffle_loop_unroll;
hipEvent_t start_shuffle_loop_unroll, stop_shuffle_loop_unroll;
CUDA_CHECK(hipEventCreate(&start_shuffle_loop_unroll));
CUDA_CHECK(hipEventCreate(&stop_shuffle_loop_unroll));
CUDA_CHECK(hipEventRecord(start_shuffle_loop_unroll, 0));
for (int i = 0; i < n_rounds; ++i)
hipLaunchKernelGGL(( kernel_shuffle_loop_unroll), dim3(dims_grid), dim3(dims_block), 0, 0, d_A_trans, d_X, d_Y);
CUDA_CHECK(hipEventRecord(stop_shuffle_loop_unroll, 0));
CUDA_CHECK(hipEventSynchronize(stop_shuffle_loop_unroll));
CUDA_CHECK(hipEventElapsedTime(&elapsedTime_shuffle_loop_unroll, start_shuffle_loop_unroll, stop_shuffle_loop_unroll));
CUDA_CHECK(hipEventDestroy(start_shuffle_loop_unroll));
CUDA_CHECK(hipEventDestroy(stop_shuffle_loop_unroll));
printf("Time of shuffle_loop_unroll: %fms\n", elapsedTime_shuffle_loop_unroll / n_rounds);
// cublas
float elapsedTime_cublas;
hipEvent_t start_cublas, stop_cublas;
CUDA_CHECK(hipEventCreate(&start_cublas));
CUDA_CHECK(hipEventCreate(&stop_cublas));
hipblasHandle_t handle;
(hipblasCreate(&handle));
(hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_HOST));
// hipBLAS assumes column-major storage, so the row-major A_trans is read back as the original A; pass swapped dimensions with HIPBLAS_OP_N instead of an explicit transpose
hipblasOperation_t trans = HIPBLAS_OP_N;
const int dim0_tensor1 = d_A_trans.n_row;
const int dim1_tensor1 = d_A_trans.n_col;
const int lda = dim1_tensor1;
const int incx = 1;
const int incy = 1;
const float *a = d_A_trans.data;
const float *x = d_X.data;
float *y = d_Y.data;
float alpha = 1, beta = 0;
CUDA_CHECK(hipEventRecord(start_cublas, 0));
for (int i = 0; i < n_rounds; ++i)
(hipblasSgemv(handle, trans, dim1_tensor1, dim0_tensor1,
&alpha, a, lda, x, incx, &beta, y, incy));
CUDA_CHECK(hipEventRecord(stop_cublas, 0));
CUDA_CHECK(hipEventSynchronize(stop_cublas));
CUDA_CHECK(hipEventElapsedTime(&elapsedTime_cublas, start_cublas, stop_cublas));
CUDA_CHECK(hipEventDestroy(start_cublas));
CUDA_CHECK(hipEventDestroy(stop_cublas));
printf("Time of cublas: %fms\n", elapsedTime_cublas / n_rounds);
// copy data from device to host
CUDA_CHECK(hipMemcpy(Y.data, d_Y.data, size_Y * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CHECK(hipFree(d_A.data));
CUDA_CHECK(hipFree(d_A_trans.data));
CUDA_CHECK(hipFree(d_X.data));
CUDA_CHECK(hipFree(d_Y.data));
hipblasDestroy(handle);
free(A_trans.data);
return;
} | 9997915d644afbb5cea0039c7212e2af65eb19f4.cu | #include "cuda_runtime.h"
#include <cublas_v2.h>
#include "stdio.h"
#include "gemv.h"
#ifndef CUDA_CHECK
#define CUDA_CHECK(code) \
{ \
cudaError_t status = (code); \
if ((status) != cudaSuccess) { \
fprintf(stderr, "CUDA error in file: %s, line: %d, %s\n", __FILE__, \
__LINE__, cudaGetErrorString((status))); \
exit((status)); \
} \
}
#endif
static const int threadsPerBlock = 128;
static const int blocksPerGrid = 32;
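// together these give 32 blocks x 128 threads = 4096 threads; each kernel below covers any remaining rows/columns with a grid-stride loop of that size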
__global__ void kernel_naive(const Matrix A, const Vector X, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < A.n_row; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
for (int j = 0; j < A.n_col; j++) {
temp += A.data[i * A.n_col + j] * X.data[j];
}
Y.data[i] = temp;
}
}
__global__ void kernel_coalesce(const Matrix A_trans, const Vector X, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid > A_trans.n_col) {
return;
}
for (int i = tid; i < A_trans.n_col; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
for (int j = 0; j < A_trans.n_row; j++) {
temp += A_trans.data[i + j * A_trans.n_col] * X.data[j];
}
Y.data[i] = temp;
}
}
__constant__ float DATA_CONSTANT[16384];
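// 16384 floats = 64 KB, the usual constant-memory capacity on NVIDIA GPUs, so the constant-memory kernels assume X has at most 16384 elements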
__global__ void kernel_constant(const Matrix A_trans, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid > A_trans.n_col) {
return;
}
for (int i = tid; i < A_trans.n_col; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
for (int j = 0; j < A_trans.n_row; j++) {
temp += A_trans.data[i + j * A_trans.n_col] * DATA_CONSTANT[j];
}
Y.data[i] = temp;
}
}
__global__ void kernel_constant_loop_unroll(const Matrix A_trans, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid > A_trans.n_col) {
return;
}
for (int i = tid; i < A_trans.n_col; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
for (int j = 0; j < A_trans.n_row; j += threadsPerBlock) {
// loop unroll
for (int k = 0; k < threadsPerBlock; ++k) {
temp += A_trans.data[i + (j + k) * A_trans.n_col] * DATA_CONSTANT[j + k];
}
}
for (int j = A_trans.n_row - A_trans.n_row % threadsPerBlock; j < A_trans.n_row; ++j) {
temp += A_trans.data[i + j * A_trans.n_col] * DATA_CONSTANT[j];
}
Y.data[i] = temp;
}
}
__global__ void kernel_shared(const Matrix A_trans, const Vector X, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int cid = threadIdx.x;
__shared__ float cache[threadsPerBlock];
const int cache_last = A_trans.n_row - A_trans.n_row % threadsPerBlock;
// i: current col
for (int i = tid; i < A_trans.n_col; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
// j: current row for load cache
for (int j = cid; j < cache_last; j += threadsPerBlock) {
__syncthreads();
cache[cid] = X.data[j];
__syncthreads();
int begin = j - j % threadsPerBlock;
// k: current row for calculate
for (int k = 0; k < blockDim.x; ++k) {
temp += A_trans.data[(k + begin) * A_trans.n_col + i] * cache[k];
}
}
__syncthreads();
if (cache_last + cid < A_trans.n_row) {
cache[cid] = X.data[cache_last + cid];
}
__syncthreads();
for (int k = cache_last; k < A_trans.n_row; k++) {
temp += A_trans.data[k * A_trans.n_col + i] * cache[k - cache_last];
}
Y.data[i] = temp;
}
}
__global__ void kernel_shared_loop_unroll(const Matrix A_trans, const Vector X, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int cid = threadIdx.x;
__shared__ float cache[threadsPerBlock];
const int cache_last = A_trans.n_row - A_trans.n_row % threadsPerBlock;
// i: current col
for (int i = tid; i < A_trans.n_col; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
// j: current row for load cache
for (int j = cid; j < cache_last; j += threadsPerBlock) {
__syncthreads();
cache[cid] = X.data[j];
__syncthreads();
int begin = j - j % threadsPerBlock;
// k: current row for calculate
for (int k = 0; k < threadsPerBlock; ++k) {
temp += A_trans.data[(k + begin) * A_trans.n_col + i] * cache[k];
}
}
__syncthreads();
if (cache_last + cid < A_trans.n_row) {
cache[cid] = X.data[cache_last + cid];
}
__syncthreads();
for (int k = cache_last; k < A_trans.n_row; k++) {
temp += A_trans.data[k * A_trans.n_col + i] * cache[k - cache_last];
}
Y.data[i] = temp;
}
}
__global__ void kernel_shared_loop_unroll_prefetch(const Matrix A_trans, const Vector X, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int cid = threadIdx.x;
__shared__ float cache[threadsPerBlock];
const int cache_last = A_trans.n_row - A_trans.n_row % threadsPerBlock;
// i: current col
for (int i = tid; i < A_trans.n_col; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
// prefetch
__syncthreads();
cache[cid] = X.data[cid];
__syncthreads();
// j: current row for load cache
for (int j = cid + threadsPerBlock; j < cache_last; j += threadsPerBlock) {
// prefetch
float reg = X.data[j];
int begin = j - threadsPerBlock - j % threadsPerBlock;
// k: current row for calculate
for (int k = 0; k < threadsPerBlock; ++k) {
temp += A_trans.data[(k + begin) * A_trans.n_col + i] * cache[k];
}
__syncthreads();
cache[cid] = reg;
__syncthreads();
}
int begin = cache_last - threadsPerBlock;
// k: current row for calculate
for (int k = 0; k < threadsPerBlock; ++k) {
temp += A_trans.data[(k + begin) * A_trans.n_col + i] * cache[k];
}
__syncthreads();
if (cache_last + cid < A_trans.n_row) {
cache[cid] = X.data[cache_last + cid];
}
__syncthreads();
for (int k = cache_last; k < A_trans.n_row; k++) {
temp += A_trans.data[k * A_trans.n_col + i] * cache[k - cache_last];
}
Y.data[i] = temp;
}
}
__global__ void kernel_shuffle_loop_unroll(const Matrix A_trans, const Vector X, Vector Y) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int laneId = threadIdx.x % 32;
const int cache_last = A_trans.n_row - A_trans.n_row % 32;
// i: current col
for (int i = tid; i < A_trans.n_col; i += threadsPerBlock * blocksPerGrid) {
float temp = 0;
// j: current row for load cache
float shuffle_val = 0;
for (int j = laneId; j < cache_last; j += 32) {
shuffle_val = X.data[j];
int cache_offset = j / 32 * 32;
// k: current row for calculate
for (int k = 0; k < 32; k++) {
temp += A_trans.data[(cache_offset + k) * A_trans.n_col + i]
* __shfl_sync(0xffffffff, shuffle_val, k, 32);
}
}
if (cache_last + laneId < A_trans.n_row) {
shuffle_val = X.data[cache_last + laneId];
}
for (int k = cache_last; k < A_trans.n_row; k++) {
temp += A_trans.data[k * A_trans.n_col + i]
* __shfl_sync(0xffffffff, shuffle_val, k - cache_last, 32);
}
Y.data[i] = temp;
}
}
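// A minimal CPU reference for sanity-checking the kernels above. This helper is an
// illustrative addition rather than part of the original benchmark; it assumes the Matrix
// and Vector structs from gemv.h expose row-major data via n_row/n_col/length and a data
// pointer, which matches how kernel_naive indexes A. Its output can be compared with Y
// after gemv() copies the device result back to the host.
static void gemv_cpu_reference(const Matrix A, const Vector X, Vector Y_ref) {
for (int i = 0; i < A.n_row; ++i) {
float acc = 0.0f;
for (int j = 0; j < A.n_col; ++j) {
acc += A.data[i * A.n_col + j] * X.data[j];
}
Y_ref.data[i] = acc;
}
}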
void gemv(const Matrix A, const Vector X, Vector Y) {
Matrix A_trans = transpose(A);
Matrix d_A;
Matrix d_A_trans;
Vector d_X;
Vector d_Y;
d_A.n_col = A.n_col;
d_A.n_row = A.n_row;
d_A_trans.n_col = A_trans.n_col;
d_A_trans.n_row = A_trans.n_row;
d_X.length = X.length;
d_Y.length = Y.length;
int size_A = d_A.n_col * d_A.n_row;
int size_X = d_X.length;
int size_Y = d_Y.length;
CUDA_CHECK(cudaMalloc(&d_A.data, size_A * sizeof(float)));
CUDA_CHECK(cudaMalloc(&d_A_trans.data, size_A * sizeof(float)));
CUDA_CHECK(cudaMalloc(&d_X.data, size_X * sizeof(float)));
CUDA_CHECK(cudaMalloc(&d_Y.data, size_Y * sizeof(float)));
CUDA_CHECK(cudaMemcpy(d_A.data, A.data, size_A * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(d_A_trans.data, A_trans.data, size_A*sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(DATA_CONSTANT, X.data, size_X*sizeof(float)));
CUDA_CHECK(cudaMemcpy(d_X.data, X.data, size_X * sizeof(float), cudaMemcpyHostToDevice));
// invoke kernel
dim3 dims_block(threadsPerBlock);
dim3 dims_grid(blocksPerGrid);
const int n_rounds = 10;
// warm up
kernel_naive<<<dims_grid, dims_block>>>(d_A, d_X, d_Y);
// naive
float elapsedTime_naive;
cudaEvent_t start_naive, stop_naive;
CUDA_CHECK(cudaEventCreate(&start_naive));
CUDA_CHECK(cudaEventCreate(&stop_naive));
CUDA_CHECK(cudaEventRecord(start_naive, 0));
for (int i = 0; i < n_rounds; ++i)
kernel_naive<<<dims_grid, dims_block>>>(d_A, d_X, d_Y);
CUDA_CHECK(cudaEventRecord(stop_naive, 0));
CUDA_CHECK(cudaEventSynchronize(stop_naive));
CUDA_CHECK(cudaEventElapsedTime(&elapsedTime_naive, start_naive, stop_naive));
CUDA_CHECK(cudaEventDestroy(start_naive));
CUDA_CHECK(cudaEventDestroy(stop_naive));
printf("Time of naive: %fms\n", elapsedTime_naive / n_rounds);
// coalesce
float elapsedTime_coalesce;
cudaEvent_t start_coalesce, stop_coalesce;
CUDA_CHECK(cudaEventCreate(&start_coalesce));
CUDA_CHECK(cudaEventCreate(&stop_coalesce));
CUDA_CHECK(cudaEventRecord(start_coalesce, 0));
for (int i = 0; i < n_rounds; ++i)
kernel_coalesce<<<dims_grid, dims_block>>>(d_A_trans, d_X, d_Y);
CUDA_CHECK(cudaEventRecord(stop_coalesce, 0));
CUDA_CHECK(cudaEventSynchronize(stop_coalesce));
CUDA_CHECK(cudaEventElapsedTime(&elapsedTime_coalesce, start_coalesce, stop_coalesce));
CUDA_CHECK(cudaEventDestroy(start_coalesce));
CUDA_CHECK(cudaEventDestroy(stop_coalesce));
printf("Time of coalesce: %fms\n", elapsedTime_coalesce / n_rounds);
// constant
float elapsedTime_constant;
cudaEvent_t start_constant, stop_constant;
CUDA_CHECK(cudaEventCreate(&start_constant));
CUDA_CHECK(cudaEventCreate(&stop_constant));
CUDA_CHECK(cudaEventRecord(start_constant, 0));
for (int i = 0; i < n_rounds; ++i)
kernel_constant<<<dims_grid, dims_block>>>(d_A_trans, d_Y);
CUDA_CHECK(cudaEventRecord(stop_constant, 0));
CUDA_CHECK(cudaEventSynchronize(stop_constant));
CUDA_CHECK(cudaEventElapsedTime(&elapsedTime_constant, start_constant, stop_constant));
CUDA_CHECK(cudaEventDestroy(start_constant));
CUDA_CHECK(cudaEventDestroy(stop_constant));
printf("Time of constant: %fms\n", elapsedTime_constant / n_rounds);
// constant_loop_unroll
float elapsedTime_constant_loop_unroll;
cudaEvent_t start_constant_loop_unroll, stop_constant_loop_unroll;
CUDA_CHECK(cudaEventCreate(&start_constant_loop_unroll));
CUDA_CHECK(cudaEventCreate(&stop_constant_loop_unroll));
CUDA_CHECK(cudaEventRecord(start_constant_loop_unroll, 0));
for (int i = 0; i < n_rounds; ++i)
kernel_constant_loop_unroll<<<dims_grid, dims_block>>>(d_A_trans, d_Y);
CUDA_CHECK(cudaEventRecord(stop_constant_loop_unroll, 0));
CUDA_CHECK(cudaEventSynchronize(stop_constant_loop_unroll));
CUDA_CHECK(cudaEventElapsedTime(&elapsedTime_constant_loop_unroll, start_constant_loop_unroll, stop_constant_loop_unroll));
CUDA_CHECK(cudaEventDestroy(start_constant_loop_unroll));
CUDA_CHECK(cudaEventDestroy(stop_constant_loop_unroll));
printf("Time of constant_loop_unroll: %fms\n", elapsedTime_constant_loop_unroll / n_rounds);
// shared
float elapsedTime_shared;
cudaEvent_t start_shared, stop_shared;
CUDA_CHECK(cudaEventCreate(&start_shared));
CUDA_CHECK(cudaEventCreate(&stop_shared));
CUDA_CHECK(cudaEventRecord(start_shared, 0));
for (int i = 0; i < n_rounds; ++i)
kernel_shared<<<dims_grid, dims_block>>>(d_A_trans, d_X, d_Y);
CUDA_CHECK(cudaEventRecord(stop_shared, 0));
CUDA_CHECK(cudaEventSynchronize(stop_shared));
CUDA_CHECK(cudaEventElapsedTime(&elapsedTime_shared, start_shared, stop_shared));
CUDA_CHECK(cudaEventDestroy(start_shared));
CUDA_CHECK(cudaEventDestroy(stop_shared));
printf("Time of shared: %fms\n", elapsedTime_shared / n_rounds);
// shared_loop_unroll
float elapsedTime_shared_loop_unroll;
cudaEvent_t start_shared_loop_unroll, stop_shared_loop_unroll;
CUDA_CHECK(cudaEventCreate(&start_shared_loop_unroll));
CUDA_CHECK(cudaEventCreate(&stop_shared_loop_unroll));
CUDA_CHECK(cudaEventRecord(start_shared_loop_unroll, 0));
for (int i = 0; i < n_rounds; ++i)
kernel_shared_loop_unroll<<<dims_grid, dims_block>>>(d_A_trans, d_X, d_Y);
CUDA_CHECK(cudaEventRecord(stop_shared_loop_unroll, 0));
CUDA_CHECK(cudaEventSynchronize(stop_shared_loop_unroll));
CUDA_CHECK(cudaEventElapsedTime(&elapsedTime_shared_loop_unroll, start_shared_loop_unroll, stop_shared_loop_unroll));
CUDA_CHECK(cudaEventDestroy(start_shared_loop_unroll));
CUDA_CHECK(cudaEventDestroy(stop_shared_loop_unroll));
printf("Time of shared_loop_unroll: %fms\n", elapsedTime_shared_loop_unroll / n_rounds);
// shared_loop_unroll_prefetch
float elapsedTime_shared_loop_unroll_prefetch;
cudaEvent_t start_shared_loop_unroll_prefetch, stop_shared_loop_unroll_prefetch;
CUDA_CHECK(cudaEventCreate(&start_shared_loop_unroll_prefetch));
CUDA_CHECK(cudaEventCreate(&stop_shared_loop_unroll_prefetch));
CUDA_CHECK(cudaEventRecord(start_shared_loop_unroll_prefetch, 0));
for (int i = 0; i < n_rounds; ++i)
kernel_shared_loop_unroll_prefetch<<<dims_grid, dims_block>>>(d_A_trans, d_X, d_Y);
CUDA_CHECK(cudaEventRecord(stop_shared_loop_unroll_prefetch, 0));
CUDA_CHECK(cudaEventSynchronize(stop_shared_loop_unroll_prefetch));
CUDA_CHECK(cudaEventElapsedTime(&elapsedTime_shared_loop_unroll_prefetch, start_shared_loop_unroll_prefetch, stop_shared_loop_unroll_prefetch));
CUDA_CHECK(cudaEventDestroy(start_shared_loop_unroll_prefetch));
CUDA_CHECK(cudaEventDestroy(stop_shared_loop_unroll_prefetch));
printf("Time of shared_loop_unroll_prefetch: %fms\n", elapsedTime_shared_loop_unroll_prefetch / n_rounds);
// shuffle_loop_unroll
float elapsedTime_shuffle_loop_unroll;
cudaEvent_t start_shuffle_loop_unroll, stop_shuffle_loop_unroll;
CUDA_CHECK(cudaEventCreate(&start_shuffle_loop_unroll));
CUDA_CHECK(cudaEventCreate(&stop_shuffle_loop_unroll));
CUDA_CHECK(cudaEventRecord(start_shuffle_loop_unroll, 0));
for (int i = 0; i < n_rounds; ++i)
kernel_shuffle_loop_unroll<<<dims_grid, dims_block>>>(d_A_trans, d_X, d_Y);
CUDA_CHECK(cudaEventRecord(stop_shuffle_loop_unroll, 0));
CUDA_CHECK(cudaEventSynchronize(stop_shuffle_loop_unroll));
CUDA_CHECK(cudaEventElapsedTime(&elapsedTime_shuffle_loop_unroll, start_shuffle_loop_unroll, stop_shuffle_loop_unroll));
CUDA_CHECK(cudaEventDestroy(start_shuffle_loop_unroll));
CUDA_CHECK(cudaEventDestroy(stop_shuffle_loop_unroll));
printf("Time of shuffle_loop_unroll: %fms\n", elapsedTime_shuffle_loop_unroll / n_rounds);
// cublas
float elapsedTime_cublas;
cudaEvent_t start_cublas, stop_cublas;
CUDA_CHECK(cudaEventCreate(&start_cublas));
CUDA_CHECK(cudaEventCreate(&stop_cublas));
cublasHandle_t handle;
(cublasCreate(&handle));
(cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST));
// cuBLAS assumes column-major storage, so the row-major A_trans is read back as the original A; pass swapped dimensions with CUBLAS_OP_N instead of an explicit transpose
cublasOperation_t trans = CUBLAS_OP_N;
const int dim0_tensor1 = d_A_trans.n_row;
const int dim1_tensor1 = d_A_trans.n_col;
const int lda = dim1_tensor1;
const int incx = 1;
const int incy = 1;
const float *a = d_A_trans.data;
const float *x = d_X.data;
float *y = d_Y.data;
float alpha = 1, beta = 0;
CUDA_CHECK(cudaEventRecord(start_cublas, 0));
for (int i = 0; i < n_rounds; ++i)
(cublasSgemv(handle, trans, dim1_tensor1, dim0_tensor1,
&alpha, a, lda, x, incx, &beta, y, incy));
CUDA_CHECK(cudaEventRecord(stop_cublas, 0));
CUDA_CHECK(cudaEventSynchronize(stop_cublas));
CUDA_CHECK(cudaEventElapsedTime(&elapsedTime_cublas, start_cublas, stop_cublas));
CUDA_CHECK(cudaEventDestroy(start_cublas));
CUDA_CHECK(cudaEventDestroy(stop_cublas));
printf("Time of cublas: %fms\n", elapsedTime_cublas / n_rounds);
// copy data from device to host
CUDA_CHECK(cudaMemcpy(Y.data, d_Y.data, size_Y * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaFree(d_A.data));
CUDA_CHECK(cudaFree(d_A_trans.data));
CUDA_CHECK(cudaFree(d_X.data));
CUDA_CHECK(cudaFree(d_Y.data));
cublasDestroy(handle);
free(A_trans.data);
return;
} |
85533e659dc32c4d688fee98e665d8bdd25dbc14.hip | // !!! This is a file automatically generated by hipify!!!
/*
* full_parallel.cu is a program to take in an array of data and an array of molecular counts and a desired width for analysis
* V 1.0
* we expect a format of [xf_all, yf_all, N, off_all, sigx_all, sigy_all, xf_crlb, yf_crlb, N_crlb, off_crlb, sigx_crlb, sigy_crlb, llv_all, framenum_all] = full_parallel_chain_loc [i1, a1, width, xpix, ypix, mol_nums,sigma, fx_all, bkgn];
* here i1 is the stack of images containing molecular emissions to be segmented and localized, a1 is the corresponding stack of counting images, width is the width of a segmented image (typically 7), xpix and ypix are grid variables used in the localization calculation, mol_nums is the scalar number of molecules to be analyzed, sigma is the initial width guess, bkgn is an initial offset guess, and fx_all is a placeholder vector of size mol_num x 1 used to shape the output.
* AJN 5/3/16
*/
#include "mex.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#define PI 3.14159265358979323846
#define O_TILE_WIDTH 25 // variable to determine how many output tiles will be considered in a block
# define BLOCK_WIDTH (O_TILE_WIDTH + (7-1)) // block width needs to be output tiles + mask_width - 1 to ensure enough pixels are covered for calculation
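/*
* Hypothetical host-side launch geometry for the kernel below; this sketch is an added
* illustration, not code from the original program. Each block is assumed to cover an
* O_TILE_WIDTH x O_TILE_WIDTH patch of output pixels with a BLOCK_WIDTH x BLOCK_WIDTH thread
* block, and the grid's z dimension walks the image stack, matching how the kernel builds
* row_output, col_output and imnum. The helper names tile_grid/tile_block are invented here.
*/
static inline dim3 tile_grid(int irow, int icol, int numi)
{
return dim3((icol + O_TILE_WIDTH - 1) / O_TILE_WIDTH, (irow + O_TILE_WIDTH - 1) / O_TILE_WIDTH, numi);
}
static inline dim3 tile_block()
{
return dim3(BLOCK_WIDTH, BLOCK_WIDTH, 1);
}
/* a launch would then take the form:
hipLaunchKernelGGL(segment_and_localize7, tile_grid(irow, icol, numi), tile_block(), 0, 0, ...kernel arguments...);
*/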
/*
* Device code
*
*
*/
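// device_det returns the determinant of the 6x6 Fisher information matrix passed in as a flat 36-element
// array; the expression below is a cofactor expansion that was generated offline (see the cofacs.py note
// where the CRLBs are computed near the end of the kernel).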
__device__ double device_det(double Fisher[36])
{
double det;
det = Fisher[0] * (Fisher[7] * (Fisher[14] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) + Fisher[26] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[32] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]))) - Fisher[13] * (Fisher[8] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]))) + Fisher[19] * (Fisher[8] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[25] * (Fisher[8] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) + Fisher[31] * (Fisher[8] * 
(Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])))) - Fisher[6] * (Fisher[1] * (Fisher[14] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) + Fisher[26] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[32] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]))) - Fisher[13] * (Fisher[2] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]))) + Fisher[19] * (Fisher[2] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) - Fisher[25] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * 
Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])))) + Fisher[12] * (Fisher[1] * (Fisher[8] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]))) + Fisher[19] * (Fisher[2] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) 
+ Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[25] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])))) - Fisher[18] * (Fisher[1] * (Fisher[8] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * 
(Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[13] * (Fisher[2] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[25] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])))) + Fisher[24] * (Fisher[1] * (Fisher[8] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * 
Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[13] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[19] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) 
+ Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])))) - Fisher[30] * (Fisher[1] * (Fisher[8] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[13] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[19] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - 
Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[25] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))));
return det;
}
__global__ void segment_and_localize7(double *d_iall, // the gaussian is a separable filter and can be treated as such
double *d_a1, // makes these elements eligible for constant caching
double sigma,
double *xpix,
double *ypix,
double *d_xf_all,
double *d_yf_all,
double *d_N,
double *d_off,
double *d_sigx,
double *d_sigy,
double *d_framenum_all,
double *d_xf_crlb,
double *d_yf_crlb,
double *d_N_crlb,
double *d_off_crlb,
double *d_sigx_crlb,
double *d_sigy_crlb,
double *d_xpix,
double *d_ypix,
double * d_llv,
int numi,
int irow,
int icol,
double bkgn)
{
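// Overall flow, one thread per candidate pixel:
// 1) if the counting image d_a1 marks this pixel, copy the surrounding 7x7 segment of d_iall into local memory,
// 2) form initial guesses: center of mass for x/y, summed counts for N, sigma for the widths and bkgn for the offset,
// 3) run 50 Newton-Raphson iterations of the pixel-integrated gaussian maximum-likelihood fit,
// 4) on the final iteration accumulate the Fisher information matrix and the log-likelihood value,
// 5) if the fit is finite and falls inside the segment, write the parameters, frame number, llv and CRLBs to the outputs.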
// Declare variables (the scalar work variables below are inferred from their usage in the fitting loop)
double d_i2[7 * 7] = {0.0}; // local copy of the 7x7 image segment, zeroed so edge segments outside the image read as empty pixels
__shared__ double xgrid[7 * 7]; // allocate xpix and ypix variables to the shared memory of the blocks
__shared__ double ygrid[7 * 7]; // this will reduce calls to global device memory
double d_beta1[6]; // current parameter estimates: x, y, N, sigx, sigy, offset
double fisher[36] = {0.0}; // Fisher information matrix accumulated on the final iteration
double d_x, d_y, d_n, d_sx, d_sy, d_o, dd_x, dd_y, dd_n, dd_sx, dd_sy, dd_o; // first/second derivative correction sums
double dudx, dudy, dudn, dudo, dudsx, dudsy, d2udx2, d2udy2, d2udsx2, d2udsy2; // model derivatives per pixel
double u, Ex, Ey, llv, det_fish; // model value, x/y erf factors, log-likelihood, Fisher determinant
// Coordinate building
int tx = threadIdx.x; // local x coord
int ty = threadIdx.y; // local y coord
int tz = threadIdx.z;
// location of the output pixel being analyzed; we do not need a localization apron
int row_output = blockIdx.y*O_TILE_WIDTH + ty; // gives y coordinate as a function of tile width; these coordinates lose meaning for ty or tx >= O_TILE_WIDTH
int col_output = blockIdx.x*O_TILE_WIDTH + tx; // gives x coordinate as a function of tile width
int imnum = blockIdx.z;
if (imnum < numi){
if (d_a1[row_output + irow*col_output + irow*icol*imnum] > 0){ // if d_a1 is greater than 0 at this pixel, the neural net has marked it for analysis
int row_input = row_output - 3; // each thread loads its own 7x7 segment centered on the output pixel; row/col_input mark its top-left corner (3 = (7-1)/2)
int col_input = col_output - 3; // note BLOCK_WIDTH = O_TILE_WIDTH + MASK_WIDTH - 1
int index = (int)d_a1[row_output + irow*col_output + irow*icol*imnum];
// Buffer data into block
// buffer segment into i2
for (int row = 0; row < 7; row++){
for (int col = 0; col < 7; col++){
xgrid[row + 7 * col] = d_xpix[row + 7 * col]; // load xpix and ypix into the flat 7x7 shared grids
ygrid[row + 7 * col] = d_ypix[row + 7 * col];
if ((row_input + row >= 0) && (row_input + row < irow) && (col_input + col >= 0) && (col_input + col < icol)){ // if statement checks the row/col indices to ensure they fall onto the input image
d_i2[row + 7 * col] = d_iall[row_input + row + (col_input + col)*irow + imnum*irow*icol]; // if true, the image value is stored in the local segment copy
}
}
}// end counting of rows and columns; at this point the image to localize is contained in d_i2
// at this point we should have xpix ypix and the image to localize loaded to 1 core
// Determine the beta estimations
// Determining X and Y guesses
// center of mass approach
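// i.e. xguess = sum(xgrid .* I) / sum(I) and yguess = sum(ygrid .* I) / sum(I); the total intensity sum also serves as the initial photon-count guess N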
double xsum = 0.0;
double ysum = 0.0;
double sumt = 0;
for (int row = 0; row < 7; row++){
for (int col = 0; col < 7; col++){
sumt += d_i2[row + 7 * col]; // the total sum is also used as the N guess
xsum += xgrid[row + 7 * col] * d_i2[row + 7 * col];
ysum += ygrid[row + 7 * col] * d_i2[row + 7 * col];
}
} // end counting over rows
// final estimation of xguess and yguess as xcm and ycm
d_beta1[0] = xsum / sumt;
d_beta1[1] = ysum / sumt;
d_beta1[2] = sumt;
d_beta1[3] = sigma;
d_beta1[4] = sigma;
d_beta1[5] = bkgn;
// start the for loop iterations FOR 1
for (int counttry = 0; counttry < 50; counttry++){
d_x = 0.0;
d_y = 0.0;
d_n = 0.0;
d_sx = 0.0;
d_sy = 0.0;
d_o = 0.0;
dd_x = 0.0; //wipe incremental variables each loop to give correct correction factor
dd_y = 0.0;
dd_n = 0.0;
dd_sx = 0.0;
dd_sy = 0.0;
dd_o = 0.0;
u = 0;
Ey = 0;
Ex = 0;
llv = 0.0;
// Calculate pixel values for derivatives, 2nd derivatives, errorfunctions and u
for (int rowcount = 0; rowcount < irow; rowcount++){ // FOR 2 loops over all rows
for (int colcount = 0; colcount < irow; colcount++){ // FOR 3 loops over all columns
// x/ygrid is col major(come from matlab) and i3 is col major
// these three lines evaluate the fitting gaussian as defined by the current iteration of parameters
Ex = 0.5 * (erf((xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])) - erf((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])));
Ey = 0.5 * (erf((ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])) - erf((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])));
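// the model for this pixel is u = N*Ex*Ey + offset, where Ex and Ey are the gaussian integrated across the pixel via the erf differences above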
u = d_beta1[2] * Ex*Ey + d_beta1[5];
// first derivatives calculations
// these are done pixel by pixel with the sum added up in the d_x and dd_x areas
dudx = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[3] * d_beta1[3]))*(exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudy = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[4] * d_beta1[4]))*(exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudsx = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[3], 2.0)))* ((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) * exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudsy = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[4], 2.0)))* ((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) * exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudn = Ex*Ey;
dudo = 1.0;
// second derivatives
// these are calculated in a similar manner to the first derivatives
d2udx2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[3], 3.0))*((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))))*Ey;
d2udy2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[4], 3.0))*((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))))*Ex;
d2udsx2 = (Ey*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[3], -5.0)*(powf((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- powf((xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))
- 2 * powf(d_beta1[3], -3.0)*((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) *exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5) *exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))));
d2udsy2 = (Ex*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[4], -5.0)*(powf((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- powf((ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))
- 2 * powf(d_beta1[4], -3.0)*((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) *exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5) *exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))));
// summing variable to lead to correction factors
// these variables keep track of the correction which is given by summing over the entire pixel
d_x = d_x + dudx*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_x = dd_x + d2udx2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudx, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_y = d_y + dudy*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_y = dd_y + d2udy2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudy, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_n = d_n + dudn*((d_i2[rowcount + colcount*irow] / u) - 1.0);
d_sx = d_sx + dudsx*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_sx = dd_sx + d2udsx2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudsx, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_sy = d_sy + dudsy*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_sy = dd_sy + d2udsy2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudsy, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
dd_n = dd_n - powf(dudn, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2);
d_o = d_o + ((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_o = dd_o - d_i2[rowcount + colcount*irow] / powf(u, 2.0);
if (counttry == 49){ // on the last count, construct fisher information matrix elements
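// fisher holds the 6x6 Fisher information matrix stored flat: each entry accumulates (du/dtheta_r)*(du/dtheta_c)/u over the pixels, and its inverse supplies the CRLB variances computed below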
fisher[0] += dudx*dudx / u;
fisher[1] += dudx*dudy / u;
fisher[2] += dudx*dudn / u;
fisher[3] += dudx*dudo / u;
fisher[4] += dudx*dudsx / u;
fisher[5] += dudx*dudsy / u;
fisher[6] += dudy*dudx / u;
fisher[7] += dudy*dudy / u;
fisher[8] += dudy*dudn / u;
fisher[9] += dudy*dudo / u;
fisher[10] += dudy*dudsx / u;
fisher[11] += dudy*dudsy / u;
fisher[12] += dudn*dudx / u;
fisher[13] += dudn*dudy / u;
fisher[14] += dudn*dudn / u;
fisher[15] += dudn*dudo / u;
fisher[16] += dudn*dudsx / u;
fisher[17] += dudn*dudsy / u;
fisher[18] += dudo*dudx / u;
fisher[19] += dudo*dudy / u;
fisher[20] += dudo*dudn / u;
fisher[21] += dudo*dudo / u;
fisher[22] += dudo*dudsx / u;
fisher[23] += dudo*dudsy / u;
fisher[24] += dudsx*dudx / u;
fisher[25] += dudsx*dudy / u;
fisher[26] += dudsx*dudn / u;
fisher[27] += dudsx*dudo / u;
fisher[28] += dudsx*dudsx / u;
fisher[29] += dudsx*dudsy / u;
fisher[30] += dudsy*dudx / u;
fisher[31] += dudsy*dudy / u;
fisher[32] += dudsy*dudn / u;
fisher[33] += dudsy*dudo / u;
fisher[34] += dudsy*dudsx / u;
fisher[35] += dudsy*dudsy / u;
llv += d_i2[rowcount + colcount*irow] * log(u + 0.0000000000000001) - u - d_i2[rowcount + colcount*irow] * log(d_i2[rowcount + colcount*irow] + 0.0000000000000001) + d_i2[rowcount + colcount*irow];
}
} // END FOR 3
} // END FOR2
// correct the beta1 estimates with the Newton-Raphson update
d_beta1[0] = d_beta1[0] - d_x / dd_x;
d_beta1[1] = d_beta1[1] - d_y / dd_y;
d_beta1[2] = d_beta1[2] - d_n / dd_n;
d_beta1[3] = d_beta1[3] - d_sx / dd_sx;
d_beta1[4] = d_beta1[4] - d_sy / dd_sy;
d_beta1[5] = d_beta1[5] - d_o / dd_o;
} // end FOR 1
if (d_beta1[0] == d_beta1[0] && d_beta1[1] == d_beta1[1] && d_beta1[2] == d_beta1[2] && d_beta1[5] == d_beta1[5] && d_beta1[3] == d_beta1[3] && d_beta1[4] == d_beta1[4] && d_beta1[5] == d_beta1[5]){ // begin is numeric if statement
if (d_beta1[2] > 0 && d_beta1[0] > xgrid[0] && d_beta1[0] < xgrid[irow*irow - 1] && d_beta1[1] < ygrid[irow*irow - 1] && d_beta1[1] > ygrid[0] && d_beta1[3] > 0 && d_beta1[3] < 100 && d_beta1[4] < 100 && d_beta1[4] > 0 && d_beta1[5] > 0){ // was the molecule inside the grid? Was N positive? if yes then record the point
d_xf_all[index] = d_beta1[0] + col_output; // correct position for x
d_yf_all[index] = d_beta1[1] + row_output; // correct position for y
d_N[index] = d_beta1[2];
d_sigx[index] = d_beta1[3];
d_sigy[index] = d_beta1[4];
d_off[index] = d_beta1[5];
d_framenum_all[index] = imnum;
d_llv[index] = llv;
// calculate crlb's for estimators
// UPDATE FOR SIGMA VALUES
det_fish = device_det(fisher); // the cofactor expressions below were generated with homemade Python scripts (cofacs.py and text_det.py) and checked against lower-rank matrices
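// Each CRLB below is the corresponding diagonal element of the inverse Fisher information matrix,
// i.e. CRLB(theta_k) = [F^-1]_kk = C_kk / det(F), where C_kk is the kk cofactor of the 6x6 matrix
// "fisher"; the long expressions that follow are those cofactors written out explicitly.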
d_xf_crlb[index] = (fisher[7] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[13] * (fisher[8] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) + fisher[19] * (fisher[8] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[25] * (fisher[8] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) + fisher[31] * (fisher[8] * 
(fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[26] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])))) / det_fish;
d_yf_crlb[index] = -(-(fisher[0] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[12] * (fisher[2] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[2] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) - fisher[24] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[30] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[29] 
- fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[26] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])))) / det_fish);
d_N_crlb[index] = (fisher[0] * (fisher[7] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[31] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[1] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * 
fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish;
d_off_crlb[index] = -(-(fisher[0] * (fisher[7] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[31] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] 
* fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish);
d_sigx_crlb[index] = (fisher[0] * (fisher[7] * (fisher[14] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) + fisher[32] * (fisher[15] * fisher[23] - fisher[21] * fisher[17])) - fisher[13] * (fisher[8] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[23] - fisher[21] * fisher[11])) + fisher[19] * (fisher[8] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[31] * (fisher[8] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) + fisher[20] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) + fisher[32] * (fisher[15] * fisher[23] - fisher[21] * fisher[17])) - fisher[13] * (fisher[2] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[23] - fisher[21] * fisher[5])) + fisher[19] * (fisher[2] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[23] - fisher[21] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[23] - fisher[21] * fisher[5])) + fisher[19] * (fisher[2] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) - fisher[8] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) - fisher[8] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[11] - fisher[9] * fisher[5]))) - fisher[18] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) - fisher[8] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]) - fisher[8] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]) + fisher[14] * (fisher[3] * fisher[11] - fisher[9] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - 
fisher[14] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) + fisher[20] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) - fisher[8] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[19] * (fisher[2] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]) - fisher[8] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]) + fisher[14] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])))) / det_fish;
d_sigy_crlb[index] = -(-(fisher[0] * (fisher[7] * (fisher[14] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) + fisher[26] * (fisher[15] * fisher[22] - fisher[21] * fisher[16])) - fisher[13] * (fisher[8] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[22] - fisher[21] * fisher[10])) + fisher[19] * (fisher[8] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[25] * (fisher[8] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) + fisher[20] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) + fisher[26] * (fisher[15] * fisher[22] - fisher[21] * fisher[16])) - fisher[13] * (fisher[2] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[22] - fisher[21] * fisher[4])) + fisher[19] * (fisher[2] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[22] - fisher[21] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[22] - fisher[21] * fisher[4])) + fisher[19] * (fisher[2] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) - fisher[8] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) - fisher[8] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[10] - fisher[9] * fisher[4]))) - fisher[18] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) - fisher[8] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]) - fisher[8] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]) + fisher[14] * (fisher[3] * fisher[10] - fisher[9] * fisher[4]))) + fisher[24] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - 
fisher[14] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) + fisher[20] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) - fisher[8] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[19] * (fisher[2] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]) - fisher[8] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]) + fisher[14] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])))) / det_fish);
}
else{ // if localization failed set all parameters to -1. These can easily be identified by molecules with framenum_all -1
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_framenum_all[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = -1;
}
} //end is numeric if statement
else{
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_framenum_all[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = -1;
} // end else fail statement
}
}// end if activated
}// end if and image
} // end gpu segment and loc 7
__global__ void segment_and_localize7(double *d_iall, // the gaussian is a separable filter and can be treated as such
double *d_a1, // makes these elements eligible for constant caching
double sigma,
double *xpix,
double *ypix,
double *d_xf_all,
double *d_yf_all,
double *d_N,
double *d_off,
double *d_sigx,
double *d_sigy,
double *d_framenum_all,
double *d_xf_crlb,
double *d_yf_crlb,
double *d_N_crlb,
double *d_off_crlb,
double *d_sigx_crlb,
double *d_sigy_crlb,
double *d_xpix,
double *d_ypix,
double * d_llv,
int numi,
int irow,
int icol,
double bkgn)
{
// Declare variables
double d_i2[9 * 9]; // per-thread copy of the 9x9 segment to fit (column major, matching xpix/ypix)
__shared__ double xgrid[9 * 9]; // allocate xpix and ypix variables to the shared memory of the block
__shared__ double ygrid[9 * 9]; // this will reduce calls to global device memory
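// Per-thread fitting variables used by the Newton iterations below. They appear not to be declared
// earlier in this kernel, so they are declared here; double is assumed to match the rest of the file.
double d_beta1[6]; // parameter estimates: x, y, N, sigma_x, sigma_y, offset
double fisher[36] = {0.0}; // 6x6 Fisher information matrix, row major
double det_fish = 0.0; // determinant of the Fisher information matrix
double d_x, d_y, d_n, d_sx, d_sy, d_o; // first-derivative sums of the log-likelihood
double dd_x, dd_y, dd_n, dd_sx, dd_sy, dd_o; // second-derivative sums of the log-likelihood
double dudx, dudy, dudn, dudo, dudsx, dudsy; // first derivatives of the model u
double d2udx2, d2udy2, d2udsx2, d2udsy2; // second derivatives of the model u
double u, Ex, Ey, llv; // model value, pixel integrals, log-likelihood value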
// Coordinate building
int tx = threadIdx.x; // local x coord
int ty = threadIdx.y; // local y coord
int tz = threadIdx.z;
// location of output pixel being analyzed; we do not need a localization apron
int row_output = blockIdx.y*O_TILE_WIDTH + ty; // gives y coordinate as a function of tile width; these lose meaning for (ty || tx) >= O_TILE_WIDTH
int col_output = blockIdx.x*O_TILE_WIDTH + tx; // gives x coordinate as a function of tile width
int imnum = blockIdx.z;
if (imnum < numi){
if (d_a1[row_output + irow*col_output + irow*icol*imnum] >0){ // the assumption at this point is if d_a1 has a value greater than 0 the neural net said to analyze
int row_input = row_output - 3; // EACH thread should load 1 input tile to the shared image as there are [BLOCK_WIDTH]x[BLOCK_WIDTH] threads in a block
int col_input = col_output - 3; // and BLOCK_WIDTH = O_TILE_WIDTH + MASK_WIDTH-1
int index = (int)d_a1[row_output + irow*col_output + irow*icol*imnum];
// Buffer data into block
// buffer segment into i2
for (int row = 0; row < 9; row++){
for (int col = 0; col < 9; col++){
xgrid[row + 9 * col] = d_xpix[row + 9 * col]; // load xpix and ypix (column major, 9x9)
ygrid[row + 9 * col] = d_ypix[row + 9 * col];
if ((row_input + row >= 0) && (row_input + row < irow) && (col_input + col >= 0) && (col_input + col < icol)){ // check the row/col indices to ensure they fall onto the input image
d_i2[row + 9 * col] = d_iall[row_input + row + (col_input + col)*irow + imnum*irow*icol]; // if true, the image value is written to the local array d_i2 for this thread
}
}
}// end counting of rows and columns; at this point the image to localize is contained in d_i2
// at this point we should have xpix ypix and the image to localize loaded to 1 core
// Determine the beta estimations
// Determining X and Y guesses
// center of mass approach
double xsum = 0.0;
double ysum = 0.0;
double sumt = 0.0;
for (int row = 0; row < 9; row++){
for (int col = 0; col < 9; col++){
sumt += d_i2[row + 9 * col]; // sum can also be used to determine N guess
xsum += xgrid[row + 9 * col] * d_i2[row + 9 * col];
ysum += ygrid[row + 9 * col] * d_i2[row + 9 * col];
}
} // end counting over rows
// final estimation of xguess and yguess as xcm and ycm
d_beta1[0] = xsum / sumt;
d_beta1[1] = ysum / sumt;
d_beta1[2] = sumt;
d_beta1[3] = sigma;
d_beta1[4] = sigma;
d_beta1[5] = bkgn;
// start the for loop iterations FOR 1
for (int counttry = 0; counttry < 50; counttry++){
d_x = 0.0;
d_y = 0.0;
d_n = 0.0;
d_sx = 0.0;
d_sy = 0.0;
d_o = 0.0;
dd_x = 0.0; //wipe incremental variables each loop to give correct correction factor
dd_y = 0.0;
dd_n = 0.0;
dd_sx = 0.0;
dd_sy = 0.0;
dd_o = 0.0;
u = 0;
Ey = 0;
Ex = 0;
llv = 0.0;
// Calculate pixel values for derivatives, 2nd derivatives, error functions and u
for (int rowcount = 0; rowcount < irow; rowcount++){ // FOR 2 loops over all rows
for (int colcount = 0; colcount < irow; colcount++){ // FOR 3 loops over all columns
// x/ygrid is column major (comes from MATLAB) and d_i2 is column major
// these three lines evaluate the fitting gaussian for the current iteration of parameters
Ex = 0.5 * (erf((xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])) - erf((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])));
Ey = 0.5 * (erf((ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])) - erf((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])));
u = d_beta1[2] * Ex*Ey + d_beta1[5];
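// Model for this pixel: an integrated (pixelated) Gaussian plus a constant offset,
//   Ex = 1/2 [ erf((x_i - x0 + 1/2)/(sqrt(2)*sx)) - erf((x_i - x0 - 1/2)/(sqrt(2)*sx)) ]
//   Ey = 1/2 [ erf((y_i - y0 + 1/2)/(sqrt(2)*sy)) - erf((y_i - y0 - 1/2)/(sqrt(2)*sy)) ]
//   u  = N*Ex*Ey + offset
// with (x0, y0, N, sx, sy, offset) = d_beta1[0..5].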
// first derivatives calculations
// these are done pixel by pixel with the sum added up in the d_x and dd_x areas
dudx = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[3] * d_beta1[3]))*(exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudy = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[4] * d_beta1[4]))*(exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudsx = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[3], 2.0)))* ((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) * exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudsy = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[4], 2.0)))* ((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) * exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudn = Ex*Ey;
dudo = 1.0;
// second derivatives
// these are calculated in a similar manner to the first derivatives
d2udx2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[3], 3.0))*((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))))*Ey;
d2udy2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[4], 3.0))*((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))))*Ex;
d2udsx2 = (Ey*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[3], -5.0)*(powf((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- powf((xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))
- 2 * powf(d_beta1[3], -3.0)*((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) *exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5) *exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))));
d2udsy2 = (Ex*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[4], -5.0)*(powf((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- powf((ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))
- 2 * powf(d_beta1[4], -3.0)*((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) *exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5) *exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))));
// summing variable to lead to correction factors
// these variables keep track of the correction which is given by summing over the entire pixel
d_x = d_x + dudx*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_x = dd_x + d2udx2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudx, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_y = d_y + dudy*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_y = dd_y + d2udy2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudy, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_n = d_n + dudn*((d_i2[rowcount + colcount*irow] / u) - 1.0);
d_sx = d_sx + dudsx*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_sx = dd_sx + d2udsx2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudsx, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_sy = d_sy + dudsy*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_sy = dd_sy + d2udsy2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudsy, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
dd_n = dd_n - powf(dudn, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2);
d_o = d_o + ((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_o = dd_o - d_i2[rowcount + colcount*irow] / powf(u, 2.0);
if (counttry == 49){ // on the last count, construct fisher information matrix elements
fisher[0] += dudx*dudx / u;
fisher[1] += dudx*dudy / u;
fisher[2] += dudx*dudn / u;
fisher[3] += dudx*dudo / u;
fisher[4] += dudx*dudsx / u;
fisher[5] += dudx*dudsy / u;
fisher[6] += dudy*dudx / u;
fisher[7] += dudy*dudy / u;
fisher[8] += dudy*dudn / u;
fisher[9] += dudy*dudo / u;
fisher[10] += dudy*dudsx / u;
fisher[11] += dudy*dudsy / u;
fisher[12] += dudn*dudx / u;
fisher[13] += dudn*dudy / u;
fisher[14] += dudn*dudn / u;
fisher[15] += dudn*dudo / u;
fisher[16] += dudn*dudsx / u;
fisher[17] += dudn*dudsy / u;
fisher[18] += dudo*dudx / u;
fisher[19] += dudo*dudy / u;
fisher[20] += dudo*dudn / u;
fisher[21] += dudo*dudo / u;
fisher[22] += dudo*dudsx / u;
fisher[23] += dudo*dudsy / u;
fisher[24] += dudsx*dudx / u;
fisher[25] += dudsx*dudy / u;
fisher[26] += dudsx*dudn / u;
fisher[27] += dudsx*dudo / u;
fisher[28] += dudsx*dudsx / u;
fisher[29] += dudsx*dudsy / u;
fisher[30] += dudsy*dudx / u;
fisher[31] += dudsy*dudy / u;
fisher[32] += dudsy*dudn / u;
fisher[33] += dudsy*dudo / u;
fisher[34] += dudsy*dudsx / u;
fisher[35] += dudsy*dudsy / u;
llv += d_i2[rowcount + colcount*irow] * log(u + 0.0000000000000001) - u - d_i2[rowcount + colcount*irow] * log(d_i2[rowcount + colcount*irow] + 0.0000000000000001) + d_i2[rowcount + colcount*irow];
}
} // END FOR 3
} // END FOR2
// correct beta1 values with tolerances
d_beta1[0] = d_beta1[0] - d_x / dd_x;
d_beta1[1] = d_beta1[1] - d_y / dd_y;
d_beta1[2] = d_beta1[2] - d_n / dd_n;
d_beta1[3] = d_beta1[3] - d_sx / dd_sx;
d_beta1[4] = d_beta1[4] - d_sy / dd_sy;
d_beta1[5] = d_beta1[5] - d_o / dd_o;
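// Newton-Raphson style update: each of the six parameters above moves by -(dl/dtheta)/(d2l/dtheta2)
// using the first- and second-derivative sums accumulated this iteration; the counttry loop runs 50
// iterations, with the Fisher matrix and log-likelihood built only on the final pass.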
} // end FOR 1
if (d_beta1[0] == d_beta1[0] && d_beta1[1] == d_beta1[1] && d_beta1[2] == d_beta1[2] && d_beta1[3] == d_beta1[3] && d_beta1[4] == d_beta1[4] && d_beta1[5] == d_beta1[5]){ // begin is-numeric if statement: a NaN never compares equal to itself
if (d_beta1[2] > 0 && d_beta1[0] > xgrid[0] && d_beta1[0] < xgrid[irow*irow - 1] && d_beta1[1] < ygrid[irow*irow - 1] && d_beta1[1] > ygrid[0] && d_beta1[3] > 0 && d_beta1[3] < 100 && d_beta1[4] < 100 && d_beta1[4] > 0 && d_beta1[5] > 0){ // was the molecule inside the grid? Was N positive? if yes then record the point
d_xf_all[index] = d_beta1[0] + col_output; // correct position for x
d_yf_all[index] = d_beta1[1] + row_output; // correct position for y
d_N[index] = d_beta1[2];
d_sigx[index] = d_beta1[3];
d_sigy[index] = d_beta1[4];
d_off[index] = d_beta1[5];
d_framenum_all[index] = imnum;
d_llv[index] = llv;
// calculate crlb's for estimators
// UPDATE FOR SIGMA VALUES
det_fish = device_det(fisher); // the cofactor expressions below were generated with homemade Python scripts (cofacs.py and text_det.py) and checked against lower-rank matrices
d_xf_crlb[index] = (fisher[7] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[13] * (fisher[8] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) + fisher[19] * (fisher[8] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[25] * (fisher[8] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) + fisher[31] * (fisher[8] * 
(fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[26] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])))) / det_fish;
d_yf_crlb[index] = -(-(fisher[0] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[12] * (fisher[2] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[2] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) - fisher[24] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[30] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[29] 
- fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[26] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])))) / det_fish);
d_N_crlb[index] = (fisher[0] * (fisher[7] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[31] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[1] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * 
fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish;
d_off_crlb[index] = -(-(fisher[0] * (fisher[7] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[31] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] 
* fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish);
d_sigx_crlb[index] = (fisher[0] * (fisher[7] * (fisher[14] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) + fisher[32] * (fisher[15] * fisher[23] - fisher[21] * fisher[17])) - fisher[13] * (fisher[8] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[23] - fisher[21] * fisher[11])) + fisher[19] * (fisher[8] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[31] * (fisher[8] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) + fisher[20] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) + fisher[32] * (fisher[15] * fisher[23] - fisher[21] * fisher[17])) - fisher[13] * (fisher[2] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[23] - fisher[21] * fisher[5])) + fisher[19] * (fisher[2] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[23] - fisher[21] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[23] - fisher[21] * fisher[5])) + fisher[19] * (fisher[2] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) - fisher[8] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) - fisher[8] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[11] - fisher[9] * fisher[5]))) - fisher[18] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) - fisher[8] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]) - fisher[8] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]) + fisher[14] * (fisher[3] * fisher[11] - fisher[9] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - 
fisher[14] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) + fisher[20] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) - fisher[8] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[19] * (fisher[2] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]) - fisher[8] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]) + fisher[14] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])))) / det_fish;
d_sigy_crlb[index] = -(-(fisher[0] * (fisher[7] * (fisher[14] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) + fisher[26] * (fisher[15] * fisher[22] - fisher[21] * fisher[16])) - fisher[13] * (fisher[8] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[22] - fisher[21] * fisher[10])) + fisher[19] * (fisher[8] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[25] * (fisher[8] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) + fisher[20] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) + fisher[26] * (fisher[15] * fisher[22] - fisher[21] * fisher[16])) - fisher[13] * (fisher[2] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[22] - fisher[21] * fisher[4])) + fisher[19] * (fisher[2] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[22] - fisher[21] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[22] - fisher[21] * fisher[4])) + fisher[19] * (fisher[2] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) - fisher[8] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) - fisher[8] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[10] - fisher[9] * fisher[4]))) - fisher[18] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) - fisher[8] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]) - fisher[8] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]) + fisher[14] * (fisher[3] * fisher[10] - fisher[9] * fisher[4]))) + fisher[24] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - 
fisher[14] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) + fisher[20] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) - fisher[8] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[19] * (fisher[2] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]) - fisher[8] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]) + fisher[14] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])))) / det_fish);
}
		else{ // if localization failed, set all parameters to -1. These can easily be identified as molecules with framenum_all == -1
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_framenum_all[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = -1;
}
} //end is numeric if statement
else{
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_framenum_all[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = -1;
} // end else fail statement
}
}// end if activated
}// end if and image
} // end gpu segment and loc 9
void __global__ segment_and_localize11(double *d_iall, // the gaussian is a separable filter and can be treated as such
double *d_a1, // makes these elements eligible for constant caching
double sigma,
double *xpix,
double *ypix,
double *d_xf_all,
double *d_yf_all,
double *d_N,
double *d_off,
double *d_sigx,
double *d_sigy,
double *d_framenum_all,
double *d_xf_crlb,
double *d_yf_crlb,
double *d_N_crlb,
double *d_off_crlb,
double *d_sigx_crlb,
double *d_sigy_crlb,
double *d_xpix,
double *d_ypix,
double * d_llv,
int numi,
int irow,
int icol)
{
// Declare variables
	double d_i2[11 * 11];				// per-thread copy of the 11 x 11 sub-image being fit
	__shared__ double xgrid[11 * 11];	// allocate xpix and ypix variables to the shared memory of the blocks
	__shared__ double ygrid[11 * 11];	// this will reduce calls to global device memory
	// local fitting variables used below
	double d_beta1[6], fisher[36] = { 0 }, det_fish = 0.0;
	double d_x, d_y, d_n, d_sx, d_sy, d_o, dd_x, dd_y, dd_n, dd_sx, dd_sy, dd_o;
	double u, Ex, Ey, llv, dudx, dudy, dudn, dudo, dudsx, dudsy, d2udx2, d2udy2, d2udsx2, d2udsy2;
// Coordinate building
int tx = threadIdx.x; // local x coord
int ty = threadIdx.y; // local y coord
int tz = threadIdx.z;
	// location of the output pixel being analyzed; we do not need a localization apron
	int row_output = blockIdx.y*O_TILE_WIDTH + ty;		// gives y coordinate as a function of tile width; values of ty or tx >= O_TILE_WIDTH fall outside the output tile
	int col_output = blockIdx.x*O_TILE_WIDTH + tx;		// gives x coordinate as a function of tile width
int imnum = blockIdx.z;
if (imnum < numi){
		if (d_a1[row_output + col_output*irow + imnum*irow*icol] > 0){ // the assumption at this point is that if d_a1 has a value greater than 0 the neural net said to analyze
			int row_input = row_output - 5; // EACH thread loads its own 11 x 11 sub-image; BLOCK_WIDTH = O_TILE_WIDTH + MASK_WIDTH-1
			int col_input = col_output - 5; // for an 11 x 11 mask the apron is (11 - 1)/2 = 5
			int index = (int)d_a1[row_output + col_output*irow + imnum*irow*icol];
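			/* Note (inferred from the surrounding code, not stated explicitly in the source):
			 * d_a1 appears to serve double duty -- a value > 0 marks a pixel the neural net
			 * flagged for fitting, and that same value, cast to int, is the slot of the output
			 * arrays (d_xf_all[index], d_yf_all[index], ...) that this thread will write. */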
// Buffer data into block
// buffer segment into i2
			for (int row = 0; row < 11; row++){
				for (int col = 0; col < 11; col++){
					xgrid[row + 11 * col] = d_xpix[row + 11 * col];		// load x and ypix
					ygrid[row + 11 * col] = d_ypix[row + 11 * col];
					if ((row_input + row >= 0) && (row_input + row < irow) && (col_input + col >= 0) && (col_input + col < icol)){ // if statement checks the row/col indices to ensure they fall onto the input image
						d_i2[row + 11 * col] = d_iall[row_input + row + (col_input + col)*irow + imnum*irow*icol]; // if true, the value of the image is read into the local array d_i2
					}
					else{
						d_i2[row + 11 * col] = 0; // pixels that fall off the image edge contribute nothing to the fit
					}
				}
			}// end counting of rows and columns at this point the image to localize is contained in d_i2
// at this point we should have xpix ypix and the image to localize loaded to 1 core
// Determine the beta estimations
// Determining X and Y guesses
// center of mass approach
double xsum = 0.0;
double ysum = 0.0;
double sumt = 0;
double mina = 1000000;
			for (int row = 0; row < 11; row++){
				for (int col = 0; col < 11; col++){
					sumt += d_i2[row + 11 * col]; // sum can also be used to determine N guess
					xsum += xgrid[row + 11 * col] * d_i2[row + 11 * col];
					ysum += ygrid[row + 11 * col] * d_i2[row + 11 * col];
					if (d_i2[row + 11 * col] < mina){ // find minimum value
						mina = d_i2[row + 11 * col];
					}
				}
			} // end counting over rows
// final estimation of xguess and yguess as xcm and ycm
d_beta1[0] = xsum / sumt;
d_beta1[1] = ysum / sumt;
d_beta1[2] = sumt;
d_beta1[3] = sigma;
d_beta1[4] = sigma;
d_beta1[5] = mina;
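			/* Initial-guess summary (a restatement of the code above): with I(r,c) the buffered
			 * sub-image, the starting parameter vector is
			 *   beta1 = [ x_cm, y_cm, N, sigx, sigy, b ]
			 * where x_cm = sum(x*I)/sum(I) and y_cm = sum(y*I)/sum(I) are centers of mass,
			 * N = sum(I) approximates the photon count, sigx = sigy = sigma is the user-supplied
			 * PSF width, and b = min(I) approximates the background offset. */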
// start the for loop iterations FOR 1
for (int counttry = 0; counttry < 50; counttry++){
d_x = 0.0;
d_y = 0.0;
d_n = 0.0;
d_sx = 0.0;
d_sy = 0.0;
d_o = 0.0;
dd_x = 0.0; //wipe incremental variables each loop to give correct correction factor
dd_y = 0.0;
dd_n = 0.0;
dd_sx = 0.0;
dd_sy = 0.0;
dd_o = 0.0;
u = 0;
Ey = 0;
Ex = 0;
llv = 0.0;
// Calculate pixel values for derivatives, 2nd derivatives, errorfunctions and u
for (int rowcount = 0; rowcount < 11; rowcount++){ // FOR 2 loops over all rows
for (int colcount = 0; colcount < 11; colcount++){ // FOR 3 loops over all columns
					// x/ygrid are column-major (they come from MATLAB) and d_i2 is column-major
					// these three lines define the fitting gaussian for the current iteration of parameters
Ex = 0.5 * (erf((xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])) - erf((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])));
Ey = 0.5 * (erf((ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])) - erf((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])));
u = d_beta1[2] * Ex*Ey + d_beta1[5];
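					/* Model being fit (restating the two erf terms above): each pixel expectation is
					 *   u(r,c) = N * Ex * Ey + b,  with
					 *   Ex = 0.5*[ erf((x - x0 + 0.5)/(sqrt(2)*sigx)) - erf((x - x0 - 0.5)/(sqrt(2)*sigx)) ]
					 * and Ey defined analogously in y, i.e. a 2-D Gaussian integrated over the unit
					 * pixel area rather than sampled at the pixel center. */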
// first derivatives calculations
// these are done pixel by pixel with the sum added up in the d_x and dd_x areas
dudx = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[3] * d_beta1[3]))*(exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudy = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[4] * d_beta1[4]))*(exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudsx = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[3], 2.0)))* ((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) * exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudsy = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[4], 2.0)))* ((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) * exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudn = Ex*Ey;
dudo = 1.0;
// second derivatives
					// these are calculated in a similar manner to the first derivatives
d2udx2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[3], 3.0))*((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))))*Ey;
d2udy2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[4], 3.0))*((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))))*Ex;
d2udsx2 = (Ey*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[3], -5.0)*(powf((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- powf((xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))
- 2 * powf(d_beta1[3], -3.0)*((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) *exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5) *exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))));
d2udsy2 = (Ex*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[4], -5.0)*(powf((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- powf((ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))
- 2 * powf(d_beta1[4], -3.0)*((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) *exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5) *exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))));
// summing variable to lead to correction factors
// these variables keep track of the correction which is given by summing over the entire pixel
d_x = d_x + dudx*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_x = dd_x + d2udx2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudx, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_y = d_y + dudy*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_y = dd_y + d2udy2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudy, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_n = d_n + dudn*((d_i2[rowcount + colcount*irow] / u) - 1.0);
d_sx = d_sx + dudsx*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_sx = dd_sx + d2udsx2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudsx, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_sy = d_sy + dudsy*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_sy = dd_sy + d2udsy2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudsy, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
dd_n = dd_n - powf(dudn, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2);
d_o = d_o + ((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_o = dd_o - d_i2[rowcount + colcount*irow] / powf(u, 2.0);
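					/* The accumulators above implement the Poisson maximum-likelihood gradient and
					 * curvature for each parameter theta:
					 *   dL/dtheta   = sum_pixels du/dtheta * (I/u - 1)
					 *   d2L/dtheta2 = sum_pixels d2u/dtheta2 * (I/u - 1) - (du/dtheta)^2 * I/u^2
					 * which feed the Newton-style update applied after the pixel loops. */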
if (counttry == 49){ // on the last count, construct fisher information matrix elements
fisher[0] += dudx*dudx / u;
fisher[1] += dudx*dudy / u;
fisher[2] += dudx*dudn / u;
fisher[3] += dudx*dudo / u;
fisher[4] += dudx*dudsx / u;
fisher[5] += dudx*dudsy / u;
fisher[6] += dudy*dudx / u;
fisher[7] += dudy*dudy / u;
fisher[8] += dudy*dudn / u;
fisher[9] += dudy*dudo / u;
						fisher[10] += dudy*dudsx / u;
						fisher[11] += dudy*dudsy / u;
fisher[12] += dudn*dudx / u;
fisher[13] += dudn*dudy / u;
fisher[14] += dudn*dudn / u;
fisher[15] += dudn*dudo / u;
fisher[16] += dudn*dudsx / u;
fisher[17] += dudn*dudsy / u;
fisher[18] += dudo*dudx / u;
fisher[19] += dudo*dudy / u;
fisher[20] += dudo*dudn / u;
fisher[21] += dudo*dudo / u;
fisher[22] += dudo*dudsx / u;
fisher[23] += dudo*dudsy / u;
fisher[24] += dudsx*dudx / u;
fisher[25] += dudsx*dudy / u;
fisher[26] += dudsx*dudn / u;
fisher[27] += dudsx*dudo / u;
fisher[28] += dudsx*dudsx / u;
fisher[29] += dudsx*dudsy / u;
fisher[30] += dudsy*dudx / u;
fisher[31] += dudsy*dudy / u;
fisher[32] += dudsy*dudn / u;
fisher[33] += dudsy*dudo / u;
fisher[34] += dudsy*dudsx / u;
fisher[35] += dudsy*dudsy / u;
llv += d_i2[rowcount + colcount*irow] * log(u + 0.0000000000000001) - u - d_i2[rowcount + colcount*irow] * log(d_i2[rowcount + colcount*irow] + 0.0000000000000001) + d_i2[rowcount + colcount*irow];
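						/* llv accumulates the Poisson log-likelihood ratio of the fit against a saturated
						 * model, sum_pixels [ I*ln(u) - u - I*ln(I) + I ]; the tiny 1e-16 terms only guard
						 * against ln(0) and do not change the value meaningfully. */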
}
} // END FOR 3
} // END FOR2
				// apply the Newton-Raphson corrections to the parameter estimates
d_beta1[0] = d_beta1[0] - d_x / dd_x;
d_beta1[1] = d_beta1[1] - d_y / dd_y;
d_beta1[2] = d_beta1[2] - d_n / dd_n;
d_beta1[3] = d_beta1[3] - d_sx / dd_sx;
d_beta1[4] = d_beta1[4] - d_sy / dd_sy;
d_beta1[5] = d_beta1[5] - d_o / dd_o;
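				/* Each line above is a 1-D Newton-Raphson step, theta <- theta - (dL/dtheta)/(d2L/dtheta2),
				 * applied independently per parameter; the full 6x6 Hessian is never inverted during the
				 * 50 fixed iterations, only the per-parameter curvature terms are used. */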
} // end FOR 1
			if (d_beta1[0] == d_beta1[0] && d_beta1[1] == d_beta1[1] && d_beta1[2] == d_beta1[2] && d_beta1[3] == d_beta1[3] && d_beta1[4] == d_beta1[4] && d_beta1[5] == d_beta1[5]){ // begin is-numeric if statement (x == x is false only when x is NaN)
if (d_beta1[2] > 0 && d_beta1[0] > xgrid[0] && d_beta1[0] < xgrid[irow*irow - 1] && d_beta1[1] < ygrid[irow*irow - 1] && d_beta1[1] > ygrid[0] && d_beta1[3] > 0 && d_beta1[3] < 100 && d_beta1[4] < 100 && d_beta1[4] > 0 && d_beta1[5] > 0){ // was the molecule inside the grid? Was N positive? if yes then record the point
d_xf_all[index] = d_beta1[0] + col_output; // correct position for x
d_yf_all[index] = d_beta1[1] + row_output; // correct position for y
d_N[index] = d_beta1[2];
d_sigx[index] = d_beta1[3];
d_sigy[index] = d_beta1[4];
d_off[index] = d_beta1[5];
d_framenum_all[index] = imnum;
d_llv[index] = llv;
// calculate crlb's for estimators
// UPDATE FOR SIGMA VALUES
					det_fish = device_det(fisher); // these values were determined using a homemade Python code called cofacs.py and text_det.py and checked against lower rank matrices
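					/* CRLB background (standard estimation theory, not stated in the source): the Fisher
					 * information accumulated above is F_ij = sum_pixels (du/dtheta_i)(du/dtheta_j)/u, and
					 * the Cramer-Rao lower bound on the variance of parameter i is [F^-1]_ii. The large
					 * expressions below are the cofactor expansions of those diagonal elements of F^-1,
					 * each divided by det(F) as computed by device_det(). */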
d_xf_crlb[index] = (fisher[7] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[13] * (fisher[8] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) + fisher[19] * (fisher[8] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[25] * (fisher[8] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) + fisher[31] * (fisher[8] * 
(fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[26] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])))) / det_fish;
d_yf_crlb[index] = -(-(fisher[0] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[12] * (fisher[2] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[2] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) - fisher[24] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[30] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[29] 
- fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[26] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])))) / det_fish);
d_N_crlb[index] = (fisher[0] * (fisher[7] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[31] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[1] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * 
fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish;
d_off_crlb[index] = -(-(fisher[0] * (fisher[7] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[31] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] 
* fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish);
d_sigx_crlb[index] = (fisher[0] * (fisher[7] * (fisher[14] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) + fisher[32] * (fisher[15] * fisher[23] - fisher[21] * fisher[17])) - fisher[13] * (fisher[8] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[23] - fisher[21] * fisher[11])) + fisher[19] * (fisher[8] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[31] * (fisher[8] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) + fisher[20] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) + fisher[32] * (fisher[15] * fisher[23] - fisher[21] * fisher[17])) - fisher[13] * (fisher[2] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[23] - fisher[21] * fisher[5])) + fisher[19] * (fisher[2] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[23] - fisher[21] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[23] - fisher[21] * fisher[5])) + fisher[19] * (fisher[2] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) - fisher[8] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) - fisher[8] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[11] - fisher[9] * fisher[5]))) - fisher[18] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) - fisher[8] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]) - fisher[8] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]) + fisher[14] * (fisher[3] * fisher[11] - fisher[9] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - 
fisher[14] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) + fisher[20] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) - fisher[8] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[19] * (fisher[2] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]) - fisher[8] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]) + fisher[14] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])))) / det_fish;
d_sigy_crlb[index] = -(-(fisher[0] * (fisher[7] * (fisher[14] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) + fisher[26] * (fisher[15] * fisher[22] - fisher[21] * fisher[16])) - fisher[13] * (fisher[8] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[22] - fisher[21] * fisher[10])) + fisher[19] * (fisher[8] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[25] * (fisher[8] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) + fisher[20] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) + fisher[26] * (fisher[15] * fisher[22] - fisher[21] * fisher[16])) - fisher[13] * (fisher[2] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[22] - fisher[21] * fisher[4])) + fisher[19] * (fisher[2] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[22] - fisher[21] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[22] - fisher[21] * fisher[4])) + fisher[19] * (fisher[2] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) - fisher[8] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) - fisher[8] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[10] - fisher[9] * fisher[4]))) - fisher[18] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) - fisher[8] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]) - fisher[8] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]) + fisher[14] * (fisher[3] * fisher[10] - fisher[9] * fisher[4]))) + fisher[24] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - 
fisher[14] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) + fisher[20] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) - fisher[8] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[19] * (fisher[2] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]) - fisher[8] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]) + fisher[14] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])))) / det_fish);
}
				else{ // if localization failed, set all parameters to -1. These can easily be identified as molecules with framenum_all == -1
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_framenum_all[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = -1;
}
} //end is numeric if statement
else{
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_framenum_all[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = -1;
} // end else fail statement
}
}// end if activated
}// end if and image
} // end gpu segment and loc 11
/*
THIS IS THE SECTION FOR IDENTIFICATION
*/
/*
* Host code
*
*
*/
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
/* Declare all variables.*/
double *iall; // the pointer to the array of all images to be analyzed
double *a1;
double *sigma;
double *d_iall; // Pointer to image array on gpu
double *d_a1; // pointer to count array on gpu
double *d_framenum_all;
double *d_x_cm; // pointer to parameters on device
double *d_y_cm;
double *d_xf_all;
double *d_yf_all;
double *d_sigx_all;
double *d_sigy_all;
double *d_N;
double *d_off;
double *d_llv;
double *d_xf_crlb;
double *d_yf_crlb;
double *d_N_crlb;
double *d_off_crlb;
double *d_sigx_crlb;
double * d_sigy_crlb;
double *xpix;
double *ypix;
double *d_xpix;
double *d_ypix;
	double *bkgn;
	double *mol_num;			// number of molecules to fit (1 x 1 double from MATLAB)
	double *d_framenum_temp;	// scratch frame-number array allocated below
	int irow; // number of rows in each image (should equal the number of columns)
	int icol; // number of columns in each image
int numi; // number of images imported
int arow;
int acol;
int numa;
int widths;
const int *idims, *adims, *xdims, *ydims;
/* Throw an error if the input does not match expectations. */
if (nrhs != 9) {
printf("Must have 8 inputs ( iall, a1, width, xpix, ypix, mol_num, sigs, fx_all, bkgn) line: %d\n", __LINE__);
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[0]) || mxIsComplex(prhs[0])){
printf("iall must be a m x n x numel(iall(1,1,:)) double array\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[1]) || mxIsComplex(prhs[1])){
printf("a1 must be a m x n xnumel(iall(1,1,:)) double array\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[2]) || mxIsComplex(prhs[2])){
printf("Width must be a l x 1 double array\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[3]) || mxIsComplex(prhs[3])){
printf("xpix must be a width x witdh double array\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[4]) || mxIsComplex(prhs[4])){
printf("ypix must be a width x witdh double array\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[5]) || mxIsComplex(prhs[5])){
printf("mol_num must be a 1 x 1 double array\n");
mexErrMsgTxt("See Error above!\n");
}
// get pointer to input arguments
	iall = (double *)mxGetPr(prhs[0]); // MATLAB linearizes in a column-major format which affects indexing (Writing MATLAB C/MEX Code - ResearchGate)
idims = mxGetDimensions(prhs[0]); // get dimensions of image array
icol = (int)idims[1];
irow = (int)idims[0];
numi = (int)idims[2]; // get number of images perblock from matlab
if (numi > 10000000 || numi < 1){
numi = 1;
}
// get dimensions of activation image
	a1 = (double *)mxGetPr(prhs[1]); // a1 is the second input; MATLAB linearizes in a column-major format which affects indexing
	adims = mxGetDimensions(prhs[1]); // get dimensions of activation image array
acol = (int)adims[1];
arow = (int)adims[0];
numa = (int)adims[2]; // get number of images perblock from matlab
if (numa > 10000000 || numa < 1){
numa = 1;
}
	// get width (scalar)
	widths = (int)mxGetScalar(prhs[2]);
// get xpix dims
xpix = (double *)mxGetPr(prhs[3]);
xdims = mxGetDimensions(prhs[3]);
// get ypix dims
ypix = (double *)mxGetPr(prhs[4]);
ydims = mxGetDimensions(prhs[4]);
// get number of molecules
	mol_num = (double *)mxGetPr(prhs[5]);
	sigma = (double *)mxGetPr(prhs[6]);
	bkgn = (double *)mxGetPr(prhs[8]);
	const int num_mol = (int)mol_num[0];	// scalar molecule count used to size the device output arrays
// EVERYONE LOVES SOME GOOD VARIABLE CHECKING!!!!!!!!!!!!!!
if (icol != acol){
printf("a1 and iall must have same number of columns\n");
mexErrMsgTxt("See Above Error!\n");
}
if (irow != arow){
printf("a1 and iall must have same number of rows\n");
mexErrMsgTxt("See Above Error!\n");
}
if (numi != numa){
printf("a1 and iall must have same number of frames\n");
mexErrMsgTxt("See Above Error!\n");
}
if (xdims[0] != ydims[0]){
printf("xpix and ypix must have same number of columns\n");
mexErrMsgTxt("See Above Error!\n");
}
if (xdims[1] != ydims[1]){
printf("xpix and ypix must have same number of rows\n");
mexErrMsgTxt("See Above Error!\n");
}
// Did the User declare an output?
if (nlhs != 14){
printf("You must have 14 output variables [xf_all, yf_all, N, off_all, sigx_all, sigy_all, xf_crlb, yf_crlb, N_crlb, off_crlb, sigx_crlb, sigy_crlb, llv_all, framenum_all]\n");
mexErrMsgTxt("See Error above!\n");
}
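	/* Example call from MATLAB (illustrative only -- the compiled MEX name "gpu_loc" and the
	 * variable names are placeholders; argument order follows the checks above):
	 *   [xf, yf, N, off, sigx, sigy, xf_crlb, yf_crlb, N_crlb, off_crlb, ...
	 *    sigx_crlb, sigy_crlb, llv, framenum] = gpu_loc(iall, a1, 11, xpix, ypix, mol_num, sigs, fx_all, bkgn);
	 */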
hipDeviceReset();
// allocate memory on the gpu device
hipError_t err1 = hipMalloc((void**)&d_iall, irow*icol*(numi)*sizeof(double)); // allocate image memory
if (err1 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err1), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
hipError_t err2 = hipMalloc((void**)&d_a1, irow*icol*numa*sizeof(double)); // allocate a1 memory
if (err2 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err2), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
hipError_t err3 = hipMalloc((void**)&d_xpix, widths*widths*sizeof(double)); // allocate xpix
if (err3 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err3), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
hipError_t err4 = hipMalloc((void**)&d_ypix, widths*widths*sizeof(double)); // allocate ypix
if (err4 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err4), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
	hipError_t err5 = hipMalloc((void**)&d_llv, num_mol*sizeof(double)); // allocate llv array on gpu
if (err5 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err5), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
	hipError_t err6 = hipMalloc((void**)&d_N, num_mol*sizeof(double)); // allocate N array on gpu
if (err6 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err6), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
	hipError_t err7 = hipMalloc((void**)&d_off, num_mol*sizeof(double)); // allocate offset array on gpu
if (err7 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err7), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
	hipError_t err8 = hipMalloc((void**)&d_yf_all, num_mol*sizeof(double)); // allocate yf_all array on gpu
if (err8 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err8), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
	hipError_t err9 = hipMalloc((void**)&d_xf_all, num_mol*sizeof(double)); // allocate xf_all array on gpu
if (err9 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err9), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
	hipError_t err10 = hipMalloc((void**)&d_framenum_temp, num_mol*sizeof(double)); // allocate framenum_temp array on gpu
if (err10 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err10), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
	hipError_t err11 = hipMalloc((void**)&d_framenum_all, num_mol*sizeof(double)); // allocate framenum_all array on gpu
if (err11 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err11), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
	hipError_t err12 = hipMalloc((void**)&d_sigx_all, num_mol*sizeof(double)); // allocate sigx_all array on gpu
if (err12 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err12), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
	hipError_t err13 = hipMalloc((void**)&d_sigy_all, num_mol*sizeof(double)); // allocate sigy_all array on gpu
if (err13 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err13), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
	hipError_t err14 = hipMalloc((void**)&d_xf_crlb, num_mol*sizeof(double)); // Allocate xf_crlb array on gpu
if (err14 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err14), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
	hipError_t err15 = hipMalloc((void**)&d_yf_crlb, num_mol*sizeof(double)); // allocate yf_crlb array on gpu
if (err15 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err15), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
	hipError_t err16 = hipMalloc((void**)&d_N_crlb, num_mol*sizeof(double)); // allocate N_crlb array on gpu
if (err16 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err16), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
hipError_t err17 = hipMalloc((void**)&d_off_crlb, mol_num*sizeof(double)); // allocate Off_crlb array on gpu
if (err17 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err17), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
hipError_t err18 = hipMalloc((void**)&d_sigx_crlb, mol_num*sizeof(double)); // allocate sigx_crlb array on gpu
if (err18 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err18), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
hipError_t err19 = hipMalloc((void**)&d_sigy_crlb, mol_num*sizeof(double)); // allocate sigy_crlb array on gpu
if (err19 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err19), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
// copy data from host to device
hipError_t err20 = hipMemcpy(d_iall, iall, irow*icol*(numi)*sizeof(double), hipMemcpyHostToDevice); // copy image data to gpu
if (err20 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err20), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
hipError_t err21 = hipMemcpy(d_a1, a1, arow*acol*numa*sizeof(double), hipMemcpyHostToDevice); // copy a1 data to gpu
if (err21 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err21), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
hipError_t err22 = hipMemcpy(d_xpix, xpix, widths*widths*sizeof(double), hipMemcpyHostToDevice); // copy xpix data to gpu
if (err22 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err22), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
hipError_t err23 = hipMemcpy(d_ypix, ypix, widths*widths*sizeof(double), hipMemcpyHostToDevice); // copy ypix data to gpu
if (err23 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err23), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
/* Run GPU kernel*/
dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH); // run 2-D gpu kernel to help with indexing
dim3 dimGrid((icol - 1) / O_TILE_WIDTH + 1, (irow - 1) / O_TILE_WIDTH + 1, numi );
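	/* Worked sizing example (the image dimensions here are assumptions, not values read from the inputs): for a
	 * stack of 1000 frames of 256 x 256 pixels, dimGrid = ((256-1)/25 + 1, (256-1)/25 + 1, 1000) = (11, 11, 1000)
	 * blocks, so every O_TILE_WIDTH x O_TILE_WIDTH output tile of every frame is covered by exactly one block. */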
switch (widths)
{
case 7:
segment_and_localize7 << < dimGrid, dimBlock >> >(d_iall, d_a1, sigma, d_xpix, d_ypix, d_xf_all, d_yf_all, d_N, d_off, d_sigx_all, d_sigy_all, d_framenum_all, d_xf_crlb, d_yf_crlb, d_N_crlb, d_off_crlb, d_sigx_crlb, d_sigy_crlb, d_xpix, d_ypix, d_llv, numi, irow, icol, bkgn);
break;
case 9:
segment_and_localize9 << < dimGrid, dimBlock >> >(d_iall, d_a1, sigma, d_xpix, d_ypix, d_xf_all, d_yf_all, d_N, d_off, d_sigx_all, d_sigy_all, d_framenum_all, d_xf_crlb, d_yf_crlb, d_N_crlb, d_off_crlb, d_sigx_crlb, d_sigy_crlb, d_xpix, d_ypix, d_llv, numi, irow, icol, bkgn);
break;
case 11:
segment_and_localize11 << < dimGrid, dimBlock >> >(d_iall, d_a1, sigma, d_xpix, d_ypix, d_xf_all, d_yf_all, d_N, d_off, d_sigx_all, d_sigy_all, d_framenum_all, d_xf_crlb, d_yf_crlb, d_N_crlb, d_off_crlb, d_sigx_crlb, d_sigy_crlb, d_xpix, d_ypix, d_llv, numi, irow, icol, bkgn);
break;
default:
printf("Image size is inappropriate please choose either 7x7, 9x9, or 11x11 size\n");
mexErrMsgTxt("See Error Above!\n");
break;
}
	/* Copy data back to mxArray pointers for output:
	 * duplicate an input array of equal size to the output array,
	 * get its data pointer into a variable,
	 * then copy the device data to the location that pointer points to, which becomes the output.
	 */
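	/* A minimal sketch of that pattern for one hypothetical output "foo" (the name is illustrative only):
	 *
	 *   plhs[k] = mxDuplicateArray(prhs[7]);                                          // clone a host array of the required size
	 *   double *foo = (double *)mxGetPr(plhs[k]);                                     // grab its data pointer
	 *   hipError_t errf = hipMemcpy(foo, d_foo, n*sizeof(double), hipMemcpyDeviceToHost); // fill it from the device buffer
	 *
	 * The blocks below repeat exactly this pattern for each of the fourteen outputs.
	 */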
/*
hipError_t errk1 = hipPeekAtLastError();
if (errk1 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(errk1), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
*/
hipError_t err24 = hipDeviceSynchronize();
if (err24 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err24), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[11] = mxDuplicateArray(prhs[7]);
double *sigy_crlb = (double *)mxGetPr(plhs[11]);
hipError_t err25 = hipMemcpy(sigy_crlb, d_sigy_crlb, numi*sizeof(double), hipMemcpyDeviceToHost); // copy sigy_crlb data
if (err25 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err25), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[12] = mxDuplicateArray(prhs[7]);
double *llv = (double *)mxGetPr(plhs[12]);
hipError_t err26 = hipMemcpy(llv, d_llv, numi*sizeof(double), hipMemcpyDeviceToHost); // copy llv data
if (err26 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err26), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[13] = mxDuplicateArray(prhs[7]);
double *framenum_all = (double *)mxGetPr(plhs[13]);
hipError_t err27 = hipMemcpy(framenum_all, d_framenum_all, numi*sizeof(double), hipMemcpyDeviceToHost); // copy framenum_all data
if (err27 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err27), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[0] = mxDuplicateArray(prhs[7]);
	double *xf_all = (double *)mxGetPr(plhs[0]);
hipError_t err28 = hipMemcpy(xf_all, d_xf_all, numi*sizeof(double), hipMemcpyDeviceToHost); // copy xf_all data
if (err28 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err28), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[1] = mxDuplicateArray(prhs[7]);
double *yf_all = (double *)mxGetPr(plhs[1]);
hipError_t err29 = hipMemcpy(yf_all, d_yf_all, numi*sizeof(double), hipMemcpyDeviceToHost); // copy yf_all data
if (err29 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err29), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[2] = mxDuplicateArray(prhs[7]);
double *N = (double *)mxGetPr(plhs[2]);
hipError_t err30 = hipMemcpy(N, d_N, numi*sizeof(double), hipMemcpyDeviceToHost); // copy N data
if (err30 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err30), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[3] = mxDuplicateArray(prhs[7]);
double *off_all = (double *)mxGetPr(plhs[3]);
hipError_t err31 = hipMemcpy(off_all, d_off, numi*sizeof(double), hipMemcpyDeviceToHost); // copy off_all data
if (err31 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err31), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[4] = mxDuplicateArray(prhs[7]);
double *sig_x = (double *)mxGetPr(plhs[4]);
hipError_t err32 = hipMemcpy(sig_x, d_sigx_all, numi*sizeof(double), hipMemcpyDeviceToHost); // copy sigx data
if (err32 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err32), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[5] = mxDuplicateArray(prhs[7]);
double *sig_y = (double *)mxGetPr(plhs[5]);
hipError_t err33 = hipMemcpy(sig_y, d_sigy_all, numi*sizeof(double), hipMemcpyDeviceToHost); // copy sigy data
if (err33 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err33), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[6] = mxDuplicateArray(prhs[7]);
double *xf_crlb = (double *)mxGetPr(plhs[6]);
hipError_t err34 = hipMemcpy(xf_crlb, d_xf_crlb, numi*sizeof(double), hipMemcpyDeviceToHost); // copy xf_crlb data
if (err34 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err34), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
	}
plhs[7] = mxDuplicateArray(prhs[7]);
double *yf_crlb = (double *)mxGetPr(plhs[7]);
hipError_t err35 = hipMemcpy(yf_crlb, d_yf_crlb, numi*sizeof(double), hipMemcpyDeviceToHost); // copy yf_crlb data
if (err35 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err35), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[8] = mxDuplicateArray(prhs[7]);
double *N_crlb = (double *)mxGetPr(plhs[8]);
hipError_t err36 = hipMemcpy(N_crlb, d_N_crlb, numi*sizeof(double), hipMemcpyDeviceToHost); // copy N_crlb data
if (err36 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err36), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[9] = mxDuplicateArray(prhs[7]);
double *off_crlb = (double *)mxGetPr(plhs[9]);
hipError_t err37 = hipMemcpy(off_crlb, d_off_crlb, numi*sizeof(double), hipMemcpyDeviceToHost); // copy off_crlb data
if (err37 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err37), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[10] = mxDuplicateArray(prhs[7]);
double *sigx_crlb = (double *)mxGetPr(plhs[10]);
hipError_t err38 = hipMemcpy(sigx_crlb, d_sigx_crlb, numi*sizeof(double), hipMemcpyDeviceToHost); // copy sigx_crlb data
if (err38 != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err38), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
//hipDeviceReset();
hipFree(d_iall);
hipFree(d_a1);
hipFree(d_N);
hipFree(d_framenum_all);
hipFree(d_xf_all);
hipFree(d_yf_all);
hipFree(d_off);
hipFree(d_xpix);
hipFree(d_ypix);
hipFree(d_sigx_all);
hipFree(d_sigy_all);
hipFree(d_xf_crlb);
hipFree(d_yf_crlb);
hipFree(d_N_crlb);
hipFree(d_off_crlb);
hipFree(d_sigx_crlb);
hipFree(d_sigy_crlb);
hipFree(d_llv);
return;
} | 85533e659dc32c4d688fee98e665d8bdd25dbc14.cu | /*
* full_parallel.cu is a program to take in an array of data and an array of molecular counts and a desired width for analysis
* V 1.0
 * we expect a call of the form [xf_all, yf_all, N, off_all, sigx_all, sigy_all, xf_crlb, yf_crlb, N_crlb, off_crlb, sigx_crlb, sigy_crlb, llv_all, framenum_all] = full_parallel_chain_loc(i1, a1, width, xpix, ypix, mol_nums, sigma, fx_all, bkgn);
* here iall is the stack of images containing molecular emissions that need to be segmented and localized. a1 is the corresponding stack of counting images. width is the width of a segmented image typically 7, xpix and ypix are grid variables used in localization calculation, mol_nums is a scalar number of molecules to be analyzed, sigma is the initial width, bkgn is an initial offset guess, fx_all is a fake vector size of mol_num x 1 to project the output data onto.
* AJN 5/3/16
*/
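/*
 * A minimal, illustrative MATLAB call following that format (the width of 7, sigma of 1.5, and background
 * guess of 2 are placeholder values for the sketch, not recommendations):
 *
 *   fx_all = zeros(mol_num, 1);
 *   [xf_all, yf_all, N, off_all, sigx_all, sigy_all, xf_crlb, yf_crlb, N_crlb, off_crlb, sigx_crlb, sigy_crlb, llv_all, framenum_all] = ...
 *       full_parallel_chain_loc(i1, a1, 7, xpix, ypix, mol_num, 1.5, fx_all, 2);
 */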
#include "mex.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#define PI 3.14159265358979323846
#define O_TILE_WIDTH 25 // variable to determine how many output tiles will be considered in a block
# define BLOCK_WIDTH (O_TILE_WIDTH + (7-1)) // block width needs to be output tiles + mask_width - 1 to ensure enough pixels are covered for calculation
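/* With the values above, BLOCK_WIDTH = 25 + (7 - 1) = 31, so each block launches 31 x 31 = 961 threads,
 * which stays under the 1024-threads-per-block limit of current devices. */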
/*
* Device code
*
*
*/
__device__ double device_det(double Fisher[36])
{
double det;
det = Fisher[0] * (Fisher[7] * (Fisher[14] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) + Fisher[26] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[32] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]))) - Fisher[13] * (Fisher[8] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]))) + Fisher[19] * (Fisher[8] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[25] * (Fisher[8] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) + Fisher[31] * (Fisher[8] * 
(Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])))) - Fisher[6] * (Fisher[1] * (Fisher[14] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) + Fisher[26] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[32] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]))) - Fisher[13] * (Fisher[2] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]))) + Fisher[19] * (Fisher[2] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) - Fisher[25] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * 
Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])))) + Fisher[12] * (Fisher[1] * (Fisher[8] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]))) + Fisher[19] * (Fisher[2] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) 
+ Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[25] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])))) - Fisher[18] * (Fisher[1] * (Fisher[8] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * 
(Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[13] * (Fisher[2] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[25] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])))) + Fisher[24] * (Fisher[1] * (Fisher[8] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * 
Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[13] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[19] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) 
+ Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])))) - Fisher[30] * (Fisher[1] * (Fisher[8] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[13] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[19] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - 
Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[25] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))));
return det;
}
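/*
 * device_det evaluates the 6x6 determinant by Laplace (cofactor) expansion along the entries
 * Fisher[0], Fisher[6], ..., Fisher[30] with alternating signs, i.e.
 *   det(F) = sum_{i=0..5} (-1)^i * Fisher[6*i] * M_i,
 * where M_i is the 5x5 minor left after deleting that entry's row and column; each M_i is itself expanded
 * the same way down to 2x2 blocks. Since the Fisher matrix assembled below is symmetric, expanding along
 * this stride-6 set of entries is equivalent to expanding along the first row.
 */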
void __global__ segment_and_localize7(double *d_iall, // the gaussian is a separable filter and can be treated as such
double *d_a1, // makes these elements eligible for constant caching
double sigma,
double *xpix,
double *ypix,
double *d_xf_all,
double *d_yf_all,
double *d_N,
double *d_off,
double *d_sigx,
double *d_sigy,
double *d_framenum_all,
double *d_xf_crlb,
double *d_yf_crlb,
double *d_N_crlb,
double *d_off_crlb,
double *d_sigx_crlb,
double *d_sigy_crlb,
double *d_xpix,
double *d_ypix,
double * d_llv,
int numi,
int irow,
int icol,
double bkgn)
{
// Declare variables
	double d_i2[7 * 7]; // preallocate space for the local sub-image (indexed as row + 7*col, matching the fitting loops below)
	__shared__ double xgrid[7 * 7]; // allocate xpix and ypix variables to the shared memory of the blocks
	__shared__ double ygrid[7 * 7]; // this will reduce calls to global device memory
	double d_beta1[6]; // current parameter estimates [x, y, N, sigma_x, sigma_y, offset]
	double fisher[36] = { 0 }; // Fisher information accumulator used for the CRLB calculation
	double d_x, d_y, d_n, d_sx, d_sy, d_o, dd_x, dd_y, dd_n, dd_sx, dd_sy, dd_o; // first- and second-derivative sums
	double u, Ex, Ey, llv, det_fish; // model value, pixel-integrated erf terms, log-likelihood, Fisher determinant
	double dudx, dudy, dudn, dudo, dudsx, dudsy, d2udx2, d2udy2, d2udsx2, d2udsy2; // per-pixel derivatives of the model
// Coordinate building
int tx = threadIdx.x; // local x coord
int ty = threadIdx.y; // local y coord
int tz = threadIdx.z;
	// location of the output pixel being analyzed. We do not need a localization apron
	int row_output = blockIdx.y*O_TILE_WIDTH + ty; // gives y coordinate as a function of tile width **these lose meaning for (ty || tx) >= O_TILE_WIDTH, and the same is true for the x coordinate below**
int col_output = blockIdx.x*O_TILE_WIDTH + tx; // gives x coordinate as a function of tile width
int imnum = blockIdx.z;
if (imnum < numi){
		if (d_a1[row_output + irow*col_output + irow*icol*imnum] > 0){ // the assumption at this point is that if d_a1 has a value greater than 0, the neural net said to analyze this pixel
int row_input = row_output - 3; // EACH thread should load 1 input tile to the shared image as there are [BLOCK_WIDTH]x[BLOCK_WIDTH] threads in a block
int col_input = col_output - 3; // and BLOCK_WIDTH = O_TILE_WIDTH + MASK_WIDTH-1
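			/* Worked example of the indexing above: for blockIdx = (0,0) and (ty,tx) = (0,0), row_output = col_output = 0
			 * and row_input = col_input = -3, so the 7x7 read window is centred on the output pixel and the bounds test
			 * in the buffering loop below simply skips the parts of the window that fall off the image. */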
int index = (int)d_a1[row_output + irow*col_output + irow*icol*imnum];
// Buffer data into block
// buffer segment into i2
for (int row = 0; row < 7; row++){
for (int col = 0; col < 7; col++){
					xgrid[row + 7 * col] = d_xpix[row + 7 * col]; // load xpix and ypix
					ygrid[row + 7 * col] = d_ypix[row + 7 * col];
					if ((row_input + row >= 0) && (row_input + row < irow) && (col_input + col >= 0) && (col_input + col < icol)){ // if statement checks the row/col indices to ensure they fall onto the input image
						d_i2[row + 7 * col] = d_iall[row_input + row + (col_input + col)*irow + imnum*irow*icol]; // if true, the value of the image is written to the local sub-image at d_i2[row + 7*col]
}
}
			}// end counting of rows and columns; at this point the image to localize is contained in d_i2
// at this point we should have xpix ypix and the image to localize loaded to 1 core
// Determine the beta estimations
// Determining X and Y guesses
// center of mass approach
double xsum = 0.0;
double ysum = 0.0;
double sumt = 0;
for (int row = 0; row < 7; row++){
for (int col = 0; col < 7; col++){
					sumt += d_i2[row + 7 * col]; // sum can also be used to determine N guess
					xsum += xgrid[row + 7 * col] * d_i2[row + 7 * col];
					ysum += ygrid[row + 7 * col] * d_i2[row + 7 * col];
}
} // end counting over rows
// final estimation of xguess and yguess as xcm and ycm
d_beta1[0] = xsum / sumt;
d_beta1[1] = ysum / sumt;
d_beta1[2] = sumt;
d_beta1[3] = sigma;
d_beta1[4] = sigma;
d_beta1[5] = bkgn;
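			/* The initial guesses assigned above are the centre-of-mass estimators of the raw sub-image:
			 *   x_guess = sum(x_ij * I_ij) / sum(I_ij),  y_guess = sum(y_ij * I_ij) / sum(I_ij),  N_guess = sum(I_ij),
			 * with the widths seeded from the user-supplied sigma and the offset from the background guess bkgn. */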
// start the for loop iterations FOR 1
for (int counttry = 0; counttry < 50; counttry++){
d_x = 0.0;
d_y = 0.0;
d_n = 0.0;
d_sx = 0.0;
d_sy = 0.0;
d_o = 0.0;
dd_x = 0.0; //wipe incremental variables each loop to give correct correction factor
dd_y = 0.0;
dd_n = 0.0;
dd_sx = 0.0;
dd_sy = 0.0;
dd_o = 0.0;
u = 0;
Ey = 0;
Ex = 0;
llv = 0.0;
// Calculate pixel values for derivatives, 2nd derivatives, errorfunctions and u
for (int rowcount = 0; rowcount < irow; rowcount++){ // FOR 2 loops over all rows
for (int colcount = 0; colcount < irow; colcount++){ // FOR 3 loops over all columns
						// x/ygrid is col major (it comes from MATLAB) and d_i2 is col major
// these three lines help define the fitting gaussian as deined by the current iteration of parameters
Ex = 0.5 * (erf((xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])) - erf((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])));
Ey = 0.5 * (erf((ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])) - erf((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])));
u = d_beta1[2] * Ex*Ey + d_beta1[5];
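						/* Together these three lines evaluate the pixel-integrated Gaussian model at this pixel:
						 *   Ex = (1/2) * [ erf((x - x0 + 1/2) / (sqrt(2)*sigx)) - erf((x - x0 - 1/2) / (sqrt(2)*sigx)) ]   (Ey analogous)
						 *   u  = N * Ex * Ey + offset
						 * and all of the derivatives below are taken with respect to the parameters of this u. */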
// first derivatives calculations
// these are done pixel by pixel with the sum added up in the d_x and dd_x areas
dudx = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[3] * d_beta1[3]))*(exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudy = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[4] * d_beta1[4]))*(exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudsx = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[3], 2.0)))* ((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) * exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudsy = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[4], 2.0)))* ((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) * exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudn = Ex*Ey;
dudo = 1.0;
// second derivatives
						// these are calculated in a similar manner to the first derivatives
d2udx2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[3], 3.0))*((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))))*Ey;
d2udy2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[4], 3.0))*((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))))*Ex;
d2udsx2 = (Ey*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[3], -5.0)*(powf((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- powf((xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))
- 2 * powf(d_beta1[3], -3.0)*((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) *exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5) *exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))));
d2udsy2 = (Ex*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[4], -5.0)*(powf((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- powf((ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))
- 2 * powf(d_beta1[4], -3.0)*((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) *exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5) *exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))));
// summing variable to lead to correction factors
// these variables keep track of the correction which is given by summing over the entire pixel
d_x = d_x + dudx*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_x = dd_x + d2udx2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudx, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_y = d_y + dudy*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_y = dd_y + d2udy2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudy, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_n = d_n + dudn*((d_i2[rowcount + colcount*irow] / u) - 1.0);
d_sx = d_sx + dudsx*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_sx = dd_sx + d2udsx2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudsx, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_sy = d_sy + dudsy*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_sy = dd_sy + d2udsy2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudsy, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
dd_n = dd_n - powf(dudn, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2);
d_o = d_o + ((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_o = dd_o - d_i2[rowcount + colcount*irow] / powf(u, 2.0);
if (counttry == 49){ // on the last count, construct fisher information matrix elements
fisher[0] += dudx*dudx / u;
fisher[1] += dudx*dudy / u;
fisher[2] += dudx*dudn / u;
fisher[3] += dudx*dudo / u;
fisher[4] += dudx*dudsx / u;
fisher[5] += dudx*dudsy / u;
fisher[6] += dudy*dudx / u;
fisher[7] += dudy*dudy / u;
fisher[8] += dudy*dudn / u;
fisher[9] += dudy*dudo / u;
							fisher[10] += dudy*dudsx / u;
							fisher[11] += dudy*dudsy / u;
fisher[12] += dudn*dudx / u;
fisher[13] += dudn*dudy / u;
fisher[14] += dudn*dudn / u;
fisher[15] += dudn*dudo / u;
fisher[16] += dudn*dudsx / u;
fisher[17] += dudn*dudsy / u;
fisher[18] += dudo*dudx / u;
fisher[19] += dudo*dudy / u;
fisher[20] += dudo*dudn / u;
fisher[21] += dudo*dudo / u;
fisher[22] += dudo*dudsx / u;
fisher[23] += dudo*dudsy / u;
fisher[24] += dudsx*dudx / u;
fisher[25] += dudsx*dudy / u;
fisher[26] += dudsx*dudn / u;
fisher[27] += dudsx*dudo / u;
fisher[28] += dudsx*dudsx / u;
fisher[29] += dudsx*dudsy / u;
fisher[30] += dudsy*dudx / u;
fisher[31] += dudsy*dudy / u;
fisher[32] += dudsy*dudn / u;
fisher[33] += dudsy*dudo / u;
fisher[34] += dudsy*dudsx / u;
fisher[35] += dudsy*dudsy / u;
llv += d_i2[rowcount + colcount*irow] * log(u + 0.0000000000000001) - u - d_i2[rowcount + colcount*irow] * log(d_i2[rowcount + colcount*irow] + 0.0000000000000001) + d_i2[rowcount + colcount*irow];
}
} // END FOR 3
} // END FOR2
// correct beta1 values with tolerances
d_beta1[0] = d_beta1[0] - d_x / dd_x;
d_beta1[1] = d_beta1[1] - d_y / dd_y;
d_beta1[2] = d_beta1[2] - d_n / dd_n;
d_beta1[3] = d_beta1[3] - d_sx / dd_sx;
d_beta1[4] = d_beta1[4] - d_sy / dd_sy;
d_beta1[5] = d_beta1[5] - d_o / dd_o;
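				/* Each of the corrections above is an independent, diagonal Newton-Raphson step on the Poisson log-likelihood:
				 *   beta_k <- beta_k - (dL/dbeta_k) / (d2L/dbeta_k^2)
				 * where d_* accumulates the first-derivative sum and dd_* the second-derivative sum over the fitting window. */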
} // end FOR 1
			if (d_beta1[0] == d_beta1[0] && d_beta1[1] == d_beta1[1] && d_beta1[2] == d_beta1[2] && d_beta1[3] == d_beta1[3] && d_beta1[4] == d_beta1[4] && d_beta1[5] == d_beta1[5]){ // begin is-numeric if statement (each comparison fails only for NaN)
if (d_beta1[2] > 0 && d_beta1[0] > xgrid[0] && d_beta1[0] < xgrid[irow*irow - 1] && d_beta1[1] < ygrid[irow*irow - 1] && d_beta1[1] > ygrid[0] && d_beta1[3] > 0 && d_beta1[3] < 100 && d_beta1[4] < 100 && d_beta1[4] > 0 && d_beta1[5] > 0){ // was the molecule inside the grid? Was N positive? if yes then record the point
d_xf_all[index] = d_beta1[0] + col_output; // correct position for x
d_yf_all[index] = d_beta1[1] + row_output; // correct position for y
d_N[index] = d_beta1[2];
d_sigx[index] = d_beta1[3];
d_sigy[index] = d_beta1[4];
d_off[index] = d_beta1[5];
d_framenum_all[index] = imnum;
d_llv[index] = llv;
// calculate crlb's for estimators
// UPDATE FOR SIGMA VALUES
					det_fish = device_det(fisher); // these values were determined using homemade Python scripts called cofacs.py and text_det.py and checking against lower rank matrices
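					/* Each CRLB below is the corresponding diagonal element of the inverse Fisher information matrix,
					 *   CRLB_k = [F^-1]_kk = cofactor_kk(F) / det(F),
					 * so every expression is a diagonal cofactor of the 6x6 fisher array divided by det_fish. */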
d_xf_crlb[index] = (fisher[7] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[13] * (fisher[8] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) + fisher[19] * (fisher[8] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[25] * (fisher[8] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) + fisher[31] * (fisher[8] * 
(fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[26] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])))) / det_fish;
d_yf_crlb[index] = -(-(fisher[0] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[12] * (fisher[2] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[2] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) - fisher[24] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[30] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[29] 
- fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[26] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])))) / det_fish);
d_N_crlb[index] = (fisher[0] * (fisher[7] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[31] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[1] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * 
fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish;
d_off_crlb[index] = -(-(fisher[0] * (fisher[7] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[31] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] 
* fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish);
d_sigx_crlb[index] = (fisher[0] * (fisher[7] * (fisher[14] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) + fisher[32] * (fisher[15] * fisher[23] - fisher[21] * fisher[17])) - fisher[13] * (fisher[8] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[23] - fisher[21] * fisher[11])) + fisher[19] * (fisher[8] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[31] * (fisher[8] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) + fisher[20] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) + fisher[32] * (fisher[15] * fisher[23] - fisher[21] * fisher[17])) - fisher[13] * (fisher[2] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[23] - fisher[21] * fisher[5])) + fisher[19] * (fisher[2] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[23] - fisher[21] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[23] - fisher[21] * fisher[5])) + fisher[19] * (fisher[2] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) - fisher[8] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) - fisher[8] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[11] - fisher[9] * fisher[5]))) - fisher[18] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) - fisher[8] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]) - fisher[8] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]) + fisher[14] * (fisher[3] * fisher[11] - fisher[9] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - 
fisher[14] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) + fisher[20] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) - fisher[8] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[19] * (fisher[2] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]) - fisher[8] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]) + fisher[14] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])))) / det_fish;
d_sigy_crlb[index] = -(-(fisher[0] * (fisher[7] * (fisher[14] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) + fisher[26] * (fisher[15] * fisher[22] - fisher[21] * fisher[16])) - fisher[13] * (fisher[8] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[22] - fisher[21] * fisher[10])) + fisher[19] * (fisher[8] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[25] * (fisher[8] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) + fisher[20] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) + fisher[26] * (fisher[15] * fisher[22] - fisher[21] * fisher[16])) - fisher[13] * (fisher[2] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[22] - fisher[21] * fisher[4])) + fisher[19] * (fisher[2] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[22] - fisher[21] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[22] - fisher[21] * fisher[4])) + fisher[19] * (fisher[2] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) - fisher[8] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) - fisher[8] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[10] - fisher[9] * fisher[4]))) - fisher[18] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) - fisher[8] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]) - fisher[8] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]) + fisher[14] * (fisher[3] * fisher[10] - fisher[9] * fisher[4]))) + fisher[24] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - 
fisher[14] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) + fisher[20] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) - fisher[8] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[19] * (fisher[2] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]) - fisher[8] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]) + fisher[14] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])))) / det_fish);
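				// The *_crlb values assigned above are the diagonal entries of the inverse Fisher
				// information matrix, written out as hand-expanded 6x6 cofactors divided by det_fish.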
}
else{ // if localization failed set all parameters to -1. These can easily be identified by molecules with framenum_all -1
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_framenum_all[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = -1;
}
} //end is numeric if statement
else{
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_framenum_all[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = -1;
} // end else fail statement
}
}// end if activated
}// end if and image
} // end gpu segment and loc 7
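// NOTE: hypothetical helper, not part of the original kernels. The CRLB expressions above and below
// are hand-expanded 6x6 cofactors; this sketch computes the same quantities (the diagonal of the
// inverse Fisher matrix) by Gauss-Jordan elimination with partial pivoting, which is easier to audit.
// It assumes the row-major fisher[36] layout used by the kernels (parameter order x, y, N, offset,
// sigma_x, sigma_y) and returns false if the matrix is numerically singular.
__device__ bool crlb_from_fisher(const double fisher[36], double crlb[6])
{
	double a[6][12]; // augmented matrix [F | I]
	for (int r = 0; r < 6; r++){
		for (int c = 0; c < 6; c++){
			a[r][c] = fisher[r * 6 + c];
			a[r][c + 6] = (r == c) ? 1.0 : 0.0;
		}
	}
	for (int p = 0; p < 6; p++){
		int pivot = p; // partial pivoting for numerical stability
		for (int r = p + 1; r < 6; r++){
			if (fabs(a[r][p]) > fabs(a[pivot][p])) pivot = r;
		}
		if (fabs(a[pivot][p]) == 0.0) return false; // singular Fisher matrix
		if (pivot != p){
			for (int c = 0; c < 12; c++){ double t = a[p][c]; a[p][c] = a[pivot][c]; a[pivot][c] = t; }
		}
		double inv_p = 1.0 / a[p][p];
		for (int c = 0; c < 12; c++) a[p][c] *= inv_p;
		for (int r = 0; r < 6; r++){
			if (r != p){
				double f = a[r][p];
				for (int c = 0; c < 12; c++) a[r][c] -= f * a[p][c];
			}
		}
	}
	for (int i = 0; i < 6; i++) crlb[i] = a[i][i + 6]; // diagonal of inv(F) gives the CRLBs
	return true;
}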
__global__ void segment_and_localize9(double *d_iall, // the gaussian is a separable filter and can be treated as such
	double *d_a1, // makes these elements eligible for constant caching
double sigma,
double *xpix,
double *ypix,
double *d_xf_all,
double *d_yf_all,
double *d_N,
double *d_off,
double *d_sigx,
double *d_sigy,
double *d_framenum_all,
double *d_xf_crlb,
double *d_yf_crlb,
double *d_N_crlb,
double *d_off_crlb,
double *d_sigx_crlb,
double *d_sigy_crlb,
double *d_xpix,
double *d_ypix,
double * d_llv,
int numi,
int irow,
int icol,
double bkgn)
{
	// Declare variables
	double d_i2[9][9]; // local (per-thread) copy of the 9x9 sub-image to be fit
	__shared__ double xgrid[9 * 9]; // allocate xpix and ypix variables to the shared memory of the blocks
	__shared__ double ygrid[9 * 9]; // this will reduce calls to global device memory
	// working variables for the iterative fit (declared here so the kernel is self-contained; double precision assumed throughout)
	double d_beta1[6]; // parameter estimates [x, y, N, sigma_x, sigma_y, offset]
	double d_x, d_y, d_n, d_sx, d_sy, d_o, dd_x, dd_y, dd_n, dd_sx, dd_sy, dd_o; // first- and second-derivative accumulators
	double dudx, dudy, dudn, dudo, dudsx, dudsy, d2udx2, d2udy2, d2udsx2, d2udsy2; // per-pixel model derivatives
	double Ex, Ey, u, llv; // error-function terms, model value, and log-likelihood
	double fisher[36] = {0}; // 6x6 Fisher information matrix, row major
	double det_fish = 0.0;
// Coordinate building
int tx = threadIdx.x; // local x coord
int ty = threadIdx.y; // local y coord
int tz = threadIdx.z;
	// location of the output pixel being analyzed; we do not need a localization apron
	int row_output = blockIdx.y*O_TILE_WIDTH + ty; // gives y coordinate as a function of tile width; these lose meaning for (ty || tx) >= O_TILE_WIDTH
int col_output = blockIdx.x*O_TILE_WIDTH + tx; // gives x coordinate as a function of tile width
int imnum = blockIdx.z;
if (imnum < numi){
if (d_a1[row_output + irow*col_output + irow*icol*imnum] >0){ // the assumption at this point is if d_a1 has a value greater than 0 the neural net said to analyze
int row_input = row_output - 3; // EACH thread should load 1 input tile to the shared image as there are [BLOCK_WIDTH]x[BLOCK_WIDTH] threads in a block
int col_input = col_output - 3; // and BLOCK_WIDTH = O_TILE_WIDTH + MASK_WIDTH-1
int index = (int)d_a1[row_output + irow*col_output + irow*icol*imnum];
// Buffer data into block
// buffer segment into i2
for (int row = 0; row < 9; row++){
for (int col = 0; col < 9; col++){
				xgrid[row + 9 * col] = d_xpix[row + 9 * col]; // load xpix and ypix into the 1D shared arrays (column major)
				ygrid[row + 9 * col] = d_ypix[row + 9 * col];
if ((row_input + row >= 0) && (row_input + row < irow) && (col_input + col >= 0) && (col_input + col < icol)){ // if statement checks the row/col indices to ensure they fall onto the input image
d_i2[row][col] = d_iall[row_input + row + (col_input + col)*irow + imnum*irow*icol]; // if true, the value of the image is written to the shared array at location d_i2[ty][tx] and stored locally
}
}
			} // end counting of rows and columns; at this point the image to localize is contained in d_i2
// at this point we should have xpix ypix and the image to localize loaded to 1 core
// Determine the beta estimations
// Determining X and Y guesses
// center of mass approach
double xsum = 0.0;
double ysum = 0.0;
double sumt = 0;
for (int row = 0; row < 9; row++){
for (int col = 0; col < 9; col++){
sumt += d_i2[row][col]; // sum can also be used to determine N guess
					xsum += xgrid[row + 9 * col] * d_i2[row][col];
					ysum += ygrid[row + 9 * col] * d_i2[row][col];
}
} // end counting over rows
// final estimation of xguess and yguess as xcm and ycm
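			// center of mass: x_cm = sum(x*I)/sum(I) and y_cm = sum(y*I)/sum(I); sumt also seeds the photon-count guess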
d_beta1[0] = xsum / sumt;
d_beta1[1] = ysum / sumt;
d_beta1[2] = sumt;
d_beta1[3] = sigma;
d_beta1[4] = sigma;
d_beta1[5] = bkgn;
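			// Each pass below is a Newton-Raphson step on the Poisson maximum-likelihood objective:
			// per-pixel first and second derivatives of the log-likelihood are accumulated in d_* and dd_*,
			// and each parameter is then updated as theta <- theta - d_theta/dd_theta. Fifty fixed
			// iterations are used instead of a convergence test.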
// start the for loop iterations FOR 1
for (int counttry = 0; counttry < 50; counttry++){
d_x = 0.0;
d_y = 0.0;
d_n = 0.0;
d_sx = 0.0;
d_sy = 0.0;
d_o = 0.0;
dd_x = 0.0; //wipe incremental variables each loop to give correct correction factor
dd_y = 0.0;
dd_n = 0.0;
dd_sx = 0.0;
dd_sy = 0.0;
dd_o = 0.0;
u = 0;
Ey = 0;
Ex = 0;
llv = 0.0;
// Calculate pixel values for derivatives, 2nd derivatives, errorfunctions and u
for (int rowcount = 0; rowcount < irow; rowcount++){ // FOR 2 loops over all rows
for (int colcount = 0; colcount < irow; colcount++){ // FOR 3 loops over all columns
// x/ygrid is col major(come from matlab) and i3 is col major
					// these three lines evaluate the fitting gaussian for the current iteration of parameters
Ex = 0.5 * (erf((xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])) - erf((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])));
Ey = 0.5 * (erf((ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])) - erf((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])));
u = d_beta1[2] * Ex*Ey + d_beta1[5];
// first derivatives calculations
// these are done pixel by pixel with the sum added up in the d_x and dd_x areas
dudx = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[3] * d_beta1[3]))*(exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudy = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[4] * d_beta1[4]))*(exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudsx = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[3], 2.0)))* ((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) * exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudsy = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[4], 2.0)))* ((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) * exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudn = Ex*Ey;
dudo = 1.0;
// second derivatives
					// these are calculated in a similar manner to the first derivatives
d2udx2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[3], 3.0))*((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))))*Ey;
d2udy2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[4], 3.0))*((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))))*Ex;
d2udsx2 = (Ey*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[3], -5.0)*(powf((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- powf((xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))
- 2 * powf(d_beta1[3], -3.0)*((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) *exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5) *exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))));
d2udsy2 = (Ex*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[4], -5.0)*(powf((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- powf((ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))
- 2 * powf(d_beta1[4], -3.0)*((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) *exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5) *exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))));
// summing variable to lead to correction factors
// these variables keep track of the correction which is given by summing over the entire pixel
d_x = d_x + dudx*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_x = dd_x + d2udx2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudx, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_y = d_y + dudy*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_y = dd_y + d2udy2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudy, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_n = d_n + dudn*((d_i2[rowcount + colcount*irow] / u) - 1.0);
d_sx = d_sx + dudsx*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_sx = dd_sx + d2udsx2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudsx, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_sy = d_sy + dudsy*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_sy = dd_sy + d2udsy2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudsy, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
dd_n = dd_n - powf(dudn, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2);
d_o = d_o + ((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_o = dd_o - d_i2[rowcount + colcount*irow] / powf(u, 2.0);
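					// On the final iteration the 6x6 Fisher information matrix is accumulated as
					// fisher[6*j + k] = sum over pixels of (du/dtheta_j)*(du/dtheta_k)/u; the diagonal of its
					// inverse gives the Cramer-Rao lower bounds reported below, and llv tallies the log-likelihood.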
if (counttry == 49){ // on the last count, construct fisher information matrix elements
fisher[0] += dudx*dudx / u;
fisher[1] += dudx*dudy / u;
fisher[2] += dudx*dudn / u;
fisher[3] += dudx*dudo / u;
fisher[4] += dudx*dudsx / u;
fisher[5] += dudx*dudsy / u;
fisher[6] += dudy*dudx / u;
fisher[7] += dudy*dudy / u;
fisher[8] += dudy*dudn / u;
fisher[9] += dudy*dudo / u;
						fisher[10] += dudy*dudsx / u;
						fisher[11] += dudy*dudsy / u;
fisher[12] += dudn*dudx / u;
fisher[13] += dudn*dudy / u;
fisher[14] += dudn*dudn / u;
fisher[15] += dudn*dudo / u;
fisher[16] += dudn*dudsx / u;
fisher[17] += dudn*dudsy / u;
fisher[18] += dudo*dudx / u;
fisher[19] += dudo*dudy / u;
fisher[20] += dudo*dudn / u;
fisher[21] += dudo*dudo / u;
fisher[22] += dudo*dudsx / u;
fisher[23] += dudo*dudsy / u;
fisher[24] += dudsx*dudx / u;
fisher[25] += dudsx*dudy / u;
fisher[26] += dudsx*dudn / u;
fisher[27] += dudsx*dudo / u;
fisher[28] += dudsx*dudsx / u;
fisher[29] += dudsx*dudsy / u;
fisher[30] += dudsy*dudx / u;
fisher[31] += dudsy*dudy / u;
fisher[32] += dudsy*dudn / u;
fisher[33] += dudsy*dudo / u;
fisher[34] += dudsy*dudsx / u;
fisher[35] += dudsy*dudsy / u;
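						// Poisson log-likelihood ratio for this pixel; the small constant guards against log(0)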
llv += d_i2[rowcount + colcount*irow] * log(u + 0.0000000000000001) - u - d_i2[rowcount + colcount*irow] * log(d_i2[rowcount + colcount*irow] + 0.0000000000000001) + d_i2[rowcount + colcount*irow];
}
} // END FOR 3
} // END FOR2
// correct beta1 values with tolerances
d_beta1[0] = d_beta1[0] - d_x / dd_x;
d_beta1[1] = d_beta1[1] - d_y / dd_y;
d_beta1[2] = d_beta1[2] - d_n / dd_n;
d_beta1[3] = d_beta1[3] - d_sx / dd_sx;
d_beta1[4] = d_beta1[4] - d_sy / dd_sy;
d_beta1[5] = d_beta1[5] - d_o / dd_o;
} // end FOR 1
			if (d_beta1[0] == d_beta1[0] && d_beta1[1] == d_beta1[1] && d_beta1[2] == d_beta1[2] && d_beta1[3] == d_beta1[3] && d_beta1[4] == d_beta1[4] && d_beta1[5] == d_beta1[5]){ // is-numeric check: a fit value equals itself only if it is not NaN
if (d_beta1[2] > 0 && d_beta1[0] > xgrid[0] && d_beta1[0] < xgrid[irow*irow - 1] && d_beta1[1] < ygrid[irow*irow - 1] && d_beta1[1] > ygrid[0] && d_beta1[3] > 0 && d_beta1[3] < 100 && d_beta1[4] < 100 && d_beta1[4] > 0 && d_beta1[5] > 0){ // was the molecule inside the grid? Was N positive? if yes then record the point
d_xf_all[index] = d_beta1[0] + col_output; // correct position for x
d_yf_all[index] = d_beta1[1] + row_output; // correct position for y
d_N[index] = d_beta1[2];
d_sigx[index] = d_beta1[3];
d_sigy[index] = d_beta1[4];
d_off[index] = d_beta1[5];
d_framenum_all[index] = imnum;
d_llv[index] = llv;
// calculate crlb's for estimators
// UPDATE FOR SIGMA VALUES
					det_fish = device_det(fisher); // these values were determined using a homemade Python code called cofacs.py and text_det.py and checking against lower rank matrices
d_xf_crlb[index] = (fisher[7] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[13] * (fisher[8] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) + fisher[19] * (fisher[8] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[25] * (fisher[8] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) + fisher[31] * (fisher[8] * 
(fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[26] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])))) / det_fish;
d_yf_crlb[index] = -(-(fisher[0] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[12] * (fisher[2] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[2] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) - fisher[24] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[30] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[29] 
- fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[26] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])))) / det_fish);
d_N_crlb[index] = (fisher[0] * (fisher[7] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[31] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[1] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * 
fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish;
d_off_crlb[index] = -(-(fisher[0] * (fisher[7] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[31] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] 
* fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish);
d_sigx_crlb[index] = (fisher[0] * (fisher[7] * (fisher[14] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) + fisher[32] * (fisher[15] * fisher[23] - fisher[21] * fisher[17])) - fisher[13] * (fisher[8] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[23] - fisher[21] * fisher[11])) + fisher[19] * (fisher[8] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[31] * (fisher[8] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) + fisher[20] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) + fisher[32] * (fisher[15] * fisher[23] - fisher[21] * fisher[17])) - fisher[13] * (fisher[2] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[23] - fisher[21] * fisher[5])) + fisher[19] * (fisher[2] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[23] - fisher[21] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[23] - fisher[21] * fisher[5])) + fisher[19] * (fisher[2] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) - fisher[8] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) - fisher[8] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[11] - fisher[9] * fisher[5]))) - fisher[18] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) - fisher[8] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]) - fisher[8] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]) + fisher[14] * (fisher[3] * fisher[11] - fisher[9] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - 
fisher[14] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) + fisher[20] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) - fisher[8] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[19] * (fisher[2] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]) - fisher[8] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]) + fisher[14] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])))) / det_fish;
d_sigy_crlb[index] = -(-(fisher[0] * (fisher[7] * (fisher[14] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) + fisher[26] * (fisher[15] * fisher[22] - fisher[21] * fisher[16])) - fisher[13] * (fisher[8] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[22] - fisher[21] * fisher[10])) + fisher[19] * (fisher[8] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[25] * (fisher[8] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) + fisher[20] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) + fisher[26] * (fisher[15] * fisher[22] - fisher[21] * fisher[16])) - fisher[13] * (fisher[2] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[22] - fisher[21] * fisher[4])) + fisher[19] * (fisher[2] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[22] - fisher[21] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[22] - fisher[21] * fisher[4])) + fisher[19] * (fisher[2] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) - fisher[8] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) - fisher[8] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[10] - fisher[9] * fisher[4]))) - fisher[18] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) - fisher[8] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]) - fisher[8] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]) + fisher[14] * (fisher[3] * fisher[10] - fisher[9] * fisher[4]))) + fisher[24] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - 
fisher[14] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) + fisher[20] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) - fisher[8] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[19] * (fisher[2] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]) - fisher[8] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]) + fisher[14] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])))) / det_fish);
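					// As above, the *_crlb values are the diagonal of the inverse Fisher matrix, expanded by cofactors.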
}
else{ // if localization failed set all parameters to -1. These can easily be identified by molecules with framenum_all -1
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_framenum_all[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = -1;
}
} //end is numeric if statement
else{
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_framenum_all[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = -1;
} // end else fail statement
}
}// end if activated
}// end if and image
} // end gpu segment and loc 9
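/*
 * Clarifying sketch (not part of the original pipeline, nothing calls it):
 * every CRLB expression written out in these kernels is a diagonal element of
 * the inverse Fisher information matrix, crlb_k = [F^-1]_{kk} = C_kk / det(F),
 * where C_kk is the cofactor of the k-th diagonal entry. The 6x6 cofactor
 * expansions were generated offline; the hypothetical 2x2 helper below shows
 * the same relation at a size that fits on one screen.
 */
__device__ void crlb_from_fisher_2x2(const double F[4], double crlb[2])
{
	double det = F[0] * F[3] - F[1] * F[2];	// det(F) of a 2x2 matrix stored row-major
	crlb[0] = F[3] / det;					// [F^-1]_{00}
	crlb[1] = F[0] / det;					// [F^-1]_{11}
}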
__global__ void segment_and_localize11(double *d_iall, // the gaussian is a separable filter and can be treated as such
double *d_a1, // makes these elements eligible for constant caching
double sigma,
double *xpix,
double *ypix,
double *d_xf_all,
double *d_yf_all,
double *d_N,
double *d_off,
double *d_sigx,
double *d_sigy,
double *d_framenum_all,
double *d_xf_crlb,
double *d_yf_crlb,
double *d_N_crlb,
double *d_off_crlb,
double *d_sigx_crlb,
double *d_sigy_crlb,
double *d_xpix,
double *d_ypix,
double * d_llv,
int numi,
int irow,
int icol)
{
	// Declare variables
	double d_i2[11 * 11];				// preallocate space for a per-thread copy of the 11x11 image segment
	__shared__ double xgrid[11 * 11];	// allocate xpix and ypix variables to the shared memory of the blocks
	__shared__ double ygrid[11 * 11];	// this will reduce calls to global device memory
	double d_beta1[6];					// current parameter estimates {x0, y0, N, sigx, sigy, offset}
	double fisher[36] = { 0 };			// 6x6 Fisher information matrix accumulated on the final iteration
	double d_x, d_y, d_n, d_sx, d_sy, d_o, dd_x, dd_y, dd_n, dd_sx, dd_sy, dd_o;	// first and second log-likelihood derivatives
	double dudx, dudy, dudsx, dudsy, dudn, dudo, d2udx2, d2udy2, d2udsx2, d2udsy2;	// model derivatives
	double Ex, Ey, u, llv, det_fish;	// pixel-integrated gaussian factors, model value, log-likelihood and Fisher determinant
// Coordinate building
int tx = threadIdx.x; // local x coord
int ty = threadIdx.y; // local y coord
int tz = threadIdx.z;
	// location of output pixel being analyzed; we do not need a localization apron
int row_output = blockIdx.y*O_TILE_WIDTH + ty; // gives y coordinate as a function of tile width **these lose meaning for (ty || tx) >= O_TILE_WIDTH and the same is true for **
int col_output = blockIdx.x*O_TILE_WIDTH + tx; // gives x coordinate as a function of tile width
int imnum = blockIdx.z;
if (imnum < numi){
		if (d_a1[row_output + col_output*irow + imnum*irow*icol] > 0){ // the assumption at this point is if d_a1 has a value greater than 0 the neural net said to analyze
int row_input = row_output - 3; // EACH thread should load 1 input tile to the shared image as there are [BLOCK_WIDTH]x[BLOCK_WIDTH] threads in a block
int col_input = col_output - 3; // and BLOCK_WIDTH = O_TILE_WIDTH + MASK_WIDTH-1
			int index = (int)d_a1[row_output + col_output*irow + imnum*irow*icol];
// Buffer data into block
// buffer segment into i2
for (int row = 0; row < 11; row++){
for (int col = 0; col < 11; col++){
					xgrid[row + 11 * col] = d_xpix[row + 11 * col]; // load x and ypix grids (column-major, matching matlab)
					ygrid[row + 11 * col] = d_ypix[row + 11 * col];
if ((row_input + row >= 0) && (row_input + row < irow) && (col_input + col >= 0) && (col_input + col < icol)){ // if statement checks the row/col indices to ensure they fall onto the input image
						d_i2[row + 11 * col] = d_iall[row_input + row + (col_input + col)*irow + imnum*irow*icol]; // if true, the value of the image is written to the local segment copy d_i2
}
}
}// end counting of rows and columns at this point the image to localize is contained in d_i2
// at this point we should have xpix ypix and the image to localize loaded to 1 core
// Determine the beta estimations
// Determining X and Y guesses
// center of mass approach
double xsum = 0.0;
double ysum = 0.0;
double sumt = 0;
double mina = 1000000;
for (int row = 0; row < 11; row++){
for (int col = 0; col < 11; col++){
					sumt += d_i2[row + 11 * col]; // sum can also be used to determine N guess
					xsum += xgrid[row + 11 * col] * d_i2[row + 11 * col];
					ysum += ygrid[row + 11 * col] * d_i2[row + 11 * col];
					if (d_i2[row + 11 * col] < mina){ // find minimum value
						mina = d_i2[row + 11 * col];
}
}
} // end counting over rows
// final estimation of xguess and yguess as xcm and ycm
d_beta1[0] = xsum / sumt;
d_beta1[1] = ysum / sumt;
d_beta1[2] = sumt;
d_beta1[3] = sigma;
d_beta1[4] = sigma;
d_beta1[5] = mina;
// start the for loop iterations FOR 1
for (int counttry = 0; counttry < 50; counttry++){
d_x = 0.0;
d_y = 0.0;
d_n = 0.0;
d_sx = 0.0;
d_sy = 0.0;
d_o = 0.0;
dd_x = 0.0; //wipe incremental variables each loop to give correct correction factor
dd_y = 0.0;
dd_n = 0.0;
dd_sx = 0.0;
dd_sy = 0.0;
dd_o = 0.0;
u = 0;
Ey = 0;
Ex = 0;
llv = 0.0;
// Calculate pixel values for derivatives, 2nd derivatives, errorfunctions and u
for (int rowcount = 0; rowcount < 11; rowcount++){ // FOR 2 loops over all rows
for (int colcount = 0; colcount < 11; colcount++){ // FOR 3 loops over all columns
// x/ygrid is col major(come from matlab) and i3 is col major
						// these three lines help define the fitting gaussian as defined by the current iteration of parameters
Ex = 0.5 * (erf((xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])) - erf((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])));
Ey = 0.5 * (erf((ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])) - erf((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])));
u = d_beta1[2] * Ex*Ey + d_beta1[5];
// first derivatives calculations
// these are done pixel by pixel with the sum added up in the d_x and dd_x areas
dudx = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[3] * d_beta1[3]))*(exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudy = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[4] * d_beta1[4]))*(exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudsx = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[3], 2.0)))* ((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) * exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudsy = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[4], 2.0)))* ((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) * exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudn = Ex*Ey;
dudo = 1.0;
// second derivatives
						// these are calculated in a similar manner to the first derivatives
d2udx2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[3], 3.0))*((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))))*Ey;
d2udy2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[4], 3.0))*((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))))*Ex;
d2udsx2 = (Ey*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[3], -5.0)*(powf((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- powf((xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))
- 2 * powf(d_beta1[3], -3.0)*((xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5) *exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5) *exp(-powf(xgrid[rowcount + colcount*irow] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))));
d2udsy2 = (Ex*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[4], -5.0)*(powf((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- powf((ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))
- 2 * powf(d_beta1[4], -3.0)*((ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5) *exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5) *exp(-powf(ygrid[rowcount + colcount*irow] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))));
// summing variable to lead to correction factors
// these variables keep track of the correction which is given by summing over the entire pixel
d_x = d_x + dudx*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_x = dd_x + d2udx2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudx, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_y = d_y + dudy*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_y = dd_y + d2udy2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudy, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_n = d_n + dudn*((d_i2[rowcount + colcount*irow] / u) - 1.0);
d_sx = d_sx + dudsx*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_sx = dd_sx + d2udsx2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudsx, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
d_sy = d_sy + dudsy*((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_sy = dd_sy + d2udsy2*((d_i2[rowcount + colcount*irow] / u) - 1.0) - powf(dudsy, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2.0);
dd_n = dd_n - powf(dudn, 2.0) * d_i2[rowcount + colcount*irow] / powf(u, 2);
d_o = d_o + ((d_i2[rowcount + colcount*irow] / u) - 1.0);
dd_o = dd_o - d_i2[rowcount + colcount*irow] / powf(u, 2.0);
if (counttry == 49){ // on the last count, construct fisher information matrix elements
fisher[0] += dudx*dudx / u;
fisher[1] += dudx*dudy / u;
fisher[2] += dudx*dudn / u;
fisher[3] += dudx*dudo / u;
fisher[4] += dudx*dudsx / u;
fisher[5] += dudx*dudsy / u;
fisher[6] += dudy*dudx / u;
fisher[7] += dudy*dudy / u;
fisher[8] += dudy*dudn / u;
fisher[9] += dudy*dudo / u;
fisher[10] += dudy*dudsx / u;;
fisher[11] += dudy*dudsy / u;;
fisher[12] += dudn*dudx / u;
fisher[13] += dudn*dudy / u;
fisher[14] += dudn*dudn / u;
fisher[15] += dudn*dudo / u;
fisher[16] += dudn*dudsx / u;
fisher[17] += dudn*dudsy / u;
fisher[18] += dudo*dudx / u;
fisher[19] += dudo*dudy / u;
fisher[20] += dudo*dudn / u;
fisher[21] += dudo*dudo / u;
fisher[22] += dudo*dudsx / u;
fisher[23] += dudo*dudsy / u;
fisher[24] += dudsx*dudx / u;
fisher[25] += dudsx*dudy / u;
fisher[26] += dudsx*dudn / u;
fisher[27] += dudsx*dudo / u;
fisher[28] += dudsx*dudsx / u;
fisher[29] += dudsx*dudsy / u;
fisher[30] += dudsy*dudx / u;
fisher[31] += dudsy*dudy / u;
fisher[32] += dudsy*dudn / u;
fisher[33] += dudsy*dudo / u;
fisher[34] += dudsy*dudsx / u;
fisher[35] += dudsy*dudsy / u;
llv += d_i2[rowcount + colcount*irow] * log(u + 0.0000000000000001) - u - d_i2[rowcount + colcount*irow] * log(d_i2[rowcount + colcount*irow] + 0.0000000000000001) + d_i2[rowcount + colcount*irow];
}
} // END FOR 3
} // END FOR2
// correct beta1 values with tolerances
d_beta1[0] = d_beta1[0] - d_x / dd_x;
d_beta1[1] = d_beta1[1] - d_y / dd_y;
d_beta1[2] = d_beta1[2] - d_n / dd_n;
d_beta1[3] = d_beta1[3] - d_sx / dd_sx;
d_beta1[4] = d_beta1[4] - d_sy / dd_sy;
d_beta1[5] = d_beta1[5] - d_o / dd_o;
} // end FOR 1
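				/*
				 * Each pass of FOR 1 above is one Newton step per parameter on the Poisson
				 * log-likelihood L(beta) = sum_k [ i_k*log(u_k) - u_k ]:
				 *
				 *     beta <- beta - (dL/dbeta) / (d2L/dbeta2)
				 *
				 * where d_x..d_o hold the first derivatives and dd_x..dd_o the second
				 * derivatives accumulated over all pixels in the segment.
				 */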
if (d_beta1[0] == d_beta1[0] && d_beta1[1] == d_beta1[1] && d_beta1[2] == d_beta1[2] && d_beta1[5] == d_beta1[5] && d_beta1[3] == d_beta1[3] && d_beta1[4] == d_beta1[4] && d_beta1[5] == d_beta1[5]){ // begin is numeric if statement
if (d_beta1[2] > 0 && d_beta1[0] > xgrid[0] && d_beta1[0] < xgrid[irow*irow - 1] && d_beta1[1] < ygrid[irow*irow - 1] && d_beta1[1] > ygrid[0] && d_beta1[3] > 0 && d_beta1[3] < 100 && d_beta1[4] < 100 && d_beta1[4] > 0 && d_beta1[5] > 0){ // was the molecule inside the grid? Was N positive? if yes then record the point
d_xf_all[index] = d_beta1[0] + col_output; // correct position for x
d_yf_all[index] = d_beta1[1] + row_output; // correct position for y
d_N[index] = d_beta1[2];
d_sigx[index] = d_beta1[3];
d_sigy[index] = d_beta1[4];
d_off[index] = d_beta1[5];
d_framenum_all[index] = imnum;
d_llv[index] = llv;
// calculate crlb's for estimators
// UPDATE FOR SIGMA VALUES
det_fish = device_det(fisher); // these values were determined using a homemade Python code called cofacs.py and text_det.py and checking against lower rank matricies
d_xf_crlb[index] = (fisher[7] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[13] * (fisher[8] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) + fisher[19] * (fisher[8] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[25] * (fisher[8] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) + fisher[31] * (fisher[8] * 
(fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[26] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])))) / det_fish;
d_yf_crlb[index] = -(-(fisher[0] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[12] * (fisher[2] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[2] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) - fisher[24] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[30] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[29] 
- fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[26] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])))) / det_fish);
d_N_crlb[index] = (fisher[0] * (fisher[7] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[31] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[1] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * 
fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish;
d_off_crlb[index] = -(-(fisher[0] * (fisher[7] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[31] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] 
* fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish);
d_sigx_crlb[index] = (fisher[0] * (fisher[7] * (fisher[14] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) + fisher[32] * (fisher[15] * fisher[23] - fisher[21] * fisher[17])) - fisher[13] * (fisher[8] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[23] - fisher[21] * fisher[11])) + fisher[19] * (fisher[8] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[31] * (fisher[8] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) + fisher[20] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) + fisher[32] * (fisher[15] * fisher[23] - fisher[21] * fisher[17])) - fisher[13] * (fisher[2] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[23] - fisher[21] * fisher[5])) + fisher[19] * (fisher[2] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[23] - fisher[21] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[21] * fisher[35] - fisher[33] * fisher[23]) - fisher[20] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[23] - fisher[21] * fisher[5])) + fisher[19] * (fisher[2] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) - fisher[8] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) - fisher[8] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[11] - fisher[9] * fisher[5]))) - fisher[18] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) + fisher[32] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[35] - fisher[33] * fisher[17]) - fisher[14] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[35] - fisher[33] * fisher[11]) - fisher[8] * (fisher[3] * fisher[35] - fisher[33] * fisher[5]) + fisher[32] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]) - fisher[8] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]) + fisher[14] * (fisher[3] * fisher[11] - fisher[9] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - 
fisher[14] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) + fisher[20] * (fisher[9] * fisher[17] - fisher[15] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[23] - fisher[21] * fisher[17]) - fisher[14] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[17] - fisher[15] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[23] - fisher[21] * fisher[11]) - fisher[8] * (fisher[3] * fisher[23] - fisher[21] * fisher[5]) + fisher[20] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])) - fisher[19] * (fisher[2] * (fisher[9] * fisher[17] - fisher[15] * fisher[11]) - fisher[8] * (fisher[3] * fisher[17] - fisher[15] * fisher[5]) + fisher[14] * (fisher[3] * fisher[11] - fisher[9] * fisher[5])))) / det_fish;
d_sigy_crlb[index] = -(-(fisher[0] * (fisher[7] * (fisher[14] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) + fisher[26] * (fisher[15] * fisher[22] - fisher[21] * fisher[16])) - fisher[13] * (fisher[8] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[22] - fisher[21] * fisher[10])) + fisher[19] * (fisher[8] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[25] * (fisher[8] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) + fisher[20] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) + fisher[26] * (fisher[15] * fisher[22] - fisher[21] * fisher[16])) - fisher[13] * (fisher[2] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[22] - fisher[21] * fisher[4])) + fisher[19] * (fisher[2] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[22] - fisher[21] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[21] * fisher[28] - fisher[27] * fisher[22]) - fisher[20] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[22] - fisher[21] * fisher[4])) + fisher[19] * (fisher[2] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) - fisher[8] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) - fisher[8] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[10] - fisher[9] * fisher[4]))) - fisher[18] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) + fisher[26] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[28] - fisher[27] * fisher[16]) - fisher[14] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[28] - fisher[27] * fisher[10]) - fisher[8] * (fisher[3] * fisher[28] - fisher[27] * fisher[4]) + fisher[26] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[25] * (fisher[2] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]) - fisher[8] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]) + fisher[14] * (fisher[3] * fisher[10] - fisher[9] * fisher[4]))) + fisher[24] * (fisher[1] * (fisher[8] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - 
fisher[14] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) + fisher[20] * (fisher[9] * fisher[16] - fisher[15] * fisher[10])) - fisher[7] * (fisher[2] * (fisher[15] * fisher[22] - fisher[21] * fisher[16]) - fisher[14] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[16] - fisher[15] * fisher[4])) + fisher[13] * (fisher[2] * (fisher[9] * fisher[22] - fisher[21] * fisher[10]) - fisher[8] * (fisher[3] * fisher[22] - fisher[21] * fisher[4]) + fisher[20] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])) - fisher[19] * (fisher[2] * (fisher[9] * fisher[16] - fisher[15] * fisher[10]) - fisher[8] * (fisher[3] * fisher[16] - fisher[15] * fisher[4]) + fisher[14] * (fisher[3] * fisher[10] - fisher[9] * fisher[4])))) / det_fish);
}
else{ // if localization failed set all parameters to -1. These can easily be identified by molecules with framenum_all -1
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_framenum_all[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = -1;
}
} //end is numeric if statement
else{
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_framenum_all[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = -1;
} // end else fail statement
}
}// end if activated
}// end if and image
} // end gpu segment and loc 11
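/*
 * For reference, the segment_and_localize kernels fit the same pixel-integrated
 * 2-D gaussian model
 *
 *     u(x,y) = N * Ex(x) * Ey(y) + offset
 *     Ex(x)  = 0.5 * ( erf((x - x0 + 0.5)/sqrt(2*sigx^2)) - erf((x - x0 - 0.5)/sqrt(2*sigx^2)) )
 *
 * (and analogously for Ey), which matches the Ex/Ey expressions inside the
 * fitting loops. The six fitted parameters are beta1 = {x0, y0, N, sigx, sigy, offset}.
 */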
/*
THIS IS THE SECTION FOR IDENTIFICATION
*/
/*
* Host code
*
*
*/
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
/* Declare all variables.*/
double *iall; // the pointer to the array of all images to be analyzed
double *a1;
double *sigma;
double *d_iall; // Pointer to image array on gpu
double *d_a1; // pointer to count array on gpu
double *d_framenum_all;
double *d_x_cm; // pointer to parameters on device
double *d_y_cm;
double *d_xf_all;
double *d_yf_all;
double *d_sigx_all;
double *d_sigy_all;
double *d_N;
double *d_off;
double *d_llv;
double *d_xf_crlb;
double *d_yf_crlb;
double *d_N_crlb;
double *d_off_crlb;
double *d_sigx_crlb;
double * d_sigy_crlb;
double *xpix;
double *ypix;
double *d_xpix;
double *d_ypix;
double *bkgn;
	int irow; // number of pixels in a row which should also be the number in a column
	int icol; // number of pixels in a column
	int numi; // number of images imported
	int arow;
	int acol;
	int numa;
	int widths;
	int mol_num; // number of molecules to localize
	double *d_framenum_temp;
	const mwSize *idims, *adims, *xdims, *ydims;
/* Throw an error if the input does not match expectations. */
if (nrhs != 9) {
printf("Must have 8 inputs ( iall, a1, width, xpix, ypix, mol_num, sigs, fx_all, bkgn) line: %d\n", __LINE__);
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[0]) || mxIsComplex(prhs[0])){
printf("iall must be a m x n x numel(iall(1,1,:)) double array\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[1]) || mxIsComplex(prhs[1])){
printf("a1 must be a m x n xnumel(iall(1,1,:)) double array\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[2]) || mxIsComplex(prhs[2])){
printf("Width must be a l x 1 double array\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[3]) || mxIsComplex(prhs[3])){
printf("xpix must be a width x witdh double array\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[4]) || mxIsComplex(prhs[4])){
printf("ypix must be a width x witdh double array\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[5]) || mxIsComplex(prhs[5])){
printf("mol_num must be a 1 x 1 double array\n");
mexErrMsgTxt("See Error above!\n");
}
// get pointer to input arguments
	iall = (double *)mxGetPr(prhs[0]); // matlab linearizes in a column major format which affects indexing (Writing Matlab C/MEX Code - Research Gate)
idims = mxGetDimensions(prhs[0]); // get dimensions of image array
icol = (int)idims[1];
irow = (int)idims[0];
numi = (int)idims[2]; // get number of images perblock from matlab
if (numi > 10000000 || numi < 1){
numi = 1;
}
// get dimensions of activation image
	a1 = (double *)mxGetPr(prhs[1]); // matlab linearizes in a column major format which affects indexing (Writing Matlab C/MEX Code - Research Gate)
	adims = mxGetDimensions(prhs[1]); // get dimensions of activation image array
acol = (int)adims[1];
arow = (int)adims[0];
numa = (int)adims[2]; // get number of images perblock from matlab
if (numa > 10000000 || numa < 1){
numa = 1;
}
	// get the segmentation width as a scalar
	widths = (int)mxGetScalar(prhs[2]);
// get xpix dims
xpix = (double *)mxGetPr(prhs[3]);
xdims = mxGetDimensions(prhs[3]);
// get ypix dims
ypix = (double *)mxGetPr(prhs[4]);
ydims = mxGetDimensions(prhs[4]);
// get number of molecules
	mol_num = (int)mxGetScalar(prhs[5]);
	sigma = (double *)mxGetPr(prhs[6]);
	bkgn = (double *)mxGetPr(prhs[8]);
// EVERYONE LOVES SOME GOOD VARIABLE CHECKING!!!!!!!!!!!!!!
if (icol != acol){
printf("a1 and iall must have same number of columns\n");
mexErrMsgTxt("See Above Error!\n");
}
if (irow != arow){
printf("a1 and iall must have same number of rows\n");
mexErrMsgTxt("See Above Error!\n");
}
if (numi != numa){
printf("a1 and iall must have same number of frames\n");
mexErrMsgTxt("See Above Error!\n");
}
if (xdims[0] != ydims[0]){
printf("xpix and ypix must have same number of columns\n");
mexErrMsgTxt("See Above Error!\n");
}
if (xdims[1] != ydims[1]){
printf("xpix and ypix must have same number of rows\n");
mexErrMsgTxt("See Above Error!\n");
}
// Did the User declare an output?
if (nlhs != 14){
printf("You must have 14 output variables [xf_all, yf_all, N, off_all, sigx_all, sigy_all, xf_crlb, yf_crlb, N_crlb, off_crlb, sigx_crlb, sigy_crlb, llv_all, framenum_all]\n");
mexErrMsgTxt("See Error above!\n");
}
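	/*
	 * Example call from MATLAB (the MEX name "gpu_loc" is hypothetical; use the
	 * name this file is compiled under):
	 *
	 *   [xf_all, yf_all, N, off_all, sigx_all, sigy_all, xf_crlb, yf_crlb, ...
	 *    N_crlb, off_crlb, sigx_crlb, sigy_crlb, llv_all, framenum_all] = ...
	 *       gpu_loc(iall, a1, width, xpix, ypix, mol_num, sigs, fx_all, bkgn);
	 */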
cudaDeviceReset();
// allocate memory on the gpu device
cudaError_t err1 = cudaMalloc((void**)&d_iall, irow*icol*(numi)*sizeof(double)); // allocate image memory
if (err1 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err1), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err2 = cudaMalloc((void**)&d_a1, irow*icol*numa*sizeof(double)); // allocate a1 memory
if (err2 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err2), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err3 = cudaMalloc((void**)&d_xpix, widths*widths*sizeof(double)); // allocate xpix
if (err3 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err3), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err4 = cudaMalloc((void**)&d_ypix, widths*widths*sizeof(double)); // allocate ypix
if (err4 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err4), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err5 = cudaMalloc((void**)&d_llv, mol_num*sizeof(double)); // allocate llv array on gpu
if (err5 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err5), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err6 = cudaMalloc((void**)&d_N, mol_num*sizeof(double)); // allocate N array on gpu
if (err6 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err6), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err7 = cudaMalloc((void**)&d_off, mol_num*sizeof(double)); // allocate offset array on gpu
if (err7 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err7), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err8 = cudaMalloc((void**)&d_yf_all, mol_num*sizeof(double)); // allocate yf_all array on gpu
if (err8 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err8), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err9 = cudaMalloc((void**)&d_xf_all, mol_num*sizeof(double)); // allocate xf_all array on gpu
if (err9 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err9), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err10 = cudaMalloc((void**)&d_framenum_temp, mol_num*sizeof(double)); // allocate framenum_temp array on gpu
if (err10 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err10), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err11 = cudaMalloc((void**)&d_framenum_all, mol_num*sizeof(double)); // allocate framenum_all array on gpu
if (err11 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err11), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err12 = cudaMalloc((void**)&d_sigx_all, mol_num*sizeof(double)); // allocate sigx_all array on gpu
if (err12 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err12), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err13 = cudaMalloc((void**)&d_sigy_all, mol_num*sizeof(double)); // allocate sigy_all array on gpu
if (err13 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err13), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err14 = cudaMalloc((void**)&d_xf_crlb, mol_num*sizeof(double)); // Allocate xf_crlb array on gpu
if (err14 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err14), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err15 = cudaMalloc((void**)&d_yf_crlb, mol_num*sizeof(double)); // allocate yf_crlb array on gpu
if (err15 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err15), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err16 = cudaMalloc((void**)&d_N_crlb, mol_num*sizeof(double)); // allocate N_crlb array on gpu
if (err16 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err16), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err17 = cudaMalloc((void**)&d_off_crlb, mol_num*sizeof(double)); // allocate Off_crlb array on gpu
if (err17 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err17), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err18 = cudaMalloc((void**)&d_sigx_crlb, mol_num*sizeof(double)); // allocate sigx_crlb array on gpu
if (err18 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err18), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err19 = cudaMalloc((void**)&d_sigy_crlb, mol_num*sizeof(double)); // allocate sigy_crlb array on gpu
if (err19 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err19), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
// copy data from host to device
cudaError_t err20 = cudaMemcpy(d_iall, iall, irow*icol*(numi)*sizeof(double), cudaMemcpyHostToDevice); // copy image data to gpu
if (err20 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err20), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err21 = cudaMemcpy(d_a1, a1, arow*acol*numa*sizeof(double), cudaMemcpyHostToDevice); // copy a1 data to gpu
if (err21 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err21), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err22 = cudaMemcpy(d_xpix, xpix, widths*widths*sizeof(double), cudaMemcpyHostToDevice); // copy xpix data to gpu
if (err22 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err22), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
cudaError_t err23 = cudaMemcpy(d_ypix, ypix, widths*widths*sizeof(double), cudaMemcpyHostToDevice); // copy ypix data to gpu
if (err23 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err23), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
/* Run GPU kernel*/
dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH); // run 2-D gpu kernel to help with indexing
dim3 dimGrid((icol - 1) / O_TILE_WIDTH + 1, (irow - 1) / O_TILE_WIDTH + 1, numi );
switch (widths)
{
case 7:
		segment_and_localize7<<<dimGrid, dimBlock>>>(d_iall, d_a1, sigma, d_xpix, d_ypix, d_xf_all, d_yf_all, d_N, d_off, d_sigx_all, d_sigy_all, d_framenum_all, d_xf_crlb, d_yf_crlb, d_N_crlb, d_off_crlb, d_sigx_crlb, d_sigy_crlb, d_xpix, d_ypix, d_llv, numi, irow, icol, bkgn);
break;
case 9:
		segment_and_localize9<<<dimGrid, dimBlock>>>(d_iall, d_a1, sigma, d_xpix, d_ypix, d_xf_all, d_yf_all, d_N, d_off, d_sigx_all, d_sigy_all, d_framenum_all, d_xf_crlb, d_yf_crlb, d_N_crlb, d_off_crlb, d_sigx_crlb, d_sigy_crlb, d_xpix, d_ypix, d_llv, numi, irow, icol, bkgn);
break;
case 11:
		segment_and_localize11<<<dimGrid, dimBlock>>>(d_iall, d_a1, sigma, d_xpix, d_ypix, d_xf_all, d_yf_all, d_N, d_off, d_sigx_all, d_sigy_all, d_framenum_all, d_xf_crlb, d_yf_crlb, d_N_crlb, d_off_crlb, d_sigx_crlb, d_sigy_crlb, d_xpix, d_ypix, d_llv, numi, irow, icol, bkgn);
break;
default:
printf("Image size is inappropriate please choose either 7x7, 9x9, or 11x11 size\n");
mexErrMsgTxt("See Error Above!\n");
break;
}
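	/*
	 * Worked example of the launch geometry (sizes assumed for illustration):
	 * with O_TILE_WIDTH = 8 and a 256 x 256 x numi image stack, dimGrid is
	 * ((256-1)/8 + 1) x ((256-1)/8 + 1) x numi = 32 x 32 x numi blocks of
	 * BLOCK_WIDTH x BLOCK_WIDTH threads, so every output pixel of every frame
	 * is covered by one thread.
	 */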
/* copy data back to mxarray pointers for output
*
*
* Duplicate the input array of equal size to the output array
* Send the pointer to a variable
* copy data to place pointer points to, which is output
*/
/*
cudaError_t errk1 = cudaPeekAtLastError();
if (errk1 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(errk1), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
*/
cudaError_t err24 = cudaThreadSynchronize();
if (err24 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err24), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[11] = mxDuplicateArray(prhs[7]);
double *sigy_crlb = (double *)mxGetPr(plhs[11]);
cudaError_t err25 = cudaMemcpy(sigy_crlb, d_sigy_crlb, numi*sizeof(double), cudaMemcpyDeviceToHost); // copy sigy_crlb data
if (err25 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err25), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[12] = mxDuplicateArray(prhs[7]);
double *llv = (double *)mxGetPr(plhs[12]);
cudaError_t err26 = cudaMemcpy(llv, d_llv, numi*sizeof(double), cudaMemcpyDeviceToHost); // copy llv data
if (err26 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err26), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[13] = mxDuplicateArray(prhs[7]);
double *framenum_all = (double *)mxGetPr(plhs[13]);
cudaError_t err27 = cudaMemcpy(framenum_all, d_framenum_all, numi*sizeof(double), cudaMemcpyDeviceToHost); // copy framenum_all data
if (err27 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err27), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[0] = mxDuplicateArray(prhs[7]);
	double *xf_all = (double *)mxGetPr(plhs[0]);
cudaError_t err28 = cudaMemcpy(xf_all, d_xf_all, numi*sizeof(double), cudaMemcpyDeviceToHost); // copy xf_all data
if (err28 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err28), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[1] = mxDuplicateArray(prhs[7]);
double *yf_all = (double *)mxGetPr(plhs[1]);
cudaError_t err29 = cudaMemcpy(yf_all, d_yf_all, numi*sizeof(double), cudaMemcpyDeviceToHost); // copy yf_all data
if (err29 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err29), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[2] = mxDuplicateArray(prhs[7]);
double *N = (double *)mxGetPr(plhs[2]);
cudaError_t err30 = cudaMemcpy(N, d_N, numi*sizeof(double), cudaMemcpyDeviceToHost); // copy N data
if (err30 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err30), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[3] = mxDuplicateArray(prhs[7]);
double *off_all = (double *)mxGetPr(plhs[3]);
cudaError_t err31 = cudaMemcpy(off_all, d_off, numi*sizeof(double), cudaMemcpyDeviceToHost); // copy off_all data
if (err31 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err31), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[4] = mxDuplicateArray(prhs[7]);
double *sig_x = (double *)mxGetPr(plhs[4]);
cudaError_t err32 = cudaMemcpy(sig_x, d_sigx_all, numi*sizeof(double), cudaMemcpyDeviceToHost); // copy sigx data
if (err32 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err32), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[5] = mxDuplicateArray(prhs[7]);
double *sig_y = (double *)mxGetPr(plhs[5]);
cudaError_t err33 = cudaMemcpy(sig_y, d_sigy_all, numi*sizeof(double), cudaMemcpyDeviceToHost); // copy sigy data
if (err33 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err33), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[6] = mxDuplicateArray(prhs[7]);
double *xf_crlb = (double *)mxGetPr(plhs[6]);
cudaError_t err34 = cudaMemcpy(xf_crlb, d_xf_crlb, numi*sizeof(double), cudaMemcpyDeviceToHost); // copy xf_crlb data
if (err34 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err34), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
	}
plhs[7] = mxDuplicateArray(prhs[7]);
double *yf_crlb = (double *)mxGetPr(plhs[7]);
cudaError_t err35 = cudaMemcpy(yf_crlb, d_yf_crlb, numi*sizeof(double), cudaMemcpyDeviceToHost); // copy yf_crlb data
if (err35 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err35), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[8] = mxDuplicateArray(prhs[7]);
double *N_crlb = (double *)mxGetPr(plhs[8]);
cudaError_t err36 = cudaMemcpy(N_crlb, d_N_crlb, numi*sizeof(double), cudaMemcpyDeviceToHost); // copy N_crlb data
if (err36 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err36), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[9] = mxDuplicateArray(prhs[7]);
double *off_crlb = (double *)mxGetPr(plhs[9]);
cudaError_t err37 = cudaMemcpy(off_crlb, d_off_crlb, numi*sizeof(double), cudaMemcpyDeviceToHost); // copy off_crlb data
if (err37 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err37), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
plhs[10] = mxDuplicateArray(prhs[7]);
double *sigx_crlb = (double *)mxGetPr(plhs[10]);
cudaError_t err38 = cudaMemcpy(sigx_crlb, d_sigx_crlb, numi*sizeof(double), cudaMemcpyDeviceToHost); // copy sigx_crlb data
if (err38 != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err38), __FILE__, __LINE__);
mexErrMsgTxt("See Error above!\n");
}
//cudaDeviceReset();
cudaFree(d_iall);
cudaFree(d_a1);
cudaFree(d_N);
cudaFree(d_framenum_all);
cudaFree(d_xf_all);
cudaFree(d_yf_all);
cudaFree(d_off);
cudaFree(d_xpix);
cudaFree(d_ypix);
cudaFree(d_sigx_all);
cudaFree(d_sigy_all);
cudaFree(d_xf_crlb);
cudaFree(d_yf_crlb);
cudaFree(d_N_crlb);
cudaFree(d_off_crlb);
cudaFree(d_sigx_crlb);
cudaFree(d_sigy_crlb);
cudaFree(d_llv);
return;
} |
427da85b04f6fb44d269fe7e5f764956cd43f9e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "class.hpp"
//#include "force.hpp"
#include<particle_simulator.hpp>
#include "cuda_pointer.h"
#include "force_gpu_cuda.hpp"
enum{
N_THREAD_GPU = 32,
N_WALK_LIMIT = 1000,
NI_LIMIT = N_WALK_LIMIT*1000,
NJ_LIMIT = N_WALK_LIMIT*10000,
};
struct EpiGPU{
float3 pos;
int id_walk;
};
struct EpjGPU{
float4 posm;
};
struct ForceGPU{
float4 accp;
};
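// dev_gravity: softened pairwise interaction. Adds the acceleration of particle i
// at `ipos` due to particle j packed as (x, y, z, mass) in `jposm` into accp.x/y/z,
// and subtracts the pair potential from accp.w.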
inline __device__ float4 dev_gravity(
float eps2,
float3 ipos,
float4 jposm,
float4 accp)
{
float dx = jposm.x - ipos.x;
float dy = jposm.y - ipos.y;
float dz = jposm.z - ipos.z;
float r2 = eps2 + dx*dx + dy*dy + dz*dz;
float rinv = rsqrtf(r2);
float pij = jposm.w * rinv;
float mri3 = rinv*rinv * pij;
accp.x += mri3 * dx;
accp.y += mri3 * dy;
accp.z += mri3 * dz;
accp.w -= pij;
return accp;
}
#if 0
__global__ void ForceKernel(
const int2 * ij_disp,
const EpiGPU * epi,
const EpjGPU * epj,
ForceGPU * force,
const float eps2)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
const float3 ipos = epi[tid].pos;
const int j_head = ij_disp[epi[tid].id_walk ].y;
const int j_tail = ij_disp[epi[tid].id_walk+1].y;
float4 accp = make_float4(0.f, 0.f, 0.f, 0.f);
for(int j=j_head; j<j_tail; j++){
float4 jposm = epj[j].posm;
accp = dev_gravity(eps2, ipos, jposm, accp);
}
force[tid].accp = accp;
}
#else
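// Optimized path: ForceKernel below dispatches to one of three per-block strategies,
// chosen from how many interaction walks the N_THREAD_GPU(=32)-thread block spans:
// ForceKernel_1walk (single shared j-list staged through shared memory),
// ForceKernel_2walk (two walks staged side by side), and ForceKernel_multiwalk
// (fallback streaming epj straight from global memory). The commented-out
// __syncthreads() calls appear to rely on warp-synchronous execution of the
// 32-thread block.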
__device__ float4 ForceKernel_1walk(
float4 *jpsh,
const float3 ipos,
const int id_walk,
const int2 *ij_disp,
const EpjGPU *epj,
float4 accp,
const float eps2)
{
const int tid = threadIdx.x;
const int j_head = ij_disp[id_walk ].y;
const int j_tail = ij_disp[id_walk+1].y;
for(int j=j_head; j<j_tail; j+=N_THREAD_GPU){
// __syncthreads();
jpsh[tid] = ((float4 *)(epj + j)) [tid];
// __syncthreads();
if(j_tail-j < N_THREAD_GPU){
for(int jj=0; jj<j_tail-j; jj++){
accp = dev_gravity(eps2, ipos, jpsh[jj], accp);
}
}else{
#pragma unroll
for(int jj=0; jj<N_THREAD_GPU; jj++){
accp = dev_gravity(eps2, ipos, jpsh[jj], accp);
}
}
}
return accp;
}
__device__ float4 ForceKernel_2walk(
float4 jpsh[2][N_THREAD_GPU],
const float3 ipos,
const int id_walk,
const int iwalk0,
const int iwalk1,
const int2 *ij_disp,
const EpjGPU *epj,
float4 accp,
const float eps2)
{
const int jbeg0 = ij_disp[iwalk0].y;
const int jbeg1 = ij_disp[iwalk1].y;
const int jend0 = ij_disp[iwalk0 + 1].y;
const int jend1 = ij_disp[iwalk1 + 1].y;
const int nj0 = jend0 - jbeg0;
const int nj1 = jend1 - jbeg1;
const int nj_longer = nj0 > nj1 ? nj0 : nj1;
const int nj_shorter = nj0 > nj1 ? nj1 : nj0;
const int walk_longer= nj0 > nj1 ? 0 : 1;
const int jbeg_longer = nj0 > nj1 ? jbeg0 : jbeg1;
const int mywalk = id_walk==iwalk0 ? 0 : 1;
const int tid = threadIdx.x;
for(int j=0; j<nj_shorter; j+=N_THREAD_GPU){
jpsh[0][tid] = ((float4 *)(epj + jbeg0 + j)) [tid];
jpsh[1][tid] = ((float4 *)(epj + jbeg1 + j)) [tid];
if(nj_shorter-j < N_THREAD_GPU){
for(int jj=0; jj<nj_shorter-j; jj++){
accp = dev_gravity(eps2, ipos, jpsh[mywalk][jj], accp);
}
}else{
#pragma unroll
for(int jj=0; jj<N_THREAD_GPU; jj++){
accp = dev_gravity(eps2, ipos, jpsh[mywalk][jj], accp);
}
}
}
for(int j=nj_shorter; j<nj_longer; j+=N_THREAD_GPU){
jpsh[0][tid] = ((float4 *)(epj + jbeg_longer + j)) [tid];
int jrem = nj_longer - j;
if(jrem < N_THREAD_GPU){
for(int jj=0; jj<jrem; jj++){
if(mywalk == walk_longer)
accp = dev_gravity(eps2, ipos, jpsh[0][jj], accp);
}
}else{
#pragma unroll
for(int jj=0; jj<N_THREAD_GPU; jj++){
if(mywalk == walk_longer)
accp = dev_gravity(eps2, ipos, jpsh[0][jj], accp);
}
}
}
return accp;
}
__device__ float4 ForceKernel_multiwalk(
const float3 ipos,
const int id_walk,
const int2 *ij_disp,
const EpjGPU *epj,
float4 accp,
const float eps2)
{
const int j_head = ij_disp[id_walk ].y;
const int j_tail = ij_disp[id_walk+1].y;
#if 1
for(int j=j_head; j<j_tail; j++){
float4 jposm = epj[j].posm;
accp = dev_gravity(eps2, ipos, jposm, accp);
}
#else
int njmin = j_tail - j_head;
njmin = min(njmin, __shfl_xor(njmin, 1));
njmin = min(njmin, __shfl_xor(njmin, 2));
njmin = min(njmin, __shfl_xor(njmin, 4));
njmin = min(njmin, __shfl_xor(njmin, 8));
njmin = min(njmin, __shfl_xor(njmin, 16));
njmin &= ~3; // round down to a multiple of 4 for the unrolled loop below
for(int j=0; j<njmin; j+=4){
#pragma unroll 4
for(int jj=0; jj<4; jj++){
float4 jposm = epj[j_head + j + jj].posm;
accp = dev_gravity(eps2, ipos, jposm, accp);
}
}
for(int j=j_head+njmin; j<j_tail; j++){
float4 jposm = epj[j].posm;
accp = dev_gravity(eps2, ipos, jposm, accp);
}
#endif
return accp;
}
__global__ void ForceKernel(
const int2 * ij_disp,
const EpiGPU * epi,
const EpjGPU * epj,
ForceGPU * force,
const float eps2)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
float3 ipos = epi[tid].pos;
int id_walk = epi[tid].id_walk;
float4 accp = make_float4(0.f, 0.f, 0.f, 0.f);
int t_head = blockDim.x * blockIdx.x;
int t_tail = t_head + N_THREAD_GPU - 1;
int nwalk_in_block = 1 + (epi[t_tail].id_walk - epi[t_head].id_walk);
__shared__ float4 jpsh[2][N_THREAD_GPU];
if(1 == nwalk_in_block){
accp = ForceKernel_1walk(jpsh[0], ipos, id_walk, ij_disp, epj, accp, eps2);
} else if(2 == nwalk_in_block){
// accp = ForceKernel_multiwalk(ipos, id_walk, ij_disp, epj, accp, eps2);
int iwalk0 = epi[t_head].id_walk;
int iwalk1 = epi[t_tail].id_walk;
accp = ForceKernel_2walk(jpsh, ipos, id_walk, iwalk0, iwalk1, ij_disp, epj, accp, eps2);
} else{
accp = ForceKernel_multiwalk(ipos, id_walk, ij_disp, epj, accp, eps2);
}
force[tid].accp = accp;
}
#endif
static cudaPointer<EpiGPU> dev_epi;
static cudaPointer<EpjGPU> dev_epj;
static cudaPointer<ForceGPU> dev_force;
static cudaPointer<int2> ij_disp;
static bool init_call = true;
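// DispatchKernelWithSP flattens the per-walk EPI/EPJ/SPJ lists into contiguous
// device buffers, records per-walk prefix sums in ij_disp, pads the i-particle
// count up to a multiple of N_THREAD_GPU, and launches ForceKernel; the forces
// are copied back later by RetrieveKernel.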
PS::S32 DispatchKernelWithSP(
const PS::S32 tag,
const PS::S32 n_walk,
const FPGrav *epi[],
const PS::S32 n_epi[],
const FPGrav *epj[],
const PS::S32 n_epj[],
const PS::SPJMonopole *spj[],
const PS::S32 n_spj[]){
assert(n_walk <= N_WALK_LIMIT);
if(init_call){
dev_epi .allocate(NI_LIMIT);
dev_epj .allocate(NJ_LIMIT);
dev_force.allocate(NI_LIMIT);
ij_disp .allocate(N_WALK_LIMIT+2);
init_call = false;
}
const float eps2 = FPGrav::eps * FPGrav::eps;
ij_disp[0].x = 0;
ij_disp[0].y = 0;
for(int k=0; k<n_walk; k++){
ij_disp[k+1].x = ij_disp[k].x + n_epi[k];
ij_disp[k+1].y = ij_disp[k].y + (n_epj[k] + n_spj[k]);
}
ij_disp[n_walk+1] = ij_disp[n_walk];
assert(ij_disp[n_walk].x < NI_LIMIT);
assert(ij_disp[n_walk].y < NJ_LIMIT);
ij_disp.htod(n_walk + 2);
int ni_tot_reg = ij_disp[n_walk].x;
if(ni_tot_reg % N_THREAD_GPU){
ni_tot_reg /= N_THREAD_GPU;
ni_tot_reg++;
ni_tot_reg *= N_THREAD_GPU;
}
int ni_tot = 0;
int nj_tot = 0;
for(int iw=0; iw<n_walk; iw++){
for(int i=0; i<n_epi[iw]; i++){
dev_epi[ni_tot].pos.x = epi[iw][i].pos.x;
dev_epi[ni_tot].pos.y = epi[iw][i].pos.y;
dev_epi[ni_tot].pos.z = epi[iw][i].pos.z;
dev_epi[ni_tot].id_walk = iw;
ni_tot++;
}
for(int j=0; j<n_epj[iw]; j++){
dev_epj[nj_tot].posm.x = epj[iw][j].pos.x;
dev_epj[nj_tot].posm.y = epj[iw][j].pos.y;
dev_epj[nj_tot].posm.z = epj[iw][j].pos.z;
dev_epj[nj_tot].posm.w = epj[iw][j].mass;
nj_tot++;
}
for(int j=0; j<n_spj[iw]; j++){
dev_epj[nj_tot].posm.x = spj[iw][j].pos.x;
dev_epj[nj_tot].posm.y = spj[iw][j].pos.y;
dev_epj[nj_tot].posm.z = spj[iw][j].pos.z;
dev_epj[nj_tot].posm.w = spj[iw][j].getCharge();
nj_tot++;
}
}
for(int i=ni_tot; i<ni_tot_reg; i++){
dev_epi[i].id_walk = n_walk;
}
dev_epi.htod(ni_tot_reg);
dev_epj.htod(nj_tot);
int nblocks = ni_tot_reg / N_THREAD_GPU;
int nthreads = N_THREAD_GPU;
hipLaunchKernelGGL(( ForceKernel) , dim3(nblocks), dim3(nthreads), 0, 0, ij_disp, dev_epi, dev_epj, dev_force, eps2);
return 0;
}
PS::S32 RetrieveKernel(const PS::S32 tag,
const PS::S32 n_walk,
const PS::S32 ni[],
FPGrav *force[])
{
int ni_tot = 0;
for(int k=0; k<n_walk; k++){
ni_tot += ni[k];
}
dev_force.dtoh(ni_tot);
int n_cnt = 0;
for(int iw=0; iw<n_walk; iw++){
for(int i=0; i<ni[iw]; i++){
force[iw][i].acc.x = dev_force[n_cnt].accp.x;
force[iw][i].acc.y = dev_force[n_cnt].accp.y;
force[iw][i].acc.z = dev_force[n_cnt].accp.z;
force[iw][i].pot = dev_force[n_cnt].accp.w;
n_cnt++;
}
}
return 0;
}
| 427da85b04f6fb44d269fe7e5f764956cd43f9e3.cu | //#include "class.hpp"
//#include "force.hpp"
#include<particle_simulator.hpp>
#include "cuda_pointer.h"
#include "force_gpu_cuda.hpp"
enum{
N_THREAD_GPU = 32,
N_WALK_LIMIT = 1000,
NI_LIMIT = N_WALK_LIMIT*1000,
NJ_LIMIT = N_WALK_LIMIT*10000,
};
struct EpiGPU{
float3 pos;
int id_walk;
};
struct EpjGPU{
float4 posm;
};
struct ForceGPU{
float4 accp;
};
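// dev_gravity: softened pairwise interaction. Adds the acceleration of particle i
// at `ipos` due to particle j packed as (x, y, z, mass) in `jposm` into accp.x/y/z,
// and subtracts the pair potential from accp.w.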
inline __device__ float4 dev_gravity(
float eps2,
float3 ipos,
float4 jposm,
float4 accp)
{
float dx = jposm.x - ipos.x;
float dy = jposm.y - ipos.y;
float dz = jposm.z - ipos.z;
float r2 = eps2 + dx*dx + dy*dy + dz*dz;
float rinv = rsqrtf(r2);
float pij = jposm.w * rinv;
float mri3 = rinv*rinv * pij;
accp.x += mri3 * dx;
accp.y += mri3 * dy;
accp.z += mri3 * dz;
accp.w -= pij;
return accp;
}
#if 0
__global__ void ForceKernel(
const int2 * ij_disp,
const EpiGPU * epi,
const EpjGPU * epj,
ForceGPU * force,
const float eps2)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
const float3 ipos = epi[tid].pos;
const int j_head = ij_disp[epi[tid].id_walk ].y;
const int j_tail = ij_disp[epi[tid].id_walk+1].y;
float4 accp = make_float4(0.f, 0.f, 0.f, 0.f);
for(int j=j_head; j<j_tail; j++){
float4 jposm = epj[j].posm;
accp = dev_gravity(eps2, ipos, jposm, accp);
}
force[tid].accp = accp;
}
#else
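// Optimized path: ForceKernel below dispatches to one of three per-block strategies,
// chosen from how many interaction walks the N_THREAD_GPU(=32)-thread block spans:
// ForceKernel_1walk (single shared j-list staged through shared memory),
// ForceKernel_2walk (two walks staged side by side), and ForceKernel_multiwalk
// (fallback streaming epj straight from global memory). The commented-out
// __syncthreads() calls appear to rely on warp-synchronous execution of the
// 32-thread block.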
__device__ float4 ForceKernel_1walk(
float4 *jpsh,
const float3 ipos,
const int id_walk,
const int2 *ij_disp,
const EpjGPU *epj,
float4 accp,
const float eps2)
{
const int tid = threadIdx.x;
const int j_head = ij_disp[id_walk ].y;
const int j_tail = ij_disp[id_walk+1].y;
for(int j=j_head; j<j_tail; j+=N_THREAD_GPU){
// __syncthreads();
jpsh[tid] = ((float4 *)(epj + j)) [tid];
// __syncthreads();
if(j_tail-j < N_THREAD_GPU){
for(int jj=0; jj<j_tail-j; jj++){
accp = dev_gravity(eps2, ipos, jpsh[jj], accp);
}
}else{
#pragma unroll
for(int jj=0; jj<N_THREAD_GPU; jj++){
accp = dev_gravity(eps2, ipos, jpsh[jj], accp);
}
}
}
return accp;
}
__device__ float4 ForceKernel_2walk(
float4 jpsh[2][N_THREAD_GPU],
const float3 ipos,
const int id_walk,
const int iwalk0,
const int iwalk1,
const int2 *ij_disp,
const EpjGPU *epj,
float4 accp,
const float eps2)
{
const int jbeg0 = ij_disp[iwalk0].y;
const int jbeg1 = ij_disp[iwalk1].y;
const int jend0 = ij_disp[iwalk0 + 1].y;
const int jend1 = ij_disp[iwalk1 + 1].y;
const int nj0 = jend0 - jbeg0;
const int nj1 = jend1 - jbeg1;
const int nj_longer = nj0 > nj1 ? nj0 : nj1;
const int nj_shorter = nj0 > nj1 ? nj1 : nj0;
const int walk_longer= nj0 > nj1 ? 0 : 1;
const int jbeg_longer = nj0 > nj1 ? jbeg0 : jbeg1;
const int mywalk = id_walk==iwalk0 ? 0 : 1;
const int tid = threadIdx.x;
for(int j=0; j<nj_shorter; j+=N_THREAD_GPU){
jpsh[0][tid] = ((float4 *)(epj + jbeg0 + j)) [tid];
jpsh[1][tid] = ((float4 *)(epj + jbeg1 + j)) [tid];
if(nj_shorter-j < N_THREAD_GPU){
for(int jj=0; jj<nj_shorter-j; jj++){
accp = dev_gravity(eps2, ipos, jpsh[mywalk][jj], accp);
}
}else{
#pragma unroll
for(int jj=0; jj<N_THREAD_GPU; jj++){
accp = dev_gravity(eps2, ipos, jpsh[mywalk][jj], accp);
}
}
}
for(int j=nj_shorter; j<nj_longer; j+=N_THREAD_GPU){
jpsh[0][tid] = ((float4 *)(epj + jbeg_longer + j)) [tid];
int jrem = nj_longer - j;
if(jrem < N_THREAD_GPU){
for(int jj=0; jj<jrem; jj++){
if(mywalk == walk_longer)
accp = dev_gravity(eps2, ipos, jpsh[0][jj], accp);
}
}else{
#pragma unroll
for(int jj=0; jj<N_THREAD_GPU; jj++){
if(mywalk == walk_longer)
accp = dev_gravity(eps2, ipos, jpsh[0][jj], accp);
}
}
}
return accp;
}
__device__ float4 ForceKernel_multiwalk(
const float3 ipos,
const int id_walk,
const int2 *ij_disp,
const EpjGPU *epj,
float4 accp,
const float eps2)
{
const int j_head = ij_disp[id_walk ].y;
const int j_tail = ij_disp[id_walk+1].y;
#if 1
for(int j=j_head; j<j_tail; j++){
float4 jposm = epj[j].posm;
accp = dev_gravity(eps2, ipos, jposm, accp);
}
#else
int njmin = j_tail - j_head;
njmin = min(njmin, __shfl_xor(njmin, 1));
njmin = min(njmin, __shfl_xor(njmin, 2));
njmin = min(njmin, __shfl_xor(njmin, 4));
njmin = min(njmin, __shfl_xor(njmin, 8));
njmin = min(njmin, __shfl_xor(njmin, 16));
njmin &= ~3; // round down to a multiple of 4 for the unrolled loop below
for(int j=0; j<njmin; j+=4){
#pragma unroll 4
for(int jj=0; jj<4; jj++){
float4 jposm = epj[j_head + j + jj].posm;
accp = dev_gravity(eps2, ipos, jposm, accp);
}
}
for(int j=j_head+njmin; j<j_tail; j++){
float4 jposm = epj[j].posm;
accp = dev_gravity(eps2, ipos, jposm, accp);
}
#endif
return accp;
}
__global__ void ForceKernel(
const int2 * ij_disp,
const EpiGPU * epi,
const EpjGPU * epj,
ForceGPU * force,
const float eps2)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
float3 ipos = epi[tid].pos;
int id_walk = epi[tid].id_walk;
float4 accp = make_float4(0.f, 0.f, 0.f, 0.f);
int t_head = blockDim.x * blockIdx.x;
int t_tail = t_head + N_THREAD_GPU - 1;
int nwalk_in_block = 1 + (epi[t_tail].id_walk - epi[t_head].id_walk);
__shared__ float4 jpsh[2][N_THREAD_GPU];
if(1 == nwalk_in_block){
accp = ForceKernel_1walk(jpsh[0], ipos, id_walk, ij_disp, epj, accp, eps2);
} else if(2 == nwalk_in_block){
// accp = ForceKernel_multiwalk(ipos, id_walk, ij_disp, epj, accp, eps2);
int iwalk0 = epi[t_head].id_walk;
int iwalk1 = epi[t_tail].id_walk;
accp = ForceKernel_2walk(jpsh, ipos, id_walk, iwalk0, iwalk1, ij_disp, epj, accp, eps2);
} else{
accp = ForceKernel_multiwalk(ipos, id_walk, ij_disp, epj, accp, eps2);
}
force[tid].accp = accp;
}
#endif
static cudaPointer<EpiGPU> dev_epi;
static cudaPointer<EpjGPU> dev_epj;
static cudaPointer<ForceGPU> dev_force;
static cudaPointer<int2> ij_disp;
static bool init_call = true;
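// DispatchKernelWithSP flattens the per-walk EPI/EPJ/SPJ lists into contiguous
// device buffers, records per-walk prefix sums in ij_disp, pads the i-particle
// count up to a multiple of N_THREAD_GPU, and launches ForceKernel; the forces
// are copied back later by RetrieveKernel.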
PS::S32 DispatchKernelWithSP(
const PS::S32 tag,
const PS::S32 n_walk,
const FPGrav *epi[],
const PS::S32 n_epi[],
const FPGrav *epj[],
const PS::S32 n_epj[],
const PS::SPJMonopole *spj[],
const PS::S32 n_spj[]){
assert(n_walk <= N_WALK_LIMIT);
if(init_call){
dev_epi .allocate(NI_LIMIT);
dev_epj .allocate(NJ_LIMIT);
dev_force.allocate(NI_LIMIT);
ij_disp .allocate(N_WALK_LIMIT+2);
init_call = false;
}
const float eps2 = FPGrav::eps * FPGrav::eps;
ij_disp[0].x = 0;
ij_disp[0].y = 0;
for(int k=0; k<n_walk; k++){
ij_disp[k+1].x = ij_disp[k].x + n_epi[k];
ij_disp[k+1].y = ij_disp[k].y + (n_epj[k] + n_spj[k]);
}
ij_disp[n_walk+1] = ij_disp[n_walk];
assert(ij_disp[n_walk].x < NI_LIMIT);
assert(ij_disp[n_walk].y < NJ_LIMIT);
ij_disp.htod(n_walk + 2);
int ni_tot_reg = ij_disp[n_walk].x;
if(ni_tot_reg % N_THREAD_GPU){
ni_tot_reg /= N_THREAD_GPU;
ni_tot_reg++;
ni_tot_reg *= N_THREAD_GPU;
}
int ni_tot = 0;
int nj_tot = 0;
for(int iw=0; iw<n_walk; iw++){
for(int i=0; i<n_epi[iw]; i++){
dev_epi[ni_tot].pos.x = epi[iw][i].pos.x;
dev_epi[ni_tot].pos.y = epi[iw][i].pos.y;
dev_epi[ni_tot].pos.z = epi[iw][i].pos.z;
dev_epi[ni_tot].id_walk = iw;
ni_tot++;
}
for(int j=0; j<n_epj[iw]; j++){
dev_epj[nj_tot].posm.x = epj[iw][j].pos.x;
dev_epj[nj_tot].posm.y = epj[iw][j].pos.y;
dev_epj[nj_tot].posm.z = epj[iw][j].pos.z;
dev_epj[nj_tot].posm.w = epj[iw][j].mass;
nj_tot++;
}
for(int j=0; j<n_spj[iw]; j++){
dev_epj[nj_tot].posm.x = spj[iw][j].pos.x;
dev_epj[nj_tot].posm.y = spj[iw][j].pos.y;
dev_epj[nj_tot].posm.z = spj[iw][j].pos.z;
dev_epj[nj_tot].posm.w = spj[iw][j].getCharge();
nj_tot++;
}
}
for(int i=ni_tot; i<ni_tot_reg; i++){
dev_epi[i].id_walk = n_walk;
}
dev_epi.htod(ni_tot_reg);
dev_epj.htod(nj_tot);
int nblocks = ni_tot_reg / N_THREAD_GPU;
int nthreads = N_THREAD_GPU;
ForceKernel <<<nblocks, nthreads>>> (ij_disp, dev_epi, dev_epj, dev_force, eps2);
return 0;
}
PS::S32 RetrieveKernel(const PS::S32 tag,
const PS::S32 n_walk,
const PS::S32 ni[],
FPGrav *force[])
{
int ni_tot = 0;
for(int k=0; k<n_walk; k++){
ni_tot += ni[k];
}
dev_force.dtoh(ni_tot);
int n_cnt = 0;
for(int iw=0; iw<n_walk; iw++){
for(int i=0; i<ni[iw]; i++){
force[iw][i].acc.x = dev_force[n_cnt].accp.x;
force[iw][i].acc.y = dev_force[n_cnt].accp.y;
force[iw][i].acc.z = dev_force[n_cnt].accp.z;
force[iw][i].pot = dev_force[n_cnt].accp.w;
n_cnt++;
}
}
return 0;
}
|
00102adc19b2182b3c0fafe521ff77516451122a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
@author Jan Nemec, [email protected]
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h> /* time() used to seed rand() in main */
/*
Global settings
*/
/** Compile as emulation or use CUDA */
#define EMULATION 0
/** Number of neuron groups that are neither input nor output */
#define HIDDEN_GROUPS 5
#define GROUP_COUNT (HIDDEN_GROUPS + 2)
/** Number of neurons in each group */
#define NEURONS_IN_GROUP 100
/** Divide each float coef by this */
#define DIVIDE_COEF 8192
/** bigger TRESHOLD_RAND -> bigger tresholds */
#define TRESHOLD_RAND 32768
/** maximal number of external connections */
#define MAX_EXTERNAL_CONNECTIONS 8
/** bigger WEIGHT_RAND -> bigger weights */
#define WEIGHT_RAND 110
/** bigger INPUT_RAND -> bigger input in the input layer */
#define INPUT_RAND 256
/** how many steps to compute */
#define ITERATIONS 1000
/*
Global types
*/
/** we will compute in this type */
typedef float FLOAT_TYPE;
/** Network of neurons */
typedef struct
{
/* full matrix NEURONS_IN_GROUP * NEURONS_IN_GROUP
weight from 1 to 2 is in w[group][1 + 2 * NEURONS_IN_GROUP] */
FLOAT_TYPE w[GROUP_COUNT * NEURONS_IN_GROUP * NEURONS_IN_GROUP];
/* 0 .. NEURONS_IN_GROUP
Fixed input (added every step to the potential of the neuron) */
FLOAT_TYPE inputs[GROUP_COUNT * NEURONS_IN_GROUP];
/* 0 .. NEURONS_IN_GROUP */
FLOAT_TYPE tresholds[GROUP_COUNT * NEURONS_IN_GROUP];
/* 0 .. NEURONS_IN_GROUP */
FLOAT_TYPE potentials[GROUP_COUNT * NEURONS_IN_GROUP];
/* is each neuron active in the current step */
unsigned char active[GROUP_COUNT * NEURONS_IN_GROUP];
/** Connections from another group
connections_xx[group][1][2] is the third (0, 1, 2) connection of the second (0, 1)
neuron */
int connection_group[GROUP_COUNT * NEURONS_IN_GROUP * MAX_EXTERNAL_CONNECTIONS];
int connection_neuron[GROUP_COUNT * NEURONS_IN_GROUP * MAX_EXTERNAL_CONNECTIONS];
FLOAT_TYPE connection_w[GROUP_COUNT * NEURONS_IN_GROUP * MAX_EXTERNAL_CONNECTIONS];
/** number of external connections */
int connection_count[GROUP_COUNT * NEURONS_IN_GROUP];
} TNetwork;
/**
Inits every single group of the network.
*/
void initNetwork(TNetwork *net)
{
int group;
for (group = 0; group < GROUP_COUNT; group++)
{
int i;
for (i = 0; i < NEURONS_IN_GROUP; i++)
{
int j;
/* init connections from other groups */
int limit = net->connection_count[group * NEURONS_IN_GROUP + i] = rand() % MAX_EXTERNAL_CONNECTIONS;
for (j = 0; j < limit; j++)
{
int index = group * NEURONS_IN_GROUP * MAX_EXTERNAL_CONNECTIONS
+ i * MAX_EXTERNAL_CONNECTIONS + j;
net->connection_group[index] = rand() % GROUP_COUNT;
net->connection_neuron[index] = rand() % NEURONS_IN_GROUP;
net->connection_w[index] = ((rand() % WEIGHT_RAND) / (FLOAT_TYPE) DIVIDE_COEF);
}
}
/* init connections inside this group */
for (i = 0; i < NEURONS_IN_GROUP * NEURONS_IN_GROUP; i++)
{
net->w[group * NEURONS_IN_GROUP * NEURONS_IN_GROUP + i] =
(rand() % WEIGHT_RAND) / (FLOAT_TYPE) DIVIDE_COEF;
}
/* init all the data for each neuron */
for (i = 0; i < NEURONS_IN_GROUP; i++)
{
int index = group * NEURONS_IN_GROUP + i;
net->inputs[index] = group ? 0 :
/* "normal" distribution to get more stable result */
(
(rand() % INPUT_RAND) + (rand() % INPUT_RAND) +
(rand() % INPUT_RAND) + (rand() % INPUT_RAND)
) / (FLOAT_TYPE) (DIVIDE_COEF * 4);
net->tresholds[index] = (1 + (rand() % TRESHOLD_RAND)) /
(FLOAT_TYPE) DIVIDE_COEF;
net->potentials[index] = 0;
net->active[index] = 0;
}
}
}
/* print a single line of the output */
void printOutputArray(int line, const unsigned char *output)
{
int i;
printf("%i ", line);
for (i = 0; i < NEURONS_IN_GROUP; i++)
{
putchar(output[i] ? '1' : '0');
}
puts("");
}
#if EMULATION
/**
Single step of the computing
*/
void step(TNetwork *net)
{
int i;
/* The first step - connections from other group */
/* for each group */
for (i = 0; i < GROUP_COUNT; i++)
{
int j;
/* for each neuron in the group */
for (j = 0; j < NEURONS_IN_GROUP; j++)
{
int k;
int limit = net->connection_count[i * NEURONS_IN_GROUP + j];
/* for each connection (from the other group) of the neuron */
for (k = 0; k < limit; k++)
{
int index = i * NEURONS_IN_GROUP * MAX_EXTERNAL_CONNECTIONS
+ j * MAX_EXTERNAL_CONNECTIONS + k;
/* if the other neuron is active*/
if (
net->active
[ net->connection_group[index] * NEURONS_IN_GROUP +
net->connection_neuron[index] ]
)
{
/* add a bonus to our potential */
net->potentials[i * NEURONS_IN_GROUP + j] +=
net->connection_w[index];
}
}
}
}
/* The second step */
/* for each group */
for (i = 0; i < GROUP_COUNT; i++)
{
int j, k;
/* for each neuron in the group */
for (j = 0; j < NEURONS_IN_GROUP; j++)
{
FLOAT_TYPE *ptrW = net->w +
i * (NEURONS_IN_GROUP * NEURONS_IN_GROUP) +
j * NEURONS_IN_GROUP;
unsigned char *ptrA = net->active + i * NEURONS_IN_GROUP;
int index = i * NEURONS_IN_GROUP + j;
/* for each connection */
for (k = 0; k < NEURONS_IN_GROUP; k++)
{
if (*ptrA)
{
/* add the weight if the neuron is active */
net->potentials[index] += *ptrW;
}
ptrW++;
ptrA++;
}
/* Add input to the potential */
net->potentials[index] += net->inputs[index];
}
}
/* for each group */
for (i = 0; i < GROUP_COUNT; i++)
{
int j;
/* for each neuron in the group */
for (j = 0; j < NEURONS_IN_GROUP; j++)
{
int index = i * NEURONS_IN_GROUP + j;
/* Check tresholds and set active neuron*/
if (net->potentials[index] >= net->tresholds[index])
{
net->potentials[index] = 0;
net->active[index] = 1;
}
else
{
net->active[index] = 0;
}
}
}
}
/* print the output of the network */
void printResult(int line, TNetwork *net)
{
printOutputArray(line, net->active + (GROUP_COUNT - 1) * NEURONS_IN_GROUP);
}
#else
/**
One step of computing - updating of potentials
*/
__global__ void updatePotentials(int *d_connection_count,
unsigned char *d_active, int *d_connection_group,
int *d_connection_neuron, FLOAT_TYPE *d_connection_w,
FLOAT_TYPE *d_potentials, FLOAT_TYPE *d_w, FLOAT_TYPE *d_inputs)
{
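/* Launched with one block per neuron group and one thread per neuron
   (grid of GROUP_COUNT blocks with NEURONS_IN_GROUP threads, see main),
   so blockIdx.x selects the group and threadIdx.x the neuron inside it. */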
int g = blockIdx.x;
int n = threadIdx.x;
int k;
int index = NEURONS_IN_GROUP * g + n;
int limit = d_connection_count[index];
/* for each connection (from the other group) of the neuron */
for (k = 0; k < limit; k++)
{
int index2 = g * NEURONS_IN_GROUP * MAX_EXTERNAL_CONNECTIONS
+ n * MAX_EXTERNAL_CONNECTIONS + k;
if (
d_active
[NEURONS_IN_GROUP * d_connection_group[index2] +
d_connection_neuron[index2] ]
)
{
/* add a bonus to our potential */
d_potentials[index] += d_connection_w[index2];
}
}
FLOAT_TYPE *ptrW = d_w +
g * (NEURONS_IN_GROUP * NEURONS_IN_GROUP) +
n * NEURONS_IN_GROUP;
unsigned char *ptrA = d_active + g * NEURONS_IN_GROUP;
/* for each connection */
for (k = 0; k < NEURONS_IN_GROUP; k++)
{
if (*ptrA)
{
/* add the weight if the neuron is active */
d_potentials[index] += *ptrW;
}
ptrW++;
ptrA++;
}
/* Add input to the potential */
d_potentials[index] += d_inputs[index];
}
/**
One step of computing - updating of active states
*/
__global__ void updateActive(FLOAT_TYPE *d_potentials,
FLOAT_TYPE *d_tresholds, unsigned char *d_active)
{
int g = blockIdx.x;
int n = threadIdx.x;
int index = NEURONS_IN_GROUP * g + n;
if (d_potentials[index] >= d_tresholds[index])
{
d_potentials[index] = 0;
d_active[index] = 1;
}
else
{
d_active[index] = 0;
}
}
/** report error and exit */
void handleError(hipError_t e, const char *function)
{
fprintf(stderr, "Error %u in %s (%s), exiting\n",
(unsigned) e, function, hipGetErrorString(e));
exit(1);
}
/** check hipGetLastError() */
void checkAndHandleKernelError(const char *function)
{
hipError_t e;
e = hipGetLastError();
if (e != hipSuccess)
{
handleError(e, function);
}
}
/** check the function call return code */
void checkAndHandleFunctionError(hipError_t e, const char *function)
{
if (e != hipSuccess)
{
handleError(e, function);
}
}
#endif
int main(void)
{
int i;
TNetwork *net = (TNetwork *)malloc(sizeof(TNetwork));
srand(time(NULL));
initNetwork(net);
#if EMULATION
for (i = 0; i < ITERATIONS; i++)
{
step(net);
printResult(i, net);
}
#else
/* arrays for kernels */
FLOAT_TYPE *d_w;
FLOAT_TYPE *d_inputs;
FLOAT_TYPE *d_tresholds;
FLOAT_TYPE *d_potentials;
unsigned char *d_active;
int *d_connection_group;
int *d_connection_neuron;
FLOAT_TYPE *d_connection_w;
int *d_connection_count;
/* allocate the memory for kernels and copy from PC struct */
int w_size = sizeof(FLOAT_TYPE) * GROUP_COUNT * NEURONS_IN_GROUP *
NEURONS_IN_GROUP;
checkAndHandleFunctionError(hipMalloc(&d_w, w_size), "hipMalloc");
checkAndHandleFunctionError(hipMemcpy(d_w, net->w, w_size,
hipMemcpyHostToDevice), "hipMemcpy");
int inputs_size = sizeof(FLOAT_TYPE) * GROUP_COUNT * NEURONS_IN_GROUP;
checkAndHandleFunctionError(hipMalloc(&d_inputs, inputs_size), "hipMalloc");
checkAndHandleFunctionError(hipMemcpy(d_inputs, net->inputs, inputs_size,
hipMemcpyHostToDevice), "hipMemcpy");
checkAndHandleFunctionError(hipMalloc(&d_tresholds, inputs_size), "hipMalloc");
checkAndHandleFunctionError(hipMemcpy(d_tresholds, net->tresholds, inputs_size,
hipMemcpyHostToDevice), "hipMemcpy");
checkAndHandleFunctionError(hipMalloc(&d_potentials, inputs_size), "hipMalloc");
checkAndHandleFunctionError(hipMemcpy(d_potentials, net->potentials, inputs_size,
hipMemcpyHostToDevice), "hipMemcpy");
int active_size = sizeof(unsigned char) * GROUP_COUNT * NEURONS_IN_GROUP;
checkAndHandleFunctionError(hipMalloc(&d_active, active_size), "hipMalloc");
checkAndHandleFunctionError(hipMemcpy(d_active, net->active, active_size,
hipMemcpyHostToDevice), "hipMemcpy");
int connection_group_size = sizeof(int) * GROUP_COUNT * NEURONS_IN_GROUP *
MAX_EXTERNAL_CONNECTIONS;
checkAndHandleFunctionError(hipMalloc(&d_connection_group,
connection_group_size), "hipMalloc");
checkAndHandleFunctionError(hipMemcpy(d_connection_group,
net->connection_group, connection_group_size, hipMemcpyHostToDevice),
"hipMemcpy");
checkAndHandleFunctionError(hipMalloc(&d_connection_neuron,
connection_group_size), "hipMalloc");
checkAndHandleFunctionError(hipMemcpy(d_connection_neuron,
net->connection_neuron, connection_group_size, hipMemcpyHostToDevice),
"hipMemcpy");
int connection_w_size = sizeof(FLOAT_TYPE) * GROUP_COUNT * NEURONS_IN_GROUP
* MAX_EXTERNAL_CONNECTIONS;
checkAndHandleFunctionError(hipMalloc(&d_connection_w,
connection_w_size), "hipMalloc");
checkAndHandleFunctionError(hipMemcpy(d_connection_w,
net->connection_w, connection_w_size, hipMemcpyHostToDevice),
"hipMemcpy");
int connection_count_size = sizeof(int) * GROUP_COUNT * NEURONS_IN_GROUP;
checkAndHandleFunctionError(hipMalloc(&d_connection_count,
connection_count_size), "hipMalloc");
checkAndHandleFunctionError(hipMemcpy(d_connection_count,
net->connection_count, connection_count_size, hipMemcpyHostToDevice),
"hipMemcpy");
for (i = 0; i < ITERATIONS; i++)
{
unsigned char active[NEURONS_IN_GROUP];
hipLaunchKernelGGL(( updatePotentials), dim3(GROUP_COUNT), dim3(NEURONS_IN_GROUP), 0, 0, d_connection_count,
d_active, d_connection_group, d_connection_neuron, d_connection_w,
d_potentials, d_w, d_inputs);
checkAndHandleKernelError("updatePotentials");
hipLaunchKernelGGL(( updateActive), dim3(GROUP_COUNT), dim3(NEURONS_IN_GROUP), 0, 0, d_potentials,
d_tresholds, d_active);
checkAndHandleKernelError("updateActive");
/* copy back the output (last) group so the printed rows match printResult() in the emulation path */
checkAndHandleFunctionError(hipMemcpy(active,
d_active + (GROUP_COUNT - 1) * NEURONS_IN_GROUP,
sizeof(unsigned char) * NEURONS_IN_GROUP, hipMemcpyDeviceToHost),
"hipMemcpy");
printOutputArray(i, active);
}
/* Free all the memory used by kernels */
checkAndHandleFunctionError(hipFree(d_w), "hipFree");
checkAndHandleFunctionError(hipFree(d_inputs), "hipFree");
checkAndHandleFunctionError(hipFree(d_potentials), "hipFree");
checkAndHandleFunctionError(hipFree(d_tresholds), "hipFree");
checkAndHandleFunctionError(hipFree(d_active), "hipFree");
checkAndHandleFunctionError(hipFree(d_connection_group), "hipFree");
checkAndHandleFunctionError(hipFree(d_connection_neuron), "hipFree");
checkAndHandleFunctionError(hipFree(d_connection_w), "hipFree");
checkAndHandleFunctionError(hipFree(d_connection_count), "hipFree");
#endif
free(net);
return 0;
}
| 00102adc19b2182b3c0fafe521ff77516451122a.cu | /*
@author Jan Nemec, [email protected]
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h> /* time() used to seed rand() in main */
/*
Global settings
*/
/** Compile as emulation or use CUDA */
#define EMULATION 0
/** Number of neuron groups that are neither input nor output */
#define HIDDEN_GROUPS 5
#define GROUP_COUNT (HIDDEN_GROUPS + 2)
/** Number of neurons in each group */
#define NEURONS_IN_GROUP 100
/** Divide each float coef by this */
#define DIVIDE_COEF 8192
/** bigger TRESHOLD_RAND -> bigger tresholds */
#define TRESHOLD_RAND 32768
/** maximal number of external connections */
#define MAX_EXTERNAL_CONNECTIONS 8
/** bigger WEIGHT_RAND -> bigger weights */
#define WEIGHT_RAND 110
/** bigger INPUT_RAND -> bigger input in the input layer */
#define INPUT_RAND 256
/** how many steps to compute */
#define ITERATIONS 1000
/*
Global types
*/
/** we will compute in this type */
typedef float FLOAT_TYPE;
/** Network of neurons */
typedef struct
{
/* full matrix NEURONS_IN_GROUP * NEURONS_IN_GROUP
weight from 1 to 2 is in w[group][1 + 2 * NEURONS_IN_GROUP] */
FLOAT_TYPE w[GROUP_COUNT * NEURONS_IN_GROUP * NEURONS_IN_GROUP];
/* 0 .. NEURONS_IN_GROUP
Fixed input (added every step to the potential of the neuron) */
FLOAT_TYPE inputs[GROUP_COUNT * NEURONS_IN_GROUP];
/* 0 .. NEURONS_IN_GROUP */
FLOAT_TYPE tresholds[GROUP_COUNT * NEURONS_IN_GROUP];
/* 0 .. NEURONS_IN_GROUP */
FLOAT_TYPE potentials[GROUP_COUNT * NEURONS_IN_GROUP];
/* is each neuron active in the current step */
unsigned char active[GROUP_COUNT * NEURONS_IN_GROUP];
/** Connections from another group
connections_xx[group][1][2] is the third (0, 1, 2) connection of the second (0, 1)
neuron */
int connection_group[GROUP_COUNT * NEURONS_IN_GROUP * MAX_EXTERNAL_CONNECTIONS];
int connection_neuron[GROUP_COUNT * NEURONS_IN_GROUP * MAX_EXTERNAL_CONNECTIONS];
FLOAT_TYPE connection_w[GROUP_COUNT * NEURONS_IN_GROUP * MAX_EXTERNAL_CONNECTIONS];
/** number of external connections */
int connection_count[GROUP_COUNT * NEURONS_IN_GROUP];
} TNetwork;
/**
Inits every single group of the network.
*/
void initNetwork(TNetwork *net)
{
int group;
for (group = 0; group < GROUP_COUNT; group++)
{
int i;
for (i = 0; i < NEURONS_IN_GROUP; i++)
{
int j;
/* init connections from other groups */
int limit = net->connection_count[group * NEURONS_IN_GROUP + i] = rand() % MAX_EXTERNAL_CONNECTIONS;
for (j = 0; j < limit; j++)
{
int index = group * NEURONS_IN_GROUP * MAX_EXTERNAL_CONNECTIONS
+ i * MAX_EXTERNAL_CONNECTIONS + j;
net->connection_group[index] = rand() % GROUP_COUNT;
net->connection_neuron[index] = rand() % NEURONS_IN_GROUP;
net->connection_w[index] = ((rand() % WEIGHT_RAND) / (FLOAT_TYPE) DIVIDE_COEF);
}
}
/* init connections inside this group */
for (i = 0; i < NEURONS_IN_GROUP * NEURONS_IN_GROUP; i++)
{
net->w[group * NEURONS_IN_GROUP * NEURONS_IN_GROUP + i] =
(rand() % WEIGHT_RAND) / (FLOAT_TYPE) DIVIDE_COEF;
}
/* init all the data for each neuron */
for (i = 0; i < NEURONS_IN_GROUP; i++)
{
int index = group * NEURONS_IN_GROUP + i;
net->inputs[index] = group ? 0 :
/* "normal" distribution to get more stable result */
(
(rand() % INPUT_RAND) + (rand() % INPUT_RAND) +
(rand() % INPUT_RAND) + (rand() % INPUT_RAND)
) / (FLOAT_TYPE) (DIVIDE_COEF * 4);
net->tresholds[index] = (1 + (rand() % TRESHOLD_RAND)) /
(FLOAT_TYPE) DIVIDE_COEF;
net->potentials[index] = 0;
net->active[index] = 0;
}
}
}
/* print a single line of the output */
void printOutputArray(int line, const unsigned char *output)
{
int i;
printf("%i ", line);
for (i = 0; i < NEURONS_IN_GROUP; i++)
{
putchar(output[i] ? '1' : '0');
}
puts("");
}
#if EMULATION
/**
Single step of the computing
*/
void step(TNetwork *net)
{
int i;
/* The first step - connections from other group */
/* for each group */
for (i = 0; i < GROUP_COUNT; i++)
{
int j;
/* for each neuron in the group */
for (j = 0; j < NEURONS_IN_GROUP; j++)
{
int k;
int limit = net->connection_count[i * NEURONS_IN_GROUP + j];
/* for each connection (from the other group) of the neuron */
for (k = 0; k < limit; k++)
{
int index = i * NEURONS_IN_GROUP * MAX_EXTERNAL_CONNECTIONS
+ j * MAX_EXTERNAL_CONNECTIONS + k;
/* if the other neuron is active*/
if (
net->active
[ net->connection_group[index] * NEURONS_IN_GROUP +
net->connection_neuron[index] ]
)
{
/* add a bonus to our potential */
net->potentials[i * NEURONS_IN_GROUP + j] +=
net->connection_w[index];
}
}
}
}
/* The second step */
/* for each group */
for (i = 0; i < GROUP_COUNT; i++)
{
int j, k;
/* for each neuron in the group */
for (j = 0; j < NEURONS_IN_GROUP; j++)
{
FLOAT_TYPE *ptrW = net->w +
i * (NEURONS_IN_GROUP * NEURONS_IN_GROUP) +
j * NEURONS_IN_GROUP;
unsigned char *ptrA = net->active + i * NEURONS_IN_GROUP;
int index = i * NEURONS_IN_GROUP + j;
/* for each connection */
for (k = 0; k < NEURONS_IN_GROUP; k++)
{
if (*ptrA)
{
/* add the weight if the neuron is active */
net->potentials[index] += *ptrW;
}
ptrW++;
ptrA++;
}
/* Add input to the potential */
net->potentials[index] += net->inputs[index];
}
}
/* for each group */
for (i = 0; i < GROUP_COUNT; i++)
{
int j;
/* for each neuron in the group */
for (j = 0; j < NEURONS_IN_GROUP; j++)
{
int index = i * NEURONS_IN_GROUP + j;
/* Check tresholds and set active neuron*/
if (net->potentials[index] >= net->tresholds[index])
{
net->potentials[index] = 0;
net->active[index] = 1;
}
else
{
net->active[index] = 0;
}
}
}
}
/* print the output of the network */
void printResult(int line, TNetwork *net)
{
printOutputArray(line, net->active + (GROUP_COUNT - 1) * NEURONS_IN_GROUP);
}
#else
/**
One step of computing - updating of potentials
*/
__global__ void updatePotentials(int *d_connection_count,
unsigned char *d_active, int *d_connection_group,
int *d_connection_neuron, FLOAT_TYPE *d_connection_w,
FLOAT_TYPE *d_potentials, FLOAT_TYPE *d_w, FLOAT_TYPE *d_inputs)
{
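/* Launched with one block per neuron group and one thread per neuron
   (grid of GROUP_COUNT blocks with NEURONS_IN_GROUP threads, see main),
   so blockIdx.x selects the group and threadIdx.x the neuron inside it. */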
int g = blockIdx.x;
int n = threadIdx.x;
int k;
int index = NEURONS_IN_GROUP * g + n;
int limit = d_connection_count[index];
/* for each connection (from the other group) of the neuron */
for (k = 0; k < limit; k++)
{
int index2 = g * NEURONS_IN_GROUP * MAX_EXTERNAL_CONNECTIONS
+ n * MAX_EXTERNAL_CONNECTIONS + k;
if (
d_active
[NEURONS_IN_GROUP * d_connection_group[index2] +
d_connection_neuron[index2] ]
)
{
/* add a bonus to our potential */
d_potentials[index] += d_connection_w[index2];
}
}
FLOAT_TYPE *ptrW = d_w +
g * (NEURONS_IN_GROUP * NEURONS_IN_GROUP) +
n * NEURONS_IN_GROUP;
unsigned char *ptrA = d_active + g * NEURONS_IN_GROUP;
/* for each connection */
for (k = 0; k < NEURONS_IN_GROUP; k++)
{
if (*ptrA)
{
/* add the weight if the neuron is active */
d_potentials[index] += *ptrW;
}
ptrW++;
ptrA++;
}
/* Add input to the potential */
d_potentials[index] += d_inputs[index];
}
/**
One step of computing - updating of active states
*/
__global__ void updateActive(FLOAT_TYPE *d_potentials,
FLOAT_TYPE *d_tresholds, unsigned char *d_active)
{
int g = blockIdx.x;
int n = threadIdx.x;
int index = NEURONS_IN_GROUP * g + n;
if (d_potentials[index] >= d_tresholds[index])
{
d_potentials[index] = 0;
d_active[index] = 1;
}
else
{
d_active[index] = 0;
}
}
/** report error and exit */
void handleError(cudaError_t e, const char *function)
{
fprintf(stderr, "Error %u in %s (%s), exiting\n",
(unsigned) e, function, cudaGetErrorString(e));
exit(1);
}
/** check cudaGetLastError() */
void checkAndHandleKernelError(const char *function)
{
cudaError_t e;
e = cudaGetLastError();
if (e != cudaSuccess)
{
handleError(e, function);
}
}
/** check the function call return code */
void checkAndHandleFunctionError(cudaError_t e, const char *function)
{
if (e != cudaSuccess)
{
handleError(e, function);
}
}
#endif
int main(void)
{
int i;
TNetwork *net = (TNetwork *)malloc(sizeof(TNetwork));
srand(time(NULL));
initNetwork(net);
#if EMULATION
for (i = 0; i < ITERATIONS; i++)
{
step(net);
printResult(i, net);
}
#else
/* arrays for kernels */
FLOAT_TYPE *d_w;
FLOAT_TYPE *d_inputs;
FLOAT_TYPE *d_tresholds;
FLOAT_TYPE *d_potentials;
unsigned char *d_active;
int *d_connection_group;
int *d_connection_neuron;
FLOAT_TYPE *d_connection_w;
int *d_connection_count;
/* allocate the memory for kernels and copy from PC struct */
int w_size = sizeof(FLOAT_TYPE) * GROUP_COUNT * NEURONS_IN_GROUP *
NEURONS_IN_GROUP;
checkAndHandleFunctionError(cudaMalloc(&d_w, w_size), "cudaMalloc");
checkAndHandleFunctionError(cudaMemcpy(d_w, net->w, w_size,
cudaMemcpyHostToDevice), "cudaMemcpy");
int inputs_size = sizeof(FLOAT_TYPE) * GROUP_COUNT * NEURONS_IN_GROUP;
checkAndHandleFunctionError(cudaMalloc(&d_inputs, inputs_size), "cudaMalloc");
checkAndHandleFunctionError(cudaMemcpy(d_inputs, net->inputs, inputs_size,
cudaMemcpyHostToDevice), "cudaMemcpy");
checkAndHandleFunctionError(cudaMalloc(&d_tresholds, inputs_size), "cudaMalloc");
checkAndHandleFunctionError(cudaMemcpy(d_tresholds, net->tresholds, inputs_size,
cudaMemcpyHostToDevice), "cudaMemcpy");
checkAndHandleFunctionError(cudaMalloc(&d_potentials, inputs_size), "cudaMalloc");
checkAndHandleFunctionError(cudaMemcpy(d_potentials, net->potentials, inputs_size,
cudaMemcpyHostToDevice), "cudaMemcpy");
int active_size = sizeof(unsigned char) * GROUP_COUNT * NEURONS_IN_GROUP;
checkAndHandleFunctionError(cudaMalloc(&d_active, active_size), "cudaMalloc");
checkAndHandleFunctionError(cudaMemcpy(d_active, net->active, active_size,
cudaMemcpyHostToDevice), "cudaMemcpy");
int connection_group_size = sizeof(int) * GROUP_COUNT * NEURONS_IN_GROUP *
MAX_EXTERNAL_CONNECTIONS;
checkAndHandleFunctionError(cudaMalloc(&d_connection_group,
connection_group_size), "cudaMalloc");
checkAndHandleFunctionError(cudaMemcpy(d_connection_group,
net->connection_group, connection_group_size, cudaMemcpyHostToDevice),
"cudaMemcpy");
checkAndHandleFunctionError(cudaMalloc(&d_connection_neuron,
connection_group_size), "cudaMalloc");
checkAndHandleFunctionError(cudaMemcpy(d_connection_neuron,
net->connection_neuron, connection_group_size, cudaMemcpyHostToDevice),
"cudaMemcpy");
int connection_w_size = sizeof(FLOAT_TYPE) * GROUP_COUNT * NEURONS_IN_GROUP
* MAX_EXTERNAL_CONNECTIONS;
checkAndHandleFunctionError(cudaMalloc(&d_connection_w,
connection_w_size), "cudaMalloc");
checkAndHandleFunctionError(cudaMemcpy(d_connection_w,
net->connection_w, connection_w_size, cudaMemcpyHostToDevice),
"cudaMemcpy");
int connection_count_size = sizeof(int) * GROUP_COUNT * NEURONS_IN_GROUP;
checkAndHandleFunctionError(cudaMalloc(&d_connection_count,
connection_count_size), "cudaMalloc");
checkAndHandleFunctionError(cudaMemcpy(d_connection_count,
net->connection_count, connection_count_size, cudaMemcpyHostToDevice),
"cudaMemcpy");
for (i = 0; i < ITERATIONS; i++)
{
unsigned char active[NEURONS_IN_GROUP];
updatePotentials<<<GROUP_COUNT, NEURONS_IN_GROUP>>>(d_connection_count,
d_active, d_connection_group, d_connection_neuron, d_connection_w,
d_potentials, d_w, d_inputs);
checkAndHandleKernelError("updatePotentials");
updateActive<<<GROUP_COUNT, NEURONS_IN_GROUP>>>(d_potentials,
d_tresholds, d_active);
checkAndHandleKernelError("updateActive");
/* copy back the output (last) group so the printed rows match printResult() in the emulation path */
checkAndHandleFunctionError(cudaMemcpy(active,
d_active + (GROUP_COUNT - 1) * NEURONS_IN_GROUP,
sizeof(unsigned char) * NEURONS_IN_GROUP, cudaMemcpyDeviceToHost),
"cudaMemcpy");
printOutputArray(i, active);
}
/* Free all the memory used by kernels */
checkAndHandleFunctionError(cudaFree(d_w), "cudaFree");
checkAndHandleFunctionError(cudaFree(d_inputs), "cudaFree");
checkAndHandleFunctionError(cudaFree(d_potentials), "cudaFree");
checkAndHandleFunctionError(cudaFree(d_tresholds), "cudaFree");
checkAndHandleFunctionError(cudaFree(d_active), "cudaFree");
checkAndHandleFunctionError(cudaFree(d_connection_group), "cudaFree");
checkAndHandleFunctionError(cudaFree(d_connection_neuron), "cudaFree");
checkAndHandleFunctionError(cudaFree(d_connection_w), "cudaFree");
checkAndHandleFunctionError(cudaFree(d_connection_count), "cudaFree");
#endif
free(net);
return 0;
}
|
4e9590694c71f848977ac280061ec5ec996328a1.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE.txt for terms of usage.
// See CITATION.txt for citation guidelines if you use this code for scientific publications.
//
// Author: Giovanni Balduzzi ([email protected])
//
// This file implements ClusterHelper::set.
#include "dca/phys/dca_step/cluster_solver/shared_tools/cluster_helper.cuh"
#include <mutex>
#include "dca/linalg/util/allocators/vectors_typedefs.hpp"
namespace dca {
namespace phys {
namespace solver {
namespace details {
// dca::phys::solver::details::
__device__ __constant__ ClusterHelper cluster_real_helper;
__device__ __constant__ ClusterHelper cluster_momentum_helper;
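// Two copies of the helper live in constant memory: one for the real-space cluster
// and one for the momentum-space cluster. ClusterHelper::set() below fills whichever
// the `momentum` flag selects, exactly once per process (std::call_once).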
void ClusterHelper::set(int nc, const int* add, int lda, const int* sub, int lds, bool momentum) {
static std::array<std::once_flag, 2> flags;
std::call_once(flags[momentum], [=]() {
ClusterHelper host_helper;
host_helper.nc_ = nc;
auto compact_transfer = [=](const int* matrix, int ldm, int** dest) {
linalg::util::HostVector<int> compact(nc * nc);
for (int j = 0; j < nc; ++j)
for (int i = 0; i < nc; ++i)
compact[i + nc * j] = matrix[i + ldm * j];
hipMalloc(dest, sizeof(int) * nc * nc);
hipMemcpy(*dest, compact.data(), sizeof(int) * nc * nc, hipMemcpyHostToDevice);
};
compact_transfer(add, lda, const_cast<int**>(&host_helper.add_matrix_));
compact_transfer(sub, lds, const_cast<int**>(&host_helper.sub_matrix_));
if (momentum) {
hipMemcpyToSymbol(cluster_momentum_helper, &host_helper, sizeof(ClusterHelper));
}
else {
hipMemcpyToSymbol(cluster_real_helper, &host_helper, sizeof(ClusterHelper));
}
});
}
} // namespace details
} // namespace solver
} // namespace phys
} // namespace dca
| 4e9590694c71f848977ac280061ec5ec996328a1.cu | // Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE.txt for terms of usage.
// See CITATION.txt for citation guidelines if you use this code for scientific publications.
//
// Author: Giovanni Balduzzi ([email protected])
//
// This file implements ClusterHelper::set.
#include "dca/phys/dca_step/cluster_solver/shared_tools/cluster_helper.cuh"
#include <mutex>
#include "dca/linalg/util/allocators/vectors_typedefs.hpp"
namespace dca {
namespace phys {
namespace solver {
namespace details {
// dca::phys::solver::details::
__device__ __constant__ ClusterHelper cluster_real_helper;
__device__ __constant__ ClusterHelper cluster_momentum_helper;
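// Two copies of the helper live in constant memory: one for the real-space cluster
// and one for the momentum-space cluster. ClusterHelper::set() below fills whichever
// the `momentum` flag selects, exactly once per process (std::call_once).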
void ClusterHelper::set(int nc, const int* add, int lda, const int* sub, int lds, bool momentum) {
static std::array<std::once_flag, 2> flags;
std::call_once(flags[momentum], [=]() {
ClusterHelper host_helper;
host_helper.nc_ = nc;
auto compact_transfer = [=](const int* matrix, int ldm, int** dest) {
linalg::util::HostVector<int> compact(nc * nc);
for (int j = 0; j < nc; ++j)
for (int i = 0; i < nc; ++i)
compact[i + nc * j] = matrix[i + ldm * j];
cudaMalloc(dest, sizeof(int) * nc * nc);
cudaMemcpy(*dest, compact.data(), sizeof(int) * nc * nc, cudaMemcpyHostToDevice);
};
compact_transfer(add, lda, const_cast<int**>(&host_helper.add_matrix_));
compact_transfer(sub, lds, const_cast<int**>(&host_helper.sub_matrix_));
if (momentum) {
cudaMemcpyToSymbol(cluster_momentum_helper, &host_helper, sizeof(ClusterHelper));
}
else {
cudaMemcpyToSymbol(cluster_real_helper, &host_helper, sizeof(ClusterHelper));
}
});
}
} // namespace details
} // namespace solver
} // namespace phys
} // namespace dca
|
1ae93f2ab11c9bfa59d7346dfc15c3d4042dae3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_k4a_align.h"
#define CUDA_THREADS_PER_BLOCK 16
static inline int divUp(int total, int grain) {
return (total + grain - 1) / grain;
}
template<typename T>
std::shared_ptr<T> make_device_copy(T obj)
{
T* d_data;
auto res = hipMalloc(&d_data, sizeof(T));
if (res != hipSuccess)
throw std::runtime_error("hipMalloc failed status: " + res);
hipMemcpy(d_data, &obj, sizeof(T), hipMemcpyHostToDevice);
return std::shared_ptr<T>(d_data, [](T* data) { hipFree(data); });
}
template<typename T>
std::shared_ptr<T> alloc_dev(int elements)
{
T* d_data;
auto res = hipMalloc(&d_data, sizeof(T) * elements);
if (res != hipSuccess)
throw std::runtime_error("hipMalloc failed status: " + res);
return std::shared_ptr<T>(d_data, [](T* p) { hipFree(p); });
}
template<class T>
void release_memory(T& obj)
{
obj = nullptr;
}
__device__
static bool cuda_project_pixel_to_point_with_distortion(const struct cuda_align::cuda_intrinsics * camera_calibration,
const float xy[2],
float uv[2],
int& valid,
float J_xy[2 * 2])
{
float cx = camera_calibration->cx;
float cy = camera_calibration->cy;
float fx = camera_calibration->fx;
float fy = camera_calibration->fy;
float k1 = camera_calibration->k1;
float k2 = camera_calibration->k2;
float k3 = camera_calibration->k3;
float k4 = camera_calibration->k4;
float k5 = camera_calibration->k5;
float k6 = camera_calibration->k6;
float codx = camera_calibration->codx; // center of distortion is set to 0 for Brown Conrady model
float cody = camera_calibration->cody;
float p1 = camera_calibration->p1;
float p2 = camera_calibration->p2;
float max_radius_for_projection = camera_calibration->metric_radius * camera_calibration->metric_radius;
valid = 1;
float xp = xy[0] - codx;
float yp = xy[1] - cody;
float xp2 = xp * xp;
float yp2 = yp * yp;
float xyp = xp * yp;
float rs = xp2 + yp2;
if (rs > max_radius_for_projection)
{
valid = 0;
return true;
}
float rss = rs * rs;
float rsc = rss * rs;
float a = 1.f + k1 * rs + k2 * rss + k3 * rsc;
float b = 1.f + k4 * rs + k5 * rss + k6 * rsc;
float bi;
if (b != 0.f)
{
bi = 1.f / b;
}
else
{
bi = 1.f;
}
float d = a * bi;
float xp_d = xp * d;
float yp_d = yp * d;
float rs_2xp2 = rs + 2.f * xp2;
float rs_2yp2 = rs + 2.f * yp2;
xp_d += rs_2xp2 * p2 + 2.f * xyp * p1;
yp_d += rs_2yp2 * p1 + 2.f * xyp * p2;
float xp_d_cx = xp_d + codx;
float yp_d_cy = yp_d + cody;
uv[0] = xp_d_cx * fx + cx;
uv[1] = yp_d_cy * fy + cy;
/*if (J_xy == 0)
{
return true;
}*/
// compute Jacobian matrix
float dudrs = k1 + 2.f * k2 * rs + 3.f * k3 * rss;
// compute d(b)/d(r^2)
float dvdrs = k4 + 2.f * k5 * rs + 3.f * k6 * rss;
float bis = bi * bi;
float dddrs = (dudrs * b - a * dvdrs) * bis;
float dddrs_2 = dddrs * 2.f;
float xp_dddrs_2 = xp * dddrs_2;
float yp_xp_dddrs_2 = yp * xp_dddrs_2;
J_xy[0] = fx * (d + xp * xp_dddrs_2 + 6.f * xp * p2 + 2.f * yp * p1);
J_xy[1] = fx * (yp_xp_dddrs_2 + 2.f * yp * p2 + 2.f * xp * p1);
J_xy[2] = fy * (yp_xp_dddrs_2 + 2.f * xp * p1 + 2.f * yp * p2);
J_xy[3] = fy * (d + yp * yp * dddrs_2 + 6.f * yp * p1 + 2.f * xp * p2);
return true;
}
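// Iteratively inverts the Brown-Conrady distortion: starting from the normalized
// (x, y) guess in xy[], it re-projects, measures the pixel error against uv[], and
// takes Newton steps using the inverse of the 2x2 projection Jacobian until the
// error stops improving, falls below ~1e-22, or max_passes is reached.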
__device__
static bool cuda_deproject_pixel_to_point_with_distortion_iterative(const struct cuda_align::cuda_intrinsics * camera_calibration,
const float uv[2], float xy[3], int& valid, unsigned int max_passes)
{
valid = 1;
float Jinv[2 * 2];
float best_xy[2] = { 0.f, 0.f };
float best_err = FLT_MAX;
for (unsigned int pass = 0; pass < max_passes; ++pass)
{
float p[2];
float J[2 * 2];
if (cuda_project_pixel_to_point_with_distortion(camera_calibration, xy, p, valid, J) == false)
{
return false;
}
if (valid == 0)
{
return true;
}
float err_x = uv[0] - p[0];
float err_y = uv[1] - p[1];
float err = err_x * err_x + err_y * err_y;
if (err >= best_err)
{
xy[0] = best_xy[0];
xy[1] = best_xy[1];
break;
}
best_err = err;
best_xy[0] = xy[0];
best_xy[1] = xy[1];
float detJ = J[0] * J[3] - J[1] * J[2];
float inv_detJ = 1.f / detJ;
Jinv[0] = inv_detJ * J[3];
Jinv[3] = inv_detJ * J[0];
Jinv[1] = -inv_detJ * J[1];
Jinv[2] = -inv_detJ * J[2];
if (pass + 1 == max_passes || best_err < 1e-22f)
{
break;
}
float dx = Jinv[0] * err_x + Jinv[1] * err_y;
float dy = Jinv[2] * err_x + Jinv[3] * err_y;
xy[0] += dx;
xy[1] += dy;
}
if (best_err > 1e-6f)
{
valid = 0;
}
return true;
}
__device__
static void cuda_deproject_pixel_to_point_with_distortion(const struct cuda_align::cuda_intrinsics * camera_calibration,
const float uv[2], float xy[3], float depth)
{
float xp_d = (uv[0] - camera_calibration->cx) / camera_calibration->fx - camera_calibration->codx;
float yp_d = (uv[1] - camera_calibration->cy) / camera_calibration->fy - camera_calibration->cody;
float rs = xp_d * xp_d + yp_d * yp_d;
float rss = rs * rs;
float rsc = rss * rs;
float a = 1.f + camera_calibration->k1 * rs + camera_calibration->k2 * rss + camera_calibration->k3 * rsc;
float b = 1.f + camera_calibration->k4 * rs + camera_calibration->k5 * rss + camera_calibration->k6 * rsc;
float ai;
if (a != 0.f)
{
ai = 1.f / a;
}
else
{
ai = 1.f;
}
float di = ai * b;
float x = xp_d * di;
float y = yp_d * di;
// approximate correction for tangential params
float two_xy = 2.f * x * y;
float xx = x * x;
float yy = y * y;
x -= (yy + 3.f * xx) * camera_calibration->p2 + two_xy * camera_calibration->p1;
y -= (xx + 3.f * yy) * camera_calibration->p1 + two_xy * camera_calibration->p2;
// add on center of distortion
x += camera_calibration->codx;
y += camera_calibration->cody;
xy[0] = x;
xy[1] = y;
xy[2] = depth;
int valid;
if (cuda_deproject_pixel_to_point_with_distortion_iterative(camera_calibration, uv, xy, valid, 20))
{
xy[0] *= depth;
xy[1] *= depth;
xy[2] = depth;
}
else
{
xy[0] = xy[1] = xy[2] = 0.0f;
}
}
__device__
static bool cuda_project_pixel_to_point_with_distortion(const struct cuda_align::cuda_intrinsics * camera_calibration,
const float xy[2],
float uv[2])
{
float cx = camera_calibration->cx;
float cy = camera_calibration->cy;
float fx = camera_calibration->fx;
float fy = camera_calibration->fy;
float k1 = camera_calibration->k1;
float k2 = camera_calibration->k2;
float k3 = camera_calibration->k3;
float k4 = camera_calibration->k4;
float k5 = camera_calibration->k5;
float k6 = camera_calibration->k6;
float codx = camera_calibration->codx; // center of distortion is set to 0 for Brown Conrady model
float cody = camera_calibration->cody;
float p1 = camera_calibration->p1;
float p2 = camera_calibration->p2;
float max_radius_for_projection = camera_calibration->metric_radius * camera_calibration->metric_radius;
float xp = xy[0] - codx;
float yp = xy[1] - cody;
float xp2 = xp * xp;
float yp2 = yp * yp;
float xyp = xp * yp;
float rs = xp2 + yp2;
if (rs > max_radius_for_projection)
{
return true;
}
float rss = rs * rs;
float rsc = rss * rs;
float a = 1.f + k1 * rs + k2 * rss + k3 * rsc;
float b = 1.f + k4 * rs + k5 * rss + k6 * rsc;
float bi;
if (b != 0.f)
{
bi = 1.f / b;
}
else
{
bi = 1.f;
}
float d = a * bi;
float xp_d = xp * d;
float yp_d = yp * d;
float rs_2xp2 = rs + 2.f * xp2;
float rs_2yp2 = rs + 2.f * yp2;
xp_d += rs_2xp2 * p2 + 2.f * xyp * p1;
yp_d += rs_2yp2 * p1 + 2.f * xyp * p2;
float xp_d_cx = xp_d + codx;
float yp_d_cy = yp_d + cody;
uv[0] = xp_d_cx * fx + cx;
uv[1] = yp_d_cy * fy + cy;
return true;
}
__device__
static void cuda_transform_point_to_point(float to_point[3], const struct cuda_align::cuda_extrinsics * extrin, const float from_point[3])
{
to_point[0] = extrin->rotation[0] * from_point[0] + extrin->rotation[1] * from_point[1] + extrin->rotation[2] * from_point[2] + extrin->translation[0];
to_point[1] = extrin->rotation[3] * from_point[0] + extrin->rotation[4] * from_point[1] + extrin->rotation[5] * from_point[2] + extrin->translation[1];
to_point[2] = extrin->rotation[6] * from_point[0] + extrin->rotation[7] * from_point[1] + extrin->rotation[8] * from_point[2] + extrin->translation[2];
}
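// kernel_color_to_depth: one thread per depth pixel. A valid depth sample is
// deprojected to a 3D point, transformed into the color camera frame, projected
// through the color intrinsics, and the color pixel it lands on (if inside the
// image) is copied into the depth-aligned RGB output at the depth pixel's index.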
__global__
void kernel_color_to_depth(uint8_t* aligned_out, const uint16_t* depth_in, const uint8_t* color_in, const cuda_align::cuda_intrinsics* depth_intrin, const cuda_align::cuda_intrinsics* color_intrin, const cuda_align::cuda_extrinsics* depth_to_color, const float depth_scale)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
//int depth_size = depth_intrin->width * depth_intrin->height;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_x >= 0 && depth_x < depth_intrin->width && depth_y >= 0 && depth_y < depth_intrin->height)
{
float uv[2] = { depth_x ,depth_y };
float xyz[3];
const float depth_value = depth_in[depth_pixel_index] * depth_scale;
if (depth_value == 0)
return;
cuda_deproject_pixel_to_point_with_distortion(depth_intrin, uv, xyz, depth_value);
float target_xyz[3];
cuda_transform_point_to_point(target_xyz, depth_to_color, xyz);
if (target_xyz[2] <= 0.f)
{
return;
}
float xy_for_projection[2];
xy_for_projection[0] = target_xyz[0] / target_xyz[2];
xy_for_projection[1] = target_xyz[1] / target_xyz[2];
float target_uv[2] = { -1.f,-1.f };
cuda_project_pixel_to_point_with_distortion(color_intrin, xy_for_projection, target_uv);
const int target_x = target_uv[0] + 0.5f;
const int target_y = target_uv[1] + 0.5f;
if (target_x >= 0 && target_x < color_intrin->width && target_y >= 0 && target_y < color_intrin->height)
{
const int from_offset = 3 * depth_pixel_index;
const int to_offset = 3 * (target_y*color_intrin->width + target_x);
aligned_out[from_offset + 0] = color_in[to_offset + 0];
aligned_out[from_offset + 1] = color_in[to_offset + 1];
aligned_out[from_offset + 2] = color_in[to_offset + 2];
}
}
}
void cuda_k4a_align::align_color_to_depth(uint8_t* aligned_out
, const uint16_t* depth_in
, const uint8_t* color_in
, float depth_scale
, const k4a_calibration_t& calibration
)
{
cuda_align::cuda_intrinsics depth_intrinsic(calibration.depth_camera_calibration);
cuda_align::cuda_intrinsics color_intrinsic(calibration.color_camera_calibration);
cuda_align::cuda_extrinsics depth_to_color(calibration.extrinsics[K4A_CALIBRATION_TYPE_DEPTH][K4A_CALIBRATION_TYPE_COLOR]);
const int depth_pixel_count = depth_intrinsic.width * depth_intrinsic.height;
const int color_pixel_count = color_intrinsic.width * color_intrinsic.height;
const int aligned_pixel_count = depth_pixel_count;
const int depth_byte_size = depth_pixel_count * sizeof(uint16_t);
const int color_byte_size = color_pixel_count * sizeof(uint8_t) * 3;
const int aligned_byte_size = aligned_pixel_count * sizeof(uint8_t) * 3;
// allocate and copy objects to cuda device memory
if (!d_depth_intrinsics) d_depth_intrinsics = make_device_copy(depth_intrinsic);
if (!d_color_intrinsics) d_color_intrinsics = make_device_copy(color_intrinsic);
if (!d_depth_color_extrinsics) d_depth_color_extrinsics = make_device_copy(depth_to_color);
if (!d_depth_in) d_depth_in = alloc_dev<uint16_t>(depth_pixel_count);
hipMemcpy(d_depth_in.get(), depth_in, depth_byte_size, hipMemcpyHostToDevice);
if (!d_color_in) d_color_in = alloc_dev<uint8_t>(color_pixel_count * 3);
hipMemcpy(d_color_in.get(), color_in, color_byte_size, hipMemcpyHostToDevice);
if (!d_aligned_out) d_aligned_out = alloc_dev<uint8_t>(aligned_byte_size);
hipMemset(d_aligned_out.get(), 0, aligned_byte_size);
// config threads
dim3 threads(CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK);
dim3 depth_blocks(divUp(depth_intrinsic.width, threads.x), divUp(depth_intrinsic.height, threads.y));
kernel_color_to_depth << <depth_blocks, threads >> > (d_aligned_out.get(), d_depth_in.get(), d_color_in.get(),
d_depth_intrinsics.get(), d_color_intrinsics.get(), d_depth_color_extrinsics.get(), depth_scale);
hipDeviceSynchronize();
hipMemcpy(aligned_out, d_aligned_out.get(), aligned_byte_size, hipMemcpyDeviceToHost);
}
__device__
void kernel_transfer_pixels(int2* mapped_pixels, const cuda_align::cuda_intrinsics* depth_intrin,
const cuda_align::cuda_intrinsics* color_intrin, const cuda_align::cuda_extrinsics* depth_to_color, float depth_val, int depth_x, int depth_y, int block_index)
{
float shift = block_index ? 0.5 : -0.5;
auto depth_size = depth_intrin->width * depth_intrin->height;
auto mapped_index = block_index * depth_size + (depth_y * depth_intrin->width + depth_x);
if (mapped_index >= depth_size * 2)
return;
// Skip over depth pixels with the value of zero, we have no depth data so we will not write anything into our aligned images
if (depth_val == 0)
{
mapped_pixels[mapped_index] = { -1, -1 };
return;
}
//// Map the top-left corner of the depth pixel onto the color image
float depth_pixel[2] = { depth_x + shift, depth_y + shift }, depth_point[3], color_point[3], color_pixel[2];
//cuda_deproject_pixel_to_point(depth_point, depth_intrin, depth_pixel, depth_val);
cuda_deproject_pixel_to_point_with_distortion(depth_intrin, depth_pixel, depth_point, depth_val);
cuda_transform_point_to_point(color_point, depth_to_color, depth_point);
//cuda_project_point_to_pixel(color_pixel, color_intrin, color_point);
float normalized_pts[2];
normalized_pts[0] = color_point[0] / color_point[2];
normalized_pts[1] = color_point[1] / color_point[2];
cuda_project_pixel_to_point_with_distortion(color_intrin, normalized_pts, color_pixel);
mapped_pixels[mapped_index].x = static_cast<int>(color_pixel[0] + 0.5f);
mapped_pixels[mapped_index].y = static_cast<int>(color_pixel[1] + 0.5f);
}
__global__
void kernel_map_depth_to_color(int2* mapped_pixels, const uint16_t* depth_in, const cuda_align::cuda_intrinsics* depth_intrin, const cuda_align::cuda_intrinsics* color_intrin,
const cuda_align::cuda_extrinsics* depth_to_color, float depth_scale)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_pixel_index >= depth_intrin->width * depth_intrin->height)
return;
float depth_val = depth_in[depth_pixel_index] * depth_scale;
kernel_transfer_pixels(mapped_pixels, depth_intrin, color_intrin, depth_to_color, depth_val, depth_x, depth_y, blockIdx.z);
}
__global__
void kernel_depth_to_color(uint16_t* aligned_out, const uint16_t* depth_in, const int2* mapped_pixels, const cuda_align::cuda_intrinsics* depth_intrin, const cuda_align::cuda_intrinsics* color_intrin)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
auto depth_size = depth_intrin->width * depth_intrin->height;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_pixel_index >= depth_intrin->width * depth_intrin->height)
return;
int2 p0 = mapped_pixels[depth_pixel_index];
int2 p1 = mapped_pixels[depth_size + depth_pixel_index];
if (p0.x < 0 || p0.y < 0 || p1.x >= color_intrin->width || p1.y >= color_intrin->height)
return;
// Transfer between the depth pixels and the pixels inside the rectangle on the color image
unsigned int new_val = depth_in[depth_pixel_index];
unsigned int* arr = (unsigned int*)aligned_out;
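// Each 16-bit depth value is duplicated into both halves of a 32-bit word so that a
// single 32-bit atomicMin can be used: when several depth pixels project onto the
// same location, the smallest (closest) depth wins. One word covers a pair of
// adjacent color pixels, so that pair effectively shares one depth value. Words that
// are never written keep the 0xffff fill and are cleared by kernel_replace_to_zero.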
for (int y = p0.y; y <= p1.y; ++y)
{
for (int x = p0.x; x <= p1.x; ++x)
{
auto color_pixel_index = y * color_intrin->width + x;
new_val = new_val << 16 | new_val;
atomicMin(&arr[color_pixel_index / 2], new_val);
}
}
}
__global__
void kernel_replace_to_zero(uint16_t* aligned_out, const cuda_align::cuda_intrinsics* color_intrin)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
auto color_pixel_index = y * color_intrin->width + x;
if (aligned_out[color_pixel_index] == 0xffff)
aligned_out[color_pixel_index] = 0;
}
void cuda_k4a_align::align_depth_to_color(uint16_t* aligned_out, const uint16_t* depth_in,
float depth_scale, const k4a_calibration_t& calibration)
{
cuda_align::cuda_intrinsics depth_intrinsic(calibration.depth_camera_calibration);
cuda_align::cuda_intrinsics color_intrinsic(calibration.color_camera_calibration);
cuda_align::cuda_extrinsics depth_to_color(calibration.extrinsics[K4A_CALIBRATION_TYPE_DEPTH][K4A_CALIBRATION_TYPE_COLOR]);
int depth_pixel_count = depth_intrinsic.width * depth_intrinsic.height;
int color_pixel_count = color_intrinsic.width * color_intrinsic.height;
int aligned_pixel_count = color_pixel_count;
int depth_byte_size = depth_pixel_count * 2;
int aligned_byte_size = aligned_pixel_count * 2;
// allocate and copy objects to cuda device memory
if (!d_depth_intrinsics) d_depth_intrinsics = make_device_copy(depth_intrinsic);
if (!d_color_intrinsics) d_color_intrinsics = make_device_copy(color_intrinsic);
if (!d_depth_color_extrinsics) d_depth_color_extrinsics = make_device_copy(depth_to_color);
if (!d_depth_in) d_depth_in = alloc_dev<uint16_t>(depth_pixel_count);
hipMemcpy(d_depth_in.get(), depth_in, depth_byte_size, hipMemcpyHostToDevice);
if (!d_aligned_out) d_aligned_out = alloc_dev<unsigned char>(aligned_byte_size);
hipMemset(d_aligned_out.get(), 0xff, aligned_byte_size);
if (!d_pixel_map) d_pixel_map = alloc_dev<int2>(depth_pixel_count * 2);
// config threads
dim3 threads(CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK);
dim3 depth_blocks(divUp(depth_intrinsic.width, threads.x), divUp(depth_intrinsic.height, threads.y));
dim3 color_blocks(divUp(color_intrinsic.width, threads.x), divUp(color_intrinsic.height, threads.y));
dim3 mapping_blocks(depth_blocks.x, depth_blocks.y, 2);
kernel_map_depth_to_color << <mapping_blocks, threads >> > (d_pixel_map.get(), d_depth_in.get(), d_depth_intrinsics.get(),
d_color_intrinsics.get(), d_depth_color_extrinsics.get(), depth_scale);
kernel_depth_to_color << <depth_blocks, threads >> > ((uint16_t*)d_aligned_out.get(), d_depth_in.get(), d_pixel_map.get(),
d_depth_intrinsics.get(), d_color_intrinsics.get());
kernel_replace_to_zero << <color_blocks, threads >> > ((uint16_t*)d_aligned_out.get(), d_color_intrinsics.get());
hipDeviceSynchronize();
hipMemcpy(aligned_out, d_aligned_out.get(), aligned_pixel_count * sizeof(int16_t), hipMemcpyDeviceToHost);
}
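// Typical call pattern for this class (illustrative sketch; the buffer sizing and the
// meaning of depth_scale are inferred from the code above, where depth_scale converts
// raw depth ticks into the unit used by the calibration):
//
//     cuda_k4a_align aligner;                         // device buffers are cached inside
//     std::vector<uint16_t> aligned(color_width * color_height);
//     aligner.align_depth_to_color(aligned.data(), depth_frame, depth_scale, calibration);
//     // aligned[i] now holds the nearest depth mapped onto color pixel i,
//     // or 0 where no depth pixel projected there
//     aligner.release();                              // frees the cached device buffers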
void cuda_k4a_align::release()
{
release_memory(d_depth_in);
release_memory(d_color_in);
release_memory(d_aligned_out);
release_memory(d_pixel_map);
release_memory(d_color_intrinsics);
release_memory(d_depth_intrinsics);
release_memory(d_depth_color_extrinsics);
} | 1ae93f2ab11c9bfa59d7346dfc15c3d4042dae3f.cu | #include "cuda_k4a_align.h"
#define CUDA_THREADS_PER_BLOCK 16
static inline int divUp(int total, int grain) {
return (total + grain - 1) / grain;
}
template<typename T>
std::shared_ptr<T> make_device_copy(T obj)
{
T* d_data;
auto res = cudaMalloc(&d_data, sizeof(T));
if (res != cudaSuccess)
throw std::runtime_error(std::string("cudaMalloc failed, status: ") + cudaGetErrorString(res));
cudaMemcpy(d_data, &obj, sizeof(T), cudaMemcpyHostToDevice);
return std::shared_ptr<T>(d_data, [](T* data) { cudaFree(data); });
}
template<typename T>
std::shared_ptr<T> alloc_dev(int elements)
{
T* d_data;
auto res = cudaMalloc(&d_data, sizeof(T) * elements);
if (res != cudaSuccess)
throw std::runtime_error(std::string("cudaMalloc failed, status: ") + cudaGetErrorString(res));
return std::shared_ptr<T>(d_data, [](T* p) { cudaFree(p); });
}
template<class T>
void release_memory(T& obj)
{
obj = nullptr;
}
__device__
static bool cuda_project_pixel_to_point_with_distortion(const struct cuda_align::cuda_intrinsics * camera_calibration,
const float xy[2],
float uv[2],
int& valid,
float J_xy[2 * 2])
{
float cx = camera_calibration->cx;
float cy = camera_calibration->cy;
float fx = camera_calibration->fx;
float fy = camera_calibration->fy;
float k1 = camera_calibration->k1;
float k2 = camera_calibration->k2;
float k3 = camera_calibration->k3;
float k4 = camera_calibration->k4;
float k5 = camera_calibration->k5;
float k6 = camera_calibration->k6;
float codx = camera_calibration->codx; // center of distortion is set to 0 for Brown Conrady model
float cody = camera_calibration->cody;
float p1 = camera_calibration->p1;
float p2 = camera_calibration->p2;
float max_radius_for_projection = camera_calibration->metric_radius * camera_calibration->metric_radius;
valid = 1;
float xp = xy[0] - codx;
float yp = xy[1] - cody;
float xp2 = xp * xp;
float yp2 = yp * yp;
float xyp = xp * yp;
float rs = xp2 + yp2;
if (rs > max_radius_for_projection)
{
valid = 0;
return true;
}
float rss = rs * rs;
float rsc = rss * rs;
float a = 1.f + k1 * rs + k2 * rss + k3 * rsc;
float b = 1.f + k4 * rs + k5 * rss + k6 * rsc;
float bi;
if (b != 0.f)
{
bi = 1.f / b;
}
else
{
bi = 1.f;
}
float d = a * bi;
float xp_d = xp * d;
float yp_d = yp * d;
float rs_2xp2 = rs + 2.f * xp2;
float rs_2yp2 = rs + 2.f * yp2;
xp_d += rs_2xp2 * p2 + 2.f * xyp * p1;
yp_d += rs_2yp2 * p1 + 2.f * xyp * p2;
float xp_d_cx = xp_d + codx;
float yp_d_cy = yp_d + cody;
uv[0] = xp_d_cx * fx + cx;
uv[1] = yp_d_cy * fy + cy;
/*if (J_xy == 0)
{
return true;
}*/
// compute Jacobian matrix
float dudrs = k1 + 2.f * k2 * rs + 3.f * k3 * rss;
// compute d(b)/d(r^2)
float dvdrs = k4 + 2.f * k5 * rs + 3.f * k6 * rss;
float bis = bi * bi;
float dddrs = (dudrs * b - a * dvdrs) * bis;
float dddrs_2 = dddrs * 2.f;
float xp_dddrs_2 = xp * dddrs_2;
float yp_xp_dddrs_2 = yp * xp_dddrs_2;
J_xy[0] = fx * (d + xp * xp_dddrs_2 + 6.f * xp * p2 + 2.f * yp * p1);
J_xy[1] = fx * (yp_xp_dddrs_2 + 2.f * yp * p2 + 2.f * xp * p1);
J_xy[2] = fy * (yp_xp_dddrs_2 + 2.f * xp * p1 + 2.f * yp * p2);
J_xy[3] = fy * (d + yp * yp * dddrs_2 + 6.f * yp * p1 + 2.f * xp * p2);
return true;
}
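// Refines the caller's approximate inverse with Newton iterations: each pass re-projects
// the current estimate through the forward distortion model and applies the inverse of
// the 2x2 Jacobian to the pixel-space residual, stopping when the error stops shrinking,
// drops below ~1e-22, or max_passes is reached; the point is flagged invalid if the final
// squared error still exceeds 1e-6.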
__device__
static bool cuda_deproject_pixel_to_point_with_distortion_iterative(const struct cuda_align::cuda_intrinsics * camera_calibration,
const float uv[2], float xy[3], int& valid, unsigned int max_passes)
{
valid = 1;
float Jinv[2 * 2];
float best_xy[2] = { 0.f, 0.f };
float best_err = FLT_MAX;
for (unsigned int pass = 0; pass < max_passes; ++pass)
{
float p[2];
float J[2 * 2];
if (cuda_project_pixel_to_point_with_distortion(camera_calibration, xy, p, valid, J) == false)
{
return false;
}
if (valid == 0)
{
return true;
}
float err_x = uv[0] - p[0];
float err_y = uv[1] - p[1];
float err = err_x * err_x + err_y * err_y;
if (err >= best_err)
{
xy[0] = best_xy[0];
xy[1] = best_xy[1];
break;
}
best_err = err;
best_xy[0] = xy[0];
best_xy[1] = xy[1];
float detJ = J[0] * J[3] - J[1] * J[2];
float inv_detJ = 1.f / detJ;
Jinv[0] = inv_detJ * J[3];
Jinv[3] = inv_detJ * J[0];
Jinv[1] = -inv_detJ * J[1];
Jinv[2] = -inv_detJ * J[2];
if (pass + 1 == max_passes || best_err < 1e-22f)
{
break;
}
float dx = Jinv[0] * err_x + Jinv[1] * err_y;
float dy = Jinv[2] * err_x + Jinv[3] * err_y;
xy[0] += dx;
xy[1] += dy;
}
if (best_err > 1e-6f)
{
valid = 0;
}
return true;
}
__device__
static void cuda_deproject_pixel_to_point_with_distortion(const struct cuda_align::cuda_intrinsics * camera_calibration,
const float uv[2], float xy[3], float depth)
{
float xp_d = (uv[0] - camera_calibration->cx) / camera_calibration->fx - camera_calibration->codx;
float yp_d = (uv[1] - camera_calibration->cy) / camera_calibration->fy - camera_calibration->cody;
float rs = xp_d * xp_d + yp_d * yp_d;
float rss = rs * rs;
float rsc = rss * rs;
float a = 1.f + camera_calibration->k1 * rs + camera_calibration->k2 * rss + camera_calibration->k3 * rsc;
float b = 1.f + camera_calibration->k4 * rs + camera_calibration->k5 * rss + camera_calibration->k6 * rsc;
float ai;
if (a != 0.f)
{
ai = 1.f / a;
}
else
{
ai = 1.f;
}
float di = ai * b;
float x = xp_d * di;
float y = yp_d * di;
// approximate correction for tangential params
float two_xy = 2.f * x * y;
float xx = x * x;
float yy = y * y;
x -= (yy + 3.f * xx) * camera_calibration->p2 + two_xy * camera_calibration->p1;
y -= (xx + 3.f * yy) * camera_calibration->p1 + two_xy * camera_calibration->p2;
// add on center of distortion
x += camera_calibration->codx;
y += camera_calibration->cody;
xy[0] = x;
xy[1] = y;
xy[2] = depth;
int valid;
if (cuda_deproject_pixel_to_point_with_distortion_iterative(camera_calibration, uv, xy, valid, 20))
{
xy[0] *= depth;
xy[1] *= depth;
xy[2] = depth;
}
else
{
xy[0] = xy[1] = xy[2] = 0.0f;
}
}
__device__
static bool cuda_project_pixel_to_point_with_distortion(const struct cuda_align::cuda_intrinsics * camera_calibration,
const float xy[2],
float uv[2])
{
float cx = camera_calibration->cx;
float cy = camera_calibration->cy;
float fx = camera_calibration->fx;
float fy = camera_calibration->fy;
float k1 = camera_calibration->k1;
float k2 = camera_calibration->k2;
float k3 = camera_calibration->k3;
float k4 = camera_calibration->k4;
float k5 = camera_calibration->k5;
float k6 = camera_calibration->k6;
float codx = camera_calibration->codx; // center of distortion is set to 0 for Brown Conrady model
float cody = camera_calibration->cody;
float p1 = camera_calibration->p1;
float p2 = camera_calibration->p2;
float max_radius_for_projection = camera_calibration->metric_radius * camera_calibration->metric_radius;
float xp = xy[0] - codx;
float yp = xy[1] - cody;
float xp2 = xp * xp;
float yp2 = yp * yp;
float xyp = xp * yp;
float rs = xp2 + yp2;
if (rs > max_radius_for_projection)
{
return true;
}
float rss = rs * rs;
float rsc = rss * rs;
float a = 1.f + k1 * rs + k2 * rss + k3 * rsc;
float b = 1.f + k4 * rs + k5 * rss + k6 * rsc;
float bi;
if (b != 0.f)
{
bi = 1.f / b;
}
else
{
bi = 1.f;
}
float d = a * bi;
float xp_d = xp * d;
float yp_d = yp * d;
float rs_2xp2 = rs + 2.f * xp2;
float rs_2yp2 = rs + 2.f * yp2;
xp_d += rs_2xp2 * p2 + 2.f * xyp * p1;
yp_d += rs_2yp2 * p1 + 2.f * xyp * p2;
float xp_d_cx = xp_d + codx;
float yp_d_cy = yp_d + cody;
uv[0] = xp_d_cx * fx + cx;
uv[1] = yp_d_cy * fy + cy;
return true;
}
__device__
static void cuda_transform_point_to_point(float to_point[3], const struct cuda_align::cuda_extrinsics * extrin, const float from_point[3])
{
to_point[0] = extrin->rotation[0] * from_point[0] + extrin->rotation[1] * from_point[1] + extrin->rotation[2] * from_point[2] + extrin->translation[0];
to_point[1] = extrin->rotation[3] * from_point[0] + extrin->rotation[4] * from_point[1] + extrin->rotation[5] * from_point[2] + extrin->translation[1];
to_point[2] = extrin->rotation[6] * from_point[0] + extrin->rotation[7] * from_point[1] + extrin->rotation[8] * from_point[2] + extrin->translation[2];
}
__global__
void kernel_color_to_depth(uint8_t* aligned_out, const uint16_t* depth_in, const uint8_t* color_in, const cuda_align::cuda_intrinsics* depth_intrin, const cuda_align::cuda_intrinsics* color_intrin, const cuda_align::cuda_extrinsics* depth_to_color, const float depth_scale)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
//int depth_size = depth_intrin->width * depth_intrin->height;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_x >= 0 && depth_x < depth_intrin->width && depth_y >= 0 && depth_y < depth_intrin->height)
{
float uv[2] = { depth_x ,depth_y };
float xyz[3];
const float depth_value = depth_in[depth_pixel_index] * depth_scale;
if (depth_value == 0)
return;
cuda_deproject_pixel_to_point_with_distortion(depth_intrin, uv, xyz, depth_value);
float target_xyz[3];
cuda_transform_point_to_point(target_xyz, depth_to_color, xyz);
if (target_xyz[2] <= 0.f)
{
return;
}
float xy_for_projection[2];
xy_for_projection[0] = target_xyz[0] / target_xyz[2];
xy_for_projection[1] = target_xyz[1] / target_xyz[2];
float target_uv[2] = { -1.f,-1.f };
cuda_project_pixel_to_point_with_distortion(color_intrin, xy_for_projection, target_uv);
const int target_x = target_uv[0] + 0.5f;
const int target_y = target_uv[1] + 0.5f;
if (target_x >= 0 && target_x < color_intrin->width && target_y >= 0 && target_y < color_intrin->height)
{
const int from_offset = 3 * depth_pixel_index;
const int to_offset = 3 * (target_y*color_intrin->width + target_x);
aligned_out[from_offset + 0] = color_in[to_offset + 0];
aligned_out[from_offset + 1] = color_in[to_offset + 1];
aligned_out[from_offset + 2] = color_in[to_offset + 2];
}
}
}
void cuda_k4a_align::align_color_to_depth(uint8_t* aligned_out
, const uint16_t* depth_in
, const uint8_t* color_in
, float depth_scale
, const k4a_calibration_t& calibration
)
{
cuda_align::cuda_intrinsics depth_intrinsic(calibration.depth_camera_calibration);
cuda_align::cuda_intrinsics color_intrinsic(calibration.color_camera_calibration);
cuda_align::cuda_extrinsics depth_to_color(calibration.extrinsics[K4A_CALIBRATION_TYPE_DEPTH][K4A_CALIBRATION_TYPE_COLOR]);
const int depth_pixel_count = depth_intrinsic.width * depth_intrinsic.height;
const int color_pixel_count = color_intrinsic.width * color_intrinsic.height;
const int aligned_pixel_count = depth_pixel_count;
const int depth_byte_size = depth_pixel_count * sizeof(uint16_t);
const int color_byte_size = color_pixel_count * sizeof(uint8_t) * 3;
const int aligned_byte_size = aligned_pixel_count * sizeof(uint8_t) * 3;
// allocate and copy objects to cuda device memory
if (!d_depth_intrinsics) d_depth_intrinsics = make_device_copy(depth_intrinsic);
if (!d_color_intrinsics) d_color_intrinsics = make_device_copy(color_intrinsic);
if (!d_depth_color_extrinsics) d_depth_color_extrinsics = make_device_copy(depth_to_color);
if (!d_depth_in) d_depth_in = alloc_dev<uint16_t>(depth_pixel_count);
cudaMemcpy(d_depth_in.get(), depth_in, depth_byte_size, cudaMemcpyHostToDevice);
if (!d_color_in) d_color_in = alloc_dev<uint8_t>(color_pixel_count * 3);
cudaMemcpy(d_color_in.get(), color_in, color_byte_size, cudaMemcpyHostToDevice);
if (!d_aligned_out) d_aligned_out = alloc_dev<uint8_t>(aligned_byte_size);
cudaMemset(d_aligned_out.get(), 0, aligned_byte_size);
// config threads
dim3 threads(CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK);
dim3 depth_blocks(divUp(depth_intrinsic.width, threads.x), divUp(depth_intrinsic.height, threads.y));
kernel_color_to_depth << <depth_blocks, threads >> > (d_aligned_out.get(), d_depth_in.get(), d_color_in.get(),
d_depth_intrinsics.get(), d_color_intrinsics.get(), d_depth_color_extrinsics.get(), depth_scale);
cudaDeviceSynchronize();
cudaMemcpy(aligned_out, d_aligned_out.get(), aligned_byte_size, cudaMemcpyDeviceToHost);
}
__device__
void kernel_transfer_pixels(int2* mapped_pixels, const cuda_align::cuda_intrinsics* depth_intrin,
const cuda_align::cuda_intrinsics* color_intrin, const cuda_align::cuda_extrinsics* depth_to_color, float depth_val, int depth_x, int depth_y, int block_index)
{
float shift = block_index ? 0.5f : -0.5f;
auto depth_size = depth_intrin->width * depth_intrin->height;
auto mapped_index = block_index * depth_size + (depth_y * depth_intrin->width + depth_x);
if (mapped_index >= depth_size * 2)
return;
// Skip over depth pixels with a value of zero; we have no depth data, so we will not write anything into our aligned images
if (depth_val == 0)
{
mapped_pixels[mapped_index] = { -1, -1 };
return;
}
// Map the top-left corner of the depth pixel onto the color image
float depth_pixel[2] = { depth_x + shift, depth_y + shift }, depth_point[3], color_point[3], color_pixel[2];
//cuda_deproject_pixel_to_point(depth_point, depth_intrin, depth_pixel, depth_val);
cuda_deproject_pixel_to_point_with_distortion(depth_intrin, depth_pixel, depth_point, depth_val);
cuda_transform_point_to_point(color_point, depth_to_color, depth_point);
//cuda_project_point_to_pixel(color_pixel, color_intrin, color_point);
float normalized_pts[2];
normalized_pts[0] = color_point[0] / color_point[2];
normalized_pts[1] = color_point[1] / color_point[2];
cuda_project_pixel_to_point_with_distortion(color_intrin, normalized_pts, color_pixel);
mapped_pixels[mapped_index].x = static_cast<int>(color_pixel[0] + 0.5f);
mapped_pixels[mapped_index].y = static_cast<int>(color_pixel[1] + 0.5f);
}
__global__
void kernel_map_depth_to_color(int2* mapped_pixels, const uint16_t* depth_in, const cuda_align::cuda_intrinsics* depth_intrin, const cuda_align::cuda_intrinsics* color_intrin,
const cuda_align::cuda_extrinsics* depth_to_color, float depth_scale)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_pixel_index >= depth_intrin->width * depth_intrin->height)
return;
float depth_val = depth_in[depth_pixel_index] * depth_scale;
kernel_transfer_pixels(mapped_pixels, depth_intrin, color_intrin, depth_to_color, depth_val, depth_x, depth_y, blockIdx.z);
}
__global__
void kernel_depth_to_color(uint16_t* aligned_out, const uint16_t* depth_in, const int2* mapped_pixels, const cuda_align::cuda_intrinsics* depth_intrin, const cuda_align::cuda_intrinsics* color_intrin)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
auto depth_size = depth_intrin->width * depth_intrin->height;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_pixel_index >= depth_intrin->width * depth_intrin->height)
return;
int2 p0 = mapped_pixels[depth_pixel_index];
int2 p1 = mapped_pixels[depth_size + depth_pixel_index];
if (p0.x < 0 || p0.y < 0 || p1.x >= color_intrin->width || p1.y >= color_intrin->height)
return;
// Transfer between the depth pixels and the pixels inside the rectangle on the color image
unsigned int new_val = depth_in[depth_pixel_index];
unsigned int* arr = (unsigned int*)aligned_out;
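// Each 16-bit depth value is duplicated into both halves of a 32-bit word so that a
// single 32-bit atomicMin can be used: when several depth pixels project onto the
// same location, the smallest (closest) depth wins. One word covers a pair of
// adjacent color pixels, so that pair effectively shares one depth value. Words that
// are never written keep the 0xffff fill and are cleared by kernel_replace_to_zero.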
for (int y = p0.y; y <= p1.y; ++y)
{
for (int x = p0.x; x <= p1.x; ++x)
{
auto color_pixel_index = y * color_intrin->width + x;
new_val = new_val << 16 | new_val;
atomicMin(&arr[color_pixel_index / 2], new_val);
}
}
}
__global__
void kernel_replace_to_zero(uint16_t* aligned_out, const cuda_align::cuda_intrinsics* color_intrin)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
auto color_pixel_index = y * color_intrin->width + x;
if (aligned_out[color_pixel_index] == 0xffff)
aligned_out[color_pixel_index] = 0;
}
void cuda_k4a_align::align_depth_to_color(uint16_t* aligned_out, const uint16_t* depth_in,
float depth_scale, const k4a_calibration_t& calibration)
{
cuda_align::cuda_intrinsics depth_intrinsic(calibration.depth_camera_calibration);
cuda_align::cuda_intrinsics color_intrinsic(calibration.color_camera_calibration);
cuda_align::cuda_extrinsics depth_to_color(calibration.extrinsics[K4A_CALIBRATION_TYPE_DEPTH][K4A_CALIBRATION_TYPE_COLOR]);
int depth_pixel_count = depth_intrinsic.width * depth_intrinsic.height;
int color_pixel_count = color_intrinsic.width * color_intrinsic.height;
int aligned_pixel_count = color_pixel_count;
int depth_byte_size = depth_pixel_count * 2;
int aligned_byte_size = aligned_pixel_count * 2;
// allocate and copy objects to cuda device memory
if (!d_depth_intrinsics) d_depth_intrinsics = make_device_copy(depth_intrinsic);
if (!d_color_intrinsics) d_color_intrinsics = make_device_copy(color_intrinsic);
if (!d_depth_color_extrinsics) d_depth_color_extrinsics = make_device_copy(depth_to_color);
if (!d_depth_in) d_depth_in = alloc_dev<uint16_t>(depth_pixel_count);
cudaMemcpy(d_depth_in.get(), depth_in, depth_byte_size, cudaMemcpyHostToDevice);
if (!d_aligned_out) d_aligned_out = alloc_dev<unsigned char>(aligned_byte_size);
cudaMemset(d_aligned_out.get(), 0xff, aligned_byte_size);
if (!d_pixel_map) d_pixel_map = alloc_dev<int2>(depth_pixel_count * 2);
// config threads
dim3 threads(CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK);
dim3 depth_blocks(divUp(depth_intrinsic.width, threads.x), divUp(depth_intrinsic.height, threads.y));
dim3 color_blocks(divUp(color_intrinsic.width, threads.x), divUp(color_intrinsic.height, threads.y));
dim3 mapping_blocks(depth_blocks.x, depth_blocks.y, 2);
kernel_map_depth_to_color << <mapping_blocks, threads >> > (d_pixel_map.get(), d_depth_in.get(), d_depth_intrinsics.get(),
d_color_intrinsics.get(), d_depth_color_extrinsics.get(), depth_scale);
kernel_depth_to_color << <depth_blocks, threads >> > ((uint16_t*)d_aligned_out.get(), d_depth_in.get(), d_pixel_map.get(),
d_depth_intrinsics.get(), d_color_intrinsics.get());
kernel_replace_to_zero << <color_blocks, threads >> > ((uint16_t*)d_aligned_out.get(), d_color_intrinsics.get());
cudaDeviceSynchronize();
cudaMemcpy(aligned_out, d_aligned_out.get(), aligned_pixel_count * sizeof(int16_t), cudaMemcpyDeviceToHost);
}
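// Typical call pattern for this class (illustrative sketch; the buffer sizing and the
// meaning of depth_scale are inferred from the code above, where depth_scale converts
// raw depth ticks into the unit used by the calibration):
//
//     cuda_k4a_align aligner;                         // device buffers are cached inside
//     std::vector<uint16_t> aligned(color_width * color_height);
//     aligner.align_depth_to_color(aligned.data(), depth_frame, depth_scale, calibration);
//     // aligned[i] now holds the nearest depth mapped onto color pixel i,
//     // or 0 where no depth pixel projected there
//     aligner.release();                              // frees the cached device buffers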
void cuda_k4a_align::release()
{
release_memory(d_depth_in);
release_memory(d_color_in);
release_memory(d_aligned_out);
release_memory(d_pixel_map);
release_memory(d_color_intrinsics);
release_memory(d_depth_intrinsics);
release_memory(d_depth_color_extrinsics);
} |
94edb48e656968185943c5e8ab0fbf4b6d51adb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/hip/Resize.cuh>
#include <c10/util/Exception.h>
#include <THH/THHGeneral.h>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <cstddef>
#include <cmath>
namespace at {
namespace native {
Tensor& eye_out_cuda(Tensor& result, int64_t n) {
return at::native::eye_out_cuda(result, n, /*m=*/-1);
}
Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) {
TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
if(m < 0) {
m = n;
}
result.resize_({n, m});
result.zero_();
int64_t sz = std::min<int64_t>(n, m);
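// stepping by stride(0) + stride(1) moves one row down and one column right per
// element, i.e. the strided view below walks the main diagonal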
int64_t stride = result.stride(0) + result.stride(1);
Tensor diag = result.as_strided({sz}, {stride});
diag.fill_(1);
return result;
}
Tensor empty_cuda(IntArrayRef size, const TensorOptions& options, c10::optional<MemoryFormat> optional_memory_format) {
AT_ASSERT(options.device().type() == at::DeviceType::CUDA);
TORCH_INTERNAL_ASSERT(impl::variable_excluded_from_dispatch());
TORCH_CHECK(!options.pinned_memory(), "Only dense CPU tensors can be pinned");
check_size_nonnegative(size);
auto* allocator = at::cuda::getCUDADeviceAllocator();
int64_t nelements = prod_intlist(size);
auto dtype = options.dtype();
int64_t size_bytes = nelements * dtype.itemsize();
auto storage_impl = c10::make_intrusive<StorageImpl>(
c10::StorageImpl::use_byte_size_t(),
size_bytes,
allocator->allocate(size_bytes),
allocator,
/*resizeable=*/true);
auto tensor =
detail::make_tensor<TensorImpl>(storage_impl, DispatchKey::CUDA, dtype);
// Default TensorImpl has size [0]
if (size.size() != 1 || size[0] != 0) {
tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
}
TORCH_CHECK(
!(options.has_memory_format() && optional_memory_format.has_value()),
"Cannot set memory_format both in TensorOptions and explicit argument; please delete "
"the redundant setter.");
auto memory_format = options.memory_format_opt().value_or(optional_memory_format.value_or(MemoryFormat::Contiguous));
tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
return tensor;
}
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) {
auto t = at::native::empty_cuda({0}, options);
at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride);
return t;
}
Tensor& randperm_out_cuda(Tensor& result, int64_t n, c10::optional<Generator> generator) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
check_supported_max_int_with_precision(n, result);
result.resize_({n});
if (n < 30000) { // For small inputs, we offload it to CPU instead.
auto result_cpu = at::empty({n}, result.options().device(kCPU));
randperm_out(result_cpu, n, generator);
return result.copy_(result_cpu);
}
#if 0
// This if condition should never be true because if n >= 30000 and the tensor has a Half type,
// check_supported_max_int_with_precision should have reported an error. This snippet is commented out but left here
// for the sake of clarity, because Half in thrust is spotty, and we do not want future change unaware of this.
if (result.scalar_type() == at::ScalarType::Half) { // Half in thrust is spotty. Avoid.
auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA)));
return result.copy_(randperm_out_cuda(result_float, n, generator));
}
#endif
// Generate random values for the keys array
AT_DISPATCH_ALL_TYPES(
result.scalar_type(), "randperm_out_cuda", [&] {
auto keys = at::empty(result.sizes(), result.options()).random_(generator);
auto keys_data = thrust::device_ptr<scalar_t>(keys.data_ptr<scalar_t>());
// shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it
// points to a new tensor.
Tensor shuffled;
thrust::device_ptr<scalar_t> shuffled_data;
if (result.is_contiguous()) {
shuffled_data = thrust::device_ptr<scalar_t>(result.data_ptr<scalar_t>());
} else {
shuffled = at::empty(n, result.options());
shuffled_data = thrust::device_ptr<scalar_t>(shuffled.data_ptr<scalar_t>());
}
auto state = globalContext().getTHCState();
THCThrustAllocator thrustAlloc(state);
auto policy = thrust::hip::par(thrustAlloc).on(at::hip::getCurrentHIPStreamMasqueradingAsCUDA());
thrust::sequence(policy, shuffled_data, shuffled_data + n);
// Use the sorted order of keys to rearrange the result array
thrust::sort_by_key(policy, keys_data, keys_data + n, shuffled_data);
if (!result.is_contiguous()) {
result.copy_(shuffled);
}
}
);
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// is overkill for most cases where double's precision suffices.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits of precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
__device__
inline int64_t resolve_root_int(
int64_t b, int64_t cX4, int64_t x, int32_t sign) {
int64_t bXb_cX4 = b*b - cX4;
// potential precision loss could occur here when casting int64_t (63 bits
// precision) to double (52 bits precision)
double sr = ::sqrt((double)bXb_cX4);
int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
// have to cast double to int64_t, otherwise it would only compare up to the
// precision of a double variable, ignoring the precision loss
if (bXb_cX4 != (int64_t) (sr * sr)) {
// handle precision loss by using binary search
int64_t llsr = ::__double2ll_rd(sr);
// Use the following math to reduce search space.
// Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
// let d = abs(bXb_cX4 - llsr * llsr), then we have:
// z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
// z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
// Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
// And the true value of row would also be within the range,
// [res - sqrt(d), res + sqrt(d) + 1)
// as the denominator would only reduce the precision penalty.
int64_t diff =
::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
// l never exceeds (but may equal) the target row index
auto l = res > diff ? res - diff : 0;
// r is always larger than the target row index
auto r = res + diff + 1;
// binary search for the correct answer
x <<= 1; // the loop always compares with 2x, so do it once here
while (l + 1 < r) {
auto m = (l + r) >> 1;
// for tril:
// b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
// for triu:
// b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
if (sign * (b + m) * m > x) {
r = m;
} else {
l = m;
}
}
res = l;
}
return res;
}
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
__device__
inline void get_coordinate_in_tril_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = f - 1;
auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x;
row = resolve_root_int(b, cX4, x, 1);
col = x - ((f + row - 1) * row >> 1);
}
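// Worked example of the formulas above (illustrative): with f = 1 the trapezoid rows
// hold 1, 2, 3, ... elements. For x = 4, the largest row with row^2 + row - 8 <= 0 is
// row = 2, and col = 4 - (1 + 1 + 2 - 1) * 2 / 2 = 1; enumerating the trapezoid row by
// row indeed places x = 4 at coordinate (2, 1).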
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is >= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2
__device__
inline void get_coordinate_in_triu_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = -1 - f;
auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x;
row = resolve_root_int(b, cX4, x, -1);
col = x - ((f - row + 1) * row >> 1) + row;
}
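// Worked example (illustrative): with f = 3 the bottom trapezoid rows hold 3, 2, 1
// elements, and row r starts r columns to the right. For x = 3, row^2 - 7*row + 6 >= 0
// gives row = 1 (the largest integer not exceeding the left root). With f already
// doubled, the code computes col = 3 - (6 - 1 + 1) * 1 / 2 + 1 = 1, where the trailing
// "+ row" accounts for the rightward shift of each row, so x = 3 maps to (1, 1),
// matching a direct enumeration.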
} // namespace
template <typename scalar_t>
__global__
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
void tril_indices_kernel(scalar_t * tensor,
int64_t row_offset,
int64_t m_first_row,
int64_t col,
int64_t trapezoid_size,
int64_t tril_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < tril_size) {
int64_t r, c;
if (linear_index < trapezoid_size) {
// the coordinate is within the top trapezoid
get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
} else {
// the coordinate falls in the bottom rectangle
auto surplus = linear_index - trapezoid_size;
// add the height of trapezoid: m_last_row (col) - m_first_row + 1
r = surplus / col + col - m_first_row + 1;
c = surplus % col;
}
r += row_offset;
tensor[linear_index] = r;
tensor[linear_index + tril_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto tril_size = get_tril_size(row, col, offset);
auto tensor = empty_cuda({2, tril_size}, options);
if (tril_size > 0) {
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
int64_t rectangle_size = 0;
if (rectangle_row_offset < row) {
rectangle_size = (row - rectangle_row_offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using tril_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] {
hipLaunchKernelGGL(( tril_indices_kernel),
dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
tensor.data_ptr<scalar_t>(),
trapezoid_row_offset,
m_first_row,
col,
tril_size - rectangle_size,
tril_size);
});
}
return tensor;
}
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
int64_t col_offset,
int64_t m_first_row,
int64_t col,
int64_t rectangle_size,
int64_t triu_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < triu_size) {
int64_t r, c;
if (linear_index < rectangle_size) {
// the coordinate is within the top rectangle
r = linear_index / col;
c = linear_index % col;
} else {
// the coordinate falls in the bottom trapezoid
get_coordinate_in_triu_trapezoid(
m_first_row, linear_index - rectangle_size, r, c);
r += rectangle_size / col;
}
c += col_offset;
tensor[linear_index] = r;
tensor[linear_index + triu_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);
auto tensor = empty_cuda({2, triu_size}, options);
if (triu_size > 0) {
// # of triu elements in the first row
auto m_first_row = offset > 0 ?
std::max<int64_t>(col - offset, 0) : // upper bounded by col
col;
// size of the top rectangle
int64_t rectangle_size = 0;
if (offset < 0) {
rectangle_size = std::min<int64_t>(row, -offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using triu_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] {
hipLaunchKernelGGL(( triu_indices_kernel),
dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
tensor.data_ptr<scalar_t>(),
std::max<int64_t>(0, offset),
m_first_row,
col,
rectangle_size,
triu_size);
});
}
return tensor;
}
}} // namespace at::native
| 94edb48e656968185943c5e8ab0fbf4b6d51adb0.cu | #include <ATen/ATen.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/cuda/Resize.cuh>
#include <c10/util/Exception.h>
#include <THC/THCGeneral.h>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <cstddef>
#include <cmath>
namespace at {
namespace native {
Tensor& eye_out_cuda(Tensor& result, int64_t n) {
return at::native::eye_out_cuda(result, n, /*m=*/-1);
}
Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) {
TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
if(m < 0) {
m = n;
}
result.resize_({n, m});
result.zero_();
int64_t sz = std::min<int64_t>(n, m);
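// stepping by stride(0) + stride(1) moves one row down and one column right per
// element, i.e. the strided view below walks the main diagonal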
int64_t stride = result.stride(0) + result.stride(1);
Tensor diag = result.as_strided({sz}, {stride});
diag.fill_(1);
return result;
}
Tensor empty_cuda(IntArrayRef size, const TensorOptions& options, c10::optional<MemoryFormat> optional_memory_format) {
AT_ASSERT(options.device().type() == at::DeviceType::CUDA);
TORCH_INTERNAL_ASSERT(impl::variable_excluded_from_dispatch());
TORCH_CHECK(!options.pinned_memory(), "Only dense CPU tensors can be pinned");
check_size_nonnegative(size);
auto* allocator = at::cuda::getCUDADeviceAllocator();
int64_t nelements = prod_intlist(size);
auto dtype = options.dtype();
int64_t size_bytes = nelements * dtype.itemsize();
auto storage_impl = c10::make_intrusive<StorageImpl>(
c10::StorageImpl::use_byte_size_t(),
size_bytes,
allocator->allocate(size_bytes),
allocator,
/*resizeable=*/true);
auto tensor =
detail::make_tensor<TensorImpl>(storage_impl, DispatchKey::CUDA, dtype);
// Default TensorImpl has size [0]
if (size.size() != 1 || size[0] != 0) {
tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
}
TORCH_CHECK(
!(options.has_memory_format() && optional_memory_format.has_value()),
"Cannot set memory_format both in TensorOptions and explicit argument; please delete "
"the redundant setter.");
auto memory_format = options.memory_format_opt().value_or(optional_memory_format.value_or(MemoryFormat::Contiguous));
tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
return tensor;
}
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) {
auto t = at::native::empty_cuda({0}, options);
at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride);
return t;
}
Tensor& randperm_out_cuda(Tensor& result, int64_t n, c10::optional<Generator> generator) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
check_supported_max_int_with_precision(n, result);
result.resize_({n});
if (n < 30000) { // For small inputs, we offload it to CPU instead.
auto result_cpu = at::empty({n}, result.options().device(kCPU));
randperm_out(result_cpu, n, generator);
return result.copy_(result_cpu);
}
#if 0
// This if condition should never be true because if n >= 30000 and the tensor has a Half type,
// check_supported_max_int_with_precision should have reported an error. This snippet is commented out but left here
// for the sake of clarity, because Half in thrust is spotty, and we do not want future change unaware of this.
if (result.scalar_type() == at::ScalarType::Half) { // Half in thrust is spotty. Avoid.
auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA)));
return result.copy_(randperm_out_cuda(result_float, n, generator));
}
#endif
// Generate random values for the keys array
AT_DISPATCH_ALL_TYPES(
result.scalar_type(), "randperm_out_cuda", [&] {
auto keys = at::empty(result.sizes(), result.options()).random_(generator);
auto keys_data = thrust::device_ptr<scalar_t>(keys.data_ptr<scalar_t>());
// shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it
// points to a new tensor.
Tensor shuffled;
thrust::device_ptr<scalar_t> shuffled_data;
if (result.is_contiguous()) {
shuffled_data = thrust::device_ptr<scalar_t>(result.data_ptr<scalar_t>());
} else {
shuffled = at::empty(n, result.options());
shuffled_data = thrust::device_ptr<scalar_t>(shuffled.data_ptr<scalar_t>());
}
auto state = globalContext().getTHCState();
THCThrustAllocator thrustAlloc(state);
auto policy = thrust::cuda::par(thrustAlloc).on(at::cuda::getCurrentCUDAStream());
thrust::sequence(policy, shuffled_data, shuffled_data + n);
// Use the sorted order of keys to rearrange the result array
thrust::sort_by_key(policy, keys_data, keys_data + n, shuffled_data);
if (!result.is_contiguous()) {
result.copy_(shuffled);
}
}
);
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// is overkill for most cases where double's precision suffices.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits of precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
__device__
inline int64_t resolve_root_int(
int64_t b, int64_t cX4, int64_t x, int32_t sign) {
int64_t bXb_cX4 = b*b - cX4;
// potential precision loss could occur here when casting int64_t (63 bits
// precision) to double (52 bits precision)
double sr = ::sqrt((double)bXb_cX4);
int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
// have to cast double to int64_t, otherwise it would only compare up to the
// precision of a double variable, ignoring the precision loss
if (bXb_cX4 != (int64_t) (sr * sr)) {
// handle precision loss by using binary search
int64_t llsr = ::__double2ll_rd(sr);
// Use the following math to reduce search space.
// Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
// let d = abs(bXb_cX4 - llsr * llsr), then we have:
// z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
// z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
// Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
// And the true value of row would also be within the range,
// [res - sqrt(d), res + sqrt(d) + 1)
// as the denominator would only reduce the precision penalty.
int64_t diff =
::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
// l never exceeds (but may equal) the target row index
auto l = res > diff ? res - diff : 0;
// r is always larger than the target row index
auto r = res + diff + 1;
// binary search for the correct answer
x <<= 1; // the loop always compares with 2x, so do it once here
while (l + 1 < r) {
auto m = (l + r) >> 1;
// for tril:
// b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
// for triu:
// b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
if (sign * (b + m) * m > x) {
r = m;
} else {
l = m;
}
}
res = l;
}
return res;
}
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
__device__
inline void get_coordinate_in_tril_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = f - 1;
auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x;
row = resolve_root_int(b, cX4, x, 1);
col = x - ((f + row - 1) * row >> 1);
}
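// Worked example of the formulas above (illustrative): with f = 1 the trapezoid rows
// hold 1, 2, 3, ... elements. For x = 4, the largest row with row^2 + row - 8 <= 0 is
// row = 2, and col = 4 - (1 + 1 + 2 - 1) * 2 / 2 = 1; enumerating the trapezoid row by
// row indeed places x = 4 at coordinate (2, 1).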
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is >= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2
__device__
inline void get_coordinate_in_triu_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = -1 - f;
auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x;
row = resolve_root_int(b, cX4, x, -1);
col = x - ((f - row + 1) * row >> 1) + row;
}
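// Worked example (illustrative): with f = 3 the bottom trapezoid rows hold 3, 2, 1
// elements, and row r starts r columns to the right. For x = 3, row^2 - 7*row + 6 >= 0
// gives row = 1 (the largest integer not exceeding the left root). With f already
// doubled, the code computes col = 3 - (6 - 1 + 1) * 1 / 2 + 1 = 1, where the trailing
// "+ row" accounts for the rightward shift of each row, so x = 3 maps to (1, 1),
// matching a direct enumeration.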
} // namespace
template <typename scalar_t>
__global__
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
void tril_indices_kernel(scalar_t * tensor,
int64_t row_offset,
int64_t m_first_row,
int64_t col,
int64_t trapezoid_size,
int64_t tril_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < tril_size) {
int64_t r, c;
if (linear_index < trapezoid_size) {
// the coordinate is within the top trapezoid
get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
} else {
// the coordinate falls in the bottom rectangle
auto surplus = linear_index - trapezoid_size;
// add the height of trapezoid: m_last_row (col) - m_first_row + 1
r = surplus / col + col - m_first_row + 1;
c = surplus % col;
}
r += row_offset;
tensor[linear_index] = r;
tensor[linear_index + tril_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto tril_size = get_tril_size(row, col, offset);
auto tensor = empty_cuda({2, tril_size}, options);
if (tril_size > 0) {
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
int64_t rectangle_size = 0;
if (rectangle_row_offset < row) {
rectangle_size = (row - rectangle_row_offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using tril_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] {
tril_indices_kernel<<<
dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
tensor.data_ptr<scalar_t>(),
trapezoid_row_offset,
m_first_row,
col,
tril_size - rectangle_size,
tril_size);
});
}
return tensor;
}
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
int64_t col_offset,
int64_t m_first_row,
int64_t col,
int64_t rectangle_size,
int64_t triu_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < triu_size) {
int64_t r, c;
if (linear_index < rectangle_size) {
// the coordinate is within the top rectangle
r = linear_index / col;
c = linear_index % col;
} else {
// the coordinate falls in the bottom trapezoid
get_coordinate_in_triu_trapezoid(
m_first_row, linear_index - rectangle_size, r, c);
r += rectangle_size / col;
}
c += col_offset;
tensor[linear_index] = r;
tensor[linear_index + triu_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);
auto tensor = empty_cuda({2, triu_size}, options);
if (triu_size > 0) {
// # of triu elements in the first row
auto m_first_row = offset > 0 ?
std::max<int64_t>(col - offset, 0) : // upper bounded by col
col;
// size of the top rectangle
int64_t rectangle_size = 0;
if (offset < 0) {
rectangle_size = std::min<int64_t>(row, -offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using triu_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] {
triu_indices_kernel<<<
dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
tensor.data_ptr<scalar_t>(),
std::max<int64_t>(0, offset),
m_first_row,
col,
rectangle_size,
triu_size);
});
}
return tensor;
}
}} // namespace at::native
|
84eef0e49db795a8dd7441287829759e51c305fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2017, Miroslav Stoyanov
*
* This file is part of
* Toolkit for Adaptive Stochastic Modeling And Non-Intrusive ApproximatioN: TASMANIAN
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
* and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* UT-BATTELLE, LLC AND THE UNITED STATES GOVERNMENT MAKE NO REPRESENTATIONS AND DISCLAIM ALL WARRANTIES, BOTH EXPRESSED AND IMPLIED.
* THERE ARE NO EXPRESS OR IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY PATENT,
* COPYRIGHT, TRADEMARK, OR OTHER PROPRIETARY RIGHTS, OR THAT THE SOFTWARE WILL ACCOMPLISH THE INTENDED RESULTS OR THAT THE SOFTWARE OR ITS USE WILL NOT RESULT IN INJURY OR DAMAGE.
* THE USER ASSUMES RESPONSIBILITY FOR ALL LIABILITIES, PENALTIES, FINES, CLAIMS, CAUSES OF ACTION, AND COSTS AND EXPENSES, CAUSED BY, RESULTING FROM OR ARISING OUT OF,
* IN WHOLE OR IN PART THE USE, STORAGE OR DISPOSAL OF THE SOFTWARE.
*/
#ifndef __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#define __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#include "tsgAcceleratedDataStructures.hpp"
#include "tsgCudaLinearAlgebra.hpp"
#include "tsgCudaBasisEvaluations.hpp"
// several kernels assume a linear distribution of the threads and can be executed with "practically unlimited" number of threads
// thus we can set this to the CUDA max number of threads, based on the current cuda version
constexpr int _MAX_CUDA_THREADS = 1024;
namespace TasGrid{
void TasCUDA::dtrans2can(bool use01, int dims, int num_x, int pad_size, const double *gpu_trans_a, const double *gpu_trans_b, const double *gpu_x_transformed, double *gpu_x_canonical){
int num_blocks = (num_x * dims) / _MAX_CUDA_THREADS + (((num_x * dims) % _MAX_CUDA_THREADS == 0) ? 0 : 1);
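// note: the grid is capped just below; the kernels pulled in above advertise a linear
// thread layout with a "practically unlimited" thread count, so they are presumably
// written to cover any entries beyond the capped grid themselves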
if (num_blocks >= 65536) num_blocks = 65536;
hipLaunchKernelGGL(( tasgpu_transformed_to_canonical<double, double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), (2*pad_size) * sizeof(double), 0, dims, num_x, pad_size, gpu_trans_a, gpu_trans_b, gpu_x_transformed, gpu_x_canonical);
if (use01) hipLaunchKernelGGL(( tasgpu_m11_to_01<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, dims * num_x, gpu_x_canonical);
}
// local polynomial basis functions, DENSE algorithm
void TasCUDA::devalpwpoly(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const double *gpu_x, const double *gpu_nodes, const double *gpu_support, double *gpu_y){
// each block thread runs 1024 threads and processes 32 points (or basis functions)
int num_blocks = (num_points / 32) + ((num_points % 32 == 0) ? 0 : 1);
// order == 1 is treated as the "default" so that the compiler doesn't complain about a missing default statement
// semilocalp cannot have order less than 2; only rule_localp can have order 0 (this gets overwritten in makeLocalPolynomialGrid())
if (rule == rule_localp){
switch(order){
case 0:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 0, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 2, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 1, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localp0){
switch(order){
case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 2, rule_localp0, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 1, rule_localp0, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localpb){
switch(order){
case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 2, rule_localpb, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 1, rule_localpb, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else{ // rule == rule_semilocalp
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 2, rule_semilocalp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}
// there is a switch statement that realizes a template instantiation for each combination of rule/order
// this one function covers that switch; the remaining arguments are passed through from devalpwpoly_sparse
template<typename T, int THREADS, int TOPLEVEL, bool fill>
inline void devalpwpoly_sparse_realize_rule_order(int order, TypeOneDRule rule,
int dims, int num_x, int num_points,
const T *x, const T *nodes, const T *support,
const int *hpntr, const int *hindx, int num_roots, const int *roots,
int *spntr, int *sindx, T *svals){
int num_blocks = num_x / THREADS + ((num_x % THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
if (rule == rule_localp){
switch(order){
case 0:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 0, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
case 2:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localp0){
switch(order){
case 2:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp0, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp0, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localpb){
switch(order){
case 2:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localpb, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localpb, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else{ // rule == rule_semilocalp
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_semilocalp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}
// local polynomial basis functions, SPARSE algorithm (2 passes, one pass to compute the non-zeros and one pass to evaluate)
void TasCUDA::devalpwpoly_sparse(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const double *gpu_x,
const CudaVector<double> &gpu_nodes, const CudaVector<double> &gpu_support,
const CudaVector<int> &gpu_hpntr, const CudaVector<int> &gpu_hindx, const CudaVector<int> &gpu_hroots,
CudaVector<int> &gpu_spntr, CudaVector<int> &gpu_sindx, CudaVector<double> &gpu_svals){
gpu_spntr.resize(num_x + 1);
// call with fill == false to count the non-zeros per row of the matrix
devalpwpoly_sparse_realize_rule_order<double, 64, 46, false>
(order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), 0, 0);
std::vector<int> cpu_spntr;
gpu_spntr.unload(cpu_spntr);
cpu_spntr[0] = 0;
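// prefix-sum the per-row counts produced by the fill == false pass into the CSR row-pointer array;
// nz ends up holding the total number of non-zeros used to size sindx/svals below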
int nz = 0;
for(auto &i : cpu_spntr){
i += nz;
nz = i;
}
gpu_spntr.load(cpu_spntr);
gpu_sindx.resize(nz);
gpu_svals.resize(nz);
// call with fill == true to load the non-zeros
devalpwpoly_sparse_realize_rule_order<double, 64, 46, true>
(order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), gpu_sindx.data(), gpu_svals.data());
}
// Sequence Grid basis evaluations
void TasCUDA::devalseq(int dims, int num_x, const std::vector<int> &max_levels, const double *gpu_x, const CudaVector<int> &num_nodes,
const CudaVector<int> &points, const CudaVector<double> &nodes, const CudaVector<double> &coeffs, double *gpu_result){
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + num_x * (max_levels[d-1] + 1);
size_t num_total = offsets[dims-1] + num_x * (max_levels[dims-1] + 1);
int maxl = max_levels[0]; for(auto l : max_levels) if (maxl < l) maxl = l;
CudaVector<int> gpu_offsets(offsets);
CudaVector<double> cache1D(num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_dseq_build_cache<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, gpu_x, nodes.data(), coeffs.data(), maxl+1, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_dseq_eval_sharedpoints<double, 32>), dim3(num_blocks), dim3(1024), 0, 0,
dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_result);
}
// Fourier Grid basis evaluations
void TasCUDA::devalfor(int dims, int num_x, const std::vector<int> &max_levels, const double *gpu_x, const CudaVector<int> &num_nodes, const CudaVector<int> &points, double *gpu_wreal, double *gpu_wimag){
std::vector<int> max_nodes(dims);
for(int j=0; j<dims; j++){
int n = 1;
for(int i=0; i<max_levels[j]; i++) n *= 3;
max_nodes[j] = n;
}
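// each refinement level multiplies the number of one-dimensional nodes by 3, so max_nodes[j] = 3^max_levels[j];
// the factor of 2 in the offsets below presumably leaves room for the real and imaginary part of each cached value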
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + 2 * num_x * (max_nodes[d-1] + 1);
size_t num_total = offsets[dims-1] + 2 * num_x * (max_nodes[dims-1] + 1);
CudaVector<int> gpu_offsets(offsets);
CudaVector<double> cache1D(num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_dfor_build_cache<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, gpu_x, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
if (gpu_wimag == 0){
hipLaunchKernelGGL(( tasgpu_dfor_eval_sharedpoints<double, 32, true>), dim3(num_blocks), dim3(1024), 0, 0,
dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, 0);
}else{
hipLaunchKernelGGL(( tasgpu_dfor_eval_sharedpoints<double, 32, false>), dim3(num_blocks), dim3(1024), 0, 0,
dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, gpu_wimag);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Linear Algebra
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef __TASMANIAN_COMPILE_FALLBACK_CUDA_KERNELS__
void TasCUDA::cudaDgemm(int M, int N, int K, const double *gpu_a, const double *gpu_b, double *gpu_c){ // gpu_c = gpu_a * gpu_b, gpu_c is M by N
int blocks = (N / 96) + (((N % 96) == 0) ? 0 : 1);
blocks *= (M / 96) + (((M % 96) == 0) ? 0 : 1);
while(blocks > 65536) blocks = 65536;
hipLaunchKernelGGL(( tasgpu_cudaTgemm<double, 32, 96>), dim3(blocks), dim3(1024), 0, 0, M, N, K, gpu_a, gpu_b, gpu_c);
}
void TasCUDA::cudaSparseMatmul(int M, int N, int num_nz, const int* gpu_spntr, const int* gpu_sindx, const double* gpu_svals, const double *gpu_B, double *gpu_C){
int blocks = M / 64 + ((M % 64 == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_sparse_matmul<double, 64>), dim3(blocks), dim3(64), 0, 0, M, N, num_nz, gpu_spntr, gpu_sindx, gpu_svals, gpu_B, gpu_C);
}
void TasCUDA::cudaSparseVecDenseMat(int M, int N, int num_nz, const double *A, const int *indx, const double *vals, double *C){
int num_blocks = N / _MAX_CUDA_THREADS + ((N % _MAX_CUDA_THREADS == 0) ? 0 : 1);
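// try one output entry per thread first; if that exceeds the 65536-block grid limit, retry with 2 and
// then 3 entries per thread (the last template parameter); if even that does not fit, nothing is launched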
if (num_blocks< 65536){
hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 1>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (2 * _MAX_CUDA_THREADS) + ((N % (2 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< 65536){
hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 2>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (3 * _MAX_CUDA_THREADS) + ((N % (3 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< 65536){
hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 3>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C);
}
}
}
}
void TasCUDA::convert_sparse_to_dense(int num_rows, int num_columns, const int *pntr, const int *indx, const double *vals, double *destination){
int n = num_rows * num_columns;
int num_blocks = n / _MAX_CUDA_THREADS + ((n % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
hipLaunchKernelGGL(( tascuda_fill<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, n, 0.0, destination);
num_blocks = num_rows;
if (num_blocks >= 65536) num_blocks = 65536;
hipLaunchKernelGGL(( tascuda_sparse_to_dense<double, 64>), dim3(num_blocks), dim3(64), 0, 0, num_rows, num_columns, pntr, indx, vals, destination);
}
#endif
}
#endif
| 84eef0e49db795a8dd7441287829759e51c305fa.cu | /*
* Copyright (c) 2017, Miroslav Stoyanov
*
* This file is part of
* Toolkit for Adaptive Stochastic Modeling And Non-Intrusive ApproximatioN: TASMANIAN
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
* and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* UT-BATTELLE, LLC AND THE UNITED STATES GOVERNMENT MAKE NO REPRESENTATIONS AND DISCLAIM ALL WARRANTIES, BOTH EXPRESSED AND IMPLIED.
* THERE ARE NO EXPRESS OR IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY PATENT,
* COPYRIGHT, TRADEMARK, OR OTHER PROPRIETARY RIGHTS, OR THAT THE SOFTWARE WILL ACCOMPLISH THE INTENDED RESULTS OR THAT THE SOFTWARE OR ITS USE WILL NOT RESULT IN INJURY OR DAMAGE.
* THE USER ASSUMES RESPONSIBILITY FOR ALL LIABILITIES, PENALTIES, FINES, CLAIMS, CAUSES OF ACTION, AND COSTS AND EXPENSES, CAUSED BY, RESULTING FROM OR ARISING OUT OF,
* IN WHOLE OR IN PART THE USE, STORAGE OR DISPOSAL OF THE SOFTWARE.
*/
#ifndef __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#define __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#include "tsgAcceleratedDataStructures.hpp"
#include "tsgCudaLinearAlgebra.hpp"
#include "tsgCudaBasisEvaluations.hpp"
// several kernels assume a linear distribution of the threads and can be executed with a "practically unlimited" number of threads
// thus we can set this to the CUDA max number of threads, based on the current cuda version
constexpr int _MAX_CUDA_THREADS = 1024;
namespace TasGrid{
void TasCUDA::dtrans2can(bool use01, int dims, int num_x, int pad_size, const double *gpu_trans_a, const double *gpu_trans_b, const double *gpu_x_transformed, double *gpu_x_canonical){
int num_blocks = (num_x * dims) / _MAX_CUDA_THREADS + (((num_x * dims) % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
tasgpu_transformed_to_canonical<double, double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS, (2*pad_size) * sizeof(double)>>>(dims, num_x, pad_size, gpu_trans_a, gpu_trans_b, gpu_x_transformed, gpu_x_canonical);
if (use01) tasgpu_m11_to_01<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>(dims * num_x, gpu_x_canonical);
}
// local polynomial basis functions, DENSE algorithm
void TasCUDA::devalpwpoly(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const double *gpu_x, const double *gpu_nodes, const double *gpu_support, double *gpu_y){
// each thread block runs 1024 threads and processes 32 points (or basis functions)
int num_blocks = (num_points / 32) + ((num_points % 32 == 0) ? 0 : 1);
// order == 1 is considered "default" so that the compiler doesn't complain about missing default statement
// semilocalp cannot have order less than 2, only rule_localp can have order 0 (this gets overwritten in makeLocalPolynomialGrid())
if (rule == rule_localp){
switch(order){
case 0:
tasgpu_devalpwpoly<double, 0, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
case 2: tasgpu_devalpwpoly<double, 2, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
tasgpu_devalpwpoly<double, 1, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localp0){
switch(order){
case 2: tasgpu_devalpwpoly<double, 2, rule_localp0, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
tasgpu_devalpwpoly<double, 1, rule_localp0, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localpb){
switch(order){
case 2: tasgpu_devalpwpoly<double, 2, rule_localpb, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
tasgpu_devalpwpoly<double, 1, rule_localpb, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else{ // rule == rule_semilocalp
tasgpu_devalpwpoly<double, 2, rule_semilocalp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}
// there is a switch statement that realizes a template instantiation for each combination of rule/order
// this one function covers that switch; the remaining arguments are passed through from devalpwpoly_sparse
template<typename T, int THREADS, int TOPLEVEL, bool fill>
inline void devalpwpoly_sparse_realize_rule_order(int order, TypeOneDRule rule,
int dims, int num_x, int num_points,
const T *x, const T *nodes, const T *support,
const int *hpntr, const int *hindx, int num_roots, const int *roots,
int *spntr, int *sindx, T *svals){
int num_blocks = num_x / THREADS + ((num_x % THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
if (rule == rule_localp){
switch(order){
case 0:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 0, rule_localp, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
case 2:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localp0){
switch(order){
case 2:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp0, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp0, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localpb){
switch(order){
case 2:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localpb, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localpb, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else{ // rule == rule_semilocalp
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_semilocalp, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}
// local polynomial basis functions, SPARSE algorithm (2 passes, one pass to compute the non-zeros and one pass to evaluate)
void TasCUDA::devalpwpoly_sparse(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const double *gpu_x,
const CudaVector<double> &gpu_nodes, const CudaVector<double> &gpu_support,
const CudaVector<int> &gpu_hpntr, const CudaVector<int> &gpu_hindx, const CudaVector<int> &gpu_hroots,
CudaVector<int> &gpu_spntr, CudaVector<int> &gpu_sindx, CudaVector<double> &gpu_svals){
gpu_spntr.resize(num_x + 1);
// call with fill == false to count the non-zeros per row of the matrix
devalpwpoly_sparse_realize_rule_order<double, 64, 46, false>
(order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), 0, 0);
std::vector<int> cpu_spntr;
gpu_spntr.unload(cpu_spntr);
cpu_spntr[0] = 0;
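// prefix-sum the per-row counts produced by the fill == false pass into the CSR row-pointer array;
// nz ends up holding the total number of non-zeros used to size sindx/svals below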
int nz = 0;
for(auto &i : cpu_spntr){
i += nz;
nz = i;
}
gpu_spntr.load(cpu_spntr);
gpu_sindx.resize(nz);
gpu_svals.resize(nz);
// call with fill == true to load the non-zeros
devalpwpoly_sparse_realize_rule_order<double, 64, 46, true>
(order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), gpu_sindx.data(), gpu_svals.data());
}
// Sequence Grid basis evaluations
void TasCUDA::devalseq(int dims, int num_x, const std::vector<int> &max_levels, const double *gpu_x, const CudaVector<int> &num_nodes,
const CudaVector<int> &points, const CudaVector<double> &nodes, const CudaVector<double> &coeffs, double *gpu_result){
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + num_x * (max_levels[d-1] + 1);
size_t num_total = offsets[dims-1] + num_x * (max_levels[dims-1] + 1);
int maxl = max_levels[0]; for(auto l : max_levels) if (maxl < l) maxl = l;
CudaVector<int> gpu_offsets(offsets);
CudaVector<double> cache1D(num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
tasgpu_dseq_build_cache<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, gpu_x, nodes.data(), coeffs.data(), maxl+1, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
tasgpu_dseq_eval_sharedpoints<double, 32><<<num_blocks, 1024>>>
(dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_result);
}
// Fourier Grid basis evaluations
void TasCUDA::devalfor(int dims, int num_x, const std::vector<int> &max_levels, const double *gpu_x, const CudaVector<int> &num_nodes, const CudaVector<int> &points, double *gpu_wreal, double *gpu_wimag){
std::vector<int> max_nodes(dims);
for(int j=0; j<dims; j++){
int n = 1;
for(int i=0; i<max_levels[j]; i++) n *= 3;
max_nodes[j] = n;
}
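// each refinement level multiplies the number of one-dimensional nodes by 3, so max_nodes[j] = 3^max_levels[j];
// the factor of 2 in the offsets below presumably leaves room for the real and imaginary part of each cached value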
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + 2 * num_x * (max_nodes[d-1] + 1);
size_t num_total = offsets[dims-1] + 2 * num_x * (max_nodes[dims-1] + 1);
CudaVector<int> gpu_offsets(offsets);
CudaVector<double> cache1D(num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
tasgpu_dfor_build_cache<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, gpu_x, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
if (gpu_wimag == 0){
tasgpu_dfor_eval_sharedpoints<double, 32, true><<<num_blocks, 1024>>>
(dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, 0);
}else{
tasgpu_dfor_eval_sharedpoints<double, 32, false><<<num_blocks, 1024>>>
(dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, gpu_wimag);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Linear Algebra
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef __TASMANIAN_COMPILE_FALLBACK_CUDA_KERNELS__
void TasCUDA::cudaDgemm(int M, int N, int K, const double *gpu_a, const double *gpu_b, double *gpu_c){ // gpu_c = gpu_a * gpu_b, gpu_c is M by N
int blocks = (N / 96) + (((N % 96) == 0) ? 0 : 1);
blocks *= (M / 96) + (((M % 96) == 0) ? 0 : 1);
while(blocks > 65536) blocks = 65536;
tasgpu_cudaTgemm<double, 32, 96><<<blocks, 1024>>>(M, N, K, gpu_a, gpu_b, gpu_c);
}
void TasCUDA::cudaSparseMatmul(int M, int N, int num_nz, const int* gpu_spntr, const int* gpu_sindx, const double* gpu_svals, const double *gpu_B, double *gpu_C){
int blocks = M / 64 + ((M % 64 == 0) ? 0 : 1);
tasgpu_sparse_matmul<double, 64><<<blocks, 64>>>(M, N, num_nz, gpu_spntr, gpu_sindx, gpu_svals, gpu_B, gpu_C);
}
void TasCUDA::cudaSparseVecDenseMat(int M, int N, int num_nz, const double *A, const int *indx, const double *vals, double *C){
int num_blocks = N / _MAX_CUDA_THREADS + ((N % _MAX_CUDA_THREADS == 0) ? 0 : 1);
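// try one output entry per thread first; if that exceeds the 65536-block grid limit, retry with 2 and
// then 3 entries per thread (the last template parameter); if even that does not fit, nothing is launched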
if (num_blocks< 65536){
tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 1><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (2 * _MAX_CUDA_THREADS) + ((N % (2 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< 65536){
tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 2><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (3 * _MAX_CUDA_THREADS) + ((N % (3 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< 65536){
tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 3><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C);
}
}
}
}
void TasCUDA::convert_sparse_to_dense(int num_rows, int num_columns, const int *pntr, const int *indx, const double *vals, double *destination){
int n = num_rows * num_columns;
int num_blocks = n / _MAX_CUDA_THREADS + ((n % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
tascuda_fill<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>(n, 0.0, destination);
num_blocks = num_rows;
if (num_blocks >= 65536) num_blocks = 65536;
tascuda_sparse_to_dense<double, 64><<<num_blocks, 64>>>(num_rows, num_columns, pntr, indx, vals, destination);
}
#endif
}
#endif
|
ccbbce4a8485e1db200f2e0d5f43f9105f907148.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include "data.h"
#include "ordergraph_kernel.cu"
const int HIGHEST = 3;
int taskperthr = 1;
int sizepernode;
int ITER = 100;
// global var
float preScore = FLT_MIN;
float maxScore[HIGHEST] = { FLT_MIN };
bool orders[NODE_N][NODE_N];
bool preOrders[NODE_N][NODE_N];
bool preGraph[NODE_N][NODE_N];
bool graph[NODE_N][NODE_N];
bool bestGraph[HIGHEST][NODE_N][NODE_N];
float *localscore, *D_localscore, *D_Score, *scores;
float *LG;
bool *D_parent;
int *D_resP, *parents;
void initialize(); //initialize orders and data
int genOrders(); //swap two nodes in the current order
int conCore(float score); //decide whether to accept or discard the new order
bool getparent(int *bit, int *pre, int posN, int *parent, int *parN, int time); //get every possible set of parents for a node
void incr(int *bit, int n); //increment a binary counter by 1
void incrS(int *bit, int n); //increment a base-STATE_N counter by 1
bool getState(int parN, int *state, int time); //get every possible combination of state for a parent set
float logGamma(int N); // log and gamma
float findBestGraph();
void genScore();
int convert(int *parent, int parN);
void sortGraph();
void swap(int a, int b);
void Pre_logGamma();
int findindex(int *arr, int size);
int C(int n, int a);
int main()
{
int c = 0;
clock_t total = 0, start;
hipDeviceSynchronize();
srand(time(NULL));
start = clock();
initialize();
genScore();
total += clock() - start;
for (int i = 0; i < ITER; i++) {
start = clock();
for (int a = 0; a < NODE_N; a++) {
for (int j = 0; j < NODE_N; j++) {
orders[a][j] = preOrders[a][j];
}
}
int tmp = rand() % 6;
for (int j = 0; j < tmp; j++)
genOrders();
float score = findBestGraph();
conCore(score);
total += clock() - start;
//store the top HIGHEST highest orders
if (c < HIGHEST) {
tmp = 1;
for (int j = 0; j < c; j++) {
if (maxScore[j] == preScore) {
tmp = 0;
}
}
if (tmp != 0) {
maxScore[c] = preScore;
for (int a = 0; a < NODE_N; a++) {
for (int b = 0; b < NODE_N; b++) {
bestGraph[c][a][b] = preGraph[a][b];
}
}
c++;
}
} else if (c == HIGHEST) {
sortGraph();
c++;
} else {
tmp = 1;
for (int j = 0; j < HIGHEST; j++) {
if (maxScore[j] == preScore) {
tmp = 0;
break;
}
}
if (tmp != 0 && preScore > maxScore[HIGHEST - 1]) {
maxScore[HIGHEST - 1] = preScore;
for (int a = 0; a < NODE_N; a++) {
for (int b = 0; b < NODE_N; b++) {
bestGraph[HIGHEST - 1][a][b] = preGraph[a][b];
}
}
int b = HIGHEST - 1;
for (int a = HIGHEST - 2; a >= 0; a--) {
if (maxScore[b] > maxScore[a]) {
swap(a, b);
float tmpd = maxScore[a];
maxScore[a] = maxScore[b];
maxScore[b] = tmpd;
b = a;
}
}
}
}
}
free(localscore);
hipFree(D_localscore);
hipFree(D_parent);
free(scores);
free(parents);
hipFree(D_Score);
hipFree(D_resP);
printf("%d,%d,%d,%f\n", STATE_N, NODE_N, DATA_N, (float) total / CLOCKS_PER_SEC);
}
void sortGraph()
{
float max = FLT_MIN;
int maxi;
for (int j = 0; j < HIGHEST - 1; j++) {
max = maxScore[j];
maxi = j;
for (int i = j + 1; i < HIGHEST; i++) {
if (maxScore[i] > max) {
max = maxScore[i];
maxi = i;
}
}
swap(j, maxi);
float tmp = maxScore[j];
maxScore[j] = max;
maxScore[maxi] = tmp;
}
}
void swap(int a, int b)
{
for (int i = 0; i < NODE_N; i++) {
for (int j = 0; j < NODE_N; j++) {
bool tmp = bestGraph[a][i][j];
bestGraph[a][i][j] = bestGraph[b][i][j];
bestGraph[b][i][j] = tmp;
}
}
}
void initialize()
{
int i, j, tmp, a, b, r;
bool tmpd;
tmp = 1;
for (i = 1; i <= 4; i++) {
tmp += C(NODE_N - 1, i);
}
sizepernode = tmp;
tmp *= NODE_N;
localscore = (float *)malloc(tmp * sizeof(float));
for (i = 0; i < tmp; i++)
localscore[i] = 0;
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++)
orders[i][j] = 0;
}
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < i; j++)
orders[i][j] = 1;
}
r = rand() % 10000;
for (i = 0; i < r; i++) {
a = rand() % NODE_N;
b = rand() % NODE_N;
for (j = 0; j < NODE_N; j++) {
tmpd = orders[j][a];
orders[j][a] = orders[j][b];
orders[j][b] = tmpd;
}
for (j = 0; j < NODE_N; j++) {
tmpd = orders[a][j];
orders[a][j] = orders[b][j];
orders[b][j] = tmpd;
}
}
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++) {
preOrders[i][j] = orders[i][j];
}
}
}
//generate random order
int genOrders()
{
int a, b, j;
bool tmp;
a = rand() % NODE_N;
b = rand() % NODE_N;
for (j = 0; j < NODE_N; j++) {
tmp = orders[a][j];
orders[a][j] = orders[b][j];
orders[b][j] = tmp;
}
for (j = 0; j < NODE_N; j++) {
tmp = orders[j][a];
orders[j][a] = orders[j][b];
orders[j][b] = tmp;
}
return 1;
}
//decide whether to keep or discard an order
int conCore(float score)
{
int i, j;
float tmp;
tmp = log((rand() % 100000) / 100000.0);
if (tmp < (score - preScore)) {
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++) {
preOrders[i][j] = orders[i][j];
preGraph[i][j] = graph[i][j];
}
}
preScore = score;
return 1;
}
return 0;
}
void genScore()
{
int *D_data;
float *D_LG;
dim3 grid(sizepernode / 256 + 1, 1, 1);
dim3 threads(256, 1, 1);
Pre_logGamma();
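// genScoreKernel fills localscore with the local score of every (node, parent set) pair;
// entry [node * sizepernode + setIndex] is read back later in findBestGraph()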
hipMalloc((void **)&D_data, NODE_N * DATA_N * sizeof(int));
hipMalloc((void **)&D_localscore, NODE_N * sizepernode * sizeof(float));
hipMalloc((void **)&D_LG, (DATA_N + 2) * sizeof(float));
hipMemset(D_localscore, 0.0, NODE_N * sizepernode * sizeof(float));
hipMemcpy(D_data, data, NODE_N * DATA_N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(D_LG, LG, (DATA_N + 2) * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( genScoreKernel) , dim3(grid), dim3(threads) , 0, 0, sizepernode, D_localscore, D_data, D_LG);
hipMemcpy(localscore, D_localscore, NODE_N * sizepernode * sizeof(float), hipMemcpyDeviceToHost);
hipHostFree(LG);
hipFree(D_LG);
hipFree(D_data);
scores = (float *)malloc((sizepernode / (256 * taskperthr) + 1) * sizeof(float));
parents = (int *)malloc((sizepernode / (256 * taskperthr) + 1) * 4 * sizeof(int));
hipMalloc((void **)&D_Score, (sizepernode / (256 * taskperthr) + 1) * sizeof(float));
hipMalloc((void **)&D_parent, NODE_N * sizeof(bool));
hipMalloc((void **)&D_resP, (sizepernode / (256 * taskperthr) + 1) * 4 * sizeof(int));
}
int convert(int *parent, int parN)
{
int i, j, w = 1, tmp = 0;
j = 0;
for (i = 0; parN > 0 && i <= parent[parN - 1]; i++) {
if (parent[j] == i) {
j++;
tmp += w;
}
w *= 2;
}
return tmp;
}
void Pre_logGamma()
{
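// tabulate LG[k] = log(k!) (= log Gamma(k + 1)) for k = 1 .. DATA_N + 1 so the scoring kernel,
// which receives this table as D_LG, can look the values up instead of recomputing them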
LG = (float *)malloc((DATA_N + 2) * sizeof(float));
LG[1] = log(1.0);
float i;
for (i = 2; i <= DATA_N + 1; i++) {
LG[(int)i] = LG[(int)i - 1] + log((float)i);
}
}
void incr(int *bit, int n)
{
bit[n]++;
if (bit[n] >= 2) {
bit[n] = 0;
incr(bit, n + 1);
}
return;
}
void incrS(int *bit, int n)
{
bit[n]++;
if (bit[n] >= STATE_N) {
bit[n] = 0;
incrS(bit, n + 1);
}
return;
}
bool getState(int parN, int *state, int time)
{
int j = 1;
j = pow(STATE_N, (float)parN) - 1;
if (time > j)
return false;
if (time >= 1)
incrS(state, 0);
return true;
}
bool getparent(int *bit, int *pre, int posN, int *parent, int *parN, int time)
{
int i, j = 1;
*parN = 0;
if (time == 0)
return true;
for (i = 0; i < posN; i++) {
j = j * 2;
}
j--;
if (time > j)
return false;
incr(bit, 0);
for (i = 0; i < posN; i++) {
if (bit[i] == 1) {
parent[(*parN)++] = pre[i];
}
}
return true;
}
float findBestGraph()
{
float bestls = FLT_MIN;
int bestparent[5];
int bestpN, total;
int node, index;
int pre[NODE_N] = { 0 };
int parent[NODE_N] = { 0 };
int posN = 0, i, j, parN, tmp, k, l;
float ls = FLT_MIN, score = 0;
int blocknum;
for (i = 0; i < NODE_N; i++)
for (j = 0; j < NODE_N; j++)
graph[i][j] = 0;
for (node = 0; node < NODE_N; node++) {
bestls = FLT_MIN;
posN = 0;
for (i = 0; i < NODE_N; i++) {
if (orders[node][i] == 1) {
pre[posN++] = i;
}
}
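// GPU path (the else branch below is a CPU fallback that is unreachable as written, since posN >= 0 always holds):
// computeKernel scores every candidate parent set of size 0..4 drawn from the allowed predecessors
// (total = C(posN,4) + C(posN,3) + C(posN,2) + posN + 1 such sets) and writes one candidate best
// score and parent set per block; the loop over blocknum below reduces these to the best set for this node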
if (posN >= 0) {
total = C(posN, 4) + C(posN, 3) + C(posN, 2) + posN + 1;
taskperthr = 1;
blocknum = total / (256 * taskperthr) + 1;
hipMemset(D_resP, 0, blocknum * 4 * sizeof(int));
hipMemset(D_Score, FLT_MIN, blocknum * sizeof(float));
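// note: the second argument of hipMemset is a byte value, so FLT_MIN is truncated to 0 here and
// D_Score is effectively zero-filled rather than filled with FLT_MIN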
hipMemcpy(D_parent, orders[node], NODE_N * sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( computeKernel) , dim3(blocknum), dim3(256), 256 * sizeof(float) , 0, taskperthr, sizepernode, D_localscore, D_parent, node, total, D_Score, D_resP);
hipMemcpy(parents, D_resP, blocknum * 4 * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(scores, D_Score, blocknum * sizeof(float), hipMemcpyDeviceToHost);
for (i = 0; i < blocknum; i++) {
if (scores[i] > bestls) {
bestls = scores[i];
parN = 0;
for (tmp = 0; tmp < 4; tmp++) {
if (parents[i * 4 + tmp] < 0)
break;
bestparent[tmp] = parents[i * 4 + tmp];
parN++;
}
bestpN = parN;
}
}
} else {
if (posN >= 4) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
for (k = j + 1; k < posN; k++) {
for (l = k + 1; l < posN; l++) {
parN = 4;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
if (pre[k] > node)
parent[3] = pre[k];
else
parent[3] = pre[k] + 1;
if (pre[l] > node)
parent[4] = pre[l];
else
parent[4] = pre[l] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
}
}
if (posN >= 3) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
for (k = j + 1; k < posN; k++) {
parN = 3;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
if (pre[k] > node)
parent[3] = pre[k];
else
parent[3] = pre[k] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
}
if (posN >= 2) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
parN = 2;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
if (posN >= 1) {
for (i = 0; i < posN; i++) {
parN = 1;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
parN = 0;
index = sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = 0;
}
}
if (bestls > FLT_MIN) {
for (i = 0; i < bestpN; i++) {
if (bestparent[i] < node)
graph[node][bestparent[i] - 1] = 1;
else
graph[node][bestparent[i]] = 1;
}
score += bestls;
}
}
return score;
}
int findindex(int *arr, int size)
{ //reminder: arr[0] has to be 0, size == array size - 1, and indexing starts from 0
int i, j, index = 0;
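// rank the parent combination: the first loop skips past all parent sets with fewer than 'size'
// parents, and the remaining code counts how many same-sized sets precede arr in the enumeration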
for (i = 1; i < size; i++) {
index += C(NODE_N - 1, i);
}
for (i = 1; i <= size - 1; i++) {
for (j = arr[i - 1] + 1; j <= arr[i] - 1; j++) {
index += C(NODE_N - 1 - j, size - i);
}
}
index += arr[size] - arr[size - 1];
return index;
}
int C(int n, int a)
{
int i, res = 1, atmp = a;
for (i = 0; i < atmp; i++) {
res *= n;
n--;
}
for (i = 0; i < atmp; i++) {
res /= a;
a--;
}
return res;
}
| ccbbce4a8485e1db200f2e0d5f43f9105f907148.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <time.h>
#include <cuda_runtime.h>
#include "data.h"
#include "ordergraph_kernel.cu"
const int HIGHEST = 3;
int taskperthr = 1;
int sizepernode;
int ITER = 100;
// global var
float preScore = FLT_MIN;
float maxScore[HIGHEST] = { FLT_MIN };
bool orders[NODE_N][NODE_N];
bool preOrders[NODE_N][NODE_N];
bool preGraph[NODE_N][NODE_N];
bool graph[NODE_N][NODE_N];
bool bestGraph[HIGHEST][NODE_N][NODE_N];
float *localscore, *D_localscore, *D_Score, *scores;
float *LG;
bool *D_parent;
int *D_resP, *parents;
void initialize(); //initialize orders and data
int genOrders(); //swap two nodes in the current order
int conCore(float score); //decide whether to accept or discard the new order
bool getparent(int *bit, int *pre, int posN, int *parent, int *parN, int time); //get every possible set of parents for a node
void incr(int *bit, int n); //increment a binary counter by 1
void incrS(int *bit, int n); //increment a base-STATE_N counter by 1
bool getState(int parN, int *state, int time); //get every possible combination of state for a parent set
float logGamma(int N); // log and gamma
float findBestGraph();
void genScore();
int convert(int *parent, int parN);
void sortGraph();
void swap(int a, int b);
void Pre_logGamma();
int findindex(int *arr, int size);
int C(int n, int a);
int main()
{
int c = 0;
clock_t total = 0, start;
cudaDeviceSynchronize();
srand(time(NULL));
start = clock();
initialize();
genScore();
total += clock() - start;
for (int i = 0; i < ITER; i++) {
start = clock();
for (int a = 0; a < NODE_N; a++) {
for (int j = 0; j < NODE_N; j++) {
orders[a][j] = preOrders[a][j];
}
}
int tmp = rand() % 6;
for (int j = 0; j < tmp; j++)
genOrders();
float score = findBestGraph();
conCore(score);
total += clock() - start;
//store the top HIGHEST highest orders
if (c < HIGHEST) {
tmp = 1;
for (int j = 0; j < c; j++) {
if (maxScore[j] == preScore) {
tmp = 0;
}
}
if (tmp != 0) {
maxScore[c] = preScore;
for (int a = 0; a < NODE_N; a++) {
for (int b = 0; b < NODE_N; b++) {
bestGraph[c][a][b] = preGraph[a][b];
}
}
c++;
}
} else if (c == HIGHEST) {
sortGraph();
c++;
} else {
tmp = 1;
for (int j = 0; j < HIGHEST; j++) {
if (maxScore[j] == preScore) {
tmp = 0;
break;
}
}
if (tmp != 0 && preScore > maxScore[HIGHEST - 1]) {
maxScore[HIGHEST - 1] = preScore;
for (int a = 0; a < NODE_N; a++) {
for (int b = 0; b < NODE_N; b++) {
bestGraph[HIGHEST - 1][a][b] = preGraph[a][b];
}
}
int b = HIGHEST - 1;
for (int a = HIGHEST - 2; a >= 0; a--) {
if (maxScore[b] > maxScore[a]) {
swap(a, b);
float tmpd = maxScore[a];
maxScore[a] = maxScore[b];
maxScore[b] = tmpd;
b = a;
}
}
}
}
}
free(localscore);
cudaFree(D_localscore);
cudaFree(D_parent);
free(scores);
free(parents);
cudaFree(D_Score);
cudaFree(D_resP);
printf("%d,%d,%d,%f\n", STATE_N, NODE_N, DATA_N, (float) total / CLOCKS_PER_SEC);
}
void sortGraph()
{
float max = FLT_MIN;
int maxi;
for (int j = 0; j < HIGHEST - 1; j++) {
max = maxScore[j];
maxi = j;
for (int i = j + 1; i < HIGHEST; i++) {
if (maxScore[i] > max) {
max = maxScore[i];
maxi = i;
}
}
swap(j, maxi);
float tmp = maxScore[j];
maxScore[j] = max;
maxScore[maxi] = tmp;
}
}
void swap(int a, int b)
{
for (int i = 0; i < NODE_N; i++) {
for (int j = 0; j < NODE_N; j++) {
bool tmp = bestGraph[a][i][j];
bestGraph[a][i][j] = bestGraph[b][i][j];
bestGraph[b][i][j] = tmp;
}
}
}
void initialize()
{
int i, j, tmp, a, b, r;
bool tmpd;
tmp = 1;
for (i = 1; i <= 4; i++) {
tmp += C(NODE_N - 1, i);
}
sizepernode = tmp;
tmp *= NODE_N;
localscore = (float *)malloc(tmp * sizeof(float));
for (i = 0; i < tmp; i++)
localscore[i] = 0;
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++)
orders[i][j] = 0;
}
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < i; j++)
orders[i][j] = 1;
}
r = rand() % 10000;
for (i = 0; i < r; i++) {
a = rand() % NODE_N;
b = rand() % NODE_N;
for (j = 0; j < NODE_N; j++) {
tmpd = orders[j][a];
orders[j][a] = orders[j][b];
orders[j][b] = tmpd;
}
for (j = 0; j < NODE_N; j++) {
tmpd = orders[a][j];
orders[a][j] = orders[b][j];
orders[b][j] = tmpd;
}
}
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++) {
preOrders[i][j] = orders[i][j];
}
}
}
//generate random order
int genOrders()
{
int a, b, j;
bool tmp;
a = rand() % NODE_N;
b = rand() % NODE_N;
for (j = 0; j < NODE_N; j++) {
tmp = orders[a][j];
orders[a][j] = orders[b][j];
orders[b][j] = tmp;
}
for (j = 0; j < NODE_N; j++) {
tmp = orders[j][a];
orders[j][a] = orders[j][b];
orders[j][b] = tmp;
}
return 1;
}
//decide whether to keep or discard an order
int conCore(float score)
{
int i, j;
float tmp;
tmp = log((rand() % 100000) / 100000.0);
if (tmp < (score - preScore)) {
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++) {
preOrders[i][j] = orders[i][j];
preGraph[i][j] = graph[i][j];
}
}
preScore = score;
return 1;
}
return 0;
}
void genScore()
{
int *D_data;
float *D_LG;
dim3 grid(sizepernode / 256 + 1, 1, 1);
dim3 threads(256, 1, 1);
Pre_logGamma();
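// genScoreKernel fills localscore with the local score of every (node, parent set) pair;
// entry [node * sizepernode + setIndex] is read back later in findBestGraph()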
cudaMalloc((void **)&D_data, NODE_N * DATA_N * sizeof(int));
cudaMalloc((void **)&D_localscore, NODE_N * sizepernode * sizeof(float));
cudaMalloc((void **)&D_LG, (DATA_N + 2) * sizeof(float));
cudaMemset(D_localscore, 0.0, NODE_N * sizepernode * sizeof(float));
cudaMemcpy(D_data, data, NODE_N * DATA_N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(D_LG, LG, (DATA_N + 2) * sizeof(float), cudaMemcpyHostToDevice);
genScoreKernel <<< grid, threads >>> (sizepernode, D_localscore, D_data, D_LG);
cudaMemcpy(localscore, D_localscore, NODE_N * sizepernode * sizeof(float), cudaMemcpyDeviceToHost);
cudaFreeHost(LG);
cudaFree(D_LG);
cudaFree(D_data);
scores = (float *)malloc((sizepernode / (256 * taskperthr) + 1) * sizeof(float));
parents = (int *)malloc((sizepernode / (256 * taskperthr) + 1) * 4 * sizeof(int));
cudaMalloc((void **)&D_Score, (sizepernode / (256 * taskperthr) + 1) * sizeof(float));
cudaMalloc((void **)&D_parent, NODE_N * sizeof(bool));
cudaMalloc((void **)&D_resP, (sizepernode / (256 * taskperthr) + 1) * 4 * sizeof(int));
}
int convert(int *parent, int parN)
{
int i, j, w = 1, tmp = 0;
j = 0;
for (i = 0; parN > 0 && i <= parent[parN - 1]; i++) {
if (parent[j] == i) {
j++;
tmp += w;
}
w *= 2;
}
return tmp;
}
void Pre_logGamma()
{
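// tabulate LG[k] = log(k!) (= log Gamma(k + 1)) for k = 1 .. DATA_N + 1 so the scoring kernel,
// which receives this table as D_LG, can look the values up instead of recomputing them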
LG = (float *)malloc((DATA_N + 2) * sizeof(float));
LG[1] = log(1.0);
float i;
for (i = 2; i <= DATA_N + 1; i++) {
LG[(int)i] = LG[(int)i - 1] + log((float)i);
}
}
void incr(int *bit, int n)
{
bit[n]++;
if (bit[n] >= 2) {
bit[n] = 0;
incr(bit, n + 1);
}
return;
}
void incrS(int *bit, int n)
{
bit[n]++;
if (bit[n] >= STATE_N) {
bit[n] = 0;
incrS(bit, n + 1);
}
return;
}
bool getState(int parN, int *state, int time)
{
int j = 1;
j = pow(STATE_N, (float)parN) - 1;
if (time > j)
return false;
if (time >= 1)
incrS(state, 0);
return true;
}
bool getparent(int *bit, int *pre, int posN, int *parent, int *parN, int time)
{
int i, j = 1;
*parN = 0;
if (time == 0)
return true;
for (i = 0; i < posN; i++) {
j = j * 2;
}
j--;
if (time > j)
return false;
incr(bit, 0);
for (i = 0; i < posN; i++) {
if (bit[i] == 1) {
parent[(*parN)++] = pre[i];
}
}
return true;
}
float findBestGraph()
{
float bestls = FLT_MIN;
int bestparent[5];
int bestpN, total;
int node, index;
int pre[NODE_N] = { 0 };
int parent[NODE_N] = { 0 };
int posN = 0, i, j, parN, tmp, k, l;
float ls = FLT_MIN, score = 0;
int blocknum;
for (i = 0; i < NODE_N; i++)
for (j = 0; j < NODE_N; j++)
graph[i][j] = 0;
for (node = 0; node < NODE_N; node++) {
bestls = FLT_MIN;
posN = 0;
for (i = 0; i < NODE_N; i++) {
if (orders[node][i] == 1) {
pre[posN++] = i;
}
}
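// GPU path (the else branch below is a CPU fallback that is unreachable as written, since posN >= 0 always holds):
// computeKernel scores every candidate parent set of size 0..4 drawn from the allowed predecessors
// (total = C(posN,4) + C(posN,3) + C(posN,2) + posN + 1 such sets) and writes one candidate best
// score and parent set per block; the loop over blocknum below reduces these to the best set for this node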
if (posN >= 0) {
total = C(posN, 4) + C(posN, 3) + C(posN, 2) + posN + 1;
taskperthr = 1;
blocknum = total / (256 * taskperthr) + 1;
cudaMemset(D_resP, 0, blocknum * 4 * sizeof(int));
cudaMemset(D_Score, FLT_MIN, blocknum * sizeof(float));
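// note: the second argument of cudaMemset is a byte value, so FLT_MIN is truncated to 0 here and
// D_Score is effectively zero-filled rather than filled with FLT_MIN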
cudaMemcpy(D_parent, orders[node], NODE_N * sizeof(bool), cudaMemcpyHostToDevice);
computeKernel <<< blocknum, 256, 256 * sizeof(float) >>> (taskperthr, sizepernode, D_localscore, D_parent, node, total, D_Score, D_resP);
cudaMemcpy(parents, D_resP, blocknum * 4 * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(scores, D_Score, blocknum * sizeof(float), cudaMemcpyDeviceToHost);
for (i = 0; i < blocknum; i++) {
if (scores[i] > bestls) {
bestls = scores[i];
parN = 0;
for (tmp = 0; tmp < 4; tmp++) {
if (parents[i * 4 + tmp] < 0)
break;
bestparent[tmp] = parents[i * 4 + tmp];
parN++;
}
bestpN = parN;
}
}
} else {
if (posN >= 4) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
for (k = j + 1; k < posN; k++) {
for (l = k + 1; l < posN; l++) {
parN = 4;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
if (pre[k] > node)
parent[3] = pre[k];
else
parent[3] = pre[k] + 1;
if (pre[l] > node)
parent[4] = pre[l];
else
parent[4] = pre[l] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
}
}
if (posN >= 3) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
for (k = j + 1; k < posN; k++) {
parN = 3;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
if (pre[k] > node)
parent[3] = pre[k];
else
parent[3] = pre[k] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
}
if (posN >= 2) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
parN = 2;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
if (posN >= 1) {
for (i = 0; i < posN; i++) {
parN = 1;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
parN = 0;
index = sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = 0;
}
}
if (bestls > FLT_MIN) {
for (i = 0; i < bestpN; i++) {
if (bestparent[i] < node)
graph[node][bestparent[i] - 1] = 1;
else
graph[node][bestparent[i]] = 1;
}
score += bestls;
}
}
return score;
}
int findindex(int *arr, int size)
{ //reminder: arr[0] has to be 0, size == array size - 1, and indexing starts from 0
int i, j, index = 0;
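// rank the parent combination: the first loop skips past all parent sets with fewer than 'size'
// parents, and the remaining code counts how many same-sized sets precede arr in the enumeration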
for (i = 1; i < size; i++) {
index += C(NODE_N - 1, i);
}
for (i = 1; i <= size - 1; i++) {
for (j = arr[i - 1] + 1; j <= arr[i] - 1; j++) {
index += C(NODE_N - 1 - j, size - i);
}
}
index += arr[size] - arr[size - 1];
return index;
}
int C(int n, int a)
{
int i, res = 1, atmp = a;
for (i = 0; i < atmp; i++) {
res *= n;
n--;
}
for (i = 0; i < atmp; i++) {
res /= a;
a--;
}
return res;
}
|
9325e5f808a5ba3889c9e2cd0ee20d1450e2d754.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 -fcuda-is-device -triple amdgcn-amd-amdhsa -target-cpu gfx906 \
// RUN: -emit-llvm -o - %s | FileCheck %s
#include "Inputs/cuda.h"
// CHECK-LABEL: define {{.*}}@_ZN1AC2Ev(ptr noundef nonnull align 8 dereferenceable(8) %this)
// CHECK: store ptr %this, ptr %this.addr.ascast
// CHECK: %this1 = load ptr, ptr %this.addr.ascast
// CHECK: store ptr addrspace(1) {{.*}} @_ZTV1A{{.*}}, ptr %this1
struct A {
__device__ virtual void vf() {}
};
__global__ void kern() {
A a;
}
| 9325e5f808a5ba3889c9e2cd0ee20d1450e2d754.cu | // RUN: %clang_cc1 -fcuda-is-device -triple amdgcn-amd-amdhsa -target-cpu gfx906 \
// RUN: -emit-llvm -o - %s | FileCheck %s
#include "Inputs/cuda.h"
// CHECK-LABEL: define {{.*}}@_ZN1AC2Ev(ptr noundef nonnull align 8 dereferenceable(8) %this)
// CHECK: store ptr %this, ptr %this.addr.ascast
// CHECK: %this1 = load ptr, ptr %this.addr.ascast
// CHECK: store ptr addrspace(1) {{.*}} @_ZTV1A{{.*}}, ptr %this1
struct A {
__device__ virtual void vf() {}
};
__global__ void kern() {
A a;
}
|
81b474d5465072fc0c08af01244bd83e04aac7be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "libraries/criterion/cuda/ForceAlignmentCriterion.cuh"
#include <algorithm>
#include <cmath>
#include "libraries/common/CudaUtils.cuh"
#include "libraries/common/Workspace.h"
#include "libraries/criterion/cuda/CriterionUtils.cuh"
namespace {
template <class Float>
struct WorkspacePtrs {
explicit WorkspacePtrs(void* workspace, int B, int T, int N, int L) {
w2l::Workspace<> ws(workspace);
ws.request(&scale, B);
ws.request(&alpha, B, T, L);
ws.request(&alphaGrad, B, T, L);
ws.request(&transBatchGrad, B, N, N);
ws.request(&transBuf1, B, L);
ws.request(&transBuf2, B, L);
ws.request(&transBufGrad1, B, L);
ws.request(&transBufGrad2, B, L);
requiredSize = ws.requiredSize();
}
Float* scale;
double* alpha;
double* alphaGrad;
Float* transBatchGrad;
Float* transBuf1;
Float* transBuf2;
Float* transBufGrad1;
Float* transBufGrad2;
size_t requiredSize;
};
/*
* B thread blocks
* L threads/block (ideally)
*/
template <class Float>
__global__ void forwardKernel(
int T,
int N,
int _L,
const Float* _input,
const int* _target,
const int* targetSize,
const Float* trans,
Float* _loss,
WorkspacePtrs<Float> ws) {
int b = blockIdx.x;
auto* alpha = &ws.alpha[b * T * _L];
auto* input = &_input[b * T * N];
auto* target = &_target[b * _L];
auto* transBuf1 = &ws.transBuf1[b * _L];
auto* transBuf2 = &ws.transBuf2[b * _L];
int L = targetSize[b];
for (int i = threadIdx.x; i < L; i += blockDim.x) {
alpha[i] = i == 0 ? input[target[0]] : 0;
transBuf1[i] = trans[target[i] * N + target[i]];
transBuf2[i] = i > 0 ? trans[target[i] * N + target[i - 1]] : 0;
}
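// forward recursion over the banded alignment lattice: alpha[t*L + i] accumulates, in log space,
// all alignments of frames 0..t that end on target position i, reached either by staying on
// target[i] (transBuf1) or by advancing from target[i-1] (transBuf2); low/high restrict i to
// positions that are reachable from the start and can still reach position L-1 by frame T-1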
for (int t = 1; t < T; ++t) {
auto* inputCur = &input[t * N];
auto* alphaPrev = &alpha[(t - 1) * L];
auto* alphaCur = &alpha[t * L];
int high = t < L ? t : L;
int low = T - t < L ? L - (T - t) : 1;
__syncthreads();
if (threadIdx.x == 0) {
if (T - t >= L) {
alphaCur[0] = alphaPrev[0] + transBuf1[0] + inputCur[target[0]];
}
} else if (threadIdx.x == 1) {
if (t < L) {
alphaCur[high] =
alphaPrev[high - 1] + transBuf2[high] + inputCur[target[high]];
}
}
for (int i = low + threadIdx.x; i < high; i += blockDim.x) {
double s1 = alphaPrev[i] + transBuf1[i];
double s2 = alphaPrev[i - 1] + transBuf2[i];
// lse = logSumExp(s1, s2)
double lse =
s1 < s2 ? s2 + log(1 + exp(s1 - s2)) : s1 + log(1 + exp(s2 - s1));
alphaCur[i] = lse + inputCur[target[i]];
}
}
__syncthreads();
if (threadIdx.x == 0) {
_loss[b] = alpha[T * L - 1] * ws.scale[b];
}
}
/*
* B thread blocks
* L threads/block (ideally)
*/
template <class Float>
__global__ void backwardKernel(
int T,
int N,
int _L,
const int* _target,
const int* targetSize,
const Float* grad,
Float* _inputGrad,
Float* transGrad,
WorkspacePtrs<Float> ws) {
int b = blockIdx.x;
auto* alpha = &ws.alpha[b * T * _L];
auto* alphaGrad = &ws.alphaGrad[b * T * _L];
auto* inputGrad = &_inputGrad[b * T * N];
auto* target = &_target[b * _L];
auto* transBatchGrad = &ws.transBatchGrad[b * N * N];
auto* transBuf1 = &ws.transBuf1[b * _L];
auto* transBuf2 = &ws.transBuf2[b * _L];
auto* transBufGrad1 = &ws.transBufGrad1[b * _L];
auto* transBufGrad2 = &ws.transBufGrad2[b * _L];
int L = targetSize[b];
if (threadIdx.x == 0) {
alphaGrad[T * L - 1] = 1;
}
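// backward sweep: starting from d(loss)/d(alpha[T*L - 1]) = 1, each alphaGrad cell is pushed back
// through the same two incoming edges used in the forward pass; d1/d2 below are the weights
// obtained by differentiating logSumExp(s1, s2), i.e. the softmax of the "stay" and "advance" paths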
for (int t = T - 1; t > 0; --t) {
auto* inputCurGrad = &inputGrad[t * N];
auto* alphaPrev = &alpha[(t - 1) * L];
auto* alphaCurGrad = &alphaGrad[t * L];
auto* alphaPrevGrad = &alphaGrad[(t - 1) * L];
int high = t < L ? t : L;
int low = T - t < L ? L - (T - t) : 1;
int high1 = t < L ? t + 1 : L;
int low1 = T - t < L ? L - (T - t) : 0;
__syncthreads();
for (int i = low1 + threadIdx.x; i < high1; i += blockDim.x) {
atomicAdd(&inputCurGrad[target[i]], alphaCurGrad[i]);
}
if (threadIdx.x == 0) {
if (T - t >= L) {
atomicAdd(&alphaPrevGrad[0], alphaCurGrad[0]);
transBufGrad1[0] += alphaCurGrad[0];
}
} else if (threadIdx.x == 1) {
if (t < L) {
atomicAdd(&alphaPrevGrad[high - 1], alphaCurGrad[high]);
transBufGrad2[high] += alphaCurGrad[high];
}
}
for (int i = low + threadIdx.x; i < high; i += blockDim.x) {
double s1 = alphaPrev[i] + transBuf1[i];
double s2 = alphaPrev[i - 1] + transBuf2[i];
// d1, d2 = dLogSumExp(s1, s2)
double d1, d2;
if (s1 < s2) {
d2 = 1 / (1 + exp(s1 - s2));
d1 = 1 - d2;
} else {
d1 = 1 / (1 + exp(s2 - s1));
d2 = 1 - d1;
}
atomicAdd(&alphaPrevGrad[i], d1 * alphaCurGrad[i]);
atomicAdd(&alphaPrevGrad[i - 1], d2 * alphaCurGrad[i]);
transBufGrad1[i] += d1 * alphaCurGrad[i];
transBufGrad2[i] += d2 * alphaCurGrad[i];
}
}
__syncthreads();
__shared__ Float gradScale;
if (threadIdx.x == 0) {
inputGrad[target[0]] += alphaGrad[0];
gradScale = grad[b] * ws.scale[b];
}
for (int i = threadIdx.x; i < L; i += blockDim.x) {
atomicAdd(&transBatchGrad[target[i] * N + target[i]], transBufGrad1[i]);
if (i > 0) {
atomicAdd(
&transBatchGrad[target[i] * N + target[i - 1]], transBufGrad2[i]);
}
}
__syncthreads();
for (int i = threadIdx.x; i < T * N; i += blockDim.x) {
inputGrad[i] *= gradScale;
}
for (int i = threadIdx.x; i < N * N; i += blockDim.x) {
atomicAdd(&transGrad[i], gradScale * transBatchGrad[i]);
}
}
template <class Float>
__global__ void viterbiPathKernel(
int T,
int N,
int _L,
const Float* _input,
const int* _target,
const int* targetSize,
const Float* trans,
int* bestPaths,
WorkspacePtrs<Float> ws) {
int b = blockIdx.x;
auto* alpha = &ws.alpha[b * T * _L];
auto* input = &_input[b * T * N];
auto* target = &_target[b * _L];
auto* transBuf1 = &ws.transBuf1[b * _L];
auto* transBuf2 = &ws.transBuf2[b * _L];
int L = targetSize[b];
for (int i = threadIdx.x; i < L * T; i += blockDim.x) {
alpha[i] =
i == 0 ? input[target[0]] : -std::numeric_limits<Float>::infinity();
}
for (int i = threadIdx.x; i < L; i += blockDim.x) {
transBuf1[i] = trans[target[i] * N + target[i]];
transBuf2[i] = i > 0 ? trans[target[i] * N + target[i - 1]] : 0;
}
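// no valid alignment exists when the target is empty or longer than the number of frames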
if (L > T || L == 0) {
return;
}
for (int t = 1; t < T; ++t) {
auto* inputCur = &input[t * N];
auto* alphaPrev = &alpha[(t - 1) * L];
auto* alphaCur = &alpha[t * L];
int high = t < L ? t : L;
int low = T - t < L ? L - (T - t) : 1;
// Ensure that all previous alphas have been computed
__syncthreads();
if (threadIdx.x == 0) {
if (T - t >= L) {
alphaCur[0] = alphaPrev[0] + transBuf1[0] + inputCur[target[0]];
}
} else if (threadIdx.x == 1) {
if (t < L) {
alphaCur[high] =
alphaPrev[high - 1] + transBuf2[high] + inputCur[target[high]];
}
}
for (int i = low + threadIdx.x; i < high; i += blockDim.x) {
double s1 = alphaPrev[i] + transBuf1[i];
double s2 = alphaPrev[i - 1] + transBuf2[i];
alphaCur[i] = inputCur[target[i]] + max(s1, s2);
}
}
// Ensure all threads are finished and alphas have been computed before
// computing backward path
__syncthreads();
if (threadIdx.x == 0) {
int ltrIdx = L - 1;
for (int t = T - 1; t > 0; t--) {
bestPaths[t + (b * T)] = target[ltrIdx];
auto* alphaPrev = &alpha[(t - 1) * L];
if (ltrIdx > 0) {
double s1 = alphaPrev[ltrIdx] + transBuf1[ltrIdx];
double s2 = alphaPrev[ltrIdx - 1] + transBuf2[ltrIdx];
if (s2 > s1) {
ltrIdx--;
}
}
}
bestPaths[b * T] = target[ltrIdx];
}
}
} // namespace
namespace w2l {
namespace cuda {
template <class Float>
size_t
ForceAlignmentCriterion<Float>::getWorkspaceSize(int B, int T, int N, int L) {
return WorkspacePtrs<Float>(nullptr, B, T, N, L).requiredSize;
}
template <class Float>
void ForceAlignmentCriterion<Float>::forward(
int B,
int T,
int N,
int L,
CriterionScaleMode scaleMode,
const Float* input,
const int* target,
const int* targetSize,
const Float* trans,
Float* loss,
void* workspace,
hipStream_t stream) {
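// round the target length up to a whole warp (32 threads) and cap the block size at 256 threads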
int blockSize = ::min(256, (L + 31) / 32 * 32);
WorkspacePtrs<Float> ws(workspace, B, T, N, L);
CriterionUtils<Float>::computeScale(
B, T, N, scaleMode, targetSize, ws.scale, stream);
hipLaunchKernelGGL(( forwardKernel), dim3(B), dim3(blockSize), 0, stream,
T, N, L, input, target, targetSize, trans, loss, ws);
}
template <class Float>
void ForceAlignmentCriterion<Float>::backward(
int B,
int T,
int N,
int L,
const int* target,
const int* targetSize,
const Float* grad,
Float* inputGrad,
Float* transGrad,
void* workspace,
hipStream_t stream) {
int blockSize = ::min(256, (L + 31) / 32 * 32);
WorkspacePtrs<Float> ws(workspace, B, T, N, L);
setZero(inputGrad, B * T * N, stream);
setZero(transGrad, N * N, stream);
setZero(ws.alphaGrad, B * T * L, stream);
setZero(ws.transBatchGrad, B * N * N, stream);
setZero(ws.transBufGrad1, B * L, stream);
setZero(ws.transBufGrad2, B * L, stream);
hipLaunchKernelGGL(( backwardKernel), dim3(B), dim3(blockSize), 0, stream,
T, N, L, target, targetSize, grad, inputGrad, transGrad, ws);
}
template <class Float>
void ForceAlignmentCriterion<Float>::viterbiPath(
int B,
int T,
int N,
int L,
const Float* input,
const int* target,
const int* targetSize,
const Float* trans,
int* bestPaths,
void* workspace,
hipStream_t stream) {
int blockSize = ::min(256, (L + 31) / 32 * 32);
WorkspacePtrs<Float> ws(workspace, B, T, N, L);
setZero(ws.alpha, B * T * L, stream);
hipLaunchKernelGGL(( viterbiPathKernel), dim3(B), dim3(blockSize), 0, stream,
T, N, L, input, target, targetSize, trans, bestPaths, ws);
}
template struct ForceAlignmentCriterion<float>;
template struct ForceAlignmentCriterion<double>;
} // namespace cuda
} // namespace w2l
| 81b474d5465072fc0c08af01244bd83e04aac7be.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "libraries/criterion/cuda/ForceAlignmentCriterion.cuh"
#include <algorithm>
#include <cmath>
#include "libraries/common/CudaUtils.cuh"
#include "libraries/common/Workspace.h"
#include "libraries/criterion/cuda/CriterionUtils.cuh"
namespace {
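// Carves all per-batch scratch buffers (scale, alphas, transition buffers and
// their gradients) out of a single caller-provided workspace allocation.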
template <class Float>
struct WorkspacePtrs {
explicit WorkspacePtrs(void* workspace, int B, int T, int N, int L) {
w2l::Workspace<> ws(workspace);
ws.request(&scale, B);
ws.request(&alpha, B, T, L);
ws.request(&alphaGrad, B, T, L);
ws.request(&transBatchGrad, B, N, N);
ws.request(&transBuf1, B, L);
ws.request(&transBuf2, B, L);
ws.request(&transBufGrad1, B, L);
ws.request(&transBufGrad2, B, L);
requiredSize = ws.requiredSize();
}
Float* scale;
double* alpha;
double* alphaGrad;
Float* transBatchGrad;
Float* transBuf1;
Float* transBuf2;
Float* transBufGrad1;
Float* transBufGrad2;
size_t requiredSize;
};
/*
* B thread blocks
* L threads/block (ideally)
*/
template <class Float>
__global__ void forwardKernel(
int T,
int N,
int _L,
const Float* _input,
const int* _target,
const int* targetSize,
const Float* trans,
Float* _loss,
WorkspacePtrs<Float> ws) {
int b = blockIdx.x;
auto* alpha = &ws.alpha[b * T * _L];
auto* input = &_input[b * T * N];
auto* target = &_target[b * _L];
auto* transBuf1 = &ws.transBuf1[b * _L];
auto* transBuf2 = &ws.transBuf2[b * _L];
int L = targetSize[b];
for (int i = threadIdx.x; i < L; i += blockDim.x) {
alpha[i] = i == 0 ? input[target[0]] : 0;
transBuf1[i] = trans[target[i] * N + target[i]];
transBuf2[i] = i > 0 ? trans[target[i] * N + target[i - 1]] : 0;
}
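  // DP recurrence: alpha[t][i] = input[t][target[i]]
  //   + logSumExp(alpha[t-1][i] + selfTrans, alpha[t-1][i-1] + bigramTrans),
  // with the boundary positions handled by dedicated threads below.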
for (int t = 1; t < T; ++t) {
auto* inputCur = &input[t * N];
auto* alphaPrev = &alpha[(t - 1) * L];
auto* alphaCur = &alpha[t * L];
int high = t < L ? t : L;
int low = T - t < L ? L - (T - t) : 1;
__syncthreads();
if (threadIdx.x == 0) {
if (T - t >= L) {
alphaCur[0] = alphaPrev[0] + transBuf1[0] + inputCur[target[0]];
}
} else if (threadIdx.x == 1) {
if (t < L) {
alphaCur[high] =
alphaPrev[high - 1] + transBuf2[high] + inputCur[target[high]];
}
}
for (int i = low + threadIdx.x; i < high; i += blockDim.x) {
double s1 = alphaPrev[i] + transBuf1[i];
double s2 = alphaPrev[i - 1] + transBuf2[i];
// lse = logSumExp(s1, s2)
double lse =
s1 < s2 ? s2 + log(1 + exp(s1 - s2)) : s1 + log(1 + exp(s2 - s1));
alphaCur[i] = lse + inputCur[target[i]];
}
}
__syncthreads();
if (threadIdx.x == 0) {
_loss[b] = alpha[T * L - 1] * ws.scale[b];
}
}
/*
* B thread blocks
* L threads/block (ideally)
*/
template <class Float>
__global__ void backwardKernel(
int T,
int N,
int _L,
const int* _target,
const int* targetSize,
const Float* grad,
Float* _inputGrad,
Float* transGrad,
WorkspacePtrs<Float> ws) {
int b = blockIdx.x;
auto* alpha = &ws.alpha[b * T * _L];
auto* alphaGrad = &ws.alphaGrad[b * T * _L];
auto* inputGrad = &_inputGrad[b * T * N];
auto* target = &_target[b * _L];
auto* transBatchGrad = &ws.transBatchGrad[b * N * N];
auto* transBuf1 = &ws.transBuf1[b * _L];
auto* transBuf2 = &ws.transBuf2[b * _L];
auto* transBufGrad1 = &ws.transBufGrad1[b * _L];
auto* transBufGrad2 = &ws.transBufGrad2[b * _L];
int L = targetSize[b];
if (threadIdx.x == 0) {
alphaGrad[T * L - 1] = 1;
}
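  // Backward pass: starting from d(loss)/d(alpha[T-1][L-1]) = 1, split each
  // node's gradient between its two predecessors using the softmax weights
  // (d1, d2) of the forward log-sum-exp.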
for (int t = T - 1; t > 0; --t) {
auto* inputCurGrad = &inputGrad[t * N];
auto* alphaPrev = &alpha[(t - 1) * L];
auto* alphaCurGrad = &alphaGrad[t * L];
auto* alphaPrevGrad = &alphaGrad[(t - 1) * L];
int high = t < L ? t : L;
int low = T - t < L ? L - (T - t) : 1;
int high1 = t < L ? t + 1 : L;
int low1 = T - t < L ? L - (T - t) : 0;
__syncthreads();
for (int i = low1 + threadIdx.x; i < high1; i += blockDim.x) {
atomicAdd(&inputCurGrad[target[i]], alphaCurGrad[i]);
}
if (threadIdx.x == 0) {
if (T - t >= L) {
atomicAdd(&alphaPrevGrad[0], alphaCurGrad[0]);
transBufGrad1[0] += alphaCurGrad[0];
}
} else if (threadIdx.x == 1) {
if (t < L) {
atomicAdd(&alphaPrevGrad[high - 1], alphaCurGrad[high]);
transBufGrad2[high] += alphaCurGrad[high];
}
}
for (int i = low + threadIdx.x; i < high; i += blockDim.x) {
double s1 = alphaPrev[i] + transBuf1[i];
double s2 = alphaPrev[i - 1] + transBuf2[i];
// d1, d2 = dLogSumExp(s1, s2)
double d1, d2;
if (s1 < s2) {
d2 = 1 / (1 + exp(s1 - s2));
d1 = 1 - d2;
} else {
d1 = 1 / (1 + exp(s2 - s1));
d2 = 1 - d1;
}
atomicAdd(&alphaPrevGrad[i], d1 * alphaCurGrad[i]);
atomicAdd(&alphaPrevGrad[i - 1], d2 * alphaCurGrad[i]);
transBufGrad1[i] += d1 * alphaCurGrad[i];
transBufGrad2[i] += d2 * alphaCurGrad[i];
}
}
__syncthreads();
__shared__ Float gradScale;
if (threadIdx.x == 0) {
inputGrad[target[0]] += alphaGrad[0];
gradScale = grad[b] * ws.scale[b];
}
for (int i = threadIdx.x; i < L; i += blockDim.x) {
atomicAdd(&transBatchGrad[target[i] * N + target[i]], transBufGrad1[i]);
if (i > 0) {
atomicAdd(
&transBatchGrad[target[i] * N + target[i - 1]], transBufGrad2[i]);
}
}
__syncthreads();
for (int i = threadIdx.x; i < T * N; i += blockDim.x) {
inputGrad[i] *= gradScale;
}
for (int i = threadIdx.x; i < N * N; i += blockDim.x) {
atomicAdd(&transGrad[i], gradScale * transBatchGrad[i]);
}
}
template <class Float>
__global__ void viterbiPathKernel(
int T,
int N,
int _L,
const Float* _input,
const int* _target,
const int* targetSize,
const Float* trans,
int* bestPaths,
WorkspacePtrs<Float> ws) {
int b = blockIdx.x;
auto* alpha = &ws.alpha[b * T * _L];
auto* input = &_input[b * T * N];
auto* target = &_target[b * _L];
auto* transBuf1 = &ws.transBuf1[b * _L];
auto* transBuf2 = &ws.transBuf2[b * _L];
int L = targetSize[b];
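  // Initialise Viterbi scores: only cell (t=0, first target label) is reachable;
  // every other cell starts at -infinity.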
for (int i = threadIdx.x; i < L * T; i += blockDim.x) {
alpha[i] =
i == 0 ? input[target[0]] : -std::numeric_limits<Float>::infinity();
}
for (int i = threadIdx.x; i < L; i += blockDim.x) {
transBuf1[i] = trans[target[i] * N + target[i]];
transBuf2[i] = i > 0 ? trans[target[i] * N + target[i - 1]] : 0;
}
if (L > T || L == 0) {
return;
}
for (int t = 1; t < T; ++t) {
auto* inputCur = &input[t * N];
auto* alphaPrev = &alpha[(t - 1) * L];
auto* alphaCur = &alpha[t * L];
int high = t < L ? t : L;
int low = T - t < L ? L - (T - t) : 1;
// Ensure that all previous alphas have been computed
__syncthreads();
if (threadIdx.x == 0) {
if (T - t >= L) {
alphaCur[0] = alphaPrev[0] + transBuf1[0] + inputCur[target[0]];
}
} else if (threadIdx.x == 1) {
if (t < L) {
alphaCur[high] =
alphaPrev[high - 1] + transBuf2[high] + inputCur[target[high]];
}
}
for (int i = low + threadIdx.x; i < high; i += blockDim.x) {
double s1 = alphaPrev[i] + transBuf1[i];
double s2 = alphaPrev[i - 1] + transBuf2[i];
alphaCur[i] = inputCur[target[i]] + max(s1, s2);
}
}
// Ensure all threads are finished and alphas have been computed before
// computing backward path
__syncthreads();
if (threadIdx.x == 0) {
int ltrIdx = L - 1;
for (int t = T - 1; t > 0; t--) {
bestPaths[t + (b * T)] = target[ltrIdx];
auto* alphaPrev = &alpha[(t - 1) * L];
if (ltrIdx > 0) {
double s1 = alphaPrev[ltrIdx] + transBuf1[ltrIdx];
double s2 = alphaPrev[ltrIdx - 1] + transBuf2[ltrIdx];
if (s2 > s1) {
ltrIdx--;
}
}
}
bestPaths[b * T] = target[ltrIdx];
}
}
} // namespace
namespace w2l {
namespace cuda {
template <class Float>
size_t
ForceAlignmentCriterion<Float>::getWorkspaceSize(int B, int T, int N, int L) {
return WorkspacePtrs<Float>(nullptr, B, T, N, L).requiredSize;
}
template <class Float>
void ForceAlignmentCriterion<Float>::forward(
int B,
int T,
int N,
int L,
CriterionScaleMode scaleMode,
const Float* input,
const int* target,
const int* targetSize,
const Float* trans,
Float* loss,
void* workspace,
cudaStream_t stream) {
int blockSize = std::min(256, (L + 31) / 32 * 32);
WorkspacePtrs<Float> ws(workspace, B, T, N, L);
CriterionUtils<Float>::computeScale(
B, T, N, scaleMode, targetSize, ws.scale, stream);
forwardKernel<<<B, blockSize, 0, stream>>>(
T, N, L, input, target, targetSize, trans, loss, ws);
}
template <class Float>
void ForceAlignmentCriterion<Float>::backward(
int B,
int T,
int N,
int L,
const int* target,
const int* targetSize,
const Float* grad,
Float* inputGrad,
Float* transGrad,
void* workspace,
cudaStream_t stream) {
int blockSize = std::min(256, (L + 31) / 32 * 32);
WorkspacePtrs<Float> ws(workspace, B, T, N, L);
setZero(inputGrad, B * T * N, stream);
setZero(transGrad, N * N, stream);
setZero(ws.alphaGrad, B * T * L, stream);
setZero(ws.transBatchGrad, B * N * N, stream);
setZero(ws.transBufGrad1, B * L, stream);
setZero(ws.transBufGrad2, B * L, stream);
backwardKernel<<<B, blockSize, 0, stream>>>(
T, N, L, target, targetSize, grad, inputGrad, transGrad, ws);
}
template <class Float>
void ForceAlignmentCriterion<Float>::viterbiPath(
int B,
int T,
int N,
int L,
const Float* input,
const int* target,
const int* targetSize,
const Float* trans,
int* bestPaths,
void* workspace,
cudaStream_t stream) {
int blockSize = std::min(256, (L + 31) / 32 * 32);
WorkspacePtrs<Float> ws(workspace, B, T, N, L);
setZero(ws.alpha, B * T * L, stream);
viterbiPathKernel<<<B, blockSize, 0, stream>>>(
T, N, L, input, target, targetSize, trans, bestPaths, ws);
}
template struct ForceAlignmentCriterion<float>;
template struct ForceAlignmentCriterion<double>;
} // namespace cuda
} // namespace w2l
|
6fd406257e12815ca0106774f9b10cff1a75b281.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Arrays.cuh"
#include "CrackingDES.cuh"
inline void gpuAssert(hipError_t code, char *file, int line)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
exit(code);
}
}
struct result
{
bool isCracked;
int keyNumber;
};
void encipherTextCPU(short * message, short * key, short * cipherMessage)
{
short C[SHIFTSLEN+1][BLOCKSLEN];
short D[SHIFTSLEN+1][BLOCKSLEN];
short L[IPMSGCOUNT+1][MSGBITLEN/2];
short R[IPMSGCOUNT+1][MSGBITLEN/2];
short expandedR[EXTENDEDLEN];
short sboxes[SBOXCOUNT][SBOXSIZE];
short keys[KEYCOUNT][PC2LEN];
for(int i = 0; i < BLOCKSLEN; i++)
{
C[0][i] = key[PC1[i]-1];
D[0][i] = key[PC1[BLOCKSLEN + i]-1];
}
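	// Key schedule: rotate the C/D halves by the per-round left-shift amounts
	// and apply PC-2 to build the per-round keys.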
for(int i = 1; i < SHIFTSLEN+1; i++)
{
for(int j = 0; j < BLOCKSLEN - leftShifts[i]; j++)
{
C[i][j] = C[i-1][j + leftShifts[i]];
D[i][j] = D[i-1][j + leftShifts[i]];
}
for(int j = 0; j < leftShifts[i]; j++)
{
C[i][j + BLOCKSLEN - leftShifts[i]] = C[i-1][j];
D[i][j + BLOCKSLEN - leftShifts[i]] = D[i-1][j];
}
for(int j = 0; j < PC2LEN; j++)
{
if(PC2[j] - 1 < BLOCKSLEN)
keys[i-1][j] = C[i][PC2[j]-1];
else
keys[i-1][j] = D[i][PC2[j]-BLOCKSLEN-1];
}
}
for(int i = 0; i < MSGBITLEN/2; i++)
{
L[0][i] = message[IP[i]-1];
R[0][i] = message[IP[MSGBITLEN/2 + i]-1];
}
for(int i = 1; i < IPMSGCOUNT+1; i++)
{
for(int j = 0; j < EXTENDEDLEN; j++)
expandedR[j] = R[i-1][selectionTable[j] - 1] ^ keys[i-1][j];
for(int j = 0; j < SBOXCOUNT; j++)
{
short row = 2 * expandedR[j*SBLOCKSIZE] + expandedR[j*SBLOCKSIZE + 5];
short column = 8 * expandedR[j*SBLOCKSIZE + 1]
+ 4 * expandedR[j*SBLOCKSIZE + 2] + 2 * expandedR[j*SBLOCKSIZE + 3]
+ expandedR[j*SBLOCKSIZE + 4];
short sValue = S[j][row*SCOLUMNS + column];
short mask = 1;
for(int k = 0; k < SBOXSIZE; k++)
sboxes[j][SBOXSIZE - k -1] = (sValue & (mask << k)) >> k;
}
for(int j = 0; j < MSGBITLEN/2; j++)
{
L[i][j] = R[i-1][j];
R[i][j] = (L[i-1][j] + sboxes[(P[j]-1) / SBOXSIZE][(P[j]-1) % SBOXSIZE]) % 2;
}
}
for(int i = 0; i < MSGBITLEN; i++)
{
if(reverseIP[i] < MSGBITLEN/2)
cipherMessage[i] = R[16][reverseIP[i] - 1];
else
cipherMessage[i] = L[16][reverseIP[i] - 1 - MSGBITLEN/2];
}
}
__device__ void encipherTextGPU(short * message, short * key, short * cipherMessage, bool * result)
{
short C[SHIFTSLEN+1][BLOCKSLEN];
short D[SHIFTSLEN+1][BLOCKSLEN];
short L[IPMSGCOUNT+1][MSGBITLEN/2];
short R[IPMSGCOUNT+1][MSGBITLEN/2];
short expandedR[EXTENDEDLEN];
short sboxes[SBOXCOUNT][SBOXSIZE];
short keys[KEYCOUNT][PC2LEN];
for(int i = 0; i < BLOCKSLEN; i++)
{
C[0][i] = key[d_PC1[i]-1];
D[0][i] = key[d_PC1[BLOCKSLEN + i]-1];
}
for(int i = 1; i < SHIFTSLEN+1; i++)
{
for(int j = 0; j < BLOCKSLEN - d_leftShifts[i]; j++)
{
C[i][j] = C[i-1][j + d_leftShifts[i]];
D[i][j] = D[i-1][j + d_leftShifts[i]];
}
for(int j = 0; j < d_leftShifts[i]; j++)
{
C[i][j + BLOCKSLEN - d_leftShifts[i]] = C[i-1][j];
D[i][j + BLOCKSLEN - d_leftShifts[i]] = D[i-1][j];
}
for(int j = 0; j < PC2LEN; j++)
{
if(d_PC2[j] - 1 < BLOCKSLEN)
keys[i-1][j] = C[i][d_PC2[j]-1];
else
keys[i-1][j] = D[i][d_PC2[j]-BLOCKSLEN-1];
}
}
for(int i = 0; i < MSGBITLEN/2; i++)
{
L[0][i] = message[d_IP[i]-1];
R[0][i] = message[d_IP[MSGBITLEN/2 + i]-1];
}
for(int i = 1; i < IPMSGCOUNT+1; i++)
{
for(int j = 0; j < EXTENDEDLEN; j++)
expandedR[j] = R[i-1][d_selectionTable[j] - 1] ^ keys[i-1][j];
for(int j = 0; j < SBOXCOUNT; j++)
{
short row = 2 * expandedR[j*SBLOCKSIZE] + expandedR[j*SBLOCKSIZE + 5];
short column = 8 * expandedR[j*SBLOCKSIZE + 1]
+ 4 * expandedR[j*SBLOCKSIZE + 2] + 2 * expandedR[j*SBLOCKSIZE + 3]
+ expandedR[j*SBLOCKSIZE + 4];
short sValue = d_S[j][row*SCOLUMNS + column];
short mask = 1;
for(int k = 0; k < SBOXSIZE; k++)
sboxes[j][SBOXSIZE - k -1] = (sValue & (mask << k)) >> k;
}
for(int j = 0; j < MSGBITLEN/2; j++)
{
L[i][j] = R[i-1][j];
R[i][j] = (L[i-1][j] + sboxes[(d_P[j]-1) / SBOXSIZE][(d_P[j]-1) % SBOXSIZE]) % 2;
}
}
*result = true;
for(int i = 0; i < MSGBITLEN; i++)
{
if(d_reverseIP[i] < MSGBITLEN/2)
{
if(R[16][d_reverseIP[i] - 1] != cipherMessage[i])
{
*result = false;
break;
}
}
else if(L[16][d_reverseIP[i] - 1 - MSGBITLEN/2] != cipherMessage[i])
{
*result = false;
break;
}
}
if(*result)
return;
}
__host__ __device__ void convertSignToBitArray(char sign, short * resultArray)
{
//memset(resultArray, 0 ,SIGN_SIZE);
char mask = 1;
for(int i = 0; i < SIGN_SIZE; i++)
resultArray[i] = (sign & (mask << i)) >> i;
}
__host__ __device__ void convertTextToBitArray(char * text, int length, short * resultArray)
{
//memset(resultArray, 0 ,length);
for(int i = 0; i < MAX_TEXT_LEN; i++)
{
if(i < length)
convertSignToBitArray(text[i],resultArray + i*SIGN_SIZE);
else
convertSignToBitArray('a',resultArray + i*SIGN_SIZE);
}
}
void generateRandomPermutation(int signsCount, int length, char *resultArray)
{
for(int i = 0; i < length; i++)
resultArray[i] = 'a' + rand() % signsCount;
}
__host__ __device__ void generatePermutation(unsigned long long combination, int signsCount, int length, char * resultArray)
{
for(int i = 0; i < length; i++)
{
int res = combination % signsCount;
resultArray[i] = 'a' + res;
combination /= signsCount;
}
}
__global__ void CrackingDESKernel(short * _cipherText, short * _plainText, int signsCount, unsigned long long threadsCount, int group, int keyLength, struct result * result)
{
__shared__ short cipherText[MSGBITLEN];
__shared__ short plainText[MSGBITLEN];
unsigned long long position = (blockIdx.x + group * MAXBLOCKCOUNT) * BLOCKSIZE + threadIdx.x;
if(threadIdx.x < MSGBITLEN)
{
cipherText[threadIdx.x] = _cipherText[threadIdx.x];
plainText[threadIdx.x] = _plainText[threadIdx.x];
}
__syncthreads();
if(position >= threadsCount)
return;
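	// Each thread tests one candidate key: decode its global index into a letter
	// permutation, expand it to bits, and check whether DES-encrypting the shared
	// plaintext with it reproduces the target ciphertext.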
char * code = new char[MSGLEN];
short * key = new short[MSGBITLEN];
bool * res = new bool[1];
generatePermutation(position, signsCount, MSGLEN, code);
convertTextToBitArray(code,keyLength,key);
encipherTextGPU(plainText, key, cipherText, res);
if(*res)
{
result->isCracked = true;
result->keyNumber = position;
}
delete[] code;
delete[] key;
delete[] res;
return;
}
void ERR(char *msg)
{
fprintf(stderr,"Error: %s\n", msg);
exit(1);
}
int main()
{
char * plainText = new char[MSGLEN+1];
char * key = new char[MSGLEN+1];
short * plainBitText = new short[MSGBITLEN];
short * cipherBitText = new short[MSGBITLEN];
short * keyBit = new short[MSGBITLEN];
hipEvent_t timerStart, timerStop;
float timer;
short * d_cipherText, * d_plainText;
int signsCount = 0;
printf("Enter the alphabet size (from 1 to 26).\n");
scanf("%d", &signsCount);
printf("Enter the plain text (maximum 8 signs).\n");
scanf("%s", plainText);
convertTextToBitArray(plainText,8,plainBitText);
printf("Enter the key text (maximum 8 signs).\n");
scanf("%s", key);
int keyLength = strlen(key);
int option = 0;
printf("Choose cracking type: 0 - sequentialy, 1 - randomize.\n");
scanf("%d", &option);
convertTextToBitArray(key,keyLength,keyBit);
encipherTextCPU(plainBitText, keyBit, cipherBitText);
printf("Cipher text generated from given text and key, now lets try to crack it.\n");
if(hipMalloc((void**) &d_cipherText, sizeof(short)*MSGBITLEN) != hipSuccess)
ERR("hipMalloc");
if(hipMemcpy(d_cipherText, cipherBitText, sizeof(short)*MSGBITLEN, hipMemcpyHostToDevice) != hipSuccess)
ERR("hipMemcpy");
if(hipMalloc((void**) &d_plainText, sizeof(short)*MSGBITLEN) != hipSuccess)
ERR("hipMalloc");
char * code = new char[MSGLEN];
struct result * result = new struct result;
result->isCracked = false;
result->keyNumber = -1;
struct result * d_result;
if(hipMalloc((void**) &d_result, sizeof(struct result)) != hipSuccess)
ERR("hipMalloc");
if(hipMemcpy(d_result, result, sizeof(struct result), hipMemcpyHostToDevice) != hipSuccess)
ERR("hipMemcpy");
unsigned long long threadsCount = 1;
for(int i = 0; i < keyLength; i++)
threadsCount *= signsCount;
int blocksCount = threadsCount / BLOCKSIZE + 1;
int groupsCount = 1;
if(blocksCount > MAXBLOCKCOUNT)
{
groupsCount = blocksCount / MAXBLOCKCOUNT + 1;
blocksCount = MAXBLOCKCOUNT;
}
unsigned long long messageCombination = 0;
unsigned long long textsCount = 1;
for(int i = 0; i < MSGLEN; i++)
textsCount *= signsCount;
srand(time(NULL));
hipEventCreate(&timerStart, 0);
hipEventCreate(&timerStop, 0);
hipEventRecord(timerStart, 0);
while(messageCombination < textsCount || option)
{
printf("Cracking iteration %lld of %lld\n",messageCombination, textsCount);
if(!option)
generatePermutation(messageCombination, signsCount, MSGLEN, code);
else
generateRandomPermutation(signsCount, MSGLEN, code);
convertTextToBitArray(code,MSGLEN,plainBitText);
messageCombination++;
if(hipMemcpy(d_plainText, plainBitText, sizeof(short)*MSGBITLEN, hipMemcpyHostToDevice) != hipSuccess)
ERR("hipMemcpy");
for(int group = 0; group < groupsCount; group++)
{
hipLaunchKernelGGL(( CrackingDESKernel), dim3(blocksCount),dim3(BLOCKSIZE), 0, 0, d_cipherText, d_plainText, signsCount, threadsCount, group, keyLength, d_result);
gpuErrchk(hipPeekAtLastError());
if(hipDeviceSynchronize() != hipSuccess)
ERR("hipDeviceSynchronize");
if(hipMemcpy(result, d_result, sizeof(struct result), hipMemcpyDeviceToHost) != hipSuccess)
ERR("hipMemcpy");
if(result->isCracked)
break;
}
if(result->isCracked)
{
printf("MESSAGE CRACKED\n");
printf("MSG: ");
for(int i=0; i < MSGLEN; i++)
printf("%c",code[i]);
printf("\n");
generatePermutation(result->keyNumber, signsCount, MSGLEN, code);
printf("KEY: ");
for(int i=0; i < keyLength; i++)
printf("%c",code[i]);
printf("\n");
break;
}
}
if(hipEventRecord(timerStop, 0) != hipSuccess)
ERR("hipEventRecord");
if(hipEventSynchronize(timerStop) != hipSuccess)
ERR("hipEventSynchronize");
if(hipDeviceSynchronize() != hipSuccess)
ERR("hipDeviceSynchronize");
hipEventElapsedTime(&timer, timerStart, timerStop);
printf("\n");
printf("TIME = %d s %d ms\n", ((int)timer) / 1000, ((int)timer) % 1000);
hipEventDestroy(timerStart);
hipEventDestroy(timerStop);
if(hipFree(d_cipherText) != hipSuccess)
ERR("hipFree");
if(hipFree(d_plainText) != hipSuccess)
ERR("hipFree");
delete[] plainText;
delete[] key;
delete[] plainBitText;
delete[] cipherBitText;
delete[] keyBit;
} | 6fd406257e12815ca0106774f9b10cff1a75b281.cu | #include "Arrays.cuh"
#include "CrackingDES.cuh"
inline void gpuAssert(cudaError_t code, char *file, int line)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
}
struct result
{
bool isCracked;
int keyNumber;
};
void encipherTextCPU(short * message, short * key, short * cipherMessage)
{
short C[SHIFTSLEN+1][BLOCKSLEN];
short D[SHIFTSLEN+1][BLOCKSLEN];
short L[IPMSGCOUNT+1][MSGBITLEN/2];
short R[IPMSGCOUNT+1][MSGBITLEN/2];
short expandedR[EXTENDEDLEN];
short sboxes[SBOXCOUNT][SBOXSIZE];
short keys[KEYCOUNT][PC2LEN];
for(int i = 0; i < BLOCKSLEN; i++)
{
C[0][i] = key[PC1[i]-1];
D[0][i] = key[PC1[BLOCKSLEN + i]-1];
}
for(int i = 1; i < SHIFTSLEN+1; i++)
{
for(int j = 0; j < BLOCKSLEN - leftShifts[i]; j++)
{
C[i][j] = C[i-1][j + leftShifts[i]];
D[i][j] = D[i-1][j + leftShifts[i]];
}
for(int j = 0; j < leftShifts[i]; j++)
{
C[i][j + BLOCKSLEN - leftShifts[i]] = C[i-1][j];
D[i][j + BLOCKSLEN - leftShifts[i]] = D[i-1][j];
}
for(int j = 0; j < PC2LEN; j++)
{
if(PC2[j] - 1 < BLOCKSLEN)
keys[i-1][j] = C[i][PC2[j]-1];
else
keys[i-1][j] = D[i][PC2[j]-BLOCKSLEN-1];
}
}
for(int i = 0; i < MSGBITLEN/2; i++)
{
L[0][i] = message[IP[i]-1];
R[0][i] = message[IP[MSGBITLEN/2 + i]-1];
}
for(int i = 1; i < IPMSGCOUNT+1; i++)
{
for(int j = 0; j < EXTENDEDLEN; j++)
expandedR[j] = R[i-1][selectionTable[j] - 1] ^ keys[i-1][j];
for(int j = 0; j < SBOXCOUNT; j++)
{
short row = 2 * expandedR[j*SBLOCKSIZE] + expandedR[j*SBLOCKSIZE + 5];
short column = 8 * expandedR[j*SBLOCKSIZE + 1]
+ 4 * expandedR[j*SBLOCKSIZE + 2] + 2 * expandedR[j*SBLOCKSIZE + 3]
+ expandedR[j*SBLOCKSIZE + 4];
short sValue = S[j][row*SCOLUMNS + column];
short mask = 1;
for(int k = 0; k < SBOXSIZE; k++)
sboxes[j][SBOXSIZE - k -1] = (sValue & (mask << k)) >> k;
}
for(int j = 0; j < MSGBITLEN/2; j++)
{
L[i][j] = R[i-1][j];
R[i][j] = (L[i-1][j] + sboxes[(P[j]-1) / SBOXSIZE][(P[j]-1) % SBOXSIZE]) % 2;
}
}
for(int i = 0; i < MSGBITLEN; i++)
{
if(reverseIP[i] < MSGBITLEN/2)
cipherMessage[i] = R[16][reverseIP[i] - 1];
else
cipherMessage[i] = L[16][reverseIP[i] - 1 - MSGBITLEN/2];
}
}
__device__ void encipherTextGPU(short * message, short * key, short * cipherMessage, bool * result)
{
short C[SHIFTSLEN+1][BLOCKSLEN];
short D[SHIFTSLEN+1][BLOCKSLEN];
short L[IPMSGCOUNT+1][MSGBITLEN/2];
short R[IPMSGCOUNT+1][MSGBITLEN/2];
short expandedR[EXTENDEDLEN];
short sboxes[SBOXCOUNT][SBOXSIZE];
short keys[KEYCOUNT][PC2LEN];
for(int i = 0; i < BLOCKSLEN; i++)
{
C[0][i] = key[d_PC1[i]-1];
D[0][i] = key[d_PC1[BLOCKSLEN + i]-1];
}
for(int i = 1; i < SHIFTSLEN+1; i++)
{
for(int j = 0; j < BLOCKSLEN - d_leftShifts[i]; j++)
{
C[i][j] = C[i-1][j + d_leftShifts[i]];
D[i][j] = D[i-1][j + d_leftShifts[i]];
}
for(int j = 0; j < d_leftShifts[i]; j++)
{
C[i][j + BLOCKSLEN - d_leftShifts[i]] = C[i-1][j];
D[i][j + BLOCKSLEN - d_leftShifts[i]] = D[i-1][j];
}
for(int j = 0; j < PC2LEN; j++)
{
if(d_PC2[j] - 1 < BLOCKSLEN)
keys[i-1][j] = C[i][d_PC2[j]-1];
else
keys[i-1][j] = D[i][d_PC2[j]-BLOCKSLEN-1];
}
}
for(int i = 0; i < MSGBITLEN/2; i++)
{
L[0][i] = message[d_IP[i]-1];
R[0][i] = message[d_IP[MSGBITLEN/2 + i]-1];
}
for(int i = 1; i < IPMSGCOUNT+1; i++)
{
for(int j = 0; j < EXTENDEDLEN; j++)
expandedR[j] = R[i-1][d_selectionTable[j] - 1] ^ keys[i-1][j];
for(int j = 0; j < SBOXCOUNT; j++)
{
short row = 2 * expandedR[j*SBLOCKSIZE] + expandedR[j*SBLOCKSIZE + 5];
short column = 8 * expandedR[j*SBLOCKSIZE + 1]
+ 4 * expandedR[j*SBLOCKSIZE + 2] + 2 * expandedR[j*SBLOCKSIZE + 3]
+ expandedR[j*SBLOCKSIZE + 4];
short sValue = d_S[j][row*SCOLUMNS + column];
short mask = 1;
for(int k = 0; k < SBOXSIZE; k++)
sboxes[j][SBOXSIZE - k -1] = (sValue & (mask << k)) >> k;
}
for(int j = 0; j < MSGBITLEN/2; j++)
{
L[i][j] = R[i-1][j];
R[i][j] = (L[i-1][j] + sboxes[(d_P[j]-1) / SBOXSIZE][(d_P[j]-1) % SBOXSIZE]) % 2;
}
}
*result = true;
for(int i = 0; i < MSGBITLEN; i++)
{
if(d_reverseIP[i] < MSGBITLEN/2)
{
if(R[16][d_reverseIP[i] - 1] != cipherMessage[i])
{
*result = false;
break;
}
}
else if(L[16][d_reverseIP[i] - 1 - MSGBITLEN/2] != cipherMessage[i])
{
*result = false;
break;
}
}
if(*result)
return;
}
__host__ __device__ void convertSignToBitArray(char sign, short * resultArray)
{
//memset(resultArray, 0 ,SIGN_SIZE);
char mask = 1;
for(int i = 0; i < SIGN_SIZE; i++)
resultArray[i] = (sign & (mask << i)) >> i;
}
__host__ __device__ void convertTextToBitArray(char * text, int length, short * resultArray)
{
//memset(resultArray, 0 ,length);
for(int i = 0; i < MAX_TEXT_LEN; i++)
{
if(i < length)
convertSignToBitArray(text[i],resultArray + i*SIGN_SIZE);
else
convertSignToBitArray('a',resultArray + i*SIGN_SIZE);
}
}
void generateRandomPermutation(int signsCount, int length, char *resultArray)
{
for(int i = 0; i < length; i++)
resultArray[i] = 'a' + rand() % signsCount;
}
__host__ __device__ void generatePermutation(unsigned long long combination, int signsCount, int length, char * resultArray)
{
for(int i = 0; i < length; i++)
{
int res = combination % signsCount;
resultArray[i] = 'a' + res;
combination /= signsCount;
}
}
__global__ void CrackingDESKernel(short * _cipherText, short * _plainText, int signsCount, unsigned long long threadsCount, int group, int keyLength, struct result * result)
{
__shared__ short cipherText[MSGBITLEN];
__shared__ short plainText[MSGBITLEN];
unsigned long long position = (blockIdx.x + group * MAXBLOCKCOUNT) * BLOCKSIZE + threadIdx.x;
if(threadIdx.x < MSGBITLEN)
{
cipherText[threadIdx.x] = _cipherText[threadIdx.x];
plainText[threadIdx.x] = _plainText[threadIdx.x];
}
__syncthreads();
if(position >= threadsCount)
return;
char * code = new char[MSGLEN];
short * key = new short[MSGBITLEN];
bool * res = new bool[1];
generatePermutation(position, signsCount, MSGLEN, code);
convertTextToBitArray(code,keyLength,key);
encipherTextGPU(plainText, key, cipherText, res);
if(*res)
{
result->isCracked = true;
result->keyNumber = position;
}
delete[] code;
delete[] key;
delete[] res;
return;
}
void ERR(char *msg)
{
fprintf(stderr,"Error: %s\n", msg);
exit(1);
}
int main()
{
char * plainText = new char[MSGLEN+1];
char * key = new char[MSGLEN+1];
short * plainBitText = new short[MSGBITLEN];
short * cipherBitText = new short[MSGBITLEN];
short * keyBit = new short[MSGBITLEN];
cudaEvent_t timerStart, timerStop;
float timer;
short * d_cipherText, * d_plainText;
int signsCount = 0;
printf("Enter the alphabet size (from 1 to 26).\n");
scanf("%d", &signsCount);
printf("Enter the plain text (maximum 8 signs).\n");
scanf("%s", plainText);
convertTextToBitArray(plainText,8,plainBitText);
printf("Enter the key text (maximum 8 signs).\n");
scanf("%s", key);
int keyLength = strlen(key);
int option = 0;
printf("Choose cracking type: 0 - sequentialy, 1 - randomize.\n");
scanf("%d", &option);
convertTextToBitArray(key,keyLength,keyBit);
encipherTextCPU(plainBitText, keyBit, cipherBitText);
printf("Cipher text generated from given text and key, now lets try to crack it.\n");
if(cudaMalloc((void**) &d_cipherText, sizeof(short)*MSGBITLEN) != cudaSuccess)
ERR("cudaMalloc");
if(cudaMemcpy(d_cipherText, cipherBitText, sizeof(short)*MSGBITLEN, cudaMemcpyHostToDevice) != cudaSuccess)
ERR("cudaMemcpy");
if(cudaMalloc((void**) &d_plainText, sizeof(short)*MSGBITLEN) != cudaSuccess)
ERR("cudaMalloc");
char * code = new char[MSGLEN];
struct result * result = new struct result;
result->isCracked = false;
result->keyNumber = -1;
struct result * d_result;
if(cudaMalloc((void**) &d_result, sizeof(struct result)) != cudaSuccess)
ERR("cudaMalloc");
if(cudaMemcpy(d_result, result, sizeof(struct result), cudaMemcpyHostToDevice) != cudaSuccess)
ERR("cudaMemcpy");
unsigned long long threadsCount = 1;
for(int i = 0; i < keyLength; i++)
threadsCount *= signsCount;
int blocksCount = threadsCount / BLOCKSIZE + 1;
int groupsCount = 1;
if(blocksCount > MAXBLOCKCOUNT)
{
groupsCount = blocksCount / MAXBLOCKCOUNT + 1;
blocksCount = MAXBLOCKCOUNT;
}
unsigned long long messageCombination = 0;
unsigned long long textsCount = 1;
for(int i = 0; i < MSGLEN; i++)
textsCount *= signsCount;
srand(time(NULL));
cudaEventCreate(&timerStart, 0);
cudaEventCreate(&timerStop, 0);
cudaEventRecord(timerStart, 0);
while(messageCombination < textsCount || option)
{
printf("Cracking iteration %lld of %lld\n",messageCombination, textsCount);
if(!option)
generatePermutation(messageCombination, signsCount, MSGLEN, code);
else
generateRandomPermutation(signsCount, MSGLEN, code);
convertTextToBitArray(code,MSGLEN,plainBitText);
messageCombination++;
if(cudaMemcpy(d_plainText, plainBitText, sizeof(short)*MSGBITLEN, cudaMemcpyHostToDevice) != cudaSuccess)
ERR("cudaMemcpy");
for(int group = 0; group < groupsCount; group++)
{
CrackingDESKernel<<<blocksCount,BLOCKSIZE>>>(d_cipherText, d_plainText, signsCount, threadsCount, group, keyLength, d_result);
gpuErrchk(cudaPeekAtLastError());
if(cudaDeviceSynchronize() != cudaSuccess)
ERR("cudaDeviceSynchronize");
if(cudaMemcpy(result, d_result, sizeof(struct result), cudaMemcpyDeviceToHost) != cudaSuccess)
ERR("cudaMemcpy");
if(result->isCracked)
break;
}
if(result->isCracked)
{
printf("MESSAGE CRACKED\n");
printf("MSG: ");
for(int i=0; i < MSGLEN; i++)
printf("%c",code[i]);
printf("\n");
generatePermutation(result->keyNumber, signsCount, MSGLEN, code);
printf("KEY: ");
for(int i=0; i < keyLength; i++)
printf("%c",code[i]);
printf("\n");
break;
}
}
if(cudaEventRecord(timerStop, 0) != cudaSuccess)
ERR("cudaEventRecord");
if(cudaEventSynchronize(timerStop) != cudaSuccess)
ERR("cudaEventSynchronize");
if(cudaDeviceSynchronize() != cudaSuccess)
ERR("cudaDeviceSynchronize");
cudaEventElapsedTime(&timer, timerStart, timerStop);
printf("\n");
printf("TIME = %d s %d ms\n", ((int)timer) / 1000, ((int)timer) % 1000);
cudaEventDestroy(timerStart);
cudaEventDestroy(timerStop);
if(cudaFree(d_cipherText) != cudaSuccess)
ERR("cudaFree");
if(cudaFree(d_plainText) != cudaSuccess)
ERR("cudaFree");
delete[] plainText;
delete[] key;
delete[] plainBitText;
delete[] cipherBitText;
delete[] keyBit;
} |
29331fe20fb3642a926ac752568943c2f7d95d83.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <algorithm>
#include <cstdint>
#include "graph.cuh"
#include "gettime.h"
#include "MST.h"
#include "parallel.h"
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/extrema.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/constant_iterator.h>
const int BlockSize = 256;
using namespace std;
__global__
void init_Edges(wghEdge<intT> *input, intT size, intT *u, intT *v, double *w, intT *id) {
const int pos = threadIdx.x + blockIdx.x * blockDim.x;
if (pos < size) {
wghEdge<intT> e = input[pos];
u[pos] = e.u;
v[pos] = e.v;
w[pos] = e.weight;
id[pos] = pos;
u[pos+size] = e.v;
v[pos+size] = e.u;
w[pos+size] = e.weight;
id[pos+size] = pos;
}
}
struct UndirectedEdges {
thrust::device_vector<intT> s;
thrust::device_vector<intT> t;
thrust::device_vector<intT> id; // this stores the id marked after split_graph
thrust::device_vector<float> w;
thrust::device_vector<intT> result_id; // this stores the id for final result (the original id in the input file)
intT n_edges;
intT n_vertices;
struct init_operator {
typedef thrust::tuple<intT, intT, intT, float> Tuple;
__host__ __device__
Tuple operator() (const wghEdge<intT>& edge, const intT idx) {
return thrust::make_tuple(edge.u, edge.v, idx, edge.weight);
}
};
UndirectedEdges() {}
UndirectedEdges(intT m, intT n) :
s(m), t(m), id(m), w(m), result_id(m), n_edges(m), n_vertices(n) {}
UndirectedEdges(const wghEdgeArray<intT>& G):
s(G.m), t(G.m), id(G.m), w(G.m), result_id(G.m), n_edges(G.m), n_vertices(G.n) {
thrust::device_vector<wghEdge<intT>> E(G.E, G.E + G.m);
thrust::transform(
E.begin(), E.end(), thrust::make_counting_iterator(0),
thrust::make_zip_iterator(thrust::make_tuple(
s.begin(), t.begin(), result_id.begin(), w.begin())),
init_operator());
}
};
struct Edges {
thrust::device_vector<intT> u;
thrust::device_vector<intT> v;
thrust::device_vector<intT> id;
thrust::device_vector<double> w;
intT n_edges;
intT n_vertices;
Edges() { }
Edges(const wghEdgeArray<intT>& G) :
u(G.m*2), v(G.m*2), id(G.m*2), w(G.m*2), n_edges(G.m*2), n_vertices(G.n) {
thrust::device_vector<wghEdge<intT>> E(G.E, G.E + G.m);
hipLaunchKernelGGL(( init_Edges), dim3((G.m + BlockSize - 1) / BlockSize), dim3(BlockSize), 0, 0,
thrust::raw_pointer_cast(E.data()), G.m,
thrust::raw_pointer_cast(u.data()),
thrust::raw_pointer_cast(v.data()),
thrust::raw_pointer_cast(w.data()),
thrust::raw_pointer_cast(id.data()));
}
Edges(intT m, intT n) : u(m), v(m), id(m), w(m), n_edges(m), n_vertices(n) { }
};
template<typename T>
void print_vector(const T& vec, string text, uint32_t size=100) {
cout << text << endl;
for (size_t i = 0; i < vec.size() && i < size; ++i) {
cout << " " << vec[i];
}
cout << endl;
}
//--------------------------------------------------------------------------------
// kernels for mst
//--------------------------------------------------------------------------------
__global__
void remove_circles(intT *input, size_t size, intT* id, intT* output, intT *aux)
{
const uint32_t pos = threadIdx.x + blockIdx.x * blockDim.x;
if (pos < size) {
intT successor = input[pos];
intT s_successor = input[successor];
successor = ((successor > pos) && (s_successor == pos)) ? pos : successor;
//if ((successor > pos) && (s_successor == pos)) {
// successor = pos;
//}
output[pos] = successor;
if (aux) {
aux[pos] = (successor != pos) && (id[pos] >= 0);
}
}
}
__global__
void merge_vertices(intT *successors, size_t size)
{
const uint32_t pos = threadIdx.x + blockIdx.x * blockDim.x;
if (pos < size) {
bool goon = true;
int i = 0;
while (goon && (i++ < 50)) {
intT successor = successors[pos];
intT ssuccessor= successors[successor];
__syncthreads();
if (ssuccessor != successor) {
successors[pos] = ssuccessor;
}
goon = __any(ssuccessor != successor);
__syncthreads();
}
}
}
__global__
void mark_segments(intT *input, intT *output, size_t size)
{
const uint32_t pos = threadIdx.x + blockIdx.x * blockDim.x;
if (pos < size) {
output[pos] = ((pos == size-1) || (input[pos] != input[pos+1]));
}
}
__global__
void mark_edges_to_keep(
const intT *u, const intT *v,
const intT *new_vertices, intT *output, size_t size)
{
const uint32_t pos = threadIdx.x + blockIdx.x * blockDim.x;
if (pos < size) {
// true means the edge will be kept
output[pos] = (new_vertices[u[pos]] != new_vertices[v[pos]]);
}
}
__global__
void update_edges_with_new_vertices(
intT *u, intT *v, intT *new_vertices, size_t size)
{
const uint32_t pos = threadIdx.x + blockIdx.x * blockDim.x;
if (pos < size) {
u[pos] = new_vertices[u[pos]];
v[pos] = new_vertices[v[pos]];
}
}
//--------------------------------------------------------------------------------
// functors
//--------------------------------------------------------------------------------
__host__ __device__ bool operator< (const int2& a, const int2& b) {
return (a.x == b.x) ? (a.y < b.y) : (a.x < b.x);
};
struct binop_tuple_minimum {
typedef thrust::tuple<double, intT, intT> T; // (w, v, id)
__host__ __device__
T operator() (const T& a, const T& b) const {
return (thrust::get<0>(a) == thrust::get<0>(b)) ?
((thrust::get<1>(a) < thrust::get<1>(b)) ? a : b) :
((thrust::get<0>(a) < thrust::get<0>(b)) ? a : b);
}
};
//--------------------------------------------------------------------------------
// GPU MST
//--------------------------------------------------------------------------------
vector<pair<uint32_t, uint32_t>> split_graph(UndirectedEdges& edges)
{
vector<pair<uint32_t, uint32_t>> result;
UndirectedEdges edges_temp(edges.n_edges, edges.n_vertices);
thrust::device_vector<int> indices(edges.n_edges);
thrust::sequence(indices.begin(), indices.begin() + edges.n_edges);
thrust::sort_by_key(edges.w.begin(), edges.w.end(), indices.begin());
thrust::gather(indices.begin(), indices.end(),
thrust::make_zip_iterator(thrust::make_tuple(
edges.s.begin(), edges.t.begin(), edges.result_id.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
edges_temp.s.begin(), edges_temp.t.begin(), edges_temp.result_id.begin())));
thrust::sequence(edges.id.begin(), edges.id.end());
edges_temp.s.swap(edges.s);
edges_temp.t.swap(edges.t);
edges_temp.result_id.swap(edges.result_id);
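  // Split the weight-sorted edge list into exponentially growing slices
  // (n_vertices, 2*n_vertices, 4*n_vertices, ... edges), processed one at a time.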
for (uint32_t i = 0, k = 1; i < edges.n_edges; ++k) {
uint32_t size_k = min(int(pow(2, k-1)) * edges.n_vertices, edges.n_edges - i);
result.push_back(make_pair(i, i+size_k));
i += size_k;
}
return result;
}
void contract_and_build_subgraph(
uint32_t begin, uint32_t end,
UndirectedEdges& edges,
const thrust::device_vector<intT>& supervertices,
Edges& output)
{
uint32_t size = end - begin;
thrust::gather(edges.s.begin()+begin, edges.s.begin()+end,
supervertices.begin(),
edges.s.begin()+begin);
thrust::gather(edges.t.begin()+begin, edges.t.begin()+end,
supervertices.begin(),
edges.t.begin()+begin);
// build subgraph in directed edge list style
thrust::device_vector<intT> flags(size, 0);
thrust::device_vector<intT> indices(size);
hipLaunchKernelGGL(( mark_edges_to_keep), dim3((size + BlockSize - 1) / BlockSize), dim3(BlockSize), 0, 0,
thrust::raw_pointer_cast(edges.s.data()) + begin,
thrust::raw_pointer_cast(edges.t.data()) + begin,
thrust::raw_pointer_cast(supervertices.data()),
thrust::raw_pointer_cast(flags.data()), size);
thrust::exclusive_scan(flags.begin(), flags.begin() + size, indices.begin());
size = flags[size-1] + indices[size-1];
output.u.resize(size*2);
output.v.resize(size*2);
output.w.resize(size*2);
output.id.resize(size*2);
output.n_edges = size*2;
// parallel filtering edges
thrust::scatter_if(
thrust::make_zip_iterator(thrust::make_tuple(
edges.s.begin()+begin, edges.t.begin()+begin, edges.w.begin()+begin, edges.id.begin()+begin)),
thrust::make_zip_iterator(thrust::make_tuple(
edges.s.begin()+end, edges.t.begin()+end, edges.w.begin()+end, edges.id.begin()+end)),
indices.begin(), flags.begin(),
thrust::make_zip_iterator(thrust::make_tuple(
output.u.begin(), output.v.begin(), output.w.begin(), output.id.begin()))
);
thrust::copy(
thrust::make_zip_iterator(thrust::make_tuple(
output.u.begin(), output.v.begin(), output.w.begin(), output.id.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
output.u.begin()+size, output.v.begin()+size, output.w.begin()+size, output.id.begin()+size)),
thrust::make_zip_iterator(thrust::make_tuple(
output.v.begin()+size, output.u.begin()+size, output.w.begin()+size, output.id.begin()+size))
);
}
void boruvka_mst(
Edges& edges,
thrust::device_vector<intT>& supervertices,
thrust::device_vector<intT>& mst_edges,
intT &n_mst)
{
if (!edges.n_edges) return;
assert(supervertices.size() == edges.n_vertices);
size_t n_edges = edges.n_edges;
size_t n_vertices = edges.n_vertices;
thrust::device_vector<intT> succ_id(n_vertices);
thrust::device_vector<intT> succ_indices(n_vertices);
thrust::device_vector<intT> succ_temp(n_vertices);
thrust::device_vector<int> indices(n_edges);
thrust::device_vector<int> flags(n_edges);
Edges edges_temp(edges.n_edges, edges.n_vertices);
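  // Boruvka rounds: pick the minimum-weight outgoing edge per (super)vertex,
  // record it as an MST edge, contract the resulting components, and discard
  // edges that became self-loops; repeat until no cross-component edge is left.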
while (1) {
if (n_edges == 1) {
mst_edges[n_mst++] = edges.id[0];
return;
}
thrust::sequence(indices.begin(), indices.begin() + n_edges);
thrust::sort_by_key(edges.u.begin(), edges.u.begin() + n_edges, indices.begin());
thrust::gather(indices.begin(), indices.begin() + n_edges,
thrust::make_zip_iterator(thrust::make_tuple(
edges.v.begin(), edges.w.begin(), edges.id.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
edges_temp.v.begin(), edges_temp.w.begin(), edges_temp.id.begin())));
edges_temp.v.swap(edges.v);
edges_temp.w.swap(edges.w);
edges_temp.id.swap(edges.id);
auto new_last = thrust::reduce_by_key(
edges.u.begin(), edges.u.begin() + n_edges,
thrust::make_zip_iterator(thrust::make_tuple(
edges.w.begin(), edges.v.begin(), edges.id.begin())),
edges_temp.u.begin(),
thrust::make_zip_iterator(thrust::make_tuple(
edges_temp.w.begin(), edges_temp.v.begin(), edges_temp.id.begin())),
thrust::equal_to<intT>(),
binop_tuple_minimum());
size_t n_min_edges = new_last.first - edges_temp.u.begin();
    // Scatter each vertex's minimum-weight edge target into supervertices (the
    // successor array) and the corresponding edge id into succ_id.
thrust::fill(succ_id.begin(), succ_id.end(), -1);
thrust::scatter(
thrust::make_zip_iterator(thrust::make_tuple(
edges_temp.v.begin(), edges_temp.id.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
edges_temp.v.begin() + n_min_edges, edges_temp.id.begin() + n_min_edges)),
edges_temp.u.begin(),
thrust::make_zip_iterator(thrust::make_tuple(
supervertices.begin(), succ_id.begin())));
    // remove_circles writes the circle-free successor array into succ_indices and
    // sets succ_temp[v] = 1 for vertices whose minimum edge becomes a new MST edge.
    // Since we maintain a global supervertices array, succ_id is needed to know
    // which edge ids correspond to the newly generated MST edges.
hipLaunchKernelGGL(( remove_circles), dim3((edges.n_vertices + BlockSize - 1) / BlockSize), dim3(BlockSize), 0, 0,
thrust::raw_pointer_cast(supervertices.data()), edges.n_vertices,
thrust::raw_pointer_cast(succ_id.data()),
thrust::raw_pointer_cast(succ_indices.data()),
thrust::raw_pointer_cast(succ_temp.data()));
supervertices.swap(succ_indices);
thrust::exclusive_scan(succ_temp.begin(), succ_temp.begin() + n_vertices,
succ_indices.begin());
// save new mst edges
thrust::scatter_if(succ_id.begin(), succ_id.begin() + n_vertices,
succ_indices.begin(), succ_temp.begin(), mst_edges.begin() + n_mst);
n_mst += succ_indices[n_vertices-1] + succ_temp[n_vertices-1];
// generating super vertices (new vertices)
//thrust::sequence(succ_indices.begin(), succ_indices.begin() + n_vertices);
hipLaunchKernelGGL(( merge_vertices), dim3((edges.n_vertices + BlockSize - 1) / BlockSize), dim3(BlockSize), 0, 0,
thrust::raw_pointer_cast(supervertices.data()), edges.n_vertices);
// generating new edges
hipLaunchKernelGGL(( mark_edges_to_keep), dim3((n_edges + BlockSize - 1) / BlockSize), dim3(BlockSize), 0, 0,
thrust::raw_pointer_cast(edges.u.data()),
thrust::raw_pointer_cast(edges.v.data()),
thrust::raw_pointer_cast(supervertices.data()),
thrust::raw_pointer_cast(flags.data()), n_edges);
thrust::exclusive_scan(flags.begin(), flags.begin() + n_edges,
indices.begin());
intT new_edge_size = indices[n_edges-1] + flags[n_edges-1];
if (!new_edge_size) { return; }
thrust::scatter_if(
thrust::make_zip_iterator(thrust::make_tuple(
edges.u.begin(), edges.v.begin(), edges.w.begin(), edges.id.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
edges.u.begin() + n_edges, edges.v.begin() + n_edges, edges.w.begin() + n_edges, edges.id.begin() + n_edges)),
indices.begin(), flags.begin(),
thrust::make_zip_iterator(thrust::make_tuple(
edges_temp.u.begin(), edges_temp.v.begin(), edges_temp.w.begin(), edges_temp.id.begin()))
);
hipLaunchKernelGGL(( update_edges_with_new_vertices), dim3((new_edge_size + BlockSize - 1) / BlockSize), dim3(BlockSize), 0, 0,
thrust::raw_pointer_cast(edges_temp.v.data()),
thrust::raw_pointer_cast(edges_temp.u.data()),
thrust::raw_pointer_cast(supervertices.data()), new_edge_size);
edges.u.swap(edges_temp.u);
edges.v.swap(edges_temp.v);
edges.w.swap(edges_temp.w);
edges.id.swap(edges_temp.id);
assert(n_edges != new_edge_size);
n_edges = new_edge_size;
}
}
//--------------------------------------------------------------------------------
// top level mst
//--------------------------------------------------------------------------------
std::pair<intT*,intT> mst(wghEdgeArray<intT> G)
{
startTime();
UndirectedEdges edges(G);
nextTime("prepare graph");
Edges subgraph;
subgraph.n_vertices = G.n;
thrust::device_vector<intT> supervertices(G.n);
thrust::device_vector<intT> mst_edges(G.m);
intT n_mst = 0;
thrust::sequence(supervertices.begin(), supervertices.end());
auto split_indices = split_graph(edges);
for (auto it = split_indices.begin(); ; ) {
contract_and_build_subgraph(
it->first, it->second, edges, supervertices,
subgraph);
    // this step, contrary to the paper, also handles connected components by
    // updating the global supervertices array
boruvka_mst(subgraph, supervertices, mst_edges, n_mst);
if (split_indices.end() == (++it)) break;
}
// fetch result ids, stored to edges.id temporarily
thrust::gather(mst_edges.begin(), mst_edges.begin() + n_mst,
edges.result_id.begin(), edges.id.begin());
intT *result_mst_edges = new intT[n_mst];
thrust::copy(edges.id.begin(), edges.id.begin() + n_mst, result_mst_edges);
return make_pair(result_mst_edges, n_mst);
}
| 29331fe20fb3642a926ac752568943c2f7d95d83.cu | #include <iostream>
#include <algorithm>
#include <cstdint>
#include "graph.cuh"
#include "gettime.h"
#include "MST.h"
#include "parallel.h"
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/extrema.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/constant_iterator.h>
const int BlockSize = 256;
using namespace std;
__global__
void init_Edges(wghEdge<intT> *input, intT size, intT *u, intT *v, double *w, intT *id) {
const int pos = threadIdx.x + blockIdx.x * blockDim.x;
if (pos < size) {
wghEdge<intT> e = input[pos];
u[pos] = e.u;
v[pos] = e.v;
w[pos] = e.weight;
id[pos] = pos;
u[pos+size] = e.v;
v[pos+size] = e.u;
w[pos+size] = e.weight;
id[pos+size] = pos;
}
}
struct UndirectedEdges {
thrust::device_vector<intT> s;
thrust::device_vector<intT> t;
thrust::device_vector<intT> id; // this stores the id marked after split_graph
thrust::device_vector<float> w;
thrust::device_vector<intT> result_id; // this stores the id for final result (the original id in the input file)
intT n_edges;
intT n_vertices;
struct init_operator {
typedef thrust::tuple<intT, intT, intT, float> Tuple;
__host__ __device__
Tuple operator() (const wghEdge<intT>& edge, const intT idx) {
return thrust::make_tuple(edge.u, edge.v, idx, edge.weight);
}
};
UndirectedEdges() {}
UndirectedEdges(intT m, intT n) :
s(m), t(m), id(m), w(m), result_id(m), n_edges(m), n_vertices(n) {}
UndirectedEdges(const wghEdgeArray<intT>& G):
s(G.m), t(G.m), id(G.m), w(G.m), result_id(G.m), n_edges(G.m), n_vertices(G.n) {
thrust::device_vector<wghEdge<intT>> E(G.E, G.E + G.m);
thrust::transform(
E.begin(), E.end(), thrust::make_counting_iterator(0),
thrust::make_zip_iterator(thrust::make_tuple(
s.begin(), t.begin(), result_id.begin(), w.begin())),
init_operator());
}
};
struct Edges {
thrust::device_vector<intT> u;
thrust::device_vector<intT> v;
thrust::device_vector<intT> id;
thrust::device_vector<double> w;
intT n_edges;
intT n_vertices;
Edges() { }
Edges(const wghEdgeArray<intT>& G) :
u(G.m*2), v(G.m*2), id(G.m*2), w(G.m*2), n_edges(G.m*2), n_vertices(G.n) {
thrust::device_vector<wghEdge<intT>> E(G.E, G.E + G.m);
init_Edges<<<(G.m + BlockSize - 1) / BlockSize, BlockSize>>>
(thrust::raw_pointer_cast(E.data()), G.m,
thrust::raw_pointer_cast(u.data()),
thrust::raw_pointer_cast(v.data()),
thrust::raw_pointer_cast(w.data()),
thrust::raw_pointer_cast(id.data()));
}
Edges(intT m, intT n) : u(m), v(m), id(m), w(m), n_edges(m), n_vertices(n) { }
};
template<typename T>
void print_vector(const T& vec, string text, uint32_t size=100) {
cout << text << endl;
for (size_t i = 0; i < vec.size() && i < size; ++i) {
cout << " " << vec[i];
}
cout << endl;
}
//--------------------------------------------------------------------------------
// kernels for mst
//--------------------------------------------------------------------------------
__global__
void remove_circles(intT *input, size_t size, intT* id, intT* output, intT *aux)
{
const uint32_t pos = threadIdx.x + blockIdx.x * blockDim.x;
if (pos < size) {
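    // Break 2-cycles: when a vertex and its successor point at each other,
    // the lower-indexed one becomes its own successor (a component root).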
intT successor = input[pos];
intT s_successor = input[successor];
successor = ((successor > pos) && (s_successor == pos)) ? pos : successor;
//if ((successor > pos) && (s_successor == pos)) {
// successor = pos;
//}
output[pos] = successor;
if (aux) {
aux[pos] = (successor != pos) && (id[pos] >= 0);
}
}
}
__global__
void merge_vertices(intT *successors, size_t size)
{
const uint32_t pos = threadIdx.x + blockIdx.x * blockDim.x;
if (pos < size) {
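    // Pointer jumping: repeatedly replace each vertex's successor with its
    // successor's successor until every vertex points directly at its root
    // (capped at 50 iterations).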
bool goon = true;
int i = 0;
while (goon && (i++ < 50)) {
intT successor = successors[pos];
intT ssuccessor= successors[successor];
__syncthreads();
if (ssuccessor != successor) {
successors[pos] = ssuccessor;
}
goon = __any(ssuccessor != successor);
__syncthreads();
}
}
}
__global__
void mark_segments(intT *input, intT *output, size_t size)
{
const uint32_t pos = threadIdx.x + blockIdx.x * blockDim.x;
if (pos < size) {
output[pos] = ((pos == size-1) || (input[pos] != input[pos+1]));
}
}
__global__
void mark_edges_to_keep(
const intT *u, const intT *v,
const intT *new_vertices, intT *output, size_t size)
{
const uint32_t pos = threadIdx.x + blockIdx.x * blockDim.x;
if (pos < size) {
// true means the edge will be kept
output[pos] = (new_vertices[u[pos]] != new_vertices[v[pos]]);
}
}
__global__
void update_edges_with_new_vertices(
intT *u, intT *v, intT *new_vertices, size_t size)
{
const uint32_t pos = threadIdx.x + blockIdx.x * blockDim.x;
if (pos < size) {
u[pos] = new_vertices[u[pos]];
v[pos] = new_vertices[v[pos]];
}
}
//--------------------------------------------------------------------------------
// functors
//--------------------------------------------------------------------------------
__host__ __device__ bool operator< (const int2& a, const int2& b) {
return (a.x == b.x) ? (a.y < b.y) : (a.x < b.x);
};
struct binop_tuple_minimum {
typedef thrust::tuple<double, intT, intT> T; // (w, v, id)
__host__ __device__
T operator() (const T& a, const T& b) const {
return (thrust::get<0>(a) == thrust::get<0>(b)) ?
((thrust::get<1>(a) < thrust::get<1>(b)) ? a : b) :
((thrust::get<0>(a) < thrust::get<0>(b)) ? a : b);
}
};
//--------------------------------------------------------------------------------
// GPU MST
//--------------------------------------------------------------------------------
vector<pair<uint32_t, uint32_t>> split_graph(UndirectedEdges& edges)
{
vector<pair<uint32_t, uint32_t>> result;
UndirectedEdges edges_temp(edges.n_edges, edges.n_vertices);
thrust::device_vector<int> indices(edges.n_edges);
thrust::sequence(indices.begin(), indices.begin() + edges.n_edges);
thrust::sort_by_key(edges.w.begin(), edges.w.end(), indices.begin());
thrust::gather(indices.begin(), indices.end(),
thrust::make_zip_iterator(thrust::make_tuple(
edges.s.begin(), edges.t.begin(), edges.result_id.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
edges_temp.s.begin(), edges_temp.t.begin(), edges_temp.result_id.begin())));
thrust::sequence(edges.id.begin(), edges.id.end());
edges_temp.s.swap(edges.s);
edges_temp.t.swap(edges.t);
edges_temp.result_id.swap(edges.result_id);
for (uint32_t i = 0, k = 1; i < edges.n_edges; ++k) {
uint32_t size_k = min(int(pow(2, k-1)) * edges.n_vertices, edges.n_edges - i);
result.push_back(make_pair(i, i+size_k));
i += size_k;
}
return result;
}
void contract_and_build_subgraph(
uint32_t begin, uint32_t end,
UndirectedEdges& edges,
const thrust::device_vector<intT>& supervertices,
Edges& output)
{
uint32_t size = end - begin;
thrust::gather(edges.s.begin()+begin, edges.s.begin()+end,
supervertices.begin(),
edges.s.begin()+begin);
thrust::gather(edges.t.begin()+begin, edges.t.begin()+end,
supervertices.begin(),
edges.t.begin()+begin);
// build subgraph in directed edge list style
thrust::device_vector<intT> flags(size, 0);
thrust::device_vector<intT> indices(size);
mark_edges_to_keep<<<(size + BlockSize - 1) / BlockSize, BlockSize>>>
(thrust::raw_pointer_cast(edges.s.data()) + begin,
thrust::raw_pointer_cast(edges.t.data()) + begin,
thrust::raw_pointer_cast(supervertices.data()),
thrust::raw_pointer_cast(flags.data()), size);
thrust::exclusive_scan(flags.begin(), flags.begin() + size, indices.begin());
size = flags[size-1] + indices[size-1];
output.u.resize(size*2);
output.v.resize(size*2);
output.w.resize(size*2);
output.id.resize(size*2);
output.n_edges = size*2;
// parallel filtering edges
thrust::scatter_if(
thrust::make_zip_iterator(thrust::make_tuple(
edges.s.begin()+begin, edges.t.begin()+begin, edges.w.begin()+begin, edges.id.begin()+begin)),
thrust::make_zip_iterator(thrust::make_tuple(
edges.s.begin()+end, edges.t.begin()+end, edges.w.begin()+end, edges.id.begin()+end)),
indices.begin(), flags.begin(),
thrust::make_zip_iterator(thrust::make_tuple(
output.u.begin(), output.v.begin(), output.w.begin(), output.id.begin()))
);
thrust::copy(
thrust::make_zip_iterator(thrust::make_tuple(
output.u.begin(), output.v.begin(), output.w.begin(), output.id.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
output.u.begin()+size, output.v.begin()+size, output.w.begin()+size, output.id.begin()+size)),
thrust::make_zip_iterator(thrust::make_tuple(
output.v.begin()+size, output.u.begin()+size, output.w.begin()+size, output.id.begin()+size))
);
}
void boruvka_mst(
Edges& edges,
thrust::device_vector<intT>& supervertices,
thrust::device_vector<intT>& mst_edges,
intT &n_mst)
{
if (!edges.n_edges) return;
assert(supervertices.size() == edges.n_vertices);
size_t n_edges = edges.n_edges;
size_t n_vertices = edges.n_vertices;
thrust::device_vector<intT> succ_id(n_vertices);
thrust::device_vector<intT> succ_indices(n_vertices);
thrust::device_vector<intT> succ_temp(n_vertices);
thrust::device_vector<int> indices(n_edges);
thrust::device_vector<int> flags(n_edges);
Edges edges_temp(edges.n_edges, edges.n_vertices);
while (1) {
if (n_edges == 1) {
mst_edges[n_mst++] = edges.id[0];
return;
}
thrust::sequence(indices.begin(), indices.begin() + n_edges);
thrust::sort_by_key(edges.u.begin(), edges.u.begin() + n_edges, indices.begin());
thrust::gather(indices.begin(), indices.begin() + n_edges,
thrust::make_zip_iterator(thrust::make_tuple(
edges.v.begin(), edges.w.begin(), edges.id.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
edges_temp.v.begin(), edges_temp.w.begin(), edges_temp.id.begin())));
edges_temp.v.swap(edges.v);
edges_temp.w.swap(edges.w);
edges_temp.id.swap(edges.id);
auto new_last = thrust::reduce_by_key(
edges.u.begin(), edges.u.begin() + n_edges,
thrust::make_zip_iterator(thrust::make_tuple(
edges.w.begin(), edges.v.begin(), edges.id.begin())),
edges_temp.u.begin(),
thrust::make_zip_iterator(thrust::make_tuple(
edges_temp.w.begin(), edges_temp.v.begin(), edges_temp.id.begin())),
thrust::equal_to<intT>(),
binop_tuple_minimum());
size_t n_min_edges = new_last.first - edges_temp.u.begin();
    // Scatter each vertex's minimum-weight edge target into supervertices (the
    // successor array) and the corresponding edge id into succ_id.
thrust::fill(succ_id.begin(), succ_id.end(), -1);
thrust::scatter(
thrust::make_zip_iterator(thrust::make_tuple(
edges_temp.v.begin(), edges_temp.id.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
edges_temp.v.begin() + n_min_edges, edges_temp.id.begin() + n_min_edges)),
edges_temp.u.begin(),
thrust::make_zip_iterator(thrust::make_tuple(
supervertices.begin(), succ_id.begin())));
    // remove_circles writes the circle-free successor array into succ_indices and
    // sets succ_temp[v] = 1 for vertices whose minimum edge becomes a new MST edge.
    // Since we maintain a global supervertices array, succ_id is needed to know
    // which edge ids correspond to the newly generated MST edges.
remove_circles<<<(edges.n_vertices + BlockSize - 1) / BlockSize, BlockSize>>>
(thrust::raw_pointer_cast(supervertices.data()), edges.n_vertices,
thrust::raw_pointer_cast(succ_id.data()),
thrust::raw_pointer_cast(succ_indices.data()),
thrust::raw_pointer_cast(succ_temp.data()));
supervertices.swap(succ_indices);
thrust::exclusive_scan(succ_temp.begin(), succ_temp.begin() + n_vertices,
succ_indices.begin());
// save new mst edges
thrust::scatter_if(succ_id.begin(), succ_id.begin() + n_vertices,
succ_indices.begin(), succ_temp.begin(), mst_edges.begin() + n_mst);
n_mst += succ_indices[n_vertices-1] + succ_temp[n_vertices-1];
// generating super vertices (new vertices)
//thrust::sequence(succ_indices.begin(), succ_indices.begin() + n_vertices);
merge_vertices<<<(edges.n_vertices + BlockSize - 1) / BlockSize, BlockSize>>>
(thrust::raw_pointer_cast(supervertices.data()), edges.n_vertices);
// generating new edges
mark_edges_to_keep<<<(n_edges + BlockSize - 1) / BlockSize, BlockSize>>>
(thrust::raw_pointer_cast(edges.u.data()),
thrust::raw_pointer_cast(edges.v.data()),
thrust::raw_pointer_cast(supervertices.data()),
thrust::raw_pointer_cast(flags.data()), n_edges);
thrust::exclusive_scan(flags.begin(), flags.begin() + n_edges,
indices.begin());
intT new_edge_size = indices[n_edges-1] + flags[n_edges-1];
if (!new_edge_size) { return; }
thrust::scatter_if(
thrust::make_zip_iterator(thrust::make_tuple(
edges.u.begin(), edges.v.begin(), edges.w.begin(), edges.id.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
edges.u.begin() + n_edges, edges.v.begin() + n_edges, edges.w.begin() + n_edges, edges.id.begin() + n_edges)),
indices.begin(), flags.begin(),
thrust::make_zip_iterator(thrust::make_tuple(
edges_temp.u.begin(), edges_temp.v.begin(), edges_temp.w.begin(), edges_temp.id.begin()))
);
update_edges_with_new_vertices<<<(new_edge_size + BlockSize - 1) / BlockSize, BlockSize>>>
(thrust::raw_pointer_cast(edges_temp.v.data()),
thrust::raw_pointer_cast(edges_temp.u.data()),
thrust::raw_pointer_cast(supervertices.data()), new_edge_size);
edges.u.swap(edges_temp.u);
edges.v.swap(edges_temp.v);
edges.w.swap(edges_temp.w);
edges.id.swap(edges_temp.id);
assert(n_edges != new_edge_size);
n_edges = new_edge_size;
}
}
//--------------------------------------------------------------------------------
// top level mst
//--------------------------------------------------------------------------------
std::pair<intT*,intT> mst(wghEdgeArray<intT> G)
{
startTime();
UndirectedEdges edges(G);
nextTime("prepare graph");
Edges subgraph;
subgraph.n_vertices = G.n;
thrust::device_vector<intT> supervertices(G.n);
thrust::device_vector<intT> mst_edges(G.m);
intT n_mst = 0;
thrust::sequence(supervertices.begin(), supervertices.end());
auto split_indices = split_graph(edges);
for (auto it = split_indices.begin(); ; ) {
contract_and_build_subgraph(
it->first, it->second, edges, supervertices,
subgraph);
// this step, contrary to the paper, also handles connected components by updating the global supervertices
boruvka_mst(subgraph, supervertices, mst_edges, n_mst);
if (split_indices.end() == (++it)) break;
}
// fetch result ids, stored to edges.id temporarily
thrust::gather(mst_edges.begin(), mst_edges.begin() + n_mst,
edges.result_id.begin(), edges.id.begin());
intT *result_mst_edges = new intT[n_mst];
thrust::copy(edges.id.begin(), edges.id.begin() + n_mst, result_mst_edges);
return make_pair(result_mst_edges, n_mst);
}
|
4ea0ebabc3119c48fe7cb3073691fa09e77ca409.hip | // !!! This is a file automatically generated by hipify!!!
#include <memory>
#include <string.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include <hip/hip_runtime.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include "../include/NVStrings.h"
//
// cd ../build
// nvcc -w -std=c++11 --expt-extended-lambda -gencode arch=compute_70,code=sm_70 ../tests/test.cu -L. -lNVStrings -o test --linker-options -rpath,.:
//
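// (note: the build comment above is inherited from the CUDA version; the HIP port
// would presumably be built with hipcc and equivalent options instead of nvcc)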
// csv file contents in device memory
void* d_fileContents = 0;
// return a vector of DString's we wish to process
std::pair<const char*,size_t>* setupTest(int& linesCount, int column)
{
//FILE* fp = fopen("../../data/1420-rows.csv", "rb");
FILE* fp = fopen("../../data/7584-rows.csv", "rb");
if( !fp )
{
printf("missing csv file\n");
return 0;
}
fseek(fp, 0, SEEK_END);
int fileSize = (int)ftell(fp);
fseek(fp, 0, SEEK_SET);
printf("File size = %d bytes\n", fileSize);
if( fileSize < 2 )
{
fclose(fp);
return 0;
}
// load file into memory
int contentsSize = fileSize+2;
char* contents = new char[contentsSize+2];
fread(contents, 1, fileSize, fp);
contents[fileSize] = '\r'; // line terminate
contents[fileSize+1] = 0; // and null-terminate
fclose(fp);
// find lines -- compute offsets vector values
thrust::host_vector<int> lineOffsets;
char* ptr = contents;
while( *ptr )
{
char ch = *ptr;
if( ch=='\r' )
{
*ptr = 0;
while(ch && (ch < ' ')) ch = *(++ptr);
lineOffsets.push_back((int)(ptr - contents));
continue;
}
++ptr;
}
linesCount = (int)lineOffsets.size();
printf("Found %d lines\n",linesCount);
// copy file contents into device memory
char* d_contents = 0;
hipMalloc(&d_contents,contentsSize);
hipMemcpy(d_contents,contents,contentsSize,hipMemcpyHostToDevice);
delete [] contents; // done with the host data (allocated with new[])
// copy offsets vector into device memory
thrust::device_vector<int> offsets(lineOffsets);
int* d_offsets = offsets.data().get();
// build empty output vector of DString*'s
--linesCount; // removed header line
std::pair<const char*,size_t>* d_column1 = 0;
hipMalloc(&d_column1, linesCount * sizeof(std::pair<const char*,size_t>));
// build a vector of (pointer,length) pairs referencing the requested column of each line
thrust::for_each_n(thrust::device,
thrust::make_counting_iterator<size_t>(0), linesCount,
[d_contents, d_offsets, column, d_column1] __device__(size_t idx){
// probably some more elegant way to do this
int lineOffset = d_offsets[idx];
int lineLength = d_offsets[idx+1] - lineOffset;
d_column1[idx].first = (const char*)0;
if( lineLength < 1 )
return;
char* line = &(d_contents[lineOffset]);
char* stringStart = line;
int columnLength = 0, col = 0;
for( int i=0; (i < lineLength); ++i )
{
if( line[i] && line[i] != ',' )
{
++columnLength;
continue;
}
if( col++ >= column )
break;
stringStart = line + i + 1;
columnLength = 0;
}
if( columnLength==0 )
return;
// add string to vector array
d_column1[idx].first = (const char*)stringStart;
d_column1[idx].second = (size_t)columnLength;
});
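// d_column1 now holds (pointer,length) pairs that point directly into the CSV bytes in
// device memory; NVStrings::create_from_index() in main() turns this index into a strings
// instance, so d_contents must stay allocated until that call has completed.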
//
hipDeviceSynchronize();
d_fileContents = d_contents;
return d_column1;
}
int main( int argc, char** argv )
{
//NVStrings::initLibrary();
int count = 0;
std::pair<const char*,size_t>* column1 = setupTest(count,1);
if( column1==0 )
return -1;
NVStrings* dstrs = NVStrings::create_from_index( column1, count );
hipFree(d_fileContents); // csv data not needed once dstrs is created
hipFree(column1); // string index data has done its job as well
std::vector<NVStrings*> ncolumns;
dstrs->split_column( " ", -1, ncolumns);
printf("split_columns = %d\n",(int)ncolumns.size());
//
int basize = (count+7)/8;
unsigned char* d_bitarray = new unsigned char[basize];
//hipMalloc(&d_bitarray,basize);
for( int idx=0; idx < (int)ncolumns.size(); ++idx )
{
NVStrings* ds = ncolumns[idx];
int ncount = ds->set_null_bitarray(d_bitarray,true,false);
printf("%d: null count = %d/%d\n",idx,ncount,count);
//for( int jdx=0; jdx < basize; ++jdx )
// printf("%02x,",(int)d_bitarray[jdx]);
printf("\n");
}
//hipFree(d_bitarray);
delete [] d_bitarray;
// show column values
//char** list = new char*[count];
//ncolumns[ncolumns.size()-1]->to_host(list,0,count);
//for( int idx=0; idx < count; ++idx )
// printf("%s,",list[idx]);
//printf("\n");
//delete list;
//ncolumns[0]->print();
return 0;
} | 4ea0ebabc3119c48fe7cb3073691fa09e77ca409.cu | #include <memory>
#include <string.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include <cuda_runtime.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include "../include/NVStrings.h"
//
// cd ../build
// nvcc -w -std=c++11 --expt-extended-lambda -gencode arch=compute_70,code=sm_70 ../tests/test.cu -L. -lNVStrings -o test --linker-options -rpath,.:
//
// csv file contents in device memory
void* d_fileContents = 0;
// return a vector of DString's we wish to process
std::pair<const char*,size_t>* setupTest(int& linesCount, int column)
{
//FILE* fp = fopen("../../data/1420-rows.csv", "rb");
FILE* fp = fopen("../../data/7584-rows.csv", "rb");
if( !fp )
{
printf("missing csv file\n");
return 0;
}
fseek(fp, 0, SEEK_END);
int fileSize = (int)ftell(fp);
fseek(fp, 0, SEEK_SET);
printf("File size = %d bytes\n", fileSize);
if( fileSize < 2 )
{
fclose(fp);
return 0;
}
// load file into memory
int contentsSize = fileSize+2;
char* contents = new char[contentsSize+2];
fread(contents, 1, fileSize, fp);
contents[fileSize] = '\r'; // line terminate
contents[fileSize+1] = 0; // and null-terminate
fclose(fp);
// find lines -- compute offsets vector values
thrust::host_vector<int> lineOffsets;
char* ptr = contents;
while( *ptr )
{
char ch = *ptr;
if( ch=='\r' )
{
*ptr = 0;
while(ch && (ch < ' ')) ch = *(++ptr);
lineOffsets.push_back((int)(ptr - contents));
continue;
}
++ptr;
}
linesCount = (int)lineOffsets.size();
printf("Found %d lines\n",linesCount);
// copy file contents into device memory
char* d_contents = 0;
cudaMalloc(&d_contents,contentsSize);
cudaMemcpy(d_contents,contents,contentsSize,cudaMemcpyHostToDevice);
delete [] contents; // done with the host data (allocated with new[])
// copy offsets vector into device memory
thrust::device_vector<int> offsets(lineOffsets);
int* d_offsets = offsets.data().get();
// build empty output vector of DString*'s
--linesCount; // removed header line
std::pair<const char*,size_t>* d_column1 = 0;
cudaMalloc(&d_column1, linesCount * sizeof(std::pair<const char*,size_t>));
// build a vector of (pointer,length) pairs referencing the requested column of each line
thrust::for_each_n(thrust::device,
thrust::make_counting_iterator<size_t>(0), linesCount,
[d_contents, d_offsets, column, d_column1] __device__(size_t idx){
// probably some more elegant way to do this
int lineOffset = d_offsets[idx];
int lineLength = d_offsets[idx+1] - lineOffset;
d_column1[idx].first = (const char*)0;
if( lineLength < 1 )
return;
char* line = &(d_contents[lineOffset]);
char* stringStart = line;
int columnLength = 0, col = 0;
for( int i=0; (i < lineLength); ++i )
{
if( line[i] && line[i] != ',' )
{
++columnLength;
continue;
}
if( col++ >= column )
break;
stringStart = line + i + 1;
columnLength = 0;
}
if( columnLength==0 )
return;
// add string to vector array
d_column1[idx].first = (const char*)stringStart;
d_column1[idx].second = (size_t)columnLength;
});
//
cudaThreadSynchronize();
d_fileContents = d_contents;
return d_column1;
}
int main( int argc, char** argv )
{
//NVStrings::initLibrary();
int count = 0;
std::pair<const char*,size_t>* column1 = setupTest(count,1);
if( column1==0 )
return -1;
NVStrings* dstrs = NVStrings::create_from_index( column1, count );
cudaFree(d_fileContents); // csv data not needed once dstrs is created
cudaFree(column1); // string index data has done its job as well
std::vector<NVStrings*> ncolumns;
dstrs->split_column( " ", -1, ncolumns);
printf("split_columns = %d\n",(int)ncolumns.size());
//
int basize = (count+7)/8;
unsigned char* d_bitarray = new unsigned char[basize];
//cudaMalloc(&d_bitarray,basize);
for( int idx=0; idx < (int)ncolumns.size(); ++idx )
{
NVStrings* ds = ncolumns[idx];
int ncount = ds->set_null_bitarray(d_bitarray,true,false);
printf("%d: null count = %d/%d\n",idx,ncount,count);
//for( int jdx=0; jdx < basize; ++jdx )
// printf("%02x,",(int)d_bitarray[jdx]);
printf("\n");
}
//cudaFree(d_bitarray);
delete [] d_bitarray;
// show column values
//char** list = new char*[count];
//ncolumns[ncolumns.size()-1]->to_host(list,0,count);
//for( int idx=0; idx < count; ++idx )
// printf("%s,",list[idx]);
//printf("\n");
//delete list;
//ncolumns[0]->print();
return 0;
} |
887ca6a50961d476ea7eaaa40547a1b013290bc2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "glm/glm.hpp"
#include "utilities.h"
#include "kernel.h"
using namespace glm;
/*
#if SHARED == 1
#define ACC(x,y,z) sharedMemAcc(x,y,z)
#else
#define ACC(x,y,z) naiveAcc(x,y,z)
#endif
*/
//GLOBALS
dim3 threadsPerBlock(blockSize);
int numObjects;
const float radius = 50.0f;
const float scene_scale = 500; //size of the height map in simulation space
glm::vec4 * dev_pos;
glm::vec3 * dev_vel;
glm::vec3 * dev_acc;
void checkCUDAError(const char *msg, int line = -1)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
if( line >= 0 )
{
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
__host__ __device__
unsigned int hash(unsigned int a){
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
//Function that generates static.
__host__ __device__
glm::vec3 generateRandomNumberFromThread(float time, int index)
{
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//Generate randomized 3D starting positions for the boids
//Also initializes the w component to zero
__global__
void generateRandomPosArray(int time, int N, glm::vec4 * arr, float scale )
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
glm::vec3 rand = (scale-50)*(generateRandomNumberFromThread(time, index)-0.5f);
arr[index].x = rand.x;
arr[index].y = rand.y;
arr[index].z = rand.z;
arr[index].w = 0.0f;
}
}
//Generate initial velocities with random direction and magnitude.
//Not physically motivated, but it makes for an interesting looking scene
__global__
void generateCircularVelArray(int time, int N, glm::vec3 * arr, glm::vec4 * pos)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
thrust::default_random_engine rng(hash(index*(time + index)*N ));
thrust::uniform_real_distribution<float> u01(.2, 10);
//thrust::uniform_real_distribution<float> u02(-PI, PI);
thrust::uniform_real_distribution<float> u03(-PI, PI);
float theta = (float)u03(rng);
float phi = (float)u03(rng);
arr[index] = (float)u01(rng)*glm::vec3(sin(theta)*cos(phi), sin(theta)*sin(phi), cos(theta));
/*
glm::vec3 rand = 10.0f*(generateRandomNumberFromThread(time+index*N+threadIdx.x, index));
arr[index].x = rand.x;
arr[index].y = rand.y;
arr[index].z = rand.z;*/
}
}
//Generate randomized starting velocities in the XY plane
__global__
void generateRandomVelArray(int time, int N, glm::vec3 * arr, float scale)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
glm::vec3 rand = scale*(generateRandomNumberFromThread(time, index) - 0.5f);
arr[index].x = rand.x;
arr[index].y = rand.y;
arr[index].z = 0.0;//rand.z;
}
}
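// The three classic flocking rules are implemented below. Each one scans all boids
// tile-by-tile through shared memory, keeps only neighbours that are within `radius`
// and pass the field-of-view test, and accumulates a steering vector into acc:
// alignment (match the neighbours' average velocity), cohesion (move toward their
// average position) and separation (move away from them).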
__device__
vec3 calculateAlignment(int N, vec4 current_boid_pos, vec4* other_boids_pos, vec3 current_boid_vel, vec3* other_boids_vel, vec3* acc, float radius){
int numInRadius = 0;
int index;
float distance;
float angle;
int numberOfBlocks = (int)ceil((float)N/blockSize);
vec3 averageVelocity = vec3(0,0,0);
__shared__ vec4 positions[blockSize];
__shared__ vec3 velocities[blockSize];
for(int i = 0; i < numberOfBlocks; i++){
index = (i*blockSize)+threadIdx.x;
if(index < N){
positions[threadIdx.x] = other_boids_pos[index];
velocities[threadIdx.x] = other_boids_vel[index];
}
__syncthreads();
for(int j = 0; j < blockSize && j + i*blockSize < N; j++){
distance = length(current_boid_pos - positions[j]);
vec3 vectorDist = vec3(positions[j]-current_boid_pos);
vectorDist = (-1.0f/distance)*vectorDist;
vec3 normalCurrentBoidVel = (-1.0f/length(current_boid_vel))*current_boid_vel;
angle = glm::dot(normalCurrentBoidVel,vectorDist);
if(distance <= radius && abs(angle) < abs(cos((float)fieldOfView))){
numInRadius++;
averageVelocity += velocities[j];
}
}
__syncthreads(); // wait until all threads are done with this tile before it is overwritten
}
if (numInRadius > 0)
return 1.0f*(((1.0f/(float)numInRadius) * averageVelocity) - current_boid_vel); // alignment steering: neighbours' average velocity minus the boid's own velocity
else
return vec3(0,0,0);
}
__device__
void alignment(int N, vec4 * pos, vec3* vel, vec3* acc, float radius){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < N){
vec4 current_boid_pos = pos[index];
vec3 current_boid_vel = vel[index];
vec3 accel = calculateAlignment(N,current_boid_pos,pos,current_boid_vel,vel,acc,radius);
acc[index] += accel;
//printf("%f %f %f\n", acc[index][0], acc[index][1], acc[index][2]);
}
}
__device__
vec3 calculateCohesion(int N, vec4 current_boid_pos, vec4* other_boids_pos, vec3 current_boid_vel, vec3* other_boids_vel, vec3* acc, float radius){
int numInRadius = 0;
int index;
float distance;
float angle;
int numberOfBlocks = (int)ceil((float)N/blockSize);
vec3 averagePosition = vec3(0,0,0);
__shared__ vec4 positions[blockSize];
__shared__ vec3 velocities[blockSize];
for(int i = 0; i < numberOfBlocks; i++){
index = (i*blockSize)+threadIdx.x;
if(index < N){
positions[threadIdx.x] = other_boids_pos[index];
velocities[threadIdx.x] = other_boids_vel[index];
}
__syncthreads();
for(int j = 0; j < blockSize && j + i*blockSize < N; j++){
distance = length(current_boid_pos - positions[j]);
vec3 vectorDist = vec3(positions[j]-current_boid_pos);
vectorDist = (-1.0f/distance)*vectorDist;
vec3 normalCurrentBoidVel = (-1.0f/length(current_boid_vel))*current_boid_vel;
angle = glm::dot(normalCurrentBoidVel,vectorDist);
if(distance <= radius && abs(angle) < abs(cos((float)fieldOfView))){
numInRadius++;
averagePosition += vec3(positions[j]);
}
}
__syncthreads(); // wait until all threads are done with this tile before it is overwritten
}
if (numInRadius > 0)
return 1.0f*(((1.0f/(float)numInRadius) * averagePosition) - vec3(current_boid_pos)); // cohesion steering: vector from the boid toward the neighbours' average position
else
return vec3(0,0,0);
}
__device__
void cohesion(int N, vec4 * pos, vec3* vel, vec3* acc, float radius){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < N){
vec4 current_boid_pos = pos[index];
vec3 current_boid_vel = vel[index];
vec3 accel = calculateCohesion(N,current_boid_pos,pos,current_boid_vel,vel,acc,radius);
acc[index] += accel;
//printf("%f %f %f\n", acc[index][0], acc[index][1], acc[index][2]);
}
}
__device__
vec3 calculateSeparation(int N, vec4 current_boid_pos, vec4* other_boids_pos, vec3 current_boid_vel, vec3* other_boids_vel, vec3* acc, float radius){
int numInRadius = 0;
int index;
float distance;
float angle;
int numberOfBlocks = (int)ceil((float)N/blockSize);
vec3 averageDirection = vec3(0,0,0);
__shared__ vec4 positions[blockSize];
__shared__ vec3 velocities[blockSize];
for(int i = 0; i < numberOfBlocks; i++){
index = (i*blockSize)+threadIdx.x;
if(index < N){
positions[threadIdx.x] = other_boids_pos[index];
velocities[threadIdx.x] = other_boids_vel[index];
}
__syncthreads();
for(int j = 0; j < blockSize && j + i*blockSize < N; j++){
distance = length(current_boid_pos - positions[j]);
vec3 vectorDist = vec3(positions[j]-current_boid_pos);
vectorDist = (-1.0f/distance)*vectorDist;
vec3 normalCurrentBoidVel = (-1.0f/length(current_boid_vel))*current_boid_vel;
angle = glm::dot(normalCurrentBoidVel,vectorDist);
if(distance <= radius && abs(angle) < abs(cos((float)fieldOfView))){
numInRadius++;
averageDirection += vec3(current_boid_pos-positions[j]);
}
}
__syncthreads(); // wait until all threads are done with this tile before it is overwritten
}
if (numInRadius > 0)
return (1.0f/(float)numInRadius) * averageDirection; // separation steering: average direction pointing away from the neighbours
else
return vec3(0,0,0);
}
__device__
void separation(int N, vec4 * pos, vec3* vel, vec3* acc, float radius){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < N){
vec4 current_boid_pos = pos[index];
vec3 current_boid_vel = vel[index];
vec3 avgDir = calculateSeparation(N,current_boid_pos,pos,current_boid_vel,vel,acc,radius);
acc[index] += 1.0f*(avgDir);
//printf("%f %f %f\n", acc[index][0], acc[index][1], acc[index][2]);
}
}
//Simple Euler integration scheme
__global__
void updateF(int time, int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc,float radius)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
glm::vec4 my_pos;
glm::vec3 my_vel;
glm::vec3 accel;
if(index < N) {
my_pos = pos[index];
my_vel = vel[index];
acc[index] =(generateRandomNumberFromThread(time, index) - 0.5f)*my_vel;
if(length(my_pos) > 500){
vel[index] = reflect(my_vel,normalize(vec3(-my_pos)));
pos[index] = normalize(pos[index])*499;
}
//acc[index] = vec3(0,0,0);//2.0f*(generateRandomNumberFromThread(time, index));
}
alignment(N,pos,vel,acc,radius);
cohesion(N,pos,vel,acc,radius);
separation(N,pos,vel,acc,radius);
}
__global__
void updateS(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if( index < N)
{
vel[index] += acc[index] * dt;
vel[index] = clamp(vel[index], -6.0f, 6.0f);
pos[index].x += vel[index].x * dt;
pos[index].y += vel[index].y * dt;
pos[index].z += vel[index].z * dt;
}
}
//Update the vertex buffer object
//(The VBO is where OpenGL looks for the positions for the planets)
__global__
void sendToVBO(int N, glm::vec4 * pos, float * vbo, int width, int height, float s_scale)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale_w = -2.0f / s_scale;
float c_scale_h = -2.0f / s_scale;
if(index<N)
{
vbo[4*index+0] = pos[index].x*c_scale_w;
vbo[4*index+1] = pos[index].y*c_scale_h;
vbo[4*index+2] = 0;
vbo[4*index+3] = 1;
}
}
//Update the texture pixel buffer object
//(This texture is where openGL pulls the data for the height map)
__global__
void sendToPBO(int N, glm::vec4 * pos, float4 * pbo, int width, int height, float s_scale)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
int x = index % width;
int y = index / width;
float w2 = width / 2.0;
float h2 = height / 2.0;
float c_scale_w = width / s_scale;
float c_scale_h = height / s_scale;
glm::vec3 color(0.05, 0.15, 0.3);
///glm::vec3 acc = ACC(N, glm::vec4((x-w2)/c_scale_w,(y-h2)/c_scale_h,0,1), pos);
if(x<width && y<height)
{
//float mag = sqrt(sqrt(acc.x*acc.x + acc.y*acc.y + acc.z*acc.z));
float mag = 1.0f;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = (mag < 1.0f) ? mag : 1.0f;
}
}
/*************************************
* Wrappers for the __global__ calls *
*************************************/
//Initialize memory, update some globals
void initCuda(int N)
{
numObjects = N;
dim3 fullBlocksPerGrid((int)ceil(float(N)/float(blockSize)));
hipMalloc((void**)&dev_pos, N*sizeof(glm::vec4));
checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&dev_vel, N*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&dev_acc, N*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
hipLaunchKernelGGL(( generateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects, dev_pos, scene_scale);
checkCUDAErrorWithLine("Kernel failed!");
hipLaunchKernelGGL(( generateCircularVelArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 2, numObjects, dev_vel, dev_pos);
checkCUDAErrorWithLine("Kernel failed!");
hipDeviceSynchronize();
}
void cudaNBodyUpdateWrapper(float dt)
{
dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize)));
hipLaunchKernelGGL(( updateF), dim3(fullBlocksPerGrid), dim3(blockSize), blockSize*sizeof(glm::vec4), 0, 3, numObjects, dt, dev_pos, dev_vel, dev_acc, radius);
checkCUDAErrorWithLine("Kernel failed!");
hipLaunchKernelGGL(( updateS), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel, dev_acc);
checkCUDAErrorWithLine("Kernel failed!");
hipDeviceSynchronize();
}
void cudaUpdateVBO(float * vbodptr, int width, int height)
{
dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize)));
hipLaunchKernelGGL(( sendToVBO), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, vbodptr, width, height, scene_scale);
hipDeviceSynchronize();
}
void cudaUpdatePBO(float4 * pbodptr, int width, int height)
{
dim3 fullBlocksPerGrid((int)ceil(float(width*height)/float(blockSize)));
hipLaunchKernelGGL(( sendToPBO), dim3(fullBlocksPerGrid), dim3(blockSize), blockSize*sizeof(glm::vec4), 0, numObjects, dev_pos, pbodptr, width, height, scene_scale);
hipDeviceSynchronize();
}
| 887ca6a50961d476ea7eaaa40547a1b013290bc2.cu | #include <stdio.h>
#include <cuda.h>
#include <cmath>
#include "glm/glm.hpp"
#include "utilities.h"
#include "kernel.h"
using namespace glm;
/*
#if SHARED == 1
#define ACC(x,y,z) sharedMemAcc(x,y,z)
#else
#define ACC(x,y,z) naiveAcc(x,y,z)
#endif
*/
//GLOBALS
dim3 threadsPerBlock(blockSize);
int numObjects;
const float radius = 50.0f;
const float scene_scale = 500; //size of the height map in simulation space
glm::vec4 * dev_pos;
glm::vec3 * dev_vel;
glm::vec3 * dev_acc;
void checkCUDAError(const char *msg, int line = -1)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
if( line >= 0 )
{
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
__host__ __device__
unsigned int hash(unsigned int a){
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
//Function that generates static.
__host__ __device__
glm::vec3 generateRandomNumberFromThread(float time, int index)
{
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//Generate randomized 3D starting positions for the boids
//Also initializes the w component to zero
__global__
void generateRandomPosArray(int time, int N, glm::vec4 * arr, float scale )
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
glm::vec3 rand = (scale-50)*(generateRandomNumberFromThread(time, index)-0.5f);
arr[index].x = rand.x;
arr[index].y = rand.y;
arr[index].z = rand.z;
arr[index].w = 0.0f;
}
}
//Generate initial velocities with random direction and magnitude.
//Not physically motivated, but it makes for an interesting looking scene
__global__
void generateCircularVelArray(int time, int N, glm::vec3 * arr, glm::vec4 * pos)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
thrust::default_random_engine rng(hash(index*(time + index)*N ));
thrust::uniform_real_distribution<float> u01(.2, 10);
//thrust::uniform_real_distribution<float> u02(-PI, PI);
thrust::uniform_real_distribution<float> u03(-PI, PI);
float theta = (float)u03(rng);
float phi = (float)u03(rng);
arr[index] = (float)u01(rng)*glm::vec3(sin(theta)*cos(phi), sin(theta)*sin(phi), cos(theta));
/*
glm::vec3 rand = 10.0f*(generateRandomNumberFromThread(time+index*N+threadIdx.x, index));
arr[index].x = rand.x;
arr[index].y = rand.y;
arr[index].z = rand.z;*/
}
}
//Generate randomized starting velocities in the XY plane
__global__
void generateRandomVelArray(int time, int N, glm::vec3 * arr, float scale)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
glm::vec3 rand = scale*(generateRandomNumberFromThread(time, index) - 0.5f);
arr[index].x = rand.x;
arr[index].y = rand.y;
arr[index].z = 0.0;//rand.z;
}
}
__device__
vec3 calculateAlignment(int N, vec4 current_boid_pos, vec4* other_boids_pos, vec3 current_boid_vel, vec3* other_boids_vel, vec3* acc, float radius){
int numInRadius = 0;
int index;
float distance;
float angle;
int numberOfBlocks = (int)ceil((float)N/blockSize);
vec3 averageVelocity = vec3(0,0,0);
__shared__ vec4 positions[blockSize];
__shared__ vec3 velocities[blockSize];
for(int i = 0; i < numberOfBlocks; i++){
index = (i*blockSize)+threadIdx.x;
if(index < N){
positions[threadIdx.x] = other_boids_pos[index];
velocities[threadIdx.x] = other_boids_vel[index];
}
__syncthreads();
for(int j = 0; j < blockSize && j + i*blockSize < N; j++){
distance = length(current_boid_pos - positions[j]);
vec3 vectorDist = vec3(positions[j]-current_boid_pos);
vectorDist = (-1.0f/distance)*vectorDist;
vec3 normalCurrentBoidVel = (-1.0f/length(current_boid_vel))*current_boid_vel;
angle = glm::dot(normalCurrentBoidVel,vectorDist);
if(distance <= radius && abs(angle) < abs(cos((float)fieldOfView))){
numInRadius++;
averageVelocity += velocities[j];
}
}
__syncthreads(); // wait until all threads are done with this tile before it is overwritten
}
if (numInRadius > 0)
return 1.0f*(((1.0f/(float)numInRadius) * averageVelocity) - current_boid_vel); // alignment steering: neighbours' average velocity minus the boid's own velocity
else
return vec3(0,0,0);
}
__device__
void alignment(int N, vec4 * pos, vec3* vel, vec3* acc, float radius){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < N){
vec4 current_boid_pos = pos[index];
vec3 current_boid_vel = vel[index];
vec3 accel = calculateAlignment(N,current_boid_pos,pos,current_boid_vel,vel,acc,radius);
acc[index] += accel;
//printf("%f %f %f\n", acc[index][0], acc[index][1], acc[index][2]);
}
}
__device__
vec3 calculateCohesion(int N, vec4 current_boid_pos, vec4* other_boids_pos, vec3 current_boid_vel, vec3* other_boids_vel, vec3* acc, float radius){
int numInRadius = 0;
int index;
float distance;
float angle;
int numberOfBlocks = (int)ceil((float)N/blockSize);
vec3 averagePosition = vec3(0,0,0);
__shared__ vec4 positions[blockSize];
__shared__ vec3 velocities[blockSize];
for(int i = 0; i < numberOfBlocks; i++){
index = (i*blockSize)+threadIdx.x;
if(index < N){
positions[threadIdx.x] = other_boids_pos[index];
velocities[threadIdx.x] = other_boids_vel[index];
}
__syncthreads();
for(int j = 0; j < blockSize && j + i*blockSize < N; j++){
distance = length(current_boid_pos - positions[j]);
vec3 vectorDist = vec3(positions[j]-current_boid_pos);
vectorDist = (-1.0f/distance)*vectorDist;
vec3 normalCurrentBoidVel = (-1.0f/length(current_boid_vel))*current_boid_vel;
angle = glm::dot(normalCurrentBoidVel,vectorDist);
if(distance <= radius && abs(angle) < abs(cos((float)fieldOfView))){
numInRadius++;
averagePosition += vec3(positions[j]);
}
}
__syncthreads(); // wait until all threads are done with this tile before it is overwritten
}
if (numInRadius > 0)
return 1.0f*(((1.0f/(float)numInRadius) * averagePosition) - vec3(current_boid_pos)); // cohesion steering: vector from the boid toward the neighbours' average position
else
return vec3(0,0,0);
}
__device__
void cohesion(int N, vec4 * pos, vec3* vel, vec3* acc, float radius){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < N){
vec4 current_boid_pos = pos[index];
vec3 current_boid_vel = vel[index];
vec3 accel = calculateCohesion(N,current_boid_pos,pos,current_boid_vel,vel,acc,radius);
acc[index] += accel;
//printf("%f %f %f\n", acc[index][0], acc[index][1], acc[index][2]);
}
}
__device__
vec3 calculateSeparation(int N, vec4 current_boid_pos, vec4* other_boids_pos, vec3 current_boid_vel, vec3* other_boids_vel, vec3* acc, float radius){
int numInRadius = 0;
int index;
float distance;
float angle;
int numberOfBlocks = (int)ceil((float)N/blockSize);
vec3 averageDirection = vec3(0,0,0);
__shared__ vec4 positions[blockSize];
__shared__ vec3 velocities[blockSize];
for(int i = 0; i < numberOfBlocks; i++){
index = (i*blockSize)+threadIdx.x;
if(index < N){
positions[threadIdx.x] = other_boids_pos[index];
velocities[threadIdx.x] = other_boids_vel[index];
}
__syncthreads();
for(int j = 0; j < blockSize && j + i*blockSize < N; j++){
distance = length(current_boid_pos - positions[j]);
vec3 vectorDist = vec3(positions[j]-current_boid_pos);
vectorDist = (-1.0f/distance)*vectorDist;
vec3 normalCurrentBoidVel = (-1.0f/length(current_boid_vel))*current_boid_vel;
angle = glm::dot(normalCurrentBoidVel,vectorDist);
if(distance <= radius && abs(angle) < abs(cos((float)fieldOfView))){
numInRadius++;
averageDirection += vec3(current_boid_pos-positions[j]);
}
}
__syncthreads(); // wait until all threads are done with this tile before it is overwritten
}
if (numInRadius > 0)
return (1.0f/(float)numInRadius) * averageDirection; // separation steering: average direction pointing away from the neighbours
else
return vec3(0,0,0);
}
__device__
void separation(int N, vec4 * pos, vec3* vel, vec3* acc, float radius){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < N){
vec4 current_boid_pos = pos[index];
vec3 current_boid_vel = vel[index];
vec3 avgDir = calculateSeparation(N,current_boid_pos,pos,current_boid_vel,vel,acc,radius);
acc[index] += 1.0f*(avgDir);
//printf("%f %f %f\n", acc[index][0], acc[index][1], acc[index][2]);
}
}
//Simple Euler integration scheme
__global__
void updateF(int time, int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc,float radius)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
glm::vec4 my_pos;
glm::vec3 my_vel;
glm::vec3 accel;
if(index < N) {
my_pos = pos[index];
my_vel = vel[index];
acc[index] =(generateRandomNumberFromThread(time, index) - 0.5f)*my_vel;
if(length(my_pos) > 500){
vel[index] = reflect(my_vel,normalize(vec3(-my_pos)));
pos[index] = normalize(pos[index])*499;
}
//acc[index] = vec3(0,0,0);//2.0f*(generateRandomNumberFromThread(time, index));
}
alignment(N,pos,vel,acc,radius);
cohesion(N,pos,vel,acc,radius);
separation(N,pos,vel,acc,radius);
}
__global__
void updateS(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if( index < N)
{
vel[index] += acc[index] * dt;
vel[index] = clamp(vel[index], -6.0f, 6.0f);
pos[index].x += vel[index].x * dt;
pos[index].y += vel[index].y * dt;
pos[index].z += vel[index].z * dt;
}
}
//Update the vertex buffer object
//(The VBO is where OpenGL looks for the positions for the planets)
__global__
void sendToVBO(int N, glm::vec4 * pos, float * vbo, int width, int height, float s_scale)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale_w = -2.0f / s_scale;
float c_scale_h = -2.0f / s_scale;
if(index<N)
{
vbo[4*index+0] = pos[index].x*c_scale_w;
vbo[4*index+1] = pos[index].y*c_scale_h;
vbo[4*index+2] = 0;
vbo[4*index+3] = 1;
}
}
//Update the texture pixel buffer object
//(This texture is where openGL pulls the data for the height map)
__global__
void sendToPBO(int N, glm::vec4 * pos, float4 * pbo, int width, int height, float s_scale)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
int x = index % width;
int y = index / width;
float w2 = width / 2.0;
float h2 = height / 2.0;
float c_scale_w = width / s_scale;
float c_scale_h = height / s_scale;
glm::vec3 color(0.05, 0.15, 0.3);
///glm::vec3 acc = ACC(N, glm::vec4((x-w2)/c_scale_w,(y-h2)/c_scale_h,0,1), pos);
if(x<width && y<height)
{
//float mag = sqrt(sqrt(acc.x*acc.x + acc.y*acc.y + acc.z*acc.z));
float mag = 1.0f;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = (mag < 1.0f) ? mag : 1.0f;
}
}
/*************************************
* Wrappers for the __global__ calls *
*************************************/
//Initialize memory, update some globals
void initCuda(int N)
{
numObjects = N;
dim3 fullBlocksPerGrid((int)ceil(float(N)/float(blockSize)));
cudaMalloc((void**)&dev_pos, N*sizeof(glm::vec4));
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&dev_vel, N*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&dev_acc, N*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
generateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects, dev_pos, scene_scale);
checkCUDAErrorWithLine("Kernel failed!");
generateCircularVelArray<<<fullBlocksPerGrid, blockSize>>>(2, numObjects, dev_vel, dev_pos);
checkCUDAErrorWithLine("Kernel failed!");
cudaThreadSynchronize();
}
void cudaNBodyUpdateWrapper(float dt)
{
dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize)));
updateF<<<fullBlocksPerGrid, blockSize, blockSize*sizeof(glm::vec4)>>>(3, numObjects, dt, dev_pos, dev_vel, dev_acc, radius);
checkCUDAErrorWithLine("Kernel failed!");
updateS<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel, dev_acc);
checkCUDAErrorWithLine("Kernel failed!");
cudaThreadSynchronize();
}
void cudaUpdateVBO(float * vbodptr, int width, int height)
{
dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize)));
sendToVBO<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, vbodptr, width, height, scene_scale);
cudaThreadSynchronize();
}
void cudaUpdatePBO(float4 * pbodptr, int width, int height)
{
dim3 fullBlocksPerGrid((int)ceil(float(width*height)/float(blockSize)));
sendToPBO<<<fullBlocksPerGrid, blockSize, blockSize*sizeof(glm::vec4)>>>(numObjects, dev_pos, pbodptr, width, height, scene_scale);
cudaThreadSynchronize();
}
|
3e6e514ec018f8e42fc256ce5f9976aa0daabd58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_kernel.cuh"
hiprandState_t *devStates;
// matrix mult
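// The three kernels below compute C = A*B and the two gradients dA = dC*B^T and
// dB = A^T*dC with the standard shared-memory tiling scheme: each block loads a
// TILE_SIZE x TILE_SIZE tile of both operands, synchronizes, accumulates the partial
// dot products, and synchronizes again before moving on to the next tile.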
__global__
void cuda_Matmul_forward_kernel(const float *a, const float *b, float *c, const uint m, const uint n, const uint p) {
__shared__ float tileA[TILE_SIZE][TILE_SIZE];
__shared__ float tileB[TILE_SIZE][TILE_SIZE];
int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y;
int row = by * TILE_SIZE + ty;
int col = bx * TILE_SIZE + tx;
int range = (n-1) / TILE_SIZE + 1;
float res = 0;
#pragma unroll
for (int i = 0; i < range; i++) {
if (row < m && i * TILE_SIZE + tx < n)
tileA[ty][tx] = a[row * n + i * TILE_SIZE + tx];
else
tileA[ty][tx] = 0;
if (col < p && i * TILE_SIZE + ty < n)
tileB[ty][tx] = b[(i * TILE_SIZE + ty) * p + col];
else
tileB[ty][tx] = 0;
__syncthreads();
#pragma unroll
for (int j = 0; j < TILE_SIZE; j++)
res += tileA[ty][j] * tileB[j][tx];
__syncthreads();
}
if (row < m && col < p)
c[row * p + col] = res;
}
__global__
void cuda_Matmul_backward_A_kernel(float *a_grad, const float *b, const float *c_grad, const uint m, const uint n, const uint p) {
__shared__ float tileB[TILE_SIZE][TILE_SIZE];
__shared__ float tileCGrad[TILE_SIZE][TILE_SIZE];
int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y;
int row = by * TILE_SIZE + ty;
int col = bx * TILE_SIZE + tx;
int range = (p-1) / TILE_SIZE + 1;
float res = 0;
#pragma unroll
for (int i = 0; i < range; i++) {
if (row < m && i * TILE_SIZE + tx < p)
tileCGrad[ty][tx] = c_grad[row * p + i * TILE_SIZE + tx];
else
tileCGrad[ty][tx] = 0;
if (col < n && i * TILE_SIZE + ty < p)
tileB[ty][tx] = b[col * p + i * TILE_SIZE + ty];
else
tileB[ty][tx] = 0;
__syncthreads();
#pragma unroll
for (int j = 0; j < TILE_SIZE; j++)
res += tileCGrad[ty][j] * tileB[j][tx];
__syncthreads();
}
if (row < m && col < n)
a_grad[row * n + col] = res;
}
__global__
void cuda_Matmul_backward_B_kernel(float *b_grad, const float *a, const float *c_grad, const uint m, const uint n, const uint p) {
__shared__ float tileA[TILE_SIZE][TILE_SIZE];
__shared__ float tileCGrad[TILE_SIZE][TILE_SIZE];
int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y;
int row = by * TILE_SIZE + ty;
int col = bx * TILE_SIZE + tx;
int range = (m-1)/TILE_SIZE+1;
float res = 0;
#pragma unroll
for (int i = 0; i < range; i++) {
if (row < n && i * TILE_SIZE + tx < m)
tileA[ty][tx] = a[(i * TILE_SIZE + tx) * n + row];
else
tileA[ty][tx] = 0;
if (col < p && i * TILE_SIZE + ty < m)
tileCGrad[ty][tx] = c_grad[(i * TILE_SIZE + ty) * p + col];
else
tileCGrad[ty][tx] = 0;
__syncthreads();
#pragma unroll
for (int j = 0; j < TILE_SIZE; j++)
res += tileA[ty][j] * tileCGrad[j][tx];
__syncthreads();
}
if (row < n && col < p)
b_grad[row * p + col] = res;
}
// sparse matmul
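// The sparse operand is stored in CSR form: indptr[i]..indptr[i+1] indexes the nonzeros
// of row i, indices[] holds their column ids and a_in[] their values. Each block handles
// one sparse row and each thread one output column.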
__global__
void cuda_SparseMatmul_forward_kernel(float *a_in, float *b_in, float *c_in, int *indptr, int *indices, int p) {
int i = blockIdx.x;
int k = threadIdx.x;
#pragma unroll
for (int jj = indptr[i]; jj < indptr[i + 1]; jj++) {
int j = indices[jj];
c_in[i * p + k] += a_in[jj] * b_in[j * p + k];
}
}
__global__
void cuda_SparseMatmul_backward_kernel(float *a_in, float *b_in, float *c_in, int *indptr, int *indices, int p) {
int i = blockIdx.x;
int k = threadIdx.x;
#pragma unroll
for (int jj = indptr[i]; jj < indptr[i + 1]; jj++){
int j = indices[jj];
b_in[j * p + k] += c_in[i * p + k] * a_in[jj];
}
}
// graph sum
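// Aggregates neighbour features with the symmetric normalization used by GCNs:
// out[src] += in[dst] / sqrt(deg(src) * deg(dst)) for every edge (src, dst),
// where the degrees are taken from the CSR indptr array.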
__global__
void cuda_GraphSum_forward_kernel(float *d_in_data, float *d_out_data, int *d_indptr, int *d_indices, int dim, int numNodes) {
int src = blockIdx.x;
int j = threadIdx.x;
int ptr_src_0 = d_indptr[src];
int ptr_stc_1 = d_indptr[src + 1];
#pragma unroll
for (int i = ptr_src_0; i < ptr_stc_1; i++) {
int dst = d_indices[i];
float coef = 1.0 / sqrtf(
(ptr_stc_1 - ptr_src_0) * (d_indptr[dst + 1] - d_indptr[dst])
);
// This only works for undirected graphs. Should be out[dst] += coef * in[src]
d_out_data[src * dim + j] += coef * d_in_data[dst * dim + j];
}
}
__global__
void cuda_GraphSum_backward_kernel(float *d_in_grad, float *d_out_grad, int *d_indptr, int *d_indices, int dim, int numNodes) {
int src = blockIdx.x;
int j = threadIdx.x;
int ptr_src_0 = d_indptr[src];
int ptr_stc_1 = d_indptr[src + 1];
#pragma unroll
for (int i = ptr_src_0; i < ptr_stc_1; i++) {
int dst = d_indices[i];
float coef = 1.0 / sqrtf(
(ptr_stc_1 - ptr_src_0) * (d_indptr[dst + 1] - d_indptr[dst])
);
// This only works for undirected graphs. Should be out[dst] += coef * in[src]
d_in_grad[src * dim + j] += coef * d_out_grad[dst * dim + j];
}
}
// cross entropy
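// Numerically stable softmax cross-entropy: the per-sample loss is computed as
// log(sum_j exp(logit_j - max_logit)) - (logit_truth - max_logit), and during training
// the gradient softmax(logit) - one_hot(truth) is written to logits_grad.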
__global__
void cuda_CrossEntropy_forward_A_kernel(float* logits_data, float* logits_grad, bool training, int num_classes, int* truth, int* count, float* thread_loss, int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size) return;
if (truth[i] < 0) {
count[i] = 0;
return;
}
float *logit = &logits_data[i * num_classes];
float max_logit = -1e30, sum_exp = 0;
#pragma unroll
for (int j = 0; j < num_classes; j++)
max_logit = fmax(max_logit, logit[j]);
#pragma unroll
for (int j = 0; j < num_classes; j++) {
logit[j] -= max_logit;
sum_exp += expf(logit[j]);
}
if (training) {
#pragma unroll
for (int j = 0; j < num_classes; j++) {
float prob = expf(logit[j]) / sum_exp;
logits_grad[i * num_classes + j] = prob;
}
logits_grad[i * num_classes + truth[i]] -= 1.0;
}
count[i] = 1;
thread_loss[i] = logf(sum_exp) - logit[truth[i]];
}
__global__
void cuda_CrossEntropy_forward_B_kernel(float *logits_grad, int size, int count) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) logits_grad[i] /= count;
}
// ReLU
__global__
void cuda_ReLU_forward_kernel(float *d_in_data, bool *d_mask, const long unsigned int datasize, bool training) {
uint i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= datasize) return;
bool keep = d_in_data[i] > 0;
if (training) d_mask[i] = keep;
if (!keep) d_in_data[i] = 0;
}
__global__
void cuda_ReLU_backward_kernel(float *d_in_grad, bool *d_mask, long unsigned int datasize) {
uint i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= datasize) return;
if (!d_mask[i]) d_in_grad[i] = 0;
}
// Dropout
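// Inverted dropout: each element survives with probability (1 - p) and the survivors
// are multiplied by `scale` (expected to be 1/(1-p) by the caller), so no rescaling is
// needed at inference time. The mask is optionally kept for the backward pass.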
__global__
void cuda_Dropout_forward_kernel(float *in, int *mask, hiprandState_t *state, const uint size, const float p, const float scale, const bool useMask) {
float x;
bool keep;
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
x = hiprand_uniform(&state[id % MAX_THREAD_PER_BLOCK]);
keep = x >= p;
in[id] *= keep ? scale : 0;
if (useMask) mask[id] = keep;
}
}
__global__
void cuda_Dropout_backward_kernel(float *in_grad, const int *mask, const uint size, const float scale) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) in_grad[id] *= mask[id] ? scale : 0;
}
// rand state
__global__
void cuda_init_rand_kernel(hiprandState_t *state) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(1234, id, 0, &state[id]);
}
void cuda_init_random_state(const uint size) {
// malloc
CUDA_CHECK(hipMalloc((void**) &devStates, size * sizeof(hiprandState_t)));
dim3 block((size-1)/MAX_THREAD_PER_BLOCK + 1, 1, 1);
dim3 thread_in_block(MAX_THREAD_PER_BLOCK, 1, 1);
// kernel
hipLaunchKernelGGL(( cuda_init_rand_kernel), dim3(block),dim3(thread_in_block), 0, 0, devStates);
CUDA_CHECK(hipGetLastError());
// CUDA_CHECK(hipDeviceSynchronize());
}
void cuda_free_random_state() {
// free
CUDA_CHECK(hipFree(devStates));
}
// adam
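// Standard Adam update per parameter: m <- beta1*m + (1-beta1)*g,
// v <- beta2*v + (1-beta2)*g^2, data <- data - step_size * m / (sqrt(v) + eps),
// with optional L2 weight decay folded into g. Any bias correction is assumed to be
// baked into step_size by the caller.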
__global__
void cuda_Adam_step_kernel(float* grad, float* data, float* m, float* v, bool decay, float weight_decay, float beta1, float beta2, float eps, float step_size, int varsize) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= varsize) return;
float g = grad[i];
if (decay) g += weight_decay * data[i];
m[i] = beta1 * m[i] + (1.0 - beta1) * g;
v[i] = beta2 * v[i] + (1.0 - beta2) * g * g;
data[i] -= step_size * m[i] / (sqrtf(v[i]) + eps);
}
__global__
void cuda_set_truth_kernel(int *truth, int *data_split, int *data_label, int current_split, int size) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size)
truth[id] = data_split[id] == current_split ? data_label[id] : -1;
}
__global__
void cuda_Variable_glorot_kernel(float *data, hiprandState_t *state, int size, float scale) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size)
data[id] = (hiprand_uniform(&state[id % MAX_THREAD_PER_BLOCK]) - 0.5) * scale;
}
| 3e6e514ec018f8e42fc256ce5f9976aa0daabd58.cu | #include "cuda_kernel.cuh"
curandState *devStates;
// matrix mult
__global__
void cuda_Matmul_forward_kernel(const float *a, const float *b, float *c, const uint m, const uint n, const uint p) {
__shared__ float tileA[TILE_SIZE][TILE_SIZE];
__shared__ float tileB[TILE_SIZE][TILE_SIZE];
int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y;
int row = by * TILE_SIZE + ty;
int col = bx * TILE_SIZE + tx;
int range = (n-1) / TILE_SIZE + 1;
float res = 0;
#pragma unroll
for (int i = 0; i < range; i++) {
if (row < m && i * TILE_SIZE + tx < n)
tileA[ty][tx] = a[row * n + i * TILE_SIZE + tx];
else
tileA[ty][tx] = 0;
if (col < p && i * TILE_SIZE + ty < n)
tileB[ty][tx] = b[(i * TILE_SIZE + ty) * p + col];
else
tileB[ty][tx] = 0;
__syncthreads();
#pragma unroll
for (int j = 0; j < TILE_SIZE; j++)
res += tileA[ty][j] * tileB[j][tx];
__syncthreads();
}
if (row < m && col < p)
c[row * p + col] = res;
}
__global__
void cuda_Matmul_backward_A_kernel(float *a_grad, const float *b, const float *c_grad, const uint m, const uint n, const uint p) {
__shared__ float tileB[TILE_SIZE][TILE_SIZE];
__shared__ float tileCGrad[TILE_SIZE][TILE_SIZE];
int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y;
int row = by * TILE_SIZE + ty;
int col = bx * TILE_SIZE + tx;
int range = (p-1) / TILE_SIZE + 1;
float res = 0;
#pragma unroll
for (int i = 0; i < range; i++) {
if (row < m && i * TILE_SIZE + tx < p)
tileCGrad[ty][tx] = c_grad[row * p + i * TILE_SIZE + tx];
else
tileCGrad[ty][tx] = 0;
if (col < n && i * TILE_SIZE + ty < p)
tileB[ty][tx] = b[col * p + i * TILE_SIZE + ty];
else
tileB[ty][tx] = 0;
__syncthreads();
#pragma unroll
for (int j = 0; j < TILE_SIZE; j++)
res += tileCGrad[ty][j] * tileB[j][tx];
__syncthreads();
}
if (row < m && col < n)
a_grad[row * n + col] = res;
}
__global__
void cuda_Matmul_backward_B_kernel(float *b_grad, const float *a, const float *c_grad, const uint m, const uint n, const uint p) {
__shared__ float tileA[TILE_SIZE][TILE_SIZE];
__shared__ float tileCGrad[TILE_SIZE][TILE_SIZE];
int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y;
int row = by * TILE_SIZE + ty;
int col = bx * TILE_SIZE + tx;
int range = (m-1)/TILE_SIZE+1;
float res = 0;
#pragma unroll
for (int i = 0; i < range; i++) {
if (row < n && i * TILE_SIZE + tx < m)
tileA[ty][tx] = a[(i * TILE_SIZE + tx) * n + row];
else
tileA[ty][tx] = 0;
if (col < p && i * TILE_SIZE + ty < m)
tileCGrad[ty][tx] = c_grad[(i * TILE_SIZE + ty) * p + col];
else
tileCGrad[ty][tx] = 0;
__syncthreads();
#pragma unroll
for (int j = 0; j < TILE_SIZE; j++)
res += tileA[ty][j] * tileCGrad[j][tx];
__syncthreads();
}
if (row < n && col < p)
b_grad[row * p + col] = res;
}
// sparse matmul
__global__
void cuda_SparseMatmul_forward_kernel(float *a_in, float *b_in, float *c_in, int *indptr, int *indices, int p) {
int i = blockIdx.x;
int k = threadIdx.x;
#pragma unroll
for (int jj = indptr[i]; jj < indptr[i + 1]; jj++) {
int j = indices[jj];
c_in[i * p + k] += a_in[jj] * b_in[j * p + k];
}
}
__global__
void cuda_SparseMatmul_backward_kernel(float *a_in, float *b_in, float *c_in, int *indptr, int *indices, int p) {
int i = blockIdx.x;
int k = threadIdx.x;
#pragma unroll
for (int jj = indptr[i]; jj < indptr[i + 1]; jj++){
int j = indices[jj];
b_in[j * p + k] += c_in[i * p + k] * a_in[jj];
}
}
// graph sum
__global__
void cuda_GraphSum_forward_kernel(float *d_in_data, float *d_out_data, int *d_indptr, int *d_indices, int dim, int numNodes) {
int src = blockIdx.x;
int j = threadIdx.x;
int ptr_src_0 = d_indptr[src];
int ptr_stc_1 = d_indptr[src + 1];
#pragma unroll
for (int i = ptr_src_0; i < ptr_stc_1; i++) {
int dst = d_indices[i];
float coef = 1.0 / sqrtf(
(ptr_stc_1 - ptr_src_0) * (d_indptr[dst + 1] - d_indptr[dst])
);
// This only works for undirected graphs. Should be out[dst] += coef * in[src]]
d_out_data[src * dim + j] += coef * d_in_data[dst * dim + j];
}
}
__global__
void cuda_GraphSum_backward_kernel(float *d_in_grad, float *d_out_grad, int *d_indptr, int *d_indices, int dim, int numNodes) {
int src = blockIdx.x;
int j = threadIdx.x;
int ptr_src_0 = d_indptr[src];
int ptr_stc_1 = d_indptr[src + 1];
#pragma unroll
for (int i = ptr_src_0; i < ptr_stc_1; i++) {
int dst = d_indices[i];
float coef = 1.0 / sqrtf(
(ptr_stc_1 - ptr_src_0) * (d_indptr[dst + 1] - d_indptr[dst])
);
// This only works for undirected graphs. Should be out[dst] += coef * in[src]
d_in_grad[src * dim + j] += coef * d_out_grad[dst * dim + j];
}
}
// cross entropy
__global__
void cuda_CrossEntropy_forward_A_kernel(float* logits_data, float* logits_grad, bool training, int num_classes, int* truth, int* count, float* thread_loss, int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size) return;
if (truth[i] < 0) {
count[i] = 0;
return;
}
float *logit = &logits_data[i * num_classes];
float max_logit = -1e30, sum_exp = 0;
#pragma unroll
for (int j = 0; j < num_classes; j++)
max_logit = fmax(max_logit, logit[j]);
#pragma unroll
for (int j = 0; j < num_classes; j++) {
logit[j] -= max_logit;
sum_exp += expf(logit[j]);
}
if (training) {
#pragma unroll
for (int j = 0; j < num_classes; j++) {
float prob = expf(logit[j]) / sum_exp;
logits_grad[i * num_classes + j] = prob;
}
logits_grad[i * num_classes + truth[i]] -= 1.0;
}
count[i] = 1;
thread_loss[i] = logf(sum_exp) - logit[truth[i]];
}
__global__
void cuda_CrossEntropy_forward_B_kernel(float *logits_grad, int size, int count) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) logits_grad[i] /= count;
}
// ReLU
__global__
void cuda_ReLU_forward_kernel(float *d_in_data, bool *d_mask, const long unsigned int datasize, bool training) {
uint i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= datasize) return;
bool keep = d_in_data[i] > 0;
if (training) d_mask[i] = keep;
if (!keep) d_in_data[i] = 0;
}
__global__
void cuda_ReLU_backward_kernel(float *d_in_grad, bool *d_mask, long unsigned int datasize) {
uint i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= datasize) return;
if (!d_mask[i]) d_in_grad[i] = 0;
}
// Dropout
__global__
void cuda_Dropout_forward_kernel(float *in, int *mask, curandState *state, const uint size, const float p, const float scale, const bool useMask) {
float x;
bool keep;
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
x = curand_uniform(&state[id % MAX_THREAD_PER_BLOCK]);
keep = x >= p;
in[id] *= keep ? scale : 0;
if (useMask) mask[id] = keep;
}
}
__global__
void cuda_Dropout_backward_kernel(float *in_grad, const int *mask, const uint size, const float scale) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) in_grad[id] *= mask[id] ? scale : 0;
}
// rand state
__global__
void cuda_init_rand_kernel(curandState *state) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(1234, id, 0, &state[id]);
}
void cuda_init_random_state(const uint size) {
// malloc
CUDA_CHECK(cudaMalloc((void**) &devStates, size * sizeof(curandState)));
dim3 block((size-1)/MAX_THREAD_PER_BLOCK + 1, 1, 1);
dim3 thread_in_block(MAX_THREAD_PER_BLOCK, 1, 1);
// kernel
cuda_init_rand_kernel<<<block,thread_in_block>>>(devStates);
CUDA_CHECK(cudaGetLastError());
// CUDA_CHECK(cudaDeviceSynchronize());
}
void cuda_free_random_state() {
// free
CUDA_CHECK(cudaFree(devStates));
}
// adam
__global__
void cuda_Adam_step_kernel(float* grad, float* data, float* m, float* v, bool decay, float weight_decay, float beta1, float beta2, float eps, float step_size, int varsize) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= varsize) return;
float g = grad[i];
if (decay) g += weight_decay * data[i];
m[i] = beta1 * m[i] + (1.0 - beta1) * g;
v[i] = beta2 * v[i] + (1.0 - beta2) * g * g;
data[i] -= step_size * m[i] / (sqrtf(v[i]) + eps);
}
__global__
void cuda_set_truth_kernel(int *truth, int *data_split, int *data_label, int current_split, int size) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size)
truth[id] = data_split[id] == current_split ? data_label[id] : -1;
}
__global__
void cuda_Variable_glorot_kernel(float *data, curandState *state, int size, float scale) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size)
data[id] = (curand_uniform(&state[id % MAX_THREAD_PER_BLOCK]) - 0.5) * scale;
}
|
b694f75dbd7b26a4f3b09e637c7345970a00103e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
//***************************************************************************************/
//
// Based on Pointnet2 Library (MIT License):
// https://github.com/sshaoshuai/Pointnet2.PyTorch
//
// Copyright (c) 2019 Shaoshuai Shi
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
//***************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include "ATen/hip/HIPContext.h"
#include "open3d/ml/contrib/PointSampling.cuh"
#include "open3d/ml/contrib/cuda_utils.h"
#include "open3d/ml/pytorch/pointnet/SamplingKernel.h"
using namespace open3d::ml::contrib;
void furthest_point_sampling_launcher(
int b, int n, int m, const float *dataset, float *temp, int *idxs) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
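// Farthest point sampling: iteratively picks the point with the largest distance to the
// already-selected set, using temp as the per-point running minimum distance. The switch
// below chooses a power-of-two block size (opt_n_threads) and passes it as the kernel's
// template parameter so the in-block reduction can be unrolled for that size.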
hipError_t err;
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
unsigned int n_threads = opt_n_threads(n);
switch (n_threads) {
case 1024:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<1024>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 512:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 256:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<256>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 128:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<128>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 64:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<64>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 32:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<32>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 16:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<16>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 8:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<8>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 4:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<4>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 2:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<2>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 1:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<1>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
default:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
}
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| b694f75dbd7b26a4f3b09e637c7345970a00103e.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
//***************************************************************************************/
//
// Based on Pointnet2 Library (MIT License):
// https://github.com/sshaoshuai/Pointnet2.PyTorch
//
// Copyright (c) 2019 Shaoshuai Shi
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
//***************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include "ATen/cuda/CUDAContext.h"
#include "open3d/ml/contrib/PointSampling.cuh"
#include "open3d/ml/contrib/cuda_utils.h"
#include "open3d/ml/pytorch/pointnet/SamplingKernel.h"
using namespace open3d::ml::contrib;
void furthest_point_sampling_launcher(
int b, int n, int m, const float *dataset, float *temp, int *idxs) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
cudaError_t err;
auto stream = at::cuda::getCurrentCUDAStream();
unsigned int n_threads = opt_n_threads(n);
switch (n_threads) {
case 1024:
furthest_point_sampling_kernel<1024>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 512:
furthest_point_sampling_kernel<512>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 256:
furthest_point_sampling_kernel<256>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 128:
furthest_point_sampling_kernel<128>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 64:
furthest_point_sampling_kernel<64>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 32:
furthest_point_sampling_kernel<32>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 16:
furthest_point_sampling_kernel<16>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 8:
furthest_point_sampling_kernel<8>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 4:
furthest_point_sampling_kernel<4>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 2:
furthest_point_sampling_kernel<2>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 1:
furthest_point_sampling_kernel<1>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
default:
furthest_point_sampling_kernel<512>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
}
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
5d99a72c6c29d2da41a3805ac9fd653ced4d4d0b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "onnx-tensorrt/onnxplugin.hpp"
using namespace ONNXPlugin;
static __device__ float sigmoid(float x){
return 1 / (1 + expf(-x));
}
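// The kernel below computes x * sigmoid(x), i.e. the SiLU/Swish activation.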
static __global__ void MYSELU_kernel_fp32(const float* x, float* output, int edge) {
int position = threadIdx.x + blockDim.x * blockIdx.x;
if(position >= edge) return;
output[position] = x[position] * sigmoid(x[position]);
}
class MYSELU : public TRTPlugin {
public:
SetupPlugin(MYSELU);
virtual void config_finish() override{
printf("\033[33minit MYSELU config: %s\033[0m\n", config_->info_.c_str());
printf("weights count is %d\n", config_->weights_.size());
}
int enqueue(const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, hipStream_t stream) override{
int n = inputs[0].count();
const int nthreads = 512;
int block_size = n < nthreads ? n : nthreads;
int grid_size = (n + block_size - 1) / block_size;
hipLaunchKernelGGL(( MYSELU_kernel_fp32) , dim3(grid_size), dim3(block_size), 0, stream, inputs[0].ptr<float>(), outputs[0].ptr<float>(), n);
return 0;
}
};
RegisterPlugin(MYSELU); | 5d99a72c6c29d2da41a3805ac9fd653ced4d4d0b.cu |
#include <cuda_runtime.h>
#include "onnx-tensorrt/onnxplugin.hpp"
using namespace ONNXPlugin;
static __device__ float sigmoid(float x){
return 1 / (1 + expf(-x));
}
static __global__ void MYSELU_kernel_fp32(const float* x, float* output, int edge) {
int position = threadIdx.x + blockDim.x * blockIdx.x;
if(position >= edge) return;
output[position] = x[position] * sigmoid(x[position]);
}
class MYSELU : public TRTPlugin {
public:
SetupPlugin(MYSELU);
virtual void config_finish() override{
printf("\033[33minit MYSELU config: %s\033[0m\n", config_->info_.c_str());
printf("weights count is %d\n", config_->weights_.size());
}
int enqueue(const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, cudaStream_t stream) override{
int n = inputs[0].count();
const int nthreads = 512;
int block_size = n < nthreads ? n : nthreads;
int grid_size = (n + block_size - 1) / block_size;
MYSELU_kernel_fp32 <<<grid_size, block_size, 0, stream>>> (inputs[0].ptr<float>(), outputs[0].ptr<float>(), n);
return 0;
}
};
RegisterPlugin(MYSELU); |
c01f4b8fdad0510d54427178583ef8dea9839b56.hip | // !!! This is a file automatically generated by hipify!!!
//Libraries for cuda runtime
#include <rocblas.h>
#include <hiprand/hiprand.h>
//Standard C libraries
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "Display_Matrix.h"
#include "Random_Matrix.h"
#include "Inverse_Matrix.h"
#include "PInverse_Matrix.h"
#include "Load_Matrix.h"
#include "train.h"
#include "test.h"
//Main function
int main()
{
float *Mat_f;
float *beta;
float *op_matrix;
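    // Mat_f: random input-to-hidden weights (ip_num x hid_num); beta: hidden-to-output
    // weights (hid_num x op_num); op_matrix: network outputs on the test set.
    // Roles inferred from the allocation sizes and the Train_elm/Test_elm calls below.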
unsigned long training_samples = 50000;
unsigned long testing_samples = 10000;
unsigned long ip_num = 64;
unsigned long op_num = 10;
unsigned long hid_num = 20;
Mat_f= (float *)malloc(ip_num* hid_num* sizeof(float));
beta= (float *)malloc(hid_num* op_num* sizeof(float));
op_matrix= (float *)malloc(testing_samples* op_num* sizeof(float));
float *X_Train = (float *)malloc(training_samples * ip_num * sizeof(float));
float *Y_Train = (float *)malloc(training_samples * op_num * sizeof(float));
float *X_Test = (float *)malloc(testing_samples * ip_num * sizeof(float));
float *Y_Test = (float *)malloc(testing_samples * op_num * sizeof(float));
Import_Fromfile(X_Train,"features_cifar10/train_features.csv");
Import_Fromfile(Y_Train,"features_cifar10/train_labels.csv");
Import_Fromfile(X_Test,"features_cifar10/test_features.csv");
Import_Fromfile(Y_Test,"features_cifar10/test_labels.csv");
//// Calling a training function of ELM
Train_elm(X_Train,Y_Train,Mat_f,beta,ip_num,hid_num,op_num,training_samples);
//// Calling a testing function of ELM
Test_elm(X_Test,Y_Test,Mat_f,beta,op_matrix,ip_num,hid_num,op_num,testing_samples);
/// Output Matrix and Accuracy
Display_Matrix(op_matrix,Y_Test,testing_samples,op_num);
printf("\n");
return 0;
} | c01f4b8fdad0510d54427178583ef8dea9839b56.cu | //Libraries for cuda runtime
#include <cublas_v2.h>
#include <curand.h>
//Standard C libraries
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "Display_Matrix.h"
#include "Random_Matrix.h"
#include "Inverse_Matrix.h"
#include "PInverse_Matrix.h"
#include "Load_Matrix.h"
#include "train.h"
#include "test.h"
//Main function
int main()
{
float *Mat_f;
float *beta;
float *op_matrix;
unsigned long training_samples = 50000;
unsigned long testing_samples = 10000;
unsigned long ip_num = 64;
unsigned long op_num = 10;
unsigned long hid_num = 20;
Mat_f= (float *)malloc(ip_num* hid_num* sizeof(float));
beta= (float *)malloc(hid_num* op_num* sizeof(float));
op_matrix= (float *)malloc(testing_samples* op_num* sizeof(float));
float *X_Train = (float *)malloc(training_samples * ip_num * sizeof(float));
float *Y_Train = (float *)malloc(training_samples * op_num * sizeof(float));
float *X_Test = (float *)malloc(testing_samples * ip_num * sizeof(float));
float *Y_Test = (float *)malloc(testing_samples * op_num * sizeof(float));
Import_Fromfile(X_Train,"features_cifar10/train_features.csv");
Import_Fromfile(Y_Train,"features_cifar10/train_labels.csv");
Import_Fromfile(X_Test,"features_cifar10/test_features.csv");
Import_Fromfile(Y_Test,"features_cifar10/test_labels.csv");
//// Calling a training function of ELM
Train_elm(X_Train,Y_Train,Mat_f,beta,ip_num,hid_num,op_num,training_samples);
//// Calling a testing function of ELM
Test_elm(X_Test,Y_Test,Mat_f,beta,op_matrix,ip_num,hid_num,op_num,testing_samples);
/// Output Matrix and Accuracy
Display_Matrix(op_matrix,Y_Test,testing_samples,op_num);
printf("\n");
return 0;
} |
acfb4976d900dd62a178321841d240d3ac71cac2.hip | // !!! This is a file automatically generated by hipify!!!
#include "blas.h"
#include <stdio.h>
#include "utils.h"
#include "common.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
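// IDX2C maps (row i, column j) of a column-major matrix with leading dimension ld to a
// linear offset, e.g. IDX2C(2,3,6) = 3*6 + 2 = 20. modify() below uses it with strided
// hipblasSscal calls: the first scales row p from column q onward by alpha (stride ldm
// steps along a row), the second scales column q from row p downward by beta (stride 1).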
static __inline__ void modify (hipblasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta){
hipblasSscal (handle, n-q, &alpha, &m[IDX2C(p,q,ldm)], ldm);
hipblasSscal (handle, ldm-p, &beta, &m[IDX2C(p,q,ldm)], 1);
}
int main (void){
hipError_t cudaStat;
hipblasStatus_t stat;
hipblasHandle_t handle;
int i, j;
float* devPtrA;
float* a = 0;
int M = 6,N=5;
a = (float *)malloc (M * N * sizeof (*a));
if (!a) {
printf ("host memory allocation failed");
return EXIT_FAILURE;
}
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
a[IDX2C(i,j,M)] = (float)(i * N + j + 1);
}
}
cudaStat = hipMalloc ((void**)&devPtrA, M*N*sizeof(*a));
if (cudaStat != hipSuccess) {
printf ("device memory allocation failed");
return EXIT_FAILURE;
}
stat = hipblasCreate(&handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
stat = hipblasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("data download failed");
hipFree (devPtrA);
hipblasDestroy(handle);
return EXIT_FAILURE;
}
modify (handle, devPtrA, M, N, 1, 2, 16.0f, 12.0f);
stat = hipblasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("data upload failed");
hipFree (devPtrA);
hipblasDestroy(handle);
return EXIT_FAILURE;
}
hipFree (devPtrA);
hipblasDestroy(handle);
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
printf ("%7.0f", a[IDX2C(i,j,M)]);
}
printf ("\n");
}
free(a);
return EXIT_SUCCESS;
}
| acfb4976d900dd62a178321841d240d3ac71cac2.cu | #include "blas.h"
#include <stdio.h>
#include "utils.h"
#include "common.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
static __inline__ void modify (cublasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta){
cublasSscal (handle, n-q, &alpha, &m[IDX2C(p,q,ldm)], ldm);
cublasSscal (handle, ldm-p, &beta, &m[IDX2C(p,q,ldm)], 1);
}
int main (void){
cudaError_t cudaStat;
cublasStatus_t stat;
cublasHandle_t handle;
int i, j;
float* devPtrA;
float* a = 0;
int M = 6,N=5;
a = (float *)malloc (M * N * sizeof (*a));
if (!a) {
printf ("host memory allocation failed");
return EXIT_FAILURE;
}
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
a[IDX2C(i,j,M)] = (float)(i * N + j + 1);
}
}
cudaStat = cudaMalloc ((void**)&devPtrA, M*N*sizeof(*a));
if (cudaStat != cudaSuccess) {
printf ("device memory allocation failed");
return EXIT_FAILURE;
}
stat = cublasCreate(&handle);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
stat = cublasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("data download failed");
cudaFree (devPtrA);
cublasDestroy(handle);
return EXIT_FAILURE;
}
modify (handle, devPtrA, M, N, 1, 2, 16.0f, 12.0f);
stat = cublasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("data upload failed");
cudaFree (devPtrA);
cublasDestroy(handle);
return EXIT_FAILURE;
}
cudaFree (devPtrA);
cublasDestroy(handle);
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
printf ("%7.0f", a[IDX2C(i,j,M)]);
}
printf ("\n");
}
free(a);
return EXIT_SUCCESS;
}
|
f4c2944f6efc371fd2d7476e2abc5f08ec16648a.hip | // !!! This is a file automatically generated by hipify!!!
//xfail:BOOGIE_ERROR:data race
//--blockDim=2 --gridDim=1 --no-inline
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <sm_atomic_functions.h>
#define N 2
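// Because thread 0 calls atomicAdd(i, 0), it does not advance the counter, so both
// threads can observe the same returned index j and then race on A[j] -- the data race
// this xfail test expects the verifier to report.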
__global__ void race_test (unsigned int* i, int* A)
{
int tid = threadIdx.x;
int j = atomicAdd(i,tid);
A[j] = tid;
}
| f4c2944f6efc371fd2d7476e2abc5f08ec16648a.cu | //xfail:BOOGIE_ERROR:data race
//--blockDim=2 --gridDim=1 --no-inline
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda.h>
#include <sm_atomic_functions.h>
#define N 2
__global__ void race_test (unsigned int* i, int* A)
{
int tid = threadIdx.x;
int j = atomicAdd(i,tid);
A[j] = tid;
}
|
5aee6f77383c3103015a0e1479e4493fafe04634.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** Kernels for convUp, convDown, convOutp, maxpool, avgpool, maxpoolundo,
* avgpoolundo.
* These kernels are 10-20% slower than cuda-convnet2, but have no constraints
* on number of channels and support rectangular images and rectangular kernels.
* They use hipblasSgemm for convUp, convDown, convOutp.
* Data layout : Column-major
* data : (num_images, image_size_x, image_size_y, num_input_channels)
* filters : (num_output_channels, kernel_size_x, kernel_size_y, num_input_channels)
*/
#include "cudamat_conv_gemm.cuh"
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
#define MAX_MEMORY_BYTES (200 * (1 << 20))
inline void GetTempMemory(int num_images, int input_size, int num_output_channels,
                          int num_modules, float *&input, float *&output,
int* batch_size) {
int input_memory_size = num_images * input_size * sizeof(float);
int output_memory_size = num_images * num_output_channels * sizeof(float);
int max_batch_size = ((long) MAX_MEMORY_BYTES) / (input_memory_size + output_memory_size);
max_batch_size = MIN(max_batch_size, num_modules);
max_batch_size = MIN(max_batch_size, 4096);
max_batch_size = MAX(max_batch_size, 1);
hipError_t err1, err2;
err1 = hipMalloc((void**)&input, max_batch_size * input_memory_size);
err2 = hipMalloc((void**)&output, max_batch_size * output_memory_size);
if (hipSuccess != err1 || hipSuccess != err2) {
if (hipSuccess == err1) hipFree(input);
if (hipSuccess == err2) hipFree(output);
err1 = hipMalloc((void**)&input, input_memory_size);
err2 = hipMalloc((void**)&output, output_memory_size);
if (hipSuccess != err1 || hipSuccess != err2) {
printf("Out of memory on GPU! %s \n", hipGetErrorString(err1));
printf("Out of memory on GPU! %s \n", hipGetErrorString(err2));
}
*batch_size = 1;
} else {
*batch_size = max_batch_size;
}
}
void FreeTempMemory(float* input, float* output) {
hipFree(input);
hipFree(output);
}
inline bool check_cublas_error() {
cublasStatus status = hipblasGetError();
return status != HIPBLAS_STATUS_SUCCESS;
}
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
class AvgPooler {
public:
__device__ inline float operator()(const float a, const float b) const {
return a + b;
}
__device__ inline float getBaseValue() const {
return 0;
}
__device__ inline float output(const float a, const int regionSize) const {
return a / regionSize;
}
};
class MaxPooler {
public:
__device__ inline float operator()(const float a, const float b) const {
return fmaxf(a, b);
}
__device__ inline float getBaseValue() const {
return -2e38;
}
__device__ inline float output(const float a, const int regionSize) const {
return a;
}
};
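// kExpand performs an im2col-style gather: for each output module in the current batch it
// copies the (possibly zero-padded) input patch into the expanded matrix consumed by Sgemm.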
__global__ void kExpand(float *images, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x,
int num_modules_y, int num_modules_x,
int kernel_size_y, int kernel_size_x,
int padding_y, int padding_x,
int stride_y, int stride_x,
int num_modules_batch, int module_id_offset) {
int color = blockIdx.y;
int src_module_id = module_id_offset + blockIdx.x;
int dst_module_id = blockIdx.x;
int module_id_x = src_module_id % num_modules_x;
int module_id_y = src_module_id / num_modules_x;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int Y, X;
long target_id, source_id;
images += num_images * image_size_x * image_size_y * color;
targets += num_images * (dst_module_id + num_modules_batch * (kernel_size_y * kernel_size_x * color));
for (int y = 0; y < kernel_size_y; y++) {
Y = startY + y;
for (int x = 0; x < kernel_size_x; x++) {
X = startX + x;
target_id = num_images * num_modules_batch * (x + kernel_size_x * y);
source_id = num_images * (X + image_size_x * Y);
if (X < 0 || X >= image_size_x || Y < 0 || Y >= image_size_y) {
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
targets[target_id + im] = 0;
}
} else {
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
targets[target_id + im] = images[source_id + im];
}
}
__syncthreads();
}
}
}
template <class Pooler>
__global__ void kPool(float *images, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x,
int num_modules_y, int num_modules_x,
int kernel_size_y, int kernel_size_x,
int padding_y, int padding_x,
int stride_y, int stride_x, float scaleOutput,
Pooler pooler) {
int color = blockIdx.y;
int num_modules = num_modules_y * num_modules_x;
long source_id, target_id;
images += num_images * image_size_x * image_size_y * color;
targets += num_images * num_modules * color;
for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) {
int module_id_x = module_id % num_modules_x;
int module_id_y = module_id / num_modules_x;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
target_id = num_images * module_id;
int endY = startY + kernel_size_y;
int endX = startX + kernel_size_x;
startY = MAX(startY, 0);
startX = MAX(startX, 0);
endY = MIN(endY , image_size_y);
endX = MIN(endX , image_size_x);
int regionSize = (endX - startX) * (endY - startY);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
float val = pooler.getBaseValue();
for (int Y = startY; Y < endY; Y++) {
for (int X = startX; X < endX; X++) {
source_id = num_images * (X + image_size_x * Y);
val = pooler(val, images[source_id + im]);
}
}
targets[target_id + im] = scaleOutput * pooler.output(val, regionSize);
}
}
__syncthreads();
}
__global__ void kAvgPoolUndo(float *derivs, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x,
int num_modules_y, int num_modules_x,
int kernel_size_y, int kernel_size_x,
int padding_y, int padding_x,
int stride_y, int stride_x, float scaleOutput) {
int color = blockIdx.y;
int num_modules = num_modules_y * num_modules_x;
long source_id;
derivs += num_images * num_modules * color;
targets += num_images * image_size_x * image_size_y * color;
for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) {
int module_id_x = module_id % num_modules_x;
int module_id_y = module_id / num_modules_x;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
source_id = num_images * module_id;
int endY = startY + kernel_size_y;
int endX = startX + kernel_size_x;
startY = MAX(startY, 0);
startX = MAX(startX, 0);
endY = MIN(endY , image_size_y);
endX = MIN(endX , image_size_x);
int regionSize = (endX - startX) * (endY - startY);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
float val = scaleOutput * derivs[source_id + im] / regionSize;
for (int Y = startY; Y < endY; Y++) {
for (int X = startX; X < endX; X++) {
atomicAdd(&targets[num_images * (X + image_size_x * Y) + im], val);
__syncthreads();
}
}
}
}
}
__global__ void kMaxPoolUndo(float * images, float *derivs, float* maxes, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x,
int num_modules_y, int num_modules_x,
int kernel_size_y, int kernel_size_x,
int padding_y, int padding_x,
int stride_y, int stride_x, float scaleOutput) {
int color = blockIdx.y;
int num_modules = num_modules_y * num_modules_x;
long source_id, target_id;
derivs += num_images * num_modules * color;
maxes += num_images * num_modules * color;
targets += num_images * image_size_x * image_size_y * color;
images += num_images * image_size_x * image_size_y * color;
for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) {
int module_id_x = module_id % num_modules_x;
int module_id_y = module_id / num_modules_x;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
source_id = num_images * module_id;
int endY = startY + kernel_size_y;
int endX = startX + kernel_size_x;
startY = MAX(startY, 0);
startX = MAX(startX, 0);
endY = MIN(endY , image_size_y);
endX = MIN(endX , image_size_x);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
float val = scaleOutput * derivs[source_id + im];
for (int Y = startY; Y < endY; Y++) {
for (int X = startX; X < endX; X++) {
target_id = num_images * (X + image_size_x * Y) + im;
if (images[target_id] == maxes[source_id + im]) {
atomicAdd(&targets[target_id], val);
}
__syncthreads();
}
}
}
}
}
__global__ void kContract(float *expanded_data, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x,
int num_modules_y, int num_modules_x,
int kernel_size_y, int kernel_size_x,
int padding_y, int padding_x,
int stride_y, int stride_x,
int num_modules_batch, int module_id_offset) {
int color = blockIdx.y;
int dst_module_id = module_id_offset + blockIdx.x;
int src_module_id = blockIdx.x;
int module_id_x = dst_module_id % num_modules_x;
int module_id_y = dst_module_id / num_modules_x;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int Y, X;
long target_id, source_id;
targets += num_images * image_size_x * image_size_y * color;
expanded_data += num_images * (src_module_id + num_modules_batch * (kernel_size_y * kernel_size_x * color));
for (int y = 0; y < kernel_size_y; y++) {
Y = startY + y;
for (int x = 0; x < kernel_size_x; x++) {
X = startX + x;
source_id = num_images * num_modules_batch * (x + kernel_size_x * y);
target_id = num_images * (X + image_size_x * Y);
if (X < 0 || X >= image_size_x || Y < 0 || Y >= image_size_y) {
// do nothing.
} else {
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
atomicAdd(&targets[target_id + im], expanded_data[source_id + im]);
__syncthreads();
}
}
}
}
}
__global__ void kWriteRows(float* data, float* target,
int num_images, int num_modules,
int num_modules_batch, int module_id_offset,
float beta) {
int c = blockIdx.y;
int src_module_id = blockIdx.x;
int dst_module_id = module_id_offset + blockIdx.x;
data += num_images * (src_module_id + c * num_modules_batch);
target += num_images * (dst_module_id + c * num_modules);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
target[im] = beta * data[im];
}
}
__global__ void kReadRows(float* data, float* target,
int num_images, int num_modules,
int num_modules_batch, int module_id_offset) {
int c = blockIdx.y;
int src_module_id = module_id_offset + blockIdx.x;
int dst_module_id = blockIdx.x;
data += num_images * (src_module_id + c * num_modules);
target += num_images * (dst_module_id + c * num_modules_batch);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
target[im] = data[im];
}
}
__global__ void kWriteRowsMult(float* data, float* target,
int num_images, int num_modules,
int num_modules_batch, int module_id_offset,
float alpha, float beta) {
int c = blockIdx.y;
int src_module_id = blockIdx.x;
int dst_module_id = module_id_offset + blockIdx.x;
data += num_images * (src_module_id + c * num_modules_batch);
target += num_images * (dst_module_id + c * num_modules);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
target[im] = alpha * target[im] + beta * data[im];
}
}
__global__ void kCrossMapDenoms(float* data, float* denoms,
int num_locs, int batch_locs, int batch_offset, float addScale,
int num_filters, int k, bool blocked) {
long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
data += batch_offset + loc_id;
denoms += loc_id;
if (batch_offset + loc_id < num_locs) {
for (int j = 0; j < num_filters; j++) {
float sum = 0;
int start = blocked ? (j / k) * k : MAX(0, -k/2 + j);
int end = MIN(num_filters, start + k);
for (int i = start; i < end; i++) {
sum += data[i * num_locs] * data[i * num_locs];
}
denoms[j * batch_locs] = 1 + addScale * sum;
}
}
}
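// Cross-map response normalization:
//   target_j = data_j * (1 + addScale * sum_{i in window(j)} data_i^2)^(-powScale),
// where the window of k maps is either blocked (aligned groups of k) or centered on j.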
__global__ void kCrossMapRNorm(float* data, float* target,
int num_locs, float addScale, float powScale,
int num_filters, int k, bool blocked) {
long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
data += loc_id;
target += loc_id;
if (loc_id < num_locs) {
for (int j = 0; j < num_filters; j++) {
float sum = 0;
int start = blocked ? (j / k) * k : MAX(0, -k/2 + j);
int end = MIN(num_filters, start + k);
for (int i = start; i < end; i++) {
sum += data[i * num_locs] * data[i * num_locs];
}
target[j * num_locs] = data[j * num_locs] * __powf(1 + addScale * sum, -powScale);
}
}
}
__global__ void kCrossMapRNormUndo(float* data, float* deriv, float* denoms, float* target,
int num_locs, int batch_locs, int batch_offset, float addScale, float powScale,
int num_filters, int k, bool blocked) {
long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
data += batch_offset + loc_id;
target += batch_offset + loc_id;
deriv += batch_offset + loc_id;
denoms += loc_id;
if (batch_offset + loc_id < num_locs) {
for (int j = 0; j < num_filters; j++) {
float sum = 0;
int start = blocked ? (j / k) * k : MAX(0, -k/2 + j);
int end = MIN(num_filters, start + k);
for (int i = start; i < end; i++) {
sum += deriv[i * num_locs] * data[i * num_locs] * __powf(denoms[i * batch_locs], -powScale - 1);
}
target[j * num_locs] = deriv[j * num_locs] * __powf(denoms[j * batch_locs], -powScale) -
2 * addScale * powScale * data[j * num_locs] * sum;
}
}
}
void _convUpGemm(cudamat* images, cudamat* filters, cudamat* targets,
Shape4D images_shape, Shape4D filters_shape,
Shape4D targets_shape, ConvDesc conv_desc,
float scaleTargets, float scaleOutput, bool conv) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_groups = conv_desc.num_groups;
int num_output_channels2 = targets_shape.shape[3];
int num_modules_y = targets_shape.shape[2];
int num_modules_x = targets_shape.shape[1];
int num_images = targets_shape.shape[0];
int num_input_channels2 = images_shape.shape[3];
int image_size_y = images_shape.shape[2];
int image_size_x = images_shape.shape[1];
int num_images2 = images_shape.shape[0];
int num_input_channels3 = filters_shape.shape[3];
int kernel_size_y2 = filters_shape.shape[2];
int kernel_size_x2 = filters_shape.shape[1];
int num_output_channels3 = filters_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
int input_size = kernel_size_y * kernel_size_x * num_input_channels;
int filterModuleMult = conv ? 1 : num_modules;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_output_channels == num_output_channels3);
assert (num_input_channels == num_input_channels2);
assert (num_input_channels == num_input_channels3 / filterModuleMult);
assert (num_images == images->size[0]);
assert (num_images == targets->size[0]);
assert (num_output_channels == filters->size[0]);
assert (image_size_y * image_size_x * num_input_channels == images->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == targets->size[1]);
assert (kernel_size_y * kernel_size_x * num_input_channels * filterModuleMult == filters->size[1]);
assert (kernel_size_y == kernel_size_y2);
assert (kernel_size_x == kernel_size_x2);
assert (num_input_channels % num_groups == 0);
assert (num_groups == 1);
  // Batch size should be a multiple of 128 for max utilization; it will still work if it isn't.
int num_threads_x = MIN(num_images, 128);
float *expanded_images = NULL, *expanded_target = NULL;
int num_modules_batch;
int input_memory_size = num_images * input_size * sizeof(float);
int output_memory_size = num_images * num_output_channels * sizeof(float);
int max_batch_size = ((long) MAX_MEMORY_BYTES) / (input_memory_size + output_memory_size);
max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult);
max_batch_size = MIN(max_batch_size, 4096);
max_batch_size = MAX(max_batch_size, 1);
hipError_t err1, err2;
err1 = hipMalloc((void**)&expanded_images, max_batch_size * input_memory_size);
err2 = hipMalloc((void**)&expanded_target, max_batch_size * output_memory_size);
if (hipSuccess != err1 || hipSuccess != err2) {
if (hipSuccess == err1) hipFree(expanded_images);
if (hipSuccess == err2) hipFree(expanded_target);
err1 = hipMalloc((void**)&expanded_images, input_memory_size);
err2 = hipMalloc((void**)&expanded_target, output_memory_size);
if (hipSuccess != err1 || hipSuccess != err2) {
printf("Out of memory on GPU! %s \n", hipGetErrorString(err1));
printf("Out of memory on GPU! %s \n", hipGetErrorString(err2));
}
num_modules_batch = 1;
} else {
num_modules_batch = max_batch_size;
}
int num_iter = DIVUP(num_modules, num_modules_batch);
int module_id_start = 0;
float* w = filters->data_device;
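  // Per iteration: (1) kExpand builds an im2col matrix of shape
  // (num_images * batch, kernel_size_x * kernel_size_y * num_input_channels) for a batch
  // of output modules, (2) one Sgemm multiplies it by the filter matrix, and
  // (3) kWriteRows/kWriteRowsMult scatter the rows into the (images, modules, channels) target.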
for (int i = 0; i < num_iter; i++) {
int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start);
//printf("Step %d num_modules %d\n", i, this_num_modules_batch);
dim3 threads(num_threads_x);
dim3 blocks = dim3(this_num_modules_batch, num_input_channels);
hipLaunchKernelGGL(( kExpand), dim3(blocks), dim3(threads), 0, 0, images->data_device, expanded_images,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x,
this_num_modules_batch, module_id_start);
if (!conv) w += num_output_channels * input_size;
hipblasSgemm('n', 't',
num_images * this_num_modules_batch, num_output_channels,
kernel_size_x * kernel_size_y * num_input_channels,
1, expanded_images, num_images * this_num_modules_batch,
w, num_output_channels,
0, expanded_target, num_images * this_num_modules_batch);
dim3 blocks2 = dim3(this_num_modules_batch, num_output_channels);
if (scaleTargets == 0) {
hipLaunchKernelGGL(( kWriteRows), dim3(blocks2), dim3(threads), 0, 0, expanded_target, targets->data_device,
num_images, num_modules,
this_num_modules_batch, module_id_start,
scaleOutput);
} else {
hipLaunchKernelGGL(( kWriteRowsMult), dim3(blocks2), dim3(threads), 0, 0, expanded_target, targets->data_device,
num_images, num_modules,
this_num_modules_batch, module_id_start,
scaleTargets, scaleOutput);
}
module_id_start += this_num_modules_batch;
}
FreeTempMemory(expanded_images, expanded_target);
getLastCudaError("convUpGemm: kernel execution failed");
}
void _convDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets,
Shape4D derivs_shape, Shape4D filters_shape,
Shape4D targets_shape, ConvDesc conv_desc,
float scaleTargets, float scaleOutput, bool conv) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_groups = conv_desc.num_groups;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = targets_shape.shape[3];
int image_size_y = targets_shape.shape[2];
int image_size_x = targets_shape.shape[1];
int num_images2 = targets_shape.shape[0];
int num_input_channels3 = filters_shape.shape[3];
int kernel_size_y2 = filters_shape.shape[2];
int kernel_size_x2 = filters_shape.shape[1];
int num_output_channels3 = filters_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
int input_size = kernel_size_y * kernel_size_x * num_input_channels;
int filterModuleMult = conv ? 1 : num_modules;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_output_channels == num_output_channels3);
assert (num_input_channels == num_input_channels2);
assert (num_input_channels == num_input_channels3 / filterModuleMult);
assert (num_images == targets->size[0]);
assert (num_images == derivs->size[0]);
assert (num_output_channels == filters->size[0]);
assert (image_size_y * image_size_x * num_input_channels == targets->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
assert (kernel_size_y * kernel_size_x * num_input_channels * filterModuleMult == filters->size[1]);
assert (kernel_size_y == kernel_size_y2);
assert (kernel_size_x == kernel_size_x2);
assert (num_input_channels % num_groups == 0);
assert (num_groups == 1);
  int num_threads_x = MIN(num_images, 128); // Batch size should be a multiple of 128 for max utilization; it will still work if it isn't.
float *expanded_target = NULL, *expanded_derivs = NULL;
int num_modules_batch;
//GetTempMemory(num_images, input_size, num_output_channels, num_modules / filterModuleMult,
// expanded_target, expanded_derivs, &num_modules_batch);
int input_memory_size = num_images * input_size * sizeof(float);
int output_memory_size = num_images * num_output_channels * sizeof(float);
int max_batch_size = ((long) MAX_MEMORY_BYTES) / (input_memory_size + output_memory_size);
max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult);
max_batch_size = MIN(max_batch_size, 4096);
max_batch_size = MAX(max_batch_size, 1);
hipError_t err1, err2;
err1 = hipMalloc((void**)&expanded_target, max_batch_size * input_memory_size);
err2 = hipMalloc((void**)&expanded_derivs, max_batch_size * output_memory_size);
if (hipSuccess != err1 || hipSuccess != err2) {
if (hipSuccess == err1) hipFree(expanded_target);
if (hipSuccess == err2) hipFree(expanded_derivs);
err1 = hipMalloc((void**)&expanded_target, input_memory_size);
err2 = hipMalloc((void**)&expanded_derivs, output_memory_size);
if (hipSuccess != err1 || hipSuccess != err2) {
printf("Out of memory on GPU! %s \n", hipGetErrorString(err1));
printf("Out of memory on GPU! %s \n", hipGetErrorString(err2));
}
num_modules_batch = 1;
} else {
num_modules_batch = max_batch_size;
}
int num_iter = DIVUP(num_modules, num_modules_batch);
if (scaleTargets == 0) {
hipMemset(targets->data_device, 0, sizeof(float) * targets->size[0] * targets->size[1]);
} else if (scaleTargets != 1) {
    hipblasSscal(targets->size[0] * targets->size[1], scaleTargets, targets->data_device, 1);  // Sscal takes an element count, not a byte count.
}
int module_id_start = 0;
float* w = filters->data_device;
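  // Per iteration: (1) kReadRows gathers the output derivatives for a batch of modules,
  // (2) Sgemm maps them back to per-patch input derivatives, and (3) kContract performs a
  // col2im-style scatter, accumulating overlapping patches into the target with atomicAdd.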
for (int i = 0; i < num_iter; i++) {
int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start);
//printf("Step %d num_modules %d\n", i, this_num_modules_batch);
dim3 blocks = dim3(this_num_modules_batch, num_output_channels);
dim3 threads(num_threads_x);
hipLaunchKernelGGL(( kReadRows), dim3(blocks), dim3(threads), 0, 0, derivs->data_device, expanded_derivs,
num_images, num_modules,
this_num_modules_batch, module_id_start);
if (!conv) w += num_output_channels * input_size;
hipblasSgemm('n', 'n',
num_images * this_num_modules_batch, kernel_size_x * kernel_size_y * num_input_channels,
num_output_channels,
scaleOutput, expanded_derivs, num_images * this_num_modules_batch,
w, num_output_channels,
0, expanded_target, num_images * this_num_modules_batch);
if (check_cublas_error()) {
printf("Error in dot or before it.\n");
}
dim3 blocks2 = dim3(this_num_modules_batch, num_input_channels);
hipLaunchKernelGGL(( kContract), dim3(blocks2), dim3(threads), 0, 0, expanded_target, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x,
this_num_modules_batch, module_id_start);
module_id_start += this_num_modules_batch;
}
FreeTempMemory(expanded_target, expanded_derivs);
getLastCudaError("convDownGemm: kernel execution failed");
}
void _convOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets,
Shape4D images_shape, Shape4D derivs_shape, Shape4D targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput, bool conv) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_groups = conv_desc.num_groups;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = images_shape.shape[3];
int image_size_y = images_shape.shape[2];
int image_size_x = images_shape.shape[1];
int num_images2 = images_shape.shape[0];
int num_input_channels3Mult = targets_shape.shape[3];
int kernel_size_y2 = targets_shape.shape[2];
int kernel_size_x2 = targets_shape.shape[1];
int num_output_channels3 = targets_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
int input_size = kernel_size_y * kernel_size_x * num_input_channels;
int filterModuleMult = conv ? 1 : num_modules;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_output_channels == num_output_channels3);
assert (num_input_channels == num_input_channels2);
assert (num_input_channels * filterModuleMult == num_input_channels3Mult);
assert (num_images == images->size[0]);
assert (num_images == derivs->size[0]);
assert (num_output_channels == targets->size[0]);
assert (image_size_y * image_size_x * num_input_channels == images->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
assert (kernel_size_y * kernel_size_x * num_input_channels3Mult == targets->size[1]);
assert (kernel_size_y == kernel_size_y2);
assert (kernel_size_x == kernel_size_x2);
assert (num_input_channels % num_groups == 0);
assert (num_groups == 1);
  // Batch size should be a multiple of 128 for max utilization; it will still work if it isn't.
int num_threads_x = MIN(num_images, 128);
float *expanded_images = NULL, *expanded_derivs = NULL;
int num_modules_batch;
//GetTempMemory(num_images, input_size, num_output_channels, num_modules / filterModuleMult,
// expanded_images, expanded_derivs, &num_modules_batch);
int input_memory_size = num_images * input_size * sizeof(float);
int output_memory_size = num_images * num_output_channels * sizeof(float);
int max_batch_size = ((long) MAX_MEMORY_BYTES) / (input_memory_size + output_memory_size);
max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult);
max_batch_size = MIN(max_batch_size, 4096);
max_batch_size = MAX(max_batch_size, 1);
hipError_t err1, err2;
err1 = hipMalloc((void**)&expanded_images, max_batch_size * input_memory_size);
err2 = hipMalloc((void**)&expanded_derivs, max_batch_size * output_memory_size);
if (hipSuccess != err1 || hipSuccess != err2) {
if (hipSuccess == err1) hipFree(expanded_images);
if (hipSuccess == err2) hipFree(expanded_derivs);
err1 = hipMalloc((void**)&expanded_images, input_memory_size);
err2 = hipMalloc((void**)&expanded_derivs, output_memory_size);
if (hipSuccess != err1 || hipSuccess != err2) {
printf("Out of memory on GPU! %s \n", hipGetErrorString(err1));
printf("Out of memory on GPU! %s \n", hipGetErrorString(err2));
}
num_modules_batch = 1;
} else {
num_modules_batch = max_batch_size;
}
int num_iter = DIVUP(num_modules, num_modules_batch);
if (scaleTargets == 0) {
hipMemset(targets->data_device, 0, sizeof(float) * targets->size[0] * targets->size[1]);
} else if (scaleTargets != 1) {
    hipblasSscal(targets->size[0] * targets->size[1], scaleTargets, targets->data_device, 1);
}
int module_id_start = 0;
dim3 threads(num_threads_x);
float* dw = targets->data_device;
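  // Per iteration: (1) kReadRows gathers output derivatives, (2) kExpand builds the matching
  // im2col matrix of inputs, and (3) Sgemm accumulates derivs^T * expanded_images into the
  // filter gradient dw (beta = 1, so successive batches accumulate).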
for (int i = 0; i < num_iter; i++) {
int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start);
//printf("Step %d num_modules %d\n", i, this_num_modules_batch);
dim3 blocks = dim3(this_num_modules_batch, num_output_channels);
hipLaunchKernelGGL(( kReadRows), dim3(blocks), dim3(threads), 0, 0, derivs->data_device, expanded_derivs,
num_images, num_modules,
this_num_modules_batch, module_id_start);
dim3 blocks2 = dim3(this_num_modules_batch, num_input_channels);
hipLaunchKernelGGL(( kExpand), dim3(blocks2), dim3(threads), 0, 0, images->data_device, expanded_images,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x,
this_num_modules_batch, module_id_start);
if (!conv) dw += num_output_channels * input_size;
hipblasSgemm('t', 'n',
num_output_channels,
kernel_size_x * kernel_size_y * num_input_channels,
num_images * this_num_modules_batch,
scaleOutput, expanded_derivs, num_images * this_num_modules_batch,
expanded_images, num_images * this_num_modules_batch,
1, dw, num_output_channels);
if (check_cublas_error()) {
printf("Error in dot or before it.\n");
}
module_id_start += this_num_modules_batch;
}
FreeTempMemory(expanded_images, expanded_derivs);
getLastCudaError("convOutpGemm: kernel execution failed");
}
template <class Pooler>
void _convPoolGemm(cudamat* images, cudamat* targets,
Shape4D images_shape, Shape4D targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput, Pooler pooler) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_output_channels2 = targets_shape.shape[3];
int num_modules_y = targets_shape.shape[2];
int num_modules_x = targets_shape.shape[1];
int num_images = targets_shape.shape[0];
int num_input_channels2 = images_shape.shape[3];
int image_size_y = images_shape.shape[2];
int image_size_x = images_shape.shape[1];
int num_images2 = images_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_input_channels == num_input_channels2);
assert (num_images == images->size[0]);
assert (num_images == targets->size[0]);
assert (image_size_y * image_size_x * num_input_channels == images->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == targets->size[1]);
if (scaleTargets == 0) {
hipMemset(targets->data_device, 0, sizeof(float) * targets->size[0] * targets->size[1]);
} else if (scaleTargets != 1) {
    hipblasSscal(targets->size[0] * targets->size[1], scaleTargets, targets->data_device, 1);
}
dim3 threads(128);
int num_blocks_x = MIN(4096, num_modules);
dim3 blocks = dim3(num_blocks_x, num_input_channels);
hipLaunchKernelGGL(( kPool), dim3(blocks), dim3(threads), 0, 0, images->data_device, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x, scaleOutput, pooler);
getLastCudaError("convLocalPool: kernel execution failed");
}
void _avgPoolUndoGemm(cudamat* derivs, cudamat* targets,
Shape4D derivs_shape, Shape4D targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = targets_shape.shape[3];
int image_size_y = targets_shape.shape[2];
int image_size_x = targets_shape.shape[1];
int num_images2 = targets_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_input_channels == num_input_channels2);
assert (num_images == derivs->size[0]);
assert (num_images == targets->size[0]);
assert (image_size_y * image_size_x * num_input_channels == targets->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
if (scaleTargets == 0) {
hipMemset(targets->data_device, 0, sizeof(float) * targets->size[0] * targets->size[1]);
} else if (scaleTargets != 1) {
    hipblasSscal(targets->size[0] * targets->size[1], scaleTargets, targets->data_device, 1);
}
dim3 threads(128);
int num_blocks_x = MIN(4096, num_modules);
dim3 blocks = dim3(num_blocks_x, num_input_channels);
hipLaunchKernelGGL(( kAvgPoolUndo), dim3(blocks), dim3(threads), 0, 0, derivs->data_device, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x, scaleOutput);
getLastCudaError("avgPoolUndo: kernel execution failed");
}
void _maxPoolUndoGemm(cudamat* images, cudamat* derivs, cudamat* maxes, cudamat* targets,
Shape4D targets_shape, Shape4D derivs_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = targets_shape.shape[3];
int image_size_y = targets_shape.shape[2];
int image_size_x = targets_shape.shape[1];
int num_images2 = targets_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_input_channels == num_input_channels2);
assert (num_images == derivs->size[0]);
assert (num_images == targets->size[0]);
assert (image_size_y * image_size_x * num_input_channels == targets->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
if (scaleTargets == 0) {
hipMemset(targets->data_device, 0, sizeof(float) * targets->size[0] * targets->size[1]);
} else if (scaleTargets != 1) {
    hipblasSscal(targets->size[0] * targets->size[1], scaleTargets, targets->data_device, 1);
}
dim3 threads(128);
int num_blocks_x = MIN(4096, num_modules);
dim3 blocks = dim3(num_blocks_x, num_input_channels);
hipLaunchKernelGGL(( kMaxPoolUndo), dim3(blocks), dim3(threads), 0, 0, images->data_device, derivs->data_device,
maxes->data_device, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x, scaleOutput);
getLastCudaError("avgPoolUndo: kernel execution failed");
}
void _CrossMapRNorm(cudamat* images, cudamat* targets, int num_filters, int sizeF, float addScale, float powScale, bool blocked) {
int num_locs = (images->size[0] * images->size[1]) / num_filters;
int threads = 512;
int num_blocks = DIVUP(num_locs, threads);
hipLaunchKernelGGL(( kCrossMapRNorm), dim3(num_blocks), dim3(threads), 0, 0, images->data_device, targets->data_device,
num_locs, addScale, powScale, num_filters, sizeF, blocked);
getLastCudaError("_CrossMapRNorm: kernel execution failed");
}
void _CrossMapRNormUndo(cudamat* outGrads, cudamat* images, cudamat* targets,
int num_filters, int sizeF, float addScale,
float powScale, bool blocked) {
int num_locs = (images->size[0] * images->size[1]) / num_filters;
int threads = 512;
int batch_offset = 0;
float *denoms;
int max_batch_size = ((long) MAX_MEMORY_BYTES) / (sizeof(float) * num_filters);
max_batch_size = MIN(num_locs, max_batch_size);
hipError_t err;
err = hipMalloc((void**)&denoms, max_batch_size * num_filters * sizeof(float));
if (hipSuccess != err) {
printf("Out of memory on GPU!\n");
}
int num_batches = DIVUP(num_locs, max_batch_size);
for (int i = 0; i < num_batches; i++) {
int batch_size = MIN(max_batch_size, num_locs - batch_offset);
int num_blocks = DIVUP(batch_size, threads);
hipLaunchKernelGGL(( kCrossMapDenoms), dim3(num_blocks), dim3(threads), 0, 0, images->data_device, denoms, num_locs, batch_size,
batch_offset, addScale, num_filters, sizeF, blocked);
hipLaunchKernelGGL(( kCrossMapRNormUndo), dim3(num_blocks), dim3(threads), 0, 0, images->data_device, outGrads->data_device, denoms,
targets->data_device, num_locs, batch_size, batch_offset,
addScale, powScale, num_filters, sizeF, blocked);
batch_offset += batch_size;
}
hipFree(denoms);
getLastCudaError("_CrossMapRNormUndo: kernel execution failed");
}
#ifdef __cplusplus
extern "C" {
#endif
void convUpGemm(cudamat* images, cudamat* filters, cudamat* targets,
Shape4D* images_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc,
float scaleTargets) {
_convUpGemm(images, filters, targets, *images_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, true);
}
void convDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets,
Shape4D* derivs_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) {
_convDownGemm(derivs, filters, targets, *derivs_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, true);
}
void convOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets,
Shape4D* images_shape, Shape4D* derivs_shape, Shape4D* targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput) {
_convOutpGemm(images, derivs, targets, *images_shape, *derivs_shape,
*targets_shape, conv_desc, scaleTargets, scaleOutput, true);
}
void localUpGemm(cudamat* images, cudamat* filters, cudamat* targets,
Shape4D* images_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc,
float scaleTargets) {
_convUpGemm(images, filters, targets, *images_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, false);
}
void localDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets,
Shape4D* derivs_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) {
_convDownGemm(derivs, filters, targets, *derivs_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, false);
}
void localOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets,
Shape4D* images_shape, Shape4D* derivs_shape, Shape4D* targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput) {
_convOutpGemm(images, derivs, targets, *images_shape, *derivs_shape,
*targets_shape, conv_desc, scaleTargets, scaleOutput, false);
}
void MaxPoolGemm(cudamat* images, cudamat* targets, Shape4D* images_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput){
MaxPooler pooler;
_convPoolGemm<MaxPooler>(images, targets, *images_shape, *targets_shape,
conv_desc, scaleTargets, scaleOutput, pooler);
}
void AvgPoolGemm(cudamat* images, cudamat* targets, Shape4D* images_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput){
AvgPooler pooler;
_convPoolGemm<AvgPooler>(images, targets, *images_shape, *targets_shape,
conv_desc, scaleTargets, scaleOutput, pooler);
}
void MaxPoolUndoGemm(cudamat* images, cudamat* maxGrads, cudamat* maxActs,
cudamat* targets, Shape4D* images_shape, Shape4D* maxGrads_shape,
ConvDesc conv_desc, float scaleTargets) {
_maxPoolUndoGemm(images, maxGrads, maxActs, targets, *images_shape,
*maxGrads_shape, conv_desc, scaleTargets, 1);
}
void AvgPoolUndoGemm(cudamat* avgGrads, cudamat* targets, Shape4D* avgGrads_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) {
_avgPoolUndoGemm(avgGrads, targets, *avgGrads_shape, *targets_shape, conv_desc,
scaleTargets, 1);
}
void UpSampleGemm(cudamat* images, cudamat* targets, Shape4D* images_shape,
Shape4D* targets_shape, int factor, float scaleTargets) {
ConvDesc conv_desc;
conv_desc.kernel_size_y = factor;
conv_desc.kernel_size_x = factor;
conv_desc.stride_y = factor;
conv_desc.stride_x = factor;
conv_desc.padding_y = 0;
conv_desc.padding_x = 0;
conv_desc.num_input_channels = images_shape->shape[3];
conv_desc.num_output_channels = targets_shape->shape[3];
conv_desc.num_groups = 1;
_avgPoolUndoGemm(images, targets, *images_shape, *targets_shape, conv_desc,
scaleTargets, factor * factor);
}
void DownSampleGemm(cudamat* images, cudamat* targets, Shape4D* images_shape, Shape4D* targets_shape, int factor) {
AvgPooler pooler = AvgPooler();
ConvDesc conv_desc;
conv_desc.kernel_size_y = factor;
conv_desc.kernel_size_x = factor;
conv_desc.stride_y = factor;
conv_desc.stride_x = factor;
conv_desc.padding_y = 0;
conv_desc.padding_x = 0;
conv_desc.num_input_channels = images_shape->shape[3];
conv_desc.num_output_channels = targets_shape->shape[3];
conv_desc.num_groups = 1;
_convPoolGemm<AvgPooler>(images, targets, *images_shape, *targets_shape,
conv_desc, 0, 1, pooler);
}
void ResponseNormCrossMapGemm(
cudamat* images, cudamat* targets, int num_filters, int sizeF, float addScale,
float powScale, bool blocked) {
_CrossMapRNorm(images, targets, num_filters, sizeF, addScale, powScale, blocked);
}
void ResponseNormCrossMapUndoGemm(
cudamat* outGrads, cudamat* inputs, cudamat* targets, int num_filters,
int sizeF, float addScale, float powScale, bool blocked) {
_CrossMapRNormUndo(outGrads, inputs, targets, num_filters, sizeF, addScale,
powScale, blocked);
}
#ifdef __cplusplus
}
#endif
| 5aee6f77383c3103015a0e1479e4493fafe04634.cu | /** Kernels for convUp, convDown, convOutp, maxpool, avgpool, maxpoolundo,
* avgpoolundo.
* These kernels are 10-20% slower than cuda-convnet2, but have no constraints
* on number of channels and support rectangular images and rectangular kernels.
* They use cublasSgemm for convUp, convDown, convOutp.
* Data layout : Column-major
* data : (num_images, image_size_x, image_size_y, num_input_channels)
* filters : (num_output_channels, kernel_size_x, kernel_size_y, num_input_channels)
*/
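/* Worked index example (derived from the offset arithmetic in kExpand/kContract
 * below; the coordinate names im, x, y, c and f, kx, ky are illustrative only):
 * under the column-major layout above, data element (im, x, y, c) lives at
 *     im + num_images * (x + image_size_x * (y + image_size_y * c))
 * and filter element (f, kx, ky, c) at
 *     f + num_output_channels * (kx + kernel_size_x * (ky + kernel_size_y * c)).
 */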
#include "cudamat_conv_gemm.cuh"
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
#define MAX_MEMORY_BYTES (200 * (1 << 20))
inline void GetTempMemory(int num_images, int input_size, int num_output_channels,
                          int num_modules, float *&input, float *&output,
                          int* batch_size) {
  // input/output are reference-to-pointer so the caller actually receives the
  // cudaMalloc'd buffers (a plain float* parameter would drop the allocations).
  int input_memory_size = num_images * input_size * sizeof(float);
  int output_memory_size = num_images * num_output_channels * sizeof(float);
  int max_batch_size = ((long) MAX_MEMORY_BYTES) / (input_memory_size + output_memory_size);
  max_batch_size = MIN(max_batch_size, num_modules);
  max_batch_size = MIN(max_batch_size, 4096);
  max_batch_size = MAX(max_batch_size, 1);
  cudaError_t err1, err2;
  err1 = cudaMalloc((void**)&input, max_batch_size * input_memory_size);
  err2 = cudaMalloc((void**)&output, max_batch_size * output_memory_size);
  if (cudaSuccess != err1 || cudaSuccess != err2) {
    if (cudaSuccess == err1) cudaFree(input);
    if (cudaSuccess == err2) cudaFree(output);
    err1 = cudaMalloc((void**)&input, input_memory_size);
    err2 = cudaMalloc((void**)&output, output_memory_size);
    if (cudaSuccess != err1 || cudaSuccess != err2) {
      printf("Out of memory on GPU! %s \n", cudaGetErrorString(err1));
      printf("Out of memory on GPU! %s \n", cudaGetErrorString(err2));
    }
    *batch_size = 1;
  } else {
    *batch_size = max_batch_size;
  }
}
void FreeTempMemory(float* input, float* output) {
cudaFree(input);
cudaFree(output);
}
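/* Usage sketch for the two helpers above (hedged; it mirrors the GetTempMemory
 * calls kept commented out further down, and the variable names here are
 * illustrative only):
 *
 *   float *expanded_in = NULL, *expanded_out = NULL;
 *   int num_modules_batch;
 *   GetTempMemory(num_images, input_size, num_output_channels, num_modules,
 *                 expanded_in, expanded_out, &num_modules_batch);
 *   // ... process the modules in chunks of num_modules_batch ...
 *   FreeTempMemory(expanded_in, expanded_out);
 */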
inline bool check_cublas_error() {
cublasStatus status = cublasGetError();
return status != CUBLAS_STATUS_SUCCESS;
}
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
class AvgPooler {
public:
__device__ inline float operator()(const float a, const float b) const {
return a + b;
}
__device__ inline float getBaseValue() const {
return 0;
}
__device__ inline float output(const float a, const int regionSize) const {
return a / regionSize;
}
};
class MaxPooler {
public:
__device__ inline float operator()(const float a, const float b) const {
return fmaxf(a, b);
}
__device__ inline float getBaseValue() const {
return -2e38;
}
__device__ inline float output(const float a, const int regionSize) const {
return a;
}
};
__global__ void kExpand(float *images, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x,
int num_modules_y, int num_modules_x,
int kernel_size_y, int kernel_size_x,
int padding_y, int padding_x,
int stride_y, int stride_x,
int num_modules_batch, int module_id_offset) {
int color = blockIdx.y;
int src_module_id = module_id_offset + blockIdx.x;
int dst_module_id = blockIdx.x;
int module_id_x = src_module_id % num_modules_x;
int module_id_y = src_module_id / num_modules_x;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int Y, X;
long target_id, source_id;
images += num_images * image_size_x * image_size_y * color;
targets += num_images * (dst_module_id + num_modules_batch * (kernel_size_y * kernel_size_x * color));
for (int y = 0; y < kernel_size_y; y++) {
Y = startY + y;
for (int x = 0; x < kernel_size_x; x++) {
X = startX + x;
target_id = num_images * num_modules_batch * (x + kernel_size_x * y);
source_id = num_images * (X + image_size_x * Y);
if (X < 0 || X >= image_size_x || Y < 0 || Y >= image_size_y) {
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
targets[target_id + im] = 0;
}
} else {
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
targets[target_id + im] = images[source_id + im];
}
}
__syncthreads();
}
}
}
template <class Pooler>
__global__ void kPool(float *images, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x,
int num_modules_y, int num_modules_x,
int kernel_size_y, int kernel_size_x,
int padding_y, int padding_x,
int stride_y, int stride_x, float scaleOutput,
Pooler pooler) {
int color = blockIdx.y;
int num_modules = num_modules_y * num_modules_x;
long source_id, target_id;
images += num_images * image_size_x * image_size_y * color;
targets += num_images * num_modules * color;
for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) {
int module_id_x = module_id % num_modules_x;
int module_id_y = module_id / num_modules_x;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
target_id = num_images * module_id;
int endY = startY + kernel_size_y;
int endX = startX + kernel_size_x;
startY = MAX(startY, 0);
startX = MAX(startX, 0);
endY = MIN(endY , image_size_y);
endX = MIN(endX , image_size_x);
int regionSize = (endX - startX) * (endY - startY);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
float val = pooler.getBaseValue();
for (int Y = startY; Y < endY; Y++) {
for (int X = startX; X < endX; X++) {
source_id = num_images * (X + image_size_x * Y);
val = pooler(val, images[source_id + im]);
}
}
targets[target_id + im] = scaleOutput * pooler.output(val, regionSize);
}
}
__syncthreads();
}
__global__ void kAvgPoolUndo(float *derivs, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x,
int num_modules_y, int num_modules_x,
int kernel_size_y, int kernel_size_x,
int padding_y, int padding_x,
int stride_y, int stride_x, float scaleOutput) {
int color = blockIdx.y;
int num_modules = num_modules_y * num_modules_x;
long source_id;
derivs += num_images * num_modules * color;
targets += num_images * image_size_x * image_size_y * color;
for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) {
int module_id_x = module_id % num_modules_x;
int module_id_y = module_id / num_modules_x;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
source_id = num_images * module_id;
int endY = startY + kernel_size_y;
int endX = startX + kernel_size_x;
startY = MAX(startY, 0);
startX = MAX(startX, 0);
endY = MIN(endY , image_size_y);
endX = MIN(endX , image_size_x);
int regionSize = (endX - startX) * (endY - startY);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
float val = scaleOutput * derivs[source_id + im] / regionSize;
for (int Y = startY; Y < endY; Y++) {
for (int X = startX; X < endX; X++) {
atomicAdd(&targets[num_images * (X + image_size_x * Y) + im], val);
__syncthreads();
}
}
}
}
}
__global__ void kMaxPoolUndo(float * images, float *derivs, float* maxes, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x,
int num_modules_y, int num_modules_x,
int kernel_size_y, int kernel_size_x,
int padding_y, int padding_x,
int stride_y, int stride_x, float scaleOutput) {
int color = blockIdx.y;
int num_modules = num_modules_y * num_modules_x;
long source_id, target_id;
derivs += num_images * num_modules * color;
maxes += num_images * num_modules * color;
targets += num_images * image_size_x * image_size_y * color;
images += num_images * image_size_x * image_size_y * color;
for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) {
int module_id_x = module_id % num_modules_x;
int module_id_y = module_id / num_modules_x;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
source_id = num_images * module_id;
int endY = startY + kernel_size_y;
int endX = startX + kernel_size_x;
startY = MAX(startY, 0);
startX = MAX(startX, 0);
endY = MIN(endY , image_size_y);
endX = MIN(endX , image_size_x);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
float val = scaleOutput * derivs[source_id + im];
for (int Y = startY; Y < endY; Y++) {
for (int X = startX; X < endX; X++) {
target_id = num_images * (X + image_size_x * Y) + im;
if (images[target_id] == maxes[source_id + im]) {
atomicAdd(&targets[target_id], val);
}
__syncthreads();
}
}
}
}
}
__global__ void kContract(float *expanded_data, float* targets,
int num_images, int num_input_channels,
int image_size_y, int image_size_x,
int num_modules_y, int num_modules_x,
int kernel_size_y, int kernel_size_x,
int padding_y, int padding_x,
int stride_y, int stride_x,
int num_modules_batch, int module_id_offset) {
int color = blockIdx.y;
int dst_module_id = module_id_offset + blockIdx.x;
int src_module_id = blockIdx.x;
int module_id_x = dst_module_id % num_modules_x;
int module_id_y = dst_module_id / num_modules_x;
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int Y, X;
long target_id, source_id;
targets += num_images * image_size_x * image_size_y * color;
expanded_data += num_images * (src_module_id + num_modules_batch * (kernel_size_y * kernel_size_x * color));
for (int y = 0; y < kernel_size_y; y++) {
Y = startY + y;
for (int x = 0; x < kernel_size_x; x++) {
X = startX + x;
source_id = num_images * num_modules_batch * (x + kernel_size_x * y);
target_id = num_images * (X + image_size_x * Y);
if (X < 0 || X >= image_size_x || Y < 0 || Y >= image_size_y) {
// do nothing.
} else {
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
atomicAdd(&targets[target_id + im], expanded_data[source_id + im]);
__syncthreads();
}
}
}
}
}
__global__ void kWriteRows(float* data, float* target,
int num_images, int num_modules,
int num_modules_batch, int module_id_offset,
float beta) {
int c = blockIdx.y;
int src_module_id = blockIdx.x;
int dst_module_id = module_id_offset + blockIdx.x;
data += num_images * (src_module_id + c * num_modules_batch);
target += num_images * (dst_module_id + c * num_modules);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
target[im] = beta * data[im];
}
}
__global__ void kReadRows(float* data, float* target,
int num_images, int num_modules,
int num_modules_batch, int module_id_offset) {
int c = blockIdx.y;
int src_module_id = module_id_offset + blockIdx.x;
int dst_module_id = blockIdx.x;
data += num_images * (src_module_id + c * num_modules);
target += num_images * (dst_module_id + c * num_modules_batch);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
target[im] = data[im];
}
}
__global__ void kWriteRowsMult(float* data, float* target,
int num_images, int num_modules,
int num_modules_batch, int module_id_offset,
float alpha, float beta) {
int c = blockIdx.y;
int src_module_id = blockIdx.x;
int dst_module_id = module_id_offset + blockIdx.x;
data += num_images * (src_module_id + c * num_modules_batch);
target += num_images * (dst_module_id + c * num_modules);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
target[im] = alpha * target[im] + beta * data[im];
}
}
__global__ void kCrossMapDenoms(float* data, float* denoms,
int num_locs, int batch_locs, int batch_offset, float addScale,
int num_filters, int k, bool blocked) {
long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
data += batch_offset + loc_id;
denoms += loc_id;
if (batch_offset + loc_id < num_locs) {
for (int j = 0; j < num_filters; j++) {
float sum = 0;
int start = blocked ? (j / k) * k : MAX(0, -k/2 + j);
int end = MIN(num_filters, start + k);
for (int i = start; i < end; i++) {
sum += data[i * num_locs] * data[i * num_locs];
}
denoms[j * batch_locs] = 1 + addScale * sum;
}
}
}
__global__ void kCrossMapRNorm(float* data, float* target,
int num_locs, float addScale, float powScale,
int num_filters, int k, bool blocked) {
long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
data += loc_id;
target += loc_id;
if (loc_id < num_locs) {
for (int j = 0; j < num_filters; j++) {
float sum = 0;
int start = blocked ? (j / k) * k : MAX(0, -k/2 + j);
int end = MIN(num_filters, start + k);
for (int i = start; i < end; i++) {
sum += data[i * num_locs] * data[i * num_locs];
}
target[j * num_locs] = data[j * num_locs] * __powf(1 + addScale * sum, -powScale);
}
}
}
__global__ void kCrossMapRNormUndo(float* data, float* deriv, float* denoms, float* target,
int num_locs, int batch_locs, int batch_offset, float addScale, float powScale,
int num_filters, int k, bool blocked) {
long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
data += batch_offset + loc_id;
target += batch_offset + loc_id;
deriv += batch_offset + loc_id;
denoms += loc_id;
if (batch_offset + loc_id < num_locs) {
for (int j = 0; j < num_filters; j++) {
float sum = 0;
int start = blocked ? (j / k) * k : MAX(0, -k/2 + j);
int end = MIN(num_filters, start + k);
for (int i = start; i < end; i++) {
sum += deriv[i * num_locs] * data[i * num_locs] * __powf(denoms[i * batch_locs], -powScale - 1);
}
target[j * num_locs] = deriv[j * num_locs] * __powf(denoms[j * batch_locs], -powScale) -
2 * addScale * powScale * data[j * num_locs] * sum;
}
}
}
void _convUpGemm(cudamat* images, cudamat* filters, cudamat* targets,
Shape4D images_shape, Shape4D filters_shape,
Shape4D targets_shape, ConvDesc conv_desc,
float scaleTargets, float scaleOutput, bool conv) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_groups = conv_desc.num_groups;
int num_output_channels2 = targets_shape.shape[3];
int num_modules_y = targets_shape.shape[2];
int num_modules_x = targets_shape.shape[1];
int num_images = targets_shape.shape[0];
int num_input_channels2 = images_shape.shape[3];
int image_size_y = images_shape.shape[2];
int image_size_x = images_shape.shape[1];
int num_images2 = images_shape.shape[0];
int num_input_channels3 = filters_shape.shape[3];
int kernel_size_y2 = filters_shape.shape[2];
int kernel_size_x2 = filters_shape.shape[1];
int num_output_channels3 = filters_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
int input_size = kernel_size_y * kernel_size_x * num_input_channels;
int filterModuleMult = conv ? 1 : num_modules;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_output_channels == num_output_channels3);
assert (num_input_channels == num_input_channels2);
assert (num_input_channels == num_input_channels3 / filterModuleMult);
assert (num_images == images->size[0]);
assert (num_images == targets->size[0]);
assert (num_output_channels == filters->size[0]);
assert (image_size_y * image_size_x * num_input_channels == images->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == targets->size[1]);
assert (kernel_size_y * kernel_size_x * num_input_channels * filterModuleMult == filters->size[1]);
assert (kernel_size_y == kernel_size_y2);
assert (kernel_size_x == kernel_size_x2);
assert (num_input_channels % num_groups == 0);
assert (num_groups == 1);
  // Batch size should be a multiple of 128 for max utilization; this will still work if it isn't.
int num_threads_x = MIN(num_images, 128);
float *expanded_images = NULL, *expanded_target = NULL;
int num_modules_batch;
int input_memory_size = num_images * input_size * sizeof(float);
int output_memory_size = num_images * num_output_channels * sizeof(float);
int max_batch_size = ((long) MAX_MEMORY_BYTES) / (input_memory_size + output_memory_size);
max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult);
max_batch_size = MIN(max_batch_size, 4096);
max_batch_size = MAX(max_batch_size, 1);
cudaError_t err1, err2;
err1 = cudaMalloc((void**)&expanded_images, max_batch_size * input_memory_size);
err2 = cudaMalloc((void**)&expanded_target, max_batch_size * output_memory_size);
if (cudaSuccess != err1 || cudaSuccess != err2) {
if (cudaSuccess == err1) cudaFree(expanded_images);
if (cudaSuccess == err2) cudaFree(expanded_target);
err1 = cudaMalloc((void**)&expanded_images, input_memory_size);
err2 = cudaMalloc((void**)&expanded_target, output_memory_size);
if (cudaSuccess != err1 || cudaSuccess != err2) {
printf("Out of memory on GPU! %s \n", cudaGetErrorString(err1));
printf("Out of memory on GPU! %s \n", cudaGetErrorString(err2));
}
num_modules_batch = 1;
} else {
num_modules_batch = max_batch_size;
}
int num_iter = DIVUP(num_modules, num_modules_batch);
int module_id_start = 0;
float* w = filters->data_device;
for (int i = 0; i < num_iter; i++) {
int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start);
//printf("Step %d num_modules %d\n", i, this_num_modules_batch);
dim3 threads(num_threads_x);
dim3 blocks = dim3(this_num_modules_batch, num_input_channels);
kExpand<<<blocks, threads>>>(images->data_device, expanded_images,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x,
this_num_modules_batch, module_id_start);
if (!conv) w += num_output_channels * input_size;
cublasSgemm('n', 't',
num_images * this_num_modules_batch, num_output_channels,
kernel_size_x * kernel_size_y * num_input_channels,
1, expanded_images, num_images * this_num_modules_batch,
w, num_output_channels,
0, expanded_target, num_images * this_num_modules_batch);
dim3 blocks2 = dim3(this_num_modules_batch, num_output_channels);
if (scaleTargets == 0) {
kWriteRows<<<blocks2, threads>>>(expanded_target, targets->data_device,
num_images, num_modules,
this_num_modules_batch, module_id_start,
scaleOutput);
} else {
kWriteRowsMult<<<blocks2, threads>>>(expanded_target, targets->data_device,
num_images, num_modules,
this_num_modules_batch, module_id_start,
scaleTargets, scaleOutput);
}
module_id_start += this_num_modules_batch;
}
FreeTempMemory(expanded_images, expanded_target);
getLastCudaError("convUpGemm: kernel execution failed");
}
void _convDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets,
Shape4D derivs_shape, Shape4D filters_shape,
Shape4D targets_shape, ConvDesc conv_desc,
float scaleTargets, float scaleOutput, bool conv) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_groups = conv_desc.num_groups;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = targets_shape.shape[3];
int image_size_y = targets_shape.shape[2];
int image_size_x = targets_shape.shape[1];
int num_images2 = targets_shape.shape[0];
int num_input_channels3 = filters_shape.shape[3];
int kernel_size_y2 = filters_shape.shape[2];
int kernel_size_x2 = filters_shape.shape[1];
int num_output_channels3 = filters_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
int input_size = kernel_size_y * kernel_size_x * num_input_channels;
int filterModuleMult = conv ? 1 : num_modules;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_output_channels == num_output_channels3);
assert (num_input_channels == num_input_channels2);
assert (num_input_channels == num_input_channels3 / filterModuleMult);
assert (num_images == targets->size[0]);
assert (num_images == derivs->size[0]);
assert (num_output_channels == filters->size[0]);
assert (image_size_y * image_size_x * num_input_channels == targets->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
assert (kernel_size_y * kernel_size_x * num_input_channels * filterModuleMult == filters->size[1]);
assert (kernel_size_y == kernel_size_y2);
assert (kernel_size_x == kernel_size_x2);
assert (num_input_channels % num_groups == 0);
assert (num_groups == 1);
  int num_threads_x = MIN(num_images, 128); // Batch size should be a multiple of 128 for max utilization; this will still work if it isn't.
float *expanded_target = NULL, *expanded_derivs = NULL;
int num_modules_batch;
//GetTempMemory(num_images, input_size, num_output_channels, num_modules / filterModuleMult,
// expanded_target, expanded_derivs, &num_modules_batch);
int input_memory_size = num_images * input_size * sizeof(float);
int output_memory_size = num_images * num_output_channels * sizeof(float);
int max_batch_size = ((long) MAX_MEMORY_BYTES) / (input_memory_size + output_memory_size);
max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult);
max_batch_size = MIN(max_batch_size, 4096);
max_batch_size = MAX(max_batch_size, 1);
cudaError_t err1, err2;
err1 = cudaMalloc((void**)&expanded_target, max_batch_size * input_memory_size);
err2 = cudaMalloc((void**)&expanded_derivs, max_batch_size * output_memory_size);
if (cudaSuccess != err1 || cudaSuccess != err2) {
if (cudaSuccess == err1) cudaFree(expanded_target);
if (cudaSuccess == err2) cudaFree(expanded_derivs);
err1 = cudaMalloc((void**)&expanded_target, input_memory_size);
err2 = cudaMalloc((void**)&expanded_derivs, output_memory_size);
if (cudaSuccess != err1 || cudaSuccess != err2) {
printf("Out of memory on GPU! %s \n", cudaGetErrorString(err1));
printf("Out of memory on GPU! %s \n", cudaGetErrorString(err2));
}
num_modules_batch = 1;
} else {
num_modules_batch = max_batch_size;
}
int num_iter = DIVUP(num_modules, num_modules_batch);
if (scaleTargets == 0) {
cudaMemset(targets->data_device, 0, sizeof(float) * targets->size[0] * targets->size[1]);
} else if (scaleTargets != 1) {
    cublasSscal(targets->size[0] * targets->size[1], scaleTargets, targets->data_device, 1);  // n is an element count, not a byte count
}
int module_id_start = 0;
float* w = filters->data_device;
for (int i = 0; i < num_iter; i++) {
int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start);
//printf("Step %d num_modules %d\n", i, this_num_modules_batch);
dim3 blocks = dim3(this_num_modules_batch, num_output_channels);
dim3 threads(num_threads_x);
kReadRows<<<blocks, threads>>>(derivs->data_device, expanded_derivs,
num_images, num_modules,
this_num_modules_batch, module_id_start);
if (!conv) w += num_output_channels * input_size;
cublasSgemm('n', 'n',
num_images * this_num_modules_batch, kernel_size_x * kernel_size_y * num_input_channels,
num_output_channels,
scaleOutput, expanded_derivs, num_images * this_num_modules_batch,
w, num_output_channels,
0, expanded_target, num_images * this_num_modules_batch);
if (check_cublas_error()) {
printf("Error in dot or before it.\n");
}
dim3 blocks2 = dim3(this_num_modules_batch, num_input_channels);
kContract<<<blocks2, threads>>>(expanded_target, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x,
this_num_modules_batch, module_id_start);
module_id_start += this_num_modules_batch;
}
FreeTempMemory(expanded_target, expanded_derivs);
getLastCudaError("convDownGemm: kernel execution failed");
}
void _convOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets,
Shape4D images_shape, Shape4D derivs_shape, Shape4D targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput, bool conv) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_groups = conv_desc.num_groups;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = images_shape.shape[3];
int image_size_y = images_shape.shape[2];
int image_size_x = images_shape.shape[1];
int num_images2 = images_shape.shape[0];
int num_input_channels3Mult = targets_shape.shape[3];
int kernel_size_y2 = targets_shape.shape[2];
int kernel_size_x2 = targets_shape.shape[1];
int num_output_channels3 = targets_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
int input_size = kernel_size_y * kernel_size_x * num_input_channels;
int filterModuleMult = conv ? 1 : num_modules;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_output_channels == num_output_channels3);
assert (num_input_channels == num_input_channels2);
assert (num_input_channels * filterModuleMult == num_input_channels3Mult);
assert (num_images == images->size[0]);
assert (num_images == derivs->size[0]);
assert (num_output_channels == targets->size[0]);
assert (image_size_y * image_size_x * num_input_channels == images->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
assert (kernel_size_y * kernel_size_x * num_input_channels3Mult == targets->size[1]);
assert (kernel_size_y == kernel_size_y2);
assert (kernel_size_x == kernel_size_x2);
assert (num_input_channels % num_groups == 0);
assert (num_groups == 1);
  // Batch size should be a multiple of 128 for max utilization; this will still work if it isn't.
int num_threads_x = MIN(num_images, 128);
float *expanded_images = NULL, *expanded_derivs = NULL;
int num_modules_batch;
//GetTempMemory(num_images, input_size, num_output_channels, num_modules / filterModuleMult,
// expanded_images, expanded_derivs, &num_modules_batch);
int input_memory_size = num_images * input_size * sizeof(float);
int output_memory_size = num_images * num_output_channels * sizeof(float);
int max_batch_size = ((long) MAX_MEMORY_BYTES) / (input_memory_size + output_memory_size);
max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult);
max_batch_size = MIN(max_batch_size, 4096);
max_batch_size = MAX(max_batch_size, 1);
cudaError_t err1, err2;
err1 = cudaMalloc((void**)&expanded_images, max_batch_size * input_memory_size);
err2 = cudaMalloc((void**)&expanded_derivs, max_batch_size * output_memory_size);
if (cudaSuccess != err1 || cudaSuccess != err2) {
if (cudaSuccess == err1) cudaFree(expanded_images);
if (cudaSuccess == err2) cudaFree(expanded_derivs);
err1 = cudaMalloc((void**)&expanded_images, input_memory_size);
err2 = cudaMalloc((void**)&expanded_derivs, output_memory_size);
if (cudaSuccess != err1 || cudaSuccess != err2) {
printf("Out of memory on GPU! %s \n", cudaGetErrorString(err1));
printf("Out of memory on GPU! %s \n", cudaGetErrorString(err2));
}
num_modules_batch = 1;
} else {
num_modules_batch = max_batch_size;
}
int num_iter = DIVUP(num_modules, num_modules_batch);
if (scaleTargets == 0) {
cudaMemset(targets->data_device, 0, sizeof(float) * targets->size[0] * targets->size[1]);
} else if (scaleTargets != 1) {
    cublasSscal(targets->size[0] * targets->size[1], scaleTargets, targets->data_device, 1);
}
int module_id_start = 0;
dim3 threads(num_threads_x);
float* dw = targets->data_device;
for (int i = 0; i < num_iter; i++) {
int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start);
//printf("Step %d num_modules %d\n", i, this_num_modules_batch);
dim3 blocks = dim3(this_num_modules_batch, num_output_channels);
kReadRows<<<blocks, threads>>>(derivs->data_device, expanded_derivs,
num_images, num_modules,
this_num_modules_batch, module_id_start);
dim3 blocks2 = dim3(this_num_modules_batch, num_input_channels);
kExpand<<<blocks2, threads>>>(images->data_device, expanded_images,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x,
this_num_modules_batch, module_id_start);
if (!conv) dw += num_output_channels * input_size;
cublasSgemm('t', 'n',
num_output_channels,
kernel_size_x * kernel_size_y * num_input_channels,
num_images * this_num_modules_batch,
scaleOutput, expanded_derivs, num_images * this_num_modules_batch,
expanded_images, num_images * this_num_modules_batch,
1, dw, num_output_channels);
if (check_cublas_error()) {
printf("Error in dot or before it.\n");
}
module_id_start += this_num_modules_batch;
}
FreeTempMemory(expanded_images, expanded_derivs);
getLastCudaError("convOutpGemm: kernel execution failed");
}
template <class Pooler>
void _convPoolGemm(cudamat* images, cudamat* targets,
Shape4D images_shape, Shape4D targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput, Pooler pooler) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_output_channels2 = targets_shape.shape[3];
int num_modules_y = targets_shape.shape[2];
int num_modules_x = targets_shape.shape[1];
int num_images = targets_shape.shape[0];
int num_input_channels2 = images_shape.shape[3];
int image_size_y = images_shape.shape[2];
int image_size_x = images_shape.shape[1];
int num_images2 = images_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_input_channels == num_input_channels2);
assert (num_images == images->size[0]);
assert (num_images == targets->size[0]);
assert (image_size_y * image_size_x * num_input_channels == images->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == targets->size[1]);
if (scaleTargets == 0) {
cudaMemset(targets->data_device, 0, sizeof(float) * targets->size[0] * targets->size[1]);
} else if (scaleTargets != 1) {
    cublasSscal(targets->size[0] * targets->size[1], scaleTargets, targets->data_device, 1);
}
dim3 threads(128);
int num_blocks_x = MIN(4096, num_modules);
dim3 blocks = dim3(num_blocks_x, num_input_channels);
kPool<<<blocks, threads>>>(images->data_device, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x, scaleOutput, pooler);
getLastCudaError("convLocalPool: kernel execution failed");
}
void _avgPoolUndoGemm(cudamat* derivs, cudamat* targets,
Shape4D derivs_shape, Shape4D targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = targets_shape.shape[3];
int image_size_y = targets_shape.shape[2];
int image_size_x = targets_shape.shape[1];
int num_images2 = targets_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_input_channels == num_input_channels2);
assert (num_images == derivs->size[0]);
assert (num_images == targets->size[0]);
assert (image_size_y * image_size_x * num_input_channels == targets->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
if (scaleTargets == 0) {
cudaMemset(targets->data_device, 0, sizeof(float) * targets->size[0] * targets->size[1]);
} else if (scaleTargets != 1) {
    cublasSscal(targets->size[0] * targets->size[1], scaleTargets, targets->data_device, 1);
}
dim3 threads(128);
int num_blocks_x = MIN(4096, num_modules);
dim3 blocks = dim3(num_blocks_x, num_input_channels);
kAvgPoolUndo<<<blocks, threads>>>(derivs->data_device, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x, scaleOutput);
getLastCudaError("avgPoolUndo: kernel execution failed");
}
void _maxPoolUndoGemm(cudamat* images, cudamat* derivs, cudamat* maxes, cudamat* targets,
Shape4D targets_shape, Shape4D derivs_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput) {
int num_input_channels = conv_desc.num_input_channels;
int num_output_channels = conv_desc.num_output_channels;
int kernel_size_y = conv_desc.kernel_size_y;
int kernel_size_x = conv_desc.kernel_size_x;
int stride_y = conv_desc.stride_y;
int stride_x = conv_desc.stride_x;
int padding_y = conv_desc.padding_y;
int padding_x = conv_desc.padding_x;
int num_output_channels2 = derivs_shape.shape[3];
int num_modules_y = derivs_shape.shape[2];
int num_modules_x = derivs_shape.shape[1];
int num_images = derivs_shape.shape[0];
int num_input_channels2 = targets_shape.shape[3];
int image_size_y = targets_shape.shape[2];
int image_size_x = targets_shape.shape[1];
int num_images2 = targets_shape.shape[0];
int num_modules = num_modules_y * num_modules_x;
// Consistency checks.
assert (num_images == num_images2);
assert (num_output_channels == num_output_channels2);
assert (num_input_channels == num_input_channels2);
assert (num_images == derivs->size[0]);
assert (num_images == targets->size[0]);
assert (image_size_y * image_size_x * num_input_channels == targets->size[1]);
assert (num_modules_y * num_modules_x * num_output_channels == derivs->size[1]);
if (scaleTargets == 0) {
cudaMemset(targets->data_device, 0, sizeof(float) * targets->size[0] * targets->size[1]);
} else if (scaleTargets != 1) {
    cublasSscal(targets->size[0] * targets->size[1], scaleTargets, targets->data_device, 1);
}
dim3 threads(128);
int num_blocks_x = MIN(4096, num_modules);
dim3 blocks = dim3(num_blocks_x, num_input_channels);
kMaxPoolUndo<<<blocks, threads>>>(images->data_device, derivs->data_device,
maxes->data_device, targets->data_device,
num_images, num_input_channels,
image_size_y, image_size_x,
num_modules_y, num_modules_x,
kernel_size_y, kernel_size_x,
padding_y, padding_x,
stride_y, stride_x, scaleOutput);
getLastCudaError("avgPoolUndo: kernel execution failed");
}
void _CrossMapRNorm(cudamat* images, cudamat* targets, int num_filters, int sizeF, float addScale, float powScale, bool blocked) {
int num_locs = (images->size[0] * images->size[1]) / num_filters;
int threads = 512;
int num_blocks = DIVUP(num_locs, threads);
kCrossMapRNorm<<<num_blocks, threads>>>(images->data_device, targets->data_device,
num_locs, addScale, powScale, num_filters, sizeF, blocked);
getLastCudaError("_CrossMapRNorm: kernel execution failed");
}
void _CrossMapRNormUndo(cudamat* outGrads, cudamat* images, cudamat* targets,
int num_filters, int sizeF, float addScale,
float powScale, bool blocked) {
int num_locs = (images->size[0] * images->size[1]) / num_filters;
int threads = 512;
int batch_offset = 0;
float *denoms;
int max_batch_size = ((long) MAX_MEMORY_BYTES) / (sizeof(float) * num_filters);
max_batch_size = MIN(num_locs, max_batch_size);
cudaError_t err;
err = cudaMalloc((void**)&denoms, max_batch_size * num_filters * sizeof(float));
if (cudaSuccess != err) {
printf("Out of memory on GPU!\n");
}
int num_batches = DIVUP(num_locs, max_batch_size);
for (int i = 0; i < num_batches; i++) {
int batch_size = MIN(max_batch_size, num_locs - batch_offset);
int num_blocks = DIVUP(batch_size, threads);
kCrossMapDenoms<<<num_blocks, threads>>>(images->data_device, denoms, num_locs, batch_size,
batch_offset, addScale, num_filters, sizeF, blocked);
kCrossMapRNormUndo<<<num_blocks, threads>>>(images->data_device, outGrads->data_device, denoms,
targets->data_device, num_locs, batch_size, batch_offset,
addScale, powScale, num_filters, sizeF, blocked);
batch_offset += batch_size;
}
cudaFree(denoms);
getLastCudaError("_CrossMapRNormUndo: kernel execution failed");
}
#ifdef __cplusplus
extern "C" {
#endif
void convUpGemm(cudamat* images, cudamat* filters, cudamat* targets,
Shape4D* images_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc,
float scaleTargets) {
_convUpGemm(images, filters, targets, *images_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, true);
}
void convDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets,
Shape4D* derivs_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) {
_convDownGemm(derivs, filters, targets, *derivs_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, true);
}
void convOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets,
Shape4D* images_shape, Shape4D* derivs_shape, Shape4D* targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput) {
_convOutpGemm(images, derivs, targets, *images_shape, *derivs_shape,
*targets_shape, conv_desc, scaleTargets, scaleOutput, true);
}
void localUpGemm(cudamat* images, cudamat* filters, cudamat* targets,
Shape4D* images_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc,
float scaleTargets) {
_convUpGemm(images, filters, targets, *images_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, false);
}
void localDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets,
Shape4D* derivs_shape, Shape4D* filters_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) {
_convDownGemm(derivs, filters, targets, *derivs_shape, *filters_shape,
*targets_shape, conv_desc, scaleTargets, 1.0, false);
}
void localOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets,
Shape4D* images_shape, Shape4D* derivs_shape, Shape4D* targets_shape,
ConvDesc conv_desc, float scaleTargets, float scaleOutput) {
_convOutpGemm(images, derivs, targets, *images_shape, *derivs_shape,
*targets_shape, conv_desc, scaleTargets, scaleOutput, false);
}
void MaxPoolGemm(cudamat* images, cudamat* targets, Shape4D* images_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput){
MaxPooler pooler;
_convPoolGemm<MaxPooler>(images, targets, *images_shape, *targets_shape,
conv_desc, scaleTargets, scaleOutput, pooler);
}
void AvgPoolGemm(cudamat* images, cudamat* targets, Shape4D* images_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput){
AvgPooler pooler;
_convPoolGemm<AvgPooler>(images, targets, *images_shape, *targets_shape,
conv_desc, scaleTargets, scaleOutput, pooler);
}
void MaxPoolUndoGemm(cudamat* images, cudamat* maxGrads, cudamat* maxActs,
cudamat* targets, Shape4D* images_shape, Shape4D* maxGrads_shape,
ConvDesc conv_desc, float scaleTargets) {
_maxPoolUndoGemm(images, maxGrads, maxActs, targets, *images_shape,
*maxGrads_shape, conv_desc, scaleTargets, 1);
}
void AvgPoolUndoGemm(cudamat* avgGrads, cudamat* targets, Shape4D* avgGrads_shape,
Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) {
_avgPoolUndoGemm(avgGrads, targets, *avgGrads_shape, *targets_shape, conv_desc,
scaleTargets, 1);
}
void UpSampleGemm(cudamat* images, cudamat* targets, Shape4D* images_shape,
Shape4D* targets_shape, int factor, float scaleTargets) {
ConvDesc conv_desc;
conv_desc.kernel_size_y = factor;
conv_desc.kernel_size_x = factor;
conv_desc.stride_y = factor;
conv_desc.stride_x = factor;
conv_desc.padding_y = 0;
conv_desc.padding_x = 0;
conv_desc.num_input_channels = images_shape->shape[3];
conv_desc.num_output_channels = targets_shape->shape[3];
conv_desc.num_groups = 1;
_avgPoolUndoGemm(images, targets, *images_shape, *targets_shape, conv_desc,
scaleTargets, factor * factor);
}
void DownSampleGemm(cudamat* images, cudamat* targets, Shape4D* images_shape, Shape4D* targets_shape, int factor) {
AvgPooler pooler = AvgPooler();
ConvDesc conv_desc;
conv_desc.kernel_size_y = factor;
conv_desc.kernel_size_x = factor;
conv_desc.stride_y = factor;
conv_desc.stride_x = factor;
conv_desc.padding_y = 0;
conv_desc.padding_x = 0;
conv_desc.num_input_channels = images_shape->shape[3];
conv_desc.num_output_channels = targets_shape->shape[3];
conv_desc.num_groups = 1;
_convPoolGemm<AvgPooler>(images, targets, *images_shape, *targets_shape,
conv_desc, 0, 1, pooler);
}
void ResponseNormCrossMapGemm(
cudamat* images, cudamat* targets, int num_filters, int sizeF, float addScale,
float powScale, bool blocked) {
_CrossMapRNorm(images, targets, num_filters, sizeF, addScale, powScale, blocked);
}
void ResponseNormCrossMapUndoGemm(
cudamat* outGrads, cudamat* inputs, cudamat* targets, int num_filters,
int sizeF, float addScale, float powScale, bool blocked) {
_CrossMapRNormUndo(outGrads, inputs, targets, num_filters, sizeF, addScale,
powScale, blocked);
}
#ifdef __cplusplus
}
#endif
|
40bdbbcd480cbffa14d33b98738a562db0c353f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <random>
#include <vector>
#include <raft/core/handle.hpp>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include "test_utils.h"
#include <cuml/common/logger.hpp>
#include <linalg/block.cuh>
namespace MLCommon {
namespace LinAlg {
using namespace std;
/* GEMM */
template <typename T>
struct BlockGemmInputs {
int m, k, n;
bool transa, transb;
int batch_size;
int vec_len;
T eps;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const BlockGemmInputs<T>& dims)
{
return os;
}
template <typename Policy, typename T>
__global__ void block_gemm_test_kernel(
bool transa, bool transb, int m, int n, int k, T alpha, const T* a, const T* b, T* c)
{
__shared__ MLCommon::LinAlg::GemmStorage<Policy, T> gemm_storage;
_block_gemm<Policy>(transa,
transb,
m,
n,
k,
alpha,
a + m * k * blockIdx.x,
b + k * n * blockIdx.x,
c + m * n * blockIdx.x,
gemm_storage);
}
template <typename Policy, typename T>
class BlockGemmTest : public ::testing::TestWithParam<BlockGemmInputs<T>> {
protected:
void basicTest()
{
raft::handle_t handle;
params = ::testing::TestWithParam<BlockGemmInputs<T>>::GetParam();
rmm::device_uvector<T> a(params.m * params.k * params.batch_size, handle.get_stream());
rmm::device_uvector<T> b(params.k * params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> c(params.m * params.n * params.batch_size, handle.get_stream());
std::vector<T> h_a(params.m * params.k * params.batch_size);
std::vector<T> h_b(params.k * params.n * params.batch_size);
std::vector<T> h_c_ref(params.m * params.n * params.batch_size);
/* Generate random data on device */
raft::random::Rng r(params.seed);
r.uniform(a.data(), params.m * params.k * params.batch_size, (T)-2, (T)2, handle.get_stream());
r.uniform(b.data(), params.k * params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
/* Generate random alpha */
std::default_random_engine generator(params.seed);
std::uniform_real_distribution<T> distribution(-2.0, 2.0);
T alpha = distribution(generator);
/* Copy to host */
raft::update_host(
h_a.data(), a.data(), params.m * params.k * params.batch_size, handle.get_stream());
raft::update_host(
h_b.data(), b.data(), params.k * params.n * params.batch_size, handle.get_stream());
handle.sync_stream(handle.get_stream());
/* Compute using tested prims */
hipLaunchKernelGGL(( block_gemm_test_kernel<Policy>)
, dim3(params.batch_size), dim3(Policy::BlockSize), 0, handle.get_stream(), params.transa,
params.transb,
params.m,
params.n,
params.k,
alpha,
a.data(),
b.data(),
c.data());
/* Compute reference results */
for (int bid = 0; bid < params.batch_size; bid++) {
for (int i = 0; i < params.m; i++) {
for (int j = 0; j < params.n; j++) {
T acc = (T)0;
for (int h = 0; h < params.k; h++) {
T _a = params.transa ? h_a[bid * params.m * params.k + i * params.k + h]
: h_a[bid * params.m * params.k + h * params.m + i];
T _b = params.transb ? h_b[bid * params.k * params.n + h * params.n + j]
: h_b[bid * params.k * params.n + j * params.k + h];
acc += _a * _b;
}
h_c_ref[bid * params.m * params.n + j * params.m + i] = alpha * acc;
}
}
}
/* Check results */
match = devArrMatchHost(h_c_ref.data(),
c.data(),
params.m * params.n * params.batch_size,
raft::CompareApprox<T>(params.eps),
handle.get_stream());
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
BlockGemmInputs<T> params;
testing::AssertionResult match = testing::AssertionFailure();
};
const std::vector<BlockGemmInputs<float>> gemm_inputsf = {
{42, 42, 42, false, false, 20, 1, 1e-4, 12345U},
{65, 10, 20, false, true, 50, 1, 1e-4, 12345U},
{5, 80, 31, true, false, 80, 1, 1e-4, 12345U},
{11, 50, 41, true, true, 100, 1, 1e-4, 12345U},
};
const std::vector<BlockGemmInputs<double>> gemm_inputsd = {
{42, 42, 42, false, false, 20, 1, 1e-4, 12345U},
{65, 10, 20, false, true, 50, 1, 1e-4, 12345U},
{5, 80, 31, true, false, 80, 1, 1e-4, 12345U},
{11, 50, 41, true, true, 100, 1, 1e-4, 12345U},
};
const std::vector<BlockGemmInputs<float>> gemm_inputsf_vec2 = {
{30, 34, 16, false, false, 20, 2, 1e-4, 12345U},
{10, 42, 20, false, true, 20, 2, 1e-4, 12345U},
{14, 8, 22, true, false, 20, 2, 1e-4, 12345U},
{56, 72, 28, true, true, 20, 2, 1e-4, 12345U},
};
const std::vector<BlockGemmInputs<double>> gemm_inputsd_vec2 = {
{30, 34, 16, false, false, 20, 2, 1e-4, 12345U},
{10, 42, 20, false, true, 20, 2, 1e-4, 12345U},
{14, 8, 22, true, false, 20, 2, 1e-4, 12345U},
{56, 72, 28, true, true, 20, 2, 1e-4, 12345U},
};
typedef BlockGemmTest<BlockGemmPolicy<1, 16, 1, 4, 16, 4>, float> BlockGemmTestF_1_16_1_4_16_4;
TEST_P(BlockGemmTestF_1_16_1_4_16_4, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<1, 16, 1, 4, 16, 4>, double> BlockGemmTestD_1_16_1_4_16_4;
TEST_P(BlockGemmTestD_1_16_1_4_16_4, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<1, 32, 1, 4, 32, 8>, float> BlockGemmTestF_1_32_1_4_32_8;
TEST_P(BlockGemmTestF_1_32_1_4_32_8, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<1, 32, 1, 4, 32, 8>, double> BlockGemmTestD_1_32_1_4_32_8;
TEST_P(BlockGemmTestD_1_32_1_4_32_8, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<1, 32, 1, 16, 64, 4>, float> BlockGemmTestF_1_32_1_16_64_4;
TEST_P(BlockGemmTestF_1_32_1_16_64_4, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<1, 32, 1, 16, 64, 4>, double> BlockGemmTestD_1_32_1_16_64_4;
TEST_P(BlockGemmTestD_1_32_1_16_64_4, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<1, 16, 1, 16, 128, 2>, float> BlockGemmTestF_1_16_1_16_128_2;
TEST_P(BlockGemmTestF_1_16_1_16_128_2, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<1, 16, 1, 16, 128, 2>, double> BlockGemmTestD_1_16_1_16_128_2;
TEST_P(BlockGemmTestD_1_16_1_16_128_2, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<2, 32, 2, 2, 16, 16>, float> BlockGemmTestF_2_32_2_2_16_16;
TEST_P(BlockGemmTestF_2_32_2_2_16_16, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<2, 32, 2, 2, 16, 16>, double> BlockGemmTestD_2_32_2_2_16_16;
TEST_P(BlockGemmTestD_2_32_2_2_16_16, Result) { EXPECT_TRUE(match); }
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestF_1_16_1_4_16_4,
::testing::ValuesIn(gemm_inputsf));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestD_1_16_1_4_16_4,
::testing::ValuesIn(gemm_inputsd));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestF_1_32_1_4_32_8,
::testing::ValuesIn(gemm_inputsf));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestD_1_32_1_4_32_8,
::testing::ValuesIn(gemm_inputsd));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestF_1_32_1_16_64_4,
::testing::ValuesIn(gemm_inputsf));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestD_1_32_1_16_64_4,
::testing::ValuesIn(gemm_inputsd));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestF_1_16_1_16_128_2,
::testing::ValuesIn(gemm_inputsf));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestD_1_16_1_16_128_2,
::testing::ValuesIn(gemm_inputsd));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestF_2_32_2_2_16_16,
::testing::ValuesIn(gemm_inputsf_vec2));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestD_2_32_2_2_16_16,
::testing::ValuesIn(gemm_inputsd_vec2));
/* GEMV */
template <typename T>
struct BlockGemvInputs {
bool preload;
int m, n;
int batch_size;
T eps;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const BlockGemvInputs<T>& dims)
{
return os;
}
template <typename Policy, typename T>
__global__ void block_gemv_test_kernel(
int m, int n, T alpha, const T* a, const T* x, T* y, bool preload)
{
__shared__ MLCommon::LinAlg::GemvStorage<Policy, T> gemv_storage;
extern __shared__ char dyna_shared_mem[];
T* shared_vec = (T*)dyna_shared_mem;
if (preload) {
_block_gemv<Policy, true>(m,
n,
alpha,
a + m * n * blockIdx.x,
x + n * blockIdx.x,
y + m * blockIdx.x,
gemv_storage,
shared_vec);
} else {
for (int i = threadIdx.x; i < n; i += Policy::BlockSize) {
shared_vec[i] = x[n * blockIdx.x + i];
}
__syncthreads();
_block_gemv<Policy, false>(
m, n, alpha, a + m * n * blockIdx.x, shared_vec, y + m * blockIdx.x, gemv_storage);
}
}
template <typename Policy, typename T>
class BlockGemvTest : public ::testing::TestWithParam<BlockGemvInputs<T>> {
protected:
void basicTest()
{
raft::handle_t handle;
params = ::testing::TestWithParam<BlockGemvInputs<T>>::GetParam();
rmm::device_uvector<T> a(params.m * params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> x(params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> y(params.m * params.batch_size, handle.get_stream());
std::vector<T> h_a(params.m * params.n * params.batch_size);
std::vector<T> h_x(params.n * params.batch_size);
std::vector<T> h_y_ref(params.m * params.batch_size);
/* Generate random data on device */
raft::random::Rng r(params.seed);
r.uniform(a.data(), params.m * params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
r.uniform(x.data(), params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
/* Generate random alpha */
std::default_random_engine generator(params.seed);
std::uniform_real_distribution<T> distribution(-2.0, 2.0);
T alpha = distribution(generator);
/* Copy to host */
raft::update_host(
h_a.data(), a.data(), params.m * params.n * params.batch_size, handle.get_stream());
raft::update_host(h_x.data(), x.data(), params.n * params.batch_size, handle.get_stream());
handle.sync_stream(handle.get_stream());
/* Compute using tested prims */
int shared_mem_size = params.n * sizeof(T);
hipLaunchKernelGGL(( block_gemv_test_kernel<Policy>)
, dim3(params.batch_size), dim3(Policy::BlockSize), shared_mem_size, handle.get_stream(),
params.m, params.n, alpha, a.data(), x.data(), y.data(), params.preload);
/* Compute reference results */
for (int bid = 0; bid < params.batch_size; bid++) {
for (int i = 0; i < params.m; i++) {
T acc = (T)0;
for (int j = 0; j < params.n; j++) {
acc += h_a[bid * params.m * params.n + j * params.m + i] * h_x[bid * params.n + j];
}
h_y_ref[bid * params.m + i] = alpha * acc;
}
}
/* Check results */
match = devArrMatchHost(h_y_ref.data(),
y.data(),
params.m * params.batch_size,
raft::CompareApprox<T>(params.eps),
handle.get_stream());
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
BlockGemvInputs<T> params;
testing::AssertionResult match = testing::AssertionFailure();
};
const std::vector<BlockGemvInputs<float>> gemv_inputsf = {{true, 42, 42, 20, 1e-4, 12345U},
{true, 65, 10, 50, 1e-4, 12345U},
{false, 5, 80, 100, 1e-4, 12345U}};
const std::vector<BlockGemvInputs<double>> gemv_inputsd = {{true, 42, 42, 20, 1e-4, 12345U},
{true, 65, 10, 50, 1e-4, 12345U},
{false, 5, 80, 100, 1e-4, 12345U}};
typedef BlockGemvTest<BlockGemvPolicy<16, 4>, float> BlockGemvTestF_16_4;
TEST_P(BlockGemvTestF_16_4, Result) { EXPECT_TRUE(match); }
typedef BlockGemvTest<BlockGemvPolicy<16, 4>, double> BlockGemvTestD_16_4;
TEST_P(BlockGemvTestD_16_4, Result) { EXPECT_TRUE(match); }
typedef BlockGemvTest<BlockGemvPolicy<32, 8>, float> BlockGemvTestF_32_8;
TEST_P(BlockGemvTestF_32_8, Result) { EXPECT_TRUE(match); }
typedef BlockGemvTest<BlockGemvPolicy<32, 8>, double> BlockGemvTestD_32_8;
TEST_P(BlockGemvTestD_32_8, Result) { EXPECT_TRUE(match); }
typedef BlockGemvTest<BlockGemvPolicy<128, 2>, float> BlockGemvTestF_128_2;
TEST_P(BlockGemvTestF_128_2, Result) { EXPECT_TRUE(match); }
typedef BlockGemvTest<BlockGemvPolicy<128, 2>, double> BlockGemvTestD_128_2;
TEST_P(BlockGemvTestD_128_2, Result) { EXPECT_TRUE(match); }
INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestF_16_4, ::testing::ValuesIn(gemv_inputsf));
INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestD_16_4, ::testing::ValuesIn(gemv_inputsd));
INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestF_32_8, ::testing::ValuesIn(gemv_inputsf));
INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestD_32_8, ::testing::ValuesIn(gemv_inputsd));
INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestF_128_2, ::testing::ValuesIn(gemv_inputsf));
INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestD_128_2, ::testing::ValuesIn(gemv_inputsd));
/* DOT */
template <typename T>
struct BlockDotInputs {
bool broadcast;
int n;
int batch_size;
T eps;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const BlockDotInputs<T>& dims)
{
return os;
}
template <int BlockSize, bool Broadcast, typename T>
__global__ void block_dot_test_kernel(int n, const T* x, const T* y, T* d_dot)
{
__shared__ ReductionStorage<BlockSize, T> reduction_storage;
T dot_ =
_block_dot<BlockSize, Broadcast>(n, x + n * blockIdx.x, y + n * blockIdx.x, reduction_storage);
if (!Broadcast && threadIdx.x == 0)
d_dot[blockIdx.x] = dot_;
else if (Broadcast && threadIdx.x == BlockSize - 1)
d_dot[blockIdx.x] = dot_;
}
template <typename T>
class BlockDotTest : public ::testing::TestWithParam<BlockDotInputs<T>> {
protected:
void basicTest()
{
raft::handle_t handle;
params = ::testing::TestWithParam<BlockDotInputs<T>>::GetParam();
rmm::device_uvector<T> x(params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> y(params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> dot_dev(params.batch_size, handle.get_stream());
std::vector<T> h_x(params.n * params.batch_size);
std::vector<T> h_y(params.n * params.batch_size);
std::vector<T> h_dot_ref(params.batch_size, (T)0);
/* Generate random data on device */
raft::random::Rng r(params.seed);
r.uniform(x.data(), params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
r.uniform(y.data(), params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
/* Copy to host */
raft::update_host(h_x.data(), x.data(), params.n * params.batch_size, handle.get_stream());
raft::update_host(h_y.data(), y.data(), params.n * params.batch_size, handle.get_stream());
handle.sync_stream(handle.get_stream());
/* Compute using tested prims */
constexpr int BlockSize = 64;
if (params.broadcast)
hipLaunchKernelGGL(( block_dot_test_kernel<BlockSize, true>)
, dim3(params.batch_size), dim3(BlockSize), 0, handle.get_stream(),
params.n, x.data(), y.data(), dot_dev.data());
else
hipLaunchKernelGGL(( block_dot_test_kernel<BlockSize, false>)
, dim3(params.batch_size), dim3(BlockSize), 0, handle.get_stream(),
params.n, x.data(), y.data(), dot_dev.data());
/* Compute reference results */
for (int bid = 0; bid < params.batch_size; bid++) {
for (int i = 0; i < params.n; i++) {
h_dot_ref[bid] += h_x[bid * params.n + i] * h_y[bid * params.n + i];
}
}
/* Check results */
match = devArrMatchHost(h_dot_ref.data(),
dot_dev.data(),
params.batch_size,
raft::CompareApprox<T>(params.eps),
handle.get_stream());
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
BlockDotInputs<T> params;
testing::AssertionResult match = testing::AssertionFailure();
};
const std::vector<BlockDotInputs<float>> dot_inputsf = {{true, 9, 20, 1e-4, 12345U},
{true, 65, 50, 1e-4, 12345U},
{true, 200, 100, 1e-4, 12345U},
{false, 200, 100, 1e-4, 12345U}};
const std::vector<BlockDotInputs<double>> dot_inputsd = {{true, 9, 20, 1e-4, 12345U},
{true, 65, 50, 1e-4, 12345U},
{true, 200, 100, 1e-4, 12345U},
{false, 200, 100, 1e-4, 12345U}};
typedef BlockDotTest<float> BlockDotTestF;
TEST_P(BlockDotTestF, Result) { EXPECT_TRUE(match); }
typedef BlockDotTest<double> BlockDotTestD;
TEST_P(BlockDotTestD, Result) { EXPECT_TRUE(match); }
INSTANTIATE_TEST_CASE_P(BlockDotTests, BlockDotTestF, ::testing::ValuesIn(dot_inputsf));
INSTANTIATE_TEST_CASE_P(BlockDotTests, BlockDotTestD, ::testing::ValuesIn(dot_inputsd));
/* x*A*x' */
template <typename T>
struct BlockXaxtInputs {
bool broadcast;
bool preload;
int n;
int batch_size;
T eps;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const BlockXaxtInputs<T>& dims)
{
return os;
}
template <int BlockSize, bool Broadcast, typename T>
__global__ void block_xAxt_test_kernel(int n, const T* x, const T* A, T* d_res, bool preload)
{
extern __shared__ char dyna_shared_mem[];
T* shared_vec = (T*)dyna_shared_mem;
__shared__ ReductionStorage<BlockSize, T> reduction_storage;
T res_;
if (preload) {
res_ = _block_xAxt<BlockSize, Broadcast, true>(
n, x + n * blockIdx.x, A + n * n * blockIdx.x, reduction_storage, shared_vec);
} else {
for (int i = threadIdx.x; i < n; i += BlockSize) {
shared_vec[i] = x[n * blockIdx.x + i];
}
__syncthreads();
res_ = _block_xAxt<BlockSize, Broadcast, false>(
n, shared_vec, A + n * n * blockIdx.x, reduction_storage);
}
if (!Broadcast && threadIdx.x == 0)
d_res[blockIdx.x] = res_;
else if (Broadcast && threadIdx.x == BlockSize - 1)
d_res[blockIdx.x] = res_;
}
template <typename T>
class BlockXaxtTest : public ::testing::TestWithParam<BlockXaxtInputs<T>> {
protected:
void basicTest()
{
raft::handle_t handle;
params = ::testing::TestWithParam<BlockXaxtInputs<T>>::GetParam();
rmm::device_uvector<T> x(params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> A(params.n * params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> res_dev(params.batch_size, handle.get_stream());
std::vector<T> h_x(params.n * params.batch_size);
std::vector<T> h_A(params.n * params.n * params.batch_size);
std::vector<T> h_res_ref(params.batch_size, (T)0);
/* Generate random data on device */
raft::random::Rng r(params.seed);
r.uniform(x.data(), params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
r.uniform(A.data(), params.n * params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
/* Copy to host */
raft::update_host(h_x.data(), x.data(), params.n * params.batch_size, handle.get_stream());
raft::update_host(
h_A.data(), A.data(), params.n * params.n * params.batch_size, handle.get_stream());
handle.sync_stream(handle.get_stream());
/* Compute using tested prims */
constexpr int BlockSize = 64;
int shared_mem_size = params.n * sizeof(T);
if (params.broadcast)
hipLaunchKernelGGL(( block_xAxt_test_kernel<BlockSize, true>)
, dim3(params.batch_size), dim3(BlockSize), shared_mem_size, handle.get_stream(),
params.n, x.data(), A.data(), res_dev.data(), params.preload);
else
hipLaunchKernelGGL(( block_xAxt_test_kernel<BlockSize, false>)
, dim3(params.batch_size), dim3(BlockSize), shared_mem_size, handle.get_stream(),
params.n, x.data(), A.data(), res_dev.data(), params.preload);
/* Compute reference results */
for (int bid = 0; bid < params.batch_size; bid++) {
for (int i = 0; i < params.n; i++) {
T acc = 0;
for (int j = 0; j < params.n; j++) {
acc += h_A[bid * params.n * params.n + j * params.n + i] * h_x[bid * params.n + j];
}
h_res_ref[bid] += acc * h_x[bid * params.n + i];
}
}
/* Check results */
match = devArrMatchHost(h_res_ref.data(),
res_dev.data(),
params.batch_size,
raft::CompareApprox<T>(params.eps),
handle.get_stream());
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
BlockXaxtInputs<T> params;
testing::AssertionResult match = testing::AssertionFailure();
};
const std::vector<BlockXaxtInputs<float>> xAxt_inputsf = {{true, true, 9, 20, 1e-2, 12345U},
{true, true, 65, 50, 1e-2, 12345U},
{true, true, 200, 100, 1e-2, 12345U},
{false, true, 200, 100, 1e-2, 12345U},
{true, false, 200, 100, 1e-2, 12345U}};
const std::vector<BlockXaxtInputs<double>> xAxt_inputsd = {{true, true, 9, 20, 1e-4, 12345U},
{true, true, 65, 50, 1e-4, 12345U},
{true, true, 200, 100, 1e-4, 12345U},
{false, true, 200, 100, 1e-4, 12345U},
{true, false, 200, 100, 1e-2, 12345U}};
typedef BlockXaxtTest<float> BlockXaxtTestF;
TEST_P(BlockXaxtTestF, Result) { EXPECT_TRUE(match); }
typedef BlockXaxtTest<double> BlockXaxtTestD;
TEST_P(BlockXaxtTestD, Result) { EXPECT_TRUE(match); }
INSTANTIATE_TEST_CASE_P(BlockXaxtTests, BlockXaxtTestF, ::testing::ValuesIn(xAxt_inputsf));
INSTANTIATE_TEST_CASE_P(BlockXaxtTests, BlockXaxtTestD, ::testing::ValuesIn(xAxt_inputsd));
/* y=alpha*x */
template <typename T>
struct BlockAxInputs {
int n;
int batch_size;
T eps;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const BlockAxInputs<T>& dims)
{
return os;
}
template <typename T>
__global__ void block_ax_test_kernel(int n, T alpha, const T* x, T* y)
{
_block_ax(n, alpha, x + n * blockIdx.x, y + n * blockIdx.x);
}
template <typename T>
class BlockAxTest : public ::testing::TestWithParam<BlockAxInputs<T>> {
protected:
void basicTest()
{
raft::handle_t handle;
params = ::testing::TestWithParam<BlockAxInputs<T>>::GetParam();
rmm::device_uvector<T> x(params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> y(params.n * params.batch_size, handle.get_stream());
std::vector<T> h_x(params.n * params.batch_size);
std::vector<T> h_y_ref(params.n * params.batch_size, (T)0);
/* Generate random data on device */
raft::random::Rng r(params.seed);
r.uniform(x.data(), params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
/* Generate random alpha */
std::default_random_engine generator(params.seed);
std::uniform_real_distribution<T> distribution(-2.0, 2.0);
T alpha = distribution(generator);
/* Copy to host */
raft::update_host(h_x.data(), x.data(), params.n * params.batch_size, handle.get_stream());
handle.sync_stream(handle.get_stream());
/* Compute using tested prims */
constexpr int BlockSize = 64;
hipLaunchKernelGGL(( block_ax_test_kernel), dim3(params.batch_size), dim3(BlockSize), 0, handle.get_stream(),
params.n, alpha, x.data(), y.data());
/* Compute reference results */
for (int bid = 0; bid < params.batch_size; bid++) {
for (int i = 0; i < params.n; i++) {
h_y_ref[bid * params.n + i] = alpha * h_x[bid * params.n + i];
}
}
/* Check results */
match = devArrMatchHost(h_y_ref.data(),
y.data(),
params.n * params.batch_size,
raft::CompareApprox<T>(params.eps),
handle.get_stream());
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
BlockAxInputs<T> params;
testing::AssertionResult match = testing::AssertionFailure();
};
const std::vector<BlockAxInputs<float>> ax_inputsf = {
{9, 20, 1e-4, 12345U}, {65, 50, 1e-4, 12345U}, {200, 100, 1e-4, 12345U}};
const std::vector<BlockAxInputs<double>> ax_inputsd = {
{9, 20, 1e-4, 12345U}, {65, 50, 1e-4, 12345U}, {200, 100, 1e-4, 12345U}};
typedef BlockAxTest<float> BlockAxTestF;
TEST_P(BlockAxTestF, Result) { EXPECT_TRUE(match); }
typedef BlockAxTest<double> BlockAxTestD;
TEST_P(BlockAxTestD, Result) { EXPECT_TRUE(match); }
INSTANTIATE_TEST_CASE_P(BlockAxTests, BlockAxTestF, ::testing::ValuesIn(ax_inputsf));
INSTANTIATE_TEST_CASE_P(BlockAxTests, BlockAxTestD, ::testing::ValuesIn(ax_inputsd));
/* Covariance stability */
template <typename T>
struct BlockCovStabilityInputs {
int n;
int batch_size;
T eps;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const BlockCovStabilityInputs<T>& dims)
{
return os;
}
template <typename CovPolicy, typename T>
__global__ void block_cov_stability_test_kernel(int n, const T* in, T* out)
{
__shared__ CovStabilityStorage<CovPolicy, T> cov_stability_storage;
_block_covariance_stability<CovPolicy>(
n, in + n * n * blockIdx.x, out + n * n * blockIdx.x, cov_stability_storage);
}
template <typename CovPolicy, typename T>
class BlockCovStabilityTest : public ::testing::TestWithParam<BlockCovStabilityInputs<T>> {
protected:
void basicTest()
{
raft::handle_t handle;
params = ::testing::TestWithParam<BlockCovStabilityInputs<T>>::GetParam();
rmm::device_uvector<T> d_in(params.n * params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> d_out(params.n * params.n * params.batch_size, handle.get_stream());
std::vector<T> h_in(params.n * params.n * params.batch_size);
std::vector<T> h_out(params.n * params.n * params.batch_size);
/* Generate random data on device */
raft::random::Rng r(params.seed);
r.uniform(
d_in.data(), params.n * params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
/* Copy to host */
raft::update_host(
h_in.data(), d_in.data(), params.n * params.n * params.batch_size, handle.get_stream());
handle.sync_stream(handle.get_stream());
/* Compute using tested prims */
hipLaunchKernelGGL(( block_cov_stability_test_kernel<CovPolicy>)
, dim3(params.batch_size), dim3(CovPolicy::BlockSize), 0, handle.get_stream(),
params.n, d_in.data(), d_out.data());
/* Compute reference results */
for (int bid = 0; bid < params.batch_size; bid++) {
for (int i = 0; i < params.n - 1; i++) {
for (int j = i + 1; j < params.n; j++) {
T val = 0.5 * (h_in[bid * params.n * params.n + j * params.n + i] +
h_in[bid * params.n * params.n + i * params.n + j]);
h_out[bid * params.n * params.n + j * params.n + i] = val;
h_out[bid * params.n * params.n + i * params.n + j] = val;
}
}
for (int i = 0; i < params.n; i++) {
h_out[bid * params.n * params.n + i * params.n + i] =
abs(h_in[bid * params.n * params.n + i * params.n + i]);
}
}
/* Check results */
match = devArrMatchHost(h_out.data(),
d_out.data(),
params.n * params.n * params.batch_size,
raft::CompareApprox<T>(params.eps),
handle.get_stream());
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
BlockCovStabilityInputs<T> params;
testing::AssertionResult match = testing::AssertionFailure();
};
const std::vector<BlockCovStabilityInputs<float>> cs_inputsf = {
{15, 4, 1e-4, 12345U},
{33, 10, 1e-4, 12345U},
{220, 130, 1e-4, 12345U},
};
const std::vector<BlockCovStabilityInputs<double>> cs_inputsd = {
{15, 4, 1e-4, 12345U},
{33, 10, 1e-4, 12345U},
{220, 130, 1e-4, 12345U},
};
typedef BlockCovStabilityTest<BlockPolicy<1, 1, 8, 4>, float> BlockCovStabilityTestF_1_1_8_4;
TEST_P(BlockCovStabilityTestF_1_1_8_4, Result) { EXPECT_TRUE(match); }
typedef BlockCovStabilityTest<BlockPolicy<1, 1, 8, 4>, double> BlockCovStabilityTestD_1_1_8_4;
TEST_P(BlockCovStabilityTestD_1_1_8_4, Result) { EXPECT_TRUE(match); }
typedef BlockCovStabilityTest<BlockPolicy<1, 4, 32, 8>, float> BlockCovStabilityTestF_1_4_32_8;
TEST_P(BlockCovStabilityTestF_1_4_32_8, Result) { EXPECT_TRUE(match); }
typedef BlockCovStabilityTest<BlockPolicy<1, 4, 32, 8>, double> BlockCovStabilityTestD_1_4_32_8;
TEST_P(BlockCovStabilityTestD_1_4_32_8, Result) { EXPECT_TRUE(match); }
INSTANTIATE_TEST_CASE_P(BlockCovStabilityTests,
BlockCovStabilityTestF_1_1_8_4,
::testing::ValuesIn(cs_inputsf));
INSTANTIATE_TEST_CASE_P(BlockCovStabilityTests,
BlockCovStabilityTestD_1_1_8_4,
::testing::ValuesIn(cs_inputsd));
INSTANTIATE_TEST_CASE_P(BlockCovStabilityTests,
BlockCovStabilityTestF_1_4_32_8,
::testing::ValuesIn(cs_inputsf));
INSTANTIATE_TEST_CASE_P(BlockCovStabilityTests,
BlockCovStabilityTestD_1_4_32_8,
::testing::ValuesIn(cs_inputsd));
} // namespace LinAlg
} // namespace MLCommon
| 40bdbbcd480cbffa14d33b98738a562db0c353f2.cu | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <random>
#include <vector>
#include <raft/core/handle.hpp>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include "test_utils.h"
#include <cuml/common/logger.hpp>
#include <linalg/block.cuh>
namespace MLCommon {
namespace LinAlg {
using namespace std;
/* GEMM */
template <typename T>
struct BlockGemmInputs {
int m, k, n;
bool transa, transb;
int batch_size;
int vec_len;
T eps;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const BlockGemmInputs<T>& dims)
{
return os;
}
template <typename Policy, typename T>
__global__ void block_gemm_test_kernel(
bool transa, bool transb, int m, int n, int k, T alpha, const T* a, const T* b, T* c)
{
__shared__ MLCommon::LinAlg::GemmStorage<Policy, T> gemm_storage;
_block_gemm<Policy>(transa,
transb,
m,
n,
k,
alpha,
a + m * k * blockIdx.x,
b + k * n * blockIdx.x,
c + m * n * blockIdx.x,
gemm_storage);
}
template <typename Policy, typename T>
class BlockGemmTest : public ::testing::TestWithParam<BlockGemmInputs<T>> {
protected:
void basicTest()
{
raft::handle_t handle;
params = ::testing::TestWithParam<BlockGemmInputs<T>>::GetParam();
rmm::device_uvector<T> a(params.m * params.k * params.batch_size, handle.get_stream());
rmm::device_uvector<T> b(params.k * params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> c(params.m * params.n * params.batch_size, handle.get_stream());
std::vector<T> h_a(params.m * params.k * params.batch_size);
std::vector<T> h_b(params.k * params.n * params.batch_size);
std::vector<T> h_c_ref(params.m * params.n * params.batch_size);
/* Generate random data on device */
raft::random::Rng r(params.seed);
r.uniform(a.data(), params.m * params.k * params.batch_size, (T)-2, (T)2, handle.get_stream());
r.uniform(b.data(), params.k * params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
/* Generate random alpha */
std::default_random_engine generator(params.seed);
std::uniform_real_distribution<T> distribution(-2.0, 2.0);
T alpha = distribution(generator);
/* Copy to host */
raft::update_host(
h_a.data(), a.data(), params.m * params.k * params.batch_size, handle.get_stream());
raft::update_host(
h_b.data(), b.data(), params.k * params.n * params.batch_size, handle.get_stream());
handle.sync_stream(handle.get_stream());
/* Compute using tested prims */
block_gemm_test_kernel<Policy>
<<<params.batch_size, Policy::BlockSize, 0, handle.get_stream()>>>(params.transa,
params.transb,
params.m,
params.n,
params.k,
alpha,
a.data(),
b.data(),
c.data());
/* Compute reference results */
for (int bid = 0; bid < params.batch_size; bid++) {
for (int i = 0; i < params.m; i++) {
for (int j = 0; j < params.n; j++) {
T acc = (T)0;
for (int h = 0; h < params.k; h++) {
T _a = params.transa ? h_a[bid * params.m * params.k + i * params.k + h]
: h_a[bid * params.m * params.k + h * params.m + i];
T _b = params.transb ? h_b[bid * params.k * params.n + h * params.n + j]
: h_b[bid * params.k * params.n + j * params.k + h];
acc += _a * _b;
}
h_c_ref[bid * params.m * params.n + j * params.m + i] = alpha * acc;
}
}
}
/* Check results */
match = devArrMatchHost(h_c_ref.data(),
c.data(),
params.m * params.n * params.batch_size,
raft::CompareApprox<T>(params.eps),
handle.get_stream());
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
BlockGemmInputs<T> params;
testing::AssertionResult match = testing::AssertionFailure();
};
const std::vector<BlockGemmInputs<float>> gemm_inputsf = {
{42, 42, 42, false, false, 20, 1, 1e-4, 12345U},
{65, 10, 20, false, true, 50, 1, 1e-4, 12345U},
{5, 80, 31, true, false, 80, 1, 1e-4, 12345U},
{11, 50, 41, true, true, 100, 1, 1e-4, 12345U},
};
const std::vector<BlockGemmInputs<double>> gemm_inputsd = {
{42, 42, 42, false, false, 20, 1, 1e-4, 12345U},
{65, 10, 20, false, true, 50, 1, 1e-4, 12345U},
{5, 80, 31, true, false, 80, 1, 1e-4, 12345U},
{11, 50, 41, true, true, 100, 1, 1e-4, 12345U},
};
const std::vector<BlockGemmInputs<float>> gemm_inputsf_vec2 = {
{30, 34, 16, false, false, 20, 2, 1e-4, 12345U},
{10, 42, 20, false, true, 20, 2, 1e-4, 12345U},
{14, 8, 22, true, false, 20, 2, 1e-4, 12345U},
{56, 72, 28, true, true, 20, 2, 1e-4, 12345U},
};
const std::vector<BlockGemmInputs<double>> gemm_inputsd_vec2 = {
{30, 34, 16, false, false, 20, 2, 1e-4, 12345U},
{10, 42, 20, false, true, 20, 2, 1e-4, 12345U},
{14, 8, 22, true, false, 20, 2, 1e-4, 12345U},
{56, 72, 28, true, true, 20, 2, 1e-4, 12345U},
};
typedef BlockGemmTest<BlockGemmPolicy<1, 16, 1, 4, 16, 4>, float> BlockGemmTestF_1_16_1_4_16_4;
TEST_P(BlockGemmTestF_1_16_1_4_16_4, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<1, 16, 1, 4, 16, 4>, double> BlockGemmTestD_1_16_1_4_16_4;
TEST_P(BlockGemmTestD_1_16_1_4_16_4, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<1, 32, 1, 4, 32, 8>, float> BlockGemmTestF_1_32_1_4_32_8;
TEST_P(BlockGemmTestF_1_32_1_4_32_8, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<1, 32, 1, 4, 32, 8>, double> BlockGemmTestD_1_32_1_4_32_8;
TEST_P(BlockGemmTestD_1_32_1_4_32_8, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<1, 32, 1, 16, 64, 4>, float> BlockGemmTestF_1_32_1_16_64_4;
TEST_P(BlockGemmTestF_1_32_1_16_64_4, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<1, 32, 1, 16, 64, 4>, double> BlockGemmTestD_1_32_1_16_64_4;
TEST_P(BlockGemmTestD_1_32_1_16_64_4, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<1, 16, 1, 16, 128, 2>, float> BlockGemmTestF_1_16_1_16_128_2;
TEST_P(BlockGemmTestF_1_16_1_16_128_2, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<1, 16, 1, 16, 128, 2>, double> BlockGemmTestD_1_16_1_16_128_2;
TEST_P(BlockGemmTestD_1_16_1_16_128_2, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<2, 32, 2, 2, 16, 16>, float> BlockGemmTestF_2_32_2_2_16_16;
TEST_P(BlockGemmTestF_2_32_2_2_16_16, Result) { EXPECT_TRUE(match); }
typedef BlockGemmTest<BlockGemmPolicy<2, 32, 2, 2, 16, 16>, double> BlockGemmTestD_2_32_2_2_16_16;
TEST_P(BlockGemmTestD_2_32_2_2_16_16, Result) { EXPECT_TRUE(match); }
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestF_1_16_1_4_16_4,
::testing::ValuesIn(gemm_inputsf));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestD_1_16_1_4_16_4,
::testing::ValuesIn(gemm_inputsd));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestF_1_32_1_4_32_8,
::testing::ValuesIn(gemm_inputsf));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestD_1_32_1_4_32_8,
::testing::ValuesIn(gemm_inputsd));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestF_1_32_1_16_64_4,
::testing::ValuesIn(gemm_inputsf));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestD_1_32_1_16_64_4,
::testing::ValuesIn(gemm_inputsd));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestF_1_16_1_16_128_2,
::testing::ValuesIn(gemm_inputsf));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestD_1_16_1_16_128_2,
::testing::ValuesIn(gemm_inputsd));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestF_2_32_2_2_16_16,
::testing::ValuesIn(gemm_inputsf_vec2));
INSTANTIATE_TEST_CASE_P(BlockGemmTests,
BlockGemmTestD_2_32_2_2_16_16,
::testing::ValuesIn(gemm_inputsd_vec2));
/* GEMV */
template <typename T>
struct BlockGemvInputs {
bool preload;
int m, n;
int batch_size;
T eps;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const BlockGemvInputs<T>& dims)
{
return os;
}
template <typename Policy, typename T>
__global__ void block_gemv_test_kernel(
int m, int n, T alpha, const T* a, const T* x, T* y, bool preload)
{
__shared__ MLCommon::LinAlg::GemvStorage<Policy, T> gemv_storage;
extern __shared__ char dyna_shared_mem[];
T* shared_vec = (T*)dyna_shared_mem;
if (preload) {
_block_gemv<Policy, true>(m,
n,
alpha,
a + m * n * blockIdx.x,
x + n * blockIdx.x,
y + m * blockIdx.x,
gemv_storage,
shared_vec);
} else {
for (int i = threadIdx.x; i < n; i += Policy::BlockSize) {
shared_vec[i] = x[n * blockIdx.x + i];
}
__syncthreads();
_block_gemv<Policy, false>(
m, n, alpha, a + m * n * blockIdx.x, shared_vec, y + m * blockIdx.x, gemv_storage);
}
}
template <typename Policy, typename T>
class BlockGemvTest : public ::testing::TestWithParam<BlockGemvInputs<T>> {
protected:
void basicTest()
{
raft::handle_t handle;
params = ::testing::TestWithParam<BlockGemvInputs<T>>::GetParam();
rmm::device_uvector<T> a(params.m * params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> x(params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> y(params.m * params.batch_size, handle.get_stream());
std::vector<T> h_a(params.m * params.n * params.batch_size);
std::vector<T> h_x(params.n * params.batch_size);
std::vector<T> h_y_ref(params.m * params.batch_size);
/* Generate random data on device */
raft::random::Rng r(params.seed);
r.uniform(a.data(), params.m * params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
r.uniform(x.data(), params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
/* Generate random alpha */
std::default_random_engine generator(params.seed);
std::uniform_real_distribution<T> distribution(-2.0, 2.0);
T alpha = distribution(generator);
/* Copy to host */
raft::update_host(
h_a.data(), a.data(), params.m * params.n * params.batch_size, handle.get_stream());
raft::update_host(h_x.data(), x.data(), params.n * params.batch_size, handle.get_stream());
handle.sync_stream(handle.get_stream());
/* Compute using tested prims */
int shared_mem_size = params.n * sizeof(T);
block_gemv_test_kernel<Policy>
<<<params.batch_size, Policy::BlockSize, shared_mem_size, handle.get_stream()>>>(
params.m, params.n, alpha, a.data(), x.data(), y.data(), params.preload);
/* Compute reference results */
for (int bid = 0; bid < params.batch_size; bid++) {
for (int i = 0; i < params.m; i++) {
T acc = (T)0;
for (int j = 0; j < params.n; j++) {
acc += h_a[bid * params.m * params.n + j * params.m + i] * h_x[bid * params.n + j];
}
h_y_ref[bid * params.m + i] = alpha * acc;
}
}
/* Check results */
match = devArrMatchHost(h_y_ref.data(),
y.data(),
params.m * params.batch_size,
raft::CompareApprox<T>(params.eps),
handle.get_stream());
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
BlockGemvInputs<T> params;
testing::AssertionResult match = testing::AssertionFailure();
};
const std::vector<BlockGemvInputs<float>> gemv_inputsf = {{true, 42, 42, 20, 1e-4, 12345U},
{true, 65, 10, 50, 1e-4, 12345U},
{false, 5, 80, 100, 1e-4, 12345U}};
const std::vector<BlockGemvInputs<double>> gemv_inputsd = {{true, 42, 42, 20, 1e-4, 12345U},
{true, 65, 10, 50, 1e-4, 12345U},
{false, 5, 80, 100, 1e-4, 12345U}};
typedef BlockGemvTest<BlockGemvPolicy<16, 4>, float> BlockGemvTestF_16_4;
TEST_P(BlockGemvTestF_16_4, Result) { EXPECT_TRUE(match); }
typedef BlockGemvTest<BlockGemvPolicy<16, 4>, double> BlockGemvTestD_16_4;
TEST_P(BlockGemvTestD_16_4, Result) { EXPECT_TRUE(match); }
typedef BlockGemvTest<BlockGemvPolicy<32, 8>, float> BlockGemvTestF_32_8;
TEST_P(BlockGemvTestF_32_8, Result) { EXPECT_TRUE(match); }
typedef BlockGemvTest<BlockGemvPolicy<32, 8>, double> BlockGemvTestD_32_8;
TEST_P(BlockGemvTestD_32_8, Result) { EXPECT_TRUE(match); }
typedef BlockGemvTest<BlockGemvPolicy<128, 2>, float> BlockGemvTestF_128_2;
TEST_P(BlockGemvTestF_128_2, Result) { EXPECT_TRUE(match); }
typedef BlockGemvTest<BlockGemvPolicy<128, 2>, double> BlockGemvTestD_128_2;
TEST_P(BlockGemvTestD_128_2, Result) { EXPECT_TRUE(match); }
INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestF_16_4, ::testing::ValuesIn(gemv_inputsf));
INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestD_16_4, ::testing::ValuesIn(gemv_inputsd));
INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestF_32_8, ::testing::ValuesIn(gemv_inputsf));
INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestD_32_8, ::testing::ValuesIn(gemv_inputsd));
INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestF_128_2, ::testing::ValuesIn(gemv_inputsf));
INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestD_128_2, ::testing::ValuesIn(gemv_inputsd));
/* DOT */
template <typename T>
struct BlockDotInputs {
bool broadcast;
int n;
int batch_size;
T eps;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const BlockDotInputs<T>& dims)
{
return os;
}
template <int BlockSize, bool Broadcast, typename T>
__global__ void block_dot_test_kernel(int n, const T* x, const T* y, T* d_dot)
{
__shared__ ReductionStorage<BlockSize, T> reduction_storage;
T dot_ =
_block_dot<BlockSize, Broadcast>(n, x + n * blockIdx.x, y + n * blockIdx.x, reduction_storage);
if (!Broadcast && threadIdx.x == 0)
d_dot[blockIdx.x] = dot_;
else if (Broadcast && threadIdx.x == BlockSize - 1)
d_dot[blockIdx.x] = dot_;
}
template <typename T>
class BlockDotTest : public ::testing::TestWithParam<BlockDotInputs<T>> {
protected:
void basicTest()
{
raft::handle_t handle;
params = ::testing::TestWithParam<BlockDotInputs<T>>::GetParam();
rmm::device_uvector<T> x(params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> y(params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> dot_dev(params.batch_size, handle.get_stream());
std::vector<T> h_x(params.n * params.batch_size);
std::vector<T> h_y(params.n * params.batch_size);
std::vector<T> h_dot_ref(params.batch_size, (T)0);
/* Generate random data on device */
raft::random::Rng r(params.seed);
r.uniform(x.data(), params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
r.uniform(y.data(), params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
/* Copy to host */
raft::update_host(h_x.data(), x.data(), params.n * params.batch_size, handle.get_stream());
raft::update_host(h_y.data(), y.data(), params.n * params.batch_size, handle.get_stream());
handle.sync_stream(handle.get_stream());
/* Compute using tested prims */
constexpr int BlockSize = 64;
if (params.broadcast)
block_dot_test_kernel<BlockSize, true>
<<<params.batch_size, BlockSize, 0, handle.get_stream()>>>(
params.n, x.data(), y.data(), dot_dev.data());
else
block_dot_test_kernel<BlockSize, false>
<<<params.batch_size, BlockSize, 0, handle.get_stream()>>>(
params.n, x.data(), y.data(), dot_dev.data());
/* Compute reference results */
for (int bid = 0; bid < params.batch_size; bid++) {
for (int i = 0; i < params.n; i++) {
h_dot_ref[bid] += h_x[bid * params.n + i] * h_y[bid * params.n + i];
}
}
/* Check results */
match = devArrMatchHost(h_dot_ref.data(),
dot_dev.data(),
params.batch_size,
raft::CompareApprox<T>(params.eps),
handle.get_stream());
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
BlockDotInputs<T> params;
testing::AssertionResult match = testing::AssertionFailure();
};
const std::vector<BlockDotInputs<float>> dot_inputsf = {{true, 9, 20, 1e-4, 12345U},
{true, 65, 50, 1e-4, 12345U},
{true, 200, 100, 1e-4, 12345U},
{false, 200, 100, 1e-4, 12345U}};
const std::vector<BlockDotInputs<double>> dot_inputsd = {{true, 9, 20, 1e-4, 12345U},
{true, 65, 50, 1e-4, 12345U},
{true, 200, 100, 1e-4, 12345U},
{false, 200, 100, 1e-4, 12345U}};
typedef BlockDotTest<float> BlockDotTestF;
TEST_P(BlockDotTestF, Result) { EXPECT_TRUE(match); }
typedef BlockDotTest<double> BlockDotTestD;
TEST_P(BlockDotTestD, Result) { EXPECT_TRUE(match); }
INSTANTIATE_TEST_CASE_P(BlockDotTests, BlockDotTestF, ::testing::ValuesIn(dot_inputsf));
INSTANTIATE_TEST_CASE_P(BlockDotTests, BlockDotTestD, ::testing::ValuesIn(dot_inputsd));
/* x*A*x' */
template <typename T>
struct BlockXaxtInputs {
bool broadcast;
bool preload;
int n;
int batch_size;
T eps;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const BlockXaxtInputs<T>& dims)
{
return os;
}
template <int BlockSize, bool Broadcast, typename T>
__global__ void block_xAxt_test_kernel(int n, const T* x, const T* A, T* d_res, bool preload)
{
extern __shared__ char dyna_shared_mem[];
T* shared_vec = (T*)dyna_shared_mem;
__shared__ ReductionStorage<BlockSize, T> reduction_storage;
T res_;
if (preload) {
res_ = _block_xAxt<BlockSize, Broadcast, true>(
n, x + n * blockIdx.x, A + n * n * blockIdx.x, reduction_storage, shared_vec);
} else {
for (int i = threadIdx.x; i < n; i += BlockSize) {
shared_vec[i] = x[n * blockIdx.x + i];
}
__syncthreads();
res_ = _block_xAxt<BlockSize, Broadcast, false>(
n, shared_vec, A + n * n * blockIdx.x, reduction_storage);
}
if (!Broadcast && threadIdx.x == 0)
d_res[blockIdx.x] = res_;
else if (Broadcast && threadIdx.x == BlockSize - 1)
d_res[blockIdx.x] = res_;
}
template <typename T>
class BlockXaxtTest : public ::testing::TestWithParam<BlockXaxtInputs<T>> {
protected:
void basicTest()
{
raft::handle_t handle;
params = ::testing::TestWithParam<BlockXaxtInputs<T>>::GetParam();
rmm::device_uvector<T> x(params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> A(params.n * params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> res_dev(params.batch_size, handle.get_stream());
std::vector<T> h_x(params.n * params.batch_size);
std::vector<T> h_A(params.n * params.n * params.batch_size);
std::vector<T> h_res_ref(params.batch_size, (T)0);
/* Generate random data on device */
raft::random::Rng r(params.seed);
r.uniform(x.data(), params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
r.uniform(A.data(), params.n * params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
/* Copy to host */
raft::update_host(h_x.data(), x.data(), params.n * params.batch_size, handle.get_stream());
raft::update_host(
h_A.data(), A.data(), params.n * params.n * params.batch_size, handle.get_stream());
handle.sync_stream(handle.get_stream());
/* Compute using tested prims */
constexpr int BlockSize = 64;
int shared_mem_size = params.n * sizeof(T);
if (params.broadcast)
block_xAxt_test_kernel<BlockSize, true>
<<<params.batch_size, BlockSize, shared_mem_size, handle.get_stream()>>>(
params.n, x.data(), A.data(), res_dev.data(), params.preload);
else
block_xAxt_test_kernel<BlockSize, false>
<<<params.batch_size, BlockSize, shared_mem_size, handle.get_stream()>>>(
params.n, x.data(), A.data(), res_dev.data(), params.preload);
/* Compute reference results */
for (int bid = 0; bid < params.batch_size; bid++) {
for (int i = 0; i < params.n; i++) {
T acc = 0;
for (int j = 0; j < params.n; j++) {
acc += h_A[bid * params.n * params.n + j * params.n + i] * h_x[bid * params.n + j];
}
h_res_ref[bid] += acc * h_x[bid * params.n + i];
}
}
/* Check results */
match = devArrMatchHost(h_res_ref.data(),
res_dev.data(),
params.batch_size,
raft::CompareApprox<T>(params.eps),
handle.get_stream());
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
BlockXaxtInputs<T> params;
testing::AssertionResult match = testing::AssertionFailure();
};
const std::vector<BlockXaxtInputs<float>> xAxt_inputsf = {{true, true, 9, 20, 1e-2, 12345U},
{true, true, 65, 50, 1e-2, 12345U},
{true, true, 200, 100, 1e-2, 12345U},
{false, true, 200, 100, 1e-2, 12345U},
{true, false, 200, 100, 1e-2, 12345U}};
const std::vector<BlockXaxtInputs<double>> xAxt_inputsd = {{true, true, 9, 20, 1e-4, 12345U},
{true, true, 65, 50, 1e-4, 12345U},
{true, true, 200, 100, 1e-4, 12345U},
{false, true, 200, 100, 1e-4, 12345U},
{true, false, 200, 100, 1e-2, 12345U}};
typedef BlockXaxtTest<float> BlockXaxtTestF;
TEST_P(BlockXaxtTestF, Result) { EXPECT_TRUE(match); }
typedef BlockXaxtTest<double> BlockXaxtTestD;
TEST_P(BlockXaxtTestD, Result) { EXPECT_TRUE(match); }
INSTANTIATE_TEST_CASE_P(BlockXaxtTests, BlockXaxtTestF, ::testing::ValuesIn(xAxt_inputsf));
INSTANTIATE_TEST_CASE_P(BlockXaxtTests, BlockXaxtTestD, ::testing::ValuesIn(xAxt_inputsd));
/* y=alpha*x */
template <typename T>
struct BlockAxInputs {
int n;
int batch_size;
T eps;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const BlockAxInputs<T>& dims)
{
return os;
}
template <typename T>
__global__ void block_ax_test_kernel(int n, T alpha, const T* x, T* y)
{
_block_ax(n, alpha, x + n * blockIdx.x, y + n * blockIdx.x);
}
template <typename T>
class BlockAxTest : public ::testing::TestWithParam<BlockAxInputs<T>> {
protected:
void basicTest()
{
raft::handle_t handle;
params = ::testing::TestWithParam<BlockAxInputs<T>>::GetParam();
rmm::device_uvector<T> x(params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> y(params.n * params.batch_size, handle.get_stream());
std::vector<T> h_x(params.n * params.batch_size);
std::vector<T> h_y_ref(params.n * params.batch_size, (T)0);
/* Generate random data on device */
raft::random::Rng r(params.seed);
r.uniform(x.data(), params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
/* Generate random alpha */
std::default_random_engine generator(params.seed);
std::uniform_real_distribution<T> distribution(-2.0, 2.0);
T alpha = distribution(generator);
/* Copy to host */
raft::update_host(h_x.data(), x.data(), params.n * params.batch_size, handle.get_stream());
handle.sync_stream(handle.get_stream());
/* Compute using tested prims */
constexpr int BlockSize = 64;
block_ax_test_kernel<<<params.batch_size, BlockSize, 0, handle.get_stream()>>>(
params.n, alpha, x.data(), y.data());
/* Compute reference results */
for (int bid = 0; bid < params.batch_size; bid++) {
for (int i = 0; i < params.n; i++) {
h_y_ref[bid * params.n + i] = alpha * h_x[bid * params.n + i];
}
}
/* Check results */
match = devArrMatchHost(h_y_ref.data(),
y.data(),
params.n * params.batch_size,
raft::CompareApprox<T>(params.eps),
handle.get_stream());
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
BlockAxInputs<T> params;
testing::AssertionResult match = testing::AssertionFailure();
};
const std::vector<BlockAxInputs<float>> ax_inputsf = {
{9, 20, 1e-4, 12345U}, {65, 50, 1e-4, 12345U}, {200, 100, 1e-4, 12345U}};
const std::vector<BlockAxInputs<double>> ax_inputsd = {
{9, 20, 1e-4, 12345U}, {65, 50, 1e-4, 12345U}, {200, 100, 1e-4, 12345U}};
typedef BlockAxTest<float> BlockAxTestF;
TEST_P(BlockAxTestF, Result) { EXPECT_TRUE(match); }
typedef BlockAxTest<double> BlockAxTestD;
TEST_P(BlockAxTestD, Result) { EXPECT_TRUE(match); }
INSTANTIATE_TEST_CASE_P(BlockAxTests, BlockAxTestF, ::testing::ValuesIn(ax_inputsf));
INSTANTIATE_TEST_CASE_P(BlockAxTests, BlockAxTestD, ::testing::ValuesIn(ax_inputsd));
/* Covariance stability */
template <typename T>
struct BlockCovStabilityInputs {
int n;
int batch_size;
T eps;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const BlockCovStabilityInputs<T>& dims)
{
return os;
}
template <typename CovPolicy, typename T>
__global__ void block_cov_stability_test_kernel(int n, const T* in, T* out)
{
__shared__ CovStabilityStorage<CovPolicy, T> cov_stability_storage;
_block_covariance_stability<CovPolicy>(
n, in + n * n * blockIdx.x, out + n * n * blockIdx.x, cov_stability_storage);
}
template <typename CovPolicy, typename T>
class BlockCovStabilityTest : public ::testing::TestWithParam<BlockCovStabilityInputs<T>> {
protected:
void basicTest()
{
raft::handle_t handle;
params = ::testing::TestWithParam<BlockCovStabilityInputs<T>>::GetParam();
rmm::device_uvector<T> d_in(params.n * params.n * params.batch_size, handle.get_stream());
rmm::device_uvector<T> d_out(params.n * params.n * params.batch_size, handle.get_stream());
std::vector<T> h_in(params.n * params.n * params.batch_size);
std::vector<T> h_out(params.n * params.n * params.batch_size);
/* Generate random data on device */
raft::random::Rng r(params.seed);
r.uniform(
d_in.data(), params.n * params.n * params.batch_size, (T)-2, (T)2, handle.get_stream());
/* Copy to host */
raft::update_host(
h_in.data(), d_in.data(), params.n * params.n * params.batch_size, handle.get_stream());
handle.sync_stream(handle.get_stream());
/* Compute using tested prims */
block_cov_stability_test_kernel<CovPolicy>
<<<params.batch_size, CovPolicy::BlockSize, 0, handle.get_stream()>>>(
params.n, d_in.data(), d_out.data());
/* Compute reference results */
for (int bid = 0; bid < params.batch_size; bid++) {
for (int i = 0; i < params.n - 1; i++) {
for (int j = i + 1; j < params.n; j++) {
T val = 0.5 * (h_in[bid * params.n * params.n + j * params.n + i] +
h_in[bid * params.n * params.n + i * params.n + j]);
h_out[bid * params.n * params.n + j * params.n + i] = val;
h_out[bid * params.n * params.n + i * params.n + j] = val;
}
}
for (int i = 0; i < params.n; i++) {
h_out[bid * params.n * params.n + i * params.n + i] =
abs(h_in[bid * params.n * params.n + i * params.n + i]);
}
}
/* Check results */
match = devArrMatchHost(h_out.data(),
d_out.data(),
params.n * params.n * params.batch_size,
raft::CompareApprox<T>(params.eps),
handle.get_stream());
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
BlockCovStabilityInputs<T> params;
testing::AssertionResult match = testing::AssertionFailure();
};
const std::vector<BlockCovStabilityInputs<float>> cs_inputsf = {
{15, 4, 1e-4, 12345U},
{33, 10, 1e-4, 12345U},
{220, 130, 1e-4, 12345U},
};
const std::vector<BlockCovStabilityInputs<double>> cs_inputsd = {
{15, 4, 1e-4, 12345U},
{33, 10, 1e-4, 12345U},
{220, 130, 1e-4, 12345U},
};
typedef BlockCovStabilityTest<BlockPolicy<1, 1, 8, 4>, float> BlockCovStabilityTestF_1_1_8_4;
TEST_P(BlockCovStabilityTestF_1_1_8_4, Result) { EXPECT_TRUE(match); }
typedef BlockCovStabilityTest<BlockPolicy<1, 1, 8, 4>, double> BlockCovStabilityTestD_1_1_8_4;
TEST_P(BlockCovStabilityTestD_1_1_8_4, Result) { EXPECT_TRUE(match); }
typedef BlockCovStabilityTest<BlockPolicy<1, 4, 32, 8>, float> BlockCovStabilityTestF_1_4_32_8;
TEST_P(BlockCovStabilityTestF_1_4_32_8, Result) { EXPECT_TRUE(match); }
typedef BlockCovStabilityTest<BlockPolicy<1, 4, 32, 8>, double> BlockCovStabilityTestD_1_4_32_8;
TEST_P(BlockCovStabilityTestD_1_4_32_8, Result) { EXPECT_TRUE(match); }
INSTANTIATE_TEST_CASE_P(BlockCovStabilityTests,
BlockCovStabilityTestF_1_1_8_4,
::testing::ValuesIn(cs_inputsf));
INSTANTIATE_TEST_CASE_P(BlockCovStabilityTests,
BlockCovStabilityTestD_1_1_8_4,
::testing::ValuesIn(cs_inputsd));
INSTANTIATE_TEST_CASE_P(BlockCovStabilityTests,
BlockCovStabilityTestF_1_4_32_8,
::testing::ValuesIn(cs_inputsf));
INSTANTIATE_TEST_CASE_P(BlockCovStabilityTests,
BlockCovStabilityTestD_1_4_32_8,
::testing::ValuesIn(cs_inputsd));
} // namespace LinAlg
} // namespace MLCommon
|
ac9d865f2ef8ca3a4b81cb3fc56bdcff519cf7b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include "scan.cuh"
int main(int argc, char *argv[]) {
using namespace std;
long n = atol(argv[1]);
float *hA;
float *out;
hA = (float *)malloc(n * sizeof(float));
out = (float *)malloc(n * sizeof(float));
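// Initialize the input with random floats in [-1, 1] and zero the output buffer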
for (int i = 0; i < n; i++) {
hA[i] = ((float)rand() / (RAND_MAX)) * 2 - 1;
out[i] = 0;
}
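// Use HIP events to time the scan() call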
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
scan(hA, out, n, 1024);
hipEventRecord(stop);
hipEventSynchronize(stop);
float ms;
hipEventElapsedTime(&ms, start, stop);
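// Print the last element of the scan output, then the elapsed time in milliseconds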
cout << out[n-1] << endl;
cout << ms << endl;
} | ac9d865f2ef8ca3a4b81cb3fc56bdcff519cf7b2.cu | #include "cuda_runtime.h"
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include "scan.cuh"
int main(int argc, char *argv[]) {
using namespace std;
long n = atol(argv[1]);
float *hA;
float *out;
hA = (float *)malloc(n * sizeof(float));
out = (float *)malloc(n * sizeof(float));
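// Initialize the input with random floats in [-1, 1] and zero the output buffer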
for (int i = 0; i < n; i++) {
hA[i] = ((float)rand() / (RAND_MAX)) * 2 - 1;
out[i] = 0;
}
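// Use CUDA events to time the scan() call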
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
scan(hA, out, n, 1024);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float ms;
cudaEventElapsedTime(&ms, start, stop);
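// Print the last element of the scan output, then the elapsed time in milliseconds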
cout << out[n-1] << endl;
cout << ms << endl;
} |