hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
9533a840c57eaac348fd1be74a026290f01bf761.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 9533a840c57eaac348fd1be74a026290f01bf761.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
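In the pair above, the only differences hipify introduces are the generated-file banner and the stream type (`hipStream_t` in the .hip file, `cudaStream_t` in the .cu file). As a minimal sketch of the broader mapping applied throughout this dataset, the fragment below shows how a plain triple-chevron CUDA launch corresponds to a `hipLaunchKernelGGL` call; the kernel `scale` and its wrapper are made-up names for illustration, not part of either file.

```cpp
// Illustrative sketch (assumed names), not taken from the dataset files.
// CUDA form:  scale<<<grid, block, 0, stream>>>(data, n);  with a cudaStream_t
// HIP form, as emitted by hipify:
#include <hip/hip_runtime.h>

__global__ void scale(float* data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= 2.0f;  // trivial per-element work
}

void launch_scale(float* data, int n, hipStream_t stream) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  hipLaunchKernelGGL(scale, grid, block, 0, stream, data, n);
}
```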
c550ff0162454993c5e88bad3215b615ba522d1c.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include "spacetodepth_impl.cuh"
#include "runtime/device/gpu/cuda_common.h"
template <typename T>
__global__ void SpaceToDepth(const size_t size, const T *input, const size_t in,
const size_t ic, const size_t ih, const size_t iw,
const size_t on, const size_t oc, const size_t oh,
const size_t ow, const size_t r, T *output) {
size_t temp_stride = 0;
size_t temp_pos = 0;
size_t output_pos = 0;
size_t input_pos_array[SPACETODEPTH_BUFFER_DIMENSION];
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size;
pos += blockDim.x * gridDim.x) {
temp_stride = ic * ih * iw;
input_pos_array[0] = pos / temp_stride;
temp_pos = pos % temp_stride;
temp_stride /= ic;
input_pos_array[1] = temp_pos / temp_stride;
temp_pos = pos % temp_stride;
temp_stride /= ih;
input_pos_array[2] = temp_pos / temp_stride;
temp_pos = pos % temp_stride;
temp_stride /= iw;
input_pos_array[3] = temp_pos / temp_stride;
output_pos += input_pos_array[0];
output_pos = (output_pos * oc) +
(input_pos_array[1] +
(r * (input_pos_array[2] % r) + input_pos_array[3] % r) * ic);
output_pos = (output_pos * oh) + (input_pos_array[2] / r);
output_pos = (output_pos * ow) + (input_pos_array[3] / r);
output[output_pos] = input[pos];
output_pos = 0;
}
return;
}
template <typename T>
void CalSpaceToDepth(const size_t size, const T *input, const size_t in,
const size_t ic, const size_t ih, const size_t iw,
const size_t on, const size_t oc, const size_t oh,
const size_t ow, const size_t r, T *output,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( SpaceToDepth), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream,
size, input, in, ic, ih, iw, on, oc, oh, ow, r, output);
return;
}
template void CalSpaceToDepth<float>(const size_t size, const float *input,
const size_t in, const size_t ic,
const size_t ih, const size_t iw,
const size_t on, const size_t oc,
const size_t oh, const size_t ow,
const size_t r, float *output,
hipStream_t cuda_stream);
template void CalSpaceToDepth<half>(const size_t size, const half *input,
const size_t in, const size_t ic,
const size_t ih, const size_t iw,
const size_t on, const size_t oc,
const size_t oh, const size_t ow,
const size_t r, half *output,
hipStream_t cuda_stream);
template void CalSpaceToDepth<int>(const size_t size, const int *input,
const size_t in, const size_t ic,
const size_t ih, const size_t iw,
const size_t on, const size_t oc,
const size_t oh, const size_t ow,
const size_t r, int *output,
hipStream_t cuda_stream);
template void CalSpaceToDepth<int64_t>(const size_t size, const int64_t *input,
const size_t in, const size_t ic,
const size_t ih, const size_t iw,
const size_t on, const size_t oc,
const size_t oh, const size_t ow,
const size_t r, int64_t *output,
hipStream_t cuda_stream);
template void CalSpaceToDepth<int16_t>(const size_t size, const int16_t *input,
const size_t in, const size_t ic,
const size_t ih, const size_t iw,
const size_t on, const size_t oc,
const size_t oh, const size_t ow,
const size_t r, int16_t *output,
hipStream_t cuda_stream);
template void CalSpaceToDepth<int8_t>(const size_t size, const int8_t *input,
const size_t in, const size_t ic,
const size_t ih, const size_t iw,
const size_t on, const size_t oc,
const size_t oh, const size_t ow,
const size_t r, int8_t *output,
hipStream_t cuda_stream);
template void CalSpaceToDepth<uint8_t>(const size_t size, const uint8_t *input,
const size_t in, const size_t ic,
const size_t ih, const size_t iw,
const size_t on, const size_t oc,
const size_t oh, const size_t ow,
const size_t r, uint8_t *output,
hipStream_t cuda_stream);
template void
CalSpaceToDepth<uint16_t>(const size_t size, const uint16_t *input,
const size_t in, const size_t ic, const size_t ih,
const size_t iw, const size_t on, const size_t oc,
const size_t oh, const size_t ow, const size_t r,
uint16_t *output, hipStream_t cuda_stream);
template void
CalSpaceToDepth<uint32_t>(const size_t size, const uint32_t *input,
const size_t in, const size_t ic, const size_t ih,
const size_t iw, const size_t on, const size_t oc,
const size_t oh, const size_t ow, const size_t r,
uint32_t *output, hipStream_t cuda_stream);
template void
CalSpaceToDepth<uint64_t>(const size_t size, const uint64_t *input,
const size_t in, const size_t ic, const size_t ih,
const size_t iw, const size_t on, const size_t oc,
const size_t oh, const size_t ow, const size_t r,
uint64_t *output, hipStream_t cuda_stream);
| c550ff0162454993c5e88bad3215b615ba522d1c.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include "spacetodepth_impl.cuh"
#include "runtime/device/gpu/cuda_common.h"
template <typename T>
__global__ void SpaceToDepth(const size_t size, const T *input, const size_t in,
const size_t ic, const size_t ih, const size_t iw,
const size_t on, const size_t oc, const size_t oh,
const size_t ow, const size_t r, T *output) {
size_t temp_stride = 0;
size_t temp_pos = 0;
size_t output_pos = 0;
size_t input_pos_array[SPACETODEPTH_BUFFER_DIMENSION];
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size;
pos += blockDim.x * gridDim.x) {
temp_stride = ic * ih * iw;
input_pos_array[0] = pos / temp_stride;
temp_pos = pos % temp_stride;
temp_stride /= ic;
input_pos_array[1] = temp_pos / temp_stride;
temp_pos = pos % temp_stride;
temp_stride /= ih;
input_pos_array[2] = temp_pos / temp_stride;
temp_pos = pos % temp_stride;
temp_stride /= iw;
input_pos_array[3] = temp_pos / temp_stride;
output_pos += input_pos_array[0];
output_pos = (output_pos * oc) +
(input_pos_array[1] +
(r * (input_pos_array[2] % r) + input_pos_array[3] % r) * ic);
output_pos = (output_pos * oh) + (input_pos_array[2] / r);
output_pos = (output_pos * ow) + (input_pos_array[3] / r);
output[output_pos] = input[pos];
output_pos = 0;
}
return;
}
template <typename T>
void CalSpaceToDepth(const size_t size, const T *input, const size_t in,
const size_t ic, const size_t ih, const size_t iw,
const size_t on, const size_t oc, const size_t oh,
const size_t ow, const size_t r, T *output,
cudaStream_t cuda_stream) {
SpaceToDepth<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(
size, input, in, ic, ih, iw, on, oc, oh, ow, r, output);
return;
}
template void CalSpaceToDepth<float>(const size_t size, const float *input,
const size_t in, const size_t ic,
const size_t ih, const size_t iw,
const size_t on, const size_t oc,
const size_t oh, const size_t ow,
const size_t r, float *output,
cudaStream_t cuda_stream);
template void CalSpaceToDepth<half>(const size_t size, const half *input,
const size_t in, const size_t ic,
const size_t ih, const size_t iw,
const size_t on, const size_t oc,
const size_t oh, const size_t ow,
const size_t r, half *output,
cudaStream_t cuda_stream);
template void CalSpaceToDepth<int>(const size_t size, const int *input,
const size_t in, const size_t ic,
const size_t ih, const size_t iw,
const size_t on, const size_t oc,
const size_t oh, const size_t ow,
const size_t r, int *output,
cudaStream_t cuda_stream);
template void CalSpaceToDepth<int64_t>(const size_t size, const int64_t *input,
const size_t in, const size_t ic,
const size_t ih, const size_t iw,
const size_t on, const size_t oc,
const size_t oh, const size_t ow,
const size_t r, int64_t *output,
cudaStream_t cuda_stream);
template void CalSpaceToDepth<int16_t>(const size_t size, const int16_t *input,
const size_t in, const size_t ic,
const size_t ih, const size_t iw,
const size_t on, const size_t oc,
const size_t oh, const size_t ow,
const size_t r, int16_t *output,
cudaStream_t cuda_stream);
template void CalSpaceToDepth<int8_t>(const size_t size, const int8_t *input,
const size_t in, const size_t ic,
const size_t ih, const size_t iw,
const size_t on, const size_t oc,
const size_t oh, const size_t ow,
const size_t r, int8_t *output,
cudaStream_t cuda_stream);
template void CalSpaceToDepth<uint8_t>(const size_t size, const uint8_t *input,
const size_t in, const size_t ic,
const size_t ih, const size_t iw,
const size_t on, const size_t oc,
const size_t oh, const size_t ow,
const size_t r, uint8_t *output,
cudaStream_t cuda_stream);
template void
CalSpaceToDepth<uint16_t>(const size_t size, const uint16_t *input,
const size_t in, const size_t ic, const size_t ih,
const size_t iw, const size_t on, const size_t oc,
const size_t oh, const size_t ow, const size_t r,
uint16_t *output, cudaStream_t cuda_stream);
template void
CalSpaceToDepth<uint32_t>(const size_t size, const uint32_t *input,
const size_t in, const size_t ic, const size_t ih,
const size_t iw, const size_t on, const size_t oc,
const size_t oh, const size_t ow, const size_t r,
uint32_t *output, cudaStream_t cuda_stream);
template void
CalSpaceToDepth<uint64_t>(const size_t size, const uint64_t *input,
const size_t in, const size_t ic, const size_t ih,
const size_t iw, const size_t on, const size_t oc,
const size_t oh, const size_t ow, const size_t r,
uint64_t *output, cudaStream_t cuda_stream);
|
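The SpaceToDepth kernel above linearizes each NCHW input index into (n, c, h, w), then scatters the element into output channel `c + (r*(h % r) + w % r)*C` at spatial position `(h/r, w/r)`. A CPU reference of that same index mapping is sketched below; it is written for illustration only (the name `SpaceToDepthCpu` is not part of the dataset).

```cpp
// CPU reference for the index mapping used by the SpaceToDepth kernel above
// (illustrative sketch, assumed name). Input is NCHW; output is
// [N, C*r*r, H/r, W/r].
#include <cstddef>
#include <vector>

template <typename T>
void SpaceToDepthCpu(const std::vector<T>& in, std::vector<T>& out,
                     size_t N, size_t C, size_t H, size_t W, size_t r) {
  const size_t oc = C * r * r, oh = H / r, ow = W / r;
  out.resize(N * oc * oh * ow);
  for (size_t n = 0; n < N; ++n)
    for (size_t c = 0; c < C; ++c)
      for (size_t h = 0; h < H; ++h)
        for (size_t w = 0; w < W; ++w) {
          size_t out_c = c + (r * (h % r) + w % r) * C;          // depth slot
          size_t dst = ((n * oc + out_c) * oh + h / r) * ow + w / r;
          size_t src = ((n * C + c) * H + h) * W + w;            // NCHW index
          out[dst] = in[src];
        }
}
```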
e4dbbe80da497f598d706a39d148576278a01192.hip | // !!! This is a file automatically generated by hipify!!!
/**
* jrc_cuda_rho.cu
* block loading rho calculation. should be much faster
* system('nvcc -ptx -m 64 -arch sm_35 jrc_cuda_rho.cu')
* i1 is multiple of chunk (16)
* J. James Jun, Vidrio Technologies, LLC., 2017 Jun 11
* 7/13/17: fDc_spk option added, which uses spike-specific distance cut-off (dc)
*/
#include <hip/hip_runtime.h>
#include <math.h>
#define ABS(my_val) ((my_val) < 0) ? (-1*(my_val)) : (my_val)
#define MIN(A,B) ((A)<(B)) ? (A) : (B)
#define MAX(A,B) ((A)>(B)) ? (A) : (B)
#define NTHREADS 128
#define MAXDIM 45
#define CHUNKSIZE 16
#define SINGLE_INF (3.402E+38) // equivalent to NAN. consider -1 value
/** Main entry point.
* Works out where the current thread should read/write to global memory
* and calls doIterations to do the actual work.
* Step through one B at a time
*/
__global__ void jrc_cuda_rho(float *rho, const float *site_features, const int *spike_order, const int *site_constants, const float dist_cut2) {
int i1 = (blockIdx.x + blockIdx.y * gridDim.x) * CHUNKSIZE; // base index of i1
int thread_x = threadIdx.x; // nThreadsGPU for i12 index
int i1_thread_x = i1 + thread_x;
int n_spikes_primary = site_constants[0];
int n_spikes_all = site_constants[1];
int n_features = site_constants[2];
int dn_max = site_constants[3];
int fDc_spk = site_constants[4];
__shared__ int spike_order_chunk[CHUNKSIZE];
__shared__ float features_primary[MAXDIM][CHUNKSIZE];
__shared__ int rho_chunk[NTHREADS][CHUNKSIZE]; // count then divide later
__shared__ int mnComp1_[NTHREADS][CHUNKSIZE]; // count number of elements compared
__shared__ float vrDc1_[CHUNKSIZE]; // use if fDc_spk=1
// cache shared memory
if (thread_x < n_features) {
for (int i_c = 0; i_c < CHUNKSIZE; i_c++) {
int i1_c = i_c + i1;
if (i1_c < n_spikes_primary) {
features_primary[thread_x][i_c] = site_features[thread_x + i1_c * n_features];
} else {
features_primary[thread_x][i_c] = 0.0f;
}
}
}
if (thread_x < CHUNKSIZE && i1_thread_x < n_spikes_primary) {
spike_order_chunk[thread_x] = spike_order[i1_thread_x];
}
// initialize rho
for (int i_c = 0; i_c < CHUNKSIZE; i_c++) {
rho_chunk[thread_x][i_c] = 0;
mnComp1_[thread_x][i_c] = 0;
}
// calculate spike-specific distance cut-off vrDc1_ only if fDc_spk==1
if (thread_x < CHUNKSIZE && fDc_spk == 1) {
vrDc1_[thread_x] = 0.0f; //init
for (int i_feature = 0; i_feature < n_features; i_feature++) {
float temp = features_primary[i_feature][thread_x];
vrDc1_[thread_x] += (temp * temp);
}
vrDc1_[thread_x] *= dist_cut2;
}
__syncthreads();
// Inspect distance relationship between i1 and i12_tx
for (int i12_tx = thread_x; i12_tx < n_spikes_all; i12_tx += blockDim.x) {
int iiSpk12_ord_tx = spike_order[i12_tx];
// compute distance
float feature_dists2_chunk[CHUNKSIZE]; // square of pairwise feature distances for chunk
for (int i_c = 0; i_c < CHUNKSIZE; i_c++) {
feature_dists2_chunk[i_c] = 0.0f;
}
for (int i_feature = 0; i_feature < n_features; i_feature++) {
float fet12_tx = site_features[i_feature + i12_tx * n_features];
for (int i_c = 0; i_c < CHUNKSIZE; ++i_c) {
float temp = fet12_tx - features_primary[i_feature][i_c]; // z_i = x_i - y_i
feature_dists2_chunk[i_c] += temp * temp; // dist += z_i^2
}
}
// Compare the index and distance
for (int i_c = 0; i_c < CHUNKSIZE; ++i_c) {
int time_dist = ABS(spike_order_chunk[i_c] - iiSpk12_ord_tx);
if (time_dist <= dn_max) {
++mnComp1_[thread_x][i_c];
if (fDc_spk == 0) {
if (feature_dists2_chunk[i_c] <= dist_cut2) {
++rho_chunk[thread_x][i_c];
}
} else {
if (feature_dists2_chunk[i_c] < vrDc1_[i_c]) {
++rho_chunk[thread_x][i_c];
}
}
}
}
} // for
// final count
__syncthreads();
if (thread_x < CHUNKSIZE) { // use thread_x as i_c
int nRho1 = 0;
int nComp1 = 0;
for (int tx1 = 0; tx1 < blockDim.x; tx1++) {
nRho1 += rho_chunk[tx1][thread_x];
nComp1 += mnComp1_[tx1][thread_x];
}
if (i1_thread_x < n_spikes_primary) {
rho[i1_thread_x] = (float)(((double) (nRho1)) / ((double) nComp1));
}
}
} | e4dbbe80da497f598d706a39d148576278a01192.cu | /**
* jrc_cuda_rho.cu
* block loading rho calculation. should be much faster
* system('nvcc -ptx -m 64 -arch sm_35 jrc_cuda_rho.cu')
* i1 is multiple of chunk (16)
* J. James Jun, Vidrio Technologies, LLC., 2017 Jun 11
* 7/13/17: fDc_spk option added, which uses spike-specific distance cut-off (dc)
*/
#include <cuda_runtime.h>
#include <math.h>
#define ABS(my_val) ((my_val) < 0) ? (-1*(my_val)) : (my_val)
#define MIN(A,B) ((A)<(B)) ? (A) : (B)
#define MAX(A,B) ((A)>(B)) ? (A) : (B)
#define NTHREADS 128
#define MAXDIM 45
#define CHUNKSIZE 16
#define SINGLE_INF (3.402E+38) // equivalent to NAN. consider -1 value
/** Main entry point.
* Works out where the current thread should read/write to global memory
* and calls doIterations to do the actual work.
* Step through one B at a time
*/
__global__ void jrc_cuda_rho(float *rho, const float *site_features, const int *spike_order, const int *site_constants, const float dist_cut2) {
int i1 = (blockIdx.x + blockIdx.y * gridDim.x) * CHUNKSIZE; // base index of i1
int thread_x = threadIdx.x; // nThreadsGPU for i12 index
int i1_thread_x = i1 + thread_x;
int n_spikes_primary = site_constants[0];
int n_spikes_all = site_constants[1];
int n_features = site_constants[2];
int dn_max = site_constants[3];
int fDc_spk = site_constants[4];
__shared__ int spike_order_chunk[CHUNKSIZE];
__shared__ float features_primary[MAXDIM][CHUNKSIZE];
__shared__ int rho_chunk[NTHREADS][CHUNKSIZE]; // count then divide later
__shared__ int mnComp1_[NTHREADS][CHUNKSIZE]; // count number of elements compared
__shared__ float vrDc1_[CHUNKSIZE]; // use if fDc_spk=1
// cache shared memory
if (thread_x < n_features) {
for (int i_c = 0; i_c < CHUNKSIZE; i_c++) {
int i1_c = i_c + i1;
if (i1_c < n_spikes_primary) {
features_primary[thread_x][i_c] = site_features[thread_x + i1_c * n_features];
} else {
features_primary[thread_x][i_c] = 0.0f;
}
}
}
if (thread_x < CHUNKSIZE && i1_thread_x < n_spikes_primary) {
spike_order_chunk[thread_x] = spike_order[i1_thread_x];
}
// initialize rho
for (int i_c = 0; i_c < CHUNKSIZE; i_c++) {
rho_chunk[thread_x][i_c] = 0;
mnComp1_[thread_x][i_c] = 0;
}
// calculate spike-specific distance cut-off vrDc1_ only if fDc_spk==1
if (thread_x < CHUNKSIZE && fDc_spk == 1) {
vrDc1_[thread_x] = 0.0f; //init
for (int i_feature = 0; i_feature < n_features; i_feature++) {
float temp = features_primary[i_feature][thread_x];
vrDc1_[thread_x] += (temp * temp);
}
vrDc1_[thread_x] *= dist_cut2;
}
__syncthreads();
// Inspect distance relationship between i1 and i12_tx
for (int i12_tx = thread_x; i12_tx < n_spikes_all; i12_tx += blockDim.x) {
int iiSpk12_ord_tx = spike_order[i12_tx];
// compute distance
float feature_dists2_chunk[CHUNKSIZE]; // square of pairwise feature distances for chunk
for (int i_c = 0; i_c < CHUNKSIZE; i_c++) {
feature_dists2_chunk[i_c] = 0.0f;
}
for (int i_feature = 0; i_feature < n_features; i_feature++) {
float fet12_tx = site_features[i_feature + i12_tx * n_features];
for (int i_c = 0; i_c < CHUNKSIZE; ++i_c) {
float temp = fet12_tx - features_primary[i_feature][i_c]; // z_i = x_i - y_i
feature_dists2_chunk[i_c] += temp * temp; // dist += z_i^2
}
}
// Compare the index and distance
for (int i_c = 0; i_c < CHUNKSIZE; ++i_c) {
int time_dist = ABS(spike_order_chunk[i_c] - iiSpk12_ord_tx);
if (time_dist <= dn_max) {
++mnComp1_[thread_x][i_c];
if (fDc_spk == 0) {
if (feature_dists2_chunk[i_c] <= dist_cut2) {
++rho_chunk[thread_x][i_c];
}
} else {
if (feature_dists2_chunk[i_c] < vrDc1_[i_c]) {
++rho_chunk[thread_x][i_c];
}
}
}
}
} // for
// final count
__syncthreads();
if (thread_x < CHUNKSIZE) { // use thread_x as i_c
int nRho1 = 0;
int nComp1 = 0;
for (int tx1 = 0; tx1 < blockDim.x; tx1++) {
nRho1 += rho_chunk[tx1][thread_x];
nComp1 += mnComp1_[tx1][thread_x];
}
if (i1_thread_x < n_spikes_primary) {
rho[i1_thread_x] = (float)(((double) (nRho1)) / ((double) nComp1));
}
}
} |
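jrc_cuda_rho above counts, for each primary spike in a 16-wide chunk, how many spikes fall inside the temporal window `dn_max` and, of those, how many lie within the feature-distance cutoff, writing the ratio into `rho`. The original file is compiled to PTX and launched from MATLAB, so the host wrapper below is only a hypothetical sketch of a matching launch configuration (one CHUNKSIZE group of primary spikes per thread block, NTHREADS threads); `launch_rho` and its argument names are assumptions.

```cpp
// Hypothetical host-side launch (assumed wrapper, not part of the dataset).
#include <cuda_runtime.h>

__global__ void jrc_cuda_rho(float* rho, const float* site_features,
                             const int* spike_order, const int* site_constants,
                             const float dist_cut2);  // defined in the .cu above

void launch_rho(float* d_rho, const float* d_features, const int* d_spike_order,
                const int* d_site_constants,  // {n_primary, n_all, n_features, dn_max, fDc_spk}
                int n_spikes_primary, float dist_cut2, cudaStream_t stream) {
  const int kChunk = 16, kThreads = 128;  // CHUNKSIZE and NTHREADS in the file above
  dim3 grid((n_spikes_primary + kChunk - 1) / kChunk);
  dim3 block(kThreads);
  jrc_cuda_rho<<<grid, block, 0, stream>>>(d_rho, d_features, d_spike_order,
                                           d_site_constants, dist_cut2);
}
```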
532ee461ff6a9600c68021e1d61080ef71e32308.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "box2d2r-512-10-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 49
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 472;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_10), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 9)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 9)
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.03125f * A[t%2][i-2][j-2] +
0.03126f * A[t%2][i-2][j-1] +
0.03127f * A[t%2][i-2][j] +
0.03128f * A[t%2][i-2][j+1] +
0.03129f * A[t%2][i-2][j+2] +
0.03130f * A[t%2][i-1][j-2] +
0.03131f * A[t%2][i-1][j-1] +
0.03132f * A[t%2][i-1][j] +
0.03133f * A[t%2][i-1][j+1] +
0.03134f * A[t%2][i-1][j+2] +
0.03135f * A[t%2][i][j-2] +
0.03136f * A[t%2][i][j-1] +
0.24712f * A[t%2][i][j] +
0.03138f * A[t%2][i][j+1] +
0.03139f * A[t%2][i][j+2] +
0.03140f * A[t%2][i+1][j-2] +
0.03141f * A[t%2][i+1][j-1] +
0.03142f * A[t%2][i+1][j] +
0.03143f * A[t%2][i+1][j+1] +
0.03144f * A[t%2][i+1][j+2] +
0.03145f * A[t%2][i+2][j-2] +
0.03146f * A[t%2][i+2][j-1] +
0.03147f * A[t%2][i+2][j] +
0.03148f * A[t%2][i+2][j+1] +
0.03149f * A[t%2][i+2][j+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 532ee461ff6a9600c68021e1d61080ef71e32308.cu | #include <assert.h>
#include <stdio.h>
#include "box2d2r-512-10-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 49
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 472;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_10<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 9)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 9)
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
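    /* Host fallback: a reference 5x5 weighted-stencil sweep over the interior points,
       ping-ponging between the two time planes of A. */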
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.03125f * A[t%2][i-2][j-2] +
0.03126f * A[t%2][i-2][j-1] +
0.03127f * A[t%2][i-2][j] +
0.03128f * A[t%2][i-2][j+1] +
0.03129f * A[t%2][i-2][j+2] +
0.03130f * A[t%2][i-1][j-2] +
0.03131f * A[t%2][i-1][j-1] +
0.03132f * A[t%2][i-1][j] +
0.03133f * A[t%2][i-1][j+1] +
0.03134f * A[t%2][i-1][j+2] +
0.03135f * A[t%2][i][j-2] +
0.03136f * A[t%2][i][j-1] +
0.24712f * A[t%2][i][j] +
0.03138f * A[t%2][i][j+1] +
0.03139f * A[t%2][i][j+2] +
0.03140f * A[t%2][i+1][j-2] +
0.03141f * A[t%2][i+1][j-1] +
0.03142f * A[t%2][i+1][j] +
0.03143f * A[t%2][i+1][j+1] +
0.03144f * A[t%2][i+1][j+2] +
0.03145f * A[t%2][i+2][j-2] +
0.03146f * A[t%2][i+2][j-1] +
0.03147f * A[t%2][i+2][j] +
0.03148f * A[t%2][i+2][j+1] +
0.03149f * A[t%2][i+2][j+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
d850c46e84e7f56a32d4bd095824daf1c8ca8f78.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cuda_gl_interop.h>
#include <assert.h>
#include <sys/time.h>
#define Tix 32
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
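/* Elimination step of the Gauss-Jordan sweep: each block row r (except the pivot row
   `iter`) subtracts scale = Aaug[r][iter] times the already-normalised pivot row from
   row r, zeroing column `iter`; thread 0 of the first block in each such row stores
   Aaug[r][iter+1] into subpivot[r] for the host-side pivot check of the next iteration. */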
__global__ void GaussJordan_gpu(float *Aaug, float *subpivot, int N, int iter) {
int c = iter + blockIdx.x * Tix*Tix;
int r = blockIdx.y;
float scale;
int ti = threadIdx.x;
scale = Aaug[r*2*N+iter];
__shared__ float col[Tix*Tix];
__shared__ float colj[Tix*Tix];
if (r != iter){
if (c + ti < 2*N){
col[ti] = Aaug[iter*2*N+c+ti];
colj[ti] = Aaug[r*2*N+c+ti];
colj[ti] -= scale*col[ti];
Aaug[r*2*N+c+ti] = colj[ti];
}
if (blockIdx.x == 0){
if (ti == 0){
subpivot[r] = Aaug[r*2*N+iter+1];
}
}
}
}
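/* Pivot repair: adds row `use` (a lower row with a nonzero entry in this column) onto the
   pivot row `iter`, then refreshes subpivot[iter] with the new diagonal value. */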
__global__ void update_row_gpu(float *Aaug, float *subpivot, int N, int iter, int use) {
int c = iter + blockIdx.x * Tix*Tix;
int ti = threadIdx.x;
if (c + ti < 2*N){
Aaug[iter*2*N+c+ti] += Aaug[use*2*N+c+ti];
}
if (blockIdx.x == 0){
if (ti == 0){
subpivot[iter] = Aaug[iter+iter*2*N];
}
}
}
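/* Normalisation: divides the pivot row (from column `iter` rightwards) by the pivot value
   so that the diagonal entry becomes 1 before the elimination kernel runs. */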
__global__ void scale_row_gpu(float *Aaug, float *subpivot, int N, int iter) {
int c = iter + blockIdx.x * Tix*Tix;
int ti = threadIdx.x;
if (c + ti < 2*N){
Aaug[iter*2*N+c+ti] = Aaug[iter*2*N+c+ti]/subpivot[iter];
}
}
int main(int argc, char *argv[]){
float *Aaug, *Aaug_cu, *subpivot, *subpivot_cu;
int iter, m, i, j, N, use;
FILE * f;
// Checks to see valid number of inputs given
if (argc != 3)
{
printf("need input and N\n");
return -1;
}
N = strtol(argv[2],NULL,10);
    // Allocate the augmented matrix and the pivot cache, then read A from the input file
int memSize = 2*N*N*sizeof(float);
Aaug = (float *)malloc(memSize);
subpivot = (float *)malloc(N*sizeof(float));
f = fopen(argv[1], "rb");
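    /* Aaug holds the augmented matrix [A | I] in row-major order (N rows, 2N columns):
       the left half is read from the input file, the right half starts as the identity.
       subpivot[i] caches column 0 of each row as the pivot candidates for iteration 0. */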
for (i=0; i<N; i++){
for (j=0; j<N; j++){
fscanf(f, "%f", &Aaug[2*N*i+j]);
if (i==j){
Aaug[2*N*i+N+j] = 1;
}
else{
Aaug[2*N*i+N+j] = 0;
}
}
subpivot[i] = Aaug[i*2*N];
}
fclose(f);
hipMalloc((void**)&Aaug_cu, memSize);
hipMalloc((void**)&subpivot_cu, N*sizeof(float));
hipMemcpy(Aaug_cu, Aaug, memSize, hipMemcpyHostToDevice);
hipMemcpy(subpivot_cu, subpivot, N*sizeof(float), hipMemcpyHostToDevice);
// Runs GPU Code
int bn, rn;
dim3 nblocks, nthreads, nblocks_1, nthreads_1;
nthreads.x = Tix*Tix;
nthreads.y = 1;
nthreads_1.x = Tix*Tix;
nthreads_1.y = 1;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
for (iter=0;iter<N; iter++){
bn = MAX((2*N-iter)/(Tix*Tix),1); // Defines number of subdivisions in the row
rn = N; // Defines how many rows to update
nblocks.x = bn;
nblocks.y = rn;
nblocks_1.x = bn;
nblocks_1.y = 1;
        if (sqrt(subpivot[iter]*subpivot[iter])<.00000000000001){ // checks for invertibility (near-zero pivot)
for (m=1; m+iter<N; m++){ // loops through lower rows for nonzero in pivot
if (sqrt(subpivot[iter+m]*subpivot[iter+m])>.000000000000001){ // checks if nonzero pivot
use = m+iter;
goto update; // exits if pivot found
}
else if(m==N-1){
printf("Error matrix not invertible \n"); // if no pivot found, not inverable
exit(-1);
}
}
printf("Error matrix not invertible \n"); // if at the last pivot and zero, not invertable
exit(-1);
update:hipLaunchKernelGGL(( update_row_gpu), dim3(nblocks_1), dim3(nthreads_1), 0, 0, Aaug_cu, subpivot_cu, N, iter, use);
hipDeviceSynchronize();
}
hipLaunchKernelGGL(( scale_row_gpu), dim3(nblocks_1), dim3(nthreads_1), 0, 0, Aaug_cu, subpivot_cu, N, iter);
hipDeviceSynchronize();
if(iter<N){ // Won't perform reduction if iter = N (at the bottom)
hipLaunchKernelGGL(( GaussJordan_gpu), dim3(nblocks), dim3(nthreads), 0, 0, Aaug_cu, subpivot_cu, N, iter);
hipDeviceSynchronize();
hipMemcpy(subpivot, subpivot_cu, N*sizeof(float), hipMemcpyDeviceToHost);
}
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf ("%f\n", milliseconds / 1e3);
hipMemcpy(Aaug, Aaug_cu, memSize, hipMemcpyDeviceToHost);
return 0;
}
| d850c46e84e7f56a32d4bd095824daf1c8ca8f78.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_gl_interop.h>
#include <assert.h>
#include <sys/time.h>
#define Tix 32
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
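/* Elimination step of the Gauss-Jordan sweep: each block row r (except the pivot row
   `iter`) subtracts scale = Aaug[r][iter] times the already-normalised pivot row from
   row r, zeroing column `iter`; thread 0 of the first block in each such row stores
   Aaug[r][iter+1] into subpivot[r] for the host-side pivot check of the next iteration. */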
__global__ void GaussJordan_gpu(float *Aaug, float *subpivot, int N, int iter) {
int c = iter + blockIdx.x * Tix*Tix;
int r = blockIdx.y;
float scale;
int ti = threadIdx.x;
scale = Aaug[r*2*N+iter];
__shared__ float col[Tix*Tix];
__shared__ float colj[Tix*Tix];
if (r != iter){
if (c + ti < 2*N){
col[ti] = Aaug[iter*2*N+c+ti];
colj[ti] = Aaug[r*2*N+c+ti];
colj[ti] -= scale*col[ti];
Aaug[r*2*N+c+ti] = colj[ti];
}
if (blockIdx.x == 0){
if (ti == 0){
subpivot[r] = Aaug[r*2*N+iter+1];
}
}
}
}
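/* Pivot repair: adds row `use` (a lower row with a nonzero entry in this column) onto the
   pivot row `iter`, then refreshes subpivot[iter] with the new diagonal value. */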
__global__ void update_row_gpu(float *Aaug, float *subpivot, int N, int iter, int use) {
int c = iter + blockIdx.x * Tix*Tix;
int ti = threadIdx.x;
if (c + ti < 2*N){
Aaug[iter*2*N+c+ti] += Aaug[use*2*N+c+ti];
}
if (blockIdx.x == 0){
if (ti == 0){
subpivot[iter] = Aaug[iter+iter*2*N];
}
}
}
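/* Normalisation: divides the pivot row (from column `iter` rightwards) by the pivot value
   so that the diagonal entry becomes 1 before the elimination kernel runs. */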
__global__ void scale_row_gpu(float *Aaug, float *subpivot, int N, int iter) {
int c = iter + blockIdx.x * Tix*Tix;
int ti = threadIdx.x;
if (c + ti < 2*N){
Aaug[iter*2*N+c+ti] = Aaug[iter*2*N+c+ti]/subpivot[iter];
}
}
int main(int argc, char *argv[]){
float *Aaug, *Aaug_cu, *subpivot, *subpivot_cu;
int iter, m, i, j, N, use;
FILE * f;
// Checks to see valid number of inputs given
if (argc != 3)
{
printf("need input and N\n");
return -1;
}
N = strtol(argv[2],NULL,10);
    // Allocate the augmented matrix and the pivot cache, then read A from the input file
int memSize = 2*N*N*sizeof(float);
Aaug = (float *)malloc(memSize);
subpivot = (float *)malloc(N*sizeof(float));
f = fopen(argv[1], "rb");
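    /* Aaug holds the augmented matrix [A | I] in row-major order (N rows, 2N columns):
       the left half is read from the input file, the right half starts as the identity.
       subpivot[i] caches column 0 of each row as the pivot candidates for iteration 0. */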
for (i=0; i<N; i++){
for (j=0; j<N; j++){
fscanf(f, "%f", &Aaug[2*N*i+j]);
if (i==j){
Aaug[2*N*i+N+j] = 1;
}
else{
Aaug[2*N*i+N+j] = 0;
}
}
subpivot[i] = Aaug[i*2*N];
}
fclose(f);
cudaMalloc((void**)&Aaug_cu, memSize);
cudaMalloc((void**)&subpivot_cu, N*sizeof(float));
cudaMemcpy(Aaug_cu, Aaug, memSize, cudaMemcpyHostToDevice);
cudaMemcpy(subpivot_cu, subpivot, N*sizeof(float), cudaMemcpyHostToDevice);
// Runs GPU Code
int bn, rn;
dim3 nblocks, nthreads, nblocks_1, nthreads_1;
nthreads.x = Tix*Tix;
nthreads.y = 1;
nthreads_1.x = Tix*Tix;
nthreads_1.y = 1;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for (iter=0;iter<N; iter++){
bn = MAX((2*N-iter)/(Tix*Tix),1); // Defines number of subdivisions in the row
rn = N; // Defines how many rows to update
nblocks.x = bn;
nblocks.y = rn;
nblocks_1.x = bn;
nblocks_1.y = 1;
        if (sqrt(subpivot[iter]*subpivot[iter])<.00000000000001){ // checks for invertibility (near-zero pivot)
for (m=1; m+iter<N; m++){ // loops through lower rows for nonzero in pivot
if (sqrt(subpivot[iter+m]*subpivot[iter+m])>.000000000000001){ // checks if nonzero pivot
use = m+iter;
goto update; // exits if pivot found
}
else if(m==N-1){
printf("Error matrix not invertible \n"); // if no pivot found, not inverable
exit(-1);
}
}
printf("Error matrix not invertible \n"); // if at the last pivot and zero, not invertable
exit(-1);
update: update_row_gpu<<<nblocks_1, nthreads_1>>>(Aaug_cu, subpivot_cu, N, iter, use);
cudaDeviceSynchronize();
}
scale_row_gpu<<<nblocks_1, nthreads_1>>>(Aaug_cu, subpivot_cu, N, iter);
cudaDeviceSynchronize();
if(iter<N){ // Won't perform reduction if iter = N (at the bottom)
GaussJordan_gpu<<<nblocks, nthreads>>>(Aaug_cu, subpivot_cu, N, iter);
cudaDeviceSynchronize();
cudaMemcpy(subpivot, subpivot_cu, N*sizeof(float), cudaMemcpyDeviceToHost);
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf ("%f\n", milliseconds / 1e3);
cudaMemcpy(Aaug, Aaug_cu, memSize, cudaMemcpyDeviceToHost);
return 0;
}
|
faaaa0152e6665ca7b2a4c7e161dd36492721730.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
using namespace std;
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <Eigen/Dense>
#include <Eigen/Core>
//const int N = 131072; //2^17
const int N = 7;
__global__
void hello(float * b )
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
b[index] *= 2;
}
void displayDeviceInfo()
{
int devCount;
hipGetDeviceCount(&devCount);
printf( "[# of cuda devices] : %d \n", devCount );
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0 );
printf( "[Device Name] : %s\n", prop.name );
printf( "[maxThreadsPerMultiProcessor] : %d\n", prop.maxThreadsPerMultiProcessor );
printf( "[maxThreadsPerBlock] : %d\n", prop.maxThreadsPerBlock );
}
void printArray( int * ary, int nCount )
{
printf( "[ ");
if( nCount < 15 )
{
for( int i=0 ; i<nCount ; i++ )
printf( "%3d, ", ary[i]);
}
else
{
for( int i=0 ; i<5 ; i++ )
printf( "%3d, ", ary[i]);
printf( "., ., ., ");
for( int i=nCount-6 ; i<nCount ; i++ )
printf( "%3d, ", ary[i]);
}
printf( "]\n");
}
void printArrayFloat( float * ary, int nCount )
{
printf( "[ ");
if( nCount < 15 )
{
for( int i=0 ; i<nCount ; i++ )
printf( "%3.3f, ", ary[i]);
}
else
{
for( int i=0 ; i<5 ; i++ )
printf( "%3.3f, ", ary[i]);
printf( "., ., ., ");
for( int i=nCount-6 ; i<nCount ; i++ )
printf( "%3.3f, ", ary[i]);
}
printf( "]\n");
}
void printMatrixColMajor( float *mat, int rows, int cols )
{
printf( "[ ");
//note: matrix is in col-major format
for( int i=0 ; i<rows ; i++ )
{
for( int j=0 ; j<cols ; j++ )
{
            int index = i + j*rows; // column-major: element (i,j) sits at i + j*rows (leading dimension = rows)
printf( "%5.3f, ", mat[index]);
}
printf( "\n");
}
printf( "]\n");
}
int verifyVector( int len, float * cpu, float * gpu )
{
//verify
int notMatchCount = 0 ;
for( int i=0 ; i<N*N ; i++ )
{
if( cpu[i] != gpu[i] )
{
//cout << "Does not match at : "<< i << endl;
notMatchCount++;
}
}
cout << "Does not match at : `"<< notMatchCount << "` locations"<< endl;
return notMatchCount;
}
int testmain()
{
displayDeviceInfo();
//int b[N] = {15, 10, 6, 0, -11, 1, 0};
float *b = new float[N];
for( int i=0 ; i<N ; i++ )
b[i] = (float) ( rand()%100 ) - 50.0;
printArrayFloat(b,N);
Eigen::VectorXf eig_b(N);
for( int i=0 ; i<N ; i++ )
eig_b(i) = b[i];
std::cout << "B (eigen): " << eig_b.transpose() << std::endl;
Eigen::MatrixXf eig_out = eig_b * eig_b.transpose();
std::cout << "WIth Eigen B*B': "<< eig_out << std::endl;
float *b_dev, *by_dev;
const int isize = N*sizeof(float);
hipMalloc((void **)&b_dev, isize );
hipMalloc((void **)&by_dev, isize );
// float outb[N];
// hipMalloc( (void**)&b_dev, isize );
// hipMemcpy( b_dev, b, isize, hipMemcpyHostToDevice );
// dim3 dimBlock( 1024, 1 );
// dim3 dimGrid( N/1024, 1 );
// hello<<<dimGrid, dimBlock>>>(b_dev);
// hipMemcpy( outb, b_dev, isize, hipMemcpyDeviceToHost );
// printArray( b, N );
// printArray( outb, N );
hipblasHandle_t handle;
hipblasCreate(&handle);
// hipblasSetVector(N, sizeof(float), (void*)b, 1, b_dev, 1);
// int resultIndx;
// //max
// hipblasIsamax(handle, N, b_dev, 1, &resultIndx);
// printf( "Max [%d]: %f\n", resultIndx, b[resultIndx-1] );
// //min
// hipblasIsamin(handle, N, b_dev, 1, &resultIndx);
// printf( "Min [%d]: %f\n", resultIndx, b[resultIndx-1] );
// cublas rank-1 update
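    // hipblasSger performs the BLAS rank-1 update A <- alpha*x*y^T + A on the column-major
    // matrix A_dev; with x = y = b and A initialised to zero, the result should reproduce
    // the Eigen product b*b' computed above.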
//create and populate matrix A
float *A_dev;
hipMalloc((void**)&A_dev, N*N*sizeof(float));
Eigen::MatrixXf A = Eigen::MatrixXf::Zero(eig_b.rows(), eig_b.rows() );
std::cout << "Matrix A : "<< A << std::endl;
hipblasSetVector(N, sizeof(float), (void*)b, 1, b_dev, 1);
hipblasSetVector(N, sizeof(float), (void*)b, 1, by_dev, 1);
hipblasSetMatrix(A.rows(), A.cols(), sizeof(float), (void*)A.data(), A.rows(), (void*)A_dev, A.rows() );
float alpha = 1.0f;
hipblasSger( handle, A.rows(), A.cols(), &alpha, b_dev, 1, by_dev, 1, A_dev, A.rows() );
float * resA = new float[N*N];
float * resVec = new float[N];
hipblasGetMatrix(A.rows(), A.cols(), sizeof(float), A_dev, A.rows(), resA, A.rows() );
hipblasGetVector(N, sizeof(float), by_dev, 1, resVec, 1 );
//std::cout << "Resulting A: "<< A << std::endl;
cout << "resulting matrix : "; printMatrixColMajor( resA, N, N );
//verify
float *cpu = eig_out.data();
verifyVector( N*N, cpu, resA );
    hipFree((void*)b_dev);
    hipFree((void*)by_dev);
    hipFree((void*)A_dev);
    hipblasDestroy(handle);
    delete [] b;
    delete [] resA; delete [] resVec;
return EXIT_SUCCESS;
}
| faaaa0152e6665ca7b2a4c7e161dd36492721730.cu | #include <stdio.h>
#include <iostream>
using namespace std;
#include <cuda.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <Eigen/Dense>
#include <Eigen/Core>
//const int N = 131072; //2^17
const int N = 7;
__global__
void hello(float * b )
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
b[index] *= 2;
}
void displayDeviceInfo()
{
int devCount;
cudaGetDeviceCount(&devCount);
printf( "[# of cuda devices] : %d \n", devCount );
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0 );
printf( "[Device Name] : %s\n", prop.name );
printf( "[maxThreadsPerMultiProcessor] : %d\n", prop.maxThreadsPerMultiProcessor );
printf( "[maxThreadsPerBlock] : %d\n", prop.maxThreadsPerBlock );
}
void printArray( int * ary, int nCount )
{
printf( "[ ");
if( nCount < 15 )
{
for( int i=0 ; i<nCount ; i++ )
printf( "%3d, ", ary[i]);
}
else
{
for( int i=0 ; i<5 ; i++ )
printf( "%3d, ", ary[i]);
printf( "., ., ., ");
for( int i=nCount-6 ; i<nCount ; i++ )
printf( "%3d, ", ary[i]);
}
printf( "]\n");
}
void printArrayFloat( float * ary, int nCount )
{
printf( "[ ");
if( nCount < 15 )
{
for( int i=0 ; i<nCount ; i++ )
printf( "%3.3f, ", ary[i]);
}
else
{
for( int i=0 ; i<5 ; i++ )
printf( "%3.3f, ", ary[i]);
printf( "., ., ., ");
for( int i=nCount-6 ; i<nCount ; i++ )
printf( "%3.3f, ", ary[i]);
}
printf( "]\n");
}
void printMatrixColMajor( float *mat, int rows, int cols )
{
printf( "[ ");
//note: matrix is in col-major format
for( int i=0 ; i<rows ; i++ )
{
for( int j=0 ; j<cols ; j++ )
{
            int index = i + j*rows; // column-major: element (i,j) sits at i + j*rows (leading dimension = rows)
printf( "%5.3f, ", mat[index]);
}
printf( "\n");
}
printf( "]\n");
}
int verifyVector( int len, float * cpu, float * gpu )
{
//verify
int notMatchCount = 0 ;
for( int i=0 ; i<N*N ; i++ )
{
if( cpu[i] != gpu[i] )
{
//cout << "Does not match at : "<< i << endl;
notMatchCount++;
}
}
cout << "Does not match at : `"<< notMatchCount << "` locations"<< endl;
return notMatchCount;
}
int testmain()
{
displayDeviceInfo();
//int b[N] = {15, 10, 6, 0, -11, 1, 0};
float *b = new float[N];
for( int i=0 ; i<N ; i++ )
b[i] = (float) ( rand()%100 ) - 50.0;
printArrayFloat(b,N);
Eigen::VectorXf eig_b(N);
for( int i=0 ; i<N ; i++ )
eig_b(i) = b[i];
std::cout << "B (eigen): " << eig_b.transpose() << std::endl;
Eigen::MatrixXf eig_out = eig_b * eig_b.transpose();
std::cout << "WIth Eigen B*B': "<< eig_out << std::endl;
float *b_dev, *by_dev;
const int isize = N*sizeof(float);
cudaMalloc((void **)&b_dev, isize );
cudaMalloc((void **)&by_dev, isize );
// float outb[N];
// cudaMalloc( (void**)&b_dev, isize );
// cudaMemcpy( b_dev, b, isize, cudaMemcpyHostToDevice );
// dim3 dimBlock( 1024, 1 );
// dim3 dimGrid( N/1024, 1 );
// hello<<<dimGrid, dimBlock>>>(b_dev);
// cudaMemcpy( outb, b_dev, isize, cudaMemcpyDeviceToHost );
// printArray( b, N );
// printArray( outb, N );
cublasHandle_t handle;
cublasCreate(&handle);
// cublasSetVector(N, sizeof(float), (void*)b, 1, b_dev, 1);
// int resultIndx;
// //max
// cublasIsamax(handle, N, b_dev, 1, &resultIndx);
// printf( "Max [%d]: %f\n", resultIndx, b[resultIndx-1] );
// //min
// cublasIsamin(handle, N, b_dev, 1, &resultIndx);
// printf( "Min [%d]: %f\n", resultIndx, b[resultIndx-1] );
// cublas rank-1 update
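    // cublasSger performs the BLAS rank-1 update A <- alpha*x*y^T + A on the column-major
    // matrix A_dev; with x = y = b and A initialised to zero, the result should reproduce
    // the Eigen product b*b' computed above.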
//create and populate matrix A
float *A_dev;
cudaMalloc((void**)&A_dev, N*N*sizeof(float));
Eigen::MatrixXf A = Eigen::MatrixXf::Zero(eig_b.rows(), eig_b.rows() );
std::cout << "Matrix A : "<< A << std::endl;
cublasSetVector(N, sizeof(float), (void*)b, 1, b_dev, 1);
cublasSetVector(N, sizeof(float), (void*)b, 1, by_dev, 1);
cublasSetMatrix(A.rows(), A.cols(), sizeof(float), (void*)A.data(), A.rows(), (void*)A_dev, A.rows() );
float alpha = 1.0f;
cublasSger( handle, A.rows(), A.cols(), &alpha, b_dev, 1, by_dev, 1, A_dev, A.rows() );
float * resA = new float[N*N];
float * resVec = new float[N];
cublasGetMatrix(A.rows(), A.cols(), sizeof(float), A_dev, A.rows(), resA, A.rows() );
cublasGetVector(N, sizeof(float), by_dev, 1, resVec, 1 );
//std::cout << "Resulting A: "<< A << std::endl;
cout << "resulting matrix : "; printMatrixColMajor( resA, N, N );
//verify
float *cpu = eig_out.data();
verifyVector( N*N, cpu, resA );
    cudaFree((void*)b_dev);
    cudaFree((void*)by_dev);
    cudaFree((void*)A_dev);
    cublasDestroy(handle);
    delete [] b;
    delete [] resA; delete [] resVec;
return EXIT_SUCCESS;
}
|
36c809725c4f294266274ecee55bdd16fc9ec173.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <time.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <cublasXt.h>
#include "gpu_blas_test.h"
#include "util.h"
#define HANDLE_CUDA_ERROR( err ) ( HandleCudaError( err, __FILE__, __LINE__ ) )
static void HandleCudaError(hipError_t err, const char *file, int line)
{
if (err != hipSuccess)
{
printf("%s in %s at line %d\n", hipGetErrorString(err),
file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_CUBLAS_ERROR( err, str ) ( HandleCublasError( err, __FILE__, __LINE__, str) )
static void HandleCublasError(hipblasStatus_t err, const char *file, int line, const char *str)
{
if (err != HIPBLAS_STATUS_SUCCESS)
{
printf("error %s %d in %s at line %d\n", str, err, // why no cublasGetErrorString?
file, line);
exit(EXIT_FAILURE);
}
}
void list_cuda_devices()
{
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
}
}
int gpu_cublas_sgemm(int loops, int M, int N, int K, float alpha, float beta, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLAS sgemm: gpu=%d loops=%d M=%d N=%d K=%d alpha=%f beta=%f\n", gpu_ids[0], loops, M, N, K, alpha, beta);
list_cuda_devices();
} else {
printf("NVIDIA CUBLAS sgemm,%d,%d,%d,%d,%d,%f,%f", gpu_ids[0], loops, M, N, K, alpha, beta);
}
assert(num_gpus == 1);
hipSetDevice(gpu_ids[0]);
hipblasHandle_t handle;
HANDLE_CUBLAS_ERROR(hipblasCreate(&handle),"hipblasCreate fail");
float *a, *b, *c;
new_float_matrix(a, M, K);
new_float_matrix(b, K, N);
new_float_matrix(c, M, N);
// time all the extra stuff for setting up the matrices
clock_t start, stop;
clock_t start2, stop2;
start = clock();
float *dev_a, *dev_b, *dev_c;
HANDLE_CUDA_ERROR(hipMalloc((void**)&dev_a, M*K*sizeof(*a)));
HANDLE_CUDA_ERROR(hipMalloc((void**)&dev_b, K*N*sizeof(*b)));
HANDLE_CUDA_ERROR(hipMalloc((void**)&dev_c, M*N*sizeof(*c)));
HANDLE_CUBLAS_ERROR(hipblasSetMatrix(M, K, sizeof(*a), a, M, dev_a, M), "hipblasSetMatrix A fail");
HANDLE_CUBLAS_ERROR(hipblasSetMatrix(K, N, sizeof(*b), b, K, dev_b, K), "hipblasSetMatrix B fail");
HANDLE_CUBLAS_ERROR(hipblasSetMatrix(M, N, sizeof(*c), c, M, dev_c, M), "hipblasSetMatrix C fail");
hipDeviceSynchronize();
start2 = clock();
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, K, &alpha, dev_a, M, dev_b, K, &beta, dev_c, M), "Sgemm fail");
}
hipDeviceSynchronize();
stop2 = clock();
HANDLE_CUBLAS_ERROR(hipblasGetMatrix(M, N, sizeof(*c), dev_c, M, c, M), "hipblasGetMatrix C fail");
stop = clock();
summarize_sgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(!csv_output) {
printf("ON DEVICE TIME:");
}
summarize_sgemm(c, loops, M, N, K, alpha, beta, start2, stop2, csv_output);
if(csv_output) {
printf("\n");
}
delete_float_matrix(a);
delete_float_matrix(b);
delete_float_matrix(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipblasDestroy(handle);
return 0;
}
int gpu_cublas_dgemm(int loops, int M, int N, int K, double alpha, double beta, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLAS dgemm: gpu=%d loops=%d M=%d N=%d K=%d alpha=%f beta=%f\n", gpu_ids[0], loops, M, N, K, alpha, beta);
list_cuda_devices();
} else {
printf("NVIDIA CUBLAS dgemm,%d,%d,%d,%d,%d,%f,%f", gpu_ids[0], loops, M, N, K, alpha, beta);
}
assert(num_gpus == 1);
hipSetDevice(gpu_ids[0]);
hipblasHandle_t handle;
HANDLE_CUBLAS_ERROR(hipblasCreate(&handle), "hipblasCreate fail");
double *a, *b, *c;
new_double_matrix(a, M, K);
new_double_matrix(b, K, N);
new_double_matrix(c, M, N);
// time all the extra stuff for setting up the matrices
clock_t start, stop;
clock_t start2, stop2;
start = clock();
double *dev_a, *dev_b, *dev_c;
HANDLE_CUDA_ERROR(hipMalloc((void**)&dev_a, M*K*sizeof(*a)));
HANDLE_CUDA_ERROR(hipMalloc((void**)&dev_b, K*N*sizeof(*b)));
HANDLE_CUDA_ERROR(hipMalloc((void**)&dev_c, M*N*sizeof(*c)));
HANDLE_CUBLAS_ERROR(hipblasSetMatrix(M, K, sizeof(*a), a, M, dev_a, M), "hipblasSetMatrix A fail");
HANDLE_CUBLAS_ERROR(hipblasSetMatrix(K, N, sizeof(*b), b, K, dev_b, K), "hipblasSetMatrix B fail");
HANDLE_CUBLAS_ERROR(hipblasSetMatrix(M, N, sizeof(*c), c, M, dev_c, M), "hipblasSetMatrix C fail");
hipDeviceSynchronize();
start2 = clock();
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, K, &alpha, dev_a, M, dev_b, K, &beta, dev_c, M), "Dgemm fail");
}
hipDeviceSynchronize();
stop2 = clock();
HANDLE_CUBLAS_ERROR(hipblasGetMatrix(M, N, sizeof(*c), dev_c, M, c, M), "hipblasGetMatrix C fail");
stop = clock();
summarize_dgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(!csv_output) {
printf("ON DEVICE TIME:");
}
summarize_dgemm(c, loops, M, N, K, alpha, beta, start2, stop2, csv_output);
if(csv_output) {
printf("\n");
}
delete_double_matrix(a);
delete_double_matrix(b);
delete_double_matrix(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipblasDestroy(handle);
return 0;
}
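/* cublasXt variant: the operand matrices stay in host memory; the library tiles them into
   block_dim x block_dim blocks and schedules the tiles across the GPUs chosen with
   cublasXtDeviceSelect, so no explicit device allocations or copies appear below. */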
int gpu_cublasxt_sgemm(int loops, int M, int N, int K, float alpha, float beta, int block_dim, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLASXT sgemm: loops=%d M=%d N=%d K=%d alpha=%f beta=%f block_dim=%d num_gpus=%d\n", loops, M, N, K, alpha, beta, block_dim, num_gpus);
list_cuda_devices();
} else {
printf("NVIDIA CUBLASXT sgemm,%d,%d,%d,%d,%f,%f,%d,%d",loops, M, N, K, alpha, beta, block_dim, num_gpus);
}
cublasXtHandle_t handle;
HANDLE_CUBLAS_ERROR(cublasXtCreate(&handle), "cublasXtCreate fail");
HANDLE_CUBLAS_ERROR(cublasXtDeviceSelect(handle, num_gpus, gpu_ids), "cublasXtDeviceSelect fail");
HANDLE_CUBLAS_ERROR(cublasXtSetBlockDim(handle, block_dim), "cublasXtSetBlockDim fail");
float *a, *b, *c;
new_float_matrix(a, M, K);
new_float_matrix(b, K, N);
new_float_matrix(c, M, N);
clock_t start, stop;
start = clock();
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(cublasXtSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, K, &alpha, a, M, b, K, &beta, c, M), "Sgemm fail");
}
stop = clock();
summarize_sgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(csv_output) {
printf("\n");
}
delete_float_matrix(a);
delete_float_matrix(b);
delete_float_matrix(c);
cublasXtDestroy(handle);
return 0;
}
int gpu_cublasxt_dgemm(int loops, int M, int N, int K, double alpha, double beta, int block_dim, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLASXT dgemm: loops=%d M=%d N=%d K=%d alpha=%f beta=%f block_dim=%d num_gpus=%d\n", loops, M, N, K, alpha, beta, block_dim, num_gpus);
list_cuda_devices();
} else {
printf("NVIDIA CUBLASXT dgemm,%d,%d,%d,%d,%f,%f,%d,%d",loops, M, N, K, alpha, beta, block_dim, num_gpus);
}
cublasXtHandle_t handle;
HANDLE_CUBLAS_ERROR(cublasXtCreate(&handle), "cublasXtCreate fail");
HANDLE_CUBLAS_ERROR(cublasXtDeviceSelect(handle, num_gpus, gpu_ids), "cublasXtDeviceSelect fail");
HANDLE_CUBLAS_ERROR(cublasXtSetBlockDim(handle, block_dim), "cublasXtSetBlockDim fail");
double *a, *b, *c;
new_double_matrix(a, M, K);
new_double_matrix(b, K, N);
new_double_matrix(c, M, N);
clock_t start, stop;
start = clock();
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(cublasXtDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, K, &alpha, a, M, b, K, &beta, c, M), "Dgemm fail");
}
stop = clock();
summarize_dgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(csv_output) {
printf("\n");
}
delete_double_matrix(a);
delete_double_matrix(b);
delete_double_matrix(c);
cublasXtDestroy(handle);
return 0;
}
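/* Combined benchmark: each iteration runs an SYRK (C := alpha*A*A^T + beta*C on the lower
   triangle) followed by a full GEMM on the same operands; SYRK needs a square C, hence the
   M == N requirement. */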
int gpu_cublas_ssyrkgemm(int loops, int M, int N, int K, float alpha, float beta, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLAS ssyrkgemm: gpu=%d loops=%d M=%d N=%d K=%d alpha=%f beta=%f\n", gpu_ids[0], loops, M, N, K, alpha, beta);
list_cuda_devices();
} else {
printf("NVIDIA CUBLAS ssyrkgemm,%d,%d,%d,%d,%d,%f,%f",gpu_ids[0],loops, M, N, K, alpha, beta);
}
assert(M == N);
assert(num_gpus == 1);
hipSetDevice(gpu_ids[0]);
hipblasHandle_t handle;
HANDLE_CUBLAS_ERROR(hipblasCreate(&handle), "hipblasCreate fail");
float *a, *b, *c;
new_float_matrix(a, M, K);
new_float_matrix(b, K, N);
new_float_matrix(c, M, N);
// time all the extra stuff for setting up the matrices
clock_t start, stop;
start = clock();
float *dev_a, *dev_b, *dev_c;
HANDLE_CUDA_ERROR(hipMalloc((void**)&dev_a, M*K*sizeof(*a)));
HANDLE_CUDA_ERROR(hipMalloc((void**)&dev_b, K*N*sizeof(*b)));
HANDLE_CUDA_ERROR(hipMalloc((void**)&dev_c, M*N*sizeof(*c)));
HANDLE_CUBLAS_ERROR(hipblasSetMatrix(M, K, sizeof(*a), a, M, dev_a, M), "hipblasSetMatrix A fail");
HANDLE_CUBLAS_ERROR(hipblasSetMatrix(K, N, sizeof(*b), b, K, dev_b, K), "hipblasSetMatrix B fail");
HANDLE_CUBLAS_ERROR(hipblasSetMatrix(M, N, sizeof(*c), c, M, dev_c, M), "hipblasSetMatrix C fail");
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(hipblasSsyrk(handle, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_N, N, K, &alpha, dev_a, M, &beta, dev_c, N), "Ssyrk fail");
HANDLE_CUBLAS_ERROR(hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, K, &alpha, dev_a, M, dev_b, K, &beta, dev_c, M), "Sgemm fail");
}
HANDLE_CUBLAS_ERROR(hipblasGetMatrix(M, N, sizeof(*c), dev_c, M, c, M), "hipblasGetMatrix C fail");
stop = clock();
summarize_sgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(csv_output) {
printf("\n");
}
delete_float_matrix(a);
delete_float_matrix(b);
delete_float_matrix(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipblasDestroy(handle);
return 0;
}
int gpu_cublas_dsyrkgemm(int loops, int M, int N, int K, double alpha, double beta, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLAS dsyrkgemm: gpu=%d loops=%d M=%d N=%d K=%d alpha=%f beta=%f\n", gpu_ids[0], loops, M, N, K, alpha, beta);
list_cuda_devices();
} else {
printf("NVIDIA CUBLAS dsyrkgemm,%d,%d,%d,%d,%d,%f,%f",gpu_ids[0], loops, M, N, K, alpha, beta);
}
assert(M == N);
assert(num_gpus == 1);
hipSetDevice(gpu_ids[0]);
hipblasHandle_t handle;
HANDLE_CUBLAS_ERROR(hipblasCreate(&handle), "hipblasCreate fail");
double *a, *b, *c;
new_double_matrix(a, M, K);
new_double_matrix(b, K, N);
new_double_matrix(c, M, N);
// time all the extra stuff for setting up the matrices
clock_t start, stop;
start = clock();
double *dev_a, *dev_b, *dev_c;
HANDLE_CUDA_ERROR(hipMalloc((void**)&dev_a, M*K*sizeof(*a)));
HANDLE_CUDA_ERROR(hipMalloc((void**)&dev_b, K*N*sizeof(*b)));
HANDLE_CUDA_ERROR(hipMalloc((void**)&dev_c, M*N*sizeof(*c)));
HANDLE_CUBLAS_ERROR(hipblasSetMatrix(M, K, sizeof(*a), a, M, dev_a, M), "hipblasSetMatrix A fail");
HANDLE_CUBLAS_ERROR(hipblasSetMatrix(K, N, sizeof(*b), b, K, dev_b, K), "hipblasSetMatrix B fail");
HANDLE_CUBLAS_ERROR(hipblasSetMatrix(M, N, sizeof(*c), c, M, dev_c, M), "hipblasSetMatrix C fail");
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(hipblasDsyrk(handle, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_N, N, K, &alpha, dev_a, M, &beta, dev_c, N), "Dsyrk fail");
HANDLE_CUBLAS_ERROR(hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, K, &alpha, dev_a, M, dev_b, K, &beta, dev_c, M), "Dgemm fail");
}
HANDLE_CUBLAS_ERROR(hipblasGetMatrix(M, N, sizeof(*c), dev_c, M, c, M), "hipblasGetMatrix C fail");
stop = clock();
summarize_dgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(csv_output) {
printf("\n");
}
delete_double_matrix(a);
delete_double_matrix(b);
delete_double_matrix(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipblasDestroy(handle);
return 0;
}
int gpu_cublasxt_ssyrkgemm(int loops, int M, int N, int K, float alpha, float beta, int block_dim, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLASXT ssyrkgemm: loops=%d M=%d N=%d K=%d alpha=%f beta=%f block_dim=%d num_gpus=%d\n", loops, M, N, K, alpha, beta, block_dim, num_gpus);
list_cuda_devices();
} else {
printf("NVIDIA CUBLASXT ssyrkgemm,%d,%d,%d,%d,%f,%f,%d,%d",loops, M, N, K, alpha, beta, block_dim, num_gpus);
}
assert(M == N);
cublasXtHandle_t handle;
HANDLE_CUBLAS_ERROR(cublasXtCreate(&handle), "cublasXtCreate fail");
HANDLE_CUBLAS_ERROR(cublasXtDeviceSelect(handle, num_gpus, gpu_ids), "cublasXtDeviceSelect fail");
HANDLE_CUBLAS_ERROR(cublasXtSetBlockDim(handle, block_dim), "cublasXtSetBlockDim fail");
float *a, *b, *c;
new_float_matrix(a, M, K);
new_float_matrix(b, K, N);
new_float_matrix(c, M, N);
clock_t start, stop;
start = clock();
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(cublasXtSsyrk(handle, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_N, N, K, &alpha, a, M, &beta, c, N), "Ssyrk fail");
HANDLE_CUBLAS_ERROR(cublasXtSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, K, &alpha, a, M, b, K, &beta, c, M), "Sgemm fail");
}
stop = clock();
summarize_sgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(csv_output) {
printf("\n");
}
delete_float_matrix(a);
delete_float_matrix(b);
delete_float_matrix(c);
cublasXtDestroy(handle);
return 0;
}
int gpu_cublasxt_dsyrkgemm(int loops, int M, int N, int K, double alpha, double beta, int block_dim, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLASXT dsyrkgemm: loops=%d M=%d N=%d K=%d alpha=%f beta=%f block_dim=%d num_gpus=%d\n", loops, M, N, K, alpha, beta, block_dim, num_gpus);
list_cuda_devices();
} else {
printf("NVIDIA CUBLASXT dsyrkgemm,%d,%d,%d,%d,%f,%f,%d,%d",loops, M, N, K, alpha, beta, block_dim, num_gpus);
}
assert(M == N);
cublasXtHandle_t handle;
HANDLE_CUBLAS_ERROR(cublasXtCreate(&handle), "cublasXtCreate fail");
HANDLE_CUBLAS_ERROR(cublasXtDeviceSelect(handle, num_gpus, gpu_ids), "cublasXtDeviceSelect fail");
HANDLE_CUBLAS_ERROR(cublasXtSetBlockDim(handle, block_dim), "cublasXtSetBlockDim fail");
double *a, *b, *c;
new_double_matrix(a, M, K);
new_double_matrix(b, K, N);
new_double_matrix(c, M, N);
clock_t start, stop;
start = clock();
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(cublasXtDsyrk(handle, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_N, N, K, &alpha, a, M, &beta, c, N), "Dsyrk fail");
HANDLE_CUBLAS_ERROR(cublasXtDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, K, &alpha, a, M, b, K, &beta, c, M), "Dgemm fail");
}
stop = clock();
summarize_dgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(csv_output) {
printf("\n");
}
delete_double_matrix(a);
delete_double_matrix(b);
delete_double_matrix(c);
cublasXtDestroy(handle);
return 0;
}
| 36c809725c4f294266274ecee55bdd16fc9ec173.cu | #include <assert.h>
#include <time.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cublasXt.h>
#include "gpu_blas_test.h"
#include "util.h"
#define HANDLE_CUDA_ERROR( err ) ( HandleCudaError( err, __FILE__, __LINE__ ) )
static void HandleCudaError(cudaError_t err, const char *file, int line)
{
if (err != cudaSuccess)
{
printf("%s in %s at line %d\n", cudaGetErrorString(err),
file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_CUBLAS_ERROR( err, str ) ( HandleCublasError( err, __FILE__, __LINE__, str) )
static void HandleCublasError(cublasStatus_t err, const char *file, int line, const char *str)
{
if (err != CUBLAS_STATUS_SUCCESS)
{
printf("error %s %d in %s at line %d\n", str, err, // why no cublasGetErrorString?
file, line);
exit(EXIT_FAILURE);
}
}
void list_cuda_devices()
{
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
}
}
int gpu_cublas_sgemm(int loops, int M, int N, int K, float alpha, float beta, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLAS sgemm: gpu=%d loops=%d M=%d N=%d K=%d alpha=%f beta=%f\n", gpu_ids[0], loops, M, N, K, alpha, beta);
list_cuda_devices();
} else {
printf("NVIDIA CUBLAS sgemm,%d,%d,%d,%d,%d,%f,%f", gpu_ids[0], loops, M, N, K, alpha, beta);
}
assert(num_gpus == 1);
cudaSetDevice(gpu_ids[0]);
cublasHandle_t handle;
HANDLE_CUBLAS_ERROR(cublasCreate(&handle),"cublasCreate fail");
float *a, *b, *c;
new_float_matrix(a, M, K);
new_float_matrix(b, K, N);
new_float_matrix(c, M, N);
// time all the extra stuff for setting up the matrices
clock_t start, stop;
clock_t start2, stop2;
start = clock();
float *dev_a, *dev_b, *dev_c;
HANDLE_CUDA_ERROR(cudaMalloc((void**)&dev_a, M*K*sizeof(*a)));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&dev_b, K*N*sizeof(*b)));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&dev_c, M*N*sizeof(*c)));
HANDLE_CUBLAS_ERROR(cublasSetMatrix(M, K, sizeof(*a), a, M, dev_a, M), "cublasSetMatrix A fail");
HANDLE_CUBLAS_ERROR(cublasSetMatrix(K, N, sizeof(*b), b, K, dev_b, K), "cublasSetMatrix B fail");
HANDLE_CUBLAS_ERROR(cublasSetMatrix(M, N, sizeof(*c), c, M, dev_c, M), "cublasSetMatrix C fail");
cudaDeviceSynchronize();
start2 = clock();
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, &alpha, dev_a, M, dev_b, K, &beta, dev_c, M), "Sgemm fail");
}
cudaDeviceSynchronize();
stop2 = clock();
HANDLE_CUBLAS_ERROR(cublasGetMatrix(M, N, sizeof(*c), dev_c, M, c, M), "cublasGetMatrix C fail");
stop = clock();
summarize_sgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(!csv_output) {
printf("ON DEVICE TIME:");
}
summarize_sgemm(c, loops, M, N, K, alpha, beta, start2, stop2, csv_output);
if(csv_output) {
printf("\n");
}
delete_float_matrix(a);
delete_float_matrix(b);
delete_float_matrix(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cublasDestroy(handle);
return 0;
}
int gpu_cublas_dgemm(int loops, int M, int N, int K, double alpha, double beta, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLAS dgemm: gpu=%d loops=%d M=%d N=%d K=%d alpha=%f beta=%f\n", gpu_ids[0], loops, M, N, K, alpha, beta);
list_cuda_devices();
} else {
printf("NVIDIA CUBLAS dgemm,%d,%d,%d,%d,%d,%f,%f", gpu_ids[0], loops, M, N, K, alpha, beta);
}
assert(num_gpus == 1);
cudaSetDevice(gpu_ids[0]);
cublasHandle_t handle;
HANDLE_CUBLAS_ERROR(cublasCreate(&handle), "cublasCreate fail");
double *a, *b, *c;
new_double_matrix(a, M, K);
new_double_matrix(b, K, N);
new_double_matrix(c, M, N);
// time all the extra stuff for setting up the matrices
clock_t start, stop;
clock_t start2, stop2;
start = clock();
double *dev_a, *dev_b, *dev_c;
HANDLE_CUDA_ERROR(cudaMalloc((void**)&dev_a, M*K*sizeof(*a)));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&dev_b, K*N*sizeof(*b)));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&dev_c, M*N*sizeof(*c)));
HANDLE_CUBLAS_ERROR(cublasSetMatrix(M, K, sizeof(*a), a, M, dev_a, M), "cublasSetMatrix A fail");
HANDLE_CUBLAS_ERROR(cublasSetMatrix(K, N, sizeof(*b), b, K, dev_b, K), "cublasSetMatrix B fail");
HANDLE_CUBLAS_ERROR(cublasSetMatrix(M, N, sizeof(*c), c, M, dev_c, M), "cublasSetMatrix C fail");
cudaDeviceSynchronize();
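// start2/stop2 bracket only the GEMM kernels; start/stop also include device allocation and host<->device transfers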
start2 = clock();
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, &alpha, dev_a, M, dev_b, K, &beta, dev_c, M), "Dgemm fail");
}
cudaDeviceSynchronize();
stop2 = clock();
HANDLE_CUBLAS_ERROR(cublasGetMatrix(M, N, sizeof(*c), dev_c, M, c, M), "cublasGetMatrix C fail");
stop = clock();
summarize_dgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(!csv_output) {
printf("ON DEVICE TIME:");
}
summarize_dgemm(c, loops, M, N, K, alpha, beta, start2, stop2, csv_output);
if(csv_output) {
printf("\n");
}
delete_double_matrix(a);
delete_double_matrix(b);
delete_double_matrix(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cublasDestroy(handle);
return 0;
}
int gpu_cublasxt_sgemm(int loops, int M, int N, int K, float alpha, float beta, int block_dim, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLASXT sgemm: loops=%d M=%d N=%d K=%d alpha=%f beta=%f block_dim=%d num_gpus=%d\n", loops, M, N, K, alpha, beta, block_dim, num_gpus);
list_cuda_devices();
} else {
printf("NVIDIA CUBLASXT sgemm,%d,%d,%d,%d,%f,%f,%d,%d",loops, M, N, K, alpha, beta, block_dim, num_gpus);
}
cublasXtHandle_t handle;
HANDLE_CUBLAS_ERROR(cublasXtCreate(&handle), "cublasXtCreate fail");
HANDLE_CUBLAS_ERROR(cublasXtDeviceSelect(handle, num_gpus, gpu_ids), "cublasXtDeviceSelect fail");
HANDLE_CUBLAS_ERROR(cublasXtSetBlockDim(handle, block_dim), "cublasXtSetBlockDim fail");
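// cublasXt operates directly on host memory: it tiles the operands into block_dim x block_dim blocks and distributes the work across the selected GPUs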
float *a, *b, *c;
new_float_matrix(a, M, K);
new_float_matrix(b, K, N);
new_float_matrix(c, M, N);
clock_t start, stop;
start = clock();
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(cublasXtSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, &alpha, a, M, b, K, &beta, c, M), "Sgemm fail");
}
stop = clock();
summarize_sgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(csv_output) {
printf("\n");
}
delete_float_matrix(a);
delete_float_matrix(b);
delete_float_matrix(c);
cublasXtDestroy(handle);
return 0;
}
int gpu_cublasxt_dgemm(int loops, int M, int N, int K, double alpha, double beta, int block_dim, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLASXT dgemm: loops=%d M=%d N=%d K=%d alpha=%f beta=%f block_dim=%d num_gpus=%d\n", loops, M, N, K, alpha, beta, block_dim, num_gpus);
list_cuda_devices();
} else {
printf("NVIDIA CUBLASXT dgemm,%d,%d,%d,%d,%f,%f,%d,%d",loops, M, N, K, alpha, beta, block_dim, num_gpus);
}
cublasXtHandle_t handle;
HANDLE_CUBLAS_ERROR(cublasXtCreate(&handle), "cublasXtCreate fail");
HANDLE_CUBLAS_ERROR(cublasXtDeviceSelect(handle, num_gpus, gpu_ids), "cublasXtDeviceSelect fail");
HANDLE_CUBLAS_ERROR(cublasXtSetBlockDim(handle, block_dim), "cublasXtSetBlockDim fail");
double *a, *b, *c;
new_double_matrix(a, M, K);
new_double_matrix(b, K, N);
new_double_matrix(c, M, N);
clock_t start, stop;
start = clock();
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(cublasXtDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, &alpha, a, M, b, K, &beta, c, M), "Dgemm fail");
}
stop = clock();
summarize_dgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(csv_output) {
printf("\n");
}
delete_double_matrix(a);
delete_double_matrix(b);
delete_double_matrix(c);
cublasXtDestroy(handle);
return 0;
}
int gpu_cublas_ssyrkgemm(int loops, int M, int N, int K, float alpha, float beta, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLAS ssyrkgemm: gpu=%d loops=%d M=%d N=%d K=%d alpha=%f beta=%f\n", gpu_ids[0], loops, M, N, K, alpha, beta);
list_cuda_devices();
} else {
printf("NVIDIA CUBLAS ssyrkgemm,%d,%d,%d,%d,%d,%f,%f",gpu_ids[0],loops, M, N, K, alpha, beta);
}
assert(M == N);
assert(num_gpus == 1);
cudaSetDevice(gpu_ids[0]);
cublasHandle_t handle;
HANDLE_CUBLAS_ERROR(cublasCreate(&handle), "cublasCreate fail");
float *a, *b, *c;
new_float_matrix(a, M, K);
new_float_matrix(b, K, N);
new_float_matrix(c, M, N);
// time all the extra stuff for setting up the matrices
clock_t start, stop;
start = clock();
float *dev_a, *dev_b, *dev_c;
HANDLE_CUDA_ERROR(cudaMalloc((void**)&dev_a, M*K*sizeof(*a)));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&dev_b, K*N*sizeof(*b)));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&dev_c, M*N*sizeof(*c)));
HANDLE_CUBLAS_ERROR(cublasSetMatrix(M, K, sizeof(*a), a, M, dev_a, M), "cublasSetMatrix A fail");
HANDLE_CUBLAS_ERROR(cublasSetMatrix(K, N, sizeof(*b), b, K, dev_b, K), "cublasSetMatrix B fail");
HANDLE_CUBLAS_ERROR(cublasSetMatrix(M, N, sizeof(*c), c, M, dev_c, M), "cublasSetMatrix C fail");
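// each loop iteration runs a lower-triangular rank-K update (SYRK) followed by a full GEMM on the same device buffers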
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(cublasSsyrk(handle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, N, K, &alpha, dev_a, M, &beta, dev_c, N), "Ssyrk fail");
HANDLE_CUBLAS_ERROR(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, &alpha, dev_a, M, dev_b, K, &beta, dev_c, M), "Sgemm fail");
}
HANDLE_CUBLAS_ERROR(cublasGetMatrix(M, N, sizeof(*c), dev_c, M, c, M), "cublasGetMatrix C fail");
stop = clock();
summarize_sgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(csv_output) {
printf("\n");
}
delete_float_matrix(a);
delete_float_matrix(b);
delete_float_matrix(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cublasDestroy(handle);
return 0;
}
int gpu_cublas_dsyrkgemm(int loops, int M, int N, int K, double alpha, double beta, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLAS dsyrkgemm: gpu=%d loops=%d M=%d N=%d K=%d alpha=%f beta=%f\n", gpu_ids[0], loops, M, N, K, alpha, beta);
list_cuda_devices();
} else {
printf("NVIDIA CUBLAS dsyrkgemm,%d,%d,%d,%d,%d,%f,%f",gpu_ids[0], loops, M, N, K, alpha, beta);
}
assert(M == N);
assert(num_gpus == 1);
cudaSetDevice(gpu_ids[0]);
cublasHandle_t handle;
HANDLE_CUBLAS_ERROR(cublasCreate(&handle), "cublasCreate fail");
double *a, *b, *c;
new_double_matrix(a, M, K);
new_double_matrix(b, K, N);
new_double_matrix(c, M, N);
// time all the extra stuff for setting up the matrices
clock_t start, stop;
start = clock();
double *dev_a, *dev_b, *dev_c;
HANDLE_CUDA_ERROR(cudaMalloc((void**)&dev_a, M*K*sizeof(*a)));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&dev_b, K*N*sizeof(*b)));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&dev_c, M*N*sizeof(*c)));
HANDLE_CUBLAS_ERROR(cublasSetMatrix(M, K, sizeof(*a), a, M, dev_a, M), "cublasSetMatrix A fail");
HANDLE_CUBLAS_ERROR(cublasSetMatrix(K, N, sizeof(*b), b, K, dev_b, K), "cublasSetMatrix B fail");
HANDLE_CUBLAS_ERROR(cublasSetMatrix(M, N, sizeof(*c), c, M, dev_c, M), "cublasSetMatrix C fail");
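// each loop iteration runs a lower-triangular rank-K update (SYRK) followed by a full GEMM on the same device buffers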
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(cublasDsyrk(handle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, N, K, &alpha, dev_a, M, &beta, dev_c, N), "Dsyrk fail");
HANDLE_CUBLAS_ERROR(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, &alpha, dev_a, M, dev_b, K, &beta, dev_c, M), "Dgemm fail");
}
HANDLE_CUBLAS_ERROR(cublasGetMatrix(M, N, sizeof(*c), dev_c, M, c, M), "cublasGetMatrix C fail");
stop = clock();
summarize_dgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(csv_output) {
printf("\n");
}
delete_double_matrix(a);
delete_double_matrix(b);
delete_double_matrix(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cublasDestroy(handle);
return 0;
}
int gpu_cublasxt_ssyrkgemm(int loops, int M, int N, int K, float alpha, float beta, int block_dim, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLASXT ssyrkgemm: loops=%d M=%d N=%d K=%d alpha=%f beta=%f block_dim=%d num_gpus=%d\n", loops, M, N, K, alpha, beta, block_dim, num_gpus);
list_cuda_devices();
} else {
printf("NVIDIA CUBLASXT ssyrkgemm,%d,%d,%d,%d,%f,%f,%d,%d",loops, M, N, K, alpha, beta, block_dim, num_gpus);
}
assert(M == N);
cublasXtHandle_t handle;
HANDLE_CUBLAS_ERROR(cublasXtCreate(&handle), "cublasXtCreate fail");
HANDLE_CUBLAS_ERROR(cublasXtDeviceSelect(handle, num_gpus, gpu_ids), "cublasXtDeviceSelect fail");
HANDLE_CUBLAS_ERROR(cublasXtSetBlockDim(handle, block_dim), "cublasXtSetBlockDim fail");
float *a, *b, *c;
new_float_matrix(a, M, K);
new_float_matrix(b, K, N);
new_float_matrix(c, M, N);
clock_t start, stop;
start = clock();
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(cublasXtSsyrk(handle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, N, K, &alpha, a, M, &beta, c, N), "Ssyrk fail");
HANDLE_CUBLAS_ERROR(cublasXtSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, &alpha, a, M, b, K, &beta, c, M), "Sgemm fail");
}
stop = clock();
summarize_sgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(csv_output) {
printf("\n");
}
delete_float_matrix(a);
delete_float_matrix(b);
delete_float_matrix(c);
cublasXtDestroy(handle);
return 0;
}
int gpu_cublasxt_dsyrkgemm(int loops, int M, int N, int K, double alpha, double beta, int block_dim, int num_gpus, int *gpu_ids, bool csv_output)
{
if(!csv_output) {
printf("NVIDIA CUBLASXT dsyrkgemm: loops=%d M=%d N=%d K=%d alpha=%f beta=%f block_dim=%d num_gpus=%d\n", loops, M, N, K, alpha, beta, block_dim, num_gpus);
list_cuda_devices();
} else {
printf("NVIDIA CUBLASXT dsyrkgemm,%d,%d,%d,%d,%f,%f,%d,%d",loops, M, N, K, alpha, beta, block_dim, num_gpus);
}
assert(M == N);
cublasXtHandle_t handle;
HANDLE_CUBLAS_ERROR(cublasXtCreate(&handle), "cublasXtCreate fail");
HANDLE_CUBLAS_ERROR(cublasXtDeviceSelect(handle, num_gpus, gpu_ids), "cublasXtDeviceSelect fail");
HANDLE_CUBLAS_ERROR(cublasXtSetBlockDim(handle, block_dim), "cublasXtSetBlockDim fail");
double *a, *b, *c;
new_double_matrix(a, M, K);
new_double_matrix(b, K, N);
new_double_matrix(c, M, N);
clock_t start, stop;
start = clock();
for (int i = 0; i < loops; ++i) {
HANDLE_CUBLAS_ERROR(cublasXtDsyrk(handle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, N, K, &alpha, a, M, &beta, c, N), "Dsyrk fail");
HANDLE_CUBLAS_ERROR(cublasXtDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, &alpha, a, M, b, K, &beta, c, M), "Dgemm fail");
}
stop = clock();
summarize_dgemm(c, loops, M, N, K, alpha, beta, start, stop, csv_output);
if(csv_output) {
printf("\n");
}
delete_double_matrix(a);
delete_double_matrix(b);
delete_double_matrix(c);
cublasXtDestroy(handle);
return 0;
}
|
62fb7d3a99762f8d3be8d80b7b7bd32b32bf8593.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <hash/concurrent_unordered_map.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/logical.h>
#include <thrust/pair.h>
#include <thrust/tabulate.h>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <random>
#include <unordered_map>
#include <vector>
template <typename K, typename V>
struct key_value_types {
using key_type = K;
using value_type = V;
using pair_type = thrust::pair<K, V>;
using map_type = concurrent_unordered_map<key_type, value_type>;
};
template <typename T>
struct InsertTest : public cudf::test::BaseFixture {
using key_type = typename T::key_type;
using value_type = typename T::value_type;
using pair_type = typename T::pair_type;
using map_type = typename T::map_type;
InsertTest()
{
// prevent overflow of small types
const size_t input_size =
::min(static_cast<key_type>(size), std::numeric_limits<key_type>::max());
pairs.resize(input_size, cudf::default_stream_value);
map = std::move(map_type::create(compute_hash_table_size(size)));
cudf::default_stream_value.synchronize();
}
const cudf::size_type size{10000};
rmm::device_uvector<pair_type> pairs{static_cast<std::size_t>(size), cudf::default_stream_value};
std::unique_ptr<map_type, std::function<void(map_type*)>> map;
};
using TestTypes = ::testing::Types<key_value_types<int32_t, int32_t>,
key_value_types<int64_t, int64_t>,
key_value_types<int8_t, int8_t>,
key_value_types<int16_t, int16_t>,
key_value_types<int8_t, float>,
key_value_types<int16_t, double>,
key_value_types<int32_t, float>,
key_value_types<int64_t, double>>;
TYPED_TEST_SUITE(InsertTest, TestTypes);
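// The functors below run on the device: thrust::all_of with rmm::exec_policy() applies them to every pair in the buffer.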
template <typename map_type, typename pair_type>
struct insert_pair {
insert_pair(map_type _map) : map{_map} {}
__device__ bool operator()(pair_type const& pair)
{
auto result = map.insert(pair);
if (result.first == map.end()) { return false; }
return result.second;
}
map_type map;
};
template <typename map_type, typename pair_type>
struct find_pair {
find_pair(map_type _map) : map{_map} {}
__device__ bool operator()(pair_type const& pair)
{
auto result = map.find(pair.first);
if (result == map.end()) { return false; }
return *result == pair;
}
map_type map;
};
template <typename pair_type,
typename key_type = typename pair_type::first_type,
typename value_type = typename pair_type::second_type>
struct unique_pair_generator {
__device__ pair_type operator()(cudf::size_type i)
{
return thrust::make_pair(key_type(i), value_type(i));
}
};
template <typename pair_type,
typename key_type = typename pair_type::first_type,
typename value_type = typename pair_type::second_type>
struct identical_pair_generator {
identical_pair_generator(key_type k = 42, value_type v = 42) : key{k}, value{v} {}
__device__ pair_type operator()(cudf::size_type i) { return thrust::make_pair(key, value); }
key_type key;
value_type value;
};
template <typename pair_type,
typename key_type = typename pair_type::first_type,
typename value_type = typename pair_type::second_type>
struct identical_key_generator {
identical_key_generator(key_type k = 42) : key{k} {}
__device__ pair_type operator()(cudf::size_type i)
{
return thrust::make_pair(key, value_type(i));
}
key_type key;
};
TYPED_TEST(InsertTest, UniqueKeysUniqueValues)
{
using map_type = typename TypeParam::map_type;
using pair_type = typename TypeParam::pair_type;
thrust::tabulate(
rmm::exec_policy(), this->pairs.begin(), this->pairs.end(), unique_pair_generator<pair_type>{});
// All pairs should be new inserts
EXPECT_TRUE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.end(),
insert_pair<map_type, pair_type>{*this->map}));
// All pairs should be present in the map
EXPECT_TRUE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.end(),
find_pair<map_type, pair_type>{*this->map}));
}
TYPED_TEST(InsertTest, IdenticalKeysIdenticalValues)
{
using map_type = typename TypeParam::map_type;
using pair_type = typename TypeParam::pair_type;
thrust::tabulate(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.end(),
identical_pair_generator<pair_type>{});
// Insert a single pair
EXPECT_TRUE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.begin() + 1,
insert_pair<map_type, pair_type>{*this->map}));
// Identical inserts should all return false (no new insert)
EXPECT_FALSE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.end(),
insert_pair<map_type, pair_type>{*this->map}));
// All pairs should be present in the map
EXPECT_TRUE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.end(),
find_pair<map_type, pair_type>{*this->map}));
}
TYPED_TEST(InsertTest, IdenticalKeysUniqueValues)
{
using map_type = typename TypeParam::map_type;
using pair_type = typename TypeParam::pair_type;
thrust::tabulate(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.end(),
identical_key_generator<pair_type>{});
// Insert a single pair
EXPECT_TRUE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.begin() + 1,
insert_pair<map_type, pair_type>{*this->map}));
// Identical key inserts should all return false (no new insert)
EXPECT_FALSE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin() + 1,
this->pairs.end(),
insert_pair<map_type, pair_type>{*this->map}));
// Only first pair is present in map
EXPECT_TRUE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.begin() + 1,
find_pair<map_type, pair_type>{*this->map}));
EXPECT_FALSE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin() + 1,
this->pairs.end(),
find_pair<map_type, pair_type>{*this->map}));
}
CUDF_TEST_PROGRAM_MAIN()
| 62fb7d3a99762f8d3be8d80b7b7bd32b32bf8593.cu | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <hash/concurrent_unordered_map.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/logical.h>
#include <thrust/pair.h>
#include <thrust/tabulate.h>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <random>
#include <unordered_map>
#include <vector>
template <typename K, typename V>
struct key_value_types {
using key_type = K;
using value_type = V;
using pair_type = thrust::pair<K, V>;
using map_type = concurrent_unordered_map<key_type, value_type>;
};
template <typename T>
struct InsertTest : public cudf::test::BaseFixture {
using key_type = typename T::key_type;
using value_type = typename T::value_type;
using pair_type = typename T::pair_type;
using map_type = typename T::map_type;
InsertTest()
{
// prevent overflow of small types
const size_t input_size =
std::min(static_cast<key_type>(size), std::numeric_limits<key_type>::max());
pairs.resize(input_size, cudf::default_stream_value);
map = std::move(map_type::create(compute_hash_table_size(size)));
cudf::default_stream_value.synchronize();
}
const cudf::size_type size{10000};
rmm::device_uvector<pair_type> pairs{static_cast<std::size_t>(size), cudf::default_stream_value};
std::unique_ptr<map_type, std::function<void(map_type*)>> map;
};
using TestTypes = ::testing::Types<key_value_types<int32_t, int32_t>,
key_value_types<int64_t, int64_t>,
key_value_types<int8_t, int8_t>,
key_value_types<int16_t, int16_t>,
key_value_types<int8_t, float>,
key_value_types<int16_t, double>,
key_value_types<int32_t, float>,
key_value_types<int64_t, double>>;
TYPED_TEST_SUITE(InsertTest, TestTypes);
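// The functors below run on the device: thrust::all_of with rmm::exec_policy() applies them to every pair in the buffer.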
template <typename map_type, typename pair_type>
struct insert_pair {
insert_pair(map_type _map) : map{_map} {}
__device__ bool operator()(pair_type const& pair)
{
auto result = map.insert(pair);
if (result.first == map.end()) { return false; }
return result.second;
}
map_type map;
};
template <typename map_type, typename pair_type>
struct find_pair {
find_pair(map_type _map) : map{_map} {}
__device__ bool operator()(pair_type const& pair)
{
auto result = map.find(pair.first);
if (result == map.end()) { return false; }
return *result == pair;
}
map_type map;
};
template <typename pair_type,
typename key_type = typename pair_type::first_type,
typename value_type = typename pair_type::second_type>
struct unique_pair_generator {
__device__ pair_type operator()(cudf::size_type i)
{
return thrust::make_pair(key_type(i), value_type(i));
}
};
template <typename pair_type,
typename key_type = typename pair_type::first_type,
typename value_type = typename pair_type::second_type>
struct identical_pair_generator {
identical_pair_generator(key_type k = 42, value_type v = 42) : key{k}, value{v} {}
__device__ pair_type operator()(cudf::size_type i) { return thrust::make_pair(key, value); }
key_type key;
value_type value;
};
template <typename pair_type,
typename key_type = typename pair_type::first_type,
typename value_type = typename pair_type::second_type>
struct identical_key_generator {
identical_key_generator(key_type k = 42) : key{k} {}
__device__ pair_type operator()(cudf::size_type i)
{
return thrust::make_pair(key, value_type(i));
}
key_type key;
};
TYPED_TEST(InsertTest, UniqueKeysUniqueValues)
{
using map_type = typename TypeParam::map_type;
using pair_type = typename TypeParam::pair_type;
thrust::tabulate(
rmm::exec_policy(), this->pairs.begin(), this->pairs.end(), unique_pair_generator<pair_type>{});
// All pairs should be new inserts
EXPECT_TRUE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.end(),
insert_pair<map_type, pair_type>{*this->map}));
// All pairs should be present in the map
EXPECT_TRUE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.end(),
find_pair<map_type, pair_type>{*this->map}));
}
TYPED_TEST(InsertTest, IdenticalKeysIdenticalValues)
{
using map_type = typename TypeParam::map_type;
using pair_type = typename TypeParam::pair_type;
thrust::tabulate(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.end(),
identical_pair_generator<pair_type>{});
// Insert a single pair
EXPECT_TRUE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.begin() + 1,
insert_pair<map_type, pair_type>{*this->map}));
// Identical inserts should all return false (no new insert)
EXPECT_FALSE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.end(),
insert_pair<map_type, pair_type>{*this->map}));
// All pairs should be present in the map
EXPECT_TRUE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.end(),
find_pair<map_type, pair_type>{*this->map}));
}
TYPED_TEST(InsertTest, IdenticalKeysUniqueValues)
{
using map_type = typename TypeParam::map_type;
using pair_type = typename TypeParam::pair_type;
thrust::tabulate(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.end(),
identical_key_generator<pair_type>{});
// Insert a single pair
EXPECT_TRUE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.begin() + 1,
insert_pair<map_type, pair_type>{*this->map}));
// Identical key inserts should all return false (no new insert)
EXPECT_FALSE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin() + 1,
this->pairs.end(),
insert_pair<map_type, pair_type>{*this->map}));
// Only first pair is present in map
EXPECT_TRUE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin(),
this->pairs.begin() + 1,
find_pair<map_type, pair_type>{*this->map}));
EXPECT_FALSE(thrust::all_of(rmm::exec_policy(),
this->pairs.begin() + 1,
this->pairs.end(),
find_pair<map_type, pair_type>{*this->map}));
}
CUDF_TEST_PROGRAM_MAIN()
|
869464e279589f75166e2ae9f597d6710b71facd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <chrono>
#include <hip/hip_runtime.h>
#include "kernels.h"
int PowTwoDivider(int n)
{
if (n == 0) return 0;
int divider = 1;
while ((n & divider) == 0) divider <<= 1;
return divider;
}
int main(int argc, char* argv[]) {
if (argc != 4) {
printf("Usage: %s <width> <height> <repeat>\n", argv[0]);
return 1;
}
const int width = atoi(argv[1]);
const int height = atoi(argv[2]);
const int repeat = atoi(argv[3]);
const int image_pitch = width * sizeof(float);
const int numPix = width * height;
const int image_size = numPix * sizeof(float);
float *image = (float*) malloc (image_size);
// fill the image with random values
srand(123);
for (int i = 0; i < numPix; i++) {
uint x = rand() % 256;
uint y = rand() % 256;
uint z = rand() % 256;
uint w = rand() % 256;
*(uint*)(&image[i]) = (w << 24) | (z << 16) | (y << 8) | x;
}
float *d_image;
hipMalloc((void**)&d_image, image_size);
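// block size = largest power-of-two divisor of the image dimension, capped at 64 threads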
int blocks = ::min(PowTwoDivider(height), 64);
dim3 dimBlockX (blocks);
dim3 dimGridX ((height + blocks - 1) / blocks);
blocks = ::min(PowTwoDivider(width), 64);
dim3 dimBlockY (blocks);
dim3 dimGridY ((width + blocks - 1) / blocks);
double total_time = 0.0;
for (int i = 0; i < repeat; i++) {
hipMemcpy(d_image, image, image_size, hipMemcpyHostToDevice);
auto start = std::chrono::steady_clock::now();
hipLaunchKernelGGL(toCoef2DX, dimGridX, dimBlockX, 0, 0, d_image, image_pitch, width, height);
hipLaunchKernelGGL(toCoef2DY, dimGridY, dimBlockY, 0, 0, d_image, image_pitch, width, height);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
total_time += time;
}
printf("Average kernel execution time %f (s)\n", total_time * 1e-9f / repeat);
hipMemcpy(image, d_image, image_size, hipMemcpyDeviceToHost);
hipFree(d_image);
float sum = 0.f;
for (int i = 0; i < numPix; i++) {
const uchar *t = (const uchar*)(&image[i]);
sum += (t[0] + t[1] + t[2] + t[3]) / 4;
}
printf("Checksum: %f\n", sum / numPix);
free(image);
return 0;
}
| 869464e279589f75166e2ae9f597d6710b71facd.cu | #include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <chrono>
#include <hip/hip_runtime.h>
#include "kernels.h"
int PowTwoDivider(int n)
{
if (n == 0) return 0;
int divider = 1;
while ((n & divider) == 0) divider <<= 1;
return divider;
}
int main(int argc, char* argv[]) {
if (argc != 4) {
printf("Usage: %s <width> <height> <repeat>\n", argv[0]);
return 1;
}
const int width = atoi(argv[1]);
const int height = atoi(argv[2]);
const int repeat = atoi(argv[3]);
const int image_pitch = width * sizeof(float);
const int numPix = width * height;
const int image_size = numPix * sizeof(float);
float *image = (float*) malloc (image_size);
// fill the image with random values
srand(123);
for (int i = 0; i < numPix; i++) {
uint x = rand() % 256;
uint y = rand() % 256;
uint z = rand() % 256;
uint w = rand() % 256;
*(uint*)(&image[i]) = (w << 24) | (z << 16) | (y << 8) | x;
}
float *d_image;
hipMalloc((void**)&d_image, image_size);
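// block size = largest power-of-two divisor of the image dimension, capped at 64 threads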
int blocks = std::min(PowTwoDivider(height), 64);
dim3 dimBlockX (blocks);
dim3 dimGridX ((height + blocks - 1) / blocks);
blocks = std::min(PowTwoDivider(width), 64);
dim3 dimBlockY (blocks);
dim3 dimGridY ((width + blocks - 1) / blocks);
double total_time = 0.0;
for (int i = 0; i < repeat; i++) {
hipMemcpy(d_image, image, image_size, hipMemcpyHostToDevice);
auto start = std::chrono::steady_clock::now();
hipLaunchKernelGGL(toCoef2DX, dimGridX, dimBlockX, 0, 0, d_image, image_pitch, width, height);
hipLaunchKernelGGL(toCoef2DY, dimGridY, dimBlockY, 0, 0, d_image, image_pitch, width, height);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
total_time += time;
}
printf("Average kernel execution time %f (s)\n", total_time * 1e-9f / repeat);
hipMemcpy(image, d_image, image_size, hipMemcpyDeviceToHost);
hipFree(d_image);
float sum = 0.f;
for (int i = 0; i < numPix; i++) {
const uchar *t = (const uchar*)(&image[i]);
sum += (t[0] + t[1] + t[2] + t[3]) / 4;
}
printf("Checksum: %f\n", sum / numPix);
free(image);
return 0;
}
|
8d91cdd53db014dc9259518c3f29016ecd7877c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 4
//Radix Sorting
#include "reference_calc.cpp"
#include "utils.h"
#include <thrust/host_vector.h>
#include <cstdio>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
__global__ void Predicate(const unsigned int* const key,
const size_t num,
const unsigned int bit,
const unsigned int value,
unsigned int* const predicate) {
const size_t tidx = blockIdx.x * blockDim.x + threadIdx.x;
if (tidx >= num)
return;
if ((key[tidx] & (1 << bit)) == value)
predicate[tidx] = 1;
else
predicate[tidx] = 0;
}
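// One step of a Hillis-Steele inclusive scan: each element adds in the value `width` positions to its left.
// The caller doubles `width` every call and ping-pongs the input/output buffers.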
__global__ void ScanStep(const unsigned int* const in,
unsigned int* const out,
const size_t num,
const size_t width) {
const size_t tidx = blockIdx.x * blockDim.x + threadIdx.x;
if (tidx >= num)
return;
unsigned int s = in[tidx];
if (tidx >= width)
s += in[tidx - width];
out[tidx] = s;
}
__global__ void ComputeNewIndex(const unsigned int* const key,
const size_t num,
const unsigned int bit,
const unsigned int value,
const unsigned int base,
const unsigned int* const offset,
unsigned int* const index) {
const size_t tidx = blockIdx.x * blockDim.x + threadIdx.x;
if (tidx >= num)
return;
if ((key[tidx] & (1 << bit)) == value)
index[tidx] = base + offset[tidx] - 1; // inclusive -> exclusive
}
__global__ void Move(const unsigned int* const in_val,
const unsigned int* const in_pos,
unsigned int* const out_val,
unsigned int* const out_pos,
const unsigned int* const index,
const size_t num) {
const size_t tidx = blockIdx.x * blockDim.x + threadIdx.x;
if (tidx >= num)
return;
unsigned int val = in_val[tidx];
unsigned int pos = in_pos[tidx];
size_t new_index = index[tidx];
out_val[new_index] = val;
out_pos[new_index] = pos;
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
/****************************************************************************
* You can use the code below to help with debugging, but make sure to *
* comment it out again before submitting your assignment for grading, *
* otherwise this code will take too much time and make it seem like your *
* GPU implementation isn't fast enough. *
* *
* This code MUST RUN BEFORE YOUR CODE in case you accidentally change *
* the input values when implementing your radix sort. *
* *
* This code performs the reference radix sort on the host and compares your *
* sorted values to the reference. *
* *
* Thrust containers are used for copying memory from the GPU *
* ************************************************************************* */
/*
thrust::host_vector<unsigned int> h_inputVals(thrust::device_ptr<unsigned int>(d_inputVals),
thrust::device_ptr<unsigned int>(d_inputVals) + numElems);
thrust::host_vector<unsigned int> h_inputPos(thrust::device_ptr<unsigned int>(d_inputPos),
thrust::device_ptr<unsigned int>(d_inputPos) + numElems);
thrust::host_vector<unsigned int> h_outputVals(numElems);
thrust::host_vector<unsigned int> h_outputPos(numElems);
reference_calculation(&h_inputVals[0], &h_inputPos[0],
&h_outputVals[0], &h_outputPos[0],
numElems);
*/
//PUT YOUR SORT HERE
unsigned int *d_scan_ping;
checkCudaErrors(hipMalloc(&d_scan_ping, sizeof(unsigned int) * numElems));
unsigned int *d_scan_pong;
checkCudaErrors(hipMalloc(&d_scan_pong, sizeof(unsigned int) * numElems));
unsigned int *d_index;
checkCudaErrors(hipMalloc(&d_index, sizeof(unsigned int) * numElems));
const dim3 blockSize(1024);
const dim3 gridSize((numElems + 1023) / 1024);
unsigned int *d_in_val = d_inputVals;
unsigned int *d_in_pos = d_inputPos;
unsigned int *d_out_val = d_outputVals;
unsigned int *d_out_pos = d_outputPos;
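// LSB radix sort: one pass per bit, scattering the 0-bit keys first and the 1-bit keys after them,
// ping-ponging between the input and output buffers each pass.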
for (unsigned int bit = 0; bit < 32; ++bit) {
// 0
hipLaunchKernelGGL(( Predicate), dim3(gridSize), dim3(blockSize), 0, 0, d_in_val,
numElems,
bit,
0,
d_scan_ping);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
for (unsigned int width = 1; width < numElems; width <<= 1) {
hipLaunchKernelGGL(( ScanStep), dim3(gridSize), dim3(blockSize), 0, 0, d_scan_ping,
d_scan_pong,
numElems,
width);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Swap. d_scan_ping has the final scan results.
unsigned int *t = d_scan_ping;
d_scan_ping = d_scan_pong;
d_scan_pong = t;
}
unsigned int h_numZeros;
checkCudaErrors(hipMemcpy(&h_numZeros,
&d_scan_ping[numElems - 1],
sizeof(unsigned int),
hipMemcpyDeviceToHost));
//printf("#0s = %d\n", h_numZeros);
hipLaunchKernelGGL(( ComputeNewIndex), dim3(gridSize), dim3(blockSize), 0, 0, d_in_val,
numElems,
bit,
0,
0,
d_scan_ping,
d_index);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// 1
hipLaunchKernelGGL(( Predicate), dim3(gridSize), dim3(blockSize), 0, 0, d_in_val,
numElems,
bit,
(1 << bit),
d_scan_ping);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
for (unsigned int width = 1; width < numElems; width <<= 1) {
hipLaunchKernelGGL(( ScanStep), dim3(gridSize), dim3(blockSize), 0, 0, d_scan_ping,
d_scan_pong,
numElems,
width);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Swap. d_scan_ping has the final scan results.
unsigned int *t = d_scan_ping;
d_scan_ping = d_scan_pong;
d_scan_pong = t;
}
/*
unsigned int h_numOnes;
checkCudaErrors(hipMemcpy(&h_numOnes, &d_scan_ping[numElems - 1], sizeof(unsigned int), hipMemcpyDeviceToHost));
printf("#1s = %d\n", h_numOnes);
printf("total = %d(%d)\n", numElems, h_numZeros + h_numOnes);
*/
hipLaunchKernelGGL(( ComputeNewIndex), dim3(gridSize), dim3(blockSize), 0, 0, d_in_val,
numElems,
bit,
(1 << bit),
h_numZeros,
d_scan_ping,
d_index);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Move
hipLaunchKernelGGL(( Move), dim3(gridSize), dim3(blockSize), 0, 0, d_in_val, d_in_pos, d_out_val, d_out_pos,
d_index, numElems);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Swap input and output pointers
unsigned int *t = d_in_val;
d_in_val = d_out_val;
d_out_val = t;
t = d_in_pos;
d_in_pos = d_out_pos;
d_out_pos = t;
}
// After 32 passes (an even number of buffer swaps) the sorted data is back in d_input*; copy it into d_output*.
checkCudaErrors(hipMemcpy(d_outputVals,
d_inputVals,
sizeof(unsigned int) * numElems,
hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(d_outputPos,
d_inputPos,
sizeof(unsigned int) * numElems,
hipMemcpyDeviceToDevice));
checkCudaErrors(hipFree(d_scan_ping));
checkCudaErrors(hipFree(d_scan_pong));
checkCudaErrors(hipFree(d_index));
/* *********************************************************************** *
* Uncomment the code below to do the correctness checking between your *
* result and the reference. *
**************************************************************************/
/*
thrust::host_vector<unsigned int> h_yourOutputVals(thrust::device_ptr<unsigned int>(d_outputVals),
thrust::device_ptr<unsigned int>(d_outputVals) + numElems);
thrust::host_vector<unsigned int> h_yourOutputPos(thrust::device_ptr<unsigned int>(d_outputPos),
thrust::device_ptr<unsigned int>(d_outputPos) + numElems);
checkResultsExact(&h_outputVals[0], &h_yourOutputVals[0], numElems);
checkResultsExact(&h_outputPos[0], &h_yourOutputPos[0], numElems);
*/
}
| 8d91cdd53db014dc9259518c3f29016ecd7877c6.cu | //Udacity HW 4
//Radix Sorting
#include "reference_calc.cpp"
#include "utils.h"
#include <thrust/host_vector.h>
#include <cstdio>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
__global__ void Predicate(const unsigned int* const key,
const size_t num,
const unsigned int bit,
const unsigned int value,
unsigned int* const predicate) {
const size_t tidx = blockIdx.x * blockDim.x + threadIdx.x;
if (tidx >= num)
return;
if ((key[tidx] & (1 << bit)) == value)
predicate[tidx] = 1;
else
predicate[tidx] = 0;
}
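// One step of a Hillis-Steele inclusive scan: each element adds in the value `width` positions to its left.
// The caller doubles `width` every call and ping-pongs the input/output buffers.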
__global__ void ScanStep(const unsigned int* const in,
unsigned int* const out,
const size_t num,
const size_t width) {
const size_t tidx = blockIdx.x * blockDim.x + threadIdx.x;
if (tidx >= num)
return;
unsigned int s = in[tidx];
if (tidx >= width)
s += in[tidx - width];
out[tidx] = s;
}
__global__ void ComputeNewIndex(const unsigned int* const key,
const size_t num,
const unsigned int bit,
const unsigned int value,
const unsigned int base,
const unsigned int* const offset,
unsigned int* const index) {
const size_t tidx = blockIdx.x * blockDim.x + threadIdx.x;
if (tidx >= num)
return;
if ((key[tidx] & (1 << bit)) == value)
index[tidx] = base + offset[tidx] - 1; // inclusive -> exclusive
}
__global__ void Move(const unsigned int* const in_val,
const unsigned int* const in_pos,
unsigned int* const out_val,
unsigned int* const out_pos,
const unsigned int* const index,
const size_t num) {
const size_t tidx = blockIdx.x * blockDim.x + threadIdx.x;
if (tidx >= num)
return;
unsigned int val = in_val[tidx];
unsigned int pos = in_pos[tidx];
size_t new_index = index[tidx];
out_val[new_index] = val;
out_pos[new_index] = pos;
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
/****************************************************************************
* You can use the code below to help with debugging, but make sure to *
* comment it out again before submitting your assignment for grading, *
* otherwise this code will take too much time and make it seem like your *
* GPU implementation isn't fast enough. *
* *
* This code MUST RUN BEFORE YOUR CODE in case you accidentally change *
* the input values when implementing your radix sort. *
* *
* This code performs the reference radix sort on the host and compares your *
* sorted values to the reference. *
* *
* Thrust containers are used for copying memory from the GPU *
* ************************************************************************* */
/*
thrust::host_vector<unsigned int> h_inputVals(thrust::device_ptr<unsigned int>(d_inputVals),
thrust::device_ptr<unsigned int>(d_inputVals) + numElems);
thrust::host_vector<unsigned int> h_inputPos(thrust::device_ptr<unsigned int>(d_inputPos),
thrust::device_ptr<unsigned int>(d_inputPos) + numElems);
thrust::host_vector<unsigned int> h_outputVals(numElems);
thrust::host_vector<unsigned int> h_outputPos(numElems);
reference_calculation(&h_inputVals[0], &h_inputPos[0],
&h_outputVals[0], &h_outputPos[0],
numElems);
*/
//PUT YOUR SORT HERE
unsigned int *d_scan_ping;
checkCudaErrors(cudaMalloc(&d_scan_ping, sizeof(unsigned int) * numElems));
unsigned int *d_scan_pong;
checkCudaErrors(cudaMalloc(&d_scan_pong, sizeof(unsigned int) * numElems));
unsigned int *d_index;
checkCudaErrors(cudaMalloc(&d_index, sizeof(unsigned int) * numElems));
const dim3 blockSize(1024);
const dim3 gridSize((numElems + 1023) / 1024);
unsigned int *d_in_val = d_inputVals;
unsigned int *d_in_pos = d_inputPos;
unsigned int *d_out_val = d_outputVals;
unsigned int *d_out_pos = d_outputPos;
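// LSB radix sort: one pass per bit, scattering the 0-bit keys first and the 1-bit keys after them,
// ping-ponging between the input and output buffers each pass.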
for (unsigned int bit = 0; bit < 32; ++bit) {
// 0
Predicate<<<gridSize, blockSize>>>(d_in_val,
numElems,
bit,
0,
d_scan_ping);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
for (unsigned int width = 1; width < numElems; width <<= 1) {
ScanStep<<<gridSize, blockSize>>>(d_scan_ping,
d_scan_pong,
numElems,
width);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Swap. d_scan_ping has the final scan results.
unsigned int *t = d_scan_ping;
d_scan_ping = d_scan_pong;
d_scan_pong = t;
}
unsigned int h_numZeros;
checkCudaErrors(cudaMemcpy(&h_numZeros,
&d_scan_ping[numElems - 1],
sizeof(unsigned int),
cudaMemcpyDeviceToHost));
//printf("#0s = %d\n", h_numZeros);
ComputeNewIndex<<<gridSize, blockSize>>>(d_in_val,
numElems,
bit,
0,
0,
d_scan_ping,
d_index);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// 1
Predicate<<<gridSize, blockSize>>>(d_in_val,
numElems,
bit,
(1 << bit),
d_scan_ping);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
for (unsigned int width = 1; width < numElems; width <<= 1) {
ScanStep<<<gridSize, blockSize>>>(d_scan_ping,
d_scan_pong,
numElems,
width);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Swap. d_scan_ping has the final scan results.
unsigned int *t = d_scan_ping;
d_scan_ping = d_scan_pong;
d_scan_pong = t;
}
/*
unsigned int h_numOnes;
checkCudaErrors(cudaMemcpy(&h_numOnes, &d_scan_ping[numElems - 1], sizeof(unsigned int), cudaMemcpyDeviceToHost));
printf("#1s = %d\n", h_numOnes);
printf("total = %d(%d)\n", numElems, h_numZeros + h_numOnes);
*/
ComputeNewIndex<<<gridSize, blockSize>>>(d_in_val,
numElems,
bit,
(1 << bit),
h_numZeros,
d_scan_ping,
d_index);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Move
Move<<<gridSize, blockSize>>>(d_in_val, d_in_pos, d_out_val, d_out_pos,
d_index, numElems);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Swap input and output pointers
unsigned int *t = d_in_val;
d_in_val = d_out_val;
d_out_val = t;
t = d_in_pos;
d_in_pos = d_out_pos;
d_out_pos = t;
}
// After 32 passes (an even number of buffer swaps) the sorted data is back in d_input*; copy it into d_output*.
checkCudaErrors(cudaMemcpy(d_outputVals,
d_inputVals,
sizeof(unsigned int) * numElems,
cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(d_outputPos,
d_inputPos,
sizeof(unsigned int) * numElems,
cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaFree(d_scan_ping));
checkCudaErrors(cudaFree(d_scan_pong));
checkCudaErrors(cudaFree(d_index));
/* *********************************************************************** *
* Uncomment the code below to do the correctness checking between your *
* result and the reference. *
**************************************************************************/
/*
thrust::host_vector<unsigned int> h_yourOutputVals(thrust::device_ptr<unsigned int>(d_outputVals),
thrust::device_ptr<unsigned int>(d_outputVals) + numElems);
thrust::host_vector<unsigned int> h_yourOutputPos(thrust::device_ptr<unsigned int>(d_outputPos),
thrust::device_ptr<unsigned int>(d_outputPos) + numElems);
checkResultsExact(&h_outputVals[0], &h_yourOutputVals[0], numElems);
checkResultsExact(&h_outputPos[0], &h_yourOutputPos[0], numElems);
*/
}
|
495f6728ce71e679aca6de0c15d2212a0026ef4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************//**
* \file FadlunEtAlSolver.cu
* \author Anush Krishnan ([email protected])
* \brief Implementation of the methods of the class \c FadlunEtAlSolver.
*/
#include "FadlunEtAlSolver.h"
#include <sys/stat.h>
#include <solvers/NavierStokes/kernels/generateQT.h>
/**
* \brief Constructor. Copies the database and information about the
* computational grid.
*/
template <typename memoryType>
FadlunEtAlSolver<memoryType>::FadlunEtAlSolver(parameterDB *pDB, domain *dInfo)
{
NavierStokesSolver<memoryType>::paramDB = pDB;
NavierStokesSolver<memoryType>::domInfo = dInfo;
}
/**
* \brief Generates the matrix \c QT, Q and G for FadlunEtAlSolver.
*/
template <typename memoryType>
void FadlunEtAlSolver<memoryType>::generateQT()
{
NavierStokesSolver<memoryType>::generateQT();
G = NavierStokesSolver<memoryType>::Q;
updateG();
}
/**
* \brief The host function for updateG() is incomplete.
*/
template <>
void FadlunEtAlSolver<host_memory>::updateG()
{
}
/**
* \brief Zeros rows of G that correspond to the forcing nodes.
*
* Calls the function updateQ, but passes the matrix G to it.
*/
template <>
void FadlunEtAlSolver<device_memory>::updateG()
{
const int blocksize = 256;
int nx = domInfo->nx,
ny = domInfo->ny;
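// GSize = number of non-zero entries in G (same sparsity pattern as Q)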
int GSize = 4*nx*ny-2*(nx+ny);
int *GRows = thrust::raw_pointer_cast(&(G.row_indices[0])),
*GCols = thrust::raw_pointer_cast(&(G.column_indices[0]));
int *tags_r = thrust::raw_pointer_cast(&(tagsD[0]));
real *GVals = thrust::raw_pointer_cast(&(G.values[0]));
dim3 dimGrid( int((GSize-0.5)/blocksize) + 1, 1);
dim3 dimBlock(blocksize, 1);
hipLaunchKernelGGL(( kernels::updateQ) , dim3(dimGrid), dim3(dimBlock), 0, 0, GRows, GCols, GVals, GSize, tags_r);
}
/**
* \brief Add the pressure gradient to the right hand side of the
* momentum equation.
*/
template <typename memoryType>
void FadlunEtAlSolver<memoryType>::calculateExplicitLambdaTerms()
{
// temp_1 = G.pressure
cusp::multiply(G, DirectForcingSolver<memoryType>::pressure, NavierStokesSolver<memoryType>::temp1);
// r^n = r^n - temp_1
cusp::blas::axpy(NavierStokesSolver<memoryType>::temp1, NavierStokesSolver<memoryType>::rn, -1.0);
}
/**
* \brief Generates the matrix for the Poisson equation.
*
* Calls the function from NavierStokesSolver because it does not need to set
* any node inside the immersed boundary to zero.
*/
template <typename memoryType>
void FadlunEtAlSolver<memoryType>::generateC()
{
NavierStokesSolver<memoryType>::generateC();
}
// specialization of the class
template class FadlunEtAlSolver<host_memory>;
template class FadlunEtAlSolver<device_memory>;
/******************************************************************************/
/**
* \brief Constructor. Copies the database and information about the
* computational grid.
*/
template <typename memoryType>
FEAModifiedSolver<memoryType>::FEAModifiedSolver(parameterDB *pDB, domain *dInfo)
{
NavierStokesSolver<memoryType>::paramDB = pDB;
NavierStokesSolver<memoryType>::domInfo = dInfo;
}
/**
* \brief Generates the matrices QT and Q, which are the same as if an
* immersed boundary is not present in the fluid.
*/
template <typename memoryType>
void FEAModifiedSolver<memoryType>::generateQT()
{
NavierStokesSolver<memoryType>::generateQT();
}
/**
* \brief Generates the matrix for the Poisson equation.
*
* Calls the function from NavierStokesSolver because it does not need to set
* any node inside the immersed boundary to zero.
*/
template <typename memoryType>
void FEAModifiedSolver<memoryType>::generateC()
{
NavierStokesSolver<memoryType>::generateC();
}
template class FEAModifiedSolver<host_memory>;
template class FEAModifiedSolver<device_memory>;
| 495f6728ce71e679aca6de0c15d2212a0026ef4b.cu | /***************************************************************************//**
* \file FadlunEtAlSolver.cu
* \author Anush Krishnan ([email protected])
* \brief Implementation of the methods of the class \c FadlunEtAlSolver.
*/
#include "FadlunEtAlSolver.h"
#include <sys/stat.h>
#include <solvers/NavierStokes/kernels/generateQT.h>
/**
* \brief Constructor. Copies the database and information about the
* computational grid.
*/
template <typename memoryType>
FadlunEtAlSolver<memoryType>::FadlunEtAlSolver(parameterDB *pDB, domain *dInfo)
{
NavierStokesSolver<memoryType>::paramDB = pDB;
NavierStokesSolver<memoryType>::domInfo = dInfo;
}
/**
* \brief Generates the matrix \c QT, Q and G for FadlunEtAlSolver.
*/
template <typename memoryType>
void FadlunEtAlSolver<memoryType>::generateQT()
{
NavierStokesSolver<memoryType>::generateQT();
G = NavierStokesSolver<memoryType>::Q;
updateG();
}
/**
* \brief The host function for updateG() is incomplete.
*/
template <>
void FadlunEtAlSolver<host_memory>::updateG()
{
}
/**
* \brief Zeros rows of G that correspond to the forcing nodes.
*
* Calls the function updateQ, but passes the matrix G to it.
*/
template <>
void FadlunEtAlSolver<device_memory>::updateG()
{
const int blocksize = 256;
int nx = domInfo->nx,
ny = domInfo->ny;
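// GSize = number of non-zero entries in G (same sparsity pattern as Q)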
int GSize = 4*nx*ny-2*(nx+ny);
int *GRows = thrust::raw_pointer_cast(&(G.row_indices[0])),
*GCols = thrust::raw_pointer_cast(&(G.column_indices[0]));
int *tags_r = thrust::raw_pointer_cast(&(tagsD[0]));
real *GVals = thrust::raw_pointer_cast(&(G.values[0]));
dim3 dimGrid( int((GSize-0.5)/blocksize) + 1, 1);
dim3 dimBlock(blocksize, 1);
kernels::updateQ <<<dimGrid, dimBlock>>> (GRows, GCols, GVals, GSize, tags_r);
}
/**
* \brief Add the pressure gradient to the right hand side of the
* momentum equation.
*/
template <typename memoryType>
void FadlunEtAlSolver<memoryType>::calculateExplicitLambdaTerms()
{
// temp_1 = G.pressure
cusp::multiply(G, DirectForcingSolver<memoryType>::pressure, NavierStokesSolver<memoryType>::temp1);
// r^n = r^n - temp_1
cusp::blas::axpy(NavierStokesSolver<memoryType>::temp1, NavierStokesSolver<memoryType>::rn, -1.0);
}
/**
* \brief Generates the matrix for the Poisson equation.
*
* Calls the function from NavierStokesSolver because it does not need to set
* any node inside the immersed boundary to zero.
*/
template <typename memoryType>
void FadlunEtAlSolver<memoryType>::generateC()
{
NavierStokesSolver<memoryType>::generateC();
}
// specialization of the class
template class FadlunEtAlSolver<host_memory>;
template class FadlunEtAlSolver<device_memory>;
/******************************************************************************/
/**
* \brief Constructor. Copies the database and information about the
* computational grid.
*/
template <typename memoryType>
FEAModifiedSolver<memoryType>::FEAModifiedSolver(parameterDB *pDB, domain *dInfo)
{
NavierStokesSolver<memoryType>::paramDB = pDB;
NavierStokesSolver<memoryType>::domInfo = dInfo;
}
/**
* \brief Generates the matrices QT and Q, which are the same as if an
* immersed boundary is not present in the fluid.
*/
template <typename memoryType>
void FEAModifiedSolver<memoryType>::generateQT()
{
NavierStokesSolver<memoryType>::generateQT();
}
/**
* \brief Generates the matrix for the Poisson equation.
*
* Calls the function from NavierStokesSolver because it does not need to set
* any node inside the immersed boundary to zero.
*/
template <typename memoryType>
void FEAModifiedSolver<memoryType>::generateC()
{
NavierStokesSolver<memoryType>::generateC();
}
template class FEAModifiedSolver<host_memory>;
template class FEAModifiedSolver<device_memory>;
|
42a907a5f83c7f19a3de787c3d9724b533530679.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by DY on 17-8-5.
//
#ifndef NLP_CUDA_CU_SPARSE_MATRIX_H
#define NLP_CUDA_CU_SPARSE_MATRIX_H
#include "SparseMatrix.h"
#include "CuSparseMatrixHeader.cu"
template <typename T>
CuSparseMatrix<T>& CuSparseMatrix<T>::operator=(const SparseMatrix<T>& o) {
assert(rows==o.rows && nnz==o.nnz);
this->rows = o.rows;
this->cols = o.cols;
this->nnz = o.nnz;
checkCudaErrors(hipMemcpy(row_ptr, o.row_ptr, sizeof(int)*(o.rows + 1), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(index, o.index, sizeof(int)*o.nnz, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(data, o.data, sizeof(T)*o.nnz, hipMemcpyHostToDevice));
this->needFree = true;
return *this;
};
template <typename T>
CuSparseMatrix<T> CuSparseMatrix<T>::operator~() {
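// transpose by copying to a host-side SparseMatrix, transposing there, and assigning the result back to the device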
SparseMatrix<T> h = *this;
*this = ~h;
return *this;
};
template <typename T>
CuDenseMatrix<T> CuSparseMatrix<T>::dp(const CuDenseMatrix<T>& o) {
// TODO: sparse-dense product not implemented yet; return an empty matrix so the function has a defined result
CuDenseMatrix<T> C;
return C;
};
template <typename T>
__global__
void printKernel(CuSparseMatrix<T> m, int rows, int cols) {
printf("CuSparseMatrix[rows=%d, cols=%d, nnz=%d]:\n", m.rows, m.cols, m.nnz);
for (int i = 0; i < min(m.rows, rows); ++i) {
int c = 0;
for (int j = m.row_ptr[i]; j < m.row_ptr[i + 1] && c < cols; ++j, ++c) {
printf("(%d, %.3e)\t", m.index[j], m.data[j]);
}
printf("\n");
}
printf("--------------------------------");
}
template <typename T>
void CuSparseMatrix<T>::print(int rows, int cols) {
hipLaunchKernelGGL(( printKernel), dim3(1), dim3(1), 0, 0, *this, rows, cols);
checkCudaErrors(hipDeviceSynchronize());
}
#endif
| 42a907a5f83c7f19a3de787c3d9724b533530679.cu | //
// Created by DY on 17-8-5.
//
#ifndef NLP_CUDA_CU_SPARSE_MATRIX_H
#define NLP_CUDA_CU_SPARSE_MATRIX_H
#include "SparseMatrix.h"
#include "CuSparseMatrixHeader.cu"
template <typename T>
CuSparseMatrix<T>& CuSparseMatrix<T>::operator=(const SparseMatrix<T>& o) {
assert(rows==o.rows && nnz==o.nnz);
this->rows = o.rows;
this->cols = o.cols;
this->nnz = o.nnz;
checkCudaErrors(cudaMemcpy(row_ptr, o.row_ptr, sizeof(int)*(o.rows + 1), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(index, o.index, sizeof(int)*o.nnz, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(data, o.data, sizeof(T)*o.nnz, cudaMemcpyHostToDevice));
this->needFree = true;
return *this;
};
template <typename T>
CuSparseMatrix<T> CuSparseMatrix<T>::operator~() {
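// transpose by copying to a host-side SparseMatrix, transposing there, and assigning the result back to the device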
SparseMatrix<T> h = *this;
*this = ~h;
return *this;
};
template <typename T>
CuDenseMatrix<T> CuSparseMatrix<T>::dp(const CuDenseMatrix<T>& o) {
// TODO: sparse-dense product not implemented yet; return an empty matrix so the function has a defined result
CuDenseMatrix<T> C;
return C;
};
template <typename T>
__global__
void printKernel(CuSparseMatrix<T> m, int rows, int cols) {
printf("CuSparseMatrix[rows=%d, cols=%d, nnz=%d]:\n", m.rows, m.cols, m.nnz);
for (int i = 0; i < min(m.rows, rows); ++i) {
int c = 0;
for (int j = m.row_ptr[i]; j < m.row_ptr[i + 1] && c < cols; ++j, ++c) {
printf("(%d, %.3e)\t", m.index[j], m.data[j]);
}
printf("\n");
}
printf("--------------------------------");
}
template <typename T>
void CuSparseMatrix<T>::print(int rows, int cols) {
printKernel<<<1, 1>>>(*this, rows, cols);
checkCudaErrors(cudaDeviceSynchronize());
}
#endif
|
fcf2af39200d0b06f12decfdea2542ae82a93f66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
{
__global__ void vcopyshift(const int n, const int shift, const double *a, double *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<n)
{
b[i+shift] = a[i];
}
}
} | fcf2af39200d0b06f12decfdea2542ae82a93f66.cu | extern "C"
{
__global__ void vcopyshift(const int n, const int shift, const double *a, double *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<n)
{
b[i+shift] = a[i];
}
}
} |
aa041328a2462f2805ee71a50344bcd2df8c251e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <math.h>
__global__ void matrixMultiplicationKernel(int* A,int* B,int* C,int N)
{
int ROW = blockIdx.y*blockDim.y+threadIdx.y;
int COL = blockIdx.x*blockDim.x+threadIdx.x;
int tmp_sum = 0; // accumulate in int: A, B and C are integer matrices
if(ROW < N && COL < N){
for(int i=0;i<N;i++){
tmp_sum += A[ROW*N+i] *B[i*N+COL];
}
// keep the store inside the bounds check so out-of-range threads never write to C
C[ROW*N+COL] = tmp_sum;
}
}
void matrixMultiplication(int* A,int* B,int* C,int N);
int main()
{
int N = 16;
//Host i/o vectors
int *h_A;
int *h_B;
int *h_C;
//Device i/o vector
int *d_A;
int *d_B;
int *d_C;
size_t bytes = N*N*sizeof(int);
h_A = (int*)malloc(bytes);
h_B = (int*)malloc(bytes);
h_C = (int*)malloc(bytes);
hipMalloc(&d_A,bytes);
hipMalloc(&d_B,bytes);
hipMalloc(&d_C,bytes);
// Initialize matricies on the host
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
h_A[i*N+j] = 2;
h_B[i*N+j] = 3;
}
}
//Copy host vectors to device
hipMemcpy(d_A,h_A,bytes,hipMemcpyHostToDevice);
hipMemcpy(d_B,h_B,bytes,hipMemcpyHostToDevice);
matrixMultiplication(d_A,d_B,d_C,N);
hipMemcpy(h_C,d_C,bytes,hipMemcpyDeviceToHost);
//check result
for(int i=0;i<4;i++){
for(int j=0;j<4;j++){
printf(" %d",h_C[i*N+j]);
}
printf("\n");
}
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
//free host memory
free(h_A);
free(h_B);
free(h_C);
return 0;
}
void matrixMultiplication(int* A,int* B,int* C,int N)
{
dim3 threadsPerBlock(N,N);
dim3 blocksPerGrid(1,1);
if(N*N>512){
// a thread block is limited to 1024 threads, so use 32x32 tiles for larger N
threadsPerBlock.x = 32;
threadsPerBlock.y = 32;
blocksPerGrid.x = ceil(int(N)/double(threadsPerBlock.x));
blocksPerGrid.y = ceil(int(N)/double(threadsPerBlock.y));
}
hipLaunchKernelGGL(( matrixMultiplicationKernel), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, A,B,C,N);
}
| aa041328a2462f2805ee71a50344bcd2df8c251e.cu | #include <iostream>
#include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
#include <math.h>
__global__ void matrixMultiplicationKernel(int* A,int* B,int* C,int N)
{
int ROW = blockIdx.y*blockDim.y+threadIdx.y;
int COL = blockIdx.x*blockDim.x+threadIdx.x;
float tmp_sum = 0.0f;
    if(ROW < N && COL < N){
        for(int i=0;i<N;i++){
            tmp_sum += A[ROW*N+i] *B[i*N+COL];
        }
        // keep the store inside the bounds check so out-of-range threads never write C
        C[ROW*N+COL] = tmp_sum;
    }
}
void matrixMultiplication(int* A,int* B,int* C,int N);
int main()
{
int N = 16;
//Host i/o vectors
int *h_A;
int *h_B;
int *h_C;
//Device i/o vector
int *d_A;
int *d_B;
int *d_C;
size_t bytes = N*N*sizeof(int);
h_A = (int*)malloc(bytes);
h_B = (int*)malloc(bytes);
h_C = (int*)malloc(bytes);
cudaMalloc(&d_A,bytes);
cudaMalloc(&d_B,bytes);
cudaMalloc(&d_C,bytes);
	// Initialize matrices on the host
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
h_A[i*N+j] = 2;
h_B[i*N+j] = 3;
}
}
//Copy host vectors to device
cudaMemcpy(d_A,h_A,bytes,cudaMemcpyHostToDevice);
cudaMemcpy(d_B,h_B,bytes,cudaMemcpyHostToDevice);
matrixMultiplication(d_A,d_B,d_C,N);
cudaMemcpy(h_C,d_C,bytes,cudaMemcpyDeviceToHost);
//check result
for(int i=0;i<4;i++){
for(int j=0;j<4;j++){
printf(" %d",h_C[i*N+j]);
}
printf("\n");
}
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
//free host memory
free(h_A);
free(h_B);
free(h_C);
return 0;
}
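// Launch-configuration helper: one thread is mapped to each element of C.
// For N*N <= 512 a single N x N block suffices; larger sizes are meant to fall
// back to a grid of smaller blocks, since a single block is capped at 1024 threads.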
void matrixMultiplication(int* A,int* B,int* C,int N)
{
dim3 threadsPerBlock(N,N);
dim3 blocksPerGrid(1,1);
if(N*N>512){
		threadsPerBlock.x = 32;
		threadsPerBlock.y = 32;
blocksPerGrid.x = ceil(int(N)/double(threadsPerBlock.x));
blocksPerGrid.y = ceil(int(N)/double(threadsPerBlock.y));
}
matrixMultiplicationKernel<<<blocksPerGrid,threadsPerBlock>>>(A,B,C,N);
}
|
e57e20266d01cc65d9f0cfbce1261fd9db3ee69a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define NSTREAM 4
#define BDIM 128
void printArray(float *a, int size){
for (int i = 0; i < size; i++){
if(i % 128 == 0)
printf("\n");
printf("%.0f ", a[i]);
}
printf("\n\n");
}
__global__ void sumArrays(float *A, float *B, float *C, const int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
if( idx == N-1)
C[idx] = A[idx+2] + B[idx+2];
else
C[idx] = A[idx] + B[idx];
}
}
int main(int argc, char **argv)
{
printf("> %s Starting...\n", argv[0]);
// set up data size of vectors
int nElem = 1 << 9;
printf("> vector size = %d\n", nElem);
size_t nBytes = nElem * sizeof(float);
// malloc pinned host memory for async memcpy
float *h_A, *h_B, *gpuRef;
hipHostMalloc((void**)&h_A, nBytes, hipHostMallocDefault);
hipHostMalloc((void**)&h_B, nBytes, hipHostMallocDefault);
hipHostMalloc((void**)&gpuRef, nBytes, hipHostMallocDefault);
// initialize data at host side
for (int i = 0; i < nElem; i++)
{
h_A[i] = h_B[i] = i;
}
memset(gpuRef, 0, nBytes);
// malloc device global memory
float *d_A, *d_B, *d_C;
hipMalloc((float**)&d_A, nBytes);
hipMalloc((float**)&d_B, nBytes);
hipMalloc((float**)&d_C, nBytes);
// invoke kernel at host side
dim3 block (BDIM);
dim3 grid ((nElem + block.x - 1) / block.x);
printf("> grid (%d, %d) block (%d, %d)\n", grid.x, grid.y, block.x,
block.y);
// sequential operation
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sumArrays), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem);
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
printf("\n");
printArray(gpuRef, nElem);
// grid parallel operation
int iElem = nElem / NSTREAM;
size_t iBytes = iElem * sizeof(float);
grid.x = (iElem + block.x - 1) / block.x;
hipStream_t stream[NSTREAM];
for (int i = 0; i < NSTREAM; ++i)
{
hipStreamCreate(&stream[i]);
}
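    // Each of the NSTREAM streams gets a contiguous chunk of nElem/NSTREAM elements;
    // issuing its H2D copy, kernel and D2H copy back-to-back (depth-first) lets the
    // copies of one chunk overlap with compute on another. The pinned host buffers
    // allocated above are what make these async copies actually asynchronous.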
// initiate all work on the device asynchronously in depth-first order
for (int i = 0; i < NSTREAM; ++i)
{
int ioffset = i * iElem;
hipMemcpyAsync(&d_A[ioffset], &h_A[ioffset], iBytes,
hipMemcpyHostToDevice, stream[i]);
hipMemcpyAsync(&d_B[ioffset], &h_B[ioffset], iBytes,
hipMemcpyHostToDevice, stream[i]);
hipLaunchKernelGGL(( sumArrays), dim3(grid), dim3(block), 0, stream[i], &d_A[ioffset], &d_B[ioffset],
&d_C[ioffset], iElem);
hipMemcpyAsync(&gpuRef[ioffset], &d_C[ioffset], iBytes,
hipMemcpyDeviceToHost, stream[i]);
}
// check kernel error
hipGetLastError();
// free device global memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// free host memory
hipHostFree(h_A);
hipHostFree(h_B);
hipHostFree(gpuRef);
// destroy streams
for (int i = 0; i < NSTREAM; ++i)
{
hipStreamDestroy(stream[i]);
}
hipDeviceReset();
return(0);
} | e57e20266d01cc65d9f0cfbce1261fd9db3ee69a.cu | #include <stdio.h>
#define NSTREAM 4
#define BDIM 128
void printArray(float *a, int size){
for (int i = 0; i < size; i++){
if(i % 128 == 0)
printf("\n");
printf("%.0f ", a[i]);
}
printf("\n\n");
}
__global__ void sumArrays(float *A, float *B, float *C, const int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
if( idx == N-1)
C[idx] = A[idx+2] + B[idx+2];
else
C[idx] = A[idx] + B[idx];
}
}
int main(int argc, char **argv)
{
printf("> %s Starting...\n", argv[0]);
// set up data size of vectors
int nElem = 1 << 9;
printf("> vector size = %d\n", nElem);
size_t nBytes = nElem * sizeof(float);
// malloc pinned host memory for async memcpy
float *h_A, *h_B, *gpuRef;
cudaHostAlloc((void**)&h_A, nBytes, cudaHostAllocDefault);
cudaHostAlloc((void**)&h_B, nBytes, cudaHostAllocDefault);
cudaHostAlloc((void**)&gpuRef, nBytes, cudaHostAllocDefault);
// initialize data at host side
for (int i = 0; i < nElem; i++)
{
h_A[i] = h_B[i] = i;
}
memset(gpuRef, 0, nBytes);
// malloc device global memory
float *d_A, *d_B, *d_C;
cudaMalloc((float**)&d_A, nBytes);
cudaMalloc((float**)&d_B, nBytes);
cudaMalloc((float**)&d_C, nBytes);
// invoke kernel at host side
dim3 block (BDIM);
dim3 grid ((nElem + block.x - 1) / block.x);
printf("> grid (%d, %d) block (%d, %d)\n", grid.x, grid.y, block.x,
block.y);
// sequential operation
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
sumArrays<<<grid, block>>>(d_A, d_B, d_C, nElem);
cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
printf("\n");
printArray(gpuRef, nElem);
// grid parallel operation
int iElem = nElem / NSTREAM;
size_t iBytes = iElem * sizeof(float);
grid.x = (iElem + block.x - 1) / block.x;
cudaStream_t stream[NSTREAM];
for (int i = 0; i < NSTREAM; ++i)
{
cudaStreamCreate(&stream[i]);
}
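    // Each of the NSTREAM streams gets a contiguous chunk of nElem/NSTREAM elements;
    // issuing its H2D copy, kernel and D2H copy back-to-back (depth-first) lets the
    // copies of one chunk overlap with compute on another. The pinned host buffers
    // allocated above are what make these async copies actually asynchronous.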
// initiate all work on the device asynchronously in depth-first order
for (int i = 0; i < NSTREAM; ++i)
{
int ioffset = i * iElem;
cudaMemcpyAsync(&d_A[ioffset], &h_A[ioffset], iBytes,
cudaMemcpyHostToDevice, stream[i]);
cudaMemcpyAsync(&d_B[ioffset], &h_B[ioffset], iBytes,
cudaMemcpyHostToDevice, stream[i]);
sumArrays<<<grid, block, 0, stream[i]>>>(&d_A[ioffset], &d_B[ioffset],
&d_C[ioffset], iElem);
cudaMemcpyAsync(&gpuRef[ioffset], &d_C[ioffset], iBytes,
cudaMemcpyDeviceToHost, stream[i]);
}
// check kernel error
cudaGetLastError();
// free device global memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// free host memory
cudaFreeHost(h_A);
cudaFreeHost(h_B);
cudaFreeHost(gpuRef);
// destroy streams
for (int i = 0; i < NSTREAM; ++i)
{
cudaStreamDestroy(stream[i]);
}
cudaDeviceReset();
return(0);
} |
6005e050f71b8be5cd506bc3bc3859b1150b0c32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void threshold_and_support(float *vec, int *support, const int n, const float T)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (xIndex < n) {
if (abs(vec[xIndex])<T) {
vec[xIndex] = 0.0f;
support[xIndex]=2;
}
}
} | 6005e050f71b8be5cd506bc3bc3859b1150b0c32.cu | #include "includes.h"
__global__ void threshold_and_support(float *vec, int *support, const int n, const float T)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (xIndex < n) {
if (abs(vec[xIndex])<T) {
vec[xIndex] = 0.0f;
support[xIndex]=2;
}
}
} |
02e2626f315b52cf3897c87af1a8e596878e273d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "transform_fc.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE*sizeof(float));
const float *raw_input = NULL;
hipMalloc(&raw_input, XSIZE*YSIZE*sizeof(float));
const int width = 1;
const int channels = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
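// Timing scheme: one launch plus a device sync to warm up, ten more untimed
// launches, then 1000 launches timed with std::chrono. There is no sync before
// the end timestamp, so the number mostly reflects launch/queueing overhead.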
hipFree(0);
hipLaunchKernelGGL(transform_fc, dim3(gridBlock), dim3(threadBlock), 0, 0, input, raw_input, width, channels);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(transform_fc, dim3(gridBlock), dim3(threadBlock), 0, 0, input, raw_input, width, channels);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(transform_fc, dim3(gridBlock), dim3(threadBlock), 0, 0, input, raw_input, width, channels);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 02e2626f315b52cf3897c87af1a8e596878e273d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "transform_fc.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE*sizeof(float));
const float *raw_input = NULL;
cudaMalloc(&raw_input, XSIZE*YSIZE*sizeof(float));
const int width = 1;
const int channels = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
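// Timing scheme: one launch plus a device sync to warm up, ten more untimed
// launches, then 1000 launches timed with std::chrono. There is no sync before
// the end timestamp, so the number mostly reflects launch/queueing overhead.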
cudaFree(0);
transform_fc<<<gridBlock,threadBlock>>>(input,raw_input,width,channels);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
transform_fc<<<gridBlock,threadBlock>>>(input,raw_input,width,channels);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
transform_fc<<<gridBlock,threadBlock>>>(input,raw_input,width,channels);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7bf351f3853715bb73c9a60991d64a654904d941.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
extern "C"
{
__global__ void GPU_add(
int n,
int* d_a,
int* d_b
)
{
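		// Grid-stride loop: each thread handles element i, then i + blockDim.x*gridDim.x,
		// and so on, so the whole array is covered for any grid size.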
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
d_a[i] += d_b[i];
}
}
}
| 7bf351f3853715bb73c9a60991d64a654904d941.cu | #include <stdio.h>
extern "C"
{
__global__ void GPU_add(
int n,
int* d_a,
int* d_b
)
{
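		// Grid-stride loop: each thread handles element i, then i + blockDim.x*gridDim.x,
		// and so on, so the whole array is covered for any grid size.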
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
d_a[i] += d_b[i];
}
}
}
|
8656f784b21f7809840a807367659dfc91a3256b.hip | // !!! This is a file automatically generated by hipify!!!
#include <pthread.h>
#include <cstdlib>
#include <iostream>
#include <vector>
#define COMPRESSION_BATCH_SIZE 32
using namespace std;
struct ThreadArg {
float *original_data;
long num_elements;
int thread_num;
float ***compressed_data;
bool **compressed_data_taken;
unsigned int *mask;
};
struct CompressedPos {
long compressed_data_batch;
long offset;
};
int n_threads = 8;
int n_compressed_data_batches = 8;
long layer_sizes_alexnet[] = {56l * 56 * 96, 28l * 28 * 96, 27l * 27 * 256,
13l * 13 * 256, 13l * 12 * 384, 13l * 12 * 384,
13l * 13 * 256, 6l * 6 * 256};
bool layer_compress_alexnet[] = {true, true, true, true,
true, true, true, true};
long layer_density_alexnet[] = {50, 80, 40, 60, 70, 70, 30, 60};
int num_layers_alexnet = 8;
long layer_sizes_vgg[] = {224l * 224 * 64, 224l * 224 * 64, 112l * 112 * 64,
112l * 112 * 128, 112l * 112 * 128, 56l * 56 * 128,
56l * 56 * 256, 56l * 56 * 256, 56l * 56 * 256,
28l * 28 * 256, 28l * 28 * 512, 28l * 28 * 512,
28l * 28 * 512, 14l * 14 * 512, 14l * 14 * 512,
14l * 14 * 512, 14l * 14 * 512, 7l * 7 * 512};
long layer_density_vgg[] = {50, 20, 30, 20, 10, 20, 20, 20, 10,
20, 20, 10, 10, 10, 20, 20, 10, 15};
bool layer_compress_vgg[] = {true, true, true, true, true, true,
true, true, true, true, true, true,
true, true, true, true, true, true};
int num_layers_vgg = 18;
// long *layer_sizes = layer_sizes_alexnet;
// bool *layer_compress = layer_compress_alexnet;
// long *layer_density = layer_density_alexnet;
// int num_layers = num_layers_alexnet;
long *layer_sizes = layer_sizes_alexnet;
bool *layer_compress = layer_compress_alexnet;
long *layer_density = layer_density_alexnet;
int num_layers = num_layers_alexnet;
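// Each worker thread compresses its own contiguous 1/n_threads slice of a layer.
// The slice is scanned in batches of COMPRESSION_BATCH_SIZE (32) floats: a 32-bit
// mask records which entries were positive (the first element of a batch lands in
// the most significant bit), and the positive values are appended to lazily
// allocated pinned chunks of compressed_data_batch_size floats.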
void *compressThread(void *arg) {
ThreadArg *thread_arg = (ThreadArg *)arg;
float *original_data = thread_arg->original_data;
float ***compressed_data = thread_arg->compressed_data;
bool **compressed_data_taken = thread_arg->compressed_data_taken;
unsigned int *mask = thread_arg->mask;
int thread_num = thread_arg->thread_num;
long num_elements = thread_arg->num_elements;
long start = thread_num * num_elements / n_threads;
long n_compression_batches =
num_elements / n_threads / COMPRESSION_BATCH_SIZE;
long compressed_data_batch_size =
num_elements / n_threads / n_compressed_data_batches;
hipHostMalloc((void **)&compressed_data[thread_num],
n_compressed_data_batches * sizeof(float *));
hipHostMalloc((void **)&compressed_data_taken[thread_num],
n_compressed_data_batches * sizeof(bool));
for (int i = 0; i < n_compressed_data_batches; i++) {
compressed_data_taken[thread_num][i] = false;
}
CompressedPos current_pos;
current_pos.compressed_data_batch = -1,
current_pos.offset = compressed_data_batch_size;
for (long i = 0; i < n_compression_batches; i++) {
long mask_pos =
(i * COMPRESSION_BATCH_SIZE + start) / COMPRESSION_BATCH_SIZE;
mask[mask_pos] = 0;
for (long j = i * COMPRESSION_BATCH_SIZE + start;
j < (i + 1) * COMPRESSION_BATCH_SIZE + start; j++) {
if (original_data[j] > 0) {
if (current_pos.offset == compressed_data_batch_size) {
hipHostMalloc(
(void **)&compressed_data[thread_num]
[current_pos.compressed_data_batch + 1],
compressed_data_batch_size * sizeof(float));
compressed_data_taken[thread_num]
[current_pos.compressed_data_batch + 1] = true;
current_pos.compressed_data_batch =
current_pos.compressed_data_batch + 1;
current_pos.offset = 0;
}
mask[mask_pos] = (mask[mask_pos] << 1) + 1;
compressed_data[thread_num][current_pos.compressed_data_batch]
[current_pos.offset] = original_data[j];
current_pos.offset += 1;
} else {
mask[mask_pos] = (mask[mask_pos] << 1);
}
}
}
return NULL;
}
void *decompressThread(void *arg) {
ThreadArg *thread_arg = (ThreadArg *)arg;
float *original_data = thread_arg->original_data;
float ***compressed_data = thread_arg->compressed_data;
bool **compressed_data_taken = thread_arg->compressed_data_taken;
unsigned int *mask = thread_arg->mask;
int thread_num = thread_arg->thread_num;
long num_elements = thread_arg->num_elements;
long start = thread_num * num_elements / n_threads;
long n_compression_batches =
num_elements / n_threads / COMPRESSION_BATCH_SIZE;
long compressed_data_batch_size =
num_elements / n_threads / n_compressed_data_batches;
// hipHostMalloc((void **)&compressed_data[thread_num],
// n_compressed_data_batches * sizeof(float *));
CompressedPos current_pos;
current_pos.compressed_data_batch = 0, current_pos.offset = 0;
for (long i = 0; i < n_compression_batches; i++) {
long mask_pos =
(i * COMPRESSION_BATCH_SIZE + start) / COMPRESSION_BATCH_SIZE;
for (long j = i * COMPRESSION_BATCH_SIZE + start;
j < (i + 1) * COMPRESSION_BATCH_SIZE + start; j++) {
      if ((mask[mask_pos] & 0x80000000) != 0) {
original_data[j] =
compressed_data[thread_num][current_pos.compressed_data_batch]
[current_pos.offset];
current_pos.offset += 1;
if (current_pos.offset == compressed_data_batch_size) {
current_pos.compressed_data_batch += 1;
current_pos.offset = 0;
}
} else {
original_data[j] = 0;
}
mask[mask_pos] = mask[mask_pos] << 1;
}
}
for (int i = 0; i < n_compressed_data_batches; i++) {
if (compressed_data_taken[thread_num][i])
hipHostFree(compressed_data[thread_num][i]);
else
break;
}
hipHostFree(compressed_data_taken[thread_num]);
hipHostFree(compressed_data[thread_num]);
return NULL;
}
int main() {
int batch_size = 64;
long total_space = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
vector<float ***> compressed_data_vec;
vector<unsigned int *> mask_vec;
vector<bool **> compressed_data_taken_vec;
pthread_t threads[n_threads];
for (int i = 0; i < num_layers; i++) {
layer_sizes[i] *= batch_size;
}
vector<float> compression_times;
float total_milli = 0.0;
for (int j = 0; j < num_layers; j++) {
if (!layer_compress[j]) continue;
long num_elements = layer_sizes[j];
float *original_data, ***compressed_data;
bool **compressed_data_taken;
unsigned int *mask;
hipHostMalloc((void **)&original_data, num_elements * sizeof(float));
// hipHostMalloc((void **)&compressed_data, num_elements * sizeof(float));
// generate data
for (long i = 0; i < num_elements; i++) {
if (rand() % 100 < layer_density[j])
original_data[i] = 1;
else
original_data[i] = 0;
}
if (num_elements % n_threads != 0) {
cout << "bad number of threads" << endl;
exit(0);
}
if ((num_elements / n_threads) % COMPRESSION_BATCH_SIZE != 0) {
cout << "bad num_elements or n_threads" << endl;
exit(0);
}
cout << "starting " << j << endl;
hipEventRecord(start);
hipHostMalloc((void **)&compressed_data, n_threads * sizeof(float **));
hipHostMalloc((void **)&mask, num_elements / COMPRESSION_BATCH_SIZE *
sizeof(unsigned int));
hipHostMalloc((void **)&compressed_data_taken, n_threads * sizeof(bool *));
ThreadArg thread_arg[n_threads];
for (int i = 0; i < n_threads; i++) {
thread_arg[i].original_data = original_data;
thread_arg[i].compressed_data = compressed_data;
thread_arg[i].compressed_data_taken = compressed_data_taken;
thread_arg[i].mask = mask;
thread_arg[i].thread_num = i;
thread_arg[i].num_elements = num_elements;
}
for (int i = 0; i < n_threads; i++) {
pthread_create(&threads[i], NULL, &compressThread,
(void *)&thread_arg[i]);
}
for (int i = 0; i < n_threads; i++) {
pthread_join(threads[i], NULL);
}
compressed_data_vec.push_back(compressed_data);
mask_vec.push_back(mask);
compressed_data_taken_vec.push_back(compressed_data_taken);
hipHostFree(original_data);
// for (int i = 0; i < 27 * 27 * 256 * 128; i++);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milli;
hipEventElapsedTime(&milli, start, stop);
compression_times.push_back(milli);
total_milli += milli;
// cout << milli << endl;
// hipHostFree(compressed_data);
// hipHostFree(mask);
}
for (int i = 0; i < compression_times.size(); i++) {
cout << compression_times[i] << endl;
}
cout << total_milli << endl;
// calculating space consumed
int k = 0;
for (int j = 0; j < num_layers; j++) {
long num_elements = layer_sizes[j];
long cur_space = 0;
if (!layer_compress[j]) {
cur_space = num_elements * sizeof(float);
total_space += cur_space;
continue;
}
bool **compressed_data_taken = compressed_data_taken_vec[k];
long compressed_data_batch_size =
num_elements / n_threads / n_compressed_data_batches;
for (int thread_num = 0; thread_num < n_threads; thread_num++) {
for (int i = 0; i < n_compressed_data_batches; i++) {
if (compressed_data_taken[thread_num][i])
cur_space += compressed_data_batch_size;
else
break;
}
}
// add size of mask
cur_space += num_elements / COMPRESSION_BATCH_SIZE;
cur_space *= sizeof(float);
total_space += cur_space;
k++;
}
cout << "total_space_compressed(MB): " << total_space * 1.0 / (1024 * 1024)
<< endl;
// {
// int n;
// cout << "waiting..\n";
// cin >> n;
// }
// decompression
cout << "decompress" << endl;
vector<float> decompression_times;
float total_milli_decompress = 0.0;
for (int j = num_layers - 1; j >= 0; j--) {
if (!layer_compress[j]) continue;
long num_elements = layer_sizes[j];
float *original_data, ***compressed_data;
bool **compressed_data_taken;
unsigned int *mask;
compressed_data = compressed_data_vec.back();
mask = mask_vec.back();
compressed_data_taken = compressed_data_taken_vec.back();
compressed_data_vec.pop_back();
mask_vec.pop_back();
compressed_data_taken_vec.pop_back();
// hipHostMalloc((void **)&compressed_data, num_elements * sizeof(float));
cout << "starting " << j << endl;
hipEventRecord(start);
hipHostMalloc((void **)&original_data, num_elements * sizeof(float));
ThreadArg thread_arg[n_threads];
for (int i = 0; i < n_threads; i++) {
thread_arg[i].original_data = original_data;
thread_arg[i].compressed_data = compressed_data;
thread_arg[i].compressed_data_taken = compressed_data_taken;
thread_arg[i].mask = mask;
thread_arg[i].thread_num = i;
thread_arg[i].num_elements = num_elements;
}
for (int i = 0; i < n_threads; i++) {
pthread_create(&threads[i], NULL, &decompressThread,
(void *)&thread_arg[i]);
}
for (int i = 0; i < n_threads; i++) {
pthread_join(threads[i], NULL);
}
hipHostFree(compressed_data_taken);
hipHostFree(compressed_data);
hipHostFree(mask);
// hipHostFree(original_data);
// for (int i = 0; i < 27 * 27 * 256 * 128; i++);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milli;
hipEventElapsedTime(&milli, start, stop);
decompression_times.insert(decompression_times.begin(), milli);
total_milli_decompress += milli;
// cout << milli << endl;
// hipHostFree(compressed_data);
// hipHostFree(mask);
}
for (int i = 0; i < decompression_times.size(); i++) {
cout << decompression_times[i] << endl;
}
cout << total_milli_decompress << endl;
// calculating total space
total_space = 0;
for (int j = 0; j < num_layers; j++) {
long num_elements = layer_sizes[j];
long cur_space = 0;
cur_space = num_elements * sizeof(float);
total_space += cur_space;
}
cout << "total space(MB): " << total_space * 1.0 / (1024 * 1024) << endl;
} | 8656f784b21f7809840a807367659dfc91a3256b.cu | #include <pthread.h>
#include <cstdlib>
#include <iostream>
#include <vector>
#define COMPRESSION_BATCH_SIZE 32
using namespace std;
struct ThreadArg {
float *original_data;
long num_elements;
int thread_num;
float ***compressed_data;
bool **compressed_data_taken;
unsigned int *mask;
};
struct CompressedPos {
long compressed_data_batch;
long offset;
};
int n_threads = 8;
int n_compressed_data_batches = 8;
long layer_sizes_alexnet[] = {56l * 56 * 96, 28l * 28 * 96, 27l * 27 * 256,
13l * 13 * 256, 13l * 12 * 384, 13l * 12 * 384,
13l * 13 * 256, 6l * 6 * 256};
bool layer_compress_alexnet[] = {true, true, true, true,
true, true, true, true};
long layer_density_alexnet[] = {50, 80, 40, 60, 70, 70, 30, 60};
int num_layers_alexnet = 8;
long layer_sizes_vgg[] = {224l * 224 * 64, 224l * 224 * 64, 112l * 112 * 64,
112l * 112 * 128, 112l * 112 * 128, 56l * 56 * 128,
56l * 56 * 256, 56l * 56 * 256, 56l * 56 * 256,
28l * 28 * 256, 28l * 28 * 512, 28l * 28 * 512,
28l * 28 * 512, 14l * 14 * 512, 14l * 14 * 512,
14l * 14 * 512, 14l * 14 * 512, 7l * 7 * 512};
long layer_density_vgg[] = {50, 20, 30, 20, 10, 20, 20, 20, 10,
20, 20, 10, 10, 10, 20, 20, 10, 15};
bool layer_compress_vgg[] = {true, true, true, true, true, true,
true, true, true, true, true, true,
true, true, true, true, true, true};
int num_layers_vgg = 18;
// long *layer_sizes = layer_sizes_alexnet;
// bool *layer_compress = layer_compress_alexnet;
// long *layer_density = layer_density_alexnet;
// int num_layers = num_layers_alexnet;
long *layer_sizes = layer_sizes_alexnet;
bool *layer_compress = layer_compress_alexnet;
long *layer_density = layer_density_alexnet;
int num_layers = num_layers_alexnet;
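// Each worker thread compresses its own contiguous 1/n_threads slice of a layer.
// The slice is scanned in batches of COMPRESSION_BATCH_SIZE (32) floats: a 32-bit
// mask records which entries were positive (the first element of a batch lands in
// the most significant bit), and the positive values are appended to lazily
// allocated pinned chunks of compressed_data_batch_size floats.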
void *compressThread(void *arg) {
ThreadArg *thread_arg = (ThreadArg *)arg;
float *original_data = thread_arg->original_data;
float ***compressed_data = thread_arg->compressed_data;
bool **compressed_data_taken = thread_arg->compressed_data_taken;
unsigned int *mask = thread_arg->mask;
int thread_num = thread_arg->thread_num;
long num_elements = thread_arg->num_elements;
long start = thread_num * num_elements / n_threads;
long n_compression_batches =
num_elements / n_threads / COMPRESSION_BATCH_SIZE;
long compressed_data_batch_size =
num_elements / n_threads / n_compressed_data_batches;
cudaMallocHost((void **)&compressed_data[thread_num],
n_compressed_data_batches * sizeof(float *));
cudaMallocHost((void **)&compressed_data_taken[thread_num],
n_compressed_data_batches * sizeof(bool));
for (int i = 0; i < n_compressed_data_batches; i++) {
compressed_data_taken[thread_num][i] = false;
}
CompressedPos current_pos;
current_pos.compressed_data_batch = -1,
current_pos.offset = compressed_data_batch_size;
for (long i = 0; i < n_compression_batches; i++) {
long mask_pos =
(i * COMPRESSION_BATCH_SIZE + start) / COMPRESSION_BATCH_SIZE;
mask[mask_pos] = 0;
for (long j = i * COMPRESSION_BATCH_SIZE + start;
j < (i + 1) * COMPRESSION_BATCH_SIZE + start; j++) {
if (original_data[j] > 0) {
if (current_pos.offset == compressed_data_batch_size) {
cudaMallocHost(
(void **)&compressed_data[thread_num]
[current_pos.compressed_data_batch + 1],
compressed_data_batch_size * sizeof(float));
compressed_data_taken[thread_num]
[current_pos.compressed_data_batch + 1] = true;
current_pos.compressed_data_batch =
current_pos.compressed_data_batch + 1;
current_pos.offset = 0;
}
mask[mask_pos] = (mask[mask_pos] << 1) + 1;
compressed_data[thread_num][current_pos.compressed_data_batch]
[current_pos.offset] = original_data[j];
current_pos.offset += 1;
} else {
mask[mask_pos] = (mask[mask_pos] << 1);
}
}
}
return NULL;
}
void *decompressThread(void *arg) {
ThreadArg *thread_arg = (ThreadArg *)arg;
float *original_data = thread_arg->original_data;
float ***compressed_data = thread_arg->compressed_data;
bool **compressed_data_taken = thread_arg->compressed_data_taken;
unsigned int *mask = thread_arg->mask;
int thread_num = thread_arg->thread_num;
long num_elements = thread_arg->num_elements;
long start = thread_num * num_elements / n_threads;
long n_compression_batches =
num_elements / n_threads / COMPRESSION_BATCH_SIZE;
long compressed_data_batch_size =
num_elements / n_threads / n_compressed_data_batches;
// cudaMallocHost((void **)&compressed_data[thread_num],
// n_compressed_data_batches * sizeof(float *));
CompressedPos current_pos;
current_pos.compressed_data_batch = 0, current_pos.offset = 0;
for (long i = 0; i < n_compression_batches; i++) {
long mask_pos =
(i * COMPRESSION_BATCH_SIZE + start) / COMPRESSION_BATCH_SIZE;
for (long j = i * COMPRESSION_BATCH_SIZE + start;
j < (i + 1) * COMPRESSION_BATCH_SIZE + start; j++) {
      if ((mask[mask_pos] & 0x80000000) != 0) {
original_data[j] =
compressed_data[thread_num][current_pos.compressed_data_batch]
[current_pos.offset];
current_pos.offset += 1;
if (current_pos.offset == compressed_data_batch_size) {
current_pos.compressed_data_batch += 1;
current_pos.offset = 0;
}
} else {
original_data[j] = 0;
}
mask[mask_pos] = mask[mask_pos] << 1;
}
}
for (int i = 0; i < n_compressed_data_batches; i++) {
if (compressed_data_taken[thread_num][i])
cudaFreeHost(compressed_data[thread_num][i]);
else
break;
}
cudaFreeHost(compressed_data_taken[thread_num]);
cudaFreeHost(compressed_data[thread_num]);
return NULL;
}
int main() {
int batch_size = 64;
long total_space = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
vector<float ***> compressed_data_vec;
vector<unsigned int *> mask_vec;
vector<bool **> compressed_data_taken_vec;
pthread_t threads[n_threads];
for (int i = 0; i < num_layers; i++) {
layer_sizes[i] *= batch_size;
}
vector<float> compression_times;
float total_milli = 0.0;
for (int j = 0; j < num_layers; j++) {
if (!layer_compress[j]) continue;
long num_elements = layer_sizes[j];
float *original_data, ***compressed_data;
bool **compressed_data_taken;
unsigned int *mask;
cudaMallocHost((void **)&original_data, num_elements * sizeof(float));
// cudaMallocHost((void **)&compressed_data, num_elements * sizeof(float));
// generate data
for (long i = 0; i < num_elements; i++) {
if (rand() % 100 < layer_density[j])
original_data[i] = 1;
else
original_data[i] = 0;
}
if (num_elements % n_threads != 0) {
cout << "bad number of threads" << endl;
exit(0);
}
if ((num_elements / n_threads) % COMPRESSION_BATCH_SIZE != 0) {
cout << "bad num_elements or n_threads" << endl;
exit(0);
}
cout << "starting " << j << endl;
cudaEventRecord(start);
cudaMallocHost((void **)&compressed_data, n_threads * sizeof(float **));
cudaMallocHost((void **)&mask, num_elements / COMPRESSION_BATCH_SIZE *
sizeof(unsigned int));
cudaMallocHost((void **)&compressed_data_taken, n_threads * sizeof(bool *));
ThreadArg thread_arg[n_threads];
for (int i = 0; i < n_threads; i++) {
thread_arg[i].original_data = original_data;
thread_arg[i].compressed_data = compressed_data;
thread_arg[i].compressed_data_taken = compressed_data_taken;
thread_arg[i].mask = mask;
thread_arg[i].thread_num = i;
thread_arg[i].num_elements = num_elements;
}
for (int i = 0; i < n_threads; i++) {
pthread_create(&threads[i], NULL, &compressThread,
(void *)&thread_arg[i]);
}
for (int i = 0; i < n_threads; i++) {
pthread_join(threads[i], NULL);
}
compressed_data_vec.push_back(compressed_data);
mask_vec.push_back(mask);
compressed_data_taken_vec.push_back(compressed_data_taken);
cudaFreeHost(original_data);
// for (int i = 0; i < 27 * 27 * 256 * 128; i++);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milli;
cudaEventElapsedTime(&milli, start, stop);
compression_times.push_back(milli);
total_milli += milli;
// cout << milli << endl;
// cudaFreeHost(compressed_data);
// cudaFreeHost(mask);
}
for (int i = 0; i < compression_times.size(); i++) {
cout << compression_times[i] << endl;
}
cout << total_milli << endl;
// calculating space consumed
int k = 0;
for (int j = 0; j < num_layers; j++) {
long num_elements = layer_sizes[j];
long cur_space = 0;
if (!layer_compress[j]) {
cur_space = num_elements * sizeof(float);
total_space += cur_space;
continue;
}
bool **compressed_data_taken = compressed_data_taken_vec[k];
long compressed_data_batch_size =
num_elements / n_threads / n_compressed_data_batches;
for (int thread_num = 0; thread_num < n_threads; thread_num++) {
for (int i = 0; i < n_compressed_data_batches; i++) {
if (compressed_data_taken[thread_num][i])
cur_space += compressed_data_batch_size;
else
break;
}
}
// add size of mask
cur_space += num_elements / COMPRESSION_BATCH_SIZE;
cur_space *= sizeof(float);
total_space += cur_space;
k++;
}
cout << "total_space_compressed(MB): " << total_space * 1.0 / (1024 * 1024)
<< endl;
// {
// int n;
// cout << "waiting..\n";
// cin >> n;
// }
// decompression
cout << "decompress" << endl;
vector<float> decompression_times;
float total_milli_decompress = 0.0;
for (int j = num_layers - 1; j >= 0; j--) {
if (!layer_compress[j]) continue;
long num_elements = layer_sizes[j];
float *original_data, ***compressed_data;
bool **compressed_data_taken;
unsigned int *mask;
compressed_data = compressed_data_vec.back();
mask = mask_vec.back();
compressed_data_taken = compressed_data_taken_vec.back();
compressed_data_vec.pop_back();
mask_vec.pop_back();
compressed_data_taken_vec.pop_back();
// cudaMallocHost((void **)&compressed_data, num_elements * sizeof(float));
cout << "starting " << j << endl;
cudaEventRecord(start);
cudaMallocHost((void **)&original_data, num_elements * sizeof(float));
ThreadArg thread_arg[n_threads];
for (int i = 0; i < n_threads; i++) {
thread_arg[i].original_data = original_data;
thread_arg[i].compressed_data = compressed_data;
thread_arg[i].compressed_data_taken = compressed_data_taken;
thread_arg[i].mask = mask;
thread_arg[i].thread_num = i;
thread_arg[i].num_elements = num_elements;
}
for (int i = 0; i < n_threads; i++) {
pthread_create(&threads[i], NULL, &decompressThread,
(void *)&thread_arg[i]);
}
for (int i = 0; i < n_threads; i++) {
pthread_join(threads[i], NULL);
}
cudaFreeHost(compressed_data_taken);
cudaFreeHost(compressed_data);
cudaFreeHost(mask);
// cudaFreeHost(original_data);
// for (int i = 0; i < 27 * 27 * 256 * 128; i++);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milli;
cudaEventElapsedTime(&milli, start, stop);
decompression_times.insert(decompression_times.begin(), milli);
total_milli_decompress += milli;
// cout << milli << endl;
// cudaFreeHost(compressed_data);
// cudaFreeHost(mask);
}
for (int i = 0; i < decompression_times.size(); i++) {
cout << decompression_times[i] << endl;
}
cout << total_milli_decompress << endl;
// calculating total space
total_space = 0;
for (int j = 0; j < num_layers; j++) {
long num_elements = layer_sizes[j];
long cur_space = 0;
cur_space = num_elements * sizeof(float);
total_space += cur_space;
}
cout << "total space(MB): " << total_space * 1.0 / (1024 * 1024) << endl;
} |
f9530986eb7054202e74a598a68e82a4defc9de0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "add_hip.cuh"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
__global__
void add(unsigned int size, float time, float3* vertexBuf) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
float3 pos = vertexBuf[index];
hiprandState_t state;
hiprand_init(0, 0, 0, &state);
int randomVal = (hiprand(&state) % 3);
pos.y = sin(time * 0.001f + blockIdx.x * threadIdx.x * 0.5f) * 10;
vertexBuf[index] = pos;
}
__global__
void ClothPhys(unsigned int size, float time, float3* vertexBuf, float3 grav, float damping, float3* oldPositions) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
/*if (index == 0 || index == 256)
return;*/
float3 pos = vertexBuf[index];
float3 old = oldPositions[index];
float3 temp = pos;
float3 vel = make_float3((pos.x - old.x) / time, (pos.y - old.y) / time, (pos.z - old.z) / time);
float3 force = make_float3(grav.x * 1 + vel.x*damping, grav.x * 1 + vel.x*damping, grav.x * 1 + vel.x*damping);
if (pos.y >= -5000.0f) {
pos.y = pos.y + (pos.y - old.y) * damping + grav.y * (time*time);
}
pos.x = pos.x + (pos.x - old.x) * damping + grav.x * (time*time);
pos.z = pos.z + (pos.z-old.z) * damping + grav.z * (time*time);
vertexBuf[index] = pos;
oldPositions[index] = temp;
}
__global__
void Integrate(unsigned int size, float time, float3* vertexBuf, float3 grav, float damping, float3* oldPositions) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
//if (index == 65791/* || index == 256*/)
// return;
float3 pos = vertexBuf[index];
float3 old = oldPositions[index];
float3 temp = pos;
float3 vel = make_float3((pos.x - old.x) / time, (pos.y - old.y) / time, (pos.z - old.z) / time);
float3 force = make_float3(grav.x * 1 + vel.x*damping, grav.x * 1 + vel.x*damping, grav.x * 1 + vel.x*damping);
float3 acc = make_float3(force.x / 1, force.y / 1, force.z / 1);
pos.x = pos.x + (pos.x-old.x) * damping + grav.x * (time*time);
if (pos.y >= -5000.0f) {
pos.y = pos.y + (pos.y-old.y) * damping + grav.y * (time*time);
}
pos.z = pos.z + (pos.z-old.z) * damping + grav.z * (time*time);
////pos = pos + (pos - old) * damping + grav * (time*time);
/*if (pos.y < 0.0f) {
pos.y = 0.0f;
}*/
//pos.x = pos.x + (pos.x - old.x) + acc.x * (time*time);
//if (pos.y >= -5000.0f) {
//pos.y = pos.y + (pos.y - old.y) + acc.y * (time*time);
//}
//pos.z = pos.z + (pos.z - old.z) + acc.z * (time*time);
float3 pos1, pos2, pos3, pos4, pos5, pos6, pos7, pos8, pos9,
pos10, pos11, pos12;
float3 correctionVec = make_float3(0, 0, 0);
if (index < 65792) {
pos1 = vertexBuf[index + 257];
float3 temp;
temp.x = pos1.x - pos.x;
temp.y = pos1.y - pos.y;
temp.z = pos1.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index >= 257) {
pos2 = vertexBuf[index - 257];
float3 temp;
temp.x = pos2.x - pos.x;
temp.y = pos2.y - pos.y;
temp.z = pos2.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index % 257 != 256) {
pos3 = vertexBuf[index + 1];
float3 temp;
temp.x = pos3.x - pos.x;
temp.y = pos3.y - pos.y;
temp.z = pos3.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index % 257 != 0) {
pos4 = vertexBuf[index - 1];
float3 temp;
temp.x = pos4.x - pos.x;
temp.y = pos4.y - pos.y;
temp.z = pos4.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
//vertexBuf[index] = pos;
if (index < 65535) {
pos5 = vertexBuf[index + 514];
float3 temp;
temp.x = pos5.x - pos.x;
temp.y = pos5.y - pos.y;
temp.z = pos5.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 32.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index > 514) {
pos6 = vertexBuf[index - 514];
float3 temp;
temp.x = pos6.x - pos.x;
temp.y = pos6.y - pos.y;
temp.z = pos6.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 32.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index % 257 != 0 && index % 257 != 1) {
pos7 = vertexBuf[index - 2];
float3 temp;
temp.x = pos7.x - pos.x;
temp.y = pos7.y - pos.y;
temp.z = pos7.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 32.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index % 257 != 255 && index % 257 != 256) {
pos8 = vertexBuf[index + 2];
float3 temp;
temp.x = pos8.x - pos.x;
temp.y = pos8.y - pos.y;
temp.z = pos8.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 32.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index >= 257 && index % 257 != 0) {
pos9 = vertexBuf[index - 257 - 1];
float3 temp;
temp.x = pos9.x - pos.x;
temp.y = pos9.y - pos.y;
temp.z = pos9.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 22.62f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index >= 257 && index % 257 != 256) {
pos10 = vertexBuf[index - 257 + 1];
float3 temp;
temp.x = pos10.x - pos.x;
temp.y = pos10.y - pos.y;
temp.z = pos10.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 22.62f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index<65792 && index % 257 != 0) {
pos11 = vertexBuf[index + 257 - 1];
float3 temp;
temp.x = pos11.x - pos.x;
temp.y = pos11.y - pos.y;
temp.z = pos11.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 22.62f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index<65792 && index % 257 != 256) {
pos12 = vertexBuf[index + 257 + 1];
float3 temp;
temp.x = pos12.x - pos.x;
temp.y = pos12.y - pos.y;
temp.z = pos12.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 22.62f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
pos.x = pos.x + (correctionVec.x*0.5);
pos.y = pos.y + (correctionVec.y*0.5);
pos.z = pos.z + (correctionVec.z*0.5);
vertexBuf[index] = pos;
oldPositions[index] = temp;
}
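// Sphere collision constraint: a vertex that falls inside the sphere is pushed
// back out onto the surface along the radial direction.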
__global__
void SphereConstraint(float3* vertexBuf, float3 spherePoint, float size, float radius) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
float3 pos = vertexBuf[index];
float3 delta;
delta.x = pos.x - spherePoint.x;
delta.y = pos.y - spherePoint.y;
delta.z = pos.z - spherePoint.z;
float dist = sqrt((delta.x*delta.x)+(delta.y*delta.y)+(delta.z*delta.z));
if (dist < radius) {
pos.x = spherePoint.x + delta.x *(radius / dist);
pos.y = spherePoint.y + delta.y *(radius / dist);
pos.z = spherePoint.z + delta.z *(radius / dist);
}
vertexBuf[index] = pos;
}
__global__
void DistanceConstraint(float3* vertexBuf, float size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
float3 pos = vertexBuf[index];
float3 pos1, pos2, pos3, pos4, pos5, pos6, pos7, pos8, pos9,
pos10, pos11, pos12;
float3 correctionVec = make_float3(0,0,0);
if (index < 65792) {
pos1 = vertexBuf[index+257];
float3 temp;
temp.x = pos1.x - pos.x;
temp.y = pos1.y - pos.y;
temp.z = pos1.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index > 257) {
pos2 = vertexBuf[index - 257];
float3 temp;
temp.x = pos2.x - pos.x;
temp.y = pos2.y - pos.y;
temp.z = pos2.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index % 257 != 256) {
pos3 = vertexBuf[index + 1];
float3 temp;
temp.x = pos3.x - pos.x;
temp.y = pos3.y - pos.y;
temp.z = pos3.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index % 257 != 0) {
pos4 = vertexBuf[index - 1];
float3 temp;
temp.x = pos4.x - pos.x;
temp.y = pos4.y - pos.y;
temp.z = pos4.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
//vertexBuf[index] = pos;
pos.x = pos.x - (correctionVec.x);
pos.y = pos.y - (correctionVec.y);
pos.z = pos.z - (correctionVec.z);
vertexBuf[index] = pos;
}
__global__
void SpringConstraint(float3* vertexBuf, float size, int vertIndex, float3 massGrav) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
if (index % vertIndex != 0) {
return;
}
float3 pos = vertexBuf[index];
vertexBuf[index] = pos;
}
Add::Add(unsigned int size)
{
hipMalloc((void**)&oldPositions, size * sizeof(float3));
}
Add::~Add()
{
hipFree(oldPositions);
}
void Add::BindBuffers(HeightMap * map)
{
hipGraphicsGLRegisterBuffer(&vertexBuf, map->getVertexBuffer(), hipGraphicsMapFlagsNone);
/*hipGraphicsGLRegisterBuffer(&oldPos, map->getVertexBuffer(), hipGraphicsMapFlagsNone);*/
//dim3 block(256, 1, 1);
////dim3 grid((size + block.x - 1) / block.x, 1, 1);
}
void Add::AddByRand(unsigned int size, float time)
{
std::size_t tmpVertexPointerSize;
float3* tmpVertexPointer;
hipGraphicsMapResources(1, &vertexBuf, 0);
hipGraphicsResourceGetMappedPointer((void**)&tmpVertexPointer, &tmpVertexPointerSize, vertexBuf);
dim3 block(256, 1, 1);
dim3 grid((size + block.x - 1) / block.x, 1, 1);
add << <grid, block >> > (size, time, tmpVertexPointer);
hipGraphicsUnmapResources(1, &vertexBuf, 0);
}
void Add::IntergrateTest(unsigned int size, float time, float damping, Vector3 gravity)
{
std::size_t tmpVertexPointerSize;
float3* tmpVertexPointer;
//float3* oldPositions;
hipGraphicsMapResources(1, &vertexBuf, 0);
hipGraphicsResourceGetMappedPointer((void**)&tmpVertexPointer, &tmpVertexPointerSize, vertexBuf);
/*hipGraphicsMapResources(1, &vertexBuf, 0);
hipGraphicsResourceGetMappedPointer((void**)&oldPositions, &tmpVertexPointerSize, oldPos);*/
dim3 block(256, 1, 1);
dim3 grid((size + block.x - 1) / block.x, 1, 1);
float3 grav;
grav.x = gravity.x;
grav.y = gravity.y;
grav.z = gravity.z;
//-2000.0f, 2000.0f, -2000.0f
float3 spherePoint;
spherePoint.x = 2000.0f;
spherePoint.y = -2000.0f;
spherePoint.z = 2000.0f;
/*float3 massGrav;
massGrav.x = c->getMassGrav().x;
massGrav.y = c->getMassGrav().y;
massGrav.z = c->getMassGrav().z;*/
SphereConstraint << <grid, block >> > (tmpVertexPointer, spherePoint, size, 1010);
Integrate << <grid, block >> > (size, 0.25, tmpVertexPointer, grav, damping, oldPositions);
//FloorConstraint << <grid, block >> > (tmpVertexPointer, 0.0f, size);
//SpringConstraint <<<grid, block>>>(tmpVertexPointer, )
//DistanceConstraint << <grid, block >> > (tmpVertexPointer, size);
hipGraphicsUnmapResources(1, &vertexBuf, 0);
//hipGraphicsUnmapResources(1, &oldPos, 0);
}
| f9530986eb7054202e74a598a68e82a4defc9de0.cu |
#include "device_launch_parameters.h"
#include "add.cuh"
#include <curand.h>
#include <curand_kernel.h>
__global__
void add(unsigned int size, float time, float3* vertexBuf) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
float3 pos = vertexBuf[index];
curandState_t state;
curand_init(0, 0, 0, &state);
int randomVal = (curand(&state) % 3);
pos.y = sin(time * 0.001f + blockIdx.x * threadIdx.x * 0.5f) * 10;
vertexBuf[index] = pos;
}
__global__
void ClothPhys(unsigned int size, float time, float3* vertexBuf, float3 grav, float damping, float3* oldPositions) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
/*if (index == 0 || index == 256)
return;*/
float3 pos = vertexBuf[index];
float3 old = oldPositions[index];
float3 temp = pos;
float3 vel = make_float3((pos.x - old.x) / time, (pos.y - old.y) / time, (pos.z - old.z) / time);
	float3 force = make_float3(grav.x * 1 + vel.x*damping, grav.y * 1 + vel.y*damping, grav.z * 1 + vel.z*damping);
if (pos.y >= -5000.0f) {
pos.y = pos.y + (pos.y - old.y) * damping + grav.y * (time*time);
}
pos.x = pos.x + (pos.x - old.x) * damping + grav.x * (time*time);
pos.z = pos.z + (pos.z-old.z) * damping + grav.z * (time*time);
vertexBuf[index] = pos;
oldPositions[index] = temp;
}
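// Verlet-style integration plus constraint relaxation: the new position is
// extrapolated from the current and previous positions (damped) with gravity
// added, then pulled toward its neighbours. The index offsets suggest a
// 257-vertex-wide grid: +/-1 and +/-257 look like structural springs (rest
// length 16), +/-2 and +/-514 bend springs (rest length 32), and the
// +/-257+/-1 diagonals shear springs (rest length 22.62 ~ 16*sqrt(2)).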
__global__
void Integrate(unsigned int size, float time, float3* vertexBuf, float3 grav, float damping, float3* oldPositions) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
//if (index == 65791/* || index == 256*/)
// return;
float3 pos = vertexBuf[index];
float3 old = oldPositions[index];
float3 temp = pos;
float3 vel = make_float3((pos.x - old.x) / time, (pos.y - old.y) / time, (pos.z - old.z) / time);
	float3 force = make_float3(grav.x * 1 + vel.x*damping, grav.y * 1 + vel.y*damping, grav.z * 1 + vel.z*damping);
float3 acc = make_float3(force.x / 1, force.y / 1, force.z / 1);
pos.x = pos.x + (pos.x-old.x) * damping + grav.x * (time*time);
if (pos.y >= -5000.0f) {
pos.y = pos.y + (pos.y-old.y) * damping + grav.y * (time*time);
}
pos.z = pos.z + (pos.z-old.z) * damping + grav.z * (time*time);
////pos = pos + (pos - old) * damping + grav * (time*time);
/*if (pos.y < 0.0f) {
pos.y = 0.0f;
}*/
//pos.x = pos.x + (pos.x - old.x) + acc.x * (time*time);
//if (pos.y >= -5000.0f) {
//pos.y = pos.y + (pos.y - old.y) + acc.y * (time*time);
//}
//pos.z = pos.z + (pos.z - old.z) + acc.z * (time*time);
float3 pos1, pos2, pos3, pos4, pos5, pos6, pos7, pos8, pos9,
pos10, pos11, pos12;
float3 correctionVec = make_float3(0, 0, 0);
if (index < 65792) {
pos1 = vertexBuf[index + 257];
float3 temp;
temp.x = pos1.x - pos.x;
temp.y = pos1.y - pos.y;
temp.z = pos1.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index >= 257) {
pos2 = vertexBuf[index - 257];
float3 temp;
temp.x = pos2.x - pos.x;
temp.y = pos2.y - pos.y;
temp.z = pos2.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index % 257 != 256) {
pos3 = vertexBuf[index + 1];
float3 temp;
temp.x = pos3.x - pos.x;
temp.y = pos3.y - pos.y;
temp.z = pos3.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index % 257 != 0) {
pos4 = vertexBuf[index - 1];
float3 temp;
temp.x = pos4.x - pos.x;
temp.y = pos4.y - pos.y;
temp.z = pos4.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
//vertexBuf[index] = pos;
if (index < 65535) {
pos5 = vertexBuf[index + 514];
float3 temp;
temp.x = pos5.x - pos.x;
temp.y = pos5.y - pos.y;
temp.z = pos5.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 32.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index > 514) {
pos6 = vertexBuf[index - 514];
float3 temp;
temp.x = pos6.x - pos.x;
temp.y = pos6.y - pos.y;
temp.z = pos6.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 32.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index % 257 != 0 && index % 257 != 1) {
pos7 = vertexBuf[index - 2];
float3 temp;
temp.x = pos7.x - pos.x;
temp.y = pos7.y - pos.y;
temp.z = pos7.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 32.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index % 257 != 255 && index % 257 != 256) {
pos8 = vertexBuf[index + 2];
float3 temp;
temp.x = pos8.x - pos.x;
temp.y = pos8.y - pos.y;
temp.z = pos8.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 32.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index >= 257 && index % 257 != 0) {
pos9 = vertexBuf[index - 257 - 1];
float3 temp;
temp.x = pos9.x - pos.x;
temp.y = pos9.y - pos.y;
temp.z = pos9.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 22.62f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index >= 257 && index % 257 != 256) {
pos10 = vertexBuf[index - 257 + 1];
float3 temp;
temp.x = pos10.x - pos.x;
temp.y = pos10.y - pos.y;
temp.z = pos10.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 22.62f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index<65792 && index % 257 != 0) {
pos11 = vertexBuf[index + 257 - 1];
float3 temp;
temp.x = pos11.x - pos.x;
temp.y = pos11.y - pos.y;
temp.z = pos11.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 22.62f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index<65792 && index % 257 != 256) {
pos12 = vertexBuf[index + 257 + 1];
float3 temp;
temp.x = pos12.x - pos.x;
temp.y = pos12.y - pos.y;
temp.z = pos12.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 22.62f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
pos.x = pos.x + (correctionVec.x*0.5);
pos.y = pos.y + (correctionVec.y*0.5);
pos.z = pos.z + (correctionVec.z*0.5);
vertexBuf[index] = pos;
oldPositions[index] = temp;
}
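// Sphere collision constraint: a vertex that falls inside the sphere is pushed
// back out onto the surface along the radial direction.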
__global__
void SphereConstraint(float3* vertexBuf, float3 spherePoint, float size, float radius) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
float3 pos = vertexBuf[index];
float3 delta;
delta.x = pos.x - spherePoint.x;
delta.y = pos.y - spherePoint.y;
delta.z = pos.z - spherePoint.z;
float dist = sqrt((delta.x*delta.x)+(delta.y*delta.y)+(delta.z*delta.z));
if (dist < radius) {
pos.x = spherePoint.x + delta.x *(radius / dist);
pos.y = spherePoint.y + delta.y *(radius / dist);
pos.z = spherePoint.z + delta.z *(radius / dist);
}
vertexBuf[index] = pos;
}
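// Distance constraint over the flattened 257-vertex-wide grid: each of the four
// axis-aligned neighbours (index +/-1 and +/-257) contributes temp * 0.2 * diff with
// diff = (currentLen - 16) / currentLen; e.g. currentLen = 20 gives diff = 0.2, i.e.
// 4% of the separation vector per neighbour. The accumulated correction is then
// subtracted from the vertex position below.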
__global__
void DistanceConstraint(float3* vertexBuf, float size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
float3 pos = vertexBuf[index];
float3 pos1, pos2, pos3, pos4, pos5, pos6, pos7, pos8, pos9,
pos10, pos11, pos12;
float3 correctionVec = make_float3(0,0,0);
if (index < 65792) {
pos1 = vertexBuf[index+257];
float3 temp;
temp.x = pos1.x - pos.x;
temp.y = pos1.y - pos.y;
temp.z = pos1.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index > 257) {
pos2 = vertexBuf[index - 257];
float3 temp;
temp.x = pos2.x - pos.x;
temp.y = pos2.y - pos.y;
temp.z = pos2.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index % 257 != 256) {
pos3 = vertexBuf[index + 1];
float3 temp;
temp.x = pos3.x - pos.x;
temp.y = pos3.y - pos.y;
temp.z = pos3.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
if (index % 257 != 0) {
pos4 = vertexBuf[index - 1];
float3 temp;
temp.x = pos4.x - pos.x;
temp.y = pos4.y - pos.y;
temp.z = pos4.z - pos.z;
float currentLen = sqrt((temp.x*temp.x) + (temp.y*temp.y) + (temp.z*temp.z));
float diff = (currentLen - 16.0f) / currentLen;
correctionVec.x = correctionVec.x + (temp.x * 0.2 * diff);
correctionVec.y = correctionVec.y + (temp.y * 0.2 * diff);
correctionVec.z = correctionVec.z + (temp.z * 0.2 * diff);
}
//vertexBuf[index] = pos;
pos.x = pos.x - (correctionVec.x);
pos.y = pos.y - (correctionVec.y);
pos.z = pos.z - (correctionVec.z);
vertexBuf[index] = pos;
}
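// Note: SpringConstraint below only reads the vertex at every index divisible by
// vertIndex and writes it back unchanged; massGrav is accepted but unused, so this
// looks like an unfinished stub.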
__global__
void SpringConstraint(float3* vertexBuf, float size, int vertIndex, float3 massGrav) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
if (index % vertIndex != 0) {
return;
}
float3 pos = vertexBuf[index];
vertexBuf[index] = pos;
}
Add::Add(unsigned int size)
{
cudaMalloc((void**)&oldPositions, size * sizeof(float3));
}
Add::~Add()
{
cudaFree(oldPositions);
}
void Add::BindBuffers(HeightMap * map)
{
cudaGraphicsGLRegisterBuffer(&vertexBuf, map->getVertexBuffer(), cudaGraphicsMapFlagsNone);
/*cudaGraphicsGLRegisterBuffer(&oldPos, map->getVertexBuffer(), cudaGraphicsMapFlagsNone);*/
//dim3 block(256, 1, 1);
////dim3 grid((size + block.x - 1) / block.x, 1, 1);
}
void Add::AddByRand(unsigned int size, float time)
{
std::size_t tmpVertexPointerSize;
float3* tmpVertexPointer;
cudaGraphicsMapResources(1, &vertexBuf, 0);
cudaGraphicsResourceGetMappedPointer((void**)&tmpVertexPointer, &tmpVertexPointerSize, vertexBuf);
dim3 block(256, 1, 1);
dim3 grid((size + block.x - 1) / block.x, 1, 1);
add << <grid, block >> > (size, time, tmpVertexPointer);
cudaGraphicsUnmapResources(1, &vertexBuf, 0);
}
void Add::IntergrateTest(unsigned int size, float time, float damping, Vector3 gravity)
{
std::size_t tmpVertexPointerSize;
float3* tmpVertexPointer;
//float3* oldPositions;
cudaGraphicsMapResources(1, &vertexBuf, 0);
cudaGraphicsResourceGetMappedPointer((void**)&tmpVertexPointer, &tmpVertexPointerSize, vertexBuf);
/*cudaGraphicsMapResources(1, &vertexBuf, 0);
cudaGraphicsResourceGetMappedPointer((void**)&oldPositions, &tmpVertexPointerSize, oldPos);*/
dim3 block(256, 1, 1);
dim3 grid((size + block.x - 1) / block.x, 1, 1);
float3 grav;
grav.x = gravity.x;
grav.y = gravity.y;
grav.z = gravity.z;
//-2000.0f, 2000.0f, -2000.0f
float3 spherePoint;
spherePoint.x = 2000.0f;
spherePoint.y = -2000.0f;
spherePoint.z = 2000.0f;
/*float3 massGrav;
massGrav.x = c->getMassGrav().x;
massGrav.y = c->getMassGrav().y;
massGrav.z = c->getMassGrav().z;*/
SphereConstraint << <grid, block >> > (tmpVertexPointer, spherePoint, size, 1010);
Integrate << <grid, block >> > (size, 0.25, tmpVertexPointer, grav, damping, oldPositions);
//FloorConstraint << <grid, block >> > (tmpVertexPointer, 0.0f, size);
//SpringConstraint <<<grid, block>>>(tmpVertexPointer, )
//DistanceConstraint << <grid, block >> > (tmpVertexPointer, size);
cudaGraphicsUnmapResources(1, &vertexBuf, 0);
//cudaGraphicsUnmapResources(1, &oldPos, 0);
}
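// Illustrative usage sketch (added; names and numeric values are assumptions, not part
// of the original source). The wrapper appears to be driven as construct -> BindBuffers
// -> per-frame IntergrateTest:
//   Add cloth(257 * 257);
//   cloth.BindBuffers(heightMap); // heightMap: HeightMap* owning the GL vertex buffer
//   cloth.IntergrateTest(257 * 257, dt, 0.99f, Vector3(0.0f, -9.81f, 0.0f));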
|
dcb27619e317c993e42e4ddfae95e3130ba2b847.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_cell_kernel3_ydir;
int xdim0_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim0_advec_cell_kernel3_ydir;
int ydim0_advec_cell_kernel3_ydir_h = -1;
__constant__ int xdim1_advec_cell_kernel3_ydir;
int xdim1_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim1_advec_cell_kernel3_ydir;
int ydim1_advec_cell_kernel3_ydir_h = -1;
__constant__ int xdim2_advec_cell_kernel3_ydir;
int xdim2_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim2_advec_cell_kernel3_ydir;
int ydim2_advec_cell_kernel3_ydir_h = -1;
__constant__ int xdim3_advec_cell_kernel3_ydir;
int xdim3_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim3_advec_cell_kernel3_ydir;
int ydim3_advec_cell_kernel3_ydir_h = -1;
__constant__ int xdim4_advec_cell_kernel3_ydir;
int xdim4_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim4_advec_cell_kernel3_ydir;
int ydim4_advec_cell_kernel3_ydir_h = -1;
__constant__ int xdim5_advec_cell_kernel3_ydir;
int xdim5_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim5_advec_cell_kernel3_ydir;
int ydim5_advec_cell_kernel3_ydir_h = -1;
__constant__ int xdim6_advec_cell_kernel3_ydir;
int xdim6_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim6_advec_cell_kernel3_ydir;
int ydim6_advec_cell_kernel3_ydir_h = -1;
__constant__ int xdim7_advec_cell_kernel3_ydir;
int xdim7_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim7_advec_cell_kernel3_ydir;
int ydim7_advec_cell_kernel3_ydir_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_advec_cell_kernel3_ydir*(y)+xdim0_advec_cell_kernel3_ydir*ydim0_advec_cell_kernel3_ydir*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_advec_cell_kernel3_ydir*(y)+xdim1_advec_cell_kernel3_ydir*ydim1_advec_cell_kernel3_ydir*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_advec_cell_kernel3_ydir*(y)+xdim2_advec_cell_kernel3_ydir*ydim2_advec_cell_kernel3_ydir*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_advec_cell_kernel3_ydir*(y)+xdim3_advec_cell_kernel3_ydir*ydim3_advec_cell_kernel3_ydir*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_advec_cell_kernel3_ydir*(y)+xdim4_advec_cell_kernel3_ydir*ydim4_advec_cell_kernel3_ydir*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_advec_cell_kernel3_ydir*(y)+xdim5_advec_cell_kernel3_ydir*ydim5_advec_cell_kernel3_ydir*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_advec_cell_kernel3_ydir*(y)+xdim6_advec_cell_kernel3_ydir*ydim6_advec_cell_kernel3_ydir*(z))
#define OPS_ACC7(x,y,z) (x+xdim7_advec_cell_kernel3_ydir*(y)+xdim7_advec_cell_kernel3_ydir*ydim7_advec_cell_kernel3_ydir*(z))
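// The OPS_ACC* macros flatten a relative (x, y, z) stencil offset into a 1D index using
// the pitches held in __constant__ memory above. For example, with
// xdim0_advec_cell_kernel3_ydir = 100, OPS_ACC0(0, -1, 0) evaluates to -100, i.e. the
// element one x-row below the thread's current pointer position.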
//user function
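// Second-order donor-cell advection in the y direction: the sign of vol_flux_y selects
// the upwind/donor/downwind offsets, and the SIGN/MIN expression acts as a flux limiter
// on the interpolated density and energy before the mass and energy fluxes are written.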
__device__
inline void advec_cell_kernel3_ydir( const double *vol_flux_y, const double *pre_vol, const int *yy,
const double *vertexdy,
const double *density1, const double *energy1 ,
double *mass_flux_y, double *ener_flux) {
double sigma, sigmat, sigmav, sigmam, sigma3, sigma4;
double diffuw, diffdw, limiter;
double one_by_six = 1.0/6.0;
int y_max=field.y_max;
int upwind,donor,downwind,dif;
if(vol_flux_y[OPS_ACC0(0,0,0)] > 0.0) {
upwind = -2;
donor = -1;
downwind = 0;
dif = donor;
}
else if (yy[OPS_ACC2(0,1,0)] < y_max+2-2) {
upwind = 1;
donor = 0;
downwind = -1;
dif = upwind;
} else {
upwind = 0;
donor = 0;
downwind = -1;
dif = upwind;
}
sigmat = fabs(vol_flux_y[OPS_ACC0(0,0,0)])/pre_vol[OPS_ACC1(0,donor,0)];
sigma3 = (1.0 + sigmat)*(vertexdy[OPS_ACC3(0,0,0)]/vertexdy[OPS_ACC3(0,dif,0)]);
sigma4 = 2.0 - sigmat;
sigma = sigmat;
sigmav = sigmat;
diffuw = density1[OPS_ACC4(0,donor,0)] - density1[OPS_ACC4(0,upwind,0)];
diffdw = density1[OPS_ACC4(0,downwind,0)] - density1[OPS_ACC4(0,donor,0)];
if( (diffuw*diffdw) > 0.0)
limiter=(1.0 - sigmav) * SIGN(1.0 , diffdw) *
MIN( MIN(fabs(diffuw), fabs(diffdw)),
one_by_six * (sigma3*fabs(diffuw) + sigma4 * fabs(diffdw)));
else
limiter=0.0;
mass_flux_y[OPS_ACC6(0,0,0)] = (vol_flux_y[OPS_ACC0(0,0,0)]) * ( density1[OPS_ACC4(0,donor,0)] + limiter );
sigmam = fabs(mass_flux_y[OPS_ACC6(0,0,0)])/( density1[OPS_ACC4(0,donor,0)] * pre_vol[OPS_ACC1(0,donor,0)]);
diffuw = energy1[OPS_ACC5(0,donor,0)] - energy1[OPS_ACC5(0,upwind,0)];
diffdw = energy1[OPS_ACC5(0,downwind,0)] - energy1[OPS_ACC5(0,donor,0)];
if( (diffuw*diffdw) > 0.0)
limiter = (1.0 - sigmam) * SIGN(1.0,diffdw) *
MIN( MIN(fabs(diffuw), fabs(diffdw)),
one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw)));
else
limiter=0.0;
ener_flux[OPS_ACC7(0,0,0)] = mass_flux_y[OPS_ACC6(0,0,0)] * ( energy1[OPS_ACC5(0,donor,0)] + limiter );
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
__global__ void ops_advec_cell_kernel3_ydir(
const double* __restrict arg0,
const double* __restrict arg1,
const int* __restrict arg2,
const double* __restrict arg3,
const double* __restrict arg4,
const double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_advec_cell_kernel3_ydir + idx_z * 1 * xdim0_advec_cell_kernel3_ydir * ydim0_advec_cell_kernel3_ydir;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_advec_cell_kernel3_ydir + idx_z * 1 * xdim1_advec_cell_kernel3_ydir * ydim1_advec_cell_kernel3_ydir;
arg2 += idx_x * 0 + idx_y * 1 * xdim2_advec_cell_kernel3_ydir + idx_z * 0 * xdim2_advec_cell_kernel3_ydir * ydim2_advec_cell_kernel3_ydir;
arg3 += idx_x * 0 + idx_y * 1 * xdim3_advec_cell_kernel3_ydir + idx_z * 0 * xdim3_advec_cell_kernel3_ydir * ydim3_advec_cell_kernel3_ydir;
arg4 += idx_x * 1 + idx_y * 1 * xdim4_advec_cell_kernel3_ydir + idx_z * 1 * xdim4_advec_cell_kernel3_ydir * ydim4_advec_cell_kernel3_ydir;
arg5 += idx_x * 1 + idx_y * 1 * xdim5_advec_cell_kernel3_ydir + idx_z * 1 * xdim5_advec_cell_kernel3_ydir * ydim5_advec_cell_kernel3_ydir;
arg6 += idx_x * 1 + idx_y * 1 * xdim6_advec_cell_kernel3_ydir + idx_z * 1 * xdim6_advec_cell_kernel3_ydir * ydim6_advec_cell_kernel3_ydir;
arg7 += idx_x * 1 + idx_y * 1 * xdim7_advec_cell_kernel3_ydir + idx_z * 1 * xdim7_advec_cell_kernel3_ydir * ydim7_advec_cell_kernel3_ydir;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_cell_kernel3_ydir(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
}
}
// host stub function
void ops_par_loop_advec_cell_kernel3_ydir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
ops_timing_realloc(35,"advec_cell_kernel3_ydir");
OPS_kernels[35].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0]*args[2].dat->dim;
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0]*args[3].dat->dim;
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0]*args[4].dat->dim;
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0]*args[5].dat->dim;
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0]*args[6].dat->dim;
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0]*args[7].dat->dim;
int ydim7 = args[7].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_advec_cell_kernel3_ydir_h || ydim0 != ydim0_advec_cell_kernel3_ydir_h || xdim1 != xdim1_advec_cell_kernel3_ydir_h || ydim1 != ydim1_advec_cell_kernel3_ydir_h || xdim2 != xdim2_advec_cell_kernel3_ydir_h || ydim2 != ydim2_advec_cell_kernel3_ydir_h || xdim3 != xdim3_advec_cell_kernel3_ydir_h || ydim3 != ydim3_advec_cell_kernel3_ydir_h || xdim4 != xdim4_advec_cell_kernel3_ydir_h || ydim4 != ydim4_advec_cell_kernel3_ydir_h || xdim5 != xdim5_advec_cell_kernel3_ydir_h || ydim5 != ydim5_advec_cell_kernel3_ydir_h || xdim6 != xdim6_advec_cell_kernel3_ydir_h || ydim6 != ydim6_advec_cell_kernel3_ydir_h || xdim7 != xdim7_advec_cell_kernel3_ydir_h || ydim7 != ydim7_advec_cell_kernel3_ydir_h) {
hipMemcpyToSymbol( xdim0_advec_cell_kernel3_ydir, &xdim0, sizeof(int) );
xdim0_advec_cell_kernel3_ydir_h = xdim0;
hipMemcpyToSymbol( ydim0_advec_cell_kernel3_ydir, &ydim0, sizeof(int) );
ydim0_advec_cell_kernel3_ydir_h = ydim0;
hipMemcpyToSymbol( xdim1_advec_cell_kernel3_ydir, &xdim1, sizeof(int) );
xdim1_advec_cell_kernel3_ydir_h = xdim1;
hipMemcpyToSymbol( ydim1_advec_cell_kernel3_ydir, &ydim1, sizeof(int) );
ydim1_advec_cell_kernel3_ydir_h = ydim1;
hipMemcpyToSymbol( xdim2_advec_cell_kernel3_ydir, &xdim2, sizeof(int) );
xdim2_advec_cell_kernel3_ydir_h = xdim2;
hipMemcpyToSymbol( ydim2_advec_cell_kernel3_ydir, &ydim2, sizeof(int) );
ydim2_advec_cell_kernel3_ydir_h = ydim2;
hipMemcpyToSymbol( xdim3_advec_cell_kernel3_ydir, &xdim3, sizeof(int) );
xdim3_advec_cell_kernel3_ydir_h = xdim3;
hipMemcpyToSymbol( ydim3_advec_cell_kernel3_ydir, &ydim3, sizeof(int) );
ydim3_advec_cell_kernel3_ydir_h = ydim3;
hipMemcpyToSymbol( xdim4_advec_cell_kernel3_ydir, &xdim4, sizeof(int) );
xdim4_advec_cell_kernel3_ydir_h = xdim4;
hipMemcpyToSymbol( ydim4_advec_cell_kernel3_ydir, &ydim4, sizeof(int) );
ydim4_advec_cell_kernel3_ydir_h = ydim4;
hipMemcpyToSymbol( xdim5_advec_cell_kernel3_ydir, &xdim5, sizeof(int) );
xdim5_advec_cell_kernel3_ydir_h = xdim5;
hipMemcpyToSymbol( ydim5_advec_cell_kernel3_ydir, &ydim5, sizeof(int) );
ydim5_advec_cell_kernel3_ydir_h = ydim5;
hipMemcpyToSymbol( xdim6_advec_cell_kernel3_ydir, &xdim6, sizeof(int) );
xdim6_advec_cell_kernel3_ydir_h = xdim6;
hipMemcpyToSymbol( ydim6_advec_cell_kernel3_ydir, &ydim6, sizeof(int) );
ydim6_advec_cell_kernel3_ydir_h = ydim6;
hipMemcpyToSymbol( xdim7_advec_cell_kernel3_ydir, &xdim7, sizeof(int) );
xdim7_advec_cell_kernel3_ydir_h = xdim7;
hipMemcpyToSymbol( ydim7_advec_cell_kernel3_ydir, &ydim7, sizeof(int) );
ydim7_advec_cell_kernel3_ydir_h = ydim7;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
int dat6 = args[6].dat->elem_size;
int dat7 = args[7].dat->elem_size;
char *p_a[8];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
#endif //OPS_MPI
int base2 = dat2 * 1 *
(start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d];
#endif //OPS_MPI
int base3 = dat3 * 1 *
(start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d];
#endif //OPS_MPI
int base4 = dat4 * 1 *
(start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d];
#endif //OPS_MPI
int base5 = dat5 * 1 *
(start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d];
#endif //OPS_MPI
int base6 = dat6 * 1 *
(start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d];
#endif //OPS_MPI
int base7 = dat7 * 1 *
(start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
(start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]);
p_a[7] = (char *)args[7].data_d + base7;
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
ops_timers_core(&c1,&t1);
OPS_kernels[35].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_advec_cell_kernel3_ydir), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7],x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[35].time += t2-t1;
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[6],range);
ops_set_halo_dirtybit3(&args[7],range);
//Update kernel record
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg1);
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg2);
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg3);
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg4);
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg5);
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg6);
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg7);
}
| dcb27619e317c993e42e4ddfae95e3130ba2b847.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_advec_cell_kernel3_ydir;
int xdim0_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim0_advec_cell_kernel3_ydir;
int ydim0_advec_cell_kernel3_ydir_h = -1;
__constant__ int xdim1_advec_cell_kernel3_ydir;
int xdim1_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim1_advec_cell_kernel3_ydir;
int ydim1_advec_cell_kernel3_ydir_h = -1;
__constant__ int xdim2_advec_cell_kernel3_ydir;
int xdim2_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim2_advec_cell_kernel3_ydir;
int ydim2_advec_cell_kernel3_ydir_h = -1;
__constant__ int xdim3_advec_cell_kernel3_ydir;
int xdim3_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim3_advec_cell_kernel3_ydir;
int ydim3_advec_cell_kernel3_ydir_h = -1;
__constant__ int xdim4_advec_cell_kernel3_ydir;
int xdim4_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim4_advec_cell_kernel3_ydir;
int ydim4_advec_cell_kernel3_ydir_h = -1;
__constant__ int xdim5_advec_cell_kernel3_ydir;
int xdim5_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim5_advec_cell_kernel3_ydir;
int ydim5_advec_cell_kernel3_ydir_h = -1;
__constant__ int xdim6_advec_cell_kernel3_ydir;
int xdim6_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim6_advec_cell_kernel3_ydir;
int ydim6_advec_cell_kernel3_ydir_h = -1;
__constant__ int xdim7_advec_cell_kernel3_ydir;
int xdim7_advec_cell_kernel3_ydir_h = -1;
__constant__ int ydim7_advec_cell_kernel3_ydir;
int ydim7_advec_cell_kernel3_ydir_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_advec_cell_kernel3_ydir*(y)+xdim0_advec_cell_kernel3_ydir*ydim0_advec_cell_kernel3_ydir*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_advec_cell_kernel3_ydir*(y)+xdim1_advec_cell_kernel3_ydir*ydim1_advec_cell_kernel3_ydir*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_advec_cell_kernel3_ydir*(y)+xdim2_advec_cell_kernel3_ydir*ydim2_advec_cell_kernel3_ydir*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_advec_cell_kernel3_ydir*(y)+xdim3_advec_cell_kernel3_ydir*ydim3_advec_cell_kernel3_ydir*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_advec_cell_kernel3_ydir*(y)+xdim4_advec_cell_kernel3_ydir*ydim4_advec_cell_kernel3_ydir*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_advec_cell_kernel3_ydir*(y)+xdim5_advec_cell_kernel3_ydir*ydim5_advec_cell_kernel3_ydir*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_advec_cell_kernel3_ydir*(y)+xdim6_advec_cell_kernel3_ydir*ydim6_advec_cell_kernel3_ydir*(z))
#define OPS_ACC7(x,y,z) (x+xdim7_advec_cell_kernel3_ydir*(y)+xdim7_advec_cell_kernel3_ydir*ydim7_advec_cell_kernel3_ydir*(z))
//user function
__device__
inline void advec_cell_kernel3_ydir( const double *vol_flux_y, const double *pre_vol, const int *yy,
const double *vertexdy,
const double *density1, const double *energy1 ,
double *mass_flux_y, double *ener_flux) {
double sigma, sigmat, sigmav, sigmam, sigma3, sigma4;
double diffuw, diffdw, limiter;
double one_by_six = 1.0/6.0;
int y_max=field.y_max;
int upwind,donor,downwind,dif;
if(vol_flux_y[OPS_ACC0(0,0,0)] > 0.0) {
upwind = -2;
donor = -1;
downwind = 0;
dif = donor;
}
else if (yy[OPS_ACC2(0,1,0)] < y_max+2-2) {
upwind = 1;
donor = 0;
downwind = -1;
dif = upwind;
} else {
upwind = 0;
donor = 0;
downwind = -1;
dif = upwind;
}
sigmat = fabs(vol_flux_y[OPS_ACC0(0,0,0)])/pre_vol[OPS_ACC1(0,donor,0)];
sigma3 = (1.0 + sigmat)*(vertexdy[OPS_ACC3(0,0,0)]/vertexdy[OPS_ACC3(0,dif,0)]);
sigma4 = 2.0 - sigmat;
sigma = sigmat;
sigmav = sigmat;
diffuw = density1[OPS_ACC4(0,donor,0)] - density1[OPS_ACC4(0,upwind,0)];
diffdw = density1[OPS_ACC4(0,downwind,0)] - density1[OPS_ACC4(0,donor,0)];
if( (diffuw*diffdw) > 0.0)
limiter=(1.0 - sigmav) * SIGN(1.0 , diffdw) *
MIN( MIN(fabs(diffuw), fabs(diffdw)),
one_by_six * (sigma3*fabs(diffuw) + sigma4 * fabs(diffdw)));
else
limiter=0.0;
mass_flux_y[OPS_ACC6(0,0,0)] = (vol_flux_y[OPS_ACC0(0,0,0)]) * ( density1[OPS_ACC4(0,donor,0)] + limiter );
sigmam = fabs(mass_flux_y[OPS_ACC6(0,0,0)])/( density1[OPS_ACC4(0,donor,0)] * pre_vol[OPS_ACC1(0,donor,0)]);
diffuw = energy1[OPS_ACC5(0,donor,0)] - energy1[OPS_ACC5(0,upwind,0)];
diffdw = energy1[OPS_ACC5(0,downwind,0)] - energy1[OPS_ACC5(0,donor,0)];
if( (diffuw*diffdw) > 0.0)
limiter = (1.0 - sigmam) * SIGN(1.0,diffdw) *
MIN( MIN(fabs(diffuw), fabs(diffdw)),
one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw)));
else
limiter=0.0;
ener_flux[OPS_ACC7(0,0,0)] = mass_flux_y[OPS_ACC6(0,0,0)] * ( energy1[OPS_ACC5(0,donor,0)] + limiter );
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
__global__ void ops_advec_cell_kernel3_ydir(
const double* __restrict arg0,
const double* __restrict arg1,
const int* __restrict arg2,
const double* __restrict arg3,
const double* __restrict arg4,
const double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_advec_cell_kernel3_ydir + idx_z * 1 * xdim0_advec_cell_kernel3_ydir * ydim0_advec_cell_kernel3_ydir;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_advec_cell_kernel3_ydir + idx_z * 1 * xdim1_advec_cell_kernel3_ydir * ydim1_advec_cell_kernel3_ydir;
arg2 += idx_x * 0 + idx_y * 1 * xdim2_advec_cell_kernel3_ydir + idx_z * 0 * xdim2_advec_cell_kernel3_ydir * ydim2_advec_cell_kernel3_ydir;
arg3 += idx_x * 0 + idx_y * 1 * xdim3_advec_cell_kernel3_ydir + idx_z * 0 * xdim3_advec_cell_kernel3_ydir * ydim3_advec_cell_kernel3_ydir;
arg4 += idx_x * 1 + idx_y * 1 * xdim4_advec_cell_kernel3_ydir + idx_z * 1 * xdim4_advec_cell_kernel3_ydir * ydim4_advec_cell_kernel3_ydir;
arg5 += idx_x * 1 + idx_y * 1 * xdim5_advec_cell_kernel3_ydir + idx_z * 1 * xdim5_advec_cell_kernel3_ydir * ydim5_advec_cell_kernel3_ydir;
arg6 += idx_x * 1 + idx_y * 1 * xdim6_advec_cell_kernel3_ydir + idx_z * 1 * xdim6_advec_cell_kernel3_ydir * ydim6_advec_cell_kernel3_ydir;
arg7 += idx_x * 1 + idx_y * 1 * xdim7_advec_cell_kernel3_ydir + idx_z * 1 * xdim7_advec_cell_kernel3_ydir * ydim7_advec_cell_kernel3_ydir;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_cell_kernel3_ydir(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
}
}
// host stub function
void ops_par_loop_advec_cell_kernel3_ydir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
ops_timing_realloc(35,"advec_cell_kernel3_ydir");
OPS_kernels[35].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0]*args[2].dat->dim;
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0]*args[3].dat->dim;
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0]*args[4].dat->dim;
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0]*args[5].dat->dim;
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0]*args[6].dat->dim;
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0]*args[7].dat->dim;
int ydim7 = args[7].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_advec_cell_kernel3_ydir_h || ydim0 != ydim0_advec_cell_kernel3_ydir_h || xdim1 != xdim1_advec_cell_kernel3_ydir_h || ydim1 != ydim1_advec_cell_kernel3_ydir_h || xdim2 != xdim2_advec_cell_kernel3_ydir_h || ydim2 != ydim2_advec_cell_kernel3_ydir_h || xdim3 != xdim3_advec_cell_kernel3_ydir_h || ydim3 != ydim3_advec_cell_kernel3_ydir_h || xdim4 != xdim4_advec_cell_kernel3_ydir_h || ydim4 != ydim4_advec_cell_kernel3_ydir_h || xdim5 != xdim5_advec_cell_kernel3_ydir_h || ydim5 != ydim5_advec_cell_kernel3_ydir_h || xdim6 != xdim6_advec_cell_kernel3_ydir_h || ydim6 != ydim6_advec_cell_kernel3_ydir_h || xdim7 != xdim7_advec_cell_kernel3_ydir_h || ydim7 != ydim7_advec_cell_kernel3_ydir_h) {
cudaMemcpyToSymbol( xdim0_advec_cell_kernel3_ydir, &xdim0, sizeof(int) );
xdim0_advec_cell_kernel3_ydir_h = xdim0;
cudaMemcpyToSymbol( ydim0_advec_cell_kernel3_ydir, &ydim0, sizeof(int) );
ydim0_advec_cell_kernel3_ydir_h = ydim0;
cudaMemcpyToSymbol( xdim1_advec_cell_kernel3_ydir, &xdim1, sizeof(int) );
xdim1_advec_cell_kernel3_ydir_h = xdim1;
cudaMemcpyToSymbol( ydim1_advec_cell_kernel3_ydir, &ydim1, sizeof(int) );
ydim1_advec_cell_kernel3_ydir_h = ydim1;
cudaMemcpyToSymbol( xdim2_advec_cell_kernel3_ydir, &xdim2, sizeof(int) );
xdim2_advec_cell_kernel3_ydir_h = xdim2;
cudaMemcpyToSymbol( ydim2_advec_cell_kernel3_ydir, &ydim2, sizeof(int) );
ydim2_advec_cell_kernel3_ydir_h = ydim2;
cudaMemcpyToSymbol( xdim3_advec_cell_kernel3_ydir, &xdim3, sizeof(int) );
xdim3_advec_cell_kernel3_ydir_h = xdim3;
cudaMemcpyToSymbol( ydim3_advec_cell_kernel3_ydir, &ydim3, sizeof(int) );
ydim3_advec_cell_kernel3_ydir_h = ydim3;
cudaMemcpyToSymbol( xdim4_advec_cell_kernel3_ydir, &xdim4, sizeof(int) );
xdim4_advec_cell_kernel3_ydir_h = xdim4;
cudaMemcpyToSymbol( ydim4_advec_cell_kernel3_ydir, &ydim4, sizeof(int) );
ydim4_advec_cell_kernel3_ydir_h = ydim4;
cudaMemcpyToSymbol( xdim5_advec_cell_kernel3_ydir, &xdim5, sizeof(int) );
xdim5_advec_cell_kernel3_ydir_h = xdim5;
cudaMemcpyToSymbol( ydim5_advec_cell_kernel3_ydir, &ydim5, sizeof(int) );
ydim5_advec_cell_kernel3_ydir_h = ydim5;
cudaMemcpyToSymbol( xdim6_advec_cell_kernel3_ydir, &xdim6, sizeof(int) );
xdim6_advec_cell_kernel3_ydir_h = xdim6;
cudaMemcpyToSymbol( ydim6_advec_cell_kernel3_ydir, &ydim6, sizeof(int) );
ydim6_advec_cell_kernel3_ydir_h = ydim6;
cudaMemcpyToSymbol( xdim7_advec_cell_kernel3_ydir, &xdim7, sizeof(int) );
xdim7_advec_cell_kernel3_ydir_h = xdim7;
cudaMemcpyToSymbol( ydim7_advec_cell_kernel3_ydir, &ydim7, sizeof(int) );
ydim7_advec_cell_kernel3_ydir_h = ydim7;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
int dat6 = args[6].dat->elem_size;
int dat7 = args[7].dat->elem_size;
char *p_a[8];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
#endif //OPS_MPI
int base2 = dat2 * 1 *
(start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d];
#endif //OPS_MPI
int base3 = dat3 * 1 *
(start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d];
#endif //OPS_MPI
int base4 = dat4 * 1 *
(start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d];
#endif //OPS_MPI
int base5 = dat5 * 1 *
(start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d];
#endif //OPS_MPI
int base6 = dat6 * 1 *
(start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d];
#endif //OPS_MPI
int base7 = dat7 * 1 *
(start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
(start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]);
p_a[7] = (char *)args[7].data_d + base7;
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
ops_timers_core(&c1,&t1);
OPS_kernels[35].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
ops_advec_cell_kernel3_ydir<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7],x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[35].time += t2-t1;
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[6],range);
ops_set_halo_dirtybit3(&args[7],range);
//Update kernel record
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg1);
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg2);
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg3);
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg4);
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg5);
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg6);
OPS_kernels[35].transfer += ops_compute_transfer(dim, range, &arg7);
}
|
9b5385ec5eeb20fa6421cda5ac3eb34c3ad0e3f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "su/pixel.hpp"
namespace sp{
// float *MG, int MG_steps, cols = 32
// 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22
// mx, my, ml, ma, mb, isx, isy, isl, isa, isb, vxy00, vxy01, vxy10, vxy11, vab00, vab01, vab10, vab11, isd, xxb, yyb, xxe, yye
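// kernel_init_theta writes one 19-float parameter block per seed (grid cell): each thread
// row stages the mean position/colour, inverse variances, covariance directions and the
// normalisation term isd in shared memory, then the first 19 lanes of the warp store them
// as one contiguous row of MG.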
template<int _BX/*must be 32*/, int _BY>
__global__ void kernel_init_theta(
float *MG, int MG_steps,
su::PixI *iC, int iC_steps,
int W, int H, int v_x, int v_y, int n_x, int n_y, int xhr, int yhr, int hxs, int hys,
float isx, float isy, float isl, float isa, float isb,
float vxy00, float vxy01, float vxy10, float vxy11,
float vab00, float vab01, float vab10, float vab11, float isd
){
int tx = threadIdx.x; // 0-31
int ty = threadIdx.y; //
__shared__ float smem[_BY][_BX];
int k_x = blockIdx.x;
int k_y = blockIdx.y*_BY + ty;
int k = k_x + k_y*n_x;
int fx = (xhr + k_x*v_x + hxs);
int fy = (yhr + k_y*v_y + hys);
// 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18
// mx, my, ml, ma, mb, isx, isy, isl, isa, isb, vxy00, vxy01, vxy10, vxy11, vab00, vab01, vab10, vab11, isd
if (k_x < n_x && k_y < n_y) {
su::PixI pix = iC[fy*iC_steps + fx]; // permute?
smem[ty][0] = fx;
smem[ty][1] = fy;
smem[ty][2] = pix.f0();
smem[ty][3] = pix.f1();
smem[ty][4] = pix.f2();// 5 is not initialized
smem[ty][5] = isx;
smem[ty][6] = isy;
smem[ty][7] = isl;
smem[ty][8] = isa;
smem[ty][9] = isb;
smem[ty][10] = vxy00;
smem[ty][11] = vxy01;
smem[ty][12] = vxy10;
smem[ty][13] = vxy11;
smem[ty][14] = vab00;
smem[ty][15] = vab01;
smem[ty][16] = vab10;
smem[ty][17] = vab11;
smem[ty][18] = isd;
}
__syncthreads();
if (k_x >= n_x || k_y >= n_y){
return;
}
if (tx < 19)
MG[k*MG_steps + tx] = smem[ty][tx];
}
void gpu_init_theta(float *MG, int MG_steps, su::PixI *iC, int iC_steps, int W, int H, int v_x, int v_y, float sl, float sa, float sb){
int n_x = W / v_x;
int n_y = H / v_y;
float isx = 1. / (v_x*v_x); // x
float isy = 1. / (v_y*v_y); // y
float isl = 1. / (sl*sl); // l
float isa = 1. / (sa*sa); // a
float isb = 1. / (sb*sb); // b
float vxy00 = 1., vxy01 = 0.; // direction on x
float vxy10 = 0., vxy11 = 1.; // direction on y
float vab00 = 1., vab01 = 0.; // direction on a
float vab10 = 0., vab11 = 1.; // direction on b
float isd = sqrt(isx * isy * isl * isa * isb);
int xhr = (W - n_x*v_x) >> 1;
int yhr = (H - n_y*v_y) >> 1;
int hxs = v_x >> 1;
int hys = v_y >> 1;
#define _BX 32
#define _BY 4
dim3 blocks(_BX, _BY);
dim3 grids;
grids.x = (n_x);
grids.y = (n_y + blocks.y - 1) / blocks.y;
kernel_init_theta<_BX, _BY> << <grids, blocks >> >(MG, MG_steps, iC, iC_steps, W, H, v_x, v_y, n_x, n_y, xhr, yhr, hxs, hys, isx, isy, isl, isa, isb, vxy00, vxy01, vxy10, vxy11, vab00, vab01, vab10, vab11, isd);
#undef _BX
#undef _BY
}
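// Illustrative host-side call (added; pointer names and the iC_steps value are
// assumptions): with MG laid out as n_x*n_y rows of 32 floats (matching MG_steps = 32)
// and iC holding the packed image, a call could look like
//   gpu_init_theta(d_MG, 32, d_image, W, W, H, /*v_x=*/16, /*v_y=*/16, 10.f, 10.f, 10.f);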
} | 9b5385ec5eeb20fa6421cda5ac3eb34c3ad0e3f0.cu | #include "su/pixel.hpp"
namespace sp{
// float *MG, int MG_steps, cols = 32
// 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22
// mx, my, ml, ma, mb, isx, isy, isl, isa, isb, vxy00, vxy01, vxy10, vxy11, vab00, vab01, vab10, vab11, isd, xxb, yyb, xxe, yye
template<int _BX/*must be 32*/, int _BY>
__global__ void kernel_init_theta(
float *MG, int MG_steps,
su::PixI *iC, int iC_steps,
int W, int H, int v_x, int v_y, int n_x, int n_y, int xhr, int yhr, int hxs, int hys,
float isx, float isy, float isl, float isa, float isb,
float vxy00, float vxy01, float vxy10, float vxy11,
float vab00, float vab01, float vab10, float vab11, float isd
){
int tx = threadIdx.x; // 0-31
int ty = threadIdx.y; //
__shared__ float smem[_BY][_BX];
int k_x = blockIdx.x;
int k_y = blockIdx.y*_BY + ty;
int k = k_x + k_y*n_x;
int fx = (xhr + k_x*v_x + hxs);
int fy = (yhr + k_y*v_y + hys);
// 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18
// mx, my, ml, ma, mb, isx, isy, isl, isa, isb, vxy00, vxy01, vxy10, vxy11, vab00, vab01, vab10, vab11, isd
if (k_x < n_x && k_y < n_y) {
su::PixI pix = iC[fy*iC_steps + fx]; // permute?
smem[ty][0] = fx;
smem[ty][1] = fy;
smem[ty][2] = pix.f0();
smem[ty][3] = pix.f1();
smem[ty][4] = pix.f2();// 5 is not initialized
smem[ty][5] = isx;
smem[ty][6] = isy;
smem[ty][7] = isl;
smem[ty][8] = isa;
smem[ty][9] = isb;
smem[ty][10] = vxy00;
smem[ty][11] = vxy01;
smem[ty][12] = vxy10;
smem[ty][13] = vxy11;
smem[ty][14] = vab00;
smem[ty][15] = vab01;
smem[ty][16] = vab10;
smem[ty][17] = vab11;
smem[ty][18] = isd;
}
__syncthreads();
if (k_x >= n_x || k_y >= n_y){
return;
}
if (tx < 19)
MG[k*MG_steps + tx] = smem[ty][tx];
}
void gpu_init_theta(float *MG, int MG_steps, su::PixI *iC, int iC_steps, int W, int H, int v_x, int v_y, float sl, float sa, float sb){
int n_x = W / v_x;
int n_y = H / v_y;
float isx = 1. / (v_x*v_x); // x
float isy = 1. / (v_y*v_y); // y
float isl = 1. / (sl*sl); // l
float isa = 1. / (sa*sa); // a
float isb = 1. / (sb*sb); // b
float vxy00 = 1., vxy01 = 0.; // direction on x
float vxy10 = 0., vxy11 = 1.; // direction on y
float vab00 = 1., vab01 = 0.; // direction on a
float vab10 = 0., vab11 = 1.; // direction on b
float isd = sqrt(isx * isy * isl * isa * isb);
int xhr = (W - n_x*v_x) >> 1;
int yhr = (H - n_y*v_y) >> 1;
int hxs = v_x >> 1;
int hys = v_y >> 1;
#define _BX 32
#define _BY 4
dim3 blocks(_BX, _BY);
dim3 grids;
grids.x = (n_x);
grids.y = (n_y + blocks.y - 1) / blocks.y;
kernel_init_theta<_BX, _BY> << <grids, blocks >> >(MG, MG_steps, iC, iC_steps, W, H, v_x, v_y, n_x, n_y, xhr, yhr, hxs, hys, isx, isy, isl, isa, isb, vxy00, vxy01, vxy10, vxy11, vab00, vab01, vab10, vab11, isd);
#undef _BX
#undef _BY
}
} |
dd3322ddf5038311bd50a63bd553933b4f75d549.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"
__global__ void kernel(int *image, int *bicub, int N, int bicImgRows, int bicImCols, int imgRows, int imgCols)
{
int j = 1 + threadIdx.x + blockIdx.x * blockDim.x;
int i = 1 + threadIdx.y + blockIdx.y * blockDim.y;
/*int i = threadIdx.x;
int j = blockIdx.y;*/
//printf("In kernel");
//for (int j = 1; j < bicImgRows-1; j++)
while (j < bicImgRows - 2)
{
float tmp = (float) (j) / (float) (bicImgRows - 1) * (imgRows - 1);
int h = (int) floor(tmp);
if (h < 1)
{
h = 1;
}
else
{
if (h >= imgRows - 2)
{
h = imgRows - 3;
}
}
float u = tmp - h;
//for (int i = 1; i < bicImCols-1; i++)
while ( i < bicImCols - 2)
{
tmp = (float) (i) / (float) (bicImCols - 1) * (imgCols - 1);
int w = (int) floor(tmp);
if (w < 1)
{
w = 1;
}
else
{
if (w >= imgCols - 2)
{
w = imgCols - 3;
}
}
float t = tmp - w;
// Coefficients
float b1 = (1.0f/4)*(u - 1)*(u - 2)*(u + 1)*(t - 1)*(t - 2)*(t + 1);
float b2 = - (1.0f/4) *(u)* (u + 1)*(u - 2)*(t - 1)*(t - 2)*(t + 1) ;
float b3 = - (1.0f/4)*(t)*(u - 1)*(u - 2)*(u + 1)*(t + 1)*(t - 2) ;
float b4 = (1.0f/4)*(u)*(t)*(u + 1)*(u - 2)*(t + 1)*(t - 2) ;
float b5 = - (1.0f/12)*(u)*(u - 1)*(u - 2)*(t - 1)*(t - 2)*(t + 1) ;
float b6 = - (1.0f/12)*(t)*(u - 1)*(u - 2)*(u + 1)*(t - 1)*(t - 2) ;
float b7 = (1.0f/12)*(u)*(t)*(u - 1)*(u - 2)*(t + 1)*(t - 2) ;
float b8 = (1.0f/12)*(u)*(t)*(u + 1)*(u - 2)*(t - 1)*(t - 2) ;
float b9 = (1.0f/12)*(u)*(u - 1)*(u + 1)*(t - 1)*(t - 2)*(t + 1) ;
float b10 = (1.0f/12)*(t)*(u - 1)*(u - 2)*(u + 1)*(t - 1)*(t + 1) ;
float b11 = (1.0f/36)*(u)*(t)*(u - 1)*(u - 2)*(t - 1)*(t - 2) ;
float b12 = - (1.0f/12)*(u)*(t)*(u - 1)*(u + 1)*(t + 1)*(t - 2) ;
float b13 = - (1.0f/12)*(u)*(t)*(u + 1)*(u - 2)*(t - 1)*(t + 1) ;
float b14 = - (1.0f/36)*(u)*(t)*(u - 1)*(u + 1)*(t - 1)*(t - 2) ;
float b15 = - (1.0f/36)*(u)*(t)*(u - 1)*(u - 2)*(t - 1)*(t + 1) ;
float b16 = (1.0f/36)*(u)*(t)*(u - 1)*(u + 1)*(t - 1)*(t + 1) ;
// Neighboring pixels: a[i][j]
float p1 = image[h * imgCols + w];
float p2 = image[(h+1) * imgCols + w];
float p3 = image[h * imgCols + w+1];
float p4 = image[(h+1) * imgCols + w+1];
float p5 = image[(h-1) * imgCols + w];
float p6 = image[h * imgCols + w-1];
float p7 = image[(h-1) * imgCols + w+1];
float p8 = image[(h+1) * imgCols + w-1];
float p9 = image[(h+2) * imgCols + w];
float p10 = image[(h) * imgCols + w+2];
float p11= image[(h-1) * imgCols + w-1];
float p12= image[(h+2) * imgCols + w+1];
float p13= image[(h+1) * imgCols + w+2];
float p14= image[(h+2) * imgCols + w-1];
float p15= image[(h-1) * imgCols + w+2];
float p16= image[(h+2) * imgCols + w+2];
// Components
float newPixel = p1 *b1 + p2 *b2 + p3 *b3 + p4 *b4 + p5 *b5+ p6 *b6+ p7 *b7+ p8 *b8+ p9 *b9+ p10 *b10+ p11 *b11+ p12 *b12+ p13 *b13+ p14 *b14+ p15 *b15+ p16 *b16;
// New pixel
bicub[j *bicImCols + i] = abs(newPixel);
i += blockDim.y * gridDim.y;
}
j += blockDim.x * gridDim.x;
}
}
void StartCuda(int *image, int *bicub, int N, int bicImgRows, int bicImCols, int imgRows, int imgCols)
{
dim3 block( (bicImgRows + 511) / 512, (bicImCols + 511) / 512);
dim3 grid(512, 512);
//dim3 grid(2560, 2560);
//dim3 block(16, 16);
printf("\nstart kernel");
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, image, bicub, N, bicImgRows, bicImCols, imgRows, imgCols);
//kernel<<<1024, 1024>>>(image, bicub, N, bicImgRows, bicImCols, imgRows, imgCols);
}
| dd3322ddf5038311bd50a63bd553933b4f75d549.cu | #include "kernel.cuh"
__global__ void kernel(int *image, int *bicub, int N, int bicImgRows, int bicImCols, int imgRows, int imgCols)
{
int j = 1 + threadIdx.x + blockIdx.x * blockDim.x;
int i = 1 + threadIdx.y + blockIdx.y * blockDim.y;
/*int i = threadIdx.x;
int j = blockIdx.y;*/
//printf("In kernel");
//for (int j = 1; j < bicImgRows-1; j++)
while (j < bicImgRows - 2)
{
float tmp = (float) (j) / (float) (bicImgRows - 1) * (imgRows - 1);
int h = (int) floor(tmp);
if (h < 1)
{
h = 1;
}
else
{
if (h >= imgRows - 2)
{
h = imgRows - 3;
}
}
float u = tmp - h;
//for (int i = 1; i < bicImCols-1; i++)
while ( i < bicImCols - 2)
{
tmp = (float) (i) / (float) (bicImCols - 1) * (imgCols - 1);
int w = (int) floor(tmp);
if (w < 1)
{
w = 1;
}
else
{
if (w >= imgCols - 2)
{
w = imgCols - 3;
}
}
float t = tmp - w;
// Coefficients
float b1 = (1.0f/4)*(u - 1)*(u - 2)*(u + 1)*(t - 1)*(t - 2)*(t + 1);
float b2 = - (1.0f/4) *(u)* (u + 1)*(u - 2)*(t - 1)*(t - 2)*(t + 1) ;
float b3 = - (1.0f/4)*(t)*(u - 1)*(u - 2)*(u + 1)*(t + 1)*(t - 2) ;
float b4 = (1.0f/4)*(u)*(t)*(u + 1)*(u - 2)*(t + 1)*(t - 2) ;
float b5 = - (1.0f/12)*(u)*(u - 1)*(u - 2)*(t - 1)*(t - 2)*(t + 1) ;
float b6 = - (1.0f/12)*(t)*(u - 1)*(u - 2)*(u + 1)*(t - 1)*(t - 2) ;
float b7 = (1.0f/12)*(u)*(t)*(u - 1)*(u - 2)*(t + 1)*(t - 2) ;
float b8 = (1.0f/12)*(u)*(t)*(u + 1)*(u - 2)*(t - 1)*(t - 2) ;
float b9 = (1.0f/12)*(u)*(u - 1)*(u + 1)*(t - 1)*(t - 2)*(t + 1) ;
float b10 = (1.0f/12)*(t)*(u - 1)*(u - 2)*(u + 1)*(t - 1)*(t + 1) ;
float b11 = (1.0f/36)*(u)*(t)*(u - 1)*(u - 2)*(t - 1)*(t - 2) ;
float b12 = - (1.0f/12)*(u)*(t)*(u - 1)*(u + 1)*(t + 1)*(t - 2) ;
float b13 = - (1.0f/12)*(u)*(t)*(u + 1)*(u - 2)*(t - 1)*(t + 1) ;
float b14 = - (1.0f/36)*(u)*(t)*(u - 1)*(u + 1)*(t - 1)*(t - 2) ;
float b15 = - (1.0f/36)*(u)*(t)*(u - 1)*(u - 2)*(t - 1)*(t + 1) ;
float b16 = (1.0f/36)*(u)*(t)*(u - 1)*(u + 1)*(t - 1)*(t + 1) ;
// Neighboring pixels: a[i][j]
float p1 = image[h * imgCols + w];
float p2 = image[(h+1) * imgCols + w];
float p3 = image[h * imgCols + w+1];
float p4 = image[(h+1) * imgCols + w+1];
float p5 = image[(h-1) * imgCols + w];
float p6 = image[h * imgCols + w-1];
float p7 = image[(h-1) * imgCols + w+1];
float p8 = image[(h+1) * imgCols + w-1];
float p9 = image[(h+2) * imgCols + w];
float p10 = image[(h) * imgCols + w+2];
float p11= image[(h-1) * imgCols + w-1];
float p12= image[(h+2) * imgCols + w+1];
float p13= image[(h+1) * imgCols + w+2];
float p14= image[(h+2) * imgCols + w-1];
float p15= image[(h-1) * imgCols + w+2];
float p16= image[(h+2) * imgCols + w+2];
// Components
float newPixel = p1 *b1 + p2 *b2 + p3 *b3 + p4 *b4 + p5 *b5+ p6 *b6+ p7 *b7+ p8 *b8+ p9 *b9+ p10 *b10+ p11 *b11+ p12 *b12+ p13 *b13+ p14 *b14+ p15 *b15+ p16 *b16;
// New pixel
bicub[j *bicImCols + i] = abs(newPixel);
i += blockDim.y * gridDim.y;
}
j += blockDim.x * gridDim.x;
}
}
void StartCuda(int *image, int *bicub, int N, int bicImgRows, int bicImCols, int imgRows, int imgCols)
{
dim3 block( (bicImgRows + 511) / 512, (bicImCols + 511) / 512);
dim3 grid(512, 512);
//dim3 grid(2560, 2560);
//dim3 block(16, 16);
printf("\nstart kernel");
kernel<<<grid, block>>>(image, bicub, N, bicImgRows, bicImCols, imgRows, imgCols);
//kernel<<<1024, 1024>>>(image, bicub, N, bicImgRows, bicImCols, imgRows, imgCols);
}
|
e354043600fed8906480f635f7f772ae0dd9bed4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <string>
#include <cfloat>
#include <ctime>
#include <limits>
#include <algorithm>
#include <stack>
#include <queue>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <device_launch_parameters.h>
#include "Camera.cuh"
#include "Scene.cuh"
#include "Node.cuh"
#include "filters.hh"
#define STB_IMAGE_STATIC
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
#define cuRandom (hiprand_uniform(&local_random))
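// cuRandom expands to one uniform sample from a hiprand state variable named
// `local_random`, so such a variable must be in scope wherever the macro is used.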
void error(const char *message) {
std::cout << message << std::endl;
exit(0);
}
void format() {
std::cout << "File format for scene." << std::endl;
std::cout << "\t # Comment, skip line." << std::endl;
std::cout << "Spheres -> type center material" << std::endl;
std::cout << "\t 1 Indicates that the 3D model is a Sphere object." << std::endl;
std::cout << "\t Center The center of the Sphere." << std::endl;
std::cout << "\t Radius The radius of the Sphere." << std::endl;
std::cout << "\t Material -> type albedo [fuzz] [ref_idx]" << std::endl;
std::cout << "\t\t 0 LAMBERTIAN" << std::endl;
std::cout << "\t\t 1 METAL" << std::endl;
std::cout << "\t\t 2 DIELECTRIC" << std::endl;
std::cout << "\t\t 3 DIFFUSE LIGHT" << std::endl;
std::cout << "\t\t albedo Defines the color." << std::endl;
std::cout << "\t\t fuzz Only for METAL." << std::endl;
std::cout << "\t\t ref_idx Only for DIELECTRIC." << std::endl;
std::cout << "Examples of declaration:\n" << std::endl;
std::cout << "# my scene" << std::endl;
std::cout << "Object Center Rad Material Albedo Fuzz/ref_idx" << std::endl;
std::cout << "1 0 1 0 2 1 0.5 0.78 0.9 " << std::endl;
std::cout << "1 0 4 0 2 2 1 0 0.9 2 " << std::endl;
std::cout << "1 1 4 1 2 3 0.9 0.9 0.9 1.5 " << std::endl;
}
void help(){
std::cout << "\n" << std::endl;
std::cout << "\t[-d] [--defult] Set the parameters to default values" << std::endl;
std::cout << "\t size: (1280x720) | AAit: 50 | depth: 50 | spheres: 11 | nthreads: 32" << std::endl;
std::cout << "\t[-sizeX] Size in pixels of coordinate X. Number greater than 0." << std::endl;
std::cout << "\t[-sizeY] Size in pixels of coordinate Y. Number greater than 0." << std::endl;
std::cout << "\t[-AAit] Number of iterations to calculate color in one pixel. Number greater than 0." << std::endl;
std::cout << "\t[-depth] The attenuation of scattered ray. Number greater than 0." << std::endl;
// std::cout << "\t[-spheres] Factor number to calculate the number of spheres in the scene. Number greater than 0." << std::endl;
std::cout << "\t[-light] Turn on/off the ambient light. Values can be ON/OFF" << std::endl;
std::cout << "\t[-nthreads] Number of threads to use" << std::endl;
std::cout << "\t[-nGPUs] Number of GPUs to distribute the work" << std::endl;
std::cout << "\t[-i][--image] File name of pic generated." << std::endl;
std::cout << "\t[-f][--file] File name of the scene." << std::endl;
std::cout << "\t[-h][--help] Show help." << std::endl;
std::cout << "\t #spheres = (2*spheres)*(2*spheres) + 4" << std::endl;
std::cout << "\n" << std::endl;
std::cout << "Examples of usage:" << std::endl;
std::cout << "./path_tracing_NGPUs -d" << std::endl;
std::cout << "./path_tracing_NGPUs -nthreads 16 -sizeX 2000"<< std::endl;
format();
exit(1);
}
void parse_argv(int argc, char **argv, int &nx, int &ny, int &ns, int &depth, int &dist, std::string &image, std::string &filename, bool &light, bool &random, bool &filter, int &diameterBi, float &gs, float &gr, int &diameterMean, int &diameterMedian, bool &skybox, bool &oneTex, int &nthreads , int &numGPUs, const int count){
if(argc <= 1) error("Error usage. Use [-h] [--help] to see the usage.");
nx = 1280; ny = 720; ns = 50; depth = 50; dist = 11; image = "image";
filter = false; gs = 0; gr = 0; diameterBi = 11; diameterMean = 3; diameterMedian = 3;
skybox = false; oneTex = false;
light = true; random = true;
bool imageName = false;
nthreads = 32; numGPUs = 1;
bool v_default = false;
for(int i = 1; i < argc; i += 2){
if(v_default) error("Error usage. Use [-h] [--help] to see the usage.");
if (std::string(argv[i]) == "-d" || std::string(argv[i]) == "--default"){
if((i+1) < argc) error("The default parameter cannot have more arguments.");
std::cerr << "Default\n";
v_default = true;
}
else if (std::string(argv[i]) == "-sizeX"){
if((i+1) >= argc) error("-sizeX value expected");
nx = atoi(argv[i+1]);
if(nx == 0) error("-sizeX value expected or cannot be 0");
}
else if(std::string(argv[i]) == "-sizeY"){
if((i+1) >= argc) error("-sizeY value expected");
ny = atoi(argv[i+1]);
if(ny == 0) error("-sizeY value expected or cannot be 0");
}
else if(std::string(argv[i]) == "-AAit"){
if((i+1) >= argc) error("-AAit value expected");
ns = atoi(argv[i+1]);
if(ns == 0) error("-AAit value expected or cannot be 0");
}
else if(std::string(argv[i]) == "-depth"){
if((i+1) >= argc) error("-depth value expected");
depth = atoi(argv[i+1]);
if(depth == 0) error("-depth value expected or cannot be 0");
}
else if(std::string(argv[i]) == "-i" || std::string(argv[i]) == "--image"){
if((i+1) >= argc) error("--image / -i file expected");
filename = std::string(argv[i+1]);
imageName = true;
}
else if(std::string(argv[i]) == "-f" || std::string(argv[i]) == "--file"){
if((i+1) >= argc) error("-name file expected");
filename = std::string(argv[i+1]);
if(!imageName) image = filename;
filename = filename+".txt";
random = false;
}
else if(std::string(argv[i]) == "-light") {
if((i+1) >= argc) error("-light value expected");
if(std::string(argv[i+1]) == "ON") light = true;
else if(std::string(argv[i+1]) == "OFF") light = false;
}
else if (std::string(argv[i]) == "-filter") {
filter = true;
diameterBi = atoi(argv[i+1]);
i += 2;
gs = atof(argv[i]);
gr = atof(argv[i+1]);
i+=2;
diameterMean = atoi(argv[i]);
diameterMedian = atoi(argv[i+1]);
}
else if(std::string(argv[i]) == "-skybox") {
if((i+1) >= argc) error("-skybox value expected");
if(std::string(argv[i+1]) == "ON") skybox = true;
else if(std::string(argv[i+1]) == "OFF") skybox = false;
}
else if(std::string(argv[i]) == "-oneTex") {
if((i+1) >= argc) error("-oneTex value expected");
if(std::string(argv[i+1]) == "ON") oneTex = true;
else if(std::string(argv[i+1]) == "OFF") oneTex = false;
}
else if(std::string(argv[i]) == "-nGPUs"){
if((i+1) >= argc) error("-nGPUs value expected");
numGPUs = atoi(argv[i+1]);
if(numGPUs == 0) error("-nGPUs value expected or cannot be 0");
numGPUs = ::min(numGPUs, count);
}
else if(std::string(argv[i]) == "-nthreads"){
if((i+1) >= argc) error("-nthreads value expected");
nthreads = atoi(argv[i+1]);
if(nthreads == 0) error("-nthreads value expected or cannot be 0");
}
else if(std::string(argv[i]) == "-h" || std::string(argv[i]) == "--help" ){
help();
}
else{
error("Error usage. Use [-h] [--help] to see the usage.");
}
}
if(!light) image = image+"_noktem";
image = image+".png";
}
void check_cuda(hipError_t result, char const *const func, const char *const file, int const line){
if(result){
std::cout << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << std::endl;
std::cout << hipGetErrorString(result) << std::endl;
hipDeviceReset();
exit(99);
}
}
void properties(){
std::cout << "GPU Info " << std::endl;
hipSetDevice(0);
int device;
hipGetDevice(&device);
hipDeviceProp_t properties;
//checkCudaErrors( hipDeviceSetLimit( hipLimitMallocHeapSize, 67108864 ) );
checkCudaErrors( hipDeviceSetLimit( hipLimitStackSize, 131072 ) );
checkCudaErrors( hipGetDeviceProperties( &properties, device ) );
size_t limit1;
checkCudaErrors( hipDeviceGetLimit( &limit1, hipLimitMallocHeapSize ) );
size_t limit2;
checkCudaErrors( hipDeviceGetLimit( &limit2, hipLimitStackSize ) );
if( properties.major > 3 || ( properties.major == 3 && properties.minor >= 5 ) ) {
std::cout << "Running on GPU " << device << " (" << properties.name << ")" << std::endl;
std::cout << "Compute mode: " << properties.computeMode << std::endl;
std::cout << "Concurrent Kernels: " << properties.concurrentKernels << std::endl;
std::cout << "Warp size: " << properties.warpSize << std::endl;
std::cout << "Major: " << properties.major << " Minor: " << properties.minor << std::endl;
std::cout << "Cuda limit heap size: " << limit1 << std::endl;
std::cout << "Cuda limit stack size: " << limit2 << "\n\n" << std::endl;
}
else std::cout << "GPU " << device << " (" << properties.name << ") does not support CUDA Dynamic Parallelism" << std::endl;
}
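// Iterative path tracer: follows the ray for up to `depth` bounces, accumulating material
// attenuation and emitted light; on a miss it samples the skybox, the ambient sky gradient,
// or returns black when the ambient light is disabled.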
__device__ Vector3 color(const Ray& ray, Node *world, int depth, bool light, bool skybox, hiprandState_t *random, Skybox *sky, bool oneTex, unsigned char **d_textures){
Ray cur_ray = ray;
Vector3 cur_attenuation = Vector3::One();
for(int i = 0; i < depth; i++){
hit_record rec;
if( world->intersect(cur_ray, 0.00001, FLT_MAX, rec) ) {
Ray scattered;
Vector3 attenuation;
Vector3 emitted = rec.mat_ptr.emitted(rec.u, rec.v, oneTex, d_textures);
if(rec.mat_ptr.scatter(cur_ray, rec, attenuation, scattered, random, oneTex, d_textures)){
cur_attenuation *= attenuation;
cur_attenuation += emitted;
cur_ray = scattered;
}
else return cur_attenuation * emitted;
}
else {
if(skybox && sky->hit(cur_ray, 0.00001, FLT_MAX, rec)){
return cur_attenuation * rec.mat_ptr.emitted(rec.u, rec.v, oneTex, d_textures);
}
else {
if(light) {
Vector3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5*(unit_direction.y() + 1.0);
Vector3 c = (1.0 - t)*Vector3::One() + t*Vector3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
else return Vector3::Zero();
}
}
}
return Vector3::Zero();
}
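// Length of the common leading-bit prefix of the Morton codes of leaves i and j
// (the delta operator of Karras-style LBVH construction); returns -1 for out-of-range indices.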
__device__ int LongestCommonPrefix(int i, int j, int numObjects, Triangle *d_list) {
if(i < 0 or i > numObjects - 1 or j < 0 or j > numObjects - 1) return -1;
int codeI = d_list[i].getMorton();
int codeJ = d_list[j].getMorton();
if(i == j) {
printf("Equals Longest\n");
return __clz(codeI ^ codeJ);
}
else return __clz(codeI ^ codeJ);
}
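// Binary search for the split position in [first, last]: the highest leaf index that still
// shares the longest common Morton-code prefix with `first`.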
__device__ int findSplit(Triangle *d_list, int first, int last) {
if(first == last){
return -1;
}
int firstCode = d_list[first].getMorton();
int lastCode = d_list[last].getMorton();
int commonPrefix = __clz(firstCode ^ lastCode);
int split = first;
int step = last - first;
do {
step = (step + 1 ) >> 1;
int newSplit = split + step;
if(newSplit < last){
int splitCode = d_list[newSplit].getMorton();
int splitPrefix = __clz(firstCode ^ splitCode);
if(splitPrefix > commonPrefix){
split = newSplit;
}
}
} while (step > 1);
return split;
}
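// Determines the range of leaves covered by internal node `idx`: expands towards the
// neighbour with the longer common prefix, then binary-searches the far end of the range.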
__device__ int2 determineRange(Triangle *d_list, int idx, int objs) {
int d = LongestCommonPrefix(idx, idx + 1, objs, d_list) -
LongestCommonPrefix(idx, idx - 1, objs, d_list) >= 0 ? 1 : -1;
int dmin = LongestCommonPrefix(idx, idx - d, objs, d_list);
int lmax = 2;
while(LongestCommonPrefix(idx, idx + lmax*d, objs, d_list) > dmin){
lmax <<=1;
}
int l = 0;
int div = 2;
for(int t = lmax/div; t >= 1; t >>= 1) {
if(LongestCommonPrefix(idx, idx + (l + t) * d, objs, d_list) > dmin) l += t;
}
int jdx = idx + l * d;
if(jdx < idx) return make_int2(jdx,idx);
else return make_int2(idx,jdx);
}
__global__ void setupCamera(Camera **d_cam, int nx, int ny, Camera cam) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
*d_cam = new Camera(cam.getLookfrom(), cam.getLookat(), cam.getVUP(), cam.getFOV(), float(nx)/float(ny), cam.getAperture(), cam.getFocus(),0.0,0.1);
}
}
__global__ void render_init(int max_x, int max_y, hiprandState_t *rand_state,unsigned long long seed) {
int num = blockIdx.x*blockDim.x + threadIdx.x;
int i = num%max_x;
int j = num/max_x;
if( (i >= max_x) || (j >= max_y) ) return;
int pixel_index = num;
hiprand_init((seed << 20) + pixel_index, 0, 0, &rand_state[pixel_index]);
}
__global__ void initLeafNodes(Node *leafNodes, int objs, Triangle *d_list) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= objs) return;
leafNodes[idx].obj = &d_list[idx];
leafNodes[idx].box = d_list[idx].getBox();
}
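// Bottom-up bounding-box pass: each thread starts at a leaf and climbs towards the root,
// merging child AABBs; the atomic counter lets only the second thread to reach a node continue.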
__global__ void boundingBoxBVH(Node *d_internalNodes, Node *d_leafNodes, int objs, int nodes, int *nodeCounter) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= objs) return;
Node *leaf = d_leafNodes + idx;
Node* current = leaf->parent;
int currentIdx = current - d_internalNodes;
int res = atomicAdd(nodeCounter + currentIdx, 1);
while (true) {
if(res == 0) return;
aabb leftBoundingBox = current->left->box;
aabb rightBoundingBox = current->right->box;
current->box = surrounding_box(leftBoundingBox, rightBoundingBox);
if (current == d_internalNodes) {
return;
}
current = current->parent;
currentIdx = current - d_internalNodes;
res = atomicAdd(nodeCounter + currentIdx, 1);
}
}
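// Builds the internal-node hierarchy: each internal node determines its leaf range and split
// point from the Morton codes and links the corresponding left/right children.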
__global__ void constructBVH(Node *d_internalNodes, Node *leafNodes, int objs, Triangle *d_list) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= objs) return;
int2 range = determineRange(d_list, idx, objs+1);
int first = range.x;
int last = range.y;
int split = findSplit(d_list, first, last);
if(split == -1){
split = (first+last) >> 1;
++last;
}
Node *current = d_internalNodes + idx;
if(split == first) {
current->left = leafNodes + split;
current->left->isLeaf = true;
current->left->isLeft = true;
(leafNodes + split)->parent = current;
}
else{
current->left = d_internalNodes + split;
current->left->isLeft = true;
(d_internalNodes + split)->parent = current;
}
if (split + 1 == last) {
current->right = leafNodes + split + 1;
current->right->isLeaf = true;
current->right->isRight = true;
(leafNodes + split + 1)->parent = current;
}
else{
current->right = d_internalNodes + split + 1;
current->right->isRight = true;
(d_internalNodes + split + 1)->parent = current;
}
}
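// Main render kernel: one thread per pixel, `ns` jittered camera rays per pixel; the averaged
// colour is gamma-corrected (sqrt) before being written to the framebuffer.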
__global__ void render(Vector3 *fb, int max_x, int max_y, int ns, Camera **cam, Node *world, hiprandState_t *d_rand_state, int depth, bool light, bool skybox, Skybox *sky, bool oneTex, unsigned char ** d_textures) {
int num = blockIdx.x*blockDim.x + threadIdx.x;
int i = num%max_x;
int j = num/max_x;
hiprandState_t local_random;
int pixel_index = num;
local_random = d_rand_state[pixel_index];
Vector3 col(0,0,0);
for(int s = 0; s < ns; s++){
float u = float(i + cuRandom) / float(max_x);
float v = float(j + cuRandom) / float(max_y);
Ray r = (*cam)->get_ray(u, v, &local_random);
col += color(r, world, depth, light, skybox, &local_random, sky, oneTex, d_textures);
}
d_rand_state[pixel_index] = local_random;
col /= float(ns);
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col;
}
__global__ void checkBVH(Node *d_internalNodes, Node *d_leaves, int objs){
if (threadIdx.x == 0 && blockIdx.x == 0){
printf("Checking BVH...\n");
for(int i = 0; i < objs; i++){
if(!d_leaves[i].parent){
printf("Leaf without parent %d\n",i);
}
}
for(int i = 0; i < objs-1; i++){
if(!d_internalNodes[i].left){
printf("Internal without left %d\n",i);
}
if(!d_internalNodes[i].right){
printf("Internal without right %d\n",i);
}
if(!d_internalNodes[i].parent){
printf("Internal without parent %d\n",i);
}
}
printf("BVH checked!\n");
}
}
int main(int argc, char **argv) {
hipDeviceReset();
properties();
hipEvent_t E0, E1;
hipEventCreate(&E0);
hipEventCreate(&E1);
float totalTime;
int nx, ny, ns, depth, dist, diameterBi, diameterMean, diameterMedian, nthreads, numGPUs;
bool light, random, filter, skybox, oneTex;
float gs, gr;
std::string filename, image;
parse_argv(argc, argv, nx, ny, ns, depth, dist, image, filename, light, random, filter, diameterBi, gs, gr, diameterMean, diameterMedian, skybox, oneTex, nthreads, numGPUs, 1);
/* Seed for CUDA cuRandom */
unsigned long long int seed = 1000;
/* #pixels of the image */
int num_pixels = nx*ny;
int size = 0;
int num_textures = 0;
/* Host variables */
float fb_size = num_pixels*sizeof(Vector3);
float drand_size = num_pixels*sizeof(hiprandState_t);
float cam_size = sizeof(Camera*);
Vector3 *h_frameBuffer;
Node *h_internalNodes;
int *h_nodeCounter;
int blocks = (nx * ny)/(numGPUs * nthreads);
/* Create world */
Scene scene(dist, nx, ny);
if(random) scene.loadScene(TRIANGL);
else scene.loadScene(FFILE,filename,oneTex);
Triangle *h_objects = scene.getObjects();
Skybox *h_skybox = scene.getSkybox();
unsigned char **textures;
unsigned char **h_textures;
Vector3 *textureSizes;
if(oneTex){
textures = scene.getTextures();
textureSizes = scene.getTextureSizes();
num_textures = scene.getNumTextures();
}
size = scene.getSize();
float ob_size = size*sizeof(Triangle);
int threads = nthreads;
while(size < threads) threads /= 2;
int blocks2 = (size+threads-1)/(threads);
std::cout << "Creating " << image << " with (" << nx << "," << ny << ") pixels with " << nthreads << " threads, using " << numGPUs << " GPUs." << std::endl;
std::cout << "With " << ns << " iterations for AntiAliasing and depth of " << depth << "." << std::endl;
std::cout << "The world have " << size << " objects." << std::endl;
if(light) std::cout << "Ambient light ON" << std::endl;
else std::cout << "Ambient light OFF" << std::endl;
/* Device variables */
Vector3 *d_frameBuffer;
Triangle *d_objects;
Camera **d_cam;
hiprandState_t *d_rand_state;
Node *d_internalNodes;
Node *d_leafNodes;
int *d_nodeCounter;
Skybox *d_skybox;
unsigned char **d_textures;
float internal_size = (size-1)*sizeof(Node);
float leaves_size = size*sizeof(Node);
/* Allocate Memory Host */
hipHostMalloc((Vector3**)&h_frameBuffer, fb_size);
hipHostMalloc((Node **)&h_internalNodes, internal_size);
hipHostMalloc((int **) &h_nodeCounter, sizeof(int)*size);
/* Allocate memory on Device */
hipMallocManaged((void **)&d_frameBuffer, fb_size);
hipMalloc((void **)&d_objects, ob_size);
hipMalloc((void **)&d_cam, cam_size);
hipMalloc((void **)&d_rand_state, drand_size);
hipMalloc((void **)&d_internalNodes, internal_size);
hipMalloc((void **)&d_leafNodes, leaves_size);
hipMalloc((void **)&d_nodeCounter, sizeof(int)*(size - 1));
hipMemset(d_nodeCounter, 0, sizeof(int)*(size - 1));
hipMalloc((void **)&d_skybox, sizeof(Skybox));
if(num_textures > 0){
int count = 0;
for(int i = 0; i < num_textures; i++){
Vector3 p = textureSizes[i];
count += (p[0]*p[1]*p[2]);
}
h_textures = (unsigned char **) malloc(sizeof(unsigned char)*count);
std::cout << "Binding textures" << std::endl;
for(int i = 0; i < num_textures; i++){
std::cout << "Texture " << i << std::endl;
Vector3 p = textureSizes[i];
unsigned char *image = textures[i];
hipMalloc((void**)&h_textures[i], sizeof(unsigned char)*p[0]*p[1]*p[2]);
hipMemcpy(h_textures[i], image, sizeof(unsigned char)*p[0]*p[1]*p[2], hipMemcpyHostToDevice);
}
hipMalloc(&d_textures, sizeof(unsigned char *) * num_textures);
hipMemcpy(d_textures, h_textures, sizeof(unsigned char*) * num_textures, hipMemcpyHostToDevice);
}
h_skybox->hostToDevice(0);
hipEventRecord(E0,0);
hipEventSynchronize(E0);
hipMemcpy(d_skybox, h_skybox, sizeof(Skybox), hipMemcpyHostToDevice);
checkCudaErrors(hipGetLastError());
hipMemcpy(d_objects, h_objects, ob_size, hipMemcpyHostToDevice);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( setupCamera), dim3(1),dim3(1), 0, 0, d_cam,nx,ny, scene.getCamera());
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( render_init), dim3(blocks), dim3(nthreads), 0, 0, nx, ny, d_rand_state, seed);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( initLeafNodes), dim3(blocks2),dim3(threads), 0, 0, d_leafNodes, size, d_objects);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( constructBVH), dim3(blocks2),dim3(threads), 0, 0, d_internalNodes, d_leafNodes, size-1, d_objects);
checkCudaErrors(hipGetLastError());
// checkBVH<<<1,1>>>(d_internalNodes, d_leafNodes, size);
// checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( boundingBoxBVH), dim3(blocks2),dim3(threads), 0, 0, d_internalNodes, d_leafNodes, size, size*2-1, d_nodeCounter);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( render), dim3(blocks), dim3(nthreads), 0, 0, d_frameBuffer, nx, ny, ns, d_cam, d_internalNodes, d_rand_state, depth, light, skybox, d_skybox, oneTex, d_textures);
checkCudaErrors(hipGetLastError());
/* Copy from device to host */
hipMemcpy(h_frameBuffer, d_frameBuffer, fb_size, hipMemcpyDeviceToHost);
checkCudaErrors(hipGetLastError());
hipEventRecord(E1,0);
checkCudaErrors(hipGetLastError());
hipEventSynchronize(E1);
checkCudaErrors(hipGetLastError());
hipEventElapsedTime(&totalTime,E0,E1);
checkCudaErrors(hipGetLastError());
std::cout << "Total time: " << totalTime << " milisegs. " << std::endl;
std::cout << "Generating file image..." << std::endl;
uint8_t *data = new uint8_t[nx*ny*3];
int count = 0;
for(int j = ny-1; j >= 0; j--){
for(int i = 0; i < nx; i++){
size_t pixel_index = j*nx + i;
Vector3 col = h_frameBuffer[pixel_index];
int ir = int(255.99*col.r());
int ig = int(255.99*col.g());
int ib = int(255.99*col.b());
data[count++] = ir;
data[count++] = ig;
data[count++] = ib;
}
}
hipFree(d_cam);
hipFree(d_objects);
hipFree(d_rand_state);
hipFree(d_frameBuffer);
hipFree(d_nodeCounter);
hipFree(d_leafNodes);
hipFree(d_internalNodes);
hipEventDestroy(E0);
hipEventDestroy(E1);
image = "../Resources/Images/GPU_BVH_IT/"+image;
stbi_write_png(image.c_str(), nx, ny, 3, data, nx*3);
if(filter){
std::cout << "Filtering image using bilateral filter with Gs = " << gs << " and Gr = " << gr << " and window of diameter " << diameterBi << std::endl;
std::string filenameFiltered = image.substr(0, image.length()-4) + "_bilateral_filter.png";
int sx, sy, sc;
unsigned char *imageData = stbi_load(image.c_str(), &sx, &sy, &sc, 0);
unsigned char *imageFiltered = new unsigned char[sx*sy*3];
bilateralFilter(diameterBi, sx, sy, imageData, imageFiltered, gs, gr);
stbi_write_png(filenameFiltered.c_str(), sx, sy, 3, imageFiltered, sx*3);
std::cout << "Filtering image using median filter with window of diameter " << diameterMedian << std::endl;
filenameFiltered = image.substr(0, image.length()-4) + "_median_filter.png";
medianFilter(diameterMedian, sx, sy, imageData, imageFiltered);
stbi_write_png(filenameFiltered.c_str(), sx, sy, 3, imageFiltered, sx*3);
std::cout << "Filtering image using mean filter with window of diameter " << diameterMean << std::endl;
filenameFiltered = image.substr(0, image.length()-4) + "_mean_filter.png";
meanFilter(diameterMean,sx, sy, imageData, imageFiltered);
stbi_write_png(filenameFiltered.c_str(), sx, sy, 3, imageFiltered, sx*3);
}
}
| e354043600fed8906480f635f7f772ae0dd9bed4.cu | #include <iostream>
#include <fstream>
#include <string>
#include <cfloat>
#include <ctime>
#include <limits>
#include <algorithm>
#include <stack>
#include <queue>
#include <curand.h>
#include <curand_kernel.h>
#include <device_launch_parameters.h>
#include "Camera.cuh"
#include "Scene.cuh"
#include "Node.cuh"
#include "filters.hh"
#define STB_IMAGE_STATIC
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
#define cuRandom (curand_uniform(&local_random))
void error(const char *message) {
std::cout << message << std::endl;
exit(0);
}
void format() {
std::cout << "File format for scene." << std::endl;
std::cout << "\t # Comment, skip line." << std::endl;
std::cout << "Spheres -> type center material" << std::endl;
std::cout << "\t 1 Indicates that the 3D model is a Sphere object." << std::endl;
std::cout << "\t Center The center of the Sphere." << std::endl;
std::cout << "\t Radius The radius of the Sphere." << std::endl;
std::cout << "\t Material -> type albedo [fuzz] [ref_idx]" << std::endl;
std::cout << "\t\t 0 LAMBERTIAN" << std::endl;
std::cout << "\t\t 1 METAL" << std::endl;
std::cout << "\t\t 2 DIELECTRIC" << std::endl;
std::cout << "\t\t 3 DIFFUSE LIGHT" << std::endl;
std::cout << "\t\t albedo Defines the color." << std::endl;
std::cout << "\t\t fuzz Only for METAL." << std::endl;
std::cout << "\t\t ref_idx Only for DIELECTRIC." << std::endl;
std::cout << "Examples of declaration:\n" << std::endl;
std::cout << "# my scene" << std::endl;
std::cout << "Object Center Rad Material Albedo Fuzz/ref_idx" << std::endl;
std::cout << "1 0 1 0 2 1 0.5 0.78 0.9 " << std::endl;
std::cout << "1 0 4 0 2 2 1 0 0.9 2 " << std::endl;
std::cout << "1 1 4 1 2 3 0.9 0.9 0.9 1.5 " << std::endl;
}
void help(){
std::cout << "\n" << std::endl;
std::cout << "\t[-d] [--defult] Set the parameters to default values" << std::endl;
std::cout << "\t size: (1280x720) | AAit: 50 | depth: 50 | spheres: 11 | nthreads: 32" << std::endl;
std::cout << "\t[-sizeX] Size in pixels of coordinate X. Number greater than 0." << std::endl;
std::cout << "\t[-sizeY] Size in pixels of coordinate Y. Number greater than 0." << std::endl;
std::cout << "\t[-AAit] Number of iterations to calculate color in one pixel. Number greater than 0." << std::endl;
std::cout << "\t[-depth] The attenuation of scattered ray. Number greater than 0." << std::endl;
// std::cout << "\t[-spheres] Factor number to calculate the number of spheres in the scene. Number greater than 0." << std::endl;
std::cout << "\t[-light] Turn on/off the ambient light. Values can be ON/OFF" << std::endl;
std::cout << "\t[-nthreads] Number of threads to use" << std::endl;
std::cout << "\t[-nGPUs] Number of GPUs to distribute the work" << std::endl;
std::cout << "\t[-i][--image] File name of pic generated." << std::endl;
std::cout << "\t[-f][--file] File name of the scene." << std::endl;
std::cout << "\t[-h][--help] Show help." << std::endl;
std::cout << "\t #spheres = (2*spheres)*(2*spheres) + 4" << std::endl;
std::cout << "\n" << std::endl;
std::cout << "Examples of usage:" << std::endl;
std::cout << "./path_tracing_NGPUs -d" << std::endl;
std::cout << "./path_tracing_NGPUs -nthreads 16 -sizeX 2000"<< std::endl;
format();
exit(1);
}
void parse_argv(int argc, char **argv, int &nx, int &ny, int &ns, int &depth, int &dist, std::string &image, std::string &filename, bool &light, bool &random, bool &filter, int &diameterBi, float &gs, float &gr, int &diameterMean, int &diameterMedian, bool &skybox, bool &oneTex, int &nthreads , int &numGPUs, const int count){
if(argc <= 1) error("Error usage. Use [-h] [--help] to see the usage.");
nx = 1280; ny = 720; ns = 50; depth = 50; dist = 11; image = "image";
filter = false; gs = 0; gr = 0; diameterBi = 11; diameterMean = 3; diameterMedian = 3;
skybox = false; oneTex = false;
light = true; random = true;
bool imageName = false;
nthreads = 32; numGPUs = 1;
bool v_default = false;
for(int i = 1; i < argc; i += 2){
if(v_default) error("Error usage. Use [-h] [--help] to see the usage.");
if (std::string(argv[i]) == "-d" || std::string(argv[i]) == "--default"){
if((i+1) < argc) error("The default parameter cannot have more arguments.");
std::cerr << "Default\n";
v_default = true;
}
else if (std::string(argv[i]) == "-sizeX"){
if((i+1) >= argc) error("-sizeX value expected");
nx = atoi(argv[i+1]);
if(nx == 0) error("-sizeX value expected or cannot be 0");
}
else if(std::string(argv[i]) == "-sizeY"){
if((i+1) >= argc) error("-sizeY value expected");
ny = atoi(argv[i+1]);
if(ny == 0) error("-sizeY value expected or cannot be 0");
}
else if(std::string(argv[i]) == "-AAit"){
if((i+1) >= argc) error("-AAit value expected");
ns = atoi(argv[i+1]);
if(ns == 0) error("-AAit value expected or cannot be 0");
}
else if(std::string(argv[i]) == "-depth"){
if((i+1) >= argc) error("-depth value expected");
depth = atoi(argv[i+1]);
if(depth == 0) error("-depth value expected or cannot be 0");
}
else if(std::string(argv[i]) == "-i" || std::string(argv[i]) == "--image"){
if((i+1) >= argc) error("--image / -i file expected");
filename = std::string(argv[i+1]);
imageName = true;
}
else if(std::string(argv[i]) == "-f" || std::string(argv[i]) == "--file"){
if((i+1) >= argc) error("-name file expected");
filename = std::string(argv[i+1]);
if(!imageName) image = filename;
filename = filename+".txt";
random = false;
}
else if(std::string(argv[i]) == "-light") {
if((i+1) >= argc) error("-light value expected");
if(std::string(argv[i+1]) == "ON") light = true;
else if(std::string(argv[i+1]) == "OFF") light = false;
}
else if (std::string(argv[i]) == "-filter") {
filter = true;
diameterBi = atoi(argv[i+1]);
i += 2;
gs = atof(argv[i]);
gr = atof(argv[i+1]);
i+=2;
diameterMean = atoi(argv[i]);
diameterMedian = atoi(argv[i+1]);
}
else if(std::string(argv[i]) == "-skybox") {
if((i+1) >= argc) error("-skybox value expected");
if(std::string(argv[i+1]) == "ON") skybox = true;
else if(std::string(argv[i+1]) == "OFF") skybox = false;
}
else if(std::string(argv[i]) == "-oneTex") {
if((i+1) >= argc) error("-oneTex value expected");
if(std::string(argv[i+1]) == "ON") oneTex = true;
else if(std::string(argv[i+1]) == "OFF") oneTex = false;
}
else if(std::string(argv[i]) == "-nGPUs"){
if((i+1) >= argc) error("-nGPUs value expected");
numGPUs = atoi(argv[i+1]);
if(numGPUs == 0) error("-nGPUs value expected or cannot be 0");
numGPUs = std::min(numGPUs, count);
}
else if(std::string(argv[i]) == "-nthreads"){
if((i+1) >= argc) error("-nthreads value expected");
nthreads = atoi(argv[i+1]);
if(nthreads == 0) error("-nthreads value expected or cannot be 0");
}
else if(std::string(argv[i]) == "-h" || std::string(argv[i]) == "--help" ){
help();
}
else{
error("Error usage. Use [-h] [--help] to see the usage.");
}
}
if(!light) image = image+"_noktem";
image = image+".png";
}
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line){
if(result){
std::cout << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << std::endl;
std::cout << cudaGetErrorString(result) << std::endl;
cudaDeviceReset();
exit(99);
}
}
void properties(){
std::cout << "GPU Info " << std::endl;
cudaSetDevice(0);
int device;
cudaGetDevice(&device);
cudaDeviceProp properties;
//checkCudaErrors( cudaDeviceSetLimit( cudaLimitMallocHeapSize, 67108864 ) );
checkCudaErrors( cudaDeviceSetLimit( cudaLimitStackSize, 131072 ) );
checkCudaErrors( cudaGetDeviceProperties( &properties, device ) );
size_t limit1;
checkCudaErrors( cudaDeviceGetLimit( &limit1, cudaLimitMallocHeapSize ) );
size_t limit2;
checkCudaErrors( cudaDeviceGetLimit( &limit2, cudaLimitStackSize ) );
if( properties.major > 3 || ( properties.major == 3 && properties.minor >= 5 ) ) {
std::cout << "Running on GPU " << device << " (" << properties.name << ")" << std::endl;
std::cout << "Compute mode: " << properties.computeMode << std::endl;
std::cout << "Concurrent Kernels: " << properties.concurrentKernels << std::endl;
std::cout << "Warp size: " << properties.warpSize << std::endl;
std::cout << "Major: " << properties.major << " Minor: " << properties.minor << std::endl;
std::cout << "Cuda limit heap size: " << limit1 << std::endl;
std::cout << "Cuda limit stack size: " << limit2 << "\n\n" << std::endl;
}
else std::cout << "GPU " << device << " (" << properties.name << ") does not support CUDA Dynamic Parallelism" << std::endl;
}
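// Iterative path tracer: follows the ray for up to `depth` bounces, accumulating material
// attenuation and emitted light; on a miss it samples the skybox, the ambient sky gradient,
// or returns black when the ambient light is disabled.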
__device__ Vector3 color(const Ray& ray, Node *world, int depth, bool light, bool skybox, curandState *random, Skybox *sky, bool oneTex, unsigned char **d_textures){
Ray cur_ray = ray;
Vector3 cur_attenuation = Vector3::One();
for(int i = 0; i < depth; i++){
hit_record rec;
if( world->intersect(cur_ray, 0.00001, FLT_MAX, rec) ) {
Ray scattered;
Vector3 attenuation;
Vector3 emitted = rec.mat_ptr.emitted(rec.u, rec.v, oneTex, d_textures);
if(rec.mat_ptr.scatter(cur_ray, rec, attenuation, scattered, random, oneTex, d_textures)){
cur_attenuation *= attenuation;
cur_attenuation += emitted;
cur_ray = scattered;
}
else return cur_attenuation * emitted;
}
else {
if(skybox && sky->hit(cur_ray, 0.00001, FLT_MAX, rec)){
return cur_attenuation * rec.mat_ptr.emitted(rec.u, rec.v, oneTex, d_textures);
}
else {
if(light) {
Vector3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5*(unit_direction.y() + 1.0);
Vector3 c = (1.0 - t)*Vector3::One() + t*Vector3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
else return Vector3::Zero();
}
}
}
return Vector3::Zero();
}
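// Length of the common leading-bit prefix of the Morton codes of leaves i and j
// (the delta operator of Karras-style LBVH construction); returns -1 for out-of-range indices.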
__device__ int LongestCommonPrefix(int i, int j, int numObjects, Triangle *d_list) {
if(i < 0 or i > numObjects - 1 or j < 0 or j > numObjects - 1) return -1;
int codeI = d_list[i].getMorton();
int codeJ = d_list[j].getMorton();
if(i == j) {
printf("Equals Longest\n");
return __clz(codeI ^ codeJ);
}
else return __clz(codeI ^ codeJ);
}
__device__ int findSplit(Triangle *d_list, int first, int last) {
if(first == last){
return -1;
}
int firstCode = d_list[first].getMorton();
int lastCode = d_list[last].getMorton();
int commonPrefix = __clz(firstCode ^ lastCode);
int split = first;
int step = last - first;
do {
step = (step + 1 ) >> 1;
int newSplit = split + step;
if(newSplit < last){
int splitCode = d_list[newSplit].getMorton();
int splitPrefix = __clz(firstCode ^ splitCode);
if(splitPrefix > commonPrefix){
split = newSplit;
}
}
} while (step > 1);
return split;
}
__device__ int2 determineRange(Triangle *d_list, int idx, int objs) {
int d = LongestCommonPrefix(idx, idx + 1, objs, d_list) -
LongestCommonPrefix(idx, idx - 1, objs, d_list) >= 0 ? 1 : -1;
int dmin = LongestCommonPrefix(idx, idx - d, objs, d_list);
int lmax = 2;
while(LongestCommonPrefix(idx, idx + lmax*d, objs, d_list) > dmin){
lmax <<=1;
}
int l = 0;
int div = 2;
for(int t = lmax/div; t >= 1; t >>= 1) {
if(LongestCommonPrefix(idx, idx + (l + t) * d, objs, d_list) > dmin) l += t;
}
int jdx = idx + l * d;
if(jdx < idx) return make_int2(jdx,idx);
else return make_int2(idx,jdx);
}
__global__ void setupCamera(Camera **d_cam, int nx, int ny, Camera cam) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
*d_cam = new Camera(cam.getLookfrom(), cam.getLookat(), cam.getVUP(), cam.getFOV(), float(nx)/float(ny), cam.getAperture(), cam.getFocus(),0.0,0.1);
}
}
__global__ void render_init(int max_x, int max_y, curandState *rand_state,unsigned long long seed) {
int num = blockIdx.x*blockDim.x + threadIdx.x;
int i = num%max_x;
int j = num/max_x;
if( (i >= max_x) || (j >= max_y) ) return;
int pixel_index = num;
curand_init((seed << 20) + pixel_index, 0, 0, &rand_state[pixel_index]);
}
__global__ void initLeafNodes(Node *leafNodes, int objs, Triangle *d_list) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= objs) return;
leafNodes[idx].obj = &d_list[idx];
leafNodes[idx].box = d_list[idx].getBox();
}
__global__ void boundingBoxBVH(Node *d_internalNodes, Node *d_leafNodes, int objs, int nodes, int *nodeCounter) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= objs) return;
Node *leaf = d_leafNodes + idx;
Node* current = leaf->parent;
int currentIdx = current - d_internalNodes;
int res = atomicAdd(nodeCounter + currentIdx, 1);
while (true) {
if(res == 0) return;
aabb leftBoundingBox = current->left->box;
aabb rightBoundingBox = current->right->box;
current->box = surrounding_box(leftBoundingBox, rightBoundingBox);
if (current == d_internalNodes) {
return;
}
current = current->parent;
currentIdx = current - d_internalNodes;
res = atomicAdd(nodeCounter + currentIdx, 1);
}
}
__global__ void constructBVH(Node *d_internalNodes, Node *leafNodes, int objs, Triangle *d_list) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= objs) return;
int2 range = determineRange(d_list, idx, objs+1);
int first = range.x;
int last = range.y;
int split = findSplit(d_list, first, last);
if(split == -1){
split = (first+last) >> 1;
++last;
}
Node *current = d_internalNodes + idx;
if(split == first) {
current->left = leafNodes + split;
current->left->isLeaf = true;
current->left->isLeft = true;
(leafNodes + split)->parent = current;
}
else{
current->left = d_internalNodes + split;
current->left->isLeft = true;
(d_internalNodes + split)->parent = current;
}
if (split + 1 == last) {
current->right = leafNodes + split + 1;
current->right->isLeaf = true;
current->right->isRight = true;
(leafNodes + split + 1)->parent = current;
}
else{
current->right = d_internalNodes + split + 1;
current->right->isRight = true;
(d_internalNodes + split + 1)->parent = current;
}
}
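// Main render kernel: one thread per pixel, `ns` jittered camera rays per pixel; the averaged
// colour is gamma-corrected (sqrt) before being written to the framebuffer.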
__global__ void render(Vector3 *fb, int max_x, int max_y, int ns, Camera **cam, Node *world, curandState *d_rand_state, int depth, bool light, bool skybox, Skybox *sky, bool oneTex, unsigned char ** d_textures) {
int num = blockIdx.x*blockDim.x + threadIdx.x;
int i = num%max_x;
int j = num/max_x;
curandState local_random;
int pixel_index = num;
local_random = d_rand_state[pixel_index];
Vector3 col(0,0,0);
for(int s = 0; s < ns; s++){
float u = float(i + cuRandom) / float(max_x);
float v = float(j + cuRandom) / float(max_y);
Ray r = (*cam)->get_ray(u, v, &local_random);
col += color(r, world, depth, light, skybox, &local_random, sky, oneTex, d_textures);
}
d_rand_state[pixel_index] = local_random;
col /= float(ns);
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col;
}
__global__ void checkBVH(Node *d_internalNodes, Node *d_leaves, int objs){
if (threadIdx.x == 0 && blockIdx.x == 0){
printf("Checking BVH...\n");
for(int i = 0; i < objs; i++){
if(!d_leaves[i].parent){
printf("Leaf without parent %d\n",i);
}
}
for(int i = 0; i < objs-1; i++){
if(!d_internalNodes[i].left){
printf("Internal without left %d\n",i);
}
if(!d_internalNodes[i].right){
printf("Internal without right %d\n",i);
}
if(!d_internalNodes[i].parent){
printf("Internal without parent %d\n",i);
}
}
printf("BVH checked!\n");
}
}
int main(int argc, char **argv) {
cudaDeviceReset();
properties();
cudaEvent_t E0, E1;
cudaEventCreate(&E0);
cudaEventCreate(&E1);
float totalTime;
int nx, ny, ns, depth, dist, diameterBi, diameterMean, diameterMedian, nthreads, numGPUs;
bool light, random, filter, skybox, oneTex;
float gs, gr;
std::string filename, image;
parse_argv(argc, argv, nx, ny, ns, depth, dist, image, filename, light, random, filter, diameterBi, gs, gr, diameterMean, diameterMedian, skybox, oneTex, nthreads, numGPUs, 1);
/* Seed for CUDA cuRandom */
unsigned long long int seed = 1000;
/* #pixels of the image */
int num_pixels = nx*ny;
int size = 0;
int num_textures = 0;
/* Host variables */
float fb_size = num_pixels*sizeof(Vector3);
float drand_size = num_pixels*sizeof(curandState);
float cam_size = sizeof(Camera*);
Vector3 *h_frameBuffer;
Node *h_internalNodes;
int *h_nodeCounter;
int blocks = (nx * ny)/(numGPUs * nthreads);
/* Create world */
Scene scene(dist, nx, ny);
if(random) scene.loadScene(TRIANGL);
else scene.loadScene(FFILE,filename,oneTex);
Triangle *h_objects = scene.getObjects();
Skybox *h_skybox = scene.getSkybox();
unsigned char **textures;
unsigned char **h_textures;
Vector3 *textureSizes;
if(oneTex){
textures = scene.getTextures();
textureSizes = scene.getTextureSizes();
num_textures = scene.getNumTextures();
}
size = scene.getSize();
float ob_size = size*sizeof(Triangle);
int threads = nthreads;
while(size < threads) threads /= 2;
int blocks2 = (size+threads-1)/(threads);
std::cout << "Creating " << image << " with (" << nx << "," << ny << ") pixels with " << nthreads << " threads, using " << numGPUs << " GPUs." << std::endl;
std::cout << "With " << ns << " iterations for AntiAliasing and depth of " << depth << "." << std::endl;
std::cout << "The world have " << size << " objects." << std::endl;
if(light) std::cout << "Ambient light ON" << std::endl;
else std::cout << "Ambient light OFF" << std::endl;
/* Device variables */
Vector3 *d_frameBuffer;
Triangle *d_objects;
Camera **d_cam;
curandState *d_rand_state;
Node *d_internalNodes;
Node *d_leafNodes;
int *d_nodeCounter;
Skybox *d_skybox;
unsigned char **d_textures;
float internal_size = (size-1)*sizeof(Node);
float leaves_size = size*sizeof(Node);
/* Allocate Memory Host */
cudaMallocHost((Vector3**)&h_frameBuffer, fb_size);
cudaMallocHost((Node **)&h_internalNodes, internal_size);
cudaMallocHost((int **) &h_nodeCounter, sizeof(int)*size);
/* Allocate memory on Device */
cudaMallocManaged((void **)&d_frameBuffer, fb_size);
cudaMalloc((void **)&d_objects, ob_size);
cudaMalloc((void **)&d_cam, cam_size);
cudaMalloc((void **)&d_rand_state, drand_size);
cudaMalloc((void **)&d_internalNodes, internal_size);
cudaMalloc((void **)&d_leafNodes, leaves_size);
cudaMalloc((void **)&d_nodeCounter, sizeof(int)*(size - 1));
cudaMemset(d_nodeCounter, 0, sizeof(int)*(size - 1));
cudaMalloc((void **)&d_skybox, sizeof(Skybox));
if(num_textures > 0){
int count = 0;
for(int i = 0; i < num_textures; i++){
Vector3 p = textureSizes[i];
count += (p[0]*p[1]*p[2]);
}
h_textures = (unsigned char **) malloc(sizeof(unsigned char)*count);
std::cout << "Binding textures" << std::endl;
for(int i = 0; i < num_textures; i++){
std::cout << "Texture " << i << std::endl;
Vector3 p = textureSizes[i];
unsigned char *image = textures[i];
cudaMalloc((void**)&h_textures[i], sizeof(unsigned char)*p[0]*p[1]*p[2]);
cudaMemcpy(h_textures[i], image, sizeof(unsigned char)*p[0]*p[1]*p[2], cudaMemcpyHostToDevice);
}
cudaMalloc(&d_textures, sizeof(unsigned char *) * num_textures);
cudaMemcpy(d_textures, h_textures, sizeof(unsigned char*) * num_textures, cudaMemcpyHostToDevice);
}
h_skybox->hostToDevice(0);
cudaEventRecord(E0,0);
cudaEventSynchronize(E0);
cudaMemcpy(d_skybox, h_skybox, sizeof(Skybox), cudaMemcpyHostToDevice);
checkCudaErrors(cudaGetLastError());
cudaMemcpy(d_objects, h_objects, ob_size, cudaMemcpyHostToDevice);
checkCudaErrors(cudaGetLastError());
setupCamera<<<1,1>>>(d_cam,nx,ny, scene.getCamera());
checkCudaErrors(cudaGetLastError());
render_init<<<blocks, nthreads>>>(nx, ny, d_rand_state, seed);
checkCudaErrors(cudaGetLastError());
initLeafNodes<<<blocks2,threads>>>(d_leafNodes, size, d_objects);
checkCudaErrors(cudaGetLastError());
constructBVH<<<blocks2,threads>>>(d_internalNodes, d_leafNodes, size-1, d_objects);
checkCudaErrors(cudaGetLastError());
// checkBVH<<<1,1>>>(d_internalNodes, d_leafNodes, size);
// checkCudaErrors(cudaGetLastError());
boundingBoxBVH<<<blocks2,threads>>>(d_internalNodes, d_leafNodes, size, size*2-1, d_nodeCounter);
checkCudaErrors(cudaGetLastError());
render<<<blocks, nthreads>>>(d_frameBuffer, nx, ny, ns, d_cam, d_internalNodes, d_rand_state, depth, light, skybox, d_skybox, oneTex, d_textures);
checkCudaErrors(cudaGetLastError());
/* Copy from device to host */
cudaMemcpy(h_frameBuffer, d_frameBuffer, fb_size, cudaMemcpyDeviceToHost);
checkCudaErrors(cudaGetLastError());
cudaEventRecord(E1,0);
checkCudaErrors(cudaGetLastError());
cudaEventSynchronize(E1);
checkCudaErrors(cudaGetLastError());
cudaEventElapsedTime(&totalTime,E0,E1);
checkCudaErrors(cudaGetLastError());
std::cout << "Total time: " << totalTime << " milisegs. " << std::endl;
std::cout << "Generating file image..." << std::endl;
uint8_t *data = new uint8_t[nx*ny*3];
int count = 0;
for(int j = ny-1; j >= 0; j--){
for(int i = 0; i < nx; i++){
size_t pixel_index = j*nx + i;
Vector3 col = h_frameBuffer[pixel_index];
int ir = int(255.99*col.r());
int ig = int(255.99*col.g());
int ib = int(255.99*col.b());
data[count++] = ir;
data[count++] = ig;
data[count++] = ib;
}
}
cudaFree(d_cam);
cudaFree(d_objects);
cudaFree(d_rand_state);
cudaFree(d_frameBuffer);
cudaFree(d_nodeCounter);
cudaFree(d_leafNodes);
cudaFree(d_internalNodes);
cudaEventDestroy(E0);
cudaEventDestroy(E1);
image = "../Resources/Images/GPU_BVH_IT/"+image;
stbi_write_png(image.c_str(), nx, ny, 3, data, nx*3);
if(filter){
std::cout << "Filtering image using bilateral filter with Gs = " << gs << " and Gr = " << gr << " and window of diameter " << diameterBi << std::endl;
std::string filenameFiltered = image.substr(0, image.length()-4) + "_bilateral_filter.png";
int sx, sy, sc;
unsigned char *imageData = stbi_load(image.c_str(), &sx, &sy, &sc, 0);
unsigned char *imageFiltered = new unsigned char[sx*sy*3];
bilateralFilter(diameterBi, sx, sy, imageData, imageFiltered, gs, gr);
stbi_write_png(filenameFiltered.c_str(), sx, sy, 3, imageFiltered, sx*3);
std::cout << "Filtering image using median filter with window of diameter " << diameterMedian << std::endl;
filenameFiltered = image.substr(0, image.length()-4) + "_median_filter.png";
medianFilter(diameterMedian, sx, sy, imageData, imageFiltered);
stbi_write_png(filenameFiltered.c_str(), sx, sy, 3, imageFiltered, sx*3);
std::cout << "Filtering image using mean filter with window of diameter " << diameterMean << std::endl;
filenameFiltered = image.substr(0, image.length()-4) + "_mean_filter.png";
meanFilter(diameterMean,sx, sy, imageData, imageFiltered);
stbi_write_png(filenameFiltered.c_str(), sx, sy, 3, imageFiltered, sx*3);
}
}
|
def935a57665c5a5e7a2b0078d2014071d35483c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reduction/SumReduction.cuh"
__global__ void backwardExpansionKernel(
int batchSize,
int* lengths,
int numberIterations,
int numberRows,
int numberEntries,
int numberOfWarpsPerBlocks,
int filterHeight,
int filterWidth,
int filterLength,
int maximumConvolutions,
int convolutionsPerRow,
float* gradient,
float* result) {
int indexInstance = blockIdx.x;
int startInstanceWithinBatch = indexInstance * numberEntries;
int firstEntryInYBlockWithinInstance = blockIdx.y * numberOfWarpsPerBlocks;
int indexEntryWithinYBlock = threadIdx.x / warpSize;
int indexEntryWithinInstance = firstEntryInYBlockWithinInstance + indexEntryWithinYBlock;
if(indexEntryWithinInstance < numberEntries) {
int length = lengths[indexInstance];
int indexEntryWithinBatch = startInstanceWithinBatch + indexEntryWithinInstance;
result[indexEntryWithinBatch] = nanf("NaN");
if(indexInstance < batchSize && indexEntryWithinInstance < length * numberRows) {
int laneId = threadIdx.x % warpSize;
int startFilter = laneId * numberIterations;
int endFilter = min(startFilter + numberIterations, filterLength);
int indexRowWithinInstance = indexEntryWithinInstance % numberRows;
int indexColumnWithinInstance = indexEntryWithinInstance / numberRows;
float thisValue = 0.0;
for(int indexFilter = startFilter; indexFilter < endFilter; indexFilter++) {
int indexRowWithinFilter = indexFilter % filterHeight;
int indexColumnWithinFilter = indexFilter / filterHeight;
// At which row does the convolution for the given filter position start
int firstRowInConvolution = indexRowWithinInstance - indexRowWithinFilter;
// At which row does the convolution for the given filter position end
int lastRowInConvolution = firstRowInConvolution + filterHeight - 1;
// At which column does the convolution for the given filter position start
int firstColumnInConvolution = indexColumnWithinInstance - indexColumnWithinFilter;
// At which column does the convolution for the given filter position end
int lastColumnInConvolution = firstColumnInConvolution + filterWidth - 1;
float thisValueInIteration;
if(firstRowInConvolution >= 0 && lastRowInConvolution < numberRows &&
firstColumnInConvolution >= 0 && lastColumnInConvolution < lengths[indexInstance]) {
int indexConvolution = firstColumnInConvolution * convolutionsPerRow + firstRowInConvolution;
int indexGradient = indexInstance * maximumConvolutions * filterLength + indexConvolution * filterLength + indexColumnWithinFilter * filterHeight + indexRowWithinFilter;
thisValueInIteration = gradient[indexGradient];
}
else {
thisValueInIteration = 0.0;
}
thisValue += thisValueInIteration;
}
float sum = warpReduceToSum(thisValue);
if(laneId == 0) {
result[indexEntryWithinBatch] = sum;
}
}
}
}
| def935a57665c5a5e7a2b0078d2014071d35483c.cu | #include "reduction/SumReduction.cuh"
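// Backward pass of the expansion layer: each warp handles one input entry, each lane sums the
// gradient contributions of the convolution windows that covered that entry for its share of
// filter positions, and a warp reduction combines the partial sums into the result.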
__global__ void backwardExpansionKernel(
int batchSize,
int* lengths,
int numberIterations,
int numberRows,
int numberEntries,
int numberOfWarpsPerBlocks,
int filterHeight,
int filterWidth,
int filterLength,
int maximumConvolutions,
int convolutionsPerRow,
float* gradient,
float* result) {
int indexInstance = blockIdx.x;
int startInstanceWithinBatch = indexInstance * numberEntries;
int firstEntryInYBlockWithinInstance = blockIdx.y * numberOfWarpsPerBlocks;
int indexEntryWithinYBlock = threadIdx.x / warpSize;
int indexEntryWithinInstance = firstEntryInYBlockWithinInstance + indexEntryWithinYBlock;
if(indexEntryWithinInstance < numberEntries) {
int length = lengths[indexInstance];
int indexEntryWithinBatch = startInstanceWithinBatch + indexEntryWithinInstance;
result[indexEntryWithinBatch] = nanf("NaN");
if(indexInstance < batchSize && indexEntryWithinInstance < length * numberRows) {
int laneId = threadIdx.x % warpSize;
int startFilter = laneId * numberIterations;
int endFilter = min(startFilter + numberIterations, filterLength);
int indexRowWithinInstance = indexEntryWithinInstance % numberRows;
int indexColumnWithinInstance = indexEntryWithinInstance / numberRows;
float thisValue = 0.0;
for(int indexFilter = startFilter; indexFilter < endFilter; indexFilter++) {
int indexRowWithinFilter = indexFilter % filterHeight;
int indexColumnWithinFilter = indexFilter / filterHeight;
// At which row does the convolution for the given filter position start
int firstRowInConvolution = indexRowWithinInstance - indexRowWithinFilter;
// At which row does the convolution for the given filter position end
int lastRowInConvolution = firstRowInConvolution + filterHeight - 1;
// At which column does the convolution for the given filter position start
int firstColumnInConvolution = indexColumnWithinInstance - indexColumnWithinFilter;
// At which column does the convolution for the given filter position end
int lastColumnInConvolution = firstColumnInConvolution + filterWidth - 1;
float thisValueInIteration;
if(firstRowInConvolution >= 0 && lastRowInConvolution < numberRows &&
firstColumnInConvolution >= 0 && lastColumnInConvolution < lengths[indexInstance]) {
int indexConvolution = firstColumnInConvolution * convolutionsPerRow + firstRowInConvolution;
int indexGradient = indexInstance * maximumConvolutions * filterLength + indexConvolution * filterLength + indexColumnWithinFilter * filterHeight + indexRowWithinFilter;
thisValueInIteration = gradient[indexGradient];
}
else {
thisValueInIteration = 0.0;
}
thisValue += thisValueInIteration;
}
float sum = warpReduceToSum(thisValue);
if(laneId == 0) {
result[indexEntryWithinBatch] = sum;
}
}
}
}
|
9c5db079783f3d2c8e0122a9002dc4264186f286.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "utilCuda.h"
#include "timer.h"
#include "lock.h"
#include "../Class/interface.h"
#include "../Class/neuralNetwork.h"
#include "../Class/statistic.h"
#include "../Class/countNetwork.h"
#define RATIO 22
#define MIN_RATIO 2
//cuda kernel prototypes
__global__ void correct ( unsigned char * d_vectorFlags , unsigned char * d_ptr, unsigned char *d_desiredOutput);
__global__ void reset ( unsigned char * d_vectorFlags , unsigned char *d_ptr);
__global__ void recognize( unsigned char * d_vectorNeuron, unsigned char *d_vectorFlags,
unsigned char * d_pattern , int *d_countHit, unsigned char *d_ptr,
unsigned char * d_arrayCategory , unsigned char* d_idsNeuron,Lock lock);
// methods prototype
template<class T>
inline bool equal(T a, T b);
template<class T>
bool compare(T array [] , int sizeArray);
void calculateStatistic(const float & currentTime, Statistic * & statistic, kernels kernel);
void debugTimer( GpuTimer timer);
//--------------------------------------Main methods-------------------------------
extern "C"
void boot(NeuralNetwork * & neuralSenses,const SizeNet & sizeNet, Statistic * & statistic, OrderNetwork * & orderNet) {
unsigned char * d_vectorZero;
GpuTimer timer;
// It allocates memory on the device
checkCudaErrors(hipMalloc(&d_vectorZero,sizeNet.sizeVectorNeuron));
// initialize the memory block to zero (0)
timer.Start();
checkCudaErrors(hipMemset(d_vectorZero , 0 , sizeNet.sizeVectorNeuron));
timer.Stop();
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
calculateStatistic(timer.Elapsed(),statistic,BOOT);
// copy from device to host
checkCudaErrors(hipMemcpy(neuralSenses[ SIGHT ].vectorNeuron, d_vectorZero, sizeNet.sizeVectorNeuron, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(neuralSenses[ SIGHT ].vectorFlags , d_vectorZero, sizeNet.sizevectorFlags , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(neuralSenses[ SIGHT ].binaryCharacteristic,d_vectorZero,sizeNet.sizeBinaryCharacteristic,hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(neuralSenses[ HEARING ].vectorNeuron, d_vectorZero, sizeNet.sizeVectorNeuron, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(neuralSenses[ HEARING ].vectorFlags , d_vectorZero, sizeNet.sizevectorFlags , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(neuralSenses[ HEARING ].binaryCharacteristic,d_vectorZero,sizeNet.sizeBinaryCharacteristic,hipMemcpyDeviceToHost));
// checkCudaErrors(hipMemcpy(orderNet->countNet[0].vectorNetworkCount , d_vectorZero, sizeNet.sizeVectorNeuron, hipMemcpyDeviceToHost);
/* for(int i=0; i<sizeNet.numOrderNeurons; i++) {
checkCudaErrors(hipMemcpy(orderNet->countNet[i].vectorNetworkCount , d_vectorZero, sizeNet.sizeVectorNeuron, hipMemcpyDeviceToHost));
//checkCudaErrors(hipMemcpy(orderNet->countNet[i].vectorFlagsCount , d_vectorZero, sizeNet.sizevectorFlags, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(orderNet->countNet[i].vectorPointerCount , d_vectorZero, sizeNet.sizevectorFlags, hipMemcpyDeviceToHost));
}*/
// Free memory on device Reserved
checkCudaErrors(hipFree(d_vectorZero));
}
extern "C"
stateNeuralNetwork recognize(NeuralNetwork * neuralSenses, const SizeNet & sizeNet,
unsigned char * h_pattern, Interface * interface, Statistic *& statistic)
{
int * d_countHit;
unsigned char * d_arrayCategory,*d_idsNeuron;
unsigned char * d_vectorNeuron,* d_vectorFlags,*d_pattern,*d_ptr;
stateNeuralNetwork state;
dim3 blockSize (SIZE_CHARACTERISTIC);
dim3 gridSize ( (*neuralSenses->ptr) +1 );
GpuTimer timer;
Lock lock;
*(interface->hits) = 0;
// It allocates memory on the device
checkCudaErrors(hipMalloc( &d_vectorNeuron, sizeNet.sizeVectorNeuron) );
checkCudaErrors(hipMalloc( &d_vectorFlags , sizeNet.sizevectorFlags ) );
checkCudaErrors(hipMalloc( &d_pattern , sizeof(unsigned char) * SIZE_CHARACTERISTIC));
checkCudaErrors(hipMalloc( &d_arrayCategory , sizeof(unsigned char) * (*(neuralSenses->ptr))));
checkCudaErrors(hipMalloc( &d_idsNeuron , sizeof(unsigned char) * (*(neuralSenses->ptr))));
checkCudaErrors(hipMalloc( &d_ptr , sizeof(unsigned char)));
checkCudaErrors(hipMalloc( &d_countHit , sizeof(int)));
// copy from host to device
checkCudaErrors( hipMemcpy( d_vectorNeuron, neuralSenses->vectorNeuron ,sizeNet.sizeVectorNeuron, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy( d_vectorFlags , neuralSenses->vectorFlags ,sizeNet.sizevectorFlags , hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy( d_pattern , h_pattern ,sizeof(unsigned char)*SIZE_CHARACTERISTIC, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy( d_ptr , neuralSenses->ptr ,sizeof(unsigned char) , hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy( d_countHit , interface->hits ,sizeof(int) , hipMemcpyHostToDevice ) );
//call kernel reconize
timer.Start();
hipLaunchKernelGGL(( recognize), dim3(gridSize),dim3(blockSize), 0, 0, d_vectorNeuron,d_vectorFlags,d_pattern,d_countHit,d_ptr,d_arrayCategory,d_idsNeuron,lock);
timer.Stop();
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
calculateStatistic(timer.Elapsed(),statistic,RECOGNIZE);
// copy from device to host
checkCudaErrors( hipMemcpy( interface->hits, d_countHit, sizeof(int), hipMemcpyDeviceToHost ) );
checkCudaErrors( hipMemcpy( neuralSenses->vectorNeuron , d_vectorNeuron, sizeNet.sizeVectorNeuron, hipMemcpyDeviceToHost ) );
checkCudaErrors( hipMemcpy( neuralSenses->vectorFlags , d_vectorFlags , sizeNet.sizevectorFlags , hipMemcpyDeviceToHost ) );
checkCudaErrors( hipMemcpy( neuralSenses->ptr , d_ptr , sizeof(unsigned char) , hipMemcpyDeviceToHost ) );
interface->freeMem();
interface->setHit();
checkCudaErrors(hipMemcpy(interface->arrayCategory,d_arrayCategory ,sizeof(unsigned char)*(* (interface->hits)),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(interface->id ,d_idsNeuron ,sizeof(unsigned char)*(* (interface->hits)),hipMemcpyDeviceToHost));
if(* (interface->hits) > 1){
if(* (interface->hits) == 2)
state = equal(interface->arrayCategory[0],interface->arrayCategory[1])? IS_HIT : DIFF;
else
state = compare(interface->arrayCategory,* (interface->hits)) ? IS_HIT : DIFF;
}
else if(* (interface->hits) == 1)
state=IS_HIT;
else
state=NO_HIT;
// Free memory on device Reserved
checkCudaErrors(hipFree(d_vectorNeuron));
checkCudaErrors(hipFree(d_vectorFlags));
checkCudaErrors(hipFree(d_pattern));
checkCudaErrors(hipFree(d_countHit));
checkCudaErrors(hipFree(d_ptr));
checkCudaErrors(hipFree(d_arrayCategory));
checkCudaErrors(hipFree(d_idsNeuron));
lock.freeMem();
return state;
}
extern "C"
void correct(NeuralNetwork * neuralSenses , const SizeNet & sizeNet,
unsigned char desiredOutput, int maxThreadsPerBlock, Statistic *&statistic){
unsigned char * d_desiredOutput;
unsigned char * d_vectorFlags,* d_ptr;
dim3 blockSize (maxThreadsPerBlock);
int numblock= (*(neuralSenses->ptr) % maxThreadsPerBlock == 0) ?
*(neuralSenses->ptr) / maxThreadsPerBlock:
*(neuralSenses->ptr) / maxThreadsPerBlock + 1;
dim3 gridSize(numblock);
GpuTimer timer;
// It allocates memory on the device
checkCudaErrors(hipMalloc(&d_vectorFlags ,sizeof(unsigned char) * SIZE_FLAGS* (*neuralSenses->ptr)));
checkCudaErrors(hipMalloc(&d_desiredOutput,sizeof(unsigned char)));
checkCudaErrors(hipMalloc(&d_ptr,sizeof(unsigned char)));
// copy from host to device
checkCudaErrors( hipMemcpy( d_vectorFlags , neuralSenses->vectorFlags ,sizeof(unsigned char) * SIZE_FLAGS * (*neuralSenses->ptr), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy( d_ptr , neuralSenses->ptr ,sizeof(unsigned char) , hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy( d_desiredOutput, &desiredOutput ,sizeof(unsigned char) , hipMemcpyHostToDevice ) );
timer.Start();
//call kernel correct
hipLaunchKernelGGL(( correct), dim3(gridSize),dim3(blockSize), 0, 0, d_vectorFlags,d_ptr,d_desiredOutput);
timer.Stop();
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
calculateStatistic(timer.Elapsed(),statistic,CORRECT);
// copy from device to host
checkCudaErrors( hipMemcpy( neuralSenses->vectorFlags , d_vectorFlags , sizeof(unsigned char) * SIZE_FLAGS *(*neuralSenses->ptr), hipMemcpyDeviceToHost ) );
// Free memory on device Reserved
checkCudaErrors(hipFree(d_vectorFlags));
checkCudaErrors(hipFree(d_ptr));
checkCudaErrors(hipFree(d_desiredOutput));
}
extern "C"
void reset(NeuralNetwork * neuralSenses , const SizeNet & sizeNet, int maxThreadsPerBlock, Statistic *&statistic)
{
unsigned char * d_vectorFlags,* d_ptr;
dim3 blockSize (maxThreadsPerBlock);
int numblock = (*(neuralSenses->ptr) % maxThreadsPerBlock == 0) ?
*(neuralSenses->ptr) / maxThreadsPerBlock:
*(neuralSenses->ptr) / maxThreadsPerBlock + 1;
dim3 gridSize(numblock);
GpuTimer timer;
// It allocates memory on the device
checkCudaErrors(hipMalloc(&d_vectorFlags ,sizeof(unsigned char) * SIZE_FLAGS * (*neuralSenses->ptr)));
checkCudaErrors(hipMalloc(&d_ptr,sizeof(unsigned char)));
// copy from host to device
checkCudaErrors( hipMemcpy( d_vectorFlags , neuralSenses->vectorFlags ,sizeof(unsigned char) * SIZE_FLAGS * (*neuralSenses->ptr), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy( d_ptr , neuralSenses->ptr ,sizeof(unsigned char) , hipMemcpyHostToDevice ) );
timer.Start();
//call kernel reset
hipLaunchKernelGGL(( reset), dim3(gridSize),dim3(blockSize), 0, 0, d_vectorFlags,d_ptr);
timer.Stop();
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
calculateStatistic(timer.Elapsed(),statistic,RESET);
// copy from device to host
checkCudaErrors( hipMemcpy( neuralSenses->vectorFlags , d_vectorFlags , sizeof(unsigned char) * SIZE_FLAGS * (*neuralSenses->ptr), hipMemcpyDeviceToHost ) );
// Free memory on device Reserved
checkCudaErrors(hipFree(d_vectorFlags));
checkCudaErrors(hipFree(d_ptr));
}
// methods
template<class T>
bool equal(T a, T b){
return a == b;
}
template<class T>
bool compare(T array[], int sizeArray)
{
T element=array[0];
for (register int i = 1; i < sizeArray; i++) {
if(!equal(element,array[i]))
return false;
}
return true;
}
//cuda kernel
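// recognize: one block per allocated neuron, one thread per characteristic. The block at index
// `ptr` stores the incoming pattern as a new neuron; every trained, non-degraded neuron computes
// its Manhattan distance to the pattern with a shared-memory reduction and, under the lock,
// records a hit (category and neuron id) when the distance falls inside its radius.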
__global__ void recognize(unsigned char * d_vectorNeuron, unsigned char *d_vectorFlags,
unsigned char *d_pattern, int *d_countHit, unsigned char *d_ptr,
unsigned char *d_arrayCategory, unsigned char *d_idsNeuron, Lock lock){
__shared__ unsigned char sharedVectorNeuron [SIZE_CHARACTERISTIC];
__shared__ unsigned char sharedVectorFlags [SIZE_FLAGS];
__shared__ unsigned char sharedPattern [SIZE_CHARACTERISTIC];
__shared__ int sharedDistanceManhattan[SIZE_CHARACTERISTIC];
int vectorIndex = threadIdx.x + SIZE_CHARACTERISTIC * blockIdx.x;
int flagIndex = threadIdx.x + SIZE_FLAGS * blockIdx.x;
int threadIndex = threadIdx.x;
unsigned char ptr= *d_ptr;
sharedVectorNeuron[threadIndex] = d_vectorNeuron [vectorIndex];
sharedPattern [threadIndex] = d_pattern [threadIndex];
if(threadIndex < SIZE_FLAGS)
sharedVectorFlags[threadIndex]=d_vectorFlags[flagIndex];
__syncthreads(); // make sure entire block is loaded!
if(blockIdx.x == ptr) // if this block is the neuron ready to learn, copy the pattern into it
{
d_vectorNeuron[vectorIndex] = sharedPattern[threadIndex];
if(threadIndex == 0)
d_vectorFlags [ptr * SIZE_FLAGS + RAT]= RATIO;
}
else if(sharedVectorFlags[KNW] == 1 && sharedVectorFlags[DGR]==0)
{
sharedDistanceManhattan[threadIndex]= fabsf(sharedPattern[threadIndex]-sharedVectorNeuron[threadIndex]);
__syncthreads();
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (threadIndex < s)
sharedDistanceManhattan[threadIndex]+= sharedDistanceManhattan[threadIndex+s];
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (threadIndex == 0)
{
if(sharedDistanceManhattan[0] < sharedVectorFlags[RAT])
{
d_vectorFlags[SIZE_FLAGS * blockIdx.x + DIS] = sharedDistanceManhattan[0];
d_vectorFlags[SIZE_FLAGS * blockIdx.x + HIT] = 1;
lock.lock();
d_arrayCategory [*d_countHit] = sharedVectorFlags[CAT];
d_idsNeuron [*d_countHit] = blockIdx.x;
(*d_countHit)++;
lock.unlock();
}
}
}
}
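// correct: one thread per allocated neuron. A neuron that fired for a category different from the
// desired output has its radius shrunk to the measured distance, and is marked degraded once its
// radius has dropped below MIN_RATIO.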
__global__ void correct(unsigned char *d_vectorFlags, unsigned char *d_ptr, unsigned char *d_desiredOutput)
{
int indexGlobal=threadIdx.x + blockDim.x * blockIdx.x;
unsigned char ratio,hit,dis,category;
if(indexGlobal < *d_ptr)
{
hit = d_vectorFlags[indexGlobal * SIZE_FLAGS + HIT];
dis = d_vectorFlags[indexGlobal * SIZE_FLAGS + DIS];
category = d_vectorFlags[indexGlobal * SIZE_FLAGS + CAT];
ratio = d_vectorFlags[indexGlobal * SIZE_FLAGS + RAT];
if(hit==1 && category != *d_desiredOutput)
{
if(ratio > dis) //necessary?
d_vectorFlags[ indexGlobal * SIZE_FLAGS + RAT ] = dis;
if(ratio < MIN_RATIO)
d_vectorFlags[ indexGlobal * SIZE_FLAGS + DGR ] = 1;
}
}
}
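// reset: one thread per allocated neuron; clears the hit flag before the next presentation.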
__global__ void reset(unsigned char *d_vectorFlags, unsigned char *d_ptr)
{
int indexGlobal=threadIdx.x + blockDim.x * blockIdx.x;
if(indexGlobal < *d_ptr)
d_vectorFlags[ indexGlobal * SIZE_FLAGS + HIT ] = 0;
}
void calculateStatistic(const float &currentTime, Statistic *&statistic, kernels kernel)
{
statistic[kernel].numExecutions++;
statistic[kernel].accumulateTime += currentTime;
if(statistic[kernel].minTime >currentTime)
statistic[kernel].minTime = currentTime;
if(statistic [kernel].maxTime < currentTime)
statistic[kernel].maxTime =currentTime;
}
void debugTimer(GpuTimer timer){
int err = printf("\n%f msecs.\n", timer.Elapsed());
if (err < 0) {
//Couldn't print! Probably closed stdout - bad news
std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
exit(1);
}
}
| 9c5db079783f3d2c8e0122a9002dc4264186f286.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "utilCuda.h"
#include "timer.h"
#include "lock.h"
#include "../Class/interface.h"
#include "../Class/neuralNetwork.h"
#include "../Class/statistic.h"
#include "../Class/countNetwork.h"
#define RATIO 22
#define MIN_RATIO 2
//cuda kernel prototypes
__global__ void correct ( unsigned char * d_vectorFlags , unsigned char * d_ptr, unsigned char *d_desiredOutput);
__global__ void reset ( unsigned char * d_vectorFlags , unsigned char *d_ptr);
__global__ void recognize( unsigned char * d_vectorNeuron, unsigned char *d_vectorFlags,
unsigned char * d_pattern , int *d_countHit, unsigned char *d_ptr,
unsigned char * d_arrayCategory , unsigned char* d_idsNeuron,Lock lock);
// methods prototype
template<class T>
inline bool equal(T a, T b);
template<class T>
bool compare(T array [] , int sizeArray);
void calculateStatistic(const float & currentTime, Statistic * & statistic, kernels kernel);
void debugTimer( GpuTimer timer);
//--------------------------------------Main methods-------------------------------
extern "C"
void boot(NeuralNetwork * & neuralSenses,const SizeNet & sizeNet, Statistic * & statistic, OrderNetwork * & orderNet) {
unsigned char * d_vectorZero;
GpuTimer timer;
// It allocates memory on the device
checkCudaErrors(cudaMalloc(&d_vectorZero,sizeNet.sizeVectorNeuron));
// initialize the memory block to zero (0)
timer.Start();
checkCudaErrors(cudaMemset(d_vectorZero , 0 , sizeNet.sizeVectorNeuron));
timer.Stop();
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
calculateStatistic(timer.Elapsed(),statistic,BOOT);
// copy from device to host
checkCudaErrors(cudaMemcpy(neuralSenses[ SIGHT ].vectorNeuron, d_vectorZero, sizeNet.sizeVectorNeuron, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(neuralSenses[ SIGHT ].vectorFlags , d_vectorZero, sizeNet.sizevectorFlags , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(neuralSenses[ SIGHT ].binaryCharacteristic,d_vectorZero,sizeNet.sizeBinaryCharacteristic,cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(neuralSenses[ HEARING ].vectorNeuron, d_vectorZero, sizeNet.sizeVectorNeuron, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(neuralSenses[ HEARING ].vectorFlags , d_vectorZero, sizeNet.sizevectorFlags , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(neuralSenses[ HEARING ].binaryCharacteristic,d_vectorZero,sizeNet.sizeBinaryCharacteristic,cudaMemcpyDeviceToHost));
// checkCudaErrors(cudaMemcpy(orderNet->countNet[0].vectorNetworkCount , d_vectorZero, sizeNet.sizeVectorNeuron, cudaMemcpyDeviceToHost);
/* for(int i=0; i<sizeNet.numOrderNeurons; i++) {
checkCudaErrors(cudaMemcpy(orderNet->countNet[i].vectorNetworkCount , d_vectorZero, sizeNet.sizeVectorNeuron, cudaMemcpyDeviceToHost));
//checkCudaErrors(cudaMemcpy(orderNet->countNet[i].vectorFlagsCount , d_vectorZero, sizeNet.sizevectorFlags, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(orderNet->countNet[i].vectorPointerCount , d_vectorZero, sizeNet.sizevectorFlags, cudaMemcpyDeviceToHost));
}*/
// Free memory on device Reserved
checkCudaErrors(cudaFree(d_vectorZero));
}
extern "C"
stateNeuralNetwork recognize(NeuralNetwork * neuralSenses, const SizeNet & sizeNet,
unsigned char * h_pattern, Interface * interface, Statistic *& statistic)
{
int * d_countHit;
unsigned char * d_arrayCategory,*d_idsNeuron;
unsigned char * d_vectorNeuron,* d_vectorFlags,*d_pattern,*d_ptr;
stateNeuralNetwork state;
dim3 blockSize (SIZE_CHARACTERISTIC);
dim3 gridSize ( (*neuralSenses->ptr) +1 );
GpuTimer timer;
Lock lock;
*(interface->hits) = 0;
// It allocates memory on the device
checkCudaErrors(cudaMalloc( &d_vectorNeuron, sizeNet.sizeVectorNeuron) );
checkCudaErrors(cudaMalloc( &d_vectorFlags , sizeNet.sizevectorFlags ) );
checkCudaErrors(cudaMalloc( &d_pattern , sizeof(unsigned char) * SIZE_CHARACTERISTIC));
checkCudaErrors(cudaMalloc( &d_arrayCategory , sizeof(unsigned char) * (*(neuralSenses->ptr))));
checkCudaErrors(cudaMalloc( &d_idsNeuron , sizeof(unsigned char) * (*(neuralSenses->ptr))));
checkCudaErrors(cudaMalloc( &d_ptr , sizeof(unsigned char)));
checkCudaErrors(cudaMalloc( &d_countHit , sizeof(int)));
// copy from host to device
checkCudaErrors( cudaMemcpy( d_vectorNeuron, neuralSenses->vectorNeuron ,sizeNet.sizeVectorNeuron, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy( d_vectorFlags , neuralSenses->vectorFlags ,sizeNet.sizevectorFlags , cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy( d_pattern , h_pattern ,sizeof(unsigned char)*SIZE_CHARACTERISTIC, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy( d_ptr , neuralSenses->ptr ,sizeof(unsigned char) , cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy( d_countHit , interface->hits ,sizeof(int) , cudaMemcpyHostToDevice ) );
//call the recognize kernel
timer.Start();
recognize<<<gridSize,blockSize>>>(d_vectorNeuron,d_vectorFlags,d_pattern,d_countHit,d_ptr,d_arrayCategory,d_idsNeuron,lock);
timer.Stop();
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
calculateStatistic(timer.Elapsed(),statistic,RECOGNIZE);
// copy from device to host
checkCudaErrors( cudaMemcpy( interface->hits, d_countHit, sizeof(int), cudaMemcpyDeviceToHost ) );
checkCudaErrors( cudaMemcpy( neuralSenses->vectorNeuron , d_vectorNeuron, sizeNet.sizeVectorNeuron, cudaMemcpyDeviceToHost ) );
checkCudaErrors( cudaMemcpy( neuralSenses->vectorFlags , d_vectorFlags , sizeNet.sizevectorFlags , cudaMemcpyDeviceToHost ) );
checkCudaErrors( cudaMemcpy( neuralSenses->ptr , d_ptr , sizeof(unsigned char) , cudaMemcpyDeviceToHost ) );
interface->freeMem();
interface->setHit();
checkCudaErrors(cudaMemcpy(interface->arrayCategory,d_arrayCategory ,sizeof(unsigned char)*(* (interface->hits)),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(interface->id ,d_idsNeuron ,sizeof(unsigned char)*(* (interface->hits)),cudaMemcpyDeviceToHost));
if(* (interface->hits) > 1){
if(* (interface->hits) == 2)
state = equal(interface->arrayCategory[0],interface->arrayCategory[1])? IS_HIT : DIFF;
else
state = compare(interface->arrayCategory,* (interface->hits)) ? IS_HIT : DIFF;
}
else if(* (interface->hits) == 1)
state=IS_HIT;
else
state=NO_HIT;
// Free memory on device Reserved
checkCudaErrors(cudaFree(d_vectorNeuron));
checkCudaErrors(cudaFree(d_vectorFlags));
checkCudaErrors(cudaFree(d_pattern));
checkCudaErrors(cudaFree(d_countHit));
checkCudaErrors(cudaFree(d_ptr));
checkCudaErrors(cudaFree(d_arrayCategory));
checkCudaErrors(cudaFree(d_idsNeuron));
lock.freeMem();
return state;
}
extern "C"
void correct(NeuralNetwork * neuralSenses , const SizeNet & sizeNet,
unsigned char desiredOutput, int maxThreadsPerBlock, Statistic *&statistic){
unsigned char * d_desiredOutput;
unsigned char * d_vectorFlags,* d_ptr;
dim3 blockSize (maxThreadsPerBlock);
int numblock= (*(neuralSenses->ptr) % maxThreadsPerBlock == 0) ?
*(neuralSenses->ptr) / maxThreadsPerBlock:
*(neuralSenses->ptr) / maxThreadsPerBlock + 1;
dim3 gridSize(numblock);
GpuTimer timer;
// It allocates memory on the device
checkCudaErrors(cudaMalloc(&d_vectorFlags ,sizeof(unsigned char) * SIZE_FLAGS* (*neuralSenses->ptr)));
checkCudaErrors(cudaMalloc(&d_desiredOutput,sizeof(unsigned char)));
checkCudaErrors(cudaMalloc(&d_ptr,sizeof(unsigned char)));
// copy from host to device
checkCudaErrors( cudaMemcpy( d_vectorFlags , neuralSenses->vectorFlags ,sizeof(unsigned char) * SIZE_FLAGS * (*neuralSenses->ptr), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy( d_ptr , neuralSenses->ptr ,sizeof(unsigned char) , cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy( d_desiredOutput, &desiredOutput ,sizeof(unsigned char) , cudaMemcpyHostToDevice ) );
timer.Start();
//call the correct kernel
correct<<<gridSize,blockSize>>>(d_vectorFlags,d_ptr,d_desiredOutput);
timer.Stop();
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
calculateStatistic(timer.Elapsed(),statistic,CORRECT);
// copy from device to host
checkCudaErrors( cudaMemcpy( neuralSenses->vectorFlags , d_vectorFlags , sizeof(unsigned char) * SIZE_FLAGS *(*neuralSenses->ptr), cudaMemcpyDeviceToHost ) );
// Free memory on device Reserved
checkCudaErrors(cudaFree(d_vectorFlags));
checkCudaErrors(cudaFree(d_ptr));
checkCudaErrors(cudaFree(d_desiredOutput));
}
extern "C"
void reset(NeuralNetwork * neuralSenses , const SizeNet & sizeNet, int maxThreadsPerBlock, Statistic *&statistic)
{
unsigned char * d_vectorFlags,* d_ptr;
dim3 blockSize (maxThreadsPerBlock);
int numblock = (*(neuralSenses->ptr) % maxThreadsPerBlock == 0) ?
*(neuralSenses->ptr) / maxThreadsPerBlock:
*(neuralSenses->ptr) / maxThreadsPerBlock + 1;
dim3 gridSize(numblock);
GpuTimer timer;
// It allocates memory on the device
checkCudaErrors(cudaMalloc(&d_vectorFlags ,sizeof(unsigned char) * SIZE_FLAGS * (*neuralSenses->ptr)));
checkCudaErrors(cudaMalloc(&d_ptr,sizeof(unsigned char)));
// copy from host to device
checkCudaErrors( cudaMemcpy( d_vectorFlags , neuralSenses->vectorFlags ,sizeof(unsigned char) * SIZE_FLAGS * (*neuralSenses->ptr), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy( d_ptr , neuralSenses->ptr ,sizeof(unsigned char) , cudaMemcpyHostToDevice ) );
timer.Start();
//call the reset kernel
reset<<<gridSize,blockSize>>>(d_vectorFlags,d_ptr);
timer.Stop();
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
calculateStatistic(timer.Elapsed(),statistic,RESET);
// copy from device to host
checkCudaErrors( cudaMemcpy( neuralSenses->vectorFlags , d_vectorFlags , sizeof(unsigned char) * SIZE_FLAGS * (*neuralSenses->ptr), cudaMemcpyDeviceToHost ) );
// Free memory on device Reserved
checkCudaErrors(cudaFree(d_vectorFlags));
checkCudaErrors(cudaFree(d_ptr));
}
// methods
template<class T>
bool equal(T a, T b){
return (a==b)? true : false;
}
template<class T>
bool compare(T array[], int sizeArray)
{
T element=array[0];
for (register int i = 1; i < sizeArray; i++) {
if(!equal(element,array[i]))
return false;
}
return true;
}
//cuda kernel
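// recognize: one thread block per neuron (blockIdx.x is the neuron id). Each block stages the
// neuron's stored vector, the input pattern and the neuron's flags in shared memory. The block
// whose index equals *d_ptr (the next free neuron) stores the pattern as a new neuron; committed,
// non-degraded neurons compute the Manhattan distance to the pattern with a tree reduction and,
// if it falls inside their radius, record a hit (distance, category and neuron id) under the lock.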
__global__ void recognize(unsigned char * d_vectorNeuron, unsigned char *d_vectorFlags,
unsigned char *d_pattern, int *d_countHit, unsigned char *d_ptr,
unsigned char *d_arrayCategory, unsigned char *d_idsNeuron, Lock lock){
__shared__ unsigned char sharedVectorNeuron [SIZE_CHARACTERISTIC];
__shared__ unsigned char sharedVectorFlags [SIZE_FLAGS];
__shared__ unsigned char sharedPattern [SIZE_CHARACTERISTIC];
__shared__ int sharedDistanceManhattan[SIZE_CHARACTERISTIC];
int vectorIndex = threadIdx.x + SIZE_CHARACTERISTIC * blockIdx.x;
int flagIndex = threadIdx.x + SIZE_FLAGS * blockIdx.x;
int threadIndex = threadIdx.x;
unsigned char ptr= *d_ptr;
sharedVectorNeuron[threadIndex] = d_vectorNeuron [vectorIndex];
sharedPattern [threadIndex] = d_pattern [threadIndex];
if(threadIndex < SIZE_FLAGS)
sharedVectorFlags[threadIndex]=d_vectorFlags[flagIndex];
__syncthreads(); // make sure entire block is loaded!
if(blockIdx.x == ptr) //if this is the neuron ready to learn, copy the pattern into it
{
d_vectorNeuron[vectorIndex] = sharedPattern[threadIndex];
if(threadIndex == 0)
d_vectorFlags [ptr * SIZE_FLAGS + RAT]= RATIO;
}
else if(sharedVectorFlags[KNW] == 1 && sharedVectorFlags[DGR]==0)
{
sharedDistanceManhattan[threadIndex]= fabsf(sharedPattern[threadIndex]-sharedVectorNeuron[threadIndex]);
__syncthreads();
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (threadIndex < s)
sharedDistanceManhattan[threadIndex]+= sharedDistanceManhattan[threadIndex+s];
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (threadIndex == 0)
{
if(sharedDistanceManhattan[0] < sharedVectorFlags[RAT])
{
d_vectorFlags[SIZE_FLAGS * blockIdx.x + DIS] = sharedDistanceManhattan[0];
d_vectorFlags[SIZE_FLAGS * blockIdx.x + HIT] = 1;
lock.lock();
d_arrayCategory [*d_countHit] = sharedVectorFlags[CAT];
d_idsNeuron [*d_countHit] = blockIdx.x;
(*d_countHit)++;
lock.unlock();
}
}
}
}
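// correct: one thread per allocated neuron. A neuron that fired for a category different from the
// desired output has its radius shrunk to the measured distance, and is marked degraded once its
// radius has dropped below MIN_RATIO.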
__global__ void correct(unsigned char *d_vectorFlags, unsigned char *d_ptr, unsigned char *d_desiredOutput)
{
int indexGlobal=threadIdx.x + blockDim.x * blockIdx.x;
unsigned char ratio,hit,dis,category;
if(indexGlobal < *d_ptr)
{
hit = d_vectorFlags[indexGlobal * SIZE_FLAGS + HIT];
dis = d_vectorFlags[indexGlobal * SIZE_FLAGS + DIS];
category = d_vectorFlags[indexGlobal * SIZE_FLAGS + CAT];
ratio = d_vectorFlags[indexGlobal * SIZE_FLAGS + RAT];
if(hit==1 && category != *d_desiredOutput)
{
if(ratio > dis) //necessary?
d_vectorFlags[ indexGlobal * SIZE_FLAGS + RAT ] = dis;
if(ratio < MIN_RATIO)
d_vectorFlags[ indexGlobal * SIZE_FLAGS + DGR ] = 1;
}
}
}
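// reset: one thread per allocated neuron; clears the hit flag before the next presentation.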
__global__ void reset(unsigned char *d_vectorFlags, unsigned char *d_ptr)
{
int indexGlobal=threadIdx.x + blockDim.x * blockIdx.x;
if(indexGlobal < *d_ptr)
d_vectorFlags[ indexGlobal * SIZE_FLAGS + HIT ] = 0;
}
void calculateStatistic(const float &currentTime, Statistic *&statistic, kernels kernel)
{
statistic[kernel].numExecutions++;
statistic[kernel].accumulateTime += currentTime;
if(statistic[kernel].minTime >currentTime)
statistic[kernel].minTime = currentTime;
if(statistic [kernel].maxTime < currentTime)
statistic[kernel].maxTime =currentTime;
}
void debugTimer(GpuTimer timer){
int err = printf("\n%f msecs.\n", timer.Elapsed());
if (err < 0) {
//Couldn't print! Probably closed stdout - bad news
std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
exit(1);
}
}
|
7d1e85f9b8aae30d022646c964febda5e045f2ed.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include "cuStopwatch.cu"
// Compute sum of integers from 0 to n-1
__global__ void trianglenumber(uint64_t* res, uint64_t n) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < n){
// *res += tid;
atomicAdd(res, tid); // explain the problem
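// atomicAdd serializes the concurrent read-modify-write on *res; the plain "+=" commented
// out above would let threads race on the shared accumulator and lose updates.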
}
return;
}
int main() {
// Allocate memory
uint64_t *res_host, *res_dev;
hipHostMalloc((void**)&res_host, sizeof(uint64_t), hipHostMallocDefault);
hipMalloc((void**)&res_dev, sizeof(uint64_t));
hipMemset((void*)res_dev, 0, sizeof(uint64_t));
// Perform computation
cuStopwatch sw1;
sw1.start();
hipLaunchKernelGGL(( trianglenumber), dim3(1024), dim3(1024), 0, 0, res_dev, 1024*1024);
hipMemcpyAsync(res_host, res_dev, sizeof(uint64_t), hipMemcpyDeviceToHost);
printf("Computation time: %.4fms\n", sw1.stop());
printf("Result: %I64u\n", *res_host);
// Free memory
hipFree(res_dev);
hipHostFree(res_host);
return 0;
} | 7d1e85f9b8aae30d022646c964febda5e045f2ed.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <stdint.h>
#include "cuStopwatch.cu"
// Compute sum of integers from 0 to n-1
__global__ void trianglenumber(uint64_t* res, uint64_t n) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < n){
// *res += tid;
atomicAdd(res, tid); // explain the problem
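// atomicAdd serializes the concurrent read-modify-write on *res; the plain "+=" commented
// out above would let threads race on the shared accumulator and lose updates.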
}
return;
}
int main() {
// Allocate memory
uint64_t *res_host, *res_dev;
cudaHostAlloc((void**)&res_host, sizeof(uint64_t), cudaHostAllocDefault);
cudaMalloc((void**)&res_dev, sizeof(uint64_t));
cudaMemset((void*)res_dev, 0, sizeof(uint64_t));
// Perform computation
cuStopwatch sw1;
sw1.start();
trianglenumber<<<1024, 1024>>>(res_dev, 1024*1024);
cudaMemcpyAsync(res_host, res_dev, sizeof(uint64_t), cudaMemcpyDeviceToHost);
printf("Computation time: %.4fms\n", sw1.stop());
printf("Result: %I64u\n", *res_host);
// Free memory
cudaFree(res_dev);
cudaFreeHost(res_host);
return 0;
} |
b204712df43e905fae8145c548342b5abb28ab6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void linearLayerUpdateBias(float* dZ, float* b, int dZ_x_dim, int dZ_y_dim, int b_x_dim, float learning_rate) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < dZ_x_dim * dZ_y_dim) {
int dZ_x = index % dZ_x_dim;
int dZ_y = index / dZ_x_dim;
atomicAdd(&b[dZ_y], - learning_rate * (dZ[dZ_y * dZ_x_dim + dZ_x] / dZ_x_dim));
}
} | b204712df43e905fae8145c548342b5abb28ab6b.cu | #include "includes.h"
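// Bias update for a linear layer: each thread takes one entry of dZ, averages it over the batch
// dimension (dZ_x_dim) and atomically subtracts learning_rate times that contribution from the
// bias of its output row, b[dZ_y].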
__global__ void linearLayerUpdateBias(float* dZ, float* b, int dZ_x_dim, int dZ_y_dim, int b_x_dim, float learning_rate) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < dZ_x_dim * dZ_y_dim) {
int dZ_x = index % dZ_x_dim;
int dZ_y = index / dZ_x_dim;
atomicAdd(&b[dZ_y], - learning_rate * (dZ[dZ_y * dZ_x_dim + dZ_x] / dZ_x_dim));
}
} |
d0df619810868b8b471048a39b72b794428e35a4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Parallel Processing Teaching Toolkit
* CUDA - Example 01
* Detect CUDA Devices
* https://github.com/javierip/parallel-processing-teaching-toolkit
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
// Print device properties
void printDevProp(hipDeviceProp_t devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
int main()
{
// Number of CUDA devices
int devCount;
hipGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
printf("\nPress any key to exit...");
char c;
scanf("%c", &c);
return 0;
} | d0df619810868b8b471048a39b72b794428e35a4.cu | /*
* Parallel Processing Teaching Toolkit
* CUDA - Example 01
* Detect CUDA Devices
* https://github.com/javierip/parallel-processing-teaching-toolkit
*/
#include <stdio.h>
#include <cuda_runtime.h>
// Print device properties
void printDevProp(cudaDeviceProp devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
int main()
{
// Number of CUDA devices
int devCount;
cudaGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
printf("\nPress any key to exit...");
char c;
scanf("%c", &c);
return 0;
} |
bf0fa8afed3733cf350a91925f2f030a38dfb237.hip | // !!! This is a file automatically generated by hipify!!!
#include "chainerx/cuda/cuda_device.h"
#include <cmath>
#include <cstdint>
#include <hip/hip_runtime.h>
#include "chainerx/array.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/scalar.h"
namespace chainerx {
namespace cuda {
namespace {
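// Element-wise select: out[i] = (x1[i] < x2) ? pos : neg[i], with the scalars x2 and pos baked
// into the functor before it is passed to Elementwise.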
template <typename T>
struct IfLessElseASSAImpl {
__device__ void operator()(int64_t /*i*/, T x1, T neg, T& out) { out = x1 < x2 ? pos : neg; }
T x2;
T pos;
};
} // namespace
void CudaDevice::IfLessElseASSA(const Array& x1, Scalar x2, Scalar pos, const Array& neg, const Array& out) {
CheckDevicesCompatible(x1, neg, out);
CheckCudaError(hipSetDevice(index()));
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, const T, T>(IfLessElseASSAImpl<T>{static_cast<T>(x2), static_cast<T>(pos)}, x1, neg, out);
});
}
namespace {
template <typename T>
struct TanhImpl {
__device__ void operator()(int64_t /*i*/, T x, T& out) { out = std::tanh(x); }
};
} // namespace
void CudaDevice::Tanh(const Array& x, const Array& out) {
CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{index()};
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(TanhImpl<T>{}, x, out);
});
}
} // namespace cuda
} // namespace chainerx
| bf0fa8afed3733cf350a91925f2f030a38dfb237.cu | #include "chainerx/cuda/cuda_device.h"
#include <cmath>
#include <cstdint>
#include <cuda_runtime.h>
#include "chainerx/array.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/scalar.h"
namespace chainerx {
namespace cuda {
namespace {
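// Element-wise select: out[i] = (x1[i] < x2) ? pos : neg[i], with the scalars x2 and pos baked
// into the functor before it is passed to Elementwise.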
template <typename T>
struct IfLessElseASSAImpl {
__device__ void operator()(int64_t /*i*/, T x1, T neg, T& out) { out = x1 < x2 ? pos : neg; }
T x2;
T pos;
};
} // namespace
void CudaDevice::IfLessElseASSA(const Array& x1, Scalar x2, Scalar pos, const Array& neg, const Array& out) {
CheckDevicesCompatible(x1, neg, out);
CheckCudaError(cudaSetDevice(index()));
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, const T, T>(IfLessElseASSAImpl<T>{static_cast<T>(x2), static_cast<T>(pos)}, x1, neg, out);
});
}
namespace {
template <typename T>
struct TanhImpl {
__device__ void operator()(int64_t /*i*/, T x, T& out) { out = std::tanh(x); }
};
} // namespace
void CudaDevice::Tanh(const Array& x, const Array& out) {
CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{index()};
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(TanhImpl<T>{}, x, out);
});
}
} // namespace cuda
} // namespace chainerx
|
302564a3e95ca5c953f8622e87b1c0210cdd9afd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void hello(){
printf("Hello from block: %u, thread: %u\n", blockIdx.x, threadIdx.x);
}
int main(){
hipLaunchKernelGGL(( hello), dim3(2),dim3(2), 0, 0, );
hipDeviceSynchronize();
}
| 302564a3e95ca5c953f8622e87b1c0210cdd9afd.cu | #include <stdio.h>
__global__ void hello(){
printf("Hello from block: %u, thread: %u\n", blockIdx.x, threadIdx.x);
}
int main(){
hello<<<2,2>>>();
cudaDeviceSynchronize();
}
|
9402ccb86c22332e81233a0ebf893af01e3e5603.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__
void saxpy(float a, float *x, float *y, float *out, size_t n)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n) {
out[tid] = a * x[tid] + y[tid];
}
}
| 9402ccb86c22332e81233a0ebf893af01e3e5603.cu | extern "C" __global__
void saxpy(float a, float *x, float *y, float *out, size_t n)
{
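// SAXPY: out[i] = a * x[i] + y[i]; one thread per element, with n guarding the last partial block.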
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n) {
out[tid] = a * x[tid] + y[tid];
}
}
|
6eb9c1a66832c0cf42bfde5fc461f4e866254c5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "accessor.h"
#include "model.h"
#include "cuda_helper.h"
template<typename DT, int dim>
TensorAccessorR<DT, dim>::TensorAccessorR(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
const AccessorRO<DT, dim> acc(region, fid);
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
}
template<typename DT, int dim>
TensorAccessorR<DT, dim>::TensorAccessorR()
{
}
template<typename DT>
__global__
void zero_array(DT* ptr, coord_t size)
{
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = 0;
}
}
template<typename DT, int dim>
TensorAccessorW<DT, dim>::TensorAccessorW(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime,
bool readOutput)
{
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
if (readOutput) {
const AccessorRW<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
} else {
const AccessorWO<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
// FIXME: currently we zero init the region if not read output
hipLaunchKernelGGL(( assign_kernel<DT>), dim3(GET_BLOCKS(rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
ptr, rect.volume(), 0.0f);
checkCUDA(hipDeviceSynchronize());
}
}
template<typename DT, int dim>
TensorAccessorW<DT, dim>::TensorAccessorW()
{
}
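// The helperGetTensorPointer* functions below switch on the runtime dimensionality (1D-4D) of the
// region's index-space domain and return a raw pointer to the densely laid-out data.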
template<typename DT>
const DT* helperGetTensorPointerRO(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
Domain domain = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
switch (domain.get_dim()) {
case 1:
{
TensorAccessorR<DT, 1> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
case 2:
{
TensorAccessorR<DT, 2> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
case 3:
{
TensorAccessorR<DT, 3> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
case 4:
{
TensorAccessorR<DT, 4> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
default:
{
fprintf(stderr, "Unsupported accessor dimension");
assert(false);
return NULL;
}
}
}
template<typename DT>
DT* helperGetTensorPointerRW(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
Domain domain = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
switch (domain.get_dim()) {
case 1:
{
TensorAccessorW<DT, 1> acc(region, req, fid, ctx, runtime, true/*readOutput*/);
return acc.ptr;
}
case 2:
{
TensorAccessorW<DT, 2> acc(region, req, fid, ctx, runtime, true/*readOutput*/);
return acc.ptr;
}
case 3:
{
TensorAccessorW<DT, 3> acc(region, req, fid, ctx, runtime, true/*readOutput*/);
return acc.ptr;
}
case 4:
{
TensorAccessorW<DT, 4> acc(region, req, fid, ctx, runtime, true/*readOutput*/);
return acc.ptr;
}
default:
{
fprintf(stderr, "Unsupported accessor dimension");
assert(false);
return NULL;
}
}
}
template<typename DT>
DT* helperGetTensorPointerWO(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
Domain domain = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
switch (domain.get_dim()) {
case 1:
{
TensorAccessorW<DT, 1> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
case 2:
{
TensorAccessorW<DT, 2> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
case 3:
{
TensorAccessorW<DT, 3> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
case 4:
{
TensorAccessorW<DT, 4> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
default:
{
fprintf(stderr, "Unsupported accessor dimension");
assert(false);
return NULL;
}
}
}
template class TensorAccessorR<float, 1>;
template class TensorAccessorR<float, 2>;
template class TensorAccessorR<float, 3>;
template class TensorAccessorR<float, 4>;
template class TensorAccessorR<int32_t, 1>;
template class TensorAccessorR<int32_t, 2>;
template class TensorAccessorR<int32_t, 3>;
template class TensorAccessorR<int32_t, 4>;
template class TensorAccessorR<int64_t, 1>;
template class TensorAccessorR<int64_t, 2>;
template class TensorAccessorR<int64_t, 3>;
template class TensorAccessorR<int64_t, 4>;
template class TensorAccessorW<float, 1>;
template class TensorAccessorW<float, 2>;
template class TensorAccessorW<float, 3>;
template class TensorAccessorW<float, 4>;
template class TensorAccessorW<int32_t, 1>;
template class TensorAccessorW<int32_t, 2>;
template class TensorAccessorW<int32_t, 3>;
template class TensorAccessorW<int32_t, 4>;
template class TensorAccessorW<int64_t, 1>;
template class TensorAccessorW<int64_t, 2>;
template class TensorAccessorW<int64_t, 3>;
template class TensorAccessorW<int64_t, 4>;
template const float* helperGetTensorPointerRO(
PhysicalRegion region, RegionRequirement req, FieldID fid, Context ctx, Runtime* runtime);
template float* helperGetTensorPointerRW(
PhysicalRegion region, RegionRequirement req, FieldID fid, Context ctx, Runtime* runtime);
template float* helperGetTensorPointerWO(
PhysicalRegion region, RegionRequirement req, FieldID fid, Context ctx, Runtime* runtime);
| 6eb9c1a66832c0cf42bfde5fc461f4e866254c5e.cu | #include "accessor.h"
#include "model.h"
#include "cuda_helper.h"
template<typename DT, int dim>
TensorAccessorR<DT, dim>::TensorAccessorR(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
const AccessorRO<DT, dim> acc(region, fid);
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
}
template<typename DT, int dim>
TensorAccessorR<DT, dim>::TensorAccessorR()
{
}
template<typename DT>
__global__
void zero_array(DT* ptr, coord_t size)
{
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = 0;
}
}
template<typename DT, int dim>
TensorAccessorW<DT, dim>::TensorAccessorW(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime,
bool readOutput)
{
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
if (readOutput) {
const AccessorRW<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
} else {
const AccessorWO<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
// FIXME: currently we zero init the region if not read output
assign_kernel<DT><<<GET_BLOCKS(rect.volume()), CUDA_NUM_THREADS>>>(
ptr, rect.volume(), 0.0f);
checkCUDA(cudaDeviceSynchronize());
}
}
template<typename DT, int dim>
TensorAccessorW<DT, dim>::TensorAccessorW()
{
}
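// The helperGetTensorPointer* functions below switch on the runtime dimensionality (1D-4D) of the
// region's index-space domain and return a raw pointer to the densely laid-out data.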
template<typename DT>
const DT* helperGetTensorPointerRO(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
Domain domain = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
switch (domain.get_dim()) {
case 1:
{
TensorAccessorR<DT, 1> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
case 2:
{
TensorAccessorR<DT, 2> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
case 3:
{
TensorAccessorR<DT, 3> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
case 4:
{
TensorAccessorR<DT, 4> acc(region, req, fid, ctx, runtime);
return acc.ptr;
}
default:
{
fprintf(stderr, "Unsupported accessor dimension");
assert(false);
return NULL;
}
}
}
template<typename DT>
DT* helperGetTensorPointerRW(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
Domain domain = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
switch (domain.get_dim()) {
case 1:
{
TensorAccessorW<DT, 1> acc(region, req, fid, ctx, runtime, true/*readOutput*/);
return acc.ptr;
}
case 2:
{
TensorAccessorW<DT, 2> acc(region, req, fid, ctx, runtime, true/*readOutput*/);
return acc.ptr;
}
case 3:
{
TensorAccessorW<DT, 3> acc(region, req, fid, ctx, runtime, true/*readOutput*/);
return acc.ptr;
}
case 4:
{
TensorAccessorW<DT, 4> acc(region, req, fid, ctx, runtime, true/*readOutput*/);
return acc.ptr;
}
default:
{
fprintf(stderr, "Unsupported accessor dimension");
assert(false);
return NULL;
}
}
}
template<typename DT>
DT* helperGetTensorPointerWO(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
Domain domain = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
switch (domain.get_dim()) {
case 1:
{
TensorAccessorW<DT, 1> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
case 2:
{
TensorAccessorW<DT, 2> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
case 3:
{
TensorAccessorW<DT, 3> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
case 4:
{
TensorAccessorW<DT, 4> acc(region, req, fid, ctx, runtime, false/*readOutput*/);
return acc.ptr;
}
default:
{
fprintf(stderr, "Unsupported accessor dimension");
assert(false);
return NULL;
}
}
}
template class TensorAccessorR<float, 1>;
template class TensorAccessorR<float, 2>;
template class TensorAccessorR<float, 3>;
template class TensorAccessorR<float, 4>;
template class TensorAccessorR<int32_t, 1>;
template class TensorAccessorR<int32_t, 2>;
template class TensorAccessorR<int32_t, 3>;
template class TensorAccessorR<int32_t, 4>;
template class TensorAccessorR<int64_t, 1>;
template class TensorAccessorR<int64_t, 2>;
template class TensorAccessorR<int64_t, 3>;
template class TensorAccessorR<int64_t, 4>;
template class TensorAccessorW<float, 1>;
template class TensorAccessorW<float, 2>;
template class TensorAccessorW<float, 3>;
template class TensorAccessorW<float, 4>;
template class TensorAccessorW<int32_t, 1>;
template class TensorAccessorW<int32_t, 2>;
template class TensorAccessorW<int32_t, 3>;
template class TensorAccessorW<int32_t, 4>;
template class TensorAccessorW<int64_t, 1>;
template class TensorAccessorW<int64_t, 2>;
template class TensorAccessorW<int64_t, 3>;
template class TensorAccessorW<int64_t, 4>;
template const float* helperGetTensorPointerRO(
PhysicalRegion region, RegionRequirement req, FieldID fid, Context ctx, Runtime* runtime);
template float* helperGetTensorPointerRW(
PhysicalRegion region, RegionRequirement req, FieldID fid, Context ctx, Runtime* runtime);
template float* helperGetTensorPointerWO(
PhysicalRegion region, RegionRequirement req, FieldID fid, Context ctx, Runtime* runtime);
|
757272f458733df23d55078d7802ca7e07ab5168.hip | // !!! This is a file automatically generated by hipify!!!
/*
Author: Jason He
Version: 1.0 20210521 Serial version.
Version: 2.0 20210523 MPI version using parallel fftw.
Version: 3.0 20210602 CUDA version using cufft. Use N*N block with 1 thread each.
Version: 3.1 20210603 Use more than 1 thread per block.
Version: 3.2 20210604 Included usage of shared memories. Didn't improve the speed.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hiprand/hiprand.h>
/*
Compute problem 37.5.1 in textbook on a 2D grid. Terminal time T=10000.
Inputs:
N: the size of the grid in both directions(should be evenly divisible by the number of processes)
c1: double precision parameter
c3: double precision parameter
M: number of time steps
s: long integer seed value(optional)
Outputs:
print arguments including the seed
output grid value into "CGL.out" that contain
the data at t=100k, k=0,1,2,...,10.
print run time and output it to runtime.dat
To run, E.g.
$ ./cgl 128 1.5 0.25 100000 12345
*/
const int threadsPerBlock = 256;//for NVIDIA TESLA K20c maximum is 1024
__device__ __constant__ int dev_N;
__device__ __constant__ double dev_dt;
__device__ __constant__ double dev_C1r;
__device__ __constant__ double dev_C1i;
__device__ __constant__ double dev_c3;
__global__ void cpy(hipfftDoubleComplex* dev_A, hipfftDoubleComplex* dev_A1){
//this kernel copies the grid values from A1 to A
int k = threadIdx.x + blockIdx.x*blockDim.x;
if(k < dev_N*dev_N)
dev_A[k] = dev_A1[k];
}
__global__ void spectrald(hipfftDoubleComplex* dev_A2){
//this kernel does the spectral derivative (Laplacian)
int i = threadIdx.x + blockIdx.x*blockDim.x;
if(i < dev_N*dev_N){
int j = i/dev_N;
int jj = (j <= dev_N/2 ? j : j-dev_N);//the mode index
int k = i%dev_N;
int kk = (k <= dev_N/2 ? k : k-dev_N);//the mode index
int coeff = -(jj*jj + kk*kk);
dev_A2[i].x *= coeff;
dev_A2[i].y *= coeff;
}
}
__global__ void RK4(hipfftDoubleComplex* dev_A, hipfftDoubleComplex* dev_A1,
hipfftDoubleComplex* dev_A2, double tcoeff){
//this kernel performs one RK4 stage (called four times per time step with different tcoeff)
__shared__ hipfftDoubleComplex A[threadsPerBlock];
__shared__ hipfftDoubleComplex A1[threadsPerBlock];
__shared__ hipfftDoubleComplex A2[threadsPerBlock];
/*maximum threadsPerBlock is 1024, so A,A1,A2 use up all the shared memories
on NVIDIA TESLA K20c in the maximum case.*/
int kk = threadIdx.x + blockIdx.x*blockDim.x;//global index
int k = threadIdx.x;//local index
if(kk < dev_N*dev_N){
A[k] = dev_A[kk];
A1[k] = dev_A1[kk];
A2[k] = dev_A2[kk];
double A1A1 = A1[k].x*A1[k].x + A1[k].y*A1[k].y;
double tmp = A1[k].x;//update the imag part first so store the real part temporarily
A1[k].x = A[k].x + tcoeff*dev_dt*(A1[k].x
+ (dev_C1r*A2[k].x - dev_C1i*A2[k].y)/dev_N/dev_N
- A1A1*(A1[k].x + dev_c3*A1[k].y));
A1[k].y = A[k].y + tcoeff*dev_dt*(A1[k].y
+ (dev_C1r*A2[k].y + dev_C1i*A2[k].x)/dev_N/dev_N
- A1A1*(A1[k].y - dev_c3*tmp));
dev_A1[kk] = A1[k];
}
}//rescale by 1/N^2 to correct the data after fft
int main(int argc, char* argv[])
{
#ifndef M_PI
const double M_PI = 4.0*atan(1.0);
#endif
//Choose gpu device
hipDeviceProp_t prop;
int dev;
memset(&prop, 0, sizeof(hipDeviceProp_t));
prop.multiProcessorCount = 13;
hipChooseDevice(&dev, &prop);
hipSetDevice(dev);
//load input parameters
int argi = 0;
const int N = atol(argv[++argi]);
double c1 = atof(argv[++argi]);
double c3 = atof(argv[++argi]);
int M = atoi(argv[++argi]);
long int seed;
if (argi < argc-1){
seed = atol(argv[++argi]);
}
else{
seed = (long int)time(NULL);
}
srand48(seed);
printf( "N = %d\nc1 = %lf\nc3 = %lf\nM = %d\n"
"Starting seed = %ld\n", N, c1, c3, M, seed);
//parameters for calculation. refer to textbook Eq(37.15)
double dt = 10000.0/M;//terminal time = 10000
double C1r = 1.0/64/64;//L = 128pi
double C1i = C1r*c1;
hipMemcpyToSymbol(dev_dt, &dt, sizeof(double));
hipMemcpyToSymbol(dev_C1r, &C1r, sizeof(double));
hipMemcpyToSymbol(dev_C1i, &C1i, sizeof(double));
hipMemcpyToSymbol(dev_c3, &c3, sizeof(double));
//parameters for the blocks and grids
hipMemcpyToSymbol(dev_N, &N, sizeof(int));
const int blocksPerGrid = N*N/threadsPerBlock + ((N*N)%threadsPerBlock > 0 ? 1 : 0);
//initialize value of the grid on host
/*because we need to output the initial data, might as well
initialize it on the host, output to file, and then transfer the data to the device.*/
hipfftDoubleComplex *A;
A = (hipfftDoubleComplex*)malloc(N*N*sizeof(hipfftDoubleComplex));
for (int i = 0; i < N*N; i++){
A[i].x = 3*drand48() -1.5;
A[i].y = 3*drand48() -1.5;
}
//initialize value of the grid on device
hipfftDoubleComplex *dev_A, *dev_A1, *dev_A2;
hipMalloc((void**)&dev_A, N*N*sizeof(hipfftDoubleComplex));
hipMalloc((void**)&dev_A1, N*N*sizeof(hipfftDoubleComplex));
hipMalloc((void**)&dev_A2, N*N*sizeof(hipfftDoubleComplex));
hipMemcpy(dev_A1, A, N*N*sizeof(hipfftDoubleComplex), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cpy), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_A, dev_A1);
//file output the initial value
FILE* file = fopen("CGL.out","w");
fwrite(A, sizeof(hipfftDoubleComplex), N*N, file);
printf("Saved output at t=0\n");
//cufft plans
hipfftHandle plan;
hipfftPlan2d(&plan, N, N, HIPFFT_Z2Z);
float elapsedTime;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//main loop
for(int step=0; step<M; ++step){
//first step in RK4
hipfftExecZ2Z(plan, dev_A1, dev_A2, HIPFFT_FORWARD);
hipLaunchKernelGGL(( spectrald), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_A2);
hipfftExecZ2Z(plan, dev_A2, dev_A2, HIPFFT_BACKWARD);
hipLaunchKernelGGL(( RK4), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_A, dev_A1, dev_A2, 0.25);
//second step in RK4
hipfftExecZ2Z(plan, dev_A1, dev_A2, HIPFFT_FORWARD);
hipLaunchKernelGGL(( spectrald), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_A2);
hipfftExecZ2Z(plan, dev_A2, dev_A2, HIPFFT_BACKWARD);
hipLaunchKernelGGL(( RK4), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_A, dev_A1, dev_A2, 1.0/3);
//third step in RK4
hipfftExecZ2Z(plan, dev_A1, dev_A2, HIPFFT_FORWARD);
hipLaunchKernelGGL(( spectrald), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_A2);
hipfftExecZ2Z(plan, dev_A2, dev_A2, HIPFFT_BACKWARD);
hipLaunchKernelGGL(( RK4), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_A, dev_A1, dev_A2, 0.5);
//fourth step in RK4
hipfftExecZ2Z(plan, dev_A1, dev_A2, HIPFFT_FORWARD);
hipLaunchKernelGGL(( spectrald), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_A2);
hipfftExecZ2Z(plan, dev_A2, dev_A2, HIPFFT_BACKWARD);
hipLaunchKernelGGL(( RK4), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_A, dev_A1, dev_A2, 1.0);
//store the final value of this time step to A
hipLaunchKernelGGL(( cpy), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_A, dev_A1);
//output to files
if((step+1)%(M/10) == 0)
{
hipMemcpy(A, dev_A, N*N*sizeof(hipfftDoubleComplex), hipMemcpyDeviceToHost);
fwrite(A, sizeof(hipfftDoubleComplex), N*N, file);
printf("Saved output at t=%d\n", (step+1)/10);
}
}//main loop
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
//close files and free memories
fclose(file);
hipfftDestroy(plan);
hipFree(dev_A);
hipFree(dev_A1);
hipFree(dev_A2);
free(A);
//print runtime and output to the file
FILE* tfile = fopen("runtime.dat", "a");
fprintf(tfile, "%d %lf\n", N, elapsedTime);
printf("Time: %gms\n", elapsedTime);
fclose(tfile);
return 0;
} | 757272f458733df23d55078d7802ca7e07ab5168.cu | /*
Author: Jason He
Version: 1.0 20210521 Serial version.
Version: 2.0 20210523 MPI version using parallel fftw.
Version: 3.0 20210602 CUDA version using cufft. Use N*N block with 1 thread each.
Version: 3.1 20210603 Use more than 1 thread per block.
Version: 3.2 20210604 Included usage of shared memories. Didn't improve the speed.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
#include <cufft.h>
#include <curand.h>
/*
Compute problem 37.5.1 in textbook on a 2D grid. Terminal time T=10000.
Inputs:
N: the size of the grid in both directions(should be evenly divisible by the number of processes)
c1: double precision parameter
c3: double precision parameter
M: number of time steps
s: long integer seed value(optional)
Outputs:
print arguments including the seed
output grid value into "CGL.out" that contain
the data at t=100k, k=0,1,2,...,10.
print run time and output it to runtime.dat
To run, E.g.
$ ./cgl 128 1.5 0.25 100000 12345
*/
const int threadsPerBlock = 256;//for NVIDIA TESLA K20c maximum is 1024
__device__ __constant__ int dev_N;
__device__ __constant__ double dev_dt;
__device__ __constant__ double dev_C1r;
__device__ __constant__ double dev_C1i;
__device__ __constant__ double dev_c3;
__global__ void cpy(cufftDoubleComplex* dev_A, cufftDoubleComplex* dev_A1){
//this kernel copies the grid values from A1 to A
int k = threadIdx.x + blockIdx.x*blockDim.x;
if(k < dev_N*dev_N)
dev_A[k] = dev_A1[k];
}
__global__ void spectrald(cufftDoubleComplex* dev_A2){
//this kernel does the spectral derivative (Laplacian)
int i = threadIdx.x + blockIdx.x*blockDim.x;
if(i < dev_N*dev_N){
int j = i/dev_N;
int jj = (j <= dev_N/2 ? j : j-dev_N);//the mode index
int k = i%dev_N;
int kk = (k <= dev_N/2 ? k : k-dev_N);//the mode index
int coeff = -(jj*jj + kk*kk);
dev_A2[i].x *= coeff;
dev_A2[i].y *= coeff;
}
}
__global__ void RK4(cufftDoubleComplex* dev_A, cufftDoubleComplex* dev_A1,
cufftDoubleComplex* dev_A2, double tcoeff){
//this kernel performs one RK4 stage (called four times per time step with different tcoeff)
__shared__ cufftDoubleComplex A[threadsPerBlock];
__shared__ cufftDoubleComplex A1[threadsPerBlock];
__shared__ cufftDoubleComplex A2[threadsPerBlock];
/*maximum threadsPerBlock is 1024, so A,A1,A2 use up all the shared memories
on NVIDIA TESLA K20c in the maximum case.*/
int kk = threadIdx.x + blockIdx.x*blockDim.x;//global index
int k = threadIdx.x;//local index
if(kk < dev_N*dev_N){
A[k] = dev_A[kk];
A1[k] = dev_A1[kk];
A2[k] = dev_A2[kk];
double A1A1 = A1[k].x*A1[k].x + A1[k].y*A1[k].y;
double tmp = A1[k].x;//update the imag part first so store the real part temporarily
A1[k].x = A[k].x + tcoeff*dev_dt*(A1[k].x
+ (dev_C1r*A2[k].x - dev_C1i*A2[k].y)/dev_N/dev_N
- A1A1*(A1[k].x + dev_c3*A1[k].y));
A1[k].y = A[k].y + tcoeff*dev_dt*(A1[k].y
+ (dev_C1r*A2[k].y + dev_C1i*A2[k].x)/dev_N/dev_N
- A1A1*(A1[k].y - dev_c3*tmp));
dev_A1[kk] = A1[k];
}
}//rescale by 1/N^2 to correct the data after fft
int main(int argc, char* argv[])
{
#ifndef M_PI
const double M_PI = 4.0*atan(1.0);
#endif
//Choose gpu device
cudaDeviceProp prop;
int dev;
memset(&prop, 0, sizeof(cudaDeviceProp));
prop.multiProcessorCount = 13;
cudaChooseDevice(&dev, &prop);
cudaSetDevice(dev);
//load input parameters
int argi = 0;
const int N = atol(argv[++argi]);
double c1 = atof(argv[++argi]);
double c3 = atof(argv[++argi]);
int M = atoi(argv[++argi]);
long int seed;
if (argi < argc-1){
seed = atol(argv[++argi]);
}
else{
seed = (long int)time(NULL);
}
srand48(seed);
printf( "N = %d\nc1 = %lf\nc3 = %lf\nM = %d\n"
"Starting seed = %ld\n", N, c1, c3, M, seed);
//parameters for calculation. refer to textbook Eq(37.15)
double dt = 10000.0/M;//terminal time = 10000
double C1r = 1.0/64/64;//L = 128pi
double C1i = C1r*c1;
cudaMemcpyToSymbol(dev_dt, &dt, sizeof(double));
cudaMemcpyToSymbol(dev_C1r, &C1r, sizeof(double));
cudaMemcpyToSymbol(dev_C1i, &C1i, sizeof(double));
cudaMemcpyToSymbol(dev_c3, &c3, sizeof(double));
//parameters for the blocks and grids
cudaMemcpyToSymbol(dev_N, &N, sizeof(int));
const int blocksPerGrid = N*N/threadsPerBlock + ((N*N)%threadsPerBlock > 0 ? 1 : 0);
//initialize value of the grid on host
/*because we need to output the initial data, might as well
initialize it on the host, output to file, and then transfer the data to the device.*/
cufftDoubleComplex *A;
A = (cufftDoubleComplex*)malloc(N*N*sizeof(cufftDoubleComplex));
for (int i = 0; i < N*N; i++){
A[i].x = 3*drand48() -1.5;
A[i].y = 3*drand48() -1.5;
}
//initialize value of the grid on device
cufftDoubleComplex *dev_A, *dev_A1, *dev_A2;
cudaMalloc((void**)&dev_A, N*N*sizeof(cufftDoubleComplex));
cudaMalloc((void**)&dev_A1, N*N*sizeof(cufftDoubleComplex));
cudaMalloc((void**)&dev_A2, N*N*sizeof(cufftDoubleComplex));
cudaMemcpy(dev_A1, A, N*N*sizeof(cufftDoubleComplex), cudaMemcpyHostToDevice);
cpy<<<blocksPerGrid, threadsPerBlock>>>(dev_A, dev_A1);
//file output the initial value
FILE* file = fopen("CGL.out","w");
fwrite(A, sizeof(cufftDoubleComplex), N*N, file);
printf("Saved output at t=0\n");
//cufft plans
cufftHandle plan;
cufftPlan2d(&plan, N, N, CUFFT_Z2Z);
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//main loop
for(int step=0; step<M; ++step){
//first step in RK4
cufftExecZ2Z(plan, dev_A1, dev_A2, CUFFT_FORWARD);
spectrald<<<blocksPerGrid, threadsPerBlock>>>(dev_A2);
cufftExecZ2Z(plan, dev_A2, dev_A2, CUFFT_INVERSE);
RK4<<<blocksPerGrid, threadsPerBlock>>>(dev_A, dev_A1, dev_A2, 0.25);
//second step in RK4
cufftExecZ2Z(plan, dev_A1, dev_A2, CUFFT_FORWARD);
spectrald<<<blocksPerGrid, threadsPerBlock>>>(dev_A2);
cufftExecZ2Z(plan, dev_A2, dev_A2, CUFFT_INVERSE);
RK4<<<blocksPerGrid, threadsPerBlock>>>(dev_A, dev_A1, dev_A2, 1.0/3);
//third step in RK4
cufftExecZ2Z(plan, dev_A1, dev_A2, CUFFT_FORWARD);
spectrald<<<blocksPerGrid, threadsPerBlock>>>(dev_A2);
cufftExecZ2Z(plan, dev_A2, dev_A2, CUFFT_INVERSE);
RK4<<<blocksPerGrid, threadsPerBlock>>>(dev_A, dev_A1, dev_A2, 0.5);
//fourth step in RK4
cufftExecZ2Z(plan, dev_A1, dev_A2, CUFFT_FORWARD);
spectrald<<<blocksPerGrid, threadsPerBlock>>>(dev_A2);
cufftExecZ2Z(plan, dev_A2, dev_A2, CUFFT_INVERSE);
RK4<<<blocksPerGrid, threadsPerBlock>>>(dev_A, dev_A1, dev_A2, 1.0);
//store the final value of this time step to A
cpy<<<blocksPerGrid, threadsPerBlock>>>(dev_A, dev_A1);
//output to files
if((step+1)%(M/10) == 0)
{
cudaMemcpy(A, dev_A, N*N*sizeof(cufftDoubleComplex), cudaMemcpyDeviceToHost);
fwrite(A, sizeof(cufftDoubleComplex), N*N, file);
printf("Saved output at t=%d\n", (step+1)/10);
}
}//main loop
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//close files and free memories
fclose(file);
cufftDestroy(plan);
cudaFree(dev_A);
cudaFree(dev_A1);
cudaFree(dev_A2);
free(A);
//print runtime and output to the file
FILE* tfile = fopen("runtime.dat", "a");
fprintf(tfile, "%d %lf\n", N, elapsedTime);
printf("Time: %gms\n", elapsedTime);
fclose(tfile);
return 0;
} |
a30308ae4ceb2695ee5538423d30a2dc948980ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "mpi.h"
#include <cassert>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <future>
#include <stdio.h>
#define cudaCall(val) __checkCudaErrors__((val), #val, __FILE__, __LINE__)
/*
A test program that tries to call mpi_isend from within a cuda event callback.
Each rank starts S streams and sends them asynchronously to its right neighbor
in a ring. Each rank first initializes the send buffers, then issues an mpi_irecv for a device buffer,
then calls a memcpy h-to-d, followed by a kernel in a stream; the stream also enqueues a
callback that when executed starts the mpi_isend from a device buffer.
It then waits for everybody and copies the buffers back onto the host to print.
Author: Christoph Angerer
*/
#define USE_GPU
#define USE_CALLBACK
#define USE_BACKGROUND_ISEND
template < typename T >
inline void __checkCudaErrors__(T code, const char* func, const char* file, int line) {
if (code) {
fprintf(stderr, "CUDA error at %s:%d code=%d \"%s\" \n", file, line, (unsigned int)code, func);
hipDeviceReset();
exit(EXIT_FAILURE);
}
}
// keep #streams and #mpi ranks < 10 to keep the 1-digit encoding intact
#define S 8
#define N 100
__global__ void MyKernel(int myid, int* buffer) {
buffer[threadIdx.x] += 10 * myid;
}
struct CallbackInfo {
int* send_buffer_d;
int device_id;
int dest;
int tag;
int myid;
MPI_Request send_request;
};
void CUDART_CB MyCallback(hipStream_t stream, hipError_t status, void* data) {
CallbackInfo* info = (CallbackInfo*)data;
printf("Callback called: dest=%d, tag=%d\n", info->dest, info->tag);
hipDevice_t dev;
int result = hipCtxGetDevice(&dev);
printf("hipCtxGetDevice inside callback result=%d\n", result);
printf("Using device_id %d\n", info->device_id);
#ifdef USE_BACKGROUND_ISEND
auto future = std::async(std::launch::async, [&info]() {
// need to set the device, otherwise I get a "illegal context" error
cudaCall(hipSetDevice(info->device_id));
printf("Hello from device %d tag %d\n", info->device_id, info->tag);
hipDevice_t dev;
int result = hipCtxGetDevice(&dev);
printf("hipCtxGetDevice inside callback inside background thread result=%d\n", result);
// MPI_Isend and MPI_send both deadlock here.
printf("Sending %d %p %d %d %d\n", info->myid, info->send_buffer_d, info->dest, N, info->tag);
MPI_Send(info->send_buffer_d, N, MPI_INT, info->dest, info->tag, MPI_COMM_WORLD);
printf("Bye %d %d %d\n", info->myid, info->dest, info->tag);
});
#else
// This is what we want, but it fails with a hipErrorNotPermitted in cuCtxGetDevice()
MPI_Isend(info->send_buffer_d, N, MPI_INT, info->dest, info->tag, MPI_COMM_WORLD, &info->send_request);
#endif
}
int main(int argc, char* argv[]) {
int myid, numprocs, left, right;
int recv_buffer[S][N], send_buffer[S][N];
CallbackInfo infos[S];
MPI_Request recv_request[S];
MPI_Status status;
const char* myid_c = std::getenv("SLURM_PROCID");
if (!myid_c) {
printf("SLURM_PROCID not set");
exit(EXIT_FAILURE);
}
const char* nprocs_c = std::getenv("SLURM_NPROCS");
if (!nprocs_c) {
printf("SLURM_NPROCS not set");
exit(EXIT_FAILURE);
}
const char* g2g_c = std::getenv("G2G");
if (!g2g_c) {
printf("G2G not set");
exit(EXIT_FAILURE);
}
myid = atoi(myid_c);
numprocs = atoi(nprocs_c);
int g2g = atoi(g2g_c);
assert(g2g >= 0 && g2g < 3);
int numgpus = numprocs;
if (g2g != 2)
numgpus = 1;
printf("NUMPROC %d %d\n", numgpus, myid % numgpus);
#ifdef USE_GPU
// cudaCall(hipGetDeviceCount(&numgpus));
// printf("Rank %d uses device %d\n", myid, myid % numgpus);
cudaCall(hipSetDevice(myid % numgpus));
#endif
printf("NUMPROC %d %d\n", numgpus, myid % numgpus);
int provided;
MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
if (provided < MPI_THREAD_MULTIPLE) {
printf("ERROR: The MPI library does not have full thread support\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
// MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
// MPI_Comm_rank(MPI_COMM_WORLD, &myid);
#ifdef USE_GPU
int* recv_buffer_d[S];
hipStream_t streams[S];
#endif
right = (myid + 1) % numprocs;
left = myid - 1;
if (left < 0)
left = numprocs - 1;
#ifdef USE_GPU
if (myid == 0)
printf("\nUSING GPU!\n");
#ifdef USE_CALLBACK
if (myid == 0)
printf("USING CALLBACK!\n");
#ifdef USE_BACKGROUND_ISEND
if (myid == 0)
printf("With background MPI_ISEND\n\n");
#else
if (myid == 0)
printf("With direct MPI_ISEND\n\n");
#endif
#else
if (myid == 0)
printf("USING NO CALLBACK\n\n");
#endif
// cudaCall(hipGetDeviceCount(&numgpus));
// printf("Rank %d uses device %d\n", myid, myid % numgpus);
// cudaCall(hipSetDevice(myid % numgpus));
hipDevice_t dev;
int result = hipCtxGetDevice(&dev);
printf("hipCtxGetDevice outside callback result=%d; %d\n", result, myid);
// create streams and device buffers
for (int s = 0; s < S; s++) {
cudaCall(hipStreamCreate(&streams[s]));
cudaCall(hipMalloc(&recv_buffer_d[s], N * sizeof(int)));
cudaCall(hipMalloc(&infos[s].send_buffer_d, N * sizeof(int)));
}
#else
if (myid == 0)
printf("\nUSING CPU!\n\n");
#endif
// initialise send buffer elements with the stream number
for (int s = 0; s < S; s++) {
for (int i = 0; i < N; i++) {
send_buffer[s][i] = s;
}
}
if (myid == 1) {
printf("Rank %d send buffer:\n", myid);
printf("=========================================\n");
for (int s = 0; s < S; s++) {
for (int i = 0; i < N; i++) {
printf("%2d,", send_buffer[s][i]);
}
printf("\n");
}
}
for (int s = 0; s < S; s++) {
// kick off S receives on device
#ifdef USE_GPU
MPI_Irecv(recv_buffer_d[s], N, MPI_INT, left, s, MPI_COMM_WORLD, &recv_request[s]);
#else
MPI_Irecv(recv_buffer[s], N, MPI_INT, left, s, MPI_COMM_WORLD, &recv_request[s]);
#endif
printf("IRECV %d from %d with tag %d\n", myid, left, s);
printf("SETTING %d %d %d \n", myid, numgpus, myid % numgpus);
infos[s].device_id = myid % numgpus;
infos[s].dest = right;
infos[s].tag = s;
infos[s].myid = myid;
#ifdef USE_GPU
// enqueue asynchronous memcpy and kernel
cudaCall(hipMemcpyAsync(
infos[s].send_buffer_d, send_buffer[s], N * sizeof(int), hipMemcpyHostToDevice, streams[s]));
// the kernel will add 10*myid to the send_buffer so that the result is a number xy where x is id of the sender
// and y is the stream
MyKernel< < < 1, N, 0, streams[s] > > >(myid, infos[s].send_buffer_d);
printf("Kernel %d %d %d \n", myid, infos[s].device_id, numgpus);
#ifdef USE_CALLBACK
// enqueue the isend
cudaCall(hipStreamAddCallback(streams[s], MyCallback, &infos[s], 0));
#else
cudaCall(hipStreamSynchronize(streams[s]));
printf("Before ISend %d to %d, size %d with tag %d \n", myid, infos[s].dest, N, infos[s].tag);
MPI_Isend(
infos[s].send_buffer_d, N, MPI_INT, infos[s].dest, infos[s].tag, MPI_COMM_WORLD, &infos[s].send_request);
#endif
printf("ISend %d \n", myid);
#else
for (int i = 0; i < N; i++) {
send_buffer[s][i] += 10 * myid;
}
MPI_Isend(send_buffer[s], N, MPI_INT, right, s, MPI_COMM_WORLD, &infos[s].send_request);
#endif
}
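// Wait for every receive (and, unless the send runs in a background thread, every send) to finish, then copy the received device buffers back to the host.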
for (int s = 0; s < S; s++) {
printf("Waiting %d \n", myid);
MPI_Wait(&recv_request[s], &status);
#ifndef USE_BACKGROUND_ISEND
MPI_Wait(&infos[s].send_request, &status);
#endif
#ifdef USE_GPU
cudaCall(
hipMemcpyAsync(recv_buffer[s], recv_buffer_d[s], N * sizeof(int), hipMemcpyDeviceToHost, streams[s]));
#endif
}
#ifdef USE_GPU
cudaCall(hipDeviceSynchronize());
#endif
if (myid == 0) {
printf("Rank %d got Result:\n", myid);
printf("=========================================\n");
for (int s = 0; s < S; s++) {
for (int i = 0; i < N; i++) {
printf("%2d,", recv_buffer[s][i]);
}
printf("\n");
}
}
MPI_Finalize();
printf("END %d \n", myid);
#ifdef USE_GPU
for (int s = 0; s < S; s++) {
hipStreamDestroy(streams[s]);
cudaCall(hipFree(recv_buffer_d[s]));
cudaCall(hipFree(infos[s].send_buffer_d));
}
#endif
return 0;
}
| a30308ae4ceb2695ee5538423d30a2dc948980ea.cu | #include "mpi.h"
#include <cassert>
#include <cuda.h>
#include <cuda_runtime.h>
#include <future>
#include <stdio.h>
#define cudaCall(val) __checkCudaErrors__((val), #val, __FILE__, __LINE__)
/*
A test program that tries to call mpi_isend from within a cuda event callback.
Each rank starts S streams and sends them asynchronously to its right neighbor
in a ring. Each rank first initializes the send buffers, then issues an mpi_irecv for a device buffer,
then calls a memcpy h-to-d, followed by a kernel in a stream; the stream also enqueues a
callback that, when executed, starts the mpi_isend from a device buffer.
It then waits for everybody and copies the buffers back onto the host to print.
Author: Christoph Angerer
*/
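// Compile-time switches: USE_GPU stages the buffers on the device, USE_CALLBACK issues the send from a stream callback, USE_BACKGROUND_ISEND performs the MPI send in a std::async worker instead of calling MPI_Isend directly inside the callback.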
#define USE_GPU
#define USE_CALLBACK
#define USE_BACKGROUND_ISEND
template < typename T >
inline void __checkCudaErrors__(T code, const char* func, const char* file, int line) {
if (code) {
fprintf(stderr, "CUDA error at %s:%d code=%d \"%s\" \n", file, line, (unsigned int)code, func);
cudaDeviceReset();
exit(EXIT_FAILURE);
}
}
// keep #streams and #mpi ranks < 10 to keep the 1-digit encoding intact
#define S 8
#define N 100
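// Adds 10*myid to every element, so each received value encodes both the sender rank and the stream index (value = 10*sender + stream).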
__global__ void MyKernel(int myid, int* buffer) {
buffer[threadIdx.x] += 10 * myid;
}
struct CallbackInfo {
int* send_buffer_d;
int device_id;
int dest;
int tag;
int myid;
MPI_Request send_request;
};
void CUDART_CB MyCallback(cudaStream_t stream, cudaError_t status, void* data) {
CallbackInfo* info = (CallbackInfo*)data;
printf("Callback called: dest=%d, tag=%d\n", info->dest, info->tag);
CUdevice dev;
int result = cuCtxGetDevice(&dev);
printf("cuCtxGetDevice inside callback result=%d\n", result);
printf("Using device_id %d\n", info->device_id);
#ifdef USE_BACKGROUND_ISEND
auto future = std::async(std::launch::async, [&info]() {
// need to set the device, otherwise I get an "illegal context" error
cudaCall(cudaSetDevice(info->device_id));
printf("Hello from device %d tag %d\n", info->device_id, info->tag);
CUdevice dev;
int result = cuCtxGetDevice(&dev);
printf("cuCtxGetDevice inside callback inside background thread result=%d\n", result);
// MPI_Isend and MPI_send both deadlock here.
printf("Sending %d %p %d %d %d\n", info->myid, info->send_buffer_d, info->dest, N, info->tag);
MPI_Send(info->send_buffer_d, N, MPI_INT, info->dest, info->tag, MPI_COMM_WORLD);
printf("Bye %d %d %d\n", info->myid, info->dest, info->tag);
});
#else
// This is what we want, but it fails with a CUDA_ERROR_NOT_PERMITTED in cuCtxGetDevice()
MPI_Isend(info->send_buffer_d, N, MPI_INT, info->dest, info->tag, MPI_COMM_WORLD, &info->send_request);
#endif
}
int main(int argc, char* argv[]) {
int myid, numprocs, left, right;
int recv_buffer[S][N], send_buffer[S][N];
CallbackInfo infos[S];
MPI_Request recv_request[S];
MPI_Status status;
const char* myid_c = std::getenv("SLURM_PROCID");
if (!myid_c) {
printf("SLURM_PROCID not set");
exit(EXIT_FAILURE);
}
const char* nprocs_c = std::getenv("SLURM_NPROCS");
if (!nprocs_c) {
printf("SLURM_NPROCS not set");
exit(EXIT_FAILURE);
}
const char* g2g_c = std::getenv("G2G");
if (!g2g_c) {
printf("G2G not set");
exit(EXIT_FAILURE);
}
myid = atoi(myid_c);
numprocs = atoi(nprocs_c);
int g2g = atoi(g2g_c);
assert(g2g < 3 && g2g >= 0);
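// G2G selects the device mapping: 2 gives every rank its own device (myid % numprocs), any other value puts all ranks on device 0.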
int numgpus = numprocs;
if (g2g != 2)
numgpus = 1;
printf("NUMPROC %d %d\n", numgpus, myid % numgpus);
#ifdef USE_GPU
// cudaCall(cudaGetDeviceCount(&numgpus));
// printf("Rank %d uses device %d\n", myid, myid % numgpus);
cudaCall(cudaSetDevice(myid % numgpus));
#endif
printf("NUMPROC %d %d\n", numgpus, myid % numgpus);
int provided;
MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
if (provided < MPI_THREAD_MULTIPLE) {
printf("ERROR: The MPI library does not have full thread support\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
// MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
// MPI_Comm_rank(MPI_COMM_WORLD, &myid);
#ifdef USE_GPU
int* recv_buffer_d[S];
cudaStream_t streams[S];
#endif
right = (myid + 1) % numprocs;
left = myid - 1;
if (left < 0)
left = numprocs - 1;
#ifdef USE_GPU
if (myid == 0)
printf("\nUSING GPU!\n");
#ifdef USE_CALLBACK
if (myid == 0)
printf("USING CALLBACK!\n");
#ifdef USE_BACKGROUND_ISEND
if (myid == 0)
printf("With background MPI_ISEND\n\n");
#else
if (myid == 0)
printf("With direct MPI_ISEND\n\n");
#endif
#else
if (myid == 0)
printf("USING NO CALLBACK\n\n");
#endif
// cudaCall(cudaGetDeviceCount(&numgpus));
// printf("Rank %d uses device %d\n", myid, myid % numgpus);
// cudaCall(cudaSetDevice(myid % numgpus));
CUdevice dev;
int result = cuCtxGetDevice(&dev);
printf("cuCtxGetDevice outside callback result=%d; %d\n", result, myid);
// create streams and device buffers
for (int s = 0; s < S; s++) {
cudaCall(cudaStreamCreate(&streams[s]));
cudaCall(cudaMalloc(&recv_buffer_d[s], N * sizeof(int)));
cudaCall(cudaMalloc(&infos[s].send_buffer_d, N * sizeof(int)));
}
#else
if (myid == 0)
printf("\nUSING CPU!\n\n");
#endif
// initialise send buffer elements with the stream number
for (int s = 0; s < S; s++) {
for (int i = 0; i < N; i++) {
send_buffer[s][i] = s;
}
}
if (myid == 1) {
printf("Rank %d send buffer:\n", myid);
printf("=========================================\n");
for (int s = 0; s < S; s++) {
for (int i = 0; i < N; i++) {
printf("%2d,", send_buffer[s][i]);
}
printf("\n");
}
}
for (int s = 0; s < S; s++) {
// kick off S receives on device
#ifdef USE_GPU
MPI_Irecv(recv_buffer_d[s], N, MPI_INT, left, s, MPI_COMM_WORLD, &recv_request[s]);
#else
MPI_Irecv(recv_buffer[s], N, MPI_INT, left, s, MPI_COMM_WORLD, &recv_request[s]);
#endif
printf("IRECV %d from %d with tag %d\n", myid, left, s);
printf("SETTING %d %d %d \n", myid, numgpus, myid % numgpus);
infos[s].device_id = myid % numgpus;
infos[s].dest = right;
infos[s].tag = s;
infos[s].myid = myid;
#ifdef USE_GPU
// enqueue asynchronous memcpy and kernel
cudaCall(cudaMemcpyAsync(
infos[s].send_buffer_d, send_buffer[s], N * sizeof(int), cudaMemcpyHostToDevice, streams[s]));
// the kernel will add 10*myid to the send_buffer so that the result is a number xy where x is id of the sender
// and y is the stream
MyKernel< < < 1, N, 0, streams[s] > > >(myid, infos[s].send_buffer_d);
printf("Kernel %d %d %d \n", myid, infos[s].device_id, numgpus);
#ifdef USE_CALLBACK
// enqueue the isend
cudaCall(cudaStreamAddCallback(streams[s], MyCallback, &infos[s], 0));
#else
cudaCall(cudaStreamSynchronize(streams[s]));
printf("Before ISend %d to %d, size %d with tag %d \n", myid, infos[s].dest, N, infos[s].tag);
MPI_Isend(
infos[s].send_buffer_d, N, MPI_INT, infos[s].dest, infos[s].tag, MPI_COMM_WORLD, &infos[s].send_request);
#endif
printf("ISend %d \n", myid);
#else
for (int i = 0; i < N; i++) {
send_buffer[s][i] += 10 * myid;
}
MPI_Isend(send_buffer[s], N, MPI_INT, right, s, MPI_COMM_WORLD, &infos[s].send_request);
#endif
}
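// Wait for every receive (and, unless the send runs in a background thread, every send) to finish, then copy the received device buffers back to the host.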
for (int s = 0; s < S; s++) {
printf("Waiting %d \n", myid);
MPI_Wait(&recv_request[s], &status);
#ifndef USE_BACKGROUND_ISEND
MPI_Wait(&infos[s].send_request, &status);
#endif
#ifdef USE_GPU
cudaCall(
cudaMemcpyAsync(recv_buffer[s], recv_buffer_d[s], N * sizeof(int), cudaMemcpyDeviceToHost, streams[s]));
#endif
}
#ifdef USE_GPU
cudaCall(cudaDeviceSynchronize());
#endif
if (myid == 0) {
printf("Rank %d got Result:\n", myid);
printf("=========================================\n");
for (int s = 0; s < S; s++) {
for (int i = 0; i < N; i++) {
printf("%2d,", recv_buffer[s][i]);
}
printf("\n");
}
}
MPI_Finalize();
printf("END %d \n", myid);
#ifdef USE_GPU
for (int s = 0; s < S; s++) {
cudaStreamDestroy(streams[s]);
cudaCall(cudaFree(recv_buffer_d[s]));
cudaCall(cudaFree(infos[s].send_buffer_d));
}
#endif
return 0;
}
|
b4b3ca6346862bfba683c3be3d5e9c92417caa45.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <limits.h>
#include <float.h>
#include <math.h>
#include <sys/time.h>
#include <pthread.h>
#include <string>
#include "../../utils/timer.h"
float feature_vect[] = {2.240018, 2.2570236, 0.11304555, -0.21307051,
0.8988138, 0.039065503, 0.023874786, 0.13153112,
0.15324382, 0.16986738, -0.020297153, -0.26773554,
0.40202165, 0.35923952, 0.060746543, 0.35402644,
0.086052455, -0.10499257, 0.04395058, 0.026407119,
-0.48301497, 0.120889395, 0.67980754, -0.19875681,
-0.5443737, -0.039534688, 0.20888293, 0.054865785,
-0.4846478, 0.1, 0.1, 0.1};
float *means_vect;
float *precs_vect;
float *weight_vect;
float *factor_vect;
float *score_vect;
__device__ __constant__ float logZero = -3.4028235E38;
__device__ __constant__ float maxLogValue = 7097004.5;
__device__ __constant__ float minLogValue = -7443538.0;
__device__ __constant__ float naturalLogBase = (float)1.00011595E-4;
__device__ __constant__ float inverseNaturalLogBase = 9998.841;
// fixed for a given acoustic model
__device__ __constant__ int comp_size = 32;
__device__ __constant__ int feat_size = 29;
__device__ __constant__ int senone_size = 5120;
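// One thread per senone: for each of the 32 mixture components the thread evaluates the log Gaussian density, adds the mixture weight and folds it into a running log-sum; the final score is written to score_vect[i].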
extern "C"
__global__ void
computeScore(const float *feature_vect, float *means_vect,
float *precs_vect, float *weight_vect, float *factor_vect,
float *score_vect) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < senone_size) {
float local_score_vect = logZero;
#pragma unroll 32
for (int j = 0; j < comp_size; j++) {
// getScore
float logDval = 0.0f;
#pragma unroll 29
for (int k = 0; k < feat_size; k++) {
int idx = i + senone_size * j + k * comp_size * senone_size;
float logDiff = feature_vect[k] - means_vect[idx];
logDval += logDiff * logDiff * precs_vect[idx];
}
// Convert to the appropriate base.
if (logDval != logZero) {
logDval = logDval * inverseNaturalLogBase;
}
int idx2 = i + j * senone_size;
// Add the precomputed factor, with the appropriate sign.
logDval -= factor_vect[idx2];
if (logDval < logZero) {
logDval = logZero;
}
// end of getScore
float logVal2 = logDval + weight_vect[idx2];
float logHighestValue = local_score_vect;
float logDifference = local_score_vect - logVal2;
// difference is always a positive number
if (logDifference < 0) {
logHighestValue = logVal2;
logDifference = -logDifference;
}
float logValue = -logDifference;
float logInnerSummation;
if (logValue < minLogValue) {
logInnerSummation = 0.0;
} else if (logValue > maxLogValue) {
logInnerSummation = FLT_MAX;
} else {
if (logValue == logZero) {
logValue = logZero;
} else {
logValue = logValue * naturalLogBase;
}
logInnerSummation = __expf(logValue);
}
logInnerSummation += 1.0;
float returnLogValue;
if (logInnerSummation <= 0.0) {
returnLogValue = logZero;
} else {
returnLogValue = __logf(logInnerSummation) * inverseNaturalLogBase;
if (returnLogValue > FLT_MAX) {
returnLogValue = FLT_MAX;
} else if (returnLogValue < -FLT_MAX) {
returnLogValue = -FLT_MAX;
}
}
// sum log
local_score_vect = logHighestValue + returnLogValue;
}
score_vect[i] = local_score_vect;
}
}
int main(int argc, char *argv[]) {
if (argc < 2) {
fprintf(stderr, "[ERROR] Invalid arguments provided.\n\n");
fprintf(stderr, "Usage: %s [INPUT FILE]\n\n", argv[0]);
exit(0);
}
STATS_INIT("kernel", "gpu_gaussian_mixture_model");
PRINT_STAT_STRING("abrv", "gpu_gmm");
float *dev_feat_vect;
float cuda_elapsedTime;
hipEvent_t eStart, eStop;
int comp_size = 32;
int senone_size = 5120;
int means_array_size = senone_size * comp_size * comp_size;
int comp_array_size = senone_size * comp_size;
means_vect = (float *)malloc(means_array_size * sizeof(float));
precs_vect = (float *)malloc(means_array_size * sizeof(float));
weight_vect = (float *)malloc(comp_array_size * sizeof(float));
factor_vect = (float *)malloc(comp_array_size * sizeof(float));
float *means_vect2 = (float *)malloc(means_array_size * sizeof(float));
float *precs_vect2 = (float *)malloc(means_array_size * sizeof(float));
float *weight_vect2 = (float *)malloc(comp_array_size * sizeof(float));
float *factor_vect2 = (float *)malloc(comp_array_size * sizeof(float));
float *dev_means_vect;
float *dev_precs_vect;
float *dev_weight_vect;
float *dev_factor_vect;
score_vect = (float *)malloc(senone_size * sizeof(float));
float *dev_score_vect;
int blockSizeX = 256;
int gridSizeX = (int)ceil(senone_size / blockSizeX);
int div_grid = ((int)(gridSizeX / 32));
gridSizeX = (div_grid + 1) * 32;
// load model from file
FILE *fp = fopen(argv[1], "r");
if (fp == NULL) { // checks for the file
printf("\n Cant open file");
exit(-1);
}
int idx = 0;
for (int i = 0; i < senone_size; i++) {
for (int j = 0; j < comp_size; j++) {
for (int k = 0; k < comp_size; k++) {
float elem;
fscanf(fp, "%f", &elem);
means_vect[idx] = elem;
idx = idx + 1;
}
}
}
idx = 0;
for (int i = 0; i < senone_size; i++) {
for (int j = 0; j < comp_size; j++) {
for (int k = 0; k < comp_size; k++) {
float elem;
fscanf(fp, "%f", &elem);
precs_vect[idx] = elem;
idx = idx + 1;
}
}
}
idx = 0;
for (int i = 0; i < senone_size; i++) {
for (int j = 0; j < comp_size; j++) {
float elem;
fscanf(fp, "%f", &elem);
weight_vect[idx] = elem;
idx = idx + 1;
}
}
idx = 0;
for (int i = 0; i < senone_size; i++) {
for (int j = 0; j < comp_size; j++) {
float elem;
fscanf(fp, "%f", &elem);
factor_vect[idx] = elem;
idx = idx + 1;
}
}
fclose(fp);
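// Re-order the model arrays so the senone index varies fastest; in the kernel consecutive threads (senones) then read consecutive elements, giving coalesced global loads.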
int idx3 = 0;
for (int j = 0; j < comp_size; j++) {
for (int i = 0; i < senone_size; i++) {
int ij = j + i * comp_size;
weight_vect2[idx3] = weight_vect[ij];
factor_vect2[idx3] = factor_vect[ij];
idx3 += 1;
}
}
int idx4 = 0;
for (int k = 0; k < comp_size; k++) {
for (int j = 0; j < comp_size; j++) {
for (int i = 0; i < senone_size; i++) {
int ijk = k + comp_size * j + i * comp_size * comp_size;
means_vect2[idx4] = means_vect[ijk];
precs_vect2[idx4] = precs_vect[ijk];
idx4 += 1;
}
}
}
for (int i = 0; i < senone_size; i++) {
for (int j = 0; j < comp_size; j++) {
for (int k = 0; k < 29; k++) {
int ijk = k + comp_size * j + i * comp_size * comp_size;
int kji = i + senone_size * j + k * comp_size * senone_size;
if (means_vect2[kji] != means_vect[ijk]) {
printf("%f != %f\n", means_vect2[kji], means_vect[ijk]);
}
}
}
}
hipEventCreate(&eStart);
hipEventCreate(&eStop);
// just one time to load acoustic model
hipMalloc((void **)&dev_means_vect, sizeof(float) * means_array_size);
hipMalloc((void **)&dev_precs_vect, sizeof(float) * means_array_size);
hipMalloc((void **)&dev_weight_vect, sizeof(float) * comp_array_size);
hipMalloc((void **)&dev_factor_vect, sizeof(float) * comp_array_size);
hipMemcpy(dev_means_vect, means_vect2, sizeof(float) * means_array_size,
hipMemcpyHostToDevice);
hipMemcpy(dev_precs_vect, precs_vect2, sizeof(float) * means_array_size,
hipMemcpyHostToDevice);
hipMemcpy(dev_weight_vect, weight_vect2, sizeof(float) * comp_array_size,
hipMemcpyHostToDevice);
hipMemcpy(dev_factor_vect, factor_vect2, sizeof(float) * comp_array_size,
hipMemcpyHostToDevice);
hipMalloc((void **)&dev_feat_vect, sizeof(float) * comp_size);
hipMalloc((void **)&dev_score_vect, sizeof(float) * senone_size);
PRINT_STAT_INT("blockSizeX", blockSizeX);
PRINT_STAT_INT("gridSizeX", gridSizeX);
dim3 block(128);
dim3 grid;
grid.x = (senone_size + block.x - 1) / block.x;
if (grid.x < 32) grid.x = 32;
hipEventRecord(eStart, 0);
// executed each time a score is needed for a given feature vector
hipEventRecord(eStart, 0);
hipMemcpy(dev_feat_vect, feature_vect, comp_size * sizeof(float),
hipMemcpyHostToDevice);
hipEventRecord(eStop, 0);
hipEventSynchronize(eStop);
hipEventElapsedTime(&cuda_elapsedTime, eStart, eStop);
PRINT_STAT_DOUBLE("host_to_device", cuda_elapsedTime);
hipEventRecord(eStart, 0);
computeScore << <grid, block>>> (dev_feat_vect, dev_means_vect,
dev_precs_vect, dev_weight_vect,
dev_factor_vect, dev_score_vect);
hipEventRecord(eStop, 0);
hipEventSynchronize(eStop);
hipEventElapsedTime(&cuda_elapsedTime, eStart, eStop);
PRINT_STAT_DOUBLE("gpu_gmm", cuda_elapsedTime);
hipEventRecord(eStart, 0);
hipMemcpy(score_vect, dev_score_vect, senone_size * sizeof(float),
hipMemcpyDeviceToHost);
hipEventRecord(eStop, 0);
hipEventSynchronize(eStop);
hipEventElapsedTime(&cuda_elapsedTime, eStart, eStop);
PRINT_STAT_DOUBLE("device_to_host", cuda_elapsedTime);
STATS_END();
#if TESTING
FILE *f = fopen("../input/gmm_scoring.gpu", "w");
for (int i = 0; i < senone_size; ++i) fprintf(f, "%.0f\n", score_vect[i]);
fclose(f);
#endif
hipEventRecord(eStop, 0);
hipEventSynchronize(eStop);
hipEventElapsedTime(&cuda_elapsedTime, eStart, eStop);
free(means_vect);
free(precs_vect);
free(weight_vect);
free(factor_vect);
free(score_vect);
hipFree(dev_means_vect);
hipFree(dev_precs_vect);
hipFree(dev_weight_vect);
hipFree(dev_factor_vect);
hipFree(dev_feat_vect);
hipFree(dev_score_vect);
}
| b4b3ca6346862bfba683c3be3d5e9c92417caa45.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <limits.h>
#include <float.h>
#include <math.h>
#include <sys/time.h>
#include <pthread.h>
#include <string>
#include "../../utils/timer.h"
float feature_vect[] = {2.240018, 2.2570236, 0.11304555, -0.21307051,
0.8988138, 0.039065503, 0.023874786, 0.13153112,
0.15324382, 0.16986738, -0.020297153, -0.26773554,
0.40202165, 0.35923952, 0.060746543, 0.35402644,
0.086052455, -0.10499257, 0.04395058, 0.026407119,
-0.48301497, 0.120889395, 0.67980754, -0.19875681,
-0.5443737, -0.039534688, 0.20888293, 0.054865785,
-0.4846478, 0.1, 0.1, 0.1};
float *means_vect;
float *precs_vect;
float *weight_vect;
float *factor_vect;
float *score_vect;
__device__ __constant__ float logZero = -3.4028235E38;
__device__ __constant__ float maxLogValue = 7097004.5;
__device__ __constant__ float minLogValue = -7443538.0;
__device__ __constant__ float naturalLogBase = (float)1.00011595E-4;
__device__ __constant__ float inverseNaturalLogBase = 9998.841;
// fixed for a given acoustic model
__device__ __constant__ int comp_size = 32;
__device__ __constant__ int feat_size = 29;
__device__ __constant__ int senone_size = 5120;
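// One thread per senone: for each of the 32 mixture components the thread evaluates the log Gaussian density, adds the mixture weight and folds it into a running log-sum; the final score is written to score_vect[i].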
extern "C"
__global__ void
computeScore(const float *feature_vect, float *means_vect,
float *precs_vect, float *weight_vect, float *factor_vect,
float *score_vect) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < senone_size) {
float local_score_vect = logZero;
#pragma unroll 32
for (int j = 0; j < comp_size; j++) {
// getScore
float logDval = 0.0f;
#pragma unroll 29
for (int k = 0; k < feat_size; k++) {
int idx = i + senone_size * j + k * comp_size * senone_size;
float logDiff = feature_vect[k] - means_vect[idx];
logDval += logDiff * logDiff * precs_vect[idx];
}
// Convert to the appropriate base.
if (logDval != logZero) {
logDval = logDval * inverseNaturalLogBase;
}
int idx2 = i + j * senone_size;
// Add the precomputed factor, with the appropriate sign.
logDval -= factor_vect[idx2];
if (logDval < logZero) {
logDval = logZero;
}
// end of getScore
float logVal2 = logDval + weight_vect[idx2];
float logHighestValue = local_score_vect;
float logDifference = local_score_vect - logVal2;
// difference is always a positive number
if (logDifference < 0) {
logHighestValue = logVal2;
logDifference = -logDifference;
}
float logValue = -logDifference;
float logInnerSummation;
if (logValue < minLogValue) {
logInnerSummation = 0.0;
} else if (logValue > maxLogValue) {
logInnerSummation = FLT_MAX;
} else {
if (logValue == logZero) {
logValue = logZero;
} else {
logValue = logValue * naturalLogBase;
}
logInnerSummation = __expf(logValue);
}
logInnerSummation += 1.0;
float returnLogValue;
if (logInnerSummation <= 0.0) {
returnLogValue = logZero;
} else {
returnLogValue = __logf(logInnerSummation) * inverseNaturalLogBase;
if (returnLogValue > FLT_MAX) {
returnLogValue = FLT_MAX;
} else if (returnLogValue < -FLT_MAX) {
returnLogValue = -FLT_MAX;
}
}
// sum log
local_score_vect = logHighestValue + returnLogValue;
}
score_vect[i] = local_score_vect;
}
}
int main(int argc, char *argv[]) {
if (argc < 2) {
fprintf(stderr, "[ERROR] Invalid arguments provided.\n\n");
fprintf(stderr, "Usage: %s [INPUT FILE]\n\n", argv[0]);
exit(0);
}
STATS_INIT("kernel", "gpu_gaussian_mixture_model");
PRINT_STAT_STRING("abrv", "gpu_gmm");
float *dev_feat_vect;
float cuda_elapsedTime;
cudaEvent_t eStart, eStop;
int comp_size = 32;
int senone_size = 5120;
int means_array_size = senone_size * comp_size * comp_size;
int comp_array_size = senone_size * comp_size;
means_vect = (float *)malloc(means_array_size * sizeof(float));
precs_vect = (float *)malloc(means_array_size * sizeof(float));
weight_vect = (float *)malloc(comp_array_size * sizeof(float));
factor_vect = (float *)malloc(comp_array_size * sizeof(float));
float *means_vect2 = (float *)malloc(means_array_size * sizeof(float));
float *precs_vect2 = (float *)malloc(means_array_size * sizeof(float));
float *weight_vect2 = (float *)malloc(comp_array_size * sizeof(float));
float *factor_vect2 = (float *)malloc(comp_array_size * sizeof(float));
float *dev_means_vect;
float *dev_precs_vect;
float *dev_weight_vect;
float *dev_factor_vect;
score_vect = (float *)malloc(senone_size * sizeof(float));
float *dev_score_vect;
int blockSizeX = 256;
int gridSizeX = (int)ceil(senone_size / blockSizeX);
int div_grid = ((int)(gridSizeX / 32));
gridSizeX = (div_grid + 1) * 32;
// load model from file
FILE *fp = fopen(argv[1], "r");
if (fp == NULL) { // checks for the file
printf("\n Can’t open file");
exit(-1);
}
int idx = 0;
for (int i = 0; i < senone_size; i++) {
for (int j = 0; j < comp_size; j++) {
for (int k = 0; k < comp_size; k++) {
float elem;
fscanf(fp, "%f", &elem);
means_vect[idx] = elem;
idx = idx + 1;
}
}
}
idx = 0;
for (int i = 0; i < senone_size; i++) {
for (int j = 0; j < comp_size; j++) {
for (int k = 0; k < comp_size; k++) {
float elem;
fscanf(fp, "%f", &elem);
precs_vect[idx] = elem;
idx = idx + 1;
}
}
}
idx = 0;
for (int i = 0; i < senone_size; i++) {
for (int j = 0; j < comp_size; j++) {
float elem;
fscanf(fp, "%f", &elem);
weight_vect[idx] = elem;
idx = idx + 1;
}
}
idx = 0;
for (int i = 0; i < senone_size; i++) {
for (int j = 0; j < comp_size; j++) {
float elem;
fscanf(fp, "%f", &elem);
factor_vect[idx] = elem;
idx = idx + 1;
}
}
fclose(fp);
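// Re-order the model arrays so the senone index varies fastest; in the kernel consecutive threads (senones) then read consecutive elements, giving coalesced global loads.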
int idx3 = 0;
for (int j = 0; j < comp_size; j++) {
for (int i = 0; i < senone_size; i++) {
int ij = j + i * comp_size;
weight_vect2[idx3] = weight_vect[ij];
factor_vect2[idx3] = factor_vect[ij];
idx3 += 1;
}
}
int idx4 = 0;
for (int k = 0; k < comp_size; k++) {
for (int j = 0; j < comp_size; j++) {
for (int i = 0; i < senone_size; i++) {
int ijk = k + comp_size * j + i * comp_size * comp_size;
means_vect2[idx4] = means_vect[ijk];
precs_vect2[idx4] = precs_vect[ijk];
idx4 += 1;
}
}
}
for (int i = 0; i < senone_size; i++) {
for (int j = 0; j < comp_size; j++) {
for (int k = 0; k < 29; k++) {
int ijk = k + comp_size * j + i * comp_size * comp_size;
int kji = i + senone_size * j + k * comp_size * senone_size;
if (means_vect2[kji] != means_vect[ijk]) {
printf("%f != %f\n", means_vect2[kji], means_vect[ijk]);
}
}
}
}
cudaEventCreate(&eStart);
cudaEventCreate(&eStop);
// just one time to load acoustic model
cudaMalloc((void **)&dev_means_vect, sizeof(float) * means_array_size);
cudaMalloc((void **)&dev_precs_vect, sizeof(float) * means_array_size);
cudaMalloc((void **)&dev_weight_vect, sizeof(float) * comp_array_size);
cudaMalloc((void **)&dev_factor_vect, sizeof(float) * comp_array_size);
cudaMemcpy(dev_means_vect, means_vect2, sizeof(float) * means_array_size,
cudaMemcpyHostToDevice);
cudaMemcpy(dev_precs_vect, precs_vect2, sizeof(float) * means_array_size,
cudaMemcpyHostToDevice);
cudaMemcpy(dev_weight_vect, weight_vect2, sizeof(float) * comp_array_size,
cudaMemcpyHostToDevice);
cudaMemcpy(dev_factor_vect, factor_vect2, sizeof(float) * comp_array_size,
cudaMemcpyHostToDevice);
cudaMalloc((void **)&dev_feat_vect, sizeof(float) * comp_size);
cudaMalloc((void **)&dev_score_vect, sizeof(float) * senone_size);
PRINT_STAT_INT("blockSizeX", blockSizeX);
PRINT_STAT_INT("gridSizeX", gridSizeX);
dim3 block(128);
dim3 grid;
grid.x = (senone_size + block.x - 1) / block.x;
if (grid.x < 32) grid.x = 32;
cudaEventRecord(eStart, 0);
// executed each time a score is needed for a given feature vector
cudaEventRecord(eStart, 0);
cudaMemcpy(dev_feat_vect, feature_vect, comp_size * sizeof(float),
cudaMemcpyHostToDevice);
cudaEventRecord(eStop, 0);
cudaEventSynchronize(eStop);
cudaEventElapsedTime(&cuda_elapsedTime, eStart, eStop);
PRINT_STAT_DOUBLE("host_to_device", cuda_elapsedTime);
cudaEventRecord(eStart, 0);
computeScore << <grid, block>>> (dev_feat_vect, dev_means_vect,
dev_precs_vect, dev_weight_vect,
dev_factor_vect, dev_score_vect);
cudaEventRecord(eStop, 0);
cudaEventSynchronize(eStop);
cudaEventElapsedTime(&cuda_elapsedTime, eStart, eStop);
PRINT_STAT_DOUBLE("gpu_gmm", cuda_elapsedTime);
cudaEventRecord(eStart, 0);
cudaMemcpy(score_vect, dev_score_vect, senone_size * sizeof(float),
cudaMemcpyDeviceToHost);
cudaEventRecord(eStop, 0);
cudaEventSynchronize(eStop);
cudaEventElapsedTime(&cuda_elapsedTime, eStart, eStop);
PRINT_STAT_DOUBLE("device_to_host", cuda_elapsedTime);
STATS_END();
#if TESTING
FILE *f = fopen("../input/gmm_scoring.gpu", "w");
for (int i = 0; i < senone_size; ++i) fprintf(f, "%.0f\n", score_vect[i]);
fclose(f);
#endif
cudaEventRecord(eStop, 0);
cudaEventSynchronize(eStop);
cudaEventElapsedTime(&cuda_elapsedTime, eStart, eStop);
free(means_vect);
free(precs_vect);
free(weight_vect);
free(factor_vect);
free(score_vect);
cudaFree(dev_means_vect);
cudaFree(dev_precs_vect);
cudaFree(dev_weight_vect);
cudaFree(dev_factor_vect);
cudaFree(dev_feat_vect);
cudaFree(dev_score_vect);
}
|
fa14cd571bb12f561f1da61c9dc93506809be54c.hip | // !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include <cusp/nlog10.cuh>
#include <cmath>
using namespace cusp;
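// run_test fills the input with the ramp 1..N, applies the nlog10 operator on the device and compares against a host-side n*log10(x) + k reference.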
template <typename T>
void run_test(int N, float n, float k)
{
std::vector<T> host_input_data(N);
std::vector<T> expected_output_data(N);
for (int i = 0; i < N; i++) {
host_input_data[i] = i + 1;
expected_output_data[i] = (T)n * (T)log10(float(i + 1)) + (T)k;
}
std::vector<T> host_output_data(N);
void *dev_input_data;
void *dev_output_data;
hipMalloc(&dev_input_data, N * sizeof(T));
hipMalloc(&dev_output_data, N * sizeof(T));
hipMemcpy(dev_input_data, host_input_data.data(),
N * sizeof(T), hipMemcpyHostToDevice);
cusp::nlog10<T> op(n, k);
int minGrid, blockSize, gridSize;
op.occupancy(&blockSize, &minGrid);
gridSize = (N + blockSize - 1) / blockSize;
op.set_block_and_grid(blockSize, gridSize);
op.launch({dev_input_data}, {dev_output_data}, N);
hipDeviceSynchronize();
hipMemcpy(host_output_data.data(), dev_output_data,
N * sizeof(T), hipMemcpyDeviceToHost);
EXPECT_EQ(expected_output_data, host_output_data);
}
template <>
void run_test<float>(int N, float n, float k)
{
std::vector<float> host_input_data(N);
std::vector<float> expected_output_data(N);
for (int i = 0; i < N; i++) {
host_input_data[i] = i + 1;
expected_output_data[i] = n * (float)log10(host_input_data[i]) + k;
}
std::vector<float> host_output_data(N);
void *dev_input_data;
void *dev_output_data;
hipMalloc(&dev_input_data, N * sizeof(float));
hipMalloc(&dev_output_data, N * sizeof(float));
hipMemcpy(dev_input_data, host_input_data.data(),
N * sizeof(float), hipMemcpyHostToDevice);
cusp::nlog10<float> op(n, k);
int minGrid, blockSize, gridSize;
op.occupancy(&blockSize, &minGrid);
gridSize = (N + blockSize - 1) / blockSize;
op.set_block_and_grid(blockSize, gridSize);
op.launch({dev_input_data}, {dev_output_data}, N);
hipDeviceSynchronize();
hipMemcpy(host_output_data.data(), dev_output_data,
N * sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < (int)expected_output_data.size(); i++) {
EXPECT_NEAR(expected_output_data[i],
host_output_data[i],
expected_output_data[i] / 10000);
}
}
TEST(Nlog10Kernel, Basic) {
int N = 100;
float n = 2.0;
float k = 2.0;
run_test<float>(N, n, k);
//run_test<int>(N, n, k);
} | fa14cd571bb12f561f1da61c9dc93506809be54c.cu | #include <gtest/gtest.h>
#include <cusp/nlog10.cuh>
#include <cmath>
using namespace cusp;
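// run_test fills the input with the ramp 1..N, applies the nlog10 operator on the device and compares against a host-side n*log10(x) + k reference.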
template <typename T>
void run_test(int N, float n, float k)
{
std::vector<T> host_input_data(N);
std::vector<T> expected_output_data(N);
for (int i = 0; i < N; i++) {
host_input_data[i] = i + 1;
expected_output_data[i] = (T)n * (T)log10(float(i)) + (T)k;
}
std::vector<T> host_output_data(N);
void *dev_input_data;
void *dev_output_data;
cudaMalloc(&dev_input_data, N * sizeof(T));
cudaMalloc(&dev_output_data, N * sizeof(T));
cudaMemcpy(dev_input_data, host_input_data.data(),
N * sizeof(T), cudaMemcpyHostToDevice);
cusp::nlog10<T> op(n, k);
int minGrid, blockSize, gridSize;
op.occupancy(&blockSize, &minGrid);
gridSize = (N + blockSize - 1) / blockSize;
op.set_block_and_grid(blockSize, gridSize);
op.launch({dev_input_data}, {dev_output_data}, N);
cudaDeviceSynchronize();
cudaMemcpy(host_output_data.data(), dev_output_data,
N * sizeof(T), cudaMemcpyDeviceToHost);
EXPECT_EQ(expected_output_data, host_output_data);
}
template <>
void run_test<float>(int N, float n, float k)
{
std::vector<float> host_input_data(N);
std::vector<float> expected_output_data(N);
for (int i = 0; i < N; i++) {
host_input_data[i] = i + 1;
expected_output_data[i] = n * (float)log10(host_input_data[i]) + k;
}
std::vector<float> host_output_data(N);
void *dev_input_data;
void *dev_output_data;
cudaMalloc(&dev_input_data, N * sizeof(float));
cudaMalloc(&dev_output_data, N * sizeof(float));
cudaMemcpy(dev_input_data, host_input_data.data(),
N * sizeof(float), cudaMemcpyHostToDevice);
cusp::nlog10<float> op(n, k);
int minGrid, blockSize, gridSize;
op.occupancy(&blockSize, &minGrid);
gridSize = (N + blockSize - 1) / blockSize;
op.set_block_and_grid(blockSize, gridSize);
op.launch({dev_input_data}, {dev_output_data}, N);
cudaDeviceSynchronize();
cudaMemcpy(host_output_data.data(), dev_output_data,
N * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < (int)expected_output_data.size(); i++) {
EXPECT_NEAR(expected_output_data[i],
host_output_data[i],
expected_output_data[i] / 10000);
}
}
TEST(Nlog10Kernel, Basic) {
int N = 100;
float n = 2.0;
float k = 2.0;
run_test<float>(N, n, k);
//run_test<int>(N, n, k);
} |
5a22b4a44d4f4f67c787caa1ca9a0fa332bc5794.hip | // !!! This is a file automatically generated by hipify!!!
// includes, cuda
#include <hip/hip_runtime.h>
#include <cudaDefs.h>
#include <imageManager.h>
#include "imageKernels.cuh"
#define BLOCK_DIM 8
#define COLORS 256
texture<uchar4, 2, hipReadModeElementType> tex_ref;
hipChannelFormatDesc tex_channel_desc;
uchar4 *d_image_data;
unsigned int image_width;
unsigned int image_height;
unsigned int image_bpp; //Bits Per Pixel = 8, 16, 24, or 32 bit
size_t image_pitch;
size_t histogram_tex_pitch;
size_t image_tex_pitch;
uchar4 *d_image_linear_pitch_texture_data = nullptr;
hipArray *d_array_texture_data = nullptr;
uchar3 *dst_histogram_data;
uchar3 *dst_image_data;
KernelSetting image_ks;
KernelSetting histogram_ks;
float *d_output_data = nullptr;
unsigned int histogram_width = 255;
unsigned int histogram_height = 200;
const int size = COLORS * sizeof(float);
float *d_red;
float *d_green;
float *d_blue;
float *d_max;
float *d_color;
float *h_red;
float *h_green;
float *h_blue;
float h_max[3];
float h_color[3];
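// One thread per pixel: reads the texel through tex_ref, swizzles it into the uchar3 output image and bumps the per-channel histogram counters (the increments are not atomic).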
__global__ void search_colors(const unsigned int tex_width, const unsigned int tex_height, const unsigned int dst_pitch, float *d_red, float *d_green, float *d_blue, float *d_color, uchar3* dst)
{
const auto col = (threadIdx.x + blockIdx.x * blockDim.x);
const auto row = (threadIdx.y + blockIdx.y * blockDim.y);
const auto offset = col + row * (dst_pitch / 3);
const uchar4 texel = tex2D(tex_ref, col, row);
//printf("%f %f %f\n", texel.z, texel.y, texel.x);
uchar3 output;
output.x = texel.z;
output.y = texel.y;
output.z = texel.x;
dst[offset] = output;
d_red[texel.x]++;
d_green[texel.y]++;
d_blue[texel.z]++;
}
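// Placeholder: fills every pixel of the histogram image with a fixed colour; the accumulated channel counts are not plotted yet.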
__global__ void draw_histogram (const unsigned int tex_width, const unsigned int tex_height, const unsigned int dst_pitch, float *d_red, float *d_green, float *d_blue, float *d_max, uchar3* dst)
{
const auto col = (threadIdx.x + blockIdx.x * blockDim.x);
const auto row = (threadIdx.y + blockIdx.y * blockDim.y);
const auto offset = col + row * (dst_pitch / 1);
uchar3 texel;
/*d_red[texel.x]++;
d_green[texel.y]++;
d_blue[texel.z]++;*/
//printf("%d %d\n", col, row);
texel.x = 100;
texel.y = 0;
texel.z = 0;
dst[offset] = texel;
}
#pragma region STEP 1
void load_source_image(const char* image_file_name)
{
FreeImage_Initialise();
auto tmp = ImageManager::GenericLoader(image_file_name, 0);
tmp = FreeImage_ConvertTo32Bits(tmp);
image_width = FreeImage_GetWidth(tmp);
image_height = FreeImage_GetHeight(tmp);
image_bpp = FreeImage_GetBPP(tmp);
image_pitch = FreeImage_GetPitch(tmp);
//checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&d_image_data), image_pitch * image_height * image_bpp / 8));
//checkCudaErrors(hipMemcpy(d_image_data, FreeImage_GetBits(tmp), image_pitch * image_height * image_bpp / 8, hipMemcpyHostToDevice));
checkCudaErrors(hipMallocPitch(&d_image_data, &image_pitch, image_width * sizeof(uchar4), image_height));
checkCudaErrors(hipMemcpy2D(d_image_data, image_pitch, FreeImage_GetBits(tmp), FreeImage_GetPitch(tmp), image_width * sizeof(uchar4), image_height, hipMemcpyHostToDevice));
FreeImage_Unload(tmp);
FreeImage_DeInitialise();
}
#pragma endregion
#pragma region STEP 2
void create_src_texure()
{
//Floating Point Texture Data
checkCudaErrors(hipMallocPitch(reinterpret_cast<void**>(&d_image_linear_pitch_texture_data), &image_tex_pitch, image_width * sizeof(uchar4), image_height));
//Converts custom image data to float and stores result in the float_pitch_linear_data
/*switch (image_bpp)
{
case 8: colorToUchar4<8> << <image_ks.dimGrid, image_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, image_tex_pitch / sizeof(uchar4), d_image_linear_pitch_texture_data); break;
case 16: colorToUchar4<16> << <image_ks.dimGrid, image_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, image_tex_pitch / sizeof(uchar4), d_image_linear_pitch_texture_data); break;
case 24: colorToUchar4<24> << <image_ks.dimGrid, image_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, image_tex_pitch / sizeof(uchar4), d_image_linear_pitch_texture_data); break;
case 32: colorToUchar4<32> << <image_ks.dimGrid, image_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, image_tex_pitch / sizeof(uchar4), d_image_linear_pitch_texture_data); break;
}*/
//checkDeviceMatrix<float>(dLinearPitchTextureData, texPitch, imageHeight, imageWidth, "", "");
//Texture settings
//tex_channel_desc = hipCreateChannelDesc<uchar4>();
tex_channel_desc = hipCreateChannelDesc(8, 8, 8, 8, hipChannelFormatKindUnsigned);
tex_ref.normalized = false;
tex_ref.filterMode = hipFilterModePoint;
tex_ref.addressMode[0] = hipAddressModeClamp;
tex_ref.addressMode[1] = hipAddressModeClamp;
checkCudaErrors(hipBindTexture2D(nullptr, &tex_ref, d_image_data, &tex_channel_desc, image_width, image_height, image_tex_pitch));
}
#pragma endregion
#pragma region STEP 3
//TASK: Build the per-channel colour histogram of the input image. Use the bound texture (tex_ref).
void create_histogram()
{
checkCudaErrors(hipMallocPitch(reinterpret_cast<void**>(&dst_image_data), &image_tex_pitch, image_width * sizeof(uchar3), image_height));
h_red = static_cast<float*>(malloc(size));
h_green = static_cast<float*>(malloc(size));
h_blue = static_cast<float*>(malloc(size));
for (auto i = 0; i < COLORS; i++)
{
h_red[i] = 0;
h_green[i] = 0;
h_blue[i] = 0;
}
checkCudaErrors(hipMalloc(&d_red, size));
checkCudaErrors(hipMalloc(&d_green, size));
checkCudaErrors(hipMalloc(&d_blue, size));
checkCudaErrors(hipMalloc(&d_color, 3 * sizeof(float)));
checkCudaErrors(hipMemcpy(d_red, h_red, size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_green, h_green, size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_blue, h_blue, size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_color, h_color, 3 * sizeof(float), hipMemcpyHostToDevice));
search_colors << <image_ks.dimGrid, image_ks.dimBlock >> > (image_width, image_height, image_tex_pitch, d_red, d_green, d_blue, d_color, dst_image_data);
checkCudaErrors(hipMemcpy(h_red, d_red, size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_green, d_green, size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_blue, d_blue, size, hipMemcpyDeviceToHost));
for (auto i = 0; i < 256; i++)
{
if (h_red[i] > h_max[0])
{
h_max[0] = h_red[i];
}
if (h_green[i] > h_max[1])
{
h_max[1] = h_green[i];
}
if (h_blue[i] > h_max[2])
{
h_max[2] = h_blue[i];
}
}
checkCudaErrors(hipMalloc(&d_max, 3 * sizeof(float)));
checkCudaErrors(hipMemcpy(d_max, h_max, 3 * sizeof(float), hipMemcpyHostToDevice));
draw_histogram << <histogram_ks.dimGrid, histogram_ks.dimBlock >> > (histogram_width, histogram_height, histogram_tex_pitch, d_red, d_green, d_blue, d_max, dst_histogram_data);
}
#pragma endregion
#pragma region STEP 4
//TASK: Save the output image
void save_histogram_image(const char* image_file_name)
{
FreeImage_Initialise();
const auto tmp = FreeImage_Allocate(histogram_width, histogram_height, 24);
checkCudaErrors(hipMemcpy2D(FreeImage_GetBits(tmp), FreeImage_GetPitch(tmp), dst_histogram_data, histogram_tex_pitch, histogram_width * 3, histogram_height, hipMemcpyDeviceToHost));
//FreeImage_Save(FIF_PNG, tmp, imageFileName, 0);
ImageManager::GenericWriter(tmp, image_file_name, FIF_PNG);
FreeImage_Unload(tmp);
FreeImage_DeInitialise();
}
void save_image(const char* image_file_name)
{
FreeImage_Initialise();
const auto tmp = FreeImage_Allocate(image_width, image_height, 32);
checkCudaErrors(hipMemcpy2D(FreeImage_GetBits(tmp), FreeImage_GetPitch(tmp), dst_image_data, image_tex_pitch, image_width * 4, image_height, hipMemcpyDeviceToHost));
//FreeImage_Save(FIF_PNG, tmp, image_file_name, 0);
FreeImage_Save(FIF_PNG, tmp, image_file_name, 0);
//ImageManager::GenericWriter(tmp, image_file_name, FIF_PNG);
FreeImage_Unload(tmp);
FreeImage_DeInitialise();
}
#pragma endregion
void release_memory()
{
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_max));
checkCudaErrors(hipFree(d_color));
free(h_red);
free(h_green);
free(h_blue);
h_red = nullptr;
h_green = nullptr;
h_blue = nullptr;
hipUnbindTexture(tex_ref);
if (d_image_data != nullptr)
checkCudaErrors(hipFree(d_image_data));
if (d_image_linear_pitch_texture_data != nullptr)
checkCudaErrors(hipFree(d_image_linear_pitch_texture_data));
if (d_array_texture_data)
checkCudaErrors(hipFreeArray(d_array_texture_data));
if (d_output_data)
checkCudaErrors(hipFree(d_output_data));
}
void project_color()
{
//STEP 1
load_source_image("image.tif");
h_color[0] = 100;
h_color[1] = 100;
h_color[2] = 100;
//TODO: Setup the kernel settings
image_ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1);
image_ks.blockSize = BLOCK_DIM * BLOCK_DIM;
image_ks.dimGrid = dim3((image_width + BLOCK_DIM - 1) / BLOCK_DIM, (image_height + BLOCK_DIM - 1) / BLOCK_DIM, 1);
histogram_ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1);
histogram_ks.blockSize = BLOCK_DIM * BLOCK_DIM;
histogram_ks.dimGrid = dim3((histogram_width + BLOCK_DIM - 1) / BLOCK_DIM, (histogram_height + BLOCK_DIM - 1) / BLOCK_DIM, 1);
//Step 2 - create the source texture stored in the linear pitch memory
create_src_texure();
//Step 3 - create the histogram
create_histogram();
//Step 4 - save the result image
//save_histogram_image("histogram.bmp");
save_image("founded.bmp");
release_memory();
std::getchar();
}
| 5a22b4a44d4f4f67c787caa1ca9a0fa332bc5794.cu | // includes, cuda
#include <cuda_runtime.h>
#include <cudaDefs.h>
#include <imageManager.h>
#include "imageKernels.cuh"
#define BLOCK_DIM 8
#define COLORS 256
texture<uchar4, 2, cudaReadModeElementType> tex_ref;
cudaChannelFormatDesc tex_channel_desc;
uchar4 *d_image_data;
unsigned int image_width;
unsigned int image_height;
unsigned int image_bpp; //Bits Per Pixel = 8, 16, 24, or 32 bit
size_t image_pitch;
size_t histogram_tex_pitch;
size_t image_tex_pitch;
uchar4 *d_image_linear_pitch_texture_data = nullptr;
cudaArray *d_array_texture_data = nullptr;
uchar3 *dst_histogram_data;
uchar3 *dst_image_data;
KernelSetting image_ks;
KernelSetting histogram_ks;
float *d_output_data = nullptr;
unsigned int histogram_width = 255;
unsigned int histogram_height = 200;
const int size = COLORS * sizeof(float);
float *d_red;
float *d_green;
float *d_blue;
float *d_max;
float *d_color;
float *h_red;
float *h_green;
float *h_blue;
float h_max[3];
float h_color[3];
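// One thread per pixel: reads the texel through tex_ref, swizzles it into the uchar3 output image and bumps the per-channel histogram counters (the increments are not atomic).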
__global__ void search_colors(const unsigned int tex_width, const unsigned int tex_height, const unsigned int dst_pitch, float *d_red, float *d_green, float *d_blue, float *d_color, uchar3* dst)
{
const auto col = (threadIdx.x + blockIdx.x * blockDim.x);
const auto row = (threadIdx.y + blockIdx.y * blockDim.y);
const auto offset = col + row * (dst_pitch / 3);
const uchar4 texel = tex2D(tex_ref, col, row);
//printf("%f %f %f\n", texel.z, texel.y, texel.x);
uchar3 output;
output.x = texel.z;
output.y = texel.y;
output.z = texel.x;
dst[offset] = output;
d_red[texel.x]++;
d_green[texel.y]++;
d_blue[texel.z]++;
}
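// Placeholder: fills every pixel of the histogram image with a fixed colour; the accumulated channel counts are not plotted yet.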
__global__ void draw_histogram (const unsigned int tex_width, const unsigned int tex_height, const unsigned int dst_pitch, float *d_red, float *d_green, float *d_blue, float *d_max, uchar3* dst)
{
const auto col = (threadIdx.x + blockIdx.x * blockDim.x);
const auto row = (threadIdx.y + blockIdx.y * blockDim.y);
const auto offset = col + row * (dst_pitch / 1);
uchar3 texel;
/*d_red[texel.x]++;
d_green[texel.y]++;
d_blue[texel.z]++;*/
//printf("%d %d\n", col, row);
texel.x = 100;
texel.y = 0;
texel.z = 0;
dst[offset] = texel;
}
#pragma region STEP 1
void load_source_image(const char* image_file_name)
{
FreeImage_Initialise();
auto tmp = ImageManager::GenericLoader(image_file_name, 0);
tmp = FreeImage_ConvertTo32Bits(tmp);
image_width = FreeImage_GetWidth(tmp);
image_height = FreeImage_GetHeight(tmp);
image_bpp = FreeImage_GetBPP(tmp);
image_pitch = FreeImage_GetPitch(tmp);
//checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&d_image_data), image_pitch * image_height * image_bpp / 8));
//checkCudaErrors(cudaMemcpy(d_image_data, FreeImage_GetBits(tmp), image_pitch * image_height * image_bpp / 8, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMallocPitch(&d_image_data, &image_pitch, image_width * sizeof(uchar4), image_height));
checkCudaErrors(cudaMemcpy2D(d_image_data, image_pitch, FreeImage_GetBits(tmp), FreeImage_GetPitch(tmp), image_width * sizeof(uchar4), image_height, cudaMemcpyHostToDevice));
FreeImage_Unload(tmp);
FreeImage_DeInitialise();
}
#pragma endregion
#pragma region STEP 2
void create_src_texure()
{
//Floating Point Texture Data
checkCudaErrors(cudaMallocPitch(reinterpret_cast<void**>(&d_image_linear_pitch_texture_data), &image_tex_pitch, image_width * sizeof(uchar4), image_height));
//Converts custom image data to float and stores result in the float_pitch_linear_data
/*switch (image_bpp)
{
case 8: colorToUchar4<8> << <image_ks.dimGrid, image_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, image_tex_pitch / sizeof(uchar4), d_image_linear_pitch_texture_data); break;
case 16: colorToUchar4<16> << <image_ks.dimGrid, image_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, image_tex_pitch / sizeof(uchar4), d_image_linear_pitch_texture_data); break;
case 24: colorToUchar4<24> << <image_ks.dimGrid, image_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, image_tex_pitch / sizeof(uchar4), d_image_linear_pitch_texture_data); break;
case 32: colorToUchar4<32> << <image_ks.dimGrid, image_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, image_tex_pitch / sizeof(uchar4), d_image_linear_pitch_texture_data); break;
}*/
//checkDeviceMatrix<float>(dLinearPitchTextureData, texPitch, imageHeight, imageWidth, "", "");
//Texture settings
//tex_channel_desc = cudaCreateChannelDesc<uchar4>();
tex_channel_desc = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned);
tex_ref.normalized = false;
tex_ref.filterMode = cudaFilterModePoint;
tex_ref.addressMode[0] = cudaAddressModeClamp;
tex_ref.addressMode[1] = cudaAddressModeClamp;
checkCudaErrors(cudaBindTexture2D(nullptr, &tex_ref, d_image_data, &tex_channel_desc, image_width, image_height, image_tex_pitch));
}
#pragma endregion
#pragma region STEP 3
//TASK: Build the per-channel colour histogram of the input image. Use the bound texture (tex_ref).
void create_histogram()
{
checkCudaErrors(cudaMallocPitch(reinterpret_cast<void**>(&dst_image_data), &image_tex_pitch, image_width * sizeof(uchar3), image_height));
h_red = static_cast<float*>(malloc(size));
h_green = static_cast<float*>(malloc(size));
h_blue = static_cast<float*>(malloc(size));
for (auto i = 0; i < COLORS; i++)
{
h_red[i] = 0;
h_green[i] = 0;
h_blue[i] = 0;
}
checkCudaErrors(cudaMalloc(&d_red, size));
checkCudaErrors(cudaMalloc(&d_green, size));
checkCudaErrors(cudaMalloc(&d_blue, size));
checkCudaErrors(cudaMalloc(&d_color, 3 * sizeof(float)));
checkCudaErrors(cudaMemcpy(d_red, h_red, size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_green, h_green, size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_blue, h_blue, size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_color, h_color, 3 * sizeof(float), cudaMemcpyHostToDevice));
search_colors << <image_ks.dimGrid, image_ks.dimBlock >> > (image_width, image_height, image_tex_pitch, d_red, d_green, d_blue, d_color, dst_image_data);
checkCudaErrors(cudaMemcpy(h_red, d_red, size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_green, d_green, size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_blue, d_blue, size, cudaMemcpyDeviceToHost));
for (auto i = 0; i < 256; i++)
{
if (h_red[i] > h_max[0])
{
h_max[0] = h_red[i];
}
if (h_green[i] > h_max[1])
{
h_max[1] = h_green[i];
}
if (h_blue[i] > h_max[2])
{
h_max[2] = h_blue[i];
}
}
checkCudaErrors(cudaMalloc(&d_max, 3 * sizeof(float)));
checkCudaErrors(cudaMemcpy(d_max, h_max, 3 * sizeof(float), cudaMemcpyHostToDevice));
draw_histogram << <histogram_ks.dimGrid, histogram_ks.dimBlock >> > (histogram_width, histogram_height, histogram_tex_pitch, d_red, d_green, d_blue, d_max, dst_histogram_data);
}
#pragma endregion
#pragma region STEP 4
//TASK: Save the output image
void save_histogram_image(const char* image_file_name)
{
FreeImage_Initialise();
const auto tmp = FreeImage_Allocate(histogram_width, histogram_height, 24);
checkCudaErrors(cudaMemcpy2D(FreeImage_GetBits(tmp), FreeImage_GetPitch(tmp), dst_histogram_data, histogram_tex_pitch, histogram_width * 3, histogram_height, cudaMemcpyDeviceToHost));
//FreeImage_Save(FIF_PNG, tmp, imageFileName, 0);
ImageManager::GenericWriter(tmp, image_file_name, FIF_PNG);
FreeImage_Unload(tmp);
FreeImage_DeInitialise();
}
void save_image(const char* image_file_name)
{
FreeImage_Initialise();
const auto tmp = FreeImage_Allocate(image_width, image_height, 32);
checkCudaErrors(cudaMemcpy2D(FreeImage_GetBits(tmp), FreeImage_GetPitch(tmp), dst_image_data, image_tex_pitch, image_width * 4, image_height, cudaMemcpyDeviceToHost));
//FreeImage_Save(FIF_PNG, tmp, image_file_name, 0);
FreeImage_Save(FIF_PNG, tmp, image_file_name, 0);
//ImageManager::GenericWriter(tmp, image_file_name, FIF_PNG);
FreeImage_Unload(tmp);
FreeImage_DeInitialise();
}
#pragma endregion
void release_memory()
{
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_max));
checkCudaErrors(cudaFree(d_color));
free(h_red);
free(h_green);
free(h_blue);
h_red = nullptr;
h_green = nullptr;
h_blue = nullptr;
cudaUnbindTexture(tex_ref);
if (d_image_data != nullptr)
checkCudaErrors(cudaFree(d_image_data));
if (d_image_linear_pitch_texture_data != nullptr)
checkCudaErrors(cudaFree(d_image_linear_pitch_texture_data));
if (d_array_texture_data)
checkCudaErrors(cudaFreeArray(d_array_texture_data));
if (d_output_data)
checkCudaErrors(cudaFree(d_output_data));
}
void project_color()
{
//STEP 1
load_source_image("image.tif");
h_color[0] = 100;
h_color[1] = 100;
h_color[2] = 100;
//TODO: Setup the kernel settings
image_ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1);
image_ks.blockSize = BLOCK_DIM * BLOCK_DIM;
image_ks.dimGrid = dim3((image_width + BLOCK_DIM - 1) / BLOCK_DIM, (image_height + BLOCK_DIM - 1) / BLOCK_DIM, 1);
histogram_ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1);
histogram_ks.blockSize = BLOCK_DIM * BLOCK_DIM;
histogram_ks.dimGrid = dim3((histogram_width + BLOCK_DIM - 1) / BLOCK_DIM, (histogram_height + BLOCK_DIM - 1) / BLOCK_DIM, 1);
//Step 2 - create the source texture stored in the linear pitch memory
create_src_texure();
//Step 3 - create the histogram
create_histogram();
//Step 4 - save the result image
//save_histogram_image("histogram.bmp");
save_image("founded.bmp");
release_memory();
std::getchar();
}
|
443ed7e84ded6b4e73d2367b1db3e306ce0e6b1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHHalf.h"
#include "THHTensorCopy.h"
#include "THHApply.cuh"
#include "THHNumerics.cuh"
template <typename T>
struct TensorAddConstantOp {
TensorAddConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in + val;
}
__device__ __forceinline__ void operator()(T* v) {
*v += val;
}
const T val;
};
#ifdef CUDA_HALF_TENSOR
template <>
struct TensorAddConstantOp<half> {
#ifdef CUDA_HALF_INSTRUCTIONS
TensorAddConstantOp(half v) : val(v) {}
#else
TensorAddConstantOp(half v) : fval(THC_half2float(v)) {}
#endif
__device__ __forceinline__ void operator()(half* out, half* in) {
#ifdef CUDA_HALF_INSTRUCTIONS
*out = __hadd(*in, val);
#else
float fin = __half2float(*in);
float fout = fin + fval;
*out = __float2half(fout);
#endif
}
__device__ __forceinline__ void operator()(half* v) {
#ifdef CUDA_HALF_INSTRUCTIONS
*v = __hadd(*v, val);
#else
float fv = __half2float(*v);
fv += fval;
*v = __float2half(fv);
#endif
}
#ifdef CUDA_HALF_INSTRUCTIONS
const half val;
#else
const float fval;
#endif
};
#endif // CUDA_HALF_TENSOR
template <typename T>
struct TensorSubConstantOp {
TensorSubConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in - val;
}
__device__ __forceinline__ void operator()(T* v) {
*v -= val;
}
const T val;
};
#ifdef CUDA_HALF_TENSOR
template <>
struct TensorSubConstantOp<half> {
#ifdef CUDA_HALF_INSTRUCTIONS
TensorSubConstantOp(half v): val(THC_float2half(-(THC_half2float(v)))) {}
#else
TensorSubConstantOp(half v): fval(-(THC_half2float(v))) {}
#endif
__device__ __forceinline__ void operator()(half* out, half* in) {
#ifdef CUDA_HALF_INSTRUCTIONS
*out = __hadd(*in, val);
#else
float fin = __half2float(*in);
float fout = fin + fval;
*out = __float2half(fout);
#endif
}
__device__ __forceinline__ void operator()(half* v) {
#ifdef CUDA_HALF_INSTRUCTIONS
*v = __hadd(*v, val);
#else
float fv = __half2float(*v);
fv += fval;
*v = __float2half(fv);
#endif
}
#ifdef CUDA_HALF_INSTRUCTIONS
const half val;
#else
const float fval;
#endif
};
#endif // CUDA_HALF_TENSOR
template <typename T>
struct TensorMulConstantOp {
TensorMulConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(T* v) {
*v *= val;
}
const T val;
};
#ifdef CUDA_HALF_TENSOR
template <>
struct TensorMulConstantOp<half> {
#ifdef CUDA_HALF_INSTRUCTIONS
TensorMulConstantOp(half v) : val(v) {}
#else
TensorMulConstantOp(half v) : fval(THC_half2float(v)) {}
#endif
__device__ __forceinline__ void operator()(half* out, half* in) {
#ifdef CUDA_HALF_INSTRUCTIONS
*out = __hmul(*in, val);
#else
float fin = __half2float(*in);
float fout = fin * fval;
*out = __float2half(fout);
#endif
}
__device__ __forceinline__ void operator()(half* v) {
#ifdef CUDA_HALF_INSTRUCTIONS
*v = __hmul(*v, val);
#else
float fv = __half2float(*v);
fv *= fval;
*v = __float2half(fv);
#endif
}
#ifdef CUDA_HALF_INSTRUCTIONS
const half val;
#else
const float fval;
#endif
};
#endif // CUDA_HALF_TENSOR
template <typename T>
struct TensorDivConstantOp {
TensorDivConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in / val;
}
__device__ __forceinline__ void operator()(T* v) {
*v /= val;
}
const T val;
};
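// The float and double specializations replace the division with a multiply by the precomputed reciprocal.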
template <>
struct TensorDivConstantOp<float> {
TensorDivConstantOp(float v) : val(1.f / v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(float* v) {
*v *= val;
}
const float val;
};
template <>
struct TensorDivConstantOp<double> {
TensorDivConstantOp(double v) : val(1. / v) {}
__device__ __forceinline__ void operator()(double* out, double* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(double* v) {
*v *= val;
}
const double val;
};
#ifdef CUDA_HALF_TENSOR
template <>
struct TensorDivConstantOp<half> {
#ifdef CUDA_HALF_INSTRUCTIONS
TensorDivConstantOp(half v) : val(ScalarInv<half>::to(v)) {}
#else
TensorDivConstantOp(half v) : fval(1.f / THC_half2float(v)) {}
#endif
__device__ __forceinline__ void operator()(half* out, half* in) {
#ifdef CUDA_HALF_INSTRUCTIONS
*out = __hmul(*in, val);
#else
float fin = __half2float(*in);
float fout = fin * fval;
*out = __float2half(fout);
#endif
}
__device__ __forceinline__ void operator()(half* v) {
#ifdef CUDA_HALF_INSTRUCTIONS
*v = __hmul(*v, val);
#else
float fv = __half2float(*v);
fv *= fval;
*v = __float2half(fv);
#endif
}
#ifdef CUDA_HALF_INSTRUCTIONS
const half val;
#else
const float fval;
#endif
};
#endif // CUDA_HALF_TENSOR
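// Zeroes elements outside the requested triangle: Upper=1 keeps col - row >= k (upper), Upper=0 keeps col - row <= k (lower); mask() reconstructs (row, col) from the element's offset and the two strides.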
template <int Upper>
struct TensorTriOp {
TensorTriOp(float *start_, long stride0_, long stride1_, long k_)
: start(start_), stride0(stride0_), stride1(stride1_), k(k_) {}
__device__ __forceinline__ int mask(float *in) {
ptrdiff_t n = in - start;
long row, col;
if (stride0 > stride1)
{
row = (long) (n / stride0);
col = (long) ((n % stride0) / stride1);
}
else
{
row = (long) ((n % stride1) / stride0);
col = (long) (n / stride1);
}
return Upper ? (col - row >= k) : (col - row <= k);
}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = mask(in) ? *in : 0;
}
__device__ __forceinline__ void operator()(float* v) {
if (!mask(v))
*v = 0;
}
const float *start;
const long stride0, stride1, k;
};
void THCudaTensor_tril(THCState *state, THCudaTensor *self_, THCudaTensor *src_, long k)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src_));
THArgCheck(src_->nDimension == 2, 1, "expected a matrix");
THCudaTensor *src = src_;
if (self_ == src_)
src = THCudaTensor_newContiguous(state, src_);
long stride0 = src->stride[0];
long stride1 = src->stride[1];
float *start = THCudaTensor_data(state, src) + src->storageOffset;
TensorTriOp<0> op(start, stride0, stride1, k);
if (self_ == src_) {
if (!THC_pointwiseApply1(state, src, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
if (self_ == src_)
THCudaTensor_freeCopyTo(state, src, src_);
THCudaCheck(hipGetLastError());
}
void THCudaTensor_triu(THCState *state, THCudaTensor *self_, THCudaTensor *src_, long k)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src_));
THArgCheck(src_->nDimension == 2, 1, "expected a matrix");
THCudaTensor *src = src_;
if (self_ == src_)
src = THCudaTensor_newContiguous(state, src_);
long stride0 = src->stride[0];
long stride1 = src->stride[1];
float *start = THCudaTensor_data(state, src) + src->storageOffset;
TensorTriOp<1> op(start, stride0, stride1, k);
if (self_ == src_) {
if (!THC_pointwiseApply1(state, src, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
if (self_ == src_)
THCudaTensor_freeCopyTo(state, src, src_);
THCudaCheck(hipGetLastError());
}
#include "generic/THCTensorMathPairwise.cu"
#include "THHGenerateAllTypes.h"
// Copy the kth diagonal of a matrix B to a vector A.
__global__ void THCudaTensor_copyFromDiagonal(float* a, float* b, long start, long size, long strideSum, long strideA) {
for (long linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const long bOffset = start + strideSum * linearIndex;
a[strideA * linearIndex] = b[bOffset];
}
}
// Copy vector B to the kth diagonal of a matrix A
__global__ void THCudaTensor_copyToDiagonal(float* a, float* b, long start, long size, long strideSum, long strideB) {
for (long linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const long aOffset = start + strideSum * linearIndex;
a[aOffset] = b[strideB * linearIndex];
}
}
void THCudaTensor_diag(THCState *state, THCudaTensor *self_, THCudaTensor *src_, long k){
THAssert(THCudaTensor_checkGPU(state, 2, self_, src_));
int nDimension = THCudaTensor_nDimension(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
if (nDimension == 2) {
long stride0 = THCudaTensor_stride(state, src_, 0);
long stride1 = THCudaTensor_stride(state, src_, 1);
long size0 = THCudaTensor_size(state, src_, 0);
long size1 = THCudaTensor_size(state, src_, 1);
long size = (k > 0) ? min((long long)size0, (long long)size1 - k) : min((long long)size0 + k, (long long)size1);
THCudaTensor_resize1d(state, self_, size);
long strideSelf = THCudaTensor_stride(state, self_, 0);
const dim3 threads(min((long long)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (long long)size));
dim3 grid(min((long long)1024, (long long)THCCeilDiv(size, (long)threads.x)));
long start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCudaTensor_copyFromDiagonal), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, self_), THCudaTensor_data(state, src_), start, size, stride0 + stride1, strideSelf);
} else {
long totalElements = THCudaTensor_nElement(state, src_);
long size = (k > 0) ? totalElements + k : totalElements - k;
long strideSrc = THCudaTensor_stride(state, src_, 0);
THCudaTensor_resize2d(state, self_, size, size);
THCudaTensor_zero(state, self_);
long stride0 = THCudaTensor_stride(state, self_, 0);
long stride1 = THCudaTensor_stride(state, self_, 1);
const dim3 threads(min((long long)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (long long)size));
dim3 grid(min((long long)1024, (long long)THCCeilDiv(size, (long)threads.x)));
long start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCudaTensor_copyToDiagonal), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, self_), THCudaTensor_data(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
THCudaCheck(hipGetLastError());
}
float THCudaTensor_trace(THCState *state, THCudaTensor *src_) {
THAssert(THCudaTensor_checkGPU(state, 1, src_));
THArgCheck((src_->nDimension == 2), 1, "expected a matrix");
THCudaTensor *diag = THCudaTensor_new(state);
THCudaTensor_diag(state, diag, src_, 0);
float trace = THCudaTensor_sumall(state, diag);
THCudaTensor_free(state, diag);
return trace;
}
| 443ed7e84ded6b4e73d2367b1db3e306ce0e6b1a.cu | #include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCHalf.h"
#include "THCTensorCopy.h"
#include "THCApply.cuh"
#include "THCNumerics.cuh"
template <typename T>
struct TensorAddConstantOp {
TensorAddConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in + val;
}
__device__ __forceinline__ void operator()(T* v) {
*v += val;
}
const T val;
};
#ifdef CUDA_HALF_TENSOR
template <>
struct TensorAddConstantOp<half> {
#ifdef CUDA_HALF_INSTRUCTIONS
TensorAddConstantOp(half v) : val(v) {}
#else
TensorAddConstantOp(half v) : fval(THC_half2float(v)) {}
#endif
__device__ __forceinline__ void operator()(half* out, half* in) {
#ifdef CUDA_HALF_INSTRUCTIONS
*out = __hadd(*in, val);
#else
float fin = __half2float(*in);
float fout = fin + fval;
*out = __float2half(fout);
#endif
}
__device__ __forceinline__ void operator()(half* v) {
#ifdef CUDA_HALF_INSTRUCTIONS
*v = __hadd(*v, val);
#else
float fv = __half2float(*v);
fv += fval;
*v = __float2half(fv);
#endif
}
#ifdef CUDA_HALF_INSTRUCTIONS
const half val;
#else
const float fval;
#endif
};
#endif // CUDA_HALF_TENSOR
template <typename T>
struct TensorSubConstantOp {
TensorSubConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in - val;
}
__device__ __forceinline__ void operator()(T* v) {
*v -= val;
}
const T val;
};
#ifdef CUDA_HALF_TENSOR
template <>
struct TensorSubConstantOp<half> {
#ifdef CUDA_HALF_INSTRUCTIONS
TensorSubConstantOp(half v): val(THC_float2half(-(THC_half2float(v)))) {}
#else
TensorSubConstantOp(half v): fval(-(THC_half2float(v))) {}
#endif
__device__ __forceinline__ void operator()(half* out, half* in) {
#ifdef CUDA_HALF_INSTRUCTIONS
*out = __hadd(*in, val);
#else
float fin = __half2float(*in);
float fout = fin + fval;
*out = __float2half(fout);
#endif
}
__device__ __forceinline__ void operator()(half* v) {
#ifdef CUDA_HALF_INSTRUCTIONS
*v = __hadd(*v, val);
#else
float fv = __half2float(*v);
fv += fval;
*v = __float2half(fv);
#endif
}
#ifdef CUDA_HALF_INSTRUCTIONS
const half val;
#else
const float fval;
#endif
};
#endif // CUDA_HALF_TENSOR
template <typename T>
struct TensorMulConstantOp {
TensorMulConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(T* v) {
*v *= val;
}
const T val;
};
#ifdef CUDA_HALF_TENSOR
template <>
struct TensorMulConstantOp<half> {
#ifdef CUDA_HALF_INSTRUCTIONS
TensorMulConstantOp(half v) : val(v) {}
#else
TensorMulConstantOp(half v) : fval(THC_half2float(v)) {}
#endif
__device__ __forceinline__ void operator()(half* out, half* in) {
#ifdef CUDA_HALF_INSTRUCTIONS
*out = __hmul(*in, val);
#else
float fin = __half2float(*in);
float fout = fin * fval;
*out = __float2half(fout);
#endif
}
__device__ __forceinline__ void operator()(half* v) {
#ifdef CUDA_HALF_INSTRUCTIONS
*v = __hmul(*v, val);
#else
float fv = __half2float(*v);
fv *= fval;
*v = __float2half(fv);
#endif
}
#ifdef CUDA_HALF_INSTRUCTIONS
const half val;
#else
const float fval;
#endif
};
#endif // CUDA_HALF_TENSOR
template <typename T>
struct TensorDivConstantOp {
TensorDivConstantOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out = *in / val;
}
__device__ __forceinline__ void operator()(T* v) {
*v /= val;
}
const T val;
};
template <>
struct TensorDivConstantOp<float> {
TensorDivConstantOp(float v) : val(1.f / v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(float* v) {
*v *= val;
}
const float val;
};
template <>
struct TensorDivConstantOp<double> {
TensorDivConstantOp(double v) : val(1. / v) {}
__device__ __forceinline__ void operator()(double* out, double* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(double* v) {
*v *= val;
}
const double val;
};
#ifdef CUDA_HALF_TENSOR
template <>
struct TensorDivConstantOp<half> {
#ifdef CUDA_HALF_INSTRUCTIONS
TensorDivConstantOp(half v) : val(ScalarInv<half>::to(v)) {}
#else
TensorDivConstantOp(half v) : fval(1.f / THC_half2float(v)) {}
#endif
__device__ __forceinline__ void operator()(half* out, half* in) {
#ifdef CUDA_HALF_INSTRUCTIONS
*out = __hmul(*in, val);
#else
float fin = __half2float(*in);
float fout = fin * fval;
*out = __float2half(fout);
#endif
}
__device__ __forceinline__ void operator()(half* v) {
#ifdef CUDA_HALF_INSTRUCTIONS
*v = __hmul(*v, val);
#else
float fv = __half2float(*v);
fv *= fval;
*v = __float2half(fv);
#endif
}
#ifdef CUDA_HALF_INSTRUCTIONS
const half val;
#else
const float fval;
#endif
};
#endif // CUDA_HALF_TENSOR
template <int Upper>
struct TensorTriOp {
TensorTriOp(float *start_, long stride0_, long stride1_, long k_)
: start(start_), stride0(stride0_), stride1(stride1_), k(k_) {}
__device__ __forceinline__ int mask(float *in) {
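// Map the element's linear offset back to (row, col) via the two strides, then test whether it lies in the requested triangle.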
ptrdiff_t n = in - start;
long row, col;
if (stride0 > stride1)
{
row = (long) (n / stride0);
col = (long) ((n % stride0) / stride1);
}
else
{
row = (long) ((n % stride1) / stride0);
col = (long) (n / stride1);
}
return Upper ? (col - row >= k) : (col - row <= k);
}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = mask(in) ? *in : 0;
}
__device__ __forceinline__ void operator()(float* v) {
if (!mask(v))
*v = 0;
}
const float *start;
const long stride0, stride1, k;
};
void THCudaTensor_tril(THCState *state, THCudaTensor *self_, THCudaTensor *src_, long k)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src_));
THArgCheck(src_->nDimension == 2, 1, "expected a matrix");
THCudaTensor *src = src_;
if (self_ == src_)
src = THCudaTensor_newContiguous(state, src_);
long stride0 = src->stride[0];
long stride1 = src->stride[1];
float *start = THCudaTensor_data(state, src) + src->storageOffset;
TensorTriOp<0> op(start, stride0, stride1, k);
if (self_ == src_) {
if (!THC_pointwiseApply1(state, src, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
if (self_ == src_)
THCudaTensor_freeCopyTo(state, src, src_);
THCudaCheck(cudaGetLastError());
}
void THCudaTensor_triu(THCState *state, THCudaTensor *self_, THCudaTensor *src_, long k)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src_));
THArgCheck(src_->nDimension == 2, 1, "expected a matrix");
THCudaTensor *src = src_;
if (self_ == src_)
src = THCudaTensor_newContiguous(state, src_);
long stride0 = src->stride[0];
long stride1 = src->stride[1];
float *start = THCudaTensor_data(state, src) + src->storageOffset;
TensorTriOp<1> op(start, stride0, stride1, k);
if (self_ == src_) {
if (!THC_pointwiseApply1(state, src, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
if (self_ == src_)
THCudaTensor_freeCopyTo(state, src, src_);
THCudaCheck(cudaGetLastError());
}
#include "generic/THCTensorMathPairwise.cu"
#include "THCGenerateAllTypes.h"
// Copy the kth diagonal of a matrix B to a vector A.
__global__ void THCudaTensor_copyFromDiagonal(float* a, float* b, long start, long size, long strideSum, long strideA) {
for (long linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const long bOffset = start + strideSum * linearIndex;
a[strideA * linearIndex] = b[bOffset];
}
}
// Copy vector B to the kth diagonal of a matrix A
__global__ void THCudaTensor_copyToDiagonal(float* a, float* b, long start, long size, long strideSum, long strideB) {
for (long linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const long aOffset = start + strideSum * linearIndex;
a[aOffset] = b[strideB * linearIndex];
}
}
void THCudaTensor_diag(THCState *state, THCudaTensor *self_, THCudaTensor *src_, long k){
THAssert(THCudaTensor_checkGPU(state, 2, self_, src_));
int nDimension = THCudaTensor_nDimension(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
if (nDimension == 2) {
long stride0 = THCudaTensor_stride(state, src_, 0);
long stride1 = THCudaTensor_stride(state, src_, 1);
long size0 = THCudaTensor_size(state, src_, 0);
long size1 = THCudaTensor_size(state, src_, 1);
long size = (k > 0) ? min((long long)size0, (long long)size1 - k) : min((long long)size0 + k, (long long)size1);
THCudaTensor_resize1d(state, self_, size);
long strideSelf = THCudaTensor_stride(state, self_, 0);
const dim3 threads(min((long long)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (long long)size));
dim3 grid(min((long long)1024, (long long)THCCeilDiv(size, (long)threads.x)));
long start = (k >= 0 ? k * stride1 : -k * stride0);
THCudaTensor_copyFromDiagonal<<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCudaTensor_data(state, self_), THCudaTensor_data(state, src_), start, size, stride0 + stride1, strideSelf);
} else {
long totalElements = THCudaTensor_nElement(state, src_);
long size = (k > 0) ? totalElements + k : totalElements - k;
long strideSrc = THCudaTensor_stride(state, src_, 0);
THCudaTensor_resize2d(state, self_, size, size);
THCudaTensor_zero(state, self_);
long stride0 = THCudaTensor_stride(state, self_, 0);
long stride1 = THCudaTensor_stride(state, self_, 1);
const dim3 threads(min((long long)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (long long)size));
dim3 grid(min((long long)1024, (long long)THCCeilDiv(size, (long)threads.x)));
long start = (k >= 0 ? k * stride1 : -k * stride0);
THCudaTensor_copyToDiagonal<<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCudaTensor_data(state, self_), THCudaTensor_data(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
THCudaCheck(cudaGetLastError());
}
float THCudaTensor_trace(THCState *state, THCudaTensor *src_) {
THAssert(THCudaTensor_checkGPU(state, 1, src_));
THArgCheck((src_->nDimension == 2), 1, "expected a matrix");
THCudaTensor *diag = THCudaTensor_new(state);
THCudaTensor_diag(state, diag, src_, 0);
float trace = THCudaTensor_sumall(state, diag);
THCudaTensor_free(state, diag);
return trace;
}
|
86755ab027409c646f002b88a4e80d0cd9dd2ece.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
# include <iostream>
# include <stdio.h>
# include <string>
# include <vector>
# include "../../headers/insert_batch/record.h"
# define BLOCK_SIZE 512
extern __constant__ int records_size_device;
extern __constant__ int word_num_device, file_split_num_device;
extern __device__ struct record* records_device;
extern __device__ int* rules_content_device;
extern __device__ int* word_lengths_device;
extern __device__ int* string_start_indexes_device;
extern __device__ char* insert_strings_device;
extern __device__ unsigned int* root_rule_start_offsets_device;
extern __device__ unsigned long long* element_bitmap_device;
extern __device__ int* file_split_indexes_device;
extern __device__ int* rule_split_indexes_device;
extern __device__ int curr_record_num_device;
extern __device__ int* relation_map_device;
struct record* create_record_set(int size){
int malloc_size = sizeof(struct record) * size;
// cout << "malloc size is : " << malloc_size << endl;
struct record* temp_records_device;
hipError_t stat;
stat = hipMalloc(&temp_records_device, malloc_size);
hipMemset(temp_records_device, 0x00, malloc_size);
if(stat){
cout << endl;
cout << "cudamalloc records failed with stat : " << stat << endl;
return NULL;
}
// stat = hipMemcpyToSymbol(records_device, &temp_records_device, sizeof(temp_records_device));
hipMemcpyToSymbol(records_size_device, &size, sizeof(int));
return temp_records_device;
}
__device__ int insert_into_rule(int rule_index, int insert_index, int& curr_offset, int file_index, int insert_offset, struct insert_update_record* insert_update_records_device, int tid);
__global__ void insert(int* file_indexes, int* insert_offsets, struct insert_update_record* insert_update_records_device, int query_num){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= query_num){
return;
}
int file_index = file_indexes[tid];
int insert_offset = insert_offsets[tid];
int file_start_index = file_split_indexes_device[file_index];
int file_end_index = file_split_indexes_device[file_index + 1];
// printf("%d %d\n", file_index, file_split_indexes_device[1]);
if((insert_offset < 0) || (root_rule_start_offsets_device[file_end_index - 1] <= insert_offset)){
// printf("%d %d\n", file_end_index - 1, root_rule_start_offsets_device[file_end_index - 1]);
printf("insert invalid. \n");
}
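// Binary search within this file's slice of root_rule_start_offsets_device for the element whose offset range contains insert_offset.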
int search_start = file_start_index;
int search_end = file_end_index;
int search_mid = (search_start + search_end) / 2;
while(root_rule_start_offsets_device[search_mid] > insert_offset ||
root_rule_start_offsets_device[search_mid + 1] <= insert_offset){
if(search_start == search_mid){
break;
}
// int last_search_start = search_start;
// int last_search_end = search_end;
int last_search_mid = search_mid;
if(insert_offset < root_rule_start_offsets_device[search_mid]){
search_end = last_search_mid - 1;
}
else{
search_start = last_search_mid;
}
search_mid = (search_start + search_end) / 2;
}
int root_rule_index = search_mid; // the index in root rule to insert
int element_index = rules_content_device[search_mid];
if(element_index < word_num_device){
// if element is word
struct record temp_record;
if(element_bitmap_device[root_rule_index >> 6] & (1ul << (root_rule_index & 0x3f))){
temp_record.no = relation_map_device[root_rule_index];
}
else{
element_bitmap_device[root_rule_index >> 6] =
element_bitmap_device[root_rule_index >> 6] | (1ul << (root_rule_index & 0x3f));
temp_record.no = -1;
}
temp_record.file_index = file_index;
temp_record.file_offset = insert_offset;
temp_record.rule_index = 0;
temp_record.rule_start_offset = 0;
temp_record.rule_location = root_rule_index;
temp_record.replace_word = element_index; // original word
temp_record.content = &insert_strings_device[string_start_indexes_device[tid]];
int insert_string_length = string_start_indexes_device[tid + 1] - string_start_indexes_device[tid];
temp_record.content_length = insert_string_length;
int record_index = atomicAdd(&curr_record_num_device, 1);
records_device[record_index] = temp_record;
relation_map_device[root_rule_index] = record_index;
struct insert_update_record temp_update_record;
temp_update_record.file_index = file_index;
temp_update_record.insert_offset = insert_offset;
temp_update_record.root_insert_index = root_rule_index;
temp_update_record.insert_length = insert_string_length;
insert_update_records_device[record_index] = temp_update_record;
// no update
}
else{
// if element is rule
int curr_offset = root_rule_start_offsets_device[search_mid];
insert_into_rule(element_index - word_num_device, search_mid, curr_offset, file_index, insert_offset, insert_update_records_device, tid);
}
return;
}
__device__ int insert_into_rule(int rule_index, int insert_index, int& curr_offset, int file_index, int insert_offset, struct insert_update_record* insert_update_records_device, int tid){ // search mid is insert index(root rule index)
// int file_end_index = file_split_indexes_device[file_index + 1];
int rule_start_offset = curr_offset; // curr offset is the start offset of rule to insert
int rule_start_index = rule_split_indexes_device[rule_index];
int rule_end_index = rule_split_indexes_device[rule_index + 1];
for(int i = rule_start_index; i < rule_end_index; i ++){
int element_index = rules_content_device[i];
if(element_index < word_num_device && !(element_bitmap_device[i >> 6] & (1ul << (i & 0x3f)))){
// if is word and bit map not set
int new_offset = curr_offset + word_lengths_device[element_index];
if(insert_offset > new_offset){
// keep searching
curr_offset = new_offset;
continue;
}
else{ // end searching
struct record temp_record;
element_bitmap_device[(i >> 6)] = element_bitmap_device[(i >> 6)] | (1ul << (i & 0x3f));
temp_record.no = -1;
// get record_index
int insert_record_index = atomicAdd(&curr_record_num_device, 1);
relation_map_device[i] = insert_record_index;
temp_record.file_index = file_index;
temp_record.file_offset = insert_offset;
temp_record.rule_start_offset = rule_start_offset;
temp_record.rule_index = rule_index;
temp_record.rule_location = i;
temp_record.replace_word = element_index;
temp_record.content = &insert_strings_device[string_start_indexes_device[tid]];
int insert_string_length = string_start_indexes_device[tid + 1] - string_start_indexes_device[tid];
temp_record.content_length = insert_string_length;
records_device[insert_record_index] = temp_record;
struct insert_update_record temp_update_record;
temp_update_record.file_index = file_index;
temp_update_record.insert_offset = insert_offset;
temp_update_record.root_insert_index = insert_index;
temp_update_record.insert_length = insert_string_length;
insert_update_records_device[insert_record_index] = temp_update_record;
// break;
return 1; // inserted, for end
}
}
else if(element_index < word_num_device && (element_bitmap_device[i >> 6] & (1ul << (i & 0x3f)))){ // if is word but bitmap set
int record_index = relation_map_device[i]; // the last record index of this rule
int content_size = 0;
struct record temp_record = records_device[record_index];
if(temp_record.file_index == file_index &&
temp_record.rule_start_offset == rule_start_offset){ // if in the same location
content_size += temp_record.content_length;
}
while(temp_record.no >= 0){ // view back every record in the same location
record_index = temp_record.no;
// set new temp record
temp_record = records_device[record_index];
if(temp_record.file_index == file_index &&
temp_record.rule_start_offset == rule_start_offset){
content_size += temp_record.content_length;
}
}
content_size += word_lengths_device[records_device[record_index].replace_word];
int new_offset = curr_offset + content_size;
if(insert_offset > new_offset){ // keep searching
curr_offset = new_offset;
continue;
}
else{ // end searching
struct record insert_record;
int insert_record_index = atomicAdd(&curr_record_num_device, 1);
insert_record.no = relation_map_device[i];
relation_map_device[i] = insert_record_index;
insert_record.file_index = file_index;
insert_record.file_offset = insert_offset;
insert_record.rule_start_offset = rule_start_offset;
insert_record.rule_index = rule_index;
insert_record.rule_location = i;
insert_record.replace_word = element_index;
insert_record.content = &insert_strings_device[string_start_indexes_device[tid]];
int insert_string_length = string_start_indexes_device[tid + 1] - string_start_indexes_device[tid];
insert_record.content_length = insert_string_length;
records_device[insert_record_index] = insert_record;
struct insert_update_record temp_update_record;
temp_update_record.file_index = file_index;
temp_update_record.insert_offset = insert_offset;
temp_update_record.root_insert_index = insert_index;
temp_update_record.insert_length = insert_string_length;
insert_update_records_device[insert_record_index] = temp_update_record;
// to update root offset and record offset
// for(int j = insert_index + 1; j < file_end_index; j ++){
// // root_rule_start_offsets_device[j] += insert_string_length;
// atomicAdd(&root_rule_start_offsets_device[j], insert_string_length);
// }
// for(int j = 0; j < insert_record_index; j ++){
// struct record search_record = records_device[j];
// if(file_index == search_record.file_index){
// // if record file index hits, update file offset
// if(search_record.file_offset > insert_offset){
// // records_device[j].file_offset += insert_string_length;
// atomicAdd(&records_device[j].file_offset, insert_string_length);
// }
// if(search_record.rule_start_offset > insert_offset){
// // records.device[j].rule_start_offset += insert_string_length;
// atomicAdd(&records_device[j].rule_start_offset, insert_string_length);
// }
// }
// }
return 1; // record inserted
}
}
else if(element_index >= word_num_device){
// if is still rule
int if_inserted = insert_into_rule(element_index - word_num_device, insert_index, curr_offset, file_index, insert_offset, insert_update_records_device, tid);
if(if_inserted){
return 1;
}
}
}
return 0; // if not inserted
}
__device__ int get_file_index(int root_rule_index){
int file_index = 0;
for(int i = 0; i < file_split_num_device; i ++){
if(root_rule_index < file_split_indexes_device[i + 1]){
break;
}
file_index += 1;
}
return file_index;
}
__global__ void update_root_start_offsets(int range, int record_index, int tid_file_index, struct insert_update_record* insert_update_records_device){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= range){
return;
}
struct insert_update_record temp_update_record = insert_update_records_device[tid];
int file_index = temp_update_record.file_index;
int root_insert_index = temp_update_record.root_insert_index;
int insert_length = temp_update_record.insert_length;
if(root_insert_index < record_index && file_index == tid_file_index){
// root_rule_start_offsets_device[tid] += offset_update;
atomicAdd(&root_rule_start_offsets_device[record_index], insert_length);
}
}
__global__ void insert_update_offsets(struct insert_update_record* insert_update_records_device, int root_size){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= root_size){
return;
}
int tid_file_index = get_file_index(tid);
int offset_update = 0;
// int block_size = BLOCK_SIZE;
// int grid_size = (records_size_device + block_size - 1) / block_size;
// update_root_start_offsets<<<grid_size, block_size>>>(records_size_device, tid, tid_file_index, insert_update_records_device);
// to process the root_rule_start_offset[tid]
for(int i = 0; i < records_size_device; i ++){
struct insert_update_record temp_update_record = insert_update_records_device[i];
int file_index = temp_update_record.file_index;
int root_insert_index = temp_update_record.root_insert_index;
int insert_length = temp_update_record.insert_length;
if(root_insert_index < tid && file_index == tid_file_index){
offset_update += insert_length;
}
}
root_rule_start_offsets_device[tid] += offset_update;
return;
}
__global__ void insert_update_records(struct insert_update_record* insert_update_records_device, int query_size){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= query_size){
return;
}
struct record this_record = records_device[tid];
struct insert_update_record this_update_record = insert_update_records_device[tid];
// to process the records[tid]
for(int i = 0; i < records_size_device; i ++){
struct insert_update_record temp_update_record = insert_update_records_device[i];
if(temp_update_record.file_index == this_update_record.file_index){
if(temp_update_record.insert_offset < this_update_record.insert_offset){
this_record.file_offset += temp_update_record.insert_length;
}
if(temp_update_record.insert_offset < root_rule_start_offsets_device[this_update_record.root_insert_index]){
this_record.rule_start_offset += temp_update_record.insert_length;
}
}
}
records_device[tid] = this_record;
return;
} | 86755ab027409c646f002b88a4e80d0cd9dd2ece.cu | # include <iostream>
# include <stdio.h>
# include <string>
# include <vector>
# include "../../headers/insert_batch/record.h"
# define BLOCK_SIZE 512
extern __constant__ int records_size_device;
extern __constant__ int word_num_device, file_split_num_device;
extern __device__ struct record* records_device;
extern __device__ int* rules_content_device;
extern __device__ int* word_lengths_device;
extern __device__ int* string_start_indexes_device;
extern __device__ char* insert_strings_device;
extern __device__ unsigned int* root_rule_start_offsets_device;
extern __device__ unsigned long long* element_bitmap_device;
extern __device__ int* file_split_indexes_device;
extern __device__ int* rule_split_indexes_device;
extern __device__ int curr_record_num_device;
extern __device__ int* relation_map_device;
struct record* create_record_set(int size){
int malloc_size = sizeof(struct record) * size;
// cout << "malloc size is : " << malloc_size << endl;
struct record* temp_records_device;
cudaError_t stat;
stat = cudaMalloc(&temp_records_device, malloc_size);
cudaMemset(temp_records_device, 0x00, malloc_size);
if(stat){
cout << endl;
cout << "cudamalloc records failed with stat : " << stat << endl;
return NULL;
}
// stat = cudaMemcpyToSymbol(records_device, &temp_records_device, sizeof(temp_records_device));
cudaMemcpyToSymbol(records_size_device, &size, sizeof(int));
return temp_records_device;
}
__device__ int insert_into_rule(int rule_index, int insert_index, int& curr_offset, int file_index, int insert_offset, struct insert_update_record* insert_update_records_device, int tid);
__global__ void insert(int* file_indexes, int* insert_offsets, struct insert_update_record* insert_update_records_device, int query_num){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= query_num){
return;
}
int file_index = file_indexes[tid];
int insert_offset = insert_offsets[tid];
int file_start_index = file_split_indexes_device[file_index];
int file_end_index = file_split_indexes_device[file_index + 1];
// printf("%d %d\n", file_index, file_split_indexes_device[1]);
if((insert_offset < 0) || (root_rule_start_offsets_device[file_end_index - 1] <= insert_offset)){
// printf("%d %d\n", file_end_index - 1, root_rule_start_offsets_device[file_end_index - 1]);
printf("insert invalid. \n");
}
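// Binary search within this file's slice of root_rule_start_offsets_device for the element whose offset range contains insert_offset.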
int search_start = file_start_index;
int search_end = file_end_index;
int search_mid = (search_start + search_end) / 2;
while(root_rule_start_offsets_device[search_mid] > insert_offset ||
root_rule_start_offsets_device[search_mid + 1] <= insert_offset){
if(search_start == search_mid){
break;
}
// int last_search_start = search_start;
// int last_search_end = search_end;
int last_search_mid = search_mid;
if(insert_offset < root_rule_start_offsets_device[search_mid]){
search_end = last_search_mid - 1;
}
else{
search_start = last_search_mid;
}
search_mid = (search_start + search_end) / 2;
}
int root_rule_index = search_mid; // the index in root rule to insert
int element_index = rules_content_device[search_mid];
if(element_index < word_num_device){
// if element is word
struct record temp_record;
if(element_bitmap_device[root_rule_index >> 6] & (1ul << (root_rule_index & 0x3f))){
temp_record.no = relation_map_device[root_rule_index];
}
else{
element_bitmap_device[root_rule_index >> 6] =
element_bitmap_device[root_rule_index >> 6] | (1ul << (root_rule_index & 0x3f));
temp_record.no = -1;
}
temp_record.file_index = file_index;
temp_record.file_offset = insert_offset;
temp_record.rule_index = 0;
temp_record.rule_start_offset = 0;
temp_record.rule_location = root_rule_index;
temp_record.replace_word = element_index; // original word
temp_record.content = &insert_strings_device[string_start_indexes_device[tid]];
int insert_string_length = string_start_indexes_device[tid + 1] - string_start_indexes_device[tid];
temp_record.content_length = insert_string_length;
int record_index = atomicAdd(&curr_record_num_device, 1);
records_device[record_index] = temp_record;
relation_map_device[root_rule_index] = record_index;
struct insert_update_record temp_update_record;
temp_update_record.file_index = file_index;
temp_update_record.insert_offset = insert_offset;
temp_update_record.root_insert_index = root_rule_index;
temp_update_record.insert_length = insert_string_length;
insert_update_records_device[record_index] = temp_update_record;
// no update
}
else{
// if element is rule
int curr_offset = root_rule_start_offsets_device[search_mid];
insert_into_rule(element_index - word_num_device, search_mid, curr_offset, file_index, insert_offset, insert_update_records_device, tid);
}
return;
}
__device__ int insert_into_rule(int rule_index, int insert_index, int& curr_offset, int file_index, int insert_offset, struct insert_update_record* insert_update_records_device, int tid){ // search mid is insert index(root rule index)
// int file_end_index = file_split_indexes_device[file_index + 1];
int rule_start_offset = curr_offset; // curr offset is the start offset of rule to insert
int rule_start_index = rule_split_indexes_device[rule_index];
int rule_end_index = rule_split_indexes_device[rule_index + 1];
for(int i = rule_start_index; i < rule_end_index; i ++){
int element_index = rules_content_device[i];
if(element_index < word_num_device && !(element_bitmap_device[i >> 6] & (1ul << (i & 0x3f)))){
// if is word and bit map not set
int new_offset = curr_offset + word_lengths_device[element_index];
if(insert_offset > new_offset){
// keep searching
curr_offset = new_offset;
continue;
}
else{ // end searching
struct record temp_record;
element_bitmap_device[(i >> 6)] = element_bitmap_device[(i >> 6)] | (1ul << (i & 0x3f));
temp_record.no = -1;
// get record_index
int insert_record_index = atomicAdd(&curr_record_num_device, 1);
relation_map_device[i] = insert_record_index;
temp_record.file_index = file_index;
temp_record.file_offset = insert_offset;
temp_record.rule_start_offset = rule_start_offset;
temp_record.rule_index = rule_index;
temp_record.rule_location = i;
temp_record.replace_word = element_index;
temp_record.content = &insert_strings_device[string_start_indexes_device[tid]];
int insert_string_length = string_start_indexes_device[tid + 1] - string_start_indexes_device[tid];
temp_record.content_length = insert_string_length;
records_device[insert_record_index] = temp_record;
struct insert_update_record temp_update_record;
temp_update_record.file_index = file_index;
temp_update_record.insert_offset = insert_offset;
temp_update_record.root_insert_index = insert_index;
temp_update_record.insert_length = insert_string_length;
insert_update_records_device[insert_record_index] = temp_update_record;
// break;
return 1; // inserted, for end
}
}
else if(element_index < word_num_device && (element_bitmap_device[i >> 6] & (1ul << (i & 0x3f)))){ // if is word but bitmap set
int record_index = relation_map_device[i]; // the last record index of this rule
int content_size = 0;
struct record temp_record = records_device[record_index];
if(temp_record.file_index == file_index &&
temp_record.rule_start_offset == rule_start_offset){ // if in the same location
content_size += temp_record.content_length;
}
while(temp_record.no >= 0){ // view back every record in the same location
record_index = temp_record.no;
// set new temp record
temp_record = records_device[record_index];
if(temp_record.file_index == file_index &&
temp_record.rule_start_offset == rule_start_offset){
content_size += temp_record.content_length;
}
}
content_size += word_lengths_device[records_device[record_index].replace_word];
int new_offset = curr_offset + content_size;
if(insert_offset > new_offset){ // keep searching
curr_offset = new_offset;
continue;
}
else{ // end searching
struct record insert_record;
int insert_record_index = atomicAdd(&curr_record_num_device, 1);
insert_record.no = relation_map_device[i];
relation_map_device[i] = insert_record_index;
insert_record.file_index = file_index;
insert_record.file_offset = insert_offset;
insert_record.rule_start_offset = rule_start_offset;
insert_record.rule_index = rule_index;
insert_record.rule_location = i;
insert_record.replace_word = element_index;
insert_record.content = &insert_strings_device[string_start_indexes_device[tid]];
int insert_string_length = string_start_indexes_device[tid + 1] - string_start_indexes_device[tid];
insert_record.content_length = insert_string_length;
records_device[insert_record_index] = insert_record;
struct insert_update_record temp_update_record;
temp_update_record.file_index = file_index;
temp_update_record.insert_offset = insert_offset;
temp_update_record.root_insert_index = insert_index;
temp_update_record.insert_length = insert_string_length;
insert_update_records_device[insert_record_index] = temp_update_record;
// to update root offset and record offset
// for(int j = insert_index + 1; j < file_end_index; j ++){
// // root_rule_start_offsets_device[j] += insert_string_length;
// atomicAdd(&root_rule_start_offsets_device[j], insert_string_length);
// }
// for(int j = 0; j < insert_record_index; j ++){
// struct record search_record = records_device[j];
// if(file_index == search_record.file_index){
// // if record file index hits, update file offset
// if(search_record.file_offset > insert_offset){
// // records_device[j].file_offset += insert_string_length;
// atomicAdd(&records_device[j].file_offset, insert_string_length);
// }
// if(search_record.rule_start_offset > insert_offset){
// // records.device[j].rule_start_offset += insert_string_length;
// atomicAdd(&records_device[j].rule_start_offset, insert_string_length);
// }
// }
// }
return 1; // record inserted
}
}
else if(element_index >= word_num_device){
// if is still rule
int if_inserted = insert_into_rule(element_index - word_num_device, insert_index, curr_offset, file_index, insert_offset, insert_update_records_device, tid);
if(if_inserted){
return 1;
}
}
}
return 0; // if not inserted
}
__device__ int get_file_index(int root_rule_index){
int file_index = 0;
for(int i = 0; i < file_split_num_device; i ++){
if(root_rule_index < file_split_indexes_device[i + 1]){
break;
}
file_index += 1;
}
return file_index;
}
__global__ void update_root_start_offsets(int range, int record_index, int tid_file_index, struct insert_update_record* insert_update_records_device){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= range){
return;
}
struct insert_update_record temp_update_record = insert_update_records_device[tid];
int file_index = temp_update_record.file_index;
int root_insert_index = temp_update_record.root_insert_index;
int insert_length = temp_update_record.insert_length;
if(root_insert_index < record_index && file_index == tid_file_index){
// root_rule_start_offsets_device[tid] += offset_update;
atomicAdd(&root_rule_start_offsets_device[record_index], insert_length);
}
}
__global__ void insert_update_offsets(struct insert_update_record* insert_update_records_device, int root_size){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= root_size){
return;
}
int tid_file_index = get_file_index(tid);
int offset_update = 0;
// int block_size = BLOCK_SIZE;
// int grid_size = (records_size_device + block_size - 1) / block_size;
// update_root_start_offsets<<<grid_size, block_size>>>(records_size_device, tid, tid_file_index, insert_update_records_device);
// to process the root_rule_start_offset[tid]
for(int i = 0; i < records_size_device; i ++){
struct insert_update_record temp_update_record = insert_update_records_device[i];
int file_index = temp_update_record.file_index;
int root_insert_index = temp_update_record.root_insert_index;
int insert_length = temp_update_record.insert_length;
if(root_insert_index < tid && file_index == tid_file_index){
offset_update += insert_length;
}
}
root_rule_start_offsets_device[tid] += offset_update;
return;
}
__global__ void insert_update_records(struct insert_update_record* insert_update_records_device, int query_size){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= query_size){
return;
}
struct record this_record = records_device[tid];
struct insert_update_record this_update_record = insert_update_records_device[tid];
// to process the records[tid]
for(int i = 0; i < records_size_device; i ++){
struct insert_update_record temp_update_record = insert_update_records_device[i];
if(temp_update_record.file_index == this_update_record.file_index){
if(temp_update_record.insert_offset < this_update_record.insert_offset){
this_record.file_offset += temp_update_record.insert_length;
}
if(temp_update_record.insert_offset < root_rule_start_offsets_device[this_update_record.root_insert_index]){
this_record.rule_start_offset += temp_update_record.insert_length;
}
}
}
records_device[tid] = this_record;
return;
} |
0d3ae4de7c9dfc404964eeadb0b224910a441e2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if __CUDACC_VER_MAJOR__ >= 8
#include <chrono>
#include <condition_variable>
#include <memory>
#include <thread>
#include "scope/scope.hpp"
#include "args.hpp"
#define NAME "Comm_UM_Demand_GPUToHostMt"
typedef std::chrono::time_point<std::chrono::system_clock> time_point_t;
std::condition_variable cv;
std::mutex m;
volatile bool ready = false;
static void cpu_write(char *ptr, const size_t n, const size_t stride,
time_point_t *start, time_point_t *stop) {
{
std::unique_lock<std::mutex> lk(m);
while (!ready)
cv.wait(lk);
}
*start = std::chrono::system_clock::now();
for (size_t i = 0; i < n; i += stride) {
benchmark::DoNotOptimize(ptr[i] = 0);
}
*stop = std::chrono::system_clock::now();
}
template <bool NOOP = false>
__global__ void gpu_write(char *ptr, const size_t count, const size_t stride) {
if (NOOP) {
return;
}
// global ID
const size_t gx = blockIdx.x * blockDim.x + threadIdx.x;
// lane ID 0-31
const size_t lx = gx & 31;
// warp ID
size_t wx = gx / 32;
const size_t numWarps = (gridDim.x * blockDim.x + 32 - 1) / 32;
if (0 == lx) {
for (size_t i = wx * stride; i < count; i += numWarps * stride) {
ptr[i] = i;
}
}
}
auto Comm_UM_Demand_GPUToHost_Mt = [](benchmark::State &state,
const int numa_id, const int cuda_id,
const int num_threads) {
const auto bytes = 1ULL << static_cast<size_t>(state.range(0));
numa::ScopedBind binder(numa_id);
if (PRINT_IF_ERROR(scope::cuda_reset_device(cuda_id))) {
state.SkipWithError(NAME " failed to reset device");
return;
}
if (PRINT_IF_ERROR(hipSetDevice(cuda_id))) {
state.SkipWithError(NAME " failed to set CUDA device");
return;
}
char *ptr = nullptr;
if (PRINT_IF_ERROR(hipMallocManaged(&ptr, bytes))) {
state.SkipWithError(NAME " failed to perform hipMallocManaged");
return;
}
defer(hipFree(ptr));
if (PRINT_IF_ERROR(hipMemset(ptr, 0, bytes))) {
state.SkipWithError(NAME " failed to perform hipMemset");
return;
}
std::vector<std::thread> workers(num_threads);
std::vector<time_point_t> starts(num_threads);
std::vector<time_point_t> stops(num_threads);
for (auto _ : state) {
flush_all(ptr, bytes);
if (PRINT_IF_ERROR(hipMemAdvise(
ptr, bytes, hipMemAdviseSetPreferredLocation, cuda_id))) {
state.SkipWithError(NAME " failed to advise");
return;
}
if (PRINT_IF_ERROR(hipMemPrefetchAsync(ptr, bytes, cuda_id))) {
state.SkipWithError(NAME " failed to prefetch");
return;
}
if (PRINT_IF_ERROR(hipDeviceSynchronize())) {
state.SkipWithError(NAME " failed to synchronize");
return;
}
// touch each page
// gpu_write<<<256, 256>>>(ptr, bytes, 1);
// if (PRINT_IF_ERROR(hipDeviceSynchronize())) {
// state.SkipWithError(NAME " failed to synchronize");
// return;
// }
if (PRINT_IF_ERROR(hipMemAdvise(
ptr, bytes, hipMemAdviseSetPreferredLocation, hipCpuDeviceId))) {
state.SkipWithError(NAME " failed to advise");
return;
}
if (PRINT_IF_ERROR(hipDeviceSynchronize())) {
state.SkipWithError(NAME " failed to synchronize");
return;
}
// Create all threads
for (int i = 0; i < num_threads; ++i) {
workers[i] =
std::thread(cpu_write, &ptr[i * bytes / num_threads],
bytes / num_threads, page_size(), &starts[i], &stops[i]);
}
auto start = std::chrono::system_clock::now();
// unleash threads
{
std::unique_lock<std::mutex> lk(m);
ready = true;
cv.notify_all();
}
for (auto &w : workers) {
w.join();
}
auto stop = std::chrono::system_clock::now();
ready = false;
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(stop - start)
.count();
double maxElapsed = 0;
for (const auto threadStart : starts) {
for (const auto threadStop : stops) {
auto threadElapsed =
std::chrono::duration_cast<std::chrono::duration<double>>(threadStop -
threadStart)
.count();
maxElapsed = ::max(maxElapsed, threadElapsed);
}
}
state.SetIterationTime(maxElapsed);
}
state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(bytes));
state.counters["bytes"] = bytes;
state.counters["cuda_id"] = cuda_id;
state.counters["numa_id"] = numa_id;
};
static void registerer() {
for (auto num_threads : {1, 2, 4, 6, 8, 10}) {
for (int cuda_id : scope::system::cuda_devices()) {
for (auto numa_id : numa::mems()) {
std::string name = std::string(NAME) + "/" + std::to_string(numa_id) +
"/" + std::to_string(cuda_id) + "/" +
std::to_string(num_threads);
benchmark::RegisterBenchmark(name.c_str(), Comm_UM_Demand_GPUToHost_Mt,
numa_id, cuda_id, num_threads)
->SMALL_ARGS()
->UseManualTime()
->MinTime(0.1);
}
}
}
}
SCOPE_AFTER_INIT(registerer, NAME);
#endif // __CUDACC_VER_MAJOR__ >= 8
| 0d3ae4de7c9dfc404964eeadb0b224910a441e2d.cu | #if __CUDACC_VER_MAJOR__ >= 8
#include <chrono>
#include <condition_variable>
#include <memory>
#include <thread>
#include "scope/scope.hpp"
#include "args.hpp"
#define NAME "Comm_UM_Demand_GPUToHostMt"
typedef std::chrono::time_point<std::chrono::system_clock> time_point_t;
std::condition_variable cv;
std::mutex m;
volatile bool ready = false;
static void cpu_write(char *ptr, const size_t n, const size_t stride,
time_point_t *start, time_point_t *stop) {
{
std::unique_lock<std::mutex> lk(m);
while (!ready)
cv.wait(lk);
}
*start = std::chrono::system_clock::now();
for (size_t i = 0; i < n; i += stride) {
benchmark::DoNotOptimize(ptr[i] = 0);
}
*stop = std::chrono::system_clock::now();
}
template <bool NOOP = false>
__global__ void gpu_write(char *ptr, const size_t count, const size_t stride) {
if (NOOP) {
return;
}
// global ID
const size_t gx = blockIdx.x * blockDim.x + threadIdx.x;
// lane ID 0-31
const size_t lx = gx & 31;
// warp ID
size_t wx = gx / 32;
const size_t numWarps = (gridDim.x * blockDim.x + 32 - 1) / 32;
if (0 == lx) {
for (size_t i = wx * stride; i < count; i += numWarps * stride) {
ptr[i] = i;
}
}
}
auto Comm_UM_Demand_GPUToHost_Mt = [](benchmark::State &state,
const int numa_id, const int cuda_id,
const int num_threads) {
const auto bytes = 1ULL << static_cast<size_t>(state.range(0));
numa::ScopedBind binder(numa_id);
if (PRINT_IF_ERROR(scope::cuda_reset_device(cuda_id))) {
state.SkipWithError(NAME " failed to reset device");
return;
}
if (PRINT_IF_ERROR(cudaSetDevice(cuda_id))) {
state.SkipWithError(NAME " failed to set CUDA device");
return;
}
char *ptr = nullptr;
if (PRINT_IF_ERROR(cudaMallocManaged(&ptr, bytes))) {
state.SkipWithError(NAME " failed to perform cudaMallocManaged");
return;
}
defer(cudaFree(ptr));
if (PRINT_IF_ERROR(cudaMemset(ptr, 0, bytes))) {
state.SkipWithError(NAME " failed to perform cudaMemset");
return;
}
std::vector<std::thread> workers(num_threads);
std::vector<time_point_t> starts(num_threads);
std::vector<time_point_t> stops(num_threads);
for (auto _ : state) {
flush_all(ptr, bytes);
if (PRINT_IF_ERROR(cudaMemAdvise(
ptr, bytes, cudaMemAdviseSetPreferredLocation, cuda_id))) {
state.SkipWithError(NAME " failed to advise");
return;
}
if (PRINT_IF_ERROR(cudaMemPrefetchAsync(ptr, bytes, cuda_id))) {
state.SkipWithError(NAME " failed to prefetch");
return;
}
if (PRINT_IF_ERROR(cudaDeviceSynchronize())) {
state.SkipWithError(NAME " failed to synchronize");
return;
}
// touch each page
// gpu_write<<<256, 256>>>(ptr, bytes, 1);
// if (PRINT_IF_ERROR(cudaDeviceSynchronize())) {
// state.SkipWithError(NAME " failed to synchronize");
// return;
// }
if (PRINT_IF_ERROR(cudaMemAdvise(
ptr, bytes, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId))) {
state.SkipWithError(NAME " failed to advise");
return;
}
if (PRINT_IF_ERROR(cudaDeviceSynchronize())) {
state.SkipWithError(NAME " failed to synchronize");
return;
}
// Create all threads
for (int i = 0; i < num_threads; ++i) {
workers[i] =
std::thread(cpu_write, &ptr[i * bytes / num_threads],
bytes / num_threads, page_size(), &starts[i], &stops[i]);
}
auto start = std::chrono::system_clock::now();
// unleash threads
{
std::unique_lock<std::mutex> lk(m);
ready = true;
cv.notify_all();
}
for (auto &w : workers) {
w.join();
}
auto stop = std::chrono::system_clock::now();
ready = false;
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(stop - start)
.count();
double maxElapsed = 0;
for (const auto threadStart : starts) {
for (const auto threadStop : stops) {
auto threadElapsed =
std::chrono::duration_cast<std::chrono::duration<double>>(threadStop -
threadStart)
.count();
maxElapsed = std::max(maxElapsed, threadElapsed);
}
}
state.SetIterationTime(maxElapsed);
}
state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(bytes));
state.counters["bytes"] = bytes;
state.counters["cuda_id"] = cuda_id;
state.counters["numa_id"] = numa_id;
};
static void registerer() {
for (auto num_threads : {1, 2, 4, 6, 8, 10}) {
for (int cuda_id : scope::system::cuda_devices()) {
for (auto numa_id : numa::mems()) {
std::string name = std::string(NAME) + "/" + std::to_string(numa_id) +
"/" + std::to_string(cuda_id) + "/" +
std::to_string(num_threads);
benchmark::RegisterBenchmark(name.c_str(), Comm_UM_Demand_GPUToHost_Mt,
numa_id, cuda_id, num_threads)
->SMALL_ARGS()
->UseManualTime()
->MinTime(0.1);
}
}
}
}
SCOPE_AFTER_INIT(registerer, NAME);
#endif // __CUDACC_VER_MAJOR__ >= 8
|
d37f10233a0031226274a7be4113dc4e124320cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********************************************************************/
// A now optimized Multigrid Solver for the Heat Equation //
// Course Material for HPCSE-II, Spring 2019, ETH Zurich //
// Authors: Sergio Martin, Georgios Arampatzis //
// License: Use if you like, but give us credit. //
/**********************************************************************/
#include <stdio.h>
#include <math.h>
#include <limits>
#include "heat2d_gpu.hpp"
#include "string.h"
#include <chrono>
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess !=err )
{
fprintf(stderr," CUDA Error: %s: %s. \n",msg, hipGetErrorString(err));
exit(-1);
}
}
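// Helpers that copy the per-level 2D host arrays (U, Un, f, Res) row by row to/from the flattened device buffers.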
void custommemcpyToHost(gridLevel* g, size_t l){
for (size_t i = 0; i < g[l].N; i++){
hipMemcpy(g[l].f[i], &g[l].df[i*g[l].N], g[l].N*sizeof(double),hipMemcpyDeviceToHost); checkCUDAError("cpy f To Host");
hipMemcpy(g[l].U[i], &g[l].dU[i*g[l].N], g[l].N*sizeof(double),hipMemcpyDeviceToHost); checkCUDAError("cpy U To Host");
hipMemcpy(g[l].Un[i], &g[l].dUn[i*g[l].N], g[l].N*sizeof(double),hipMemcpyDeviceToHost); checkCUDAError("cpy Un To Host");
hipMemcpy(g[l].Res[i], &g[l].dRes[i*g[l].N], g[l].N*sizeof(double),hipMemcpyDeviceToHost); checkCUDAError("cpy Res To Host");
hipDeviceSynchronize();
}
}
void custommemcpyToDevice(gridLevel* g, size_t l){
for (size_t i = 0; i < g[l].N; i++){
hipMemcpy(&g[l].df[i*g[l].N], g[l].f[i], g[l].N*sizeof(double),hipMemcpyHostToDevice); checkCUDAError("cpy f To Device");
hipMemcpy(&g[l].dU[i*g[l].N], g[l].U[i], g[l].N*sizeof(double),hipMemcpyHostToDevice); checkCUDAError("cpy U To Device");
hipMemcpy(&g[l].dUn[i*g[l].N], g[l].Un[i], g[l].N*sizeof(double),hipMemcpyHostToDevice); checkCUDAError("cpy Un To Device");
hipMemcpy(&g[l].dRes[i*g[l].N],g[l].Res[i], g[l].N*sizeof(double),hipMemcpyHostToDevice); checkCUDAError("cpy Res To Device");
hipDeviceSynchronize();
}
}
pointsInfo __p;
int main(int argc, char* argv[])
{
double tolerance = 1e-0; // L2 Difference Tolerance before reaching convergence.
size_t N0 = 10; // 2^N0 + 1 elements per side
// Multigrid parameters -- Find the best configuration!
size_t gridCount = N0-1; // Number of Multigrid levels to use
size_t downRelaxations = 5; // Number of Relaxations before restriction
size_t upRelaxations = 0; // Number of Relaxations after prolongation
//I couldn't find a much better configuration, as the times are so small already
gridLevel* g = generateInitialConditions(N0, gridCount);
// gridLevel* d_g;
// hipMalloc(&d_g, sizeof(gridLevel) * gridCount);checkCUDAError("Alloc error");
// hipMemcpy(d_g,&g,sizeof(gridLevel) * gridCount, hipMemcpyHostToDevice);checkCUDAError("Memcpy error");
// hipDeviceSynchronize();
//for (size_t grid = 1; grid < gridCount; grid++) custommemcpyToDevice(g,grid);
auto startTime = std::chrono::system_clock::now();
while (g[0].L2NormDiff > tolerance) // Multigrid solver start
{
applyJacobi(g, 0, downRelaxations); // Relaxing the finest grid first
calculateResidual(g, 0); // Calculating Initial Residual
for (size_t grid = 1; grid < gridCount; grid++) // Going down the V-Cycle
{
applyRestriction(g, grid); // Restricting the residual to the coarser grid's solution vector (f)
applyJacobi(g, grid, downRelaxations); // Smoothing coarser level
calculateResidual(g, grid); // Calculating Coarse Grid Residual
}
for (size_t grid = gridCount-1; grid > 0; grid--) // Going up the V-Cycle
{
applyProlongation(g, grid); // Prolonging solution for coarser level up to finer level
applyJacobi(g, grid, upRelaxations); // Smoothing finer level
}
//custommemcpyToHost(g,grid);
printf("L2Norm: %.4f\n", g[0].L2Norm);
calculateL2Norm(g, 0); // Calculating Residual L2 Norm
} // Multigrid solver end
hipDeviceSynchronize();
auto endTime = std::chrono::system_clock::now();
totalTime = std::chrono::duration<double>(endTime-startTime).count();
printTimings(gridCount);
printf("L2Norm: %.4f\n", g[0].L2Norm);
freeGrids(g, gridCount);
return 0;
}
//kernel function. Works only on the assigned thread Eex9Koh4pha-
__global__ void applyJacobiKernel( size_t N, double h1, double h2, double* Un, double* f, double* U)
{
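// One thread per interior grid point: Jacobi update U = (sum of the four Un neighbours + f*h^2) * 0.25.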
size_t myRow = blockIdx.y*blockDim.y + threadIdx.y;
size_t myCol = blockIdx.x*blockDim.x + threadIdx.x;
if(myRow >= N-1 || myCol >= N-1)return;
if(myRow < 1 || myCol < 1 )return;
U[myRow*N + myCol] = (Un[(myRow-1)*N + myCol]+Un[(myRow+1)*N + myCol]+Un[myRow*N + myCol-1]+Un[myRow*N + myCol+1]+f[myRow*N+myCol]*h2)*h1;
//g[l].U[myRow][myCol] = (g[l].Un[myRow-1][myCol] + g[l].Un[myRow+1][myCol] + g[l].Un[myRow][myCol-1] + g[l].Un[myRow][myCol+1] + g[l].f[myRow][myCol]*h2)*h1;
//__syncthreads();
}
void applyJacobi(gridLevel* g, size_t l, size_t relaxations)
{
//custommemcpyToDevice(g,l);
int N = g[l].N;
dim3 threadsPerBlock=dim3(32,32);
dim3 blocksPerGrid;
if(N<32) {blocksPerGrid=dim3(ceil(N),ceil(N));}
else {blocksPerGrid=dim3(ceil(N/32),ceil(N/32));}//depends on the size of the problem. int or dim3
auto t0 = std::chrono::system_clock::now();
double h1 = 0.25;
double h2 = g[l].h*g[l].h;
for (size_t r = 0; r < relaxations; r++)
{
//*
double* tmp = g[l].dUn; g[l].dUn = g[l].dU; g[l].dU = tmp;
hipLaunchKernelGGL(( applyJacobiKernel), dim3(blocksPerGrid),dim3(threadsPerBlock) , 0, 0, N, h1, h2, g[l].dUn, g[l].df, g[l].dU);checkCUDAError("apply Jacobi error");
//hipDeviceSynchronize();
//*/
/*
for (size_t i = 1; i < g[l].N-1; i++)
for (size_t j = 1; j < g[l].N-1; j++) // Perform a Jacobi Iteration
g[l].U[i][j] = (g[l].Un[i-1][j] + g[l].Un[i+1][j] + g[l].Un[i][j-1] + g[l].Un[i][j+1] + g[l].f[i][j]*h2)*h1;
//*/
}
hipDeviceSynchronize();
auto t1 = std::chrono::system_clock::now();
smoothingTime[l] += std::chrono::duration<double>(t1-t0).count();
//custommemcpyToHost(g, l);
}
//kernel fct. Eex9Koh4pha-
__global__ void calculateResidualKernel( size_t N, double h2, double* Res, double* f, double* U)
{
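// One thread per interior grid point: Res = f + (sum of the four U neighbours - 4*U) / h^2.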
size_t myRow = blockIdx.y*blockDim.y + threadIdx.y;
size_t myCol = blockIdx.x*blockDim.x + threadIdx.x;
if(myRow >= N-1 || myCol >= N-1)return;
if(myRow < 1 || myCol < 1 )return;
Res[myRow*N + myCol] = f[myRow*N + myCol] + (U[(myRow-1)*N+myCol] + U[(myRow+1)*N + myCol] - 4*U[myRow*N +myCol] + U[myRow*N +myCol-1] + U[myRow*N +myCol+1]) * h2;
}
void calculateResidual(gridLevel* g, size_t l)
{
//custommemcpyToDevice(g,l);
int N = g[l].N;
dim3 threadsPerBlock=dim3(32,32);
dim3 blocksPerGrid;
if(N<32) {blocksPerGrid=dim3(ceil(N),ceil(N));}
else {blocksPerGrid=dim3(ceil(N/32),ceil(N/32));}
auto t0 = std::chrono::system_clock::now();
double h2 = 1.0 / pow(g[l].h,2);
/*
for (size_t i = 1; i < g[l].N-1; i++)
for (size_t j = 1; j < g[l].N-1; j++)
g[l].Res[i][j] = g[l].f[i][j] + (g[l].U[i-1][j] + g[l].U[i+1][j] - 4*g[l].U[i][j] + g[l].U[i][j-1] + g[l].U[i][j+1]) * h2;
*/
//*
hipLaunchKernelGGL(( calculateResidualKernel), dim3(blocksPerGrid),dim3(threadsPerBlock) , 0, 0, N, h2, g[l].dRes, g[l].df, g[l].dU);checkCUDAError("Calculate Residual Kernel error");
// hipDeviceSynchronize();
//*/
hipDeviceSynchronize();
auto t1 = std::chrono::system_clock::now();
residualTime[l] += std::chrono::duration<double>(t1-t0).count();
//custommemcpyToHost(g, l);
}
//kernel fct. Eex9Koh4pha-
__global__ void calculateSquareKernel(double *Res, size_t N)
{
size_t i = blockIdx.y*blockDim.y + threadIdx.y;
size_t j = blockIdx.x*blockDim.x + threadIdx.x;
if(i >= N || j >= N)return;
Res[i*N+j] = Res[i*N+j]*Res[i*N+j];
}
#define BLOCKSIZE 1024
//from the class example 3
__global__ void reduce(double* dVec, double* dAux, size_t N,size_t N2)
{
__shared__ double sdata[BLOCKSIZE]; // partial sums of the double-precision residuals
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + threadIdx.x;
if(i> N2) return;
sdata[tid] = dVec[i];
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s) sdata[tid] += sdata[tid + s];
__syncthreads();
}
if (tid == 0) dAux[blockIdx.x] = sdata[0];
}
//Eex9Koh4pha-
void calculateL2Norm(gridLevel* g, size_t l)
{
//double tmp = 0.0;
auto t0 = std::chrono::system_clock::now();
//*custommemcpyToDevice(g,l);
//*
int N = g[l].N;
dim3 threadsPerBlock=dim3(32,32);
dim3 blocksPerGrid;
if(N<32) {blocksPerGrid=dim3(ceil(N),ceil(N));}
else {blocksPerGrid=dim3(ceil(N/32),ceil(N/32));}
hipLaunchKernelGGL(( calculateSquareKernel), dim3(blocksPerGrid),dim3(threadsPerBlock) , 0, 0, g[l].dRes, N);checkCUDAError("Calculate square Kernel error");
hipDeviceSynchronize();
double *dtemp;
hipMalloc(&dtemp, sizeof(double)*N*N); checkCUDAError("Error allocating dtemp");
hipMemset(dtemp, 0.0, sizeof(double)*N*N);checkCUDAError("Error memset dtemp");
double *dRescpy; //sizeof(double)*BLOCKSIZE*BLOCKSIZE*4
hipMalloc(&dRescpy, sizeof(double)*N*N); checkCUDAError("Error allocating dtemp");
//hipMemset(dRescpy, 0.0, sizeof(double)*N*N);checkCUDAError("Error memset dtemp");
hipDeviceSynchronize();
hipMemcpy(dRescpy, g[l].dRes, sizeof(double)*N*N,hipMemcpyDeviceToDevice); checkCUDAError("cpy Res To Host in L2");
hipDeviceSynchronize();
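// Repeatedly reduce the squared residuals in passes of BLOCKSIZE, swapping buffers each pass; the accumulated sum ends up in dRescpy[0].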
for (size_t n =BLOCKSIZE*BLOCKSIZE*4; n > 1; n = n / BLOCKSIZE)
{
size_t bSize = BLOCKSIZE; if (bSize > n) bSize = n;
size_t gSize = ceil((double)n / (double)BLOCKSIZE); if (bSize > n) gSize = 1;
printf("bSize: %lu - gSize: %lu\n", bSize, gSize);
hipLaunchKernelGGL(( reduce), dim3(gSize), dim3(bSize),BLOCKSIZE*sizeof(unsigned int), 0, dRescpy, dtemp, n, N*N);
double *tmp = dRescpy; dRescpy = dtemp; dtemp = tmp;
}
hipDeviceSynchronize();
/*
for (size_t i = 0; i < g[l].N; i++)
for (size_t j = 0; j < g[l].N; j++)
g[l].Res[i][j] = g[l].Res[i][j]*g[l].Res[i][j];
//*/
/*
for (size_t i = 0; i < g[l].N; i++)
for (size_t j = 0; j < g[l].N; j++)
tmp += temp[i*N+j];
//*/
//printf("%d \n", tmp);
double result = 0.0;
hipMemcpy(&result, dRescpy, sizeof(double), hipMemcpyDeviceToHost); checkCUDAError("error copying result");
//hipMemcpy(&result2, dRescpy, sizeof(double), hipMemcpyDeviceToHost); checkCUDAError("error copying result2");
hipDeviceSynchronize();
g[l].L2Norm = sqrt(result);
g[l].L2NormDiff = fabs(g[l].L2NormPrev - g[l].L2Norm);
g[l].L2NormPrev = g[l].L2Norm;
hipDeviceSynchronize();
auto t1 = std::chrono::system_clock::now();
L2NormTime[l] += std::chrono::duration<double>(t1-t0).count();
hipFree(dtemp);
hipFree(dRescpy);
//custommemcpyToDevice(g,l);
}
//Eex9Koh4pha-
//kernel fct. Eex9Koh4pha-
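// Full-weighting restriction: each coarse-grid f value is a 9-point weighted average (weights 1/2/4, scaled by 1/16) of the fine-grid residual.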
__global__ void applyRestrictionKernel( size_t N, size_t N2, double* Res, double* f)
{
size_t i = blockIdx.y*blockDim.y + threadIdx.y;
size_t j = blockIdx.x*blockDim.x + threadIdx.x;
double c =0.0;
if(i >= N-1 || j >= N-1)return;
if(i < 1 || j < 1 )return;
c = ( 1.0*( Res[(2*i-1)*N2 +(2*j-1)] + Res[(2*i-1)*N2 +(2*j+1)] + Res[(2*i+1)*N2 +(2*j-1)] + Res[(2*i+1)*N2 +(2*j+1)] ) +
2.0*( Res[(2*i-1)*N2 +2*j] + Res[(2*i)*N2 +(2*j-1)] + Res[(2*i+1)*N2 +2*j] + Res[(2*i)*N2 +(2*j+1)] ) +
4.0*( Res[(2*i)*N2 +(2*j)] ) ) * 0.0625;
f[i*N +j]=c;
}
void applyRestriction(gridLevel* g, size_t l)
{
//custommemcpyToDevice(g, l);custommemcpyToDevice(g, l-1);
int N = g[l].N;
int N2 = g[l-1].N;//
double *temp;
hipMalloc(&temp, sizeof(double) * N2 * N2); checkCUDAError("Alloc error temp");
hipMemcpy(temp,g[l-1].dRes, N2*N2*sizeof(double),hipMemcpyDeviceToDevice); checkCUDAError("cpy temp");
dim3 threadsPerBlock=dim3(32,32);
dim3 blocksPerGrid;
if(N<32) {blocksPerGrid=dim3(ceil(N),ceil(N));}
else {blocksPerGrid=dim3(ceil(N/32),ceil(N/32));}
auto t0 = std::chrono::system_clock::now();
/*
for (size_t i = 1; i < g[l].N-1; i++)
for (size_t j = 1; j < g[l].N-1; j++)
g[l].f[i][j] = ( 1.0*( g[l-1].Res[2*i-1][2*j-1] + g[l-1].Res[2*i-1][2*j+1] + g[l-1].Res[2*i+1][2*j-1] + g[l-1].Res[2*i+1][2*j+1] ) +
2.0*( g[l-1].Res[2*i-1][2*j] + g[l-1].Res[2*i][2*j-1] + g[l-1].Res[2*i+1][2*j] + g[l-1].Res[2*i][2*j+1] ) +
4.0*( g[l-1].Res[2*i][2*j] ) ) * 0.0625;
for (size_t i = 0; i < g[l].N; i++)
for (size_t j = 0; j < g[l].N; j++) // Resetting U vector for the coarser level before smoothing -- Find out if this is really necessary.
g[l].U[i][j] = 0;
//*/
//*
hipLaunchKernelGGL(( applyRestrictionKernel), dim3(blocksPerGrid),dim3(threadsPerBlock) , 0, 0, N, N2, temp, g[l].df);checkCUDAError("Apply Restriction Kernel error");
//*/
hipDeviceSynchronize();
hipMemset(g[l].dU, 0.0, sizeof(double)* N*N);// Resetting U vector for the coarser level before smoothing
auto t1 = std::chrono::system_clock::now();
restrictionTime[l] += std::chrono::duration<double>(t1-t0).count();
//custommemcpyToHost(g, l);
}
//kernel fct. Eex9Koh4pha-
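// Bilinear prolongation split over four kernels: coincident points, points between rows, points between columns, and cell centres of the finer grid.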
__global__ void applyProlongationKernel1( size_t N, size_t N2, double* U, double* Um)
{
size_t i = blockIdx.y*blockDim.y + threadIdx.y;
size_t j = blockIdx.x*blockDim.x + threadIdx.x;
if( i > 0 && j > 0 && i < N-1 && j < N-1) Um[2*i * N2 + 2*j] += U[i* N + j];
}
__global__ void applyProlongationKernel2( size_t N, size_t N2, double* U, double* Um)
{
size_t i = blockIdx.y*blockDim.y + threadIdx.y;
size_t j = blockIdx.x*blockDim.x + threadIdx.x;
if( i > 0 && j > 0 && i < N && j < N-1) Um[(2*i-1)* N2 + 2*j] += ( U[(i-1)* N + j] + U[i* N + j] ) *0.5;
}
__global__ void applyProlongationKernel3( size_t N, size_t N2, double* U, double* Um)
{
size_t i = blockIdx.y*blockDim.y + threadIdx.y;
size_t j = blockIdx.x*blockDim.x + threadIdx.x;
if( i > 0 && j > 0 && i < N-1 && j < N) Um[2*i* N2 + (2*j-1)] += ( U[i* N + (j-1)] + U[i* N + j] ) *0.5;
}
__global__ void applyProlongationKernel4( size_t N, size_t N2, double* U, double* Um)
{
size_t i = blockIdx.y*blockDim.y + threadIdx.y;
size_t j = blockIdx.x*blockDim.x + threadIdx.x;
if( i > 0 && j > 0 && i < N && j < N) Um[(2*i-1)* N2 + (2*j-1)] += ( U[(i-1)* N + (j-1)] + U[(i-1)* N + j] + U[i* N + (j-1)] + U[i* N + j] ) *0.25;
}
void applyProlongation(gridLevel* g, size_t l)
{
//custommemcpyToDevice(g, l);
//custommemcpyToDevice(g, l-1);
int N = g[l].N;
int N2 = g[l-1].N;
double *temp;
hipMalloc(&temp, sizeof(double) * g[l-1].N * g[l-1].N);checkCUDAError("Alloc error temp");
hipMemcpy(temp, g[l-1].dU, g[l-1].N*g[l-1].N*sizeof(double),hipMemcpyDeviceToDevice); checkCUDAError("cpy temp dres");
dim3 threadsPerBlock=dim3(32,32);
dim3 blocksPerGrid;
if(N<32) {blocksPerGrid=dim3(ceil(N),ceil(N));}
else {blocksPerGrid=dim3(ceil(N/32),ceil(N/32));}
//2 pointers to U from 2 different levels of g
//Kernel size is defined by g[l] not g[l-1]
auto t0 = std::chrono::system_clock::now();
/*
for (size_t i = 1; i < g[l].N-1; i++)
for (size_t j = 1; j < g[l].N-1; j++)
g[l-1].U[2*i][2*j] += g[l].U[i][j];
for (size_t i = 1; i < g[l].N; i++)
for (size_t j = 1; j < g[l].N-1; j++)
g[l-1].U[2*i-1][2*j] += ( g[l].U[i-1][j] + g[l].U[i][j] ) *0.5;
for (size_t i = 1; i < g[l].N-1; i++)
for (size_t j = 1; j < g[l].N; j++)
g[l-1].U[2*i][2*j-1] += ( g[l].U[i][j-1] + g[l].U[i][j] ) *0.5;
for (size_t i = 1; i < g[l].N; i++)
for (size_t j = 1; j < g[l].N; j++)
g[l-1].U[2*i-1][2*j-1] += ( g[l].U[i-1][j-1] + g[l].U[i-1][j] + g[l].U[i][j-1] + g[l].U[i][j] ) *0.25;
//*/
hipLaunchKernelGGL(( applyProlongationKernel1), dim3(blocksPerGrid),dim3(threadsPerBlock) , 0, 0, N, N2, g[l].dU, temp);
hipLaunchKernelGGL(( applyProlongationKernel2), dim3(blocksPerGrid),dim3(threadsPerBlock) , 0, 0, N, N2, g[l].dU, temp);
hipLaunchKernelGGL(( applyProlongationKernel3), dim3(blocksPerGrid),dim3(threadsPerBlock) , 0, 0, N, N2, g[l].dU, temp);
hipLaunchKernelGGL(( applyProlongationKernel4), dim3(blocksPerGrid),dim3(threadsPerBlock) , 0, 0, N, N2, g[l].dU, temp);
hipDeviceSynchronize();
hipMemcpy(g[l-1].dU, temp, g[l-1].N*g[l-1].N*sizeof(double),hipMemcpyDeviceToDevice); checkCUDAError("cpy temp");
hipDeviceSynchronize();
auto t1 = std::chrono::system_clock::now();
prolongTime[l] += std::chrono::duration<double>(t1-t0).count();
//custommemcpyToHost(g, l-1);
}
gridLevel* generateInitialConditions(size_t N0, size_t gridCount)
{
// Default values:
__p.nCandles = 4;
std::vector<double> pars;
pars.push_back(0.228162);
pars.push_back(0.226769);
pars.push_back(0.437278);
pars.push_back(0.0492324);
pars.push_back(0.65915);
pars.push_back(0.499616);
pars.push_back(0.59006);
pars.push_back(0.0566329);
pars.push_back(0.0186672);
pars.push_back(0.894063);
pars.push_back(0.424229);
pars.push_back(0.047725);
pars.push_back(0.256743);
pars.push_back(0.754483);
pars.push_back(0.490461);
pars.push_back(0.0485152);
// Allocating Timers
smoothingTime = (double*) calloc (gridCount, sizeof(double));
residualTime = (double*) calloc (gridCount, sizeof(double));
restrictionTime = (double*) calloc (gridCount, sizeof(double));
prolongTime = (double*) calloc (gridCount, sizeof(double));
L2NormTime = (double*) calloc (gridCount, sizeof(double));
// Allocating Grids
gridLevel* g = (gridLevel*) malloc(sizeof(gridLevel) * gridCount);
for (size_t i = 0; i < gridCount; i++)
{
g[i].N = pow(2, N0-i) + 1;
g[i].h = 1.0/(g[i].N-1);
g[i].U = (double**) malloc(sizeof(double*) * g[i].N); for (size_t j = 0; j < g[i].N ; j++) g[i].U[j] = (double*) malloc(sizeof(double) * g[i].N);
g[i].Un = (double**) malloc(sizeof(double*) * g[i].N); for (size_t j = 0; j < g[i].N ; j++) g[i].Un[j] = (double*) malloc(sizeof(double) * g[i].N);
g[i].Res = (double**) malloc(sizeof(double*) * g[i].N); for (size_t j = 0; j < g[i].N ; j++) g[i].Res[j] = (double*) malloc(sizeof(double) * g[i].N);
g[i].f = (double**) malloc(sizeof(double*) * g[i].N); for (size_t j = 0; j < g[i].N ; j++) g[i].f[j] = (double*) malloc(sizeof(double) * g[i].N);
hipMalloc(&g[i].dU, sizeof(double) * g[i].N * g[i].N);checkCUDAError("Alloc error d_U");
hipMalloc(&g[i].dRes, sizeof(double) * g[i].N * g[i].N);checkCUDAError("Alloc error d_Res");
hipMalloc(&g[i].dUn, sizeof(double) * g[i].N * g[i].N);checkCUDAError("Alloc error d_Un");
hipMalloc(&g[i].df, sizeof(double) * g[i].N * g[i].N);checkCUDAError("Alloc error d_f");
hipDeviceSynchronize();
g[i].L2Norm = 0.0;
g[i].L2NormPrev = std::numeric_limits<double>::max();
g[i].L2NormDiff = std::numeric_limits<double>::max();
}
// Initial Guess
for (size_t i = 0; i < g[0].N; i++) for (size_t j = 0; j < g[0].N; j++) g[0].U[i][j] = 1.0;
// Boundary Conditions
for (size_t i = 0; i < g[0].N; i++) g[0].U[0][i] = 0.0;
for (size_t i = 0; i < g[0].N; i++) g[0].U[g[0].N-1][i] = 0.0;
for (size_t i = 0; i < g[0].N; i++) g[0].U[i][0] = 0.0;
for (size_t i = 0; i < g[0].N; i++) g[0].U[i][g[0].N-1] = 0.0;
// F
for (size_t i = 0; i < g[0].N; i++){
for (size_t j = 0; j < g[0].N; j++)
{
double h = 1.0/(g[0].N-1);
double x = i*h;
double y = j*h;
g[0].f[i][j] = 0.0;
for (size_t c = 0; c < __p.nCandles; c++)
{
double c3 = pars[c*4 + 0]; // x0
double c4 = pars[c*4 + 1]; // y0
double c1 = pars[c*4 + 2]; c1 *= 100000;// intensity
double c2 = pars[c*4 + 3]; c2 *= 0.01;// Width
g[0].f[i][j] += c1*exp(-(pow(c4 - y, 2) + pow(c3 - x, 2)) / c2);
}
}
hipMemcpy(&g[0].df[i*g[0].N], g[0].f[i],g[0].N*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(&g[0].dU[i*g[0].N], g[0].U[i],g[0].N*sizeof(double),hipMemcpyHostToDevice);
}
hipMemset(g[0].dUn, 0, g[0].N*g[0].N*sizeof(double));
hipMemset(g[0].dRes, 0, g[0].N*g[0].N*sizeof(double));
return g;
}
void freeGrids(gridLevel* g, size_t gridCount)
{
for (size_t i = 0; i < gridCount; i++)
{
for (size_t j = 0; j < g[i].N ; j++) free(g[i].U[j]);
for (size_t j = 0; j < g[i].N ; j++) free(g[i].Un[j]);
for (size_t j = 0; j < g[i].N ; j++) free(g[i].f[j]);
for (size_t j = 0; j < g[i].N ; j++) free(g[i].Res[j]);
free(g[i].U);
free(g[i].Un);
free(g[i].f);
free(g[i].Res);
hipFree(g[i].dU);
hipFree(g[i].dUn);
hipFree(g[i].df);
hipFree(g[i].dRes);
}
free(g);
}
void printTimings(size_t gridCount)
{
double* timePerGrid = (double*) calloc (sizeof(double), gridCount);
double totalSmoothingTime = 0.0;
double totalResidualTime = 0.0;
double totalRestrictionTime = 0.0;
double totalProlongTime = 0.0;
double totalL2NormTime = 0.0;
for (size_t i = 0; i < gridCount; i++) timePerGrid[i] = smoothingTime[i] + residualTime[i] + restrictionTime[i] + prolongTime[i] + L2NormTime[i];
for (size_t i = 0; i < gridCount; i++) totalSmoothingTime += smoothingTime[i];
for (size_t i = 0; i < gridCount; i++) totalResidualTime += residualTime[i];
for (size_t i = 0; i < gridCount; i++) totalRestrictionTime += restrictionTime[i];
for (size_t i = 0; i < gridCount; i++) totalProlongTime += prolongTime[i];
for (size_t i = 0; i < gridCount; i++) totalL2NormTime += L2NormTime[i];
double totalMeasured = totalSmoothingTime + totalResidualTime + totalRestrictionTime + totalProlongTime + totalL2NormTime;
printf(" Time (s) "); for (size_t i = 0; i < gridCount; i++) printf("Grid%lu ", i); printf(" Total \n");
printf("-------------|-"); for (size_t i = 0; i < gridCount; i++) printf("--------"); printf("|---------\n");
printf("Smoothing | "); for (size_t i = 0; i < gridCount; i++) printf("%2.3f ", smoothingTime[i]); printf("| %2.3f \n", totalSmoothingTime);
printf("Residual | "); for (size_t i = 0; i < gridCount; i++) printf("%2.3f ", residualTime[i]); printf("| %2.3f \n", totalResidualTime);
printf("Restriction | "); for (size_t i = 0; i < gridCount; i++) printf("%2.3f ", restrictionTime[i]); printf("| %2.3f \n", totalRestrictionTime);
printf("Prolongation | "); for (size_t i = 0; i < gridCount; i++) printf("%2.3f ", prolongTime[i]); printf("| %2.3f \n", totalProlongTime);
printf("L2Norm | "); for (size_t i = 0; i < gridCount; i++) printf("%2.3f ", L2NormTime[i]); printf("| %2.3f \n", totalL2NormTime);
printf("-------------|-"); for (size_t i = 0; i < gridCount; i++) printf("--------"); printf("|---------\n");
printf("Total | "); for (size_t i = 0; i < gridCount; i++) printf("%2.3f ", timePerGrid[i]); printf("| %2.3f \n", totalMeasured);
printf("-------------|-"); for (size_t i = 0; i < gridCount; i++) printf("--------"); printf("|---------\n");
printf("\n");
printf("Running Time : %.3fs\n", totalTime);
}
| d37f10233a0031226274a7be4113dc4e124320cf.cu | /**********************************************************************/
// A now optimized Multigrid Solver for the Heat Equation //
// Course Material for HPCSE-II, Spring 2019, ETH Zurich //
// Authors: Sergio Martin, Georgios Arampatzis //
// License: Use if you like, but give us credit. //
/**********************************************************************/
#include <stdio.h>
#include <math.h>
#include <limits>
#include "heat2d_gpu.hpp"
#include "string.h"
#include <chrono>
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess !=err )
{
fprintf(stderr," CUDA Error: %s: %s. \n",msg, cudaGetErrorString(err));
exit(-1);
}
}
void custommemcpyToHost(gridLevel* g, size_t l){
for (size_t i = 0; i < g[l].N; i++){
cudaMemcpy(g[l].f[i], &g[l].df[i*g[l].N], g[l].N*sizeof(double),cudaMemcpyDeviceToHost); checkCUDAError("cpy f To Host");
cudaMemcpy(g[l].U[i], &g[l].dU[i*g[l].N], g[l].N*sizeof(double),cudaMemcpyDeviceToHost); checkCUDAError("cpy U To Host");
cudaMemcpy(g[l].Un[i], &g[l].dUn[i*g[l].N], g[l].N*sizeof(double),cudaMemcpyDeviceToHost); checkCUDAError("cpy Un To Host");
cudaMemcpy(g[l].Res[i], &g[l].dRes[i*g[l].N], g[l].N*sizeof(double),cudaMemcpyDeviceToHost); checkCUDAError("cpy Res To Host");
cudaDeviceSynchronize();
}
}
void custommemcpyToDevice(gridLevel* g, size_t l){
for (size_t i = 0; i < g[l].N; i++){
cudaMemcpy(&g[l].df[i*g[l].N], g[l].f[i], g[l].N*sizeof(double),cudaMemcpyHostToDevice); checkCUDAError("cpy f To Device");
cudaMemcpy(&g[l].dU[i*g[l].N], g[l].U[i], g[l].N*sizeof(double),cudaMemcpyHostToDevice); checkCUDAError("cpy U To Device");
cudaMemcpy(&g[l].dUn[i*g[l].N], g[l].Un[i], g[l].N*sizeof(double),cudaMemcpyHostToDevice); checkCUDAError("cpy Un To Device");
cudaMemcpy(&g[l].dRes[i*g[l].N],g[l].Res[i], g[l].N*sizeof(double),cudaMemcpyHostToDevice); checkCUDAError("cpy Res To Device");
cudaDeviceSynchronize();
}
}
pointsInfo __p;
int main(int argc, char* argv[])
{
double tolerance = 1e-0; // L2 Difference Tolerance before reaching convergence.
size_t N0 = 10; // 2^N0 + 1 elements per side
// Multigrid parameters -- Find the best configuration!
size_t gridCount = N0-1; // Number of Multigrid levels to use
size_t downRelaxations = 5; // Number of Relaxations before restriction
size_t upRelaxations = 0; // Number of Relaxations after prolongation
//I couldn't find a much better configuration, as the times are so small already
gridLevel* g = generateInitialConditions(N0, gridCount);
// gridLevel* d_g;
// cudaMalloc(&d_g, sizeof(gridLevel) * gridCount);checkCUDAError("Alloc error");
// cudaMemcpy(d_g,&g,sizeof(gridLevel) * gridCount, cudaMemcpyHostToDevice);checkCUDAError("Memcpy error");
// cudaDeviceSynchronize();
//for (size_t grid = 1; grid < gridCount; grid++) custommemcpyToDevice(g,grid);
auto startTime = std::chrono::system_clock::now();
while (g[0].L2NormDiff > tolerance) // Multigrid solver start
{
applyJacobi(g, 0, downRelaxations); // Relaxing the finest grid first
calculateResidual(g, 0); // Calculating Initial Residual
for (size_t grid = 1; grid < gridCount; grid++) // Going down the V-Cycle
{
applyRestriction(g, grid); // Restricting the residual to the coarser grid's solution vector (f)
applyJacobi(g, grid, downRelaxations); // Smoothing coarser level
calculateResidual(g, grid); // Calculating Coarse Grid Residual
}
for (size_t grid = gridCount-1; grid > 0; grid--) // Going up the V-Cycle
{
applyProlongation(g, grid); // Prolonging solution for coarser level up to finer level
applyJacobi(g, grid, upRelaxations); // Smoothing finer level
}
//custommemcpyToHost(g,grid);
printf("L2Norm: %.4f\n", g[0].L2Norm);
calculateL2Norm(g, 0); // Calculating Residual L2 Norm
} // Multigrid solver end
cudaDeviceSynchronize();
auto endTime = std::chrono::system_clock::now();
totalTime = std::chrono::duration<double>(endTime-startTime).count();
printTimings(gridCount);
printf("L2Norm: %.4f\n", g[0].L2Norm);
freeGrids(g, gridCount);
return 0;
}
//kernel function. Works only on the assigned thread Eex9Koh4pha-
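// Five-point Jacobi update: each interior point becomes (north + south + west + east + f*h^2) * 0.25.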
__global__ void applyJacobiKernel( size_t N, double h1, double h2, double* Un, double* f, double* U)
{
size_t myRow = blockIdx.y*blockDim.y + threadIdx.y;
size_t myCol = blockIdx.x*blockDim.x + threadIdx.x;
if(myRow >= N-1 || myCol >= N-1)return;
if(myRow < 1 || myCol < 1 )return;
U[myRow*N + myCol] = (Un[(myRow-1)*N + myCol]+Un[(myRow+1)*N + myCol]+Un[myRow*N + myCol-1]+Un[myRow*N + myCol+1]+f[myRow*N+myCol]*h2)*h1;
//g[l].U[myRow][myCol] = (g[l].Un[myRow-1][myCol] + g[l].Un[myRow+1][myCol] + g[l].Un[myRow][myCol-1] + g[l].Un[myRow][myCol+1] + g[l].f[myRow][myCol]*h2)*h1;
//__syncthreads();
}
void applyJacobi(gridLevel* g, size_t l, size_t relaxations)
{
//custommemcpyToDevice(g,l);
int N = g[l].N;
dim3 threadsPerBlock=dim3(32,32);
dim3 blocksPerGrid;
if(N<32) {blocksPerGrid=dim3(ceil(N),ceil(N));}
else {blocksPerGrid=dim3(ceil(N/32),ceil(N/32));}//depends on the size of the problem. int or dim3
auto t0 = std::chrono::system_clock::now();
double h1 = 0.25;
double h2 = g[l].h*g[l].h;
for (size_t r = 0; r < relaxations; r++)
{
//*
double* tmp = g[l].dUn; g[l].dUn = g[l].dU; g[l].dU = tmp;
applyJacobiKernel<<< blocksPerGrid,threadsPerBlock >>>(N, h1, h2, g[l].dUn, g[l].df, g[l].dU);checkCUDAError("apply Jacobi error");
//cudaDeviceSynchronize();
//*/
/*
for (size_t i = 1; i < g[l].N-1; i++)
for (size_t j = 1; j < g[l].N-1; j++) // Perform a Jacobi Iteration
g[l].U[i][j] = (g[l].Un[i-1][j] + g[l].Un[i+1][j] + g[l].Un[i][j-1] + g[l].Un[i][j+1] + g[l].f[i][j]*h2)*h1;
//*/
}
cudaDeviceSynchronize();
auto t1 = std::chrono::system_clock::now();
smoothingTime[l] += std::chrono::duration<double>(t1-t0).count();
//custommemcpyToHost(g, l);
}
//kernel fct. Eex9Koh4pha-
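// Residual of the discrete 5-point Laplacian: Res = f + (north + south + west + east - 4*U) / h^2.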
__global__ void calculateResidualKernel( size_t N, double h2, double* Res, double* f, double* U)
{
size_t myRow = blockIdx.y*blockDim.y + threadIdx.y;
size_t myCol = blockIdx.x*blockDim.x + threadIdx.x;
if(myRow >= N-1 || myCol >= N-1)return;
if(myRow < 1 || myCol < 1 )return;
Res[myRow*N + myCol] = f[myRow*N + myCol] + (U[(myRow-1)*N+myCol] + U[(myRow+1)*N + myCol] - 4*U[myRow*N +myCol] + U[myRow*N +myCol-1] + U[myRow*N +myCol+1]) * h2;
}
void calculateResidual(gridLevel* g, size_t l)
{
//custommemcpyToDevice(g,l);
int N = g[l].N;
dim3 threadsPerBlock=dim3(32,32);
dim3 blocksPerGrid;
if(N<32) {blocksPerGrid=dim3(ceil(N),ceil(N));}
else {blocksPerGrid=dim3(ceil(N/32),ceil(N/32));}
auto t0 = std::chrono::system_clock::now();
double h2 = 1.0 / pow(g[l].h,2);
/*
for (size_t i = 1; i < g[l].N-1; i++)
for (size_t j = 1; j < g[l].N-1; j++)
g[l].Res[i][j] = g[l].f[i][j] + (g[l].U[i-1][j] + g[l].U[i+1][j] - 4*g[l].U[i][j] + g[l].U[i][j-1] + g[l].U[i][j+1]) * h2;
*/
//*
calculateResidualKernel<<< blocksPerGrid,threadsPerBlock >>>(N, h2, g[l].dRes, g[l].df, g[l].dU);checkCUDAError("Calculate Residual Kernel error");
// cudaDeviceSynchronize();
//*/
cudaDeviceSynchronize();
auto t1 = std::chrono::system_clock::now();
residualTime[l] += std::chrono::duration<double>(t1-t0).count();
//custommemcpyToHost(g, l);
}
//kernel fct. Eex9Koh4pha-
__global__ void calculateSquareKernel(double *Res, size_t N)
{
size_t i = blockIdx.y*blockDim.y + threadIdx.y;
size_t j = blockIdx.x*blockDim.x + threadIdx.x;
if(i >= N || j >= N)return;
Res[i*N+j] = Res[i*N+j]*Res[i*N+j];
}
#define BLOCKSIZE 1024
//from the class example 3
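// Shared-memory tree reduction: each block sums its BLOCKSIZE elements of dVec and writes the partial sum to dAux[blockIdx.x].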
__global__ void reduce(double* dVec, double* dAux, size_t N,size_t N2)
{
__shared__ double sdata[BLOCKSIZE]; // partial sums of the double-precision residuals
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + threadIdx.x;
if(i> N2) return;
sdata[tid] = dVec[i];
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s) sdata[tid] += sdata[tid + s];
__syncthreads();
}
if (tid == 0) dAux[blockIdx.x] = sdata[0];
}
//Eex9Koh4pha-
void calculateL2Norm(gridLevel* g, size_t l)
{
//double tmp = 0.0;
auto t0 = std::chrono::system_clock::now();
//*custommemcpyToDevice(g,l);
//*
int N = g[l].N;
dim3 threadsPerBlock=dim3(32,32);
dim3 blocksPerGrid;
if(N<32) {blocksPerGrid=dim3(ceil(N),ceil(N));}
else {blocksPerGrid=dim3(ceil(N/32),ceil(N/32));}
calculateSquareKernel<<< blocksPerGrid,threadsPerBlock >>>(g[l].dRes, N);checkCUDAError("Calculate square Kernel error");
cudaDeviceSynchronize();
double *dtemp;
cudaMalloc(&dtemp, sizeof(double)*N*N); checkCUDAError("Error allocating dtemp");
cudaMemset(dtemp, 0.0, sizeof(double)*N*N);checkCUDAError("Error memset dtemp");
double *dRescpy; //sizeof(double)*BLOCKSIZE*BLOCKSIZE*4
cudaMalloc(&dRescpy, sizeof(double)*N*N); checkCUDAError("Error allocating dtemp");
//cudaMemset(dRescpy, 0.0, sizeof(double)*N*N);checkCUDAError("Error memset dtemp");
cudaDeviceSynchronize();
cudaMemcpy(dRescpy, g[l].dRes, sizeof(double)*N*N,cudaMemcpyDeviceToDevice); checkCUDAError("cpy Res To Host in L2");
cudaDeviceSynchronize();
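// Repeatedly reduce the squared residuals in passes of BLOCKSIZE, swapping buffers each pass; the accumulated sum ends up in dRescpy[0].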
for (size_t n =BLOCKSIZE*BLOCKSIZE*4; n > 1; n = n / BLOCKSIZE)
{
size_t bSize = BLOCKSIZE; if (bSize > n) bSize = n;
size_t gSize = ceil((double)n / (double)BLOCKSIZE); if (bSize > n) gSize = 1;
printf("bSize: %lu - gSize: %lu\n", bSize, gSize);
reduce<<<gSize, bSize,BLOCKSIZE*sizeof(unsigned int)>>>(dRescpy, dtemp, n, N*N);
double *tmp = dRescpy; dRescpy = dtemp; dtemp = tmp;
}
cudaDeviceSynchronize();
/*
for (size_t i = 0; i < g[l].N; i++)
for (size_t j = 0; j < g[l].N; j++)
g[l].Res[i][j] = g[l].Res[i][j]*g[l].Res[i][j];
//*/
/*
for (size_t i = 0; i < g[l].N; i++)
for (size_t j = 0; j < g[l].N; j++)
tmp += temp[i*N+j];
//*/
//printf("%d \n", tmp);
double result = 0.0;
cudaMemcpy(&result, dRescpy, sizeof(double), cudaMemcpyDeviceToHost); checkCUDAError("error copying result");
//cudaMemcpy(&result2, dRescpy, sizeof(double), cudaMemcpyDeviceToHost); checkCUDAError("error copying result2");
cudaDeviceSynchronize();
g[l].L2Norm = sqrt(result);
g[l].L2NormDiff = fabs(g[l].L2NormPrev - g[l].L2Norm);
g[l].L2NormPrev = g[l].L2Norm;
cudaDeviceSynchronize();
auto t1 = std::chrono::system_clock::now();
L2NormTime[l] += std::chrono::duration<double>(t1-t0).count();
cudaFree(dtemp);
cudaFree(dRescpy);
//custommemcpyToDevice(g,l);
}
//Eex9Koh4pha-
//kernel fct. Eex9Koh4pha-
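// Full-weighting restriction: each coarse-grid f value is a 9-point weighted average (weights 1/2/4, scaled by 1/16) of the fine-grid residual.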
__global__ void applyRestrictionKernel( size_t N, size_t N2, double* Res, double* f)
{
size_t i = blockIdx.y*blockDim.y + threadIdx.y;
size_t j = blockIdx.x*blockDim.x + threadIdx.x;
double c =0.0;
if(i >= N-1 || j >= N-1)return;
if(i < 1 || j < 1 )return;
c = ( 1.0*( Res[(2*i-1)*N2 +(2*j-1)] + Res[(2*i-1)*N2 +(2*j+1)] + Res[(2*i+1)*N2 +(2*j-1)] + Res[(2*i+1)*N2 +(2*j+1)] ) +
2.0*( Res[(2*i-1)*N2 +2*j] + Res[(2*i)*N2 +(2*j-1)] + Res[(2*i+1)*N2 +2*j] + Res[(2*i)*N2 +(2*j+1)] ) +
4.0*( Res[(2*i)*N2 +(2*j)] ) ) * 0.0625;
f[i*N +j]=c;
}
void applyRestriction(gridLevel* g, size_t l)
{
//custommemcpyToDevice(g, l);custommemcpyToDevice(g, l-1);
int N = g[l].N;
int N2 = g[l-1].N;//
double *temp;
cudaMalloc(&temp, sizeof(double) * N2 * N2); checkCUDAError("Alloc error temp");
cudaMemcpy(temp,g[l-1].dRes, N2*N2*sizeof(double),cudaMemcpyDeviceToDevice); checkCUDAError("cpy temp");
dim3 threadsPerBlock=dim3(32,32);
dim3 blocksPerGrid;
if(N<32) {blocksPerGrid=dim3(ceil(N),ceil(N));}
else {blocksPerGrid=dim3(ceil(N/32),ceil(N/32));}
auto t0 = std::chrono::system_clock::now();
/*
for (size_t i = 1; i < g[l].N-1; i++)
for (size_t j = 1; j < g[l].N-1; j++)
g[l].f[i][j] = ( 1.0*( g[l-1].Res[2*i-1][2*j-1] + g[l-1].Res[2*i-1][2*j+1] + g[l-1].Res[2*i+1][2*j-1] + g[l-1].Res[2*i+1][2*j+1] ) +
2.0*( g[l-1].Res[2*i-1][2*j] + g[l-1].Res[2*i][2*j-1] + g[l-1].Res[2*i+1][2*j] + g[l-1].Res[2*i][2*j+1] ) +
4.0*( g[l-1].Res[2*i][2*j] ) ) * 0.0625;
for (size_t i = 0; i < g[l].N; i++)
for (size_t j = 0; j < g[l].N; j++) // Resetting U vector for the coarser level before smoothing -- Find out if this is really necessary.
g[l].U[i][j] = 0;
//*/
//*
applyRestrictionKernel<<< blocksPerGrid,threadsPerBlock >>>(N, N2, temp, g[l].df);checkCUDAError("Apply Restriction Kernel error");
//*/
cudaDeviceSynchronize();
cudaMemset(g[l].dU, 0.0, sizeof(double)* N*N);// Resetting U vector for the coarser level before smoothing
auto t1 = std::chrono::system_clock::now();
restrictionTime[l] += std::chrono::duration<double>(t1-t0).count();
//custommemcpyToHost(g, l);
}
//kernel fct. Eex9Koh4pha-
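// Bilinear prolongation split over four kernels: coincident points, points between rows, points between columns, and cell centres of the finer grid.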
__global__ void applyProlongationKernel1( size_t N, size_t N2, double* U, double* Um)
{
size_t i = blockIdx.y*blockDim.y + threadIdx.y;
size_t j = blockIdx.x*blockDim.x + threadIdx.x;
if( i > 0 && j > 0 && i < N-1 && j < N-1) Um[2*i * N2 + 2*j] += U[i* N + j];
}
__global__ void applyProlongationKernel2( size_t N, size_t N2, double* U, double* Um)
{
size_t i = blockIdx.y*blockDim.y + threadIdx.y;
size_t j = blockIdx.x*blockDim.x + threadIdx.x;
if( i > 0 && j > 0 && i < N && j < N-1) Um[(2*i-1)* N2 + 2*j] += ( U[(i-1)* N + j] + U[i* N + j] ) *0.5;
}
__global__ void applyProlongationKernel3( size_t N, size_t N2, double* U, double* Um)
{
size_t i = blockIdx.y*blockDim.y + threadIdx.y;
size_t j = blockIdx.x*blockDim.x + threadIdx.x;
if( i > 0 && j > 0 && i < N-1 && j < N) Um[2*i* N2 + (2*j-1)] += ( U[i* N + (j-1)] + U[i* N + j] ) *0.5;
}
__global__ void applyProlongationKernel4( size_t N, size_t N2, double* U, double* Um)
{
size_t i = blockIdx.y*blockDim.y + threadIdx.y;
size_t j = blockIdx.x*blockDim.x + threadIdx.x;
if( i > 0 && j > 0 && i < N && j < N) Um[(2*i-1)* N2 + (2*j-1)] += ( U[(i-1)* N + (j-1)] + U[(i-1)* N + j] + U[i* N + (j-1)] + U[i* N + j] ) *0.25;
}
void applyProlongation(gridLevel* g, size_t l)
{
//custommemcpyToDevice(g, l);
//custommemcpyToDevice(g, l-1);
int N = g[l].N;
int N2 = g[l-1].N;
double *temp;
cudaMalloc(&temp, sizeof(double) * g[l-1].N * g[l-1].N);checkCUDAError("Alloc error temp");
cudaMemcpy(temp, g[l-1].dU, g[l-1].N*g[l-1].N*sizeof(double),cudaMemcpyDeviceToDevice); checkCUDAError("cpy temp dres");
dim3 threadsPerBlock=dim3(32,32);
dim3 blocksPerGrid;
if(N<32) {blocksPerGrid=dim3(ceil(N),ceil(N));}
else {blocksPerGrid=dim3(ceil(N/32),ceil(N/32));}
//2 pointers to U from 2 different levels of g
//Kernel size is defined by g[l] not g[l-1]
auto t0 = std::chrono::system_clock::now();
/*
for (size_t i = 1; i < g[l].N-1; i++)
for (size_t j = 1; j < g[l].N-1; j++)
g[l-1].U[2*i][2*j] += g[l].U[i][j];
for (size_t i = 1; i < g[l].N; i++)
for (size_t j = 1; j < g[l].N-1; j++)
g[l-1].U[2*i-1][2*j] += ( g[l].U[i-1][j] + g[l].U[i][j] ) *0.5;
for (size_t i = 1; i < g[l].N-1; i++)
for (size_t j = 1; j < g[l].N; j++)
g[l-1].U[2*i][2*j-1] += ( g[l].U[i][j-1] + g[l].U[i][j] ) *0.5;
for (size_t i = 1; i < g[l].N; i++)
for (size_t j = 1; j < g[l].N; j++)
g[l-1].U[2*i-1][2*j-1] += ( g[l].U[i-1][j-1] + g[l].U[i-1][j] + g[l].U[i][j-1] + g[l].U[i][j] ) *0.25;
//*/
applyProlongationKernel1<<< blocksPerGrid,threadsPerBlock >>>( N, N2, g[l].dU, temp);
applyProlongationKernel2<<< blocksPerGrid,threadsPerBlock >>>( N, N2, g[l].dU, temp);
applyProlongationKernel3<<< blocksPerGrid,threadsPerBlock >>>( N, N2, g[l].dU, temp);
applyProlongationKernel4<<< blocksPerGrid,threadsPerBlock >>>( N, N2, g[l].dU, temp);
cudaDeviceSynchronize();
cudaMemcpy(g[l-1].dU, temp, g[l-1].N*g[l-1].N*sizeof(double),cudaMemcpyDeviceToDevice); checkCUDAError("cpy temp");
cudaDeviceSynchronize();
auto t1 = std::chrono::system_clock::now();
prolongTime[l] += std::chrono::duration<double>(t1-t0).count();
//custommemcpyToHost(g, l-1);
}
gridLevel* generateInitialConditions(size_t N0, size_t gridCount)
{
// Default values:
__p.nCandles = 4;
std::vector<double> pars;
pars.push_back(0.228162);
pars.push_back(0.226769);
pars.push_back(0.437278);
pars.push_back(0.0492324);
pars.push_back(0.65915);
pars.push_back(0.499616);
pars.push_back(0.59006);
pars.push_back(0.0566329);
pars.push_back(0.0186672);
pars.push_back(0.894063);
pars.push_back(0.424229);
pars.push_back(0.047725);
pars.push_back(0.256743);
pars.push_back(0.754483);
pars.push_back(0.490461);
pars.push_back(0.0485152);
// Allocating Timers
smoothingTime = (double*) calloc (gridCount, sizeof(double));
residualTime = (double*) calloc (gridCount, sizeof(double));
restrictionTime = (double*) calloc (gridCount, sizeof(double));
prolongTime = (double*) calloc (gridCount, sizeof(double));
L2NormTime = (double*) calloc (gridCount, sizeof(double));
// Allocating Grids
gridLevel* g = (gridLevel*) malloc(sizeof(gridLevel) * gridCount);
for (size_t i = 0; i < gridCount; i++)
{
g[i].N = pow(2, N0-i) + 1;
g[i].h = 1.0/(g[i].N-1);
g[i].U = (double**) malloc(sizeof(double*) * g[i].N); for (size_t j = 0; j < g[i].N ; j++) g[i].U[j] = (double*) malloc(sizeof(double) * g[i].N);
g[i].Un = (double**) malloc(sizeof(double*) * g[i].N); for (size_t j = 0; j < g[i].N ; j++) g[i].Un[j] = (double*) malloc(sizeof(double) * g[i].N);
g[i].Res = (double**) malloc(sizeof(double*) * g[i].N); for (size_t j = 0; j < g[i].N ; j++) g[i].Res[j] = (double*) malloc(sizeof(double) * g[i].N);
g[i].f = (double**) malloc(sizeof(double*) * g[i].N); for (size_t j = 0; j < g[i].N ; j++) g[i].f[j] = (double*) malloc(sizeof(double) * g[i].N);
cudaMalloc(&g[i].dU, sizeof(double) * g[i].N * g[i].N);checkCUDAError("Alloc error d_U");
cudaMalloc(&g[i].dRes, sizeof(double) * g[i].N * g[i].N);checkCUDAError("Alloc error d_Res");
cudaMalloc(&g[i].dUn, sizeof(double) * g[i].N * g[i].N);checkCUDAError("Alloc error d_Un");
cudaMalloc(&g[i].df, sizeof(double) * g[i].N * g[i].N);checkCUDAError("Alloc error d_f");
cudaDeviceSynchronize();
g[i].L2Norm = 0.0;
g[i].L2NormPrev = std::numeric_limits<double>::max();
g[i].L2NormDiff = std::numeric_limits<double>::max();
}
// Initial Guess
for (size_t i = 0; i < g[0].N; i++) for (size_t j = 0; j < g[0].N; j++) g[0].U[i][j] = 1.0;
// Boundary Conditions
for (size_t i = 0; i < g[0].N; i++) g[0].U[0][i] = 0.0;
for (size_t i = 0; i < g[0].N; i++) g[0].U[g[0].N-1][i] = 0.0;
for (size_t i = 0; i < g[0].N; i++) g[0].U[i][0] = 0.0;
for (size_t i = 0; i < g[0].N; i++) g[0].U[i][g[0].N-1] = 0.0;
// F
for (size_t i = 0; i < g[0].N; i++){
for (size_t j = 0; j < g[0].N; j++)
{
double h = 1.0/(g[0].N-1);
double x = i*h;
double y = j*h;
g[0].f[i][j] = 0.0;
for (size_t c = 0; c < __p.nCandles; c++)
{
double c3 = pars[c*4 + 0]; // x0
double c4 = pars[c*4 + 1]; // y0
double c1 = pars[c*4 + 2]; c1 *= 100000;// intensity
double c2 = pars[c*4 + 3]; c2 *= 0.01;// Width
g[0].f[i][j] += c1*exp(-(pow(c4 - y, 2) + pow(c3 - x, 2)) / c2);
}
}
cudaMemcpy(&g[0].df[i*g[0].N], g[0].f[i],g[0].N*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(&g[0].dU[i*g[0].N], g[0].U[i],g[0].N*sizeof(double),cudaMemcpyHostToDevice);
}
cudaMemset(g[0].dUn, 0, g[0].N*g[0].N*sizeof(double));
cudaMemset(g[0].dRes, 0, g[0].N*g[0].N*sizeof(double));
return g;
}
void freeGrids(gridLevel* g, size_t gridCount)
{
for (size_t i = 0; i < gridCount; i++)
{
for (size_t j = 0; j < g[i].N ; j++) free(g[i].U[j]);
for (size_t j = 0; j < g[i].N ; j++) free(g[i].Un[j]);
for (size_t j = 0; j < g[i].N ; j++) free(g[i].f[j]);
for (size_t j = 0; j < g[i].N ; j++) free(g[i].Res[j]);
free(g[i].U);
free(g[i].Un);
free(g[i].f);
free(g[i].Res);
cudaFree(g[i].dU);
cudaFree(g[i].dUn);
cudaFree(g[i].df);
cudaFree(g[i].dRes);
}
free(g);
}
void printTimings(size_t gridCount)
{
double* timePerGrid = (double*) calloc (sizeof(double), gridCount);
double totalSmoothingTime = 0.0;
double totalResidualTime = 0.0;
double totalRestrictionTime = 0.0;
double totalProlongTime = 0.0;
double totalL2NormTime = 0.0;
for (size_t i = 0; i < gridCount; i++) timePerGrid[i] = smoothingTime[i] + residualTime[i] + restrictionTime[i] + prolongTime[i] + L2NormTime[i];
for (size_t i = 0; i < gridCount; i++) totalSmoothingTime += smoothingTime[i];
for (size_t i = 0; i < gridCount; i++) totalResidualTime += residualTime[i];
for (size_t i = 0; i < gridCount; i++) totalRestrictionTime += restrictionTime[i];
for (size_t i = 0; i < gridCount; i++) totalProlongTime += prolongTime[i];
for (size_t i = 0; i < gridCount; i++) totalL2NormTime += L2NormTime[i];
double totalMeasured = totalSmoothingTime + totalResidualTime + totalRestrictionTime + totalProlongTime + totalL2NormTime;
printf(" Time (s) "); for (size_t i = 0; i < gridCount; i++) printf("Grid%lu ", i); printf(" Total \n");
printf("-------------|-"); for (size_t i = 0; i < gridCount; i++) printf("--------"); printf("|---------\n");
printf("Smoothing | "); for (size_t i = 0; i < gridCount; i++) printf("%2.3f ", smoothingTime[i]); printf("| %2.3f \n", totalSmoothingTime);
printf("Residual | "); for (size_t i = 0; i < gridCount; i++) printf("%2.3f ", residualTime[i]); printf("| %2.3f \n", totalResidualTime);
printf("Restriction | "); for (size_t i = 0; i < gridCount; i++) printf("%2.3f ", restrictionTime[i]); printf("| %2.3f \n", totalRestrictionTime);
printf("Prolongation | "); for (size_t i = 0; i < gridCount; i++) printf("%2.3f ", prolongTime[i]); printf("| %2.3f \n", totalProlongTime);
printf("L2Norm | "); for (size_t i = 0; i < gridCount; i++) printf("%2.3f ", L2NormTime[i]); printf("| %2.3f \n", totalL2NormTime);
printf("-------------|-"); for (size_t i = 0; i < gridCount; i++) printf("--------"); printf("|---------\n");
printf("Total | "); for (size_t i = 0; i < gridCount; i++) printf("%2.3f ", timePerGrid[i]); printf("| %2.3f \n", totalMeasured);
printf("-------------|-"); for (size_t i = 0; i < gridCount; i++) printf("--------"); printf("|---------\n");
printf("\n");
printf("Running Time : %.3fs\n", totalTime);
}
|
350787d8ccfcdd1f6df3ae0e0ccb21bdd4ff6d8c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__global__ void VecAdd(float* A, float* B, float* C, int N){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
int main(int argc, char** argv){
srand(2634);
int N = atoi(argv[1]);
char* out = argv[2];
hipEvent_t start, stop, fin;
float dur_time;
size_t size = N * sizeof(float);
float* h_A;
hipHostMalloc((void**)&h_A, size);
float* h_B;
hipHostMalloc((void**)&h_B, size);
float* h_C;
hipHostMalloc((void**)&h_C, size);
float* h_D;
hipHostMalloc((void**)&h_D, size);
float* h_F;
hipHostMalloc((void**)&h_F, size);
int i;
for (i = 0; i < N; ++i){
h_A[i] = (float)rand() / RAND_MAX;
h_B[i] = (float)rand() / RAND_MAX;
h_D[i] = (float)rand() / RAND_MAX;
}
float* d_A;
hipMalloc((void**)&d_A, size);
float* d_B;
hipMalloc((void**)&d_B, size);
float* d_C;
hipMalloc((void**)&d_C, size);
float* d_D;
hipMalloc((void**)&d_D, size);
float* d_F;
hipMalloc((void**)&d_F, size);
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventCreate(&fin);
hipEventRecord(start, 0);
hipStream_t stream[2];
hipStreamCreate(&stream[0]);
hipStreamCreate(&stream[1]);
hipMemcpyAsync(d_A, h_A, size, hipMemcpyHostToDevice, stream[0]);
hipMemcpyAsync(d_B, h_B, size, hipMemcpyHostToDevice, stream[0]);
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, stream[0], d_A, d_B, d_C, N);
hipEventRecord(fin, stream[0]);
hipMemcpyAsync(h_C, d_C, size, hipMemcpyDeviceToHost, stream[0]);
hipMemcpyAsync(d_D, h_D, size, hipMemcpyHostToDevice, stream[1]);
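// Block stream[1] until the first VecAdd on stream[0] (recorded as event 'fin') has produced d_C, so the second kernel can safely read it.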
hipStreamWaitEvent(stream[1], fin, 0);
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, stream[1], d_C, d_D, d_F, N);
hipMemcpyAsync(h_F, d_F, size, hipMemcpyDeviceToHost, stream[1]);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&dur_time, start, stop);
fprintf(stderr, "%.3f\n", dur_time);
hipEventDestroy(fin);
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipFree(d_D);
hipFree(d_F);
freopen(out, "w", stdout);
for (i = 0; i < N; ++i)
printf("%.5f %.5f\n", h_C[i], h_F[i]);
hipHostFree(h_A);
hipHostFree(h_B);
hipHostFree(h_C);
hipHostFree(h_D);
hipHostFree(h_F);
return 0;
} | 350787d8ccfcdd1f6df3ae0e0ccb21bdd4ff6d8c.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
__global__ void VecAdd(float* A, float* B, float* C, int N){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
int main(int argc, char** argv){
srand(2634);
int N = atoi(argv[1]);
char* out = argv[2];
cudaEvent_t start, stop, fin;
float dur_time;
size_t size = N * sizeof(float);
float* h_A;
cudaMallocHost((void**)&h_A, size);
float* h_B;
cudaMallocHost((void**)&h_B, size);
float* h_C;
cudaMallocHost((void**)&h_C, size);
float* h_D;
cudaMallocHost((void**)&h_D, size);
float* h_F;
cudaMallocHost((void**)&h_F, size);
int i;
for (i = 0; i < N; ++i){
h_A[i] = (float)rand() / RAND_MAX;
h_B[i] = (float)rand() / RAND_MAX;
h_D[i] = (float)rand() / RAND_MAX;
}
float* d_A;
cudaMalloc((void**)&d_A, size);
float* d_B;
cudaMalloc((void**)&d_B, size);
float* d_C;
cudaMalloc((void**)&d_C, size);
float* d_D;
cudaMalloc((void**)&d_D, size);
float* d_F;
cudaMalloc((void**)&d_F, size);
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&fin);
cudaEventRecord(start, 0);
cudaStream_t stream[2];
cudaStreamCreate(&stream[0]);
cudaStreamCreate(&stream[1]);
cudaMemcpyAsync(d_A, h_A, size, cudaMemcpyHostToDevice, stream[0]);
cudaMemcpyAsync(d_B, h_B, size, cudaMemcpyHostToDevice, stream[0]);
VecAdd<<<blocksPerGrid, threadsPerBlock, 0, stream[0]>>>(d_A, d_B, d_C, N);
cudaEventRecord(fin, stream[0]);
cudaMemcpyAsync(h_C, d_C, size, cudaMemcpyDeviceToHost, stream[0]);
cudaMemcpyAsync(d_D, h_D, size, cudaMemcpyHostToDevice, stream[1]);
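// Block stream[1] until the first VecAdd on stream[0] (recorded as event 'fin') has produced d_C, so the second kernel can safely read it.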
cudaStreamWaitEvent(stream[1], fin, 0);
VecAdd<<<blocksPerGrid, threadsPerBlock, 0, stream[1]>>>(d_C, d_D, d_F, N);
cudaMemcpyAsync(h_F, d_F, size, cudaMemcpyDeviceToHost, stream[1]);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dur_time, start, stop);
fprintf(stderr, "%.3f\n", dur_time);
cudaEventDestroy(fin);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFree(d_D);
cudaFree(d_F);
freopen(out, "w", stdout);
for (i = 0; i < N; ++i)
printf("%.5f %.5f\n", h_C[i], h_F[i]);
cudaFreeHost(h_A);
cudaFreeHost(h_B);
cudaFreeHost(h_C);
cudaFreeHost(h_D);
cudaFreeHost(h_F);
return 0;
} |
f906cedf34e0420af5e9f2de5dc7d00c756bb898.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <mex.h>
#define MEX_CHECK_RETURN(in) {hipError_t __errLocal = in; if(__errLocal != hipSuccess) { mexPrintf("ERROR: file %s, line %i\n CUDA call \n\t" #in " \n returned error (%i) - \"%s\"\n", __FILE__, __LINE__, __errLocal, hipGetErrorString(__errLocal)); } }
__device__ __constant__ int rvVectorLengths;
__device__ __constant__ int rvWindowSize;
__device__ __constant__ int rvPrePostLag;
__device__ __constant__ int rvLeftAlpha;
__device__ __constant__ int rvRightAlpha;
__device__ __constant__ int rvNumElsX;
__device__ __constant__ int rvNumElsY;
__device__ __constant__ int rvSharedMemorySizeX;
__device__ __constant__ int rvSharedMemorySizeY;
__device__ float kernel_cov(float *pA, float *pB, int pElements) {
float summation[2] = {0.0f,0.0f};
//Calculate the means for each vector
for(int i = 0; i < pElements; i++) {
summation[0] = summation[0] + pA[i];
summation[1] = summation[1] + pB[i];
}
summation[0] = summation[0] / pElements;
summation[1] = summation[1] / pElements;
float covMeasure = 0.0f;
//Calculate the means for each vector
for(int i = 0; i < pElements; i++) {
covMeasure = covMeasure + (pA[i]-summation[0])*(pB[i]-summation[1]);
}
covMeasure = covMeasure / (pElements-1);
return covMeasure;
}
__global__ void windowed_cov(float *pX, float *pY, float *pOut, int pRowOffset) {
extern __shared__ float sharedMem[];
int t = blockIdx.y * gridDim.x + blockIdx.x;
//t = t + 1;
int yRange[2];
yRange[0] = t-rvLeftAlpha;
yRange[1] = t+rvRightAlpha;
int xRange[2];
xRange[0] = t - rvPrePostLag - rvLeftAlpha;
xRange[1] = t + rvPrePostLag + rvRightAlpha;
int outputRow = threadIdx.x + pRowOffset;
int outputLocation = t * (2*rvPrePostLag + 1) + outputRow;
/*if(xRange[0] < 0 || yRange[0] < 0 || xRange[1] > rvVectorLengths || yRange[1] > rvVectorLengths) {
pOut[outputLocation] = 0;
return;
}*/
float *ySharedMem = (float*) sharedMem;
float *xSharedMem = (float*) &ySharedMem[rvNumElsY];
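// The dynamic shared-memory buffer is partitioned into two float regions: the first rvNumElsY entries hold the y window, the remainder holds the x samples this block needs.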
//__shared__ float ySharedMem[rvSharedMemorySizeY];
//__shared__ float xSharedMem[rvSharedMemorySizeX];
/*
Load y into shared memory
*/
int globalMemLocation;
//Pull the Y into shared memory
int localMemPos = threadIdx.x;
while(localMemPos < rvWindowSize+1) {
globalMemLocation = min(max(yRange[0] + localMemPos,0),rvVectorLengths-1);
ySharedMem[localMemPos] = pY[globalMemLocation];
localMemPos = localMemPos + blockDim.x;
}
__syncthreads();
// Y is loaded!
/*if(outputRow < 601) {
pOut[outputLocation] = ySharedMem[300];
}
return;*/
/*
Load x into shared memory
*/
// Total evaluation range for this sample t
int xEvalRange[2];
xEvalRange[0] = t - rvPrePostLag;
xEvalRange[1] = t + rvPrePostLag;
// Evaluation range for this current kernel loop
int kernelLoopEvalRange[2];
kernelLoopEvalRange[0] = xEvalRange[0] + pRowOffset;
kernelLoopEvalRange[1] = min(kernelLoopEvalRange[0] + blockDim.x,xEvalRange[1]);
int blockMemoryRange[2];
blockMemoryRange[0] = kernelLoopEvalRange[0] - rvLeftAlpha;
blockMemoryRange[1] = min(kernelLoopEvalRange[1] + rvRightAlpha, xEvalRange[1] + rvRightAlpha);
localMemPos = threadIdx.x;
while(localMemPos < blockMemoryRange[1] - blockMemoryRange[0]+1) {
globalMemLocation = min(max(blockMemoryRange[0] + localMemPos,0), rvVectorLengths-1);
xSharedMem[localMemPos] = pX[globalMemLocation];
localMemPos = localMemPos + blockDim.x;
}
__syncthreads();
if(outputRow >= (2 * rvPrePostLag + 1)) {
return;
}
float cov = kernel_cov(ySharedMem, xSharedMem + threadIdx.x, rvWindowSize + 1);
pOut[outputLocation] = cov;
//pOut[outputLocation] = blockMemoryRange[1];
}
void RunCuda(float *pStaticVector, float *pMovingVector, float *pOutput, int pNumElements, int pWindowSize, int pPrePostLag) {
int deviceCount;
MEX_CHECK_RETURN(hipGetDeviceCount(&deviceCount));
if(deviceCount == 2) {
hipSetDevice(1);
}
// Calculate the size of the output
int heightOut = pPrePostLag * 2 + 1;
int widthOut = pNumElements;
int outputLength = heightOut * widthOut;
int leftAlpha = ceil(pWindowSize/2.0f);
int rightAlpha = floor(pWindowSize/2.0f);
int numElsX = pWindowSize + 2 * pPrePostLag + 1;
int numElsY = pWindowSize;
int smXSz = numElsX*sizeof(float);
int smYSz = numElsY*sizeof(float);
//THIS WORKS
int kernelRunsPerSample = 1;
int threadsPerBlock = ceil(heightOut / 32.0f) * 32;
while (threadsPerBlock > 512) {
kernelRunsPerSample ++;
threadsPerBlock = ceil((heightOut / kernelRunsPerSample)/32.0f)*32;
}
//Need to setup the number of blocks horizontally
int verticalBlocksInGrid = 1;
int horizontalBlocksInGrid = int(pNumElements * 1.0f / verticalBlocksInGrid);
while(horizontalBlocksInGrid > 65535) {
verticalBlocksInGrid++;
horizontalBlocksInGrid = int(pNumElements * 1.0f / verticalBlocksInGrid);
}
//Now allocate the device data;
float *deviceX, *deviceY, *deviceOutput;
//mexPrintf("Allocating device input memory of length %i\n",pNumElements);
MEX_CHECK_RETURN(hipMalloc(&deviceX, sizeof(float) * pNumElements));
MEX_CHECK_RETURN(hipMalloc(&deviceY, sizeof(float) * pNumElements));
//mexPrintf("Allocating device output (%ix%i) memory of length %.2fMB (%i bytes)\n",heightOut, widthOut, outputLength/1024.0f/1024.0f*sizeof(float),outputLength*sizeof(float));
MEX_CHECK_RETURN(hipMalloc(&deviceOutput, sizeof(float) * outputLength));
//mexPrintf("Setting constants\n");
MEX_CHECK_RETURN(hipMemcpyToSymbol("rvVectorLengths", &pNumElements,sizeof(int)));
MEX_CHECK_RETURN(hipMemcpyToSymbol("rvWindowSize", &pWindowSize,sizeof(int)));
MEX_CHECK_RETURN(hipMemcpyToSymbol("rvPrePostLag", &pPrePostLag,sizeof(int)));
MEX_CHECK_RETURN(hipMemcpyToSymbol("rvLeftAlpha", &leftAlpha,sizeof(int)));
MEX_CHECK_RETURN(hipMemcpyToSymbol("rvRightAlpha", &rightAlpha,sizeof(int)));
MEX_CHECK_RETURN(hipMemcpyToSymbol("rvNumElsX", &numElsX, sizeof(int)));
MEX_CHECK_RETURN(hipMemcpyToSymbol("rvNumElsY", &numElsY, sizeof(int)));
MEX_CHECK_RETURN(hipMemcpyToSymbol("rvSharedMemorySizeX", &smXSz, sizeof(int)));
MEX_CHECK_RETURN(hipMemcpyToSymbol("rvSharedMemorySizeY", &smYSz, sizeof(int)));
//mexPrintf("Copying local memory to device\n");
MEX_CHECK_RETURN(hipMemcpy( deviceX, pStaticVector, sizeof(float) * pNumElements, hipMemcpyHostToDevice ));
MEX_CHECK_RETURN(hipMemcpy( deviceY, pMovingVector, sizeof(float) * pNumElements, hipMemcpyHostToDevice ));
//Don't think we need to do this
//MEX_CHECK_RETURN(hipMemcpy( deviceOutput, pOutput, sizeof(float) * outputLength, hipMemcpyHostToDevice ));
dim3 dimGrid(horizontalBlocksInGrid, verticalBlocksInGrid);
dim3 dimBlock(threadsPerBlock);
//kernelRunsPerSample = 1; //mexPrintf("**HARDCODED TO kernelRunsPerSample=%i**\n",kernelRunsPerSample);
//mexPrintf("Starting CUDA!! will run kernel %ix times with Grid dimension %ix%i, TBP(%i)\n", kernelRunsPerSample, horizontalBlocksInGrid, verticalBlocksInGrid, threadsPerBlock);
for(int runNum = 0; runNum < kernelRunsPerSample; runNum++) {
hipLaunchKernelGGL(( windowed_cov), dim3(dimGrid), dim3(dimBlock),smXSz + smYSz, 0, deviceX, deviceY, deviceOutput, runNum * threadsPerBlock);
MEX_CHECK_RETURN(hipDeviceSynchronize());
MEX_CHECK_RETURN(hipGetLastError());
}
//mexPrintf("Pulling results from GPU\n");
MEX_CHECK_RETURN(hipMemcpy( pOutput, deviceOutput, sizeof(float) * outputLength, hipMemcpyDeviceToHost ));
//mexPrintf("Freeing device output memory\n");
MEX_CHECK_RETURN(hipFree(deviceOutput));
//mexPrintf("Freeing device memory\n");
MEX_CHECK_RETURN(hipFree(deviceX));
MEX_CHECK_RETURN(hipFree(deviceY));
return;
}
| f906cedf34e0420af5e9f2de5dc7d00c756bb898.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <mex.h>
#define MEX_CHECK_RETURN(in) {cudaError_t __errLocal = in; if(__errLocal != CUDA_SUCCESS) { mexPrintf("ERROR: file %s, line %i\n CUDA call \n\t" #in " \n returned error (%i) - \"%s\"\n", __FILE__, __LINE__, __errLocal, cudaGetErrorString(__errLocal)); } }
__device__ __constant__ int rvVectorLengths;
__device__ __constant__ int rvWindowSize;
__device__ __constant__ int rvPrePostLag;
__device__ __constant__ int rvLeftAlpha;
__device__ __constant__ int rvRightAlpha;
__device__ __constant__ int rvNumElsX;
__device__ __constant__ int rvNumElsY;
__device__ __constant__ int rvSharedMemorySizeX;
__device__ __constant__ int rvSharedMemorySizeY;
__device__ float kernel_cov(float *pA, float *pB, int pElements) {
float summation[2] = {0.0f,0.0f};
//Calculate the means for each vector
for(int i = 0; i < pElements; i++) {
summation[0] = summation[0] + pA[i];
summation[1] = summation[1] + pB[i];
}
summation[0] = summation[0] / pElements;
summation[1] = summation[1] / pElements;
float covMeasure = 0.0f;
//Calculate the means for each vector
for(int i = 0; i < pElements; i++) {
covMeasure = covMeasure + (pA[i]-summation[0])*(pB[i]-summation[1]);
}
covMeasure = covMeasure / (pElements-1);
return covMeasure;
}
__global__ void windowed_cov(float *pX, float *pY, float *pOut, int pRowOffset) {
extern __shared__ float sharedMem[];
int t = blockIdx.y * gridDim.x + blockIdx.x;
//t = t + 1;
int yRange[2];
yRange[0] = t-rvLeftAlpha;
yRange[1] = t+rvRightAlpha;
int xRange[2];
xRange[0] = t - rvPrePostLag - rvLeftAlpha;
xRange[1] = t + rvPrePostLag + rvRightAlpha;
int outputRow = threadIdx.x + pRowOffset;
int outputLocation = t * (2*rvPrePostLag + 1) + outputRow;
/*if(xRange[0] < 0 || yRange[0] < 0 || xRange[1] > rvVectorLengths || yRange[1] > rvVectorLengths) {
pOut[outputLocation] = 0;
return;
}*/
float *ySharedMem = (float*) sharedMem;
float *xSharedMem = (float*) &ySharedMem[rvNumElsY];
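// The dynamic shared-memory buffer is partitioned into two float regions: the first rvNumElsY entries hold the y window, the remainder holds the x samples this block needs.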
//__shared__ float ySharedMem[rvSharedMemorySizeY];
//__shared__ float xSharedMem[rvSharedMemorySizeX];
/*
Load y into shared memory
*/
int globalMemLocation;
//Pull the Y into shared memory
int localMemPos = threadIdx.x;
while(localMemPos < rvWindowSize+1) {
globalMemLocation = min(max(yRange[0] + localMemPos,0),rvVectorLengths-1);
ySharedMem[localMemPos] = pY[globalMemLocation];
localMemPos = localMemPos + blockDim.x;
}
__syncthreads();
// Y is loaded!
/*if(outputRow < 601) {
pOut[outputLocation] = ySharedMem[300];
}
return;*/
/*
Load x into shared memory
*/
// Total evaluation range for this sample t
int xEvalRange[2];
xEvalRange[0] = t - rvPrePostLag;
xEvalRange[1] = t + rvPrePostLag;
// Evaluation range for this current kernel loop
int kernelLoopEvalRange[2];
kernelLoopEvalRange[0] = xEvalRange[0] + pRowOffset;
kernelLoopEvalRange[1] = min(kernelLoopEvalRange[0] + blockDim.x,xEvalRange[1]);
int blockMemoryRange[2];
blockMemoryRange[0] = kernelLoopEvalRange[0] - rvLeftAlpha;
blockMemoryRange[1] = min(kernelLoopEvalRange[1] + rvRightAlpha, xEvalRange[1] + rvRightAlpha);
localMemPos = threadIdx.x;
while(localMemPos < blockMemoryRange[1] - blockMemoryRange[0]+1) {
globalMemLocation = min(max(blockMemoryRange[0] + localMemPos,0), rvVectorLengths-1);
xSharedMem[localMemPos] = pX[globalMemLocation];
localMemPos = localMemPos + blockDim.x;
}
__syncthreads();
if(outputRow >= (2 * rvPrePostLag + 1)) {
return;
}
float cov = kernel_cov(ySharedMem, xSharedMem + threadIdx.x, rvWindowSize + 1);
pOut[outputLocation] = cov;
//pOut[outputLocation] = blockMemoryRange[1];
}
void RunCuda(float *pStaticVector, float *pMovingVector, float *pOutput, int pNumElements, int pWindowSize, int pPrePostLag) {
int deviceCount;
MEX_CHECK_RETURN(cudaGetDeviceCount(&deviceCount));
if(deviceCount == 2) {
cudaSetDevice(1);
}
// Calculate the size of the output
int heightOut = pPrePostLag * 2 + 1;
int widthOut = pNumElements;
int outputLength = heightOut * widthOut;
int leftAlpha = ceil(pWindowSize/2.0f);
int rightAlpha = floor(pWindowSize/2.0f);
int numElsX = pWindowSize + 2 * pPrePostLag + 1;
int numElsY = pWindowSize;
int smXSz = numElsX*sizeof(float);
int smYSz = numElsY*sizeof(float);
//THIS WORKS
int kernelRunsPerSample = 1;
int threadsPerBlock = ceil(heightOut / 32.0f) * 32;
while (threadsPerBlock > 512) {
kernelRunsPerSample ++;
threadsPerBlock = ceil((heightOut / kernelRunsPerSample)/32.0f)*32;
}
//Need to setup the number of blocks horizontally
int verticalBlocksInGrid = 1;
int horizontalBlocksInGrid = int(pNumElements * 1.0f / verticalBlocksInGrid);
while(horizontalBlocksInGrid > 65535) {
verticalBlocksInGrid++;
horizontalBlocksInGrid = int(pNumElements * 1.0f / verticalBlocksInGrid);
}
//Now allocate the device data;
float *deviceX, *deviceY, *deviceOutput;
//mexPrintf("Allocating device input memory of length %i\n",pNumElements);
MEX_CHECK_RETURN(cudaMalloc(&deviceX, sizeof(float) * pNumElements));
MEX_CHECK_RETURN(cudaMalloc(&deviceY, sizeof(float) * pNumElements));
//mexPrintf("Allocating device output (%ix%i) memory of length %.2fMB (%i bytes)\n",heightOut, widthOut, outputLength/1024.0f/1024.0f*sizeof(float),outputLength*sizeof(float));
MEX_CHECK_RETURN(cudaMalloc(&deviceOutput, sizeof(float) * outputLength));
//mexPrintf("Setting constants\n");
MEX_CHECK_RETURN(cudaMemcpyToSymbol("rvVectorLengths", &pNumElements,sizeof(int)));
MEX_CHECK_RETURN(cudaMemcpyToSymbol("rvWindowSize", &pWindowSize,sizeof(int)));
MEX_CHECK_RETURN(cudaMemcpyToSymbol("rvPrePostLag", &pPrePostLag,sizeof(int)));
MEX_CHECK_RETURN(cudaMemcpyToSymbol("rvLeftAlpha", &leftAlpha,sizeof(int)));
MEX_CHECK_RETURN(cudaMemcpyToSymbol("rvRightAlpha", &rightAlpha,sizeof(int)));
MEX_CHECK_RETURN(cudaMemcpyToSymbol("rvNumElsX", &numElsX, sizeof(int)));
MEX_CHECK_RETURN(cudaMemcpyToSymbol("rvNumElsY", &numElsY, sizeof(int)));
MEX_CHECK_RETURN(cudaMemcpyToSymbol("rvSharedMemorySizeX", &smXSz, sizeof(int)));
MEX_CHECK_RETURN(cudaMemcpyToSymbol("rvSharedMemorySizeY", &smYSz, sizeof(int)));
//mexPrintf("Copying local memory to device\n");
MEX_CHECK_RETURN(cudaMemcpy( deviceX, pStaticVector, sizeof(float) * pNumElements, cudaMemcpyHostToDevice ));
MEX_CHECK_RETURN(cudaMemcpy( deviceY, pMovingVector, sizeof(float) * pNumElements, cudaMemcpyHostToDevice ));
//Don't think we need to do this
//MEX_CHECK_RETURN(cudaMemcpy( deviceOutput, pOutput, sizeof(float) * outputLength, cudaMemcpyHostToDevice ));
dim3 dimGrid(horizontalBlocksInGrid, verticalBlocksInGrid);
dim3 dimBlock(threadsPerBlock);
//kernelRunsPerSample = 1; //mexPrintf("**HARDCODED TO kernelRunsPerSample=%i**\n",kernelRunsPerSample);
//mexPrintf("Starting CUDA!! will run kernel %ix times with Grid dimension %ix%i, TBP(%i)\n", kernelRunsPerSample, horizontalBlocksInGrid, verticalBlocksInGrid, threadsPerBlock);
for(int runNum = 0; runNum < kernelRunsPerSample; runNum++) {
windowed_cov<<<dimGrid, dimBlock,smXSz + smYSz>>>(deviceX, deviceY, deviceOutput, runNum * threadsPerBlock);
MEX_CHECK_RETURN(cudaDeviceSynchronize());
MEX_CHECK_RETURN(cudaGetLastError());
}
//mexPrintf("Pulling results from GPU\n");
MEX_CHECK_RETURN(cudaMemcpy( pOutput, deviceOutput, sizeof(float) * outputLength, cudaMemcpyDeviceToHost ));
//mexPrintf("Freeing device output memory\n");
MEX_CHECK_RETURN(cudaFree(deviceOutput));
//mexPrintf("Freeing device memory\n");
MEX_CHECK_RETURN(cudaFree(deviceX));
MEX_CHECK_RETURN(cudaFree(deviceY));
return;
}
|
630adac1cf7cfa42df5ad5d04207dcbcb03ba7e9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <sys/time.h>
#include <math.h>
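// Hit-or-miss sampling: each thread draws one (x, y) pair from the unit square and records whether y falls under the standard normal pdf at x.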
__global__ void kernel(int* count_d, float* randomnums)
{
int i;
double x,y,z;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
i = tid;
int xidx = 0, yidx = 0;
xidx = (i+i);
yidx = (xidx+1);
x = randomnums[xidx];
y = randomnums[yidx];
z = 1/sqrt(2*M_PI) * exp(-0.5*pow(x,2));
if (y<=z)
count_d[tid] = 1;
else
count_d[tid] = 0;
}
void CUDAErrorCheck()
{
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
printf("CUDA error : %s (%d)\n", hipGetErrorString(error), error);
exit(0);
}
}
int main(int argc,char* argv[])
{
int niter = atoi(argv[1]);
int repetitions = 3;
int j = 0;
for (j=0; j<repetitions; j++)
{
float *randomnums;
double phi;
hipMalloc((void**)&randomnums, (2*niter)*sizeof(float));
// Use CuRand to generate an array of random numbers on the device
int status;
hiprandGenerator_t gen;
status = hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MRG32K3A);
status |= hiprandSetPseudoRandomGeneratorSeed(gen, 2138+j);
// status |= hiprandSetPseudoRandomGeneratorSeed(gen, 4294967296ULL^time(NULL));
status |= hiprandGenerateUniform(gen, randomnums, (2*niter));
status |= hiprandDestroyGenerator(gen);
if (status != HIPRAND_STATUS_SUCCESS)
{
printf("CuRand Failure\n");
exit(EXIT_FAILURE);
}
int threads = 1000;
int blocks = 100;
int* count_d;
int *count = (int*)malloc(blocks*threads*sizeof(int));
unsigned int reducedcount = 0;
hipMalloc((void**)&count_d, (blocks*threads)*sizeof(int));
CUDAErrorCheck();
struct timeval begin, end;
gettimeofday(&begin, NULL);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//one point per thread
hipLaunchKernelGGL(( kernel) , dim3(blocks), dim3(threads), 0, 0, count_d, randomnums);
hipDeviceSynchronize();
CUDAErrorCheck();
hipMemcpy(count, count_d, blocks*threads*sizeof(int), hipMemcpyDeviceToHost);
int i = 0;
//reduce array into int
for(i = 0; i<niter; i++)
reducedcount += count[i];
hipEventRecord(stop, 0);
float elapsedTime = 0;
hipEventElapsedTime(&elapsedTime, start, stop);
gettimeofday(&end, NULL);
double elapsed = (end.tv_sec - begin.tv_sec) + ((end.tv_usec - begin.tv_usec)/1000000.0);
hipFree(randomnums);
hipFree(count_d);
free(count);
hipEventDestroy(start);
hipEventDestroy(stop);
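	// The hit fraction estimates the integral of the pdf over [0,1],
	// i.e. Phi(1) - Phi(0) = Phi(1) - 0.5, so adding 0.5 recovers the
	// area to the left of 1 (about 0.8413).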
phi = ((double)reducedcount/niter)*1.0 + 0.5;
printf("CUDA - area to left of 1 on standard normal: %f\n", phi);
//printf("runtime: %f\n", elapsedTime);
printf("runtime: %f\n", elapsed);
//printf("runtime: %f\n", seconds);
}
return 0;
}
| 630adac1cf7cfa42df5ad5d04207dcbcb03ba7e9.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <sys/time.h>
#include <math.h>
__global__ void kernel(int* count_d, float* randomnums)
{
int i;
double x,y,z;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
i = tid;
int xidx = 0, yidx = 0;
xidx = (i+i);
yidx = (xidx+1);
x = randomnums[xidx];
y = randomnums[yidx];
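	// z is the standard normal pdf at x; because the pdf never exceeds
	// 1/sqrt(2*pi) ~= 0.399, comparing a uniform y in (0,1] against it
	// performs hit-or-miss integration of the pdf over the unit square.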
z = 1/sqrt(2*M_PI) * exp(-0.5*pow(x,2));
if (y<=z)
count_d[tid] = 1;
else
count_d[tid] = 0;
}
void CUDAErrorCheck()
{
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("CUDA error : %s (%d)\n", cudaGetErrorString(error), error);
exit(0);
}
}
int main(int argc,char* argv[])
{
int niter = atoi(argv[1]);
int repetitions = 3;
int j = 0;
for (j=0; j<repetitions; j++)
{
float *randomnums;
double phi;
cudaMalloc((void**)&randomnums, (2*niter)*sizeof(float));
// Use CuRand to generate an array of random numbers on the device
int status;
curandGenerator_t gen;
status = curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MRG32K3A);
status |= curandSetPseudoRandomGeneratorSeed(gen, 2138+j);
// status |= curandSetPseudoRandomGeneratorSeed(gen, 4294967296ULL^time(NULL));
status |= curandGenerateUniform(gen, randomnums, (2*niter));
status |= curandDestroyGenerator(gen);
if (status != CURAND_STATUS_SUCCESS)
{
printf("CuRand Failure\n");
exit(EXIT_FAILURE);
}
int threads = 1000;
int blocks = 100;
int* count_d;
int *count = (int*)malloc(blocks*threads*sizeof(int));
unsigned int reducedcount = 0;
cudaMalloc((void**)&count_d, (blocks*threads)*sizeof(int));
CUDAErrorCheck();
struct timeval begin, end;
gettimeofday(&begin, NULL);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//one point per thread
kernel <<<blocks, threads>>> (count_d, randomnums);
cudaDeviceSynchronize();
CUDAErrorCheck();
cudaMemcpy(count, count_d, blocks*threads*sizeof(int), cudaMemcpyDeviceToHost);
int i = 0;
//reduce array into int
for(i = 0; i<niter; i++)
reducedcount += count[i];
cudaEventRecord(stop, 0);
float elapsedTime = 0;
cudaEventElapsedTime(&elapsedTime, start, stop);
gettimeofday(&end, NULL);
double elapsed = (end.tv_sec - begin.tv_sec) + ((end.tv_usec - begin.tv_usec)/1000000.0);
cudaFree(randomnums);
cudaFree(count_d);
free(count);
cudaEventDestroy(start);
cudaEventDestroy(stop);
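	// The hit fraction estimates the integral of the pdf over [0,1],
	// i.e. Phi(1) - Phi(0) = Phi(1) - 0.5, so adding 0.5 recovers the
	// area to the left of 1 (about 0.8413).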
phi = ((double)reducedcount/niter)*1.0 + 0.5;
printf("CUDA - area to left of 1 on standard normal: %f\n", phi);
//printf("runtime: %f\n", elapsedTime);
printf("runtime: %f\n", elapsed);
//printf("runtime: %f\n", seconds);
}
return 0;
}
|
d8106f4ab695b547eb3d79b42bd41a6ae4d02c89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "decoder_masked_multihead_attention_template.hpp"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention.h"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h"
#include "src/fastertransformer/utils/cuda_bf16_wrapper.h"
#include <assert.h>
#include <float.h>
#include <type_traits>
////////////////////////////////////////////////////////////////////////////////////////////////////
#define MMHA_LAUNCH_KERNEL( \
T, Dh, Dh_MAX, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK, DO_CROSS_ATTENTION, HAS_BEAMS, stream) \
size_t smem_sz = mmha::smem_size_in_bytes<T, DO_CROSS_ATTENTION>(params, THDS_PER_VALUE, THDS_PER_BLOCK); \
dim3 grid(params.num_heads, params.batch_size); \
hipLaunchKernelGGL(( mmha::masked_multihead_attention_kernel<T, \
Dh, \
Dh_MAX, \
THDS_PER_KEY, \
THDS_PER_VALUE, \
THDS_PER_BLOCK, \
DO_CROSS_ATTENTION, \
HAS_BEAMS>), dim3(grid), dim3(THDS_PER_BLOCK), smem_sz, stream, params)
////////////////////////////////////////////////////////////////////////////////////////////////////
// !!! Specialize the launcher for Cross attention
template<typename T, int Dh, int Dh_MAX, typename KERNEL_PARAMS_TYPE>
void mmha_launch_kernel(const KERNEL_PARAMS_TYPE& params, const hipStream_t& stream)
{
constexpr int THREADS_PER_VALUE = threads_per_value_t<T, Dh_MAX>::value;
constexpr bool DO_CROSS_ATTENTION = std::is_same<KERNEL_PARAMS_TYPE, Cross_multihead_attention_params<T>>::value;
int tlength = (DO_CROSS_ATTENTION) ? params.memory_max_len : params.timestep;
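    // Select a kernel specialization from the sequence length: short
    // sequences (< 32) run 4 threads per key in 64-thread blocks, medium
    // ones (< 2048) 2 threads per key in 128-thread blocks, and longer
    // ones 1 thread per key in 256-thread blocks. The HAS_BEAMS variant
    // is chosen by whether a beam-search cache indirection is provided.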
if (params.cache_indir == nullptr) {
if (tlength < 32) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, false, stream);
}
else if (tlength < 2048) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, false, stream);
}
else {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, false, stream);
}
}
else {
if (tlength < 32) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, true, stream);
}
else if (tlength < 2048) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, true, stream);
}
else {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, true, stream);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template void mmha_launch_kernel<float, 192, 256, Masked_multihead_attention_params<float>>(
const Masked_multihead_attention_params<float>& params, const hipStream_t& stream);
template void mmha_launch_kernel<uint16_t, 192, 256, Masked_multihead_attention_params<uint16_t>>(
const Masked_multihead_attention_params<uint16_t>& params, const hipStream_t& stream);
#ifdef ENABLE_BF16
template void mmha_launch_kernel<__nv_bfloat16, 192, 256, Masked_multihead_attention_params<__nv_bfloat16>>(
const Masked_multihead_attention_params<__nv_bfloat16>& params, const hipStream_t& stream);
#endif
#ifdef ENABLE_FP8
template void mmha_launch_kernel<__nv_fp8_e4m3, 192, 256, Masked_multihead_attention_params<__nv_fp8_e4m3>>(
const Masked_multihead_attention_params<__nv_fp8_e4m3>& params, const hipStream_t& stream);
#endif
template void mmha_launch_kernel<float, 192, 256, Cross_multihead_attention_params<float>>(
const Cross_multihead_attention_params<float>& params, const hipStream_t& stream);
template void mmha_launch_kernel<uint16_t, 192, 256, Cross_multihead_attention_params<uint16_t>>(
const Cross_multihead_attention_params<uint16_t>& params, const hipStream_t& stream);
#ifdef ENABLE_BF16
template void mmha_launch_kernel<__nv_bfloat16, 192, 256, Cross_multihead_attention_params<__nv_bfloat16>>(
const Cross_multihead_attention_params<__nv_bfloat16>& params, const hipStream_t& stream);
#endif
#ifdef ENABLE_FP8
template void mmha_launch_kernel<__nv_fp8_e4m3, 192, 256, Cross_multihead_attention_params<__nv_fp8_e4m3>>(
const Cross_multihead_attention_params<__nv_fp8_e4m3>& params, const hipStream_t& stream);
#endif
#undef MMHA_LAUNCH_KERNEL
| d8106f4ab695b547eb3d79b42bd41a6ae4d02c89.cu | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "decoder_masked_multihead_attention_template.hpp"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention.h"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h"
#include "src/fastertransformer/utils/cuda_bf16_wrapper.h"
#include <assert.h>
#include <float.h>
#include <type_traits>
////////////////////////////////////////////////////////////////////////////////////////////////////
#define MMHA_LAUNCH_KERNEL( \
T, Dh, Dh_MAX, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK, DO_CROSS_ATTENTION, HAS_BEAMS, stream) \
size_t smem_sz = mmha::smem_size_in_bytes<T, DO_CROSS_ATTENTION>(params, THDS_PER_VALUE, THDS_PER_BLOCK); \
dim3 grid(params.num_heads, params.batch_size); \
mmha::masked_multihead_attention_kernel<T, \
Dh, \
Dh_MAX, \
THDS_PER_KEY, \
THDS_PER_VALUE, \
THDS_PER_BLOCK, \
DO_CROSS_ATTENTION, \
HAS_BEAMS><<<grid, THDS_PER_BLOCK, smem_sz, stream>>>(params)
////////////////////////////////////////////////////////////////////////////////////////////////////
// !!! Specialize the launcher for Cross attention
template<typename T, int Dh, int Dh_MAX, typename KERNEL_PARAMS_TYPE>
void mmha_launch_kernel(const KERNEL_PARAMS_TYPE& params, const cudaStream_t& stream)
{
constexpr int THREADS_PER_VALUE = threads_per_value_t<T, Dh_MAX>::value;
constexpr bool DO_CROSS_ATTENTION = std::is_same<KERNEL_PARAMS_TYPE, Cross_multihead_attention_params<T>>::value;
int tlength = (DO_CROSS_ATTENTION) ? params.memory_max_len : params.timestep;
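    // Select a kernel specialization from the sequence length: short
    // sequences (< 32) run 4 threads per key in 64-thread blocks, medium
    // ones (< 2048) 2 threads per key in 128-thread blocks, and longer
    // ones 1 thread per key in 256-thread blocks. The HAS_BEAMS variant
    // is chosen by whether a beam-search cache indirection is provided.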
if (params.cache_indir == nullptr) {
if (tlength < 32) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, false, stream);
}
else if (tlength < 2048) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, false, stream);
}
else {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, false, stream);
}
}
else {
if (tlength < 32) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, true, stream);
}
else if (tlength < 2048) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, true, stream);
}
else {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, true, stream);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template void mmha_launch_kernel<float, 192, 256, Masked_multihead_attention_params<float>>(
const Masked_multihead_attention_params<float>& params, const cudaStream_t& stream);
template void mmha_launch_kernel<uint16_t, 192, 256, Masked_multihead_attention_params<uint16_t>>(
const Masked_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream);
#ifdef ENABLE_BF16
template void mmha_launch_kernel<__nv_bfloat16, 192, 256, Masked_multihead_attention_params<__nv_bfloat16>>(
const Masked_multihead_attention_params<__nv_bfloat16>& params, const cudaStream_t& stream);
#endif
#ifdef ENABLE_FP8
template void mmha_launch_kernel<__nv_fp8_e4m3, 192, 256, Masked_multihead_attention_params<__nv_fp8_e4m3>>(
const Masked_multihead_attention_params<__nv_fp8_e4m3>& params, const cudaStream_t& stream);
#endif
template void mmha_launch_kernel<float, 192, 256, Cross_multihead_attention_params<float>>(
const Cross_multihead_attention_params<float>& params, const cudaStream_t& stream);
template void mmha_launch_kernel<uint16_t, 192, 256, Cross_multihead_attention_params<uint16_t>>(
const Cross_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream);
#ifdef ENABLE_BF16
template void mmha_launch_kernel<__nv_bfloat16, 192, 256, Cross_multihead_attention_params<__nv_bfloat16>>(
const Cross_multihead_attention_params<__nv_bfloat16>& params, const cudaStream_t& stream);
#endif
#ifdef ENABLE_FP8
template void mmha_launch_kernel<__nv_fp8_e4m3, 192, 256, Cross_multihead_attention_params<__nv_fp8_e4m3>>(
const Cross_multihead_attention_params<__nv_fp8_e4m3>& params, const cudaStream_t& stream);
#endif
#undef MMHA_LAUNCH_KERNEL
|
972365483d44244436cece97007076cc0de835c8.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/set_operations.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/gather.h>
#include "cupoch/camera/pinhole_camera_parameters.h"
#include "cupoch/geometry/boundingvolume.h"
#include "cupoch/geometry/geometry_functor.h"
#include "cupoch/geometry/image.h"
#include "cupoch/geometry/voxelgrid.h"
#include "cupoch/utility/platform.h"
using namespace cupoch;
using namespace cupoch::geometry;
namespace {
struct extract_grid_index_functor {
__device__ Eigen::Vector3i operator()(const Voxel &voxel) const {
return voxel.grid_index_;
}
};
__host__ __device__ void GetVoxelBoundingPoints(const Eigen::Vector3f &x,
float r,
Eigen::Vector3f points[8]) {
points[0] = x + Eigen::Vector3f(-r, -r, -r);
points[1] = x + Eigen::Vector3f(-r, -r, r);
points[2] = x + Eigen::Vector3f(r, -r, -r);
points[3] = x + Eigen::Vector3f(r, -r, r);
points[4] = x + Eigen::Vector3f(-r, r, -r);
points[5] = x + Eigen::Vector3f(-r, r, r);
points[6] = x + Eigen::Vector3f(r, r, -r);
points[7] = x + Eigen::Vector3f(r, r, r);
}
struct compute_carve_functor {
compute_carve_functor(const uint8_t *image,
int width,
int height,
int num_of_channels,
int bytes_per_channel,
float voxel_size,
const Eigen::Vector3f &origin,
const Eigen::Matrix3f &intrinsic,
const Eigen::Matrix3f &rot,
const Eigen::Vector3f &trans,
bool keep_voxels_outside_image)
: image_(image),
width_(width),
height_(height),
num_of_channels_(num_of_channels),
bytes_per_channel_(bytes_per_channel),
voxel_size_(voxel_size),
origin_(origin),
intrinsic_(intrinsic),
rot_(rot),
trans_(trans),
keep_voxels_outside_image_(keep_voxels_outside_image){};
const uint8_t *image_;
const int width_;
const int height_;
const int num_of_channels_;
const int bytes_per_channel_;
const float voxel_size_;
const Eigen::Vector3f origin_;
const Eigen::Matrix3f intrinsic_;
const Eigen::Matrix3f rot_;
const Eigen::Vector3f trans_;
bool keep_voxels_outside_image_;
__device__ bool operator()(
const thrust::tuple<Eigen::Vector3i, Voxel> &voxel) const {
bool carve = true;
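        // A voxel survives carving if any of its 8 corner points either
        // projects outside the image while keep_voxels_outside_image_ is set,
        // or lands on a pixel with a positive stored value d such that the
        // corner's camera-space depth z satisfies z >= d.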
float r = voxel_size_ / 2.0;
const Voxel &vxl = thrust::get<1>(voxel);
auto x = ((vxl.grid_index_.cast<float>() +
Eigen::Vector3f(0.5, 0.5, 0.5)) *
voxel_size_) +
origin_;
Eigen::Vector3f pts[8];
GetVoxelBoundingPoints(x, r, pts);
#pragma unroll
for (int i = 0; i < 8; ++i) {
auto x_trans = rot_ * pts[i] + trans_;
auto uvz = intrinsic_ * x_trans;
float z = uvz(2);
float u = uvz(0) / z;
float v = uvz(1) / z;
float d;
bool within_boundary;
thrust::tie(within_boundary, d) =
FloatValueAt(image_, u, v, width_, height_,
num_of_channels_, bytes_per_channel_);
if ((!within_boundary && keep_voxels_outside_image_) ||
(within_boundary && d > 0 && z >= d)) {
carve = false;
break;
}
}
return carve;
}
};
} // namespace
VoxelGrid::VoxelGrid() : GeometryBase3D(Geometry::GeometryType::VoxelGrid) {}
VoxelGrid::~VoxelGrid() {}
VoxelGrid::VoxelGrid(const VoxelGrid &src_voxel_grid)
: GeometryBase3D(Geometry::GeometryType::VoxelGrid),
voxel_size_(src_voxel_grid.voxel_size_),
origin_(src_voxel_grid.origin_),
voxels_keys_(src_voxel_grid.voxels_keys_),
voxels_values_(src_voxel_grid.voxels_values_) {}
std::pair<thrust::host_vector<Eigen::Vector3i>, thrust::host_vector<Voxel>>
VoxelGrid::GetVoxels() const {
thrust::host_vector<Eigen::Vector3i> h_keys = voxels_keys_;
thrust::host_vector<Voxel> h_values = voxels_values_;
return std::make_pair(h_keys, h_values);
}
void VoxelGrid::SetVoxels(
const thrust::host_vector<Eigen::Vector3i> &voxels_keys,
const thrust::host_vector<Voxel> &voxels_values) {
voxels_keys_ = voxels_keys;
voxels_values_ = voxels_values;
}
VoxelGrid &VoxelGrid::Clear() {
voxel_size_ = 0.0;
origin_ = Eigen::Vector3f::Zero();
voxels_keys_.clear();
voxels_values_.clear();
return *this;
}
bool VoxelGrid::IsEmpty() const { return voxels_keys_.empty(); }
Eigen::Vector3f VoxelGrid::GetMinBound() const {
if (voxels_keys_.empty()) {
return origin_;
} else {
Eigen::Vector3i init = voxels_keys_[0];
Eigen::Vector3i min_grid_index =
thrust::reduce(utility::exec_policy(0),
voxels_keys_.begin(), voxels_keys_.end(), init,
thrust::elementwise_minimum<Eigen::Vector3i>());
return min_grid_index.cast<float>() * voxel_size_ + origin_;
}
}
Eigen::Vector3f VoxelGrid::GetMaxBound() const {
if (voxels_keys_.empty()) {
return origin_;
} else {
Eigen::Vector3i init = voxels_keys_[0];
Eigen::Vector3i max_grid_index =
thrust::reduce(utility::exec_policy(0),
voxels_keys_.begin(), voxels_keys_.end(), init,
thrust::elementwise_maximum<Eigen::Vector3i>());
return (max_grid_index.cast<float>() + Eigen::Vector3f::Ones()) *
voxel_size_ +
origin_;
}
}
Eigen::Vector3f VoxelGrid::GetCenter() const {
Eigen::Vector3f init(0, 0, 0);
if (voxels_keys_.empty()) {
return init;
}
compute_grid_center_functor func(voxel_size_, origin_);
Eigen::Vector3f center = thrust::transform_reduce(
utility::exec_policy(0), voxels_keys_.begin(),
voxels_keys_.end(), func, init, thrust::plus<Eigen::Vector3f>());
center /= float(voxels_values_.size());
return center;
}
AxisAlignedBoundingBox<3> VoxelGrid::GetAxisAlignedBoundingBox() const {
AxisAlignedBoundingBox<3> box;
box.min_bound_ = GetMinBound();
box.max_bound_ = GetMaxBound();
return box;
}
OrientedBoundingBox VoxelGrid::GetOrientedBoundingBox() const {
return OrientedBoundingBox::CreateFromAxisAlignedBoundingBox(
GetAxisAlignedBoundingBox());
}
VoxelGrid &VoxelGrid::Transform(const Eigen::Matrix4f &transformation) {
utility::LogError("VoxelGrid::Transform is not supported");
return *this;
}
VoxelGrid &VoxelGrid::Translate(const Eigen::Vector3f &translation,
bool relative) {
origin_ += translation;
return *this;
}
VoxelGrid &VoxelGrid::Scale(const float scale, bool center) {
voxel_size_ *= scale;
return *this;
}
VoxelGrid &VoxelGrid::Rotate(const Eigen::Matrix3f &R, bool center) {
utility::LogError("VoxelGrid::Rotate is not supported");
return *this;
}
VoxelGrid &VoxelGrid::operator+=(const VoxelGrid &voxelgrid) {
if (voxel_size_ != voxelgrid.voxel_size_) {
utility::LogError(
"[VoxelGrid] Could not combine VoxelGrid because voxel_size "
"differs (this=%f, other=%f)",
voxel_size_, voxelgrid.voxel_size_);
}
if (origin_ != voxelgrid.origin_) {
utility::LogError(
"[VoxelGrid] Could not combine VoxelGrid because origin "
"differs (this=%f,%f,%f, other=%f,%f,%f)",
origin_(0), origin_(1), origin_(2), voxelgrid.origin_(0),
voxelgrid.origin_(1), voxelgrid.origin_(2));
}
if (this->HasColors() != voxelgrid.HasColors()) {
utility::LogError(
"[VoxelGrid] Could not combine VoxelGrid one has colors and "
"the other not.");
}
if (voxelgrid.HasColors()) {
voxels_keys_.insert(voxels_keys_.end(), voxelgrid.voxels_keys_.begin(),
voxelgrid.voxels_keys_.end());
voxels_values_.insert(voxels_values_.end(),
voxelgrid.voxels_values_.begin(),
voxelgrid.voxels_values_.end());
thrust::sort_by_key(utility::exec_policy(0),
voxels_keys_.begin(), voxels_keys_.end(),
voxels_values_.begin());
utility::device_vector<int> counts(voxels_keys_.size());
utility::device_vector<Eigen::Vector3i> new_keys(voxels_keys_.size());
auto end = thrust::reduce_by_key(
utility::exec_policy(0), voxels_keys_.begin(),
voxels_keys_.end(),
make_tuple_iterator(voxels_values_.begin(),
thrust::make_constant_iterator(1)),
new_keys.begin(), make_tuple_begin(voxels_values_, counts),
thrust::equal_to<Eigen::Vector3i>(), add_voxel_color_functor());
resize_all(thrust::distance(new_keys.begin(), end.first), new_keys,
voxels_values_);
thrust::swap(voxels_keys_, new_keys);
thrust::transform(voxels_values_.begin(), voxels_values_.end(),
counts.begin(), voxels_values_.begin(),
devide_voxel_color_functor());
} else {
this->AddVoxels(voxelgrid.voxels_values_);
}
return *this;
}
VoxelGrid VoxelGrid::operator+(const VoxelGrid &voxelgrid) const {
return (VoxelGrid(*this) += voxelgrid);
}
void VoxelGrid::AddVoxel(const Voxel &voxel) {
voxels_keys_.push_back(voxel.grid_index_);
voxels_values_.push_back(voxel);
thrust::sort_by_key(utility::exec_policy(0), voxels_keys_.begin(),
voxels_keys_.end(), voxels_values_.begin());
auto end = thrust::unique_by_key(utility::exec_policy(0),
voxels_keys_.begin(), voxels_keys_.end(),
voxels_values_.begin());
resize_all(thrust::distance(voxels_keys_.begin(), end.first), voxels_keys_,
voxels_values_);
}
void VoxelGrid::AddVoxels(const utility::device_vector<Voxel> &voxels) {
voxels_keys_.insert(voxels_keys_.end(),
thrust::make_transform_iterator(
voxels.begin(), extract_grid_index_functor()),
thrust::make_transform_iterator(
voxels.end(), extract_grid_index_functor()));
voxels_values_.insert(voxels_values_.end(), voxels.begin(), voxels.end());
thrust::sort_by_key(utility::exec_policy(0), voxels_keys_.begin(),
voxels_keys_.end(), voxels_values_.begin());
auto end = thrust::unique_by_key(utility::exec_policy(0),
voxels_keys_.begin(), voxels_keys_.end(),
voxels_values_.begin());
resize_all(thrust::distance(voxels_keys_.begin(), end.first), voxels_keys_,
voxels_values_);
}
void VoxelGrid::AddVoxels(const thrust::host_vector<Voxel> &voxels) {
utility::device_vector<Voxel> voxels_dev = voxels;
AddVoxels(voxels_dev);
}
VoxelGrid &VoxelGrid::PaintUniformColor(const Eigen::Vector3f &color) {
thrust::for_each(voxels_values_.begin(), voxels_values_.end(),
[c = color] __device__(Voxel & v) { v.color_ = c; });
return *this;
}
VoxelGrid &VoxelGrid::PaintIndexedColor(
const utility::device_vector<size_t> &indices,
const Eigen::Vector3f &color) {
thrust::for_each(thrust::make_permutation_iterator(voxels_values_.begin(),
indices.begin()),
thrust::make_permutation_iterator(voxels_values_.begin(),
indices.end()),
[c = color] __device__(Voxel & v) { v.color_ = c; });
return *this;
}
Eigen::Vector3i VoxelGrid::GetVoxel(const Eigen::Vector3f &point) const {
Eigen::Vector3f voxel_f = (point - origin_) / voxel_size_;
return (Eigen::floor(voxel_f.array())).cast<int>();
}
Eigen::Vector3f VoxelGrid::GetVoxelCenterCoordinate(
const Eigen::Vector3i &idx) const {
auto it = thrust::find(voxels_keys_.begin(), voxels_keys_.end(), idx);
if (it != voxels_keys_.end()) {
Eigen::Vector3i voxel_idx = *it;
return ((voxel_idx.cast<float>() + Eigen::Vector3f(0.5, 0.5, 0.5)) *
voxel_size_) +
origin_;
} else {
return Eigen::Vector3f::Zero();
}
}
std::array<Eigen::Vector3f, 8> VoxelGrid::GetVoxelBoundingPoints(
const Eigen::Vector3i &index) const {
float r = voxel_size_ / 2.0;
auto x = GetVoxelCenterCoordinate(index);
std::array<Eigen::Vector3f, 8> points;
::GetVoxelBoundingPoints(x, r, points.data());
return points;
}
thrust::host_vector<bool> VoxelGrid::CheckIfIncluded(
const thrust::host_vector<Eigen::Vector3f> &queries) {
thrust::host_vector<bool> output;
output.resize(queries.size());
for (size_t i = 0; i < queries.size(); ++i) {
auto query = GetVoxel(queries[i]);
auto itr =
thrust::find(voxels_keys_.begin(), voxels_keys_.end(), query);
output[i] = (itr != voxels_keys_.end());
}
return output;
}
VoxelGrid &VoxelGrid::CarveDepthMap(
const Image &depth_map,
const camera::PinholeCameraParameters &camera_parameter,
bool keep_voxels_outside_image) {
if (depth_map.height_ != camera_parameter.intrinsic_.height_ ||
depth_map.width_ != camera_parameter.intrinsic_.width_) {
utility::LogError(
"[VoxelGrid] provided depth_map dimensions are not compatible "
"with the provided camera_parameters");
}
auto rot = camera_parameter.extrinsic_.block<3, 3>(0, 0);
auto trans = camera_parameter.extrinsic_.block<3, 1>(0, 3);
auto intrinsic = camera_parameter.intrinsic_.intrinsic_matrix_;
// get for each voxel if it projects to a valid pixel and check if the voxel
// depth is behind the depth of the depth map at the projected pixel.
compute_carve_functor func(
thrust::raw_pointer_cast(depth_map.data_.data()), depth_map.width_,
depth_map.height_, depth_map.num_of_channels_,
depth_map.bytes_per_channel_, voxel_size_, origin_, intrinsic, rot,
trans, keep_voxels_outside_image);
remove_if_vectors(utility::exec_policy(0), func, voxels_keys_,
voxels_values_);
return *this;
}
VoxelGrid &VoxelGrid::CarveSilhouette(
const Image &silhouette_mask,
const camera::PinholeCameraParameters &camera_parameter,
bool keep_voxels_outside_image) {
if (silhouette_mask.height_ != camera_parameter.intrinsic_.height_ ||
silhouette_mask.width_ != camera_parameter.intrinsic_.width_) {
utility::LogError(
"[VoxelGrid] provided silhouette_mask dimensions are not "
"compatible with the provided camera_parameters");
}
auto rot = camera_parameter.extrinsic_.block<3, 3>(0, 0);
auto trans = camera_parameter.extrinsic_.block<3, 1>(0, 3);
auto intrinsic = camera_parameter.intrinsic_.intrinsic_matrix_;
// get for each voxel if it projects to a valid pixel and check if the pixel
// is set (>0).
compute_carve_functor func(
thrust::raw_pointer_cast(silhouette_mask.data_.data()),
silhouette_mask.width_, silhouette_mask.height_,
silhouette_mask.num_of_channels_,
silhouette_mask.bytes_per_channel_, voxel_size_, origin_, intrinsic,
rot, trans, keep_voxels_outside_image);
remove_if_vectors(func, voxels_keys_, voxels_values_);
return *this;
}
std::shared_ptr<VoxelGrid> VoxelGrid::SelectByIndex(
const utility::device_vector<size_t> &indices,
bool invert) {
auto dst = std::make_shared<VoxelGrid>();
if (invert) {
size_t n_out = voxels_values_.size() - indices.size();
utility::device_vector<size_t> sorted_indices = indices;
thrust::sort(utility::exec_policy(0), sorted_indices.begin(),
sorted_indices.end());
utility::device_vector<size_t> inv_indices(n_out);
thrust::set_difference(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(voxels_values_.size()),
sorted_indices.begin(), sorted_indices.end(),
inv_indices.begin());
dst->voxels_values_.resize(inv_indices.size());
dst->voxels_keys_.resize(inv_indices.size());
dst->voxel_size_ = voxel_size_;
dst->origin_ = origin_;
thrust::gather(utility::exec_policy(utility::GetStream(0)),
inv_indices.begin(), inv_indices.end(), voxels_values_.begin(),
dst->voxels_values_.begin());
thrust::gather(utility::exec_policy(utility::GetStream(0)),
inv_indices.begin(), inv_indices.end(), voxels_keys_.begin(),
dst->voxels_keys_.begin());
cudaSafeCall(hipDeviceSynchronize());
} else {
dst->voxels_values_.resize(indices.size());
dst->voxels_keys_.resize(indices.size());
dst->voxel_size_ = voxel_size_;
dst->origin_ = origin_;
thrust::gather(utility::exec_policy(utility::GetStream(0)),
indices.begin(), indices.end(), voxels_values_.begin(),
dst->voxels_values_.begin());
thrust::gather(utility::exec_policy(utility::GetStream(0)),
indices.begin(), indices.end(), voxels_keys_.begin(),
dst->voxels_keys_.begin());
cudaSafeCall(hipDeviceSynchronize());
}
return dst;
}
| 972365483d44244436cece97007076cc0de835c8.cu | /**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/set_operations.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/gather.h>
#include "cupoch/camera/pinhole_camera_parameters.h"
#include "cupoch/geometry/boundingvolume.h"
#include "cupoch/geometry/geometry_functor.h"
#include "cupoch/geometry/image.h"
#include "cupoch/geometry/voxelgrid.h"
#include "cupoch/utility/platform.h"
using namespace cupoch;
using namespace cupoch::geometry;
namespace {
struct extract_grid_index_functor {
__device__ Eigen::Vector3i operator()(const Voxel &voxel) const {
return voxel.grid_index_;
}
};
__host__ __device__ void GetVoxelBoundingPoints(const Eigen::Vector3f &x,
float r,
Eigen::Vector3f points[8]) {
points[0] = x + Eigen::Vector3f(-r, -r, -r);
points[1] = x + Eigen::Vector3f(-r, -r, r);
points[2] = x + Eigen::Vector3f(r, -r, -r);
points[3] = x + Eigen::Vector3f(r, -r, r);
points[4] = x + Eigen::Vector3f(-r, r, -r);
points[5] = x + Eigen::Vector3f(-r, r, r);
points[6] = x + Eigen::Vector3f(r, r, -r);
points[7] = x + Eigen::Vector3f(r, r, r);
}
struct compute_carve_functor {
compute_carve_functor(const uint8_t *image,
int width,
int height,
int num_of_channels,
int bytes_per_channel,
float voxel_size,
const Eigen::Vector3f &origin,
const Eigen::Matrix3f &intrinsic,
const Eigen::Matrix3f &rot,
const Eigen::Vector3f &trans,
bool keep_voxels_outside_image)
: image_(image),
width_(width),
height_(height),
num_of_channels_(num_of_channels),
bytes_per_channel_(bytes_per_channel),
voxel_size_(voxel_size),
origin_(origin),
intrinsic_(intrinsic),
rot_(rot),
trans_(trans),
keep_voxels_outside_image_(keep_voxels_outside_image){};
const uint8_t *image_;
const int width_;
const int height_;
const int num_of_channels_;
const int bytes_per_channel_;
const float voxel_size_;
const Eigen::Vector3f origin_;
const Eigen::Matrix3f intrinsic_;
const Eigen::Matrix3f rot_;
const Eigen::Vector3f trans_;
bool keep_voxels_outside_image_;
__device__ bool operator()(
const thrust::tuple<Eigen::Vector3i, Voxel> &voxel) const {
bool carve = true;
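        // A voxel survives carving if any of its 8 corner points either
        // projects outside the image while keep_voxels_outside_image_ is set,
        // or lands on a pixel with a positive stored value d such that the
        // corner's camera-space depth z satisfies z >= d.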
float r = voxel_size_ / 2.0;
const Voxel &vxl = thrust::get<1>(voxel);
auto x = ((vxl.grid_index_.cast<float>() +
Eigen::Vector3f(0.5, 0.5, 0.5)) *
voxel_size_) +
origin_;
Eigen::Vector3f pts[8];
GetVoxelBoundingPoints(x, r, pts);
#pragma unroll
for (int i = 0; i < 8; ++i) {
auto x_trans = rot_ * pts[i] + trans_;
auto uvz = intrinsic_ * x_trans;
float z = uvz(2);
float u = uvz(0) / z;
float v = uvz(1) / z;
float d;
bool within_boundary;
thrust::tie(within_boundary, d) =
FloatValueAt(image_, u, v, width_, height_,
num_of_channels_, bytes_per_channel_);
if ((!within_boundary && keep_voxels_outside_image_) ||
(within_boundary && d > 0 && z >= d)) {
carve = false;
break;
}
}
return carve;
}
};
} // namespace
VoxelGrid::VoxelGrid() : GeometryBase3D(Geometry::GeometryType::VoxelGrid) {}
VoxelGrid::~VoxelGrid() {}
VoxelGrid::VoxelGrid(const VoxelGrid &src_voxel_grid)
: GeometryBase3D(Geometry::GeometryType::VoxelGrid),
voxel_size_(src_voxel_grid.voxel_size_),
origin_(src_voxel_grid.origin_),
voxels_keys_(src_voxel_grid.voxels_keys_),
voxels_values_(src_voxel_grid.voxels_values_) {}
std::pair<thrust::host_vector<Eigen::Vector3i>, thrust::host_vector<Voxel>>
VoxelGrid::GetVoxels() const {
thrust::host_vector<Eigen::Vector3i> h_keys = voxels_keys_;
thrust::host_vector<Voxel> h_values = voxels_values_;
return std::make_pair(h_keys, h_values);
}
void VoxelGrid::SetVoxels(
const thrust::host_vector<Eigen::Vector3i> &voxels_keys,
const thrust::host_vector<Voxel> &voxels_values) {
voxels_keys_ = voxels_keys;
voxels_values_ = voxels_values;
}
VoxelGrid &VoxelGrid::Clear() {
voxel_size_ = 0.0;
origin_ = Eigen::Vector3f::Zero();
voxels_keys_.clear();
voxels_values_.clear();
return *this;
}
bool VoxelGrid::IsEmpty() const { return voxels_keys_.empty(); }
Eigen::Vector3f VoxelGrid::GetMinBound() const {
if (voxels_keys_.empty()) {
return origin_;
} else {
Eigen::Vector3i init = voxels_keys_[0];
Eigen::Vector3i min_grid_index =
thrust::reduce(utility::exec_policy(0),
voxels_keys_.begin(), voxels_keys_.end(), init,
thrust::elementwise_minimum<Eigen::Vector3i>());
return min_grid_index.cast<float>() * voxel_size_ + origin_;
}
}
Eigen::Vector3f VoxelGrid::GetMaxBound() const {
if (voxels_keys_.empty()) {
return origin_;
} else {
Eigen::Vector3i init = voxels_keys_[0];
Eigen::Vector3i max_grid_index =
thrust::reduce(utility::exec_policy(0),
voxels_keys_.begin(), voxels_keys_.end(), init,
thrust::elementwise_maximum<Eigen::Vector3i>());
return (max_grid_index.cast<float>() + Eigen::Vector3f::Ones()) *
voxel_size_ +
origin_;
}
}
Eigen::Vector3f VoxelGrid::GetCenter() const {
Eigen::Vector3f init(0, 0, 0);
if (voxels_keys_.empty()) {
return init;
}
compute_grid_center_functor func(voxel_size_, origin_);
Eigen::Vector3f center = thrust::transform_reduce(
utility::exec_policy(0), voxels_keys_.begin(),
voxels_keys_.end(), func, init, thrust::plus<Eigen::Vector3f>());
center /= float(voxels_values_.size());
return center;
}
AxisAlignedBoundingBox<3> VoxelGrid::GetAxisAlignedBoundingBox() const {
AxisAlignedBoundingBox<3> box;
box.min_bound_ = GetMinBound();
box.max_bound_ = GetMaxBound();
return box;
}
OrientedBoundingBox VoxelGrid::GetOrientedBoundingBox() const {
return OrientedBoundingBox::CreateFromAxisAlignedBoundingBox(
GetAxisAlignedBoundingBox());
}
VoxelGrid &VoxelGrid::Transform(const Eigen::Matrix4f &transformation) {
utility::LogError("VoxelGrid::Transform is not supported");
return *this;
}
VoxelGrid &VoxelGrid::Translate(const Eigen::Vector3f &translation,
bool relative) {
origin_ += translation;
return *this;
}
VoxelGrid &VoxelGrid::Scale(const float scale, bool center) {
voxel_size_ *= scale;
return *this;
}
VoxelGrid &VoxelGrid::Rotate(const Eigen::Matrix3f &R, bool center) {
utility::LogError("VoxelGrid::Rotate is not supported");
return *this;
}
VoxelGrid &VoxelGrid::operator+=(const VoxelGrid &voxelgrid) {
if (voxel_size_ != voxelgrid.voxel_size_) {
utility::LogError(
"[VoxelGrid] Could not combine VoxelGrid because voxel_size "
"differs (this=%f, other=%f)",
voxel_size_, voxelgrid.voxel_size_);
}
if (origin_ != voxelgrid.origin_) {
utility::LogError(
"[VoxelGrid] Could not combine VoxelGrid because origin "
"differs (this=%f,%f,%f, other=%f,%f,%f)",
origin_(0), origin_(1), origin_(2), voxelgrid.origin_(0),
voxelgrid.origin_(1), voxelgrid.origin_(2));
}
if (this->HasColors() != voxelgrid.HasColors()) {
utility::LogError(
"[VoxelGrid] Could not combine VoxelGrid one has colors and "
"the other not.");
}
if (voxelgrid.HasColors()) {
voxels_keys_.insert(voxels_keys_.end(), voxelgrid.voxels_keys_.begin(),
voxelgrid.voxels_keys_.end());
voxels_values_.insert(voxels_values_.end(),
voxelgrid.voxels_values_.begin(),
voxelgrid.voxels_values_.end());
thrust::sort_by_key(utility::exec_policy(0),
voxels_keys_.begin(), voxels_keys_.end(),
voxels_values_.begin());
utility::device_vector<int> counts(voxels_keys_.size());
utility::device_vector<Eigen::Vector3i> new_keys(voxels_keys_.size());
auto end = thrust::reduce_by_key(
utility::exec_policy(0), voxels_keys_.begin(),
voxels_keys_.end(),
make_tuple_iterator(voxels_values_.begin(),
thrust::make_constant_iterator(1)),
new_keys.begin(), make_tuple_begin(voxels_values_, counts),
thrust::equal_to<Eigen::Vector3i>(), add_voxel_color_functor());
resize_all(thrust::distance(new_keys.begin(), end.first), new_keys,
voxels_values_);
thrust::swap(voxels_keys_, new_keys);
thrust::transform(voxels_values_.begin(), voxels_values_.end(),
counts.begin(), voxels_values_.begin(),
devide_voxel_color_functor());
} else {
this->AddVoxels(voxelgrid.voxels_values_);
}
return *this;
}
VoxelGrid VoxelGrid::operator+(const VoxelGrid &voxelgrid) const {
return (VoxelGrid(*this) += voxelgrid);
}
void VoxelGrid::AddVoxel(const Voxel &voxel) {
voxels_keys_.push_back(voxel.grid_index_);
voxels_values_.push_back(voxel);
thrust::sort_by_key(utility::exec_policy(0), voxels_keys_.begin(),
voxels_keys_.end(), voxels_values_.begin());
auto end = thrust::unique_by_key(utility::exec_policy(0),
voxels_keys_.begin(), voxels_keys_.end(),
voxels_values_.begin());
resize_all(thrust::distance(voxels_keys_.begin(), end.first), voxels_keys_,
voxels_values_);
}
void VoxelGrid::AddVoxels(const utility::device_vector<Voxel> &voxels) {
voxels_keys_.insert(voxels_keys_.end(),
thrust::make_transform_iterator(
voxels.begin(), extract_grid_index_functor()),
thrust::make_transform_iterator(
voxels.end(), extract_grid_index_functor()));
voxels_values_.insert(voxels_values_.end(), voxels.begin(), voxels.end());
thrust::sort_by_key(utility::exec_policy(0), voxels_keys_.begin(),
voxels_keys_.end(), voxels_values_.begin());
auto end = thrust::unique_by_key(utility::exec_policy(0),
voxels_keys_.begin(), voxels_keys_.end(),
voxels_values_.begin());
resize_all(thrust::distance(voxels_keys_.begin(), end.first), voxels_keys_,
voxels_values_);
}
void VoxelGrid::AddVoxels(const thrust::host_vector<Voxel> &voxels) {
utility::device_vector<Voxel> voxels_dev = voxels;
AddVoxels(voxels_dev);
}
VoxelGrid &VoxelGrid::PaintUniformColor(const Eigen::Vector3f &color) {
thrust::for_each(voxels_values_.begin(), voxels_values_.end(),
[c = color] __device__(Voxel & v) { v.color_ = c; });
return *this;
}
VoxelGrid &VoxelGrid::PaintIndexedColor(
const utility::device_vector<size_t> &indices,
const Eigen::Vector3f &color) {
thrust::for_each(thrust::make_permutation_iterator(voxels_values_.begin(),
indices.begin()),
thrust::make_permutation_iterator(voxels_values_.begin(),
indices.end()),
[c = color] __device__(Voxel & v) { v.color_ = c; });
return *this;
}
Eigen::Vector3i VoxelGrid::GetVoxel(const Eigen::Vector3f &point) const {
Eigen::Vector3f voxel_f = (point - origin_) / voxel_size_;
return (Eigen::floor(voxel_f.array())).cast<int>();
}
Eigen::Vector3f VoxelGrid::GetVoxelCenterCoordinate(
const Eigen::Vector3i &idx) const {
auto it = thrust::find(voxels_keys_.begin(), voxels_keys_.end(), idx);
if (it != voxels_keys_.end()) {
Eigen::Vector3i voxel_idx = *it;
return ((voxel_idx.cast<float>() + Eigen::Vector3f(0.5, 0.5, 0.5)) *
voxel_size_) +
origin_;
} else {
return Eigen::Vector3f::Zero();
}
}
std::array<Eigen::Vector3f, 8> VoxelGrid::GetVoxelBoundingPoints(
const Eigen::Vector3i &index) const {
float r = voxel_size_ / 2.0;
auto x = GetVoxelCenterCoordinate(index);
std::array<Eigen::Vector3f, 8> points;
::GetVoxelBoundingPoints(x, r, points.data());
return points;
}
thrust::host_vector<bool> VoxelGrid::CheckIfIncluded(
const thrust::host_vector<Eigen::Vector3f> &queries) {
thrust::host_vector<bool> output;
output.resize(queries.size());
for (size_t i = 0; i < queries.size(); ++i) {
auto query = GetVoxel(queries[i]);
auto itr =
thrust::find(voxels_keys_.begin(), voxels_keys_.end(), query);
output[i] = (itr != voxels_keys_.end());
}
return output;
}
VoxelGrid &VoxelGrid::CarveDepthMap(
const Image &depth_map,
const camera::PinholeCameraParameters &camera_parameter,
bool keep_voxels_outside_image) {
if (depth_map.height_ != camera_parameter.intrinsic_.height_ ||
depth_map.width_ != camera_parameter.intrinsic_.width_) {
utility::LogError(
"[VoxelGrid] provided depth_map dimensions are not compatible "
"with the provided camera_parameters");
}
auto rot = camera_parameter.extrinsic_.block<3, 3>(0, 0);
auto trans = camera_parameter.extrinsic_.block<3, 1>(0, 3);
auto intrinsic = camera_parameter.intrinsic_.intrinsic_matrix_;
// get for each voxel if it projects to a valid pixel and check if the voxel
// depth is behind the depth of the depth map at the projected pixel.
compute_carve_functor func(
thrust::raw_pointer_cast(depth_map.data_.data()), depth_map.width_,
depth_map.height_, depth_map.num_of_channels_,
depth_map.bytes_per_channel_, voxel_size_, origin_, intrinsic, rot,
trans, keep_voxels_outside_image);
remove_if_vectors(utility::exec_policy(0), func, voxels_keys_,
voxels_values_);
return *this;
}
VoxelGrid &VoxelGrid::CarveSilhouette(
const Image &silhouette_mask,
const camera::PinholeCameraParameters &camera_parameter,
bool keep_voxels_outside_image) {
if (silhouette_mask.height_ != camera_parameter.intrinsic_.height_ ||
silhouette_mask.width_ != camera_parameter.intrinsic_.width_) {
utility::LogError(
"[VoxelGrid] provided silhouette_mask dimensions are not "
"compatible with the provided camera_parameters");
}
auto rot = camera_parameter.extrinsic_.block<3, 3>(0, 0);
auto trans = camera_parameter.extrinsic_.block<3, 1>(0, 3);
auto intrinsic = camera_parameter.intrinsic_.intrinsic_matrix_;
// get for each voxel if it projects to a valid pixel and check if the pixel
// is set (>0).
compute_carve_functor func(
thrust::raw_pointer_cast(silhouette_mask.data_.data()),
silhouette_mask.width_, silhouette_mask.height_,
silhouette_mask.num_of_channels_,
silhouette_mask.bytes_per_channel_, voxel_size_, origin_, intrinsic,
rot, trans, keep_voxels_outside_image);
remove_if_vectors(func, voxels_keys_, voxels_values_);
return *this;
}
std::shared_ptr<VoxelGrid> VoxelGrid::SelectByIndex(
const utility::device_vector<size_t> &indices,
bool invert) {
auto dst = std::make_shared<VoxelGrid>();
if (invert) {
size_t n_out = voxels_values_.size() - indices.size();
utility::device_vector<size_t> sorted_indices = indices;
thrust::sort(utility::exec_policy(0), sorted_indices.begin(),
sorted_indices.end());
utility::device_vector<size_t> inv_indices(n_out);
thrust::set_difference(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(voxels_values_.size()),
sorted_indices.begin(), sorted_indices.end(),
inv_indices.begin());
dst->voxels_values_.resize(inv_indices.size());
dst->voxels_keys_.resize(inv_indices.size());
dst->voxel_size_ = voxel_size_;
dst->origin_ = origin_;
thrust::gather(utility::exec_policy(utility::GetStream(0)),
inv_indices.begin(), inv_indices.end(), voxels_values_.begin(),
dst->voxels_values_.begin());
thrust::gather(utility::exec_policy(utility::GetStream(0)),
inv_indices.begin(), inv_indices.end(), voxels_keys_.begin(),
dst->voxels_keys_.begin());
cudaSafeCall(cudaDeviceSynchronize());
} else {
dst->voxels_values_.resize(indices.size());
dst->voxels_keys_.resize(indices.size());
dst->voxel_size_ = voxel_size_;
dst->origin_ = origin_;
thrust::gather(utility::exec_policy(utility::GetStream(0)),
indices.begin(), indices.end(), voxels_values_.begin(),
dst->voxels_values_.begin());
thrust::gather(utility::exec_policy(utility::GetStream(0)),
indices.begin(), indices.end(), voxels_keys_.begin(),
dst->voxels_keys_.begin());
cudaSafeCall(cudaDeviceSynchronize());
}
return dst;
}
|
38fede7899c7aa04b9a23d2fd26d40ad37346fc5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/device_functions.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 100
#define ARRAY_SIZE 10
#define BLOCK_WIDTH 5
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_atomic(int *g)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
	printf("block %d, index %d\n", blockIdx.x, i);
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(&g[i], 1);
__syncthreads();
}
int main(int argc, char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
int * d_array;
hipMalloc((void **)&d_array, ARRAY_BYTES);
hipMemset((void *)d_array, 0, ARRAY_BYTES);
timer.Start();
//printf("***************atomic adds result..*************\n");
//hipDeviceSynchronize();
increment_atomic << <NUM_THREADS / BLOCK_WIDTH, BLOCK_WIDTH >> >(d_array);
timer.Stop();
hipMemcpy(h_array, d_array, ARRAY_BYTES, hipMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
hipFree(d_array);
return 0;
} | 38fede7899c7aa04b9a23d2fd26d40ad37346fc5.cu |
#include <device_functions.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 100
#define ARRAY_SIZE 10
#define BLOCK_WIDTH 5
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_atomic(int *g)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
	printf("block %d, index %d\n", blockIdx.x, i);
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(&g[i], 1);
__syncthreads();
}
int main(int argc, char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
int * d_array;
cudaMalloc((void **)&d_array, ARRAY_BYTES);
cudaMemset((void *)d_array, 0, ARRAY_BYTES);
timer.Start();
//printf("***************atomic adds result..*************\n");
//cudaDeviceSynchronize();
increment_atomic << <NUM_THREADS / BLOCK_WIDTH, BLOCK_WIDTH >> >(d_array);
timer.Stop();
cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
cudaFree(d_array);
return 0;
} |
8d3d09d53b0fbdbcc968b0db22b825a5fd7f999b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common/book.h"
#define N (33 * 1024)
__global__ void add( int *a, int *b, int *c ) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
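    // Grid-stride loop: the launch below uses 128 blocks of 128 threads
    // (16384 threads total), so each thread handles several of the
    // N = 33*1024 elements by striding blockDim.x * gridDim.x at a time.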
while (tid < N) {
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
int main( void ) {
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the CPU
a = (int*)malloc( N * sizeof(int) );
b = (int*)malloc( N * sizeof(int) );
c = (int*)malloc( N * sizeof(int) );
// allocate the memory on the GPU
hipMalloc((void**)&dev_a, N * sizeof(int));
hipMalloc((void**)&dev_b, N * sizeof(int));
hipMalloc((void**)&dev_c, N * sizeof(int));
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<N; i++) {
a[i] = i;
b[i] = 2 * i;
}
// copy the arrays 'a' and 'b' to the GPU
hipMemcpy( dev_a, a, N * sizeof(int), hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, N * sizeof(int), hipMemcpyHostToDevice );
hipLaunchKernelGGL(( add), dim3(128),dim3(128), 0, 0, dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost );
// verify that the GPU did the work we requested
bool success = true;
for (int i=0; i<N; i++) {
if ((a[i] + b[i]) != c[i]) {
printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] );
success = false;
}
}
if (success) printf( "We did it!\n" );
// free the memory we allocated on the GPU
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
// free the memory we allocated on the CPU
free( a );
free( b );
free( c );
return 0;
}
| 8d3d09d53b0fbdbcc968b0db22b825a5fd7f999b.cu |
#include "common/book.h"
#define N (33 * 1024)
__global__ void add( int *a, int *b, int *c ) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
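    // Grid-stride loop: the launch below uses 128 blocks of 128 threads
    // (16384 threads total), so each thread handles several of the
    // N = 33*1024 elements by striding blockDim.x * gridDim.x at a time.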
while (tid < N) {
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
int main( void ) {
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the CPU
a = (int*)malloc( N * sizeof(int) );
b = (int*)malloc( N * sizeof(int) );
c = (int*)malloc( N * sizeof(int) );
// allocate the memory on the GPU
cudaMalloc((void**)&dev_a, N * sizeof(int));
cudaMalloc((void**)&dev_b, N * sizeof(int));
cudaMalloc((void**)&dev_c, N * sizeof(int));
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<N; i++) {
a[i] = i;
b[i] = 2 * i;
}
// copy the arrays 'a' and 'b' to the GPU
cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice );
add<<<128,128>>>( dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost );
// verify that the GPU did the work we requested
bool success = true;
for (int i=0; i<N; i++) {
if ((a[i] + b[i]) != c[i]) {
printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] );
success = false;
}
}
if (success) printf( "We did it!\n" );
// free the memory we allocated on the GPU
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
// free the memory we allocated on the CPU
free( a );
free( b );
free( c );
return 0;
}
|
eda77454882683f4fb61381c360c76fe6c9e307c.hip | // !!! This is a file automatically generated by hipify!!!
/* -----------------------------------------------------------------------------------------------
Name: Anand Jhunjhunwala
Roll No: 17EC30041
CUDA
Assignment 4: Parallel dotproduct implementation.
------------------------------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define thread_per_block 1024
__host__ void RUN(hipError_t call)
{
hipError_t err = call;
if(err != hipSuccess)
{
fprintf(stderr, " Failed with error code %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
__device__ void wrapReduce(float *sdata, int tid, int blockSize)
{
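	// Final warp-stage reduction: only threads 0..31 reach this call, and
	// each step folds the upper half of the remaining range (32, 16, 8, 4,
	// 2, 1 elements) onto the lower half in shared memory.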
if(blockSize >= 64)
{
sdata[tid] = sdata[tid] + sdata[tid + 32];
__syncthreads();
}
if(blockSize >=32)
{
sdata[tid] += sdata[tid + 16];
__syncthreads();
}
if(blockSize >=16)
{
sdata[tid] += sdata[tid + 8];
__syncthreads();
}
if(blockSize >=8)
{
sdata[tid] += sdata[tid + 4];
__syncthreads();
}
if(blockSize >=4)
{
sdata[tid] += sdata[tid + 2];
__syncthreads();
}
if(blockSize >=2)
sdata[tid] += sdata[tid + 1];
}
__global__ void dotproduct(float *gin, float *gout, int N, float *d_A, float *d_B, int flag, int blockSize)
{
__shared__ float sdata[thread_per_block];
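	// Each thread combines two inputs (index i and i + blockDim.x) while
	// loading into shared memory, so a block of blockDim.x threads reduces
	// 2*blockDim.x values; on the first pass (flag == 1) the products
	// d_A[i]*d_B[i] are formed here, later passes just sum previous block sums.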
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
if(flag == 1)
{
if(i<N && (i + blockDim.x) < N)
{
sdata[tid] = d_A[i]*d_B[i] + d_A[i + blockDim.x]*d_B[i + blockDim.x];
}
else if(i<N)
{
sdata[tid] = d_A[i]*d_B[i];
}
else
{
sdata[tid] = 0;
}
}
else
{
if(i<N && (i + blockDim.x) < N)
{
sdata[tid] = gin[i] + gin[i + blockDim.x];
}
else if(i<N)
{
sdata[tid] = gin[i];
}
else
{
sdata[tid] = 0;
}
}
__syncthreads();
if(blockSize >= 1024){
if(tid < 512)
sdata[tid] = sdata[tid] + sdata[tid + 512];
__syncthreads();
}
if(blockSize >= 512){
if(tid < 256)
sdata[tid] = sdata[tid] + sdata[tid + 256];
__syncthreads();
}
if(blockSize >= 256){
if(tid < 128)
sdata[tid] = sdata[tid] + sdata[tid + 128];
__syncthreads();
}
if(blockSize >= 128){
if(tid < 64)
sdata[tid] = sdata[tid] + sdata[tid + 64];
__syncthreads();
}
if(tid < 32)
wrapReduce(sdata, tid, blockSize);
__syncthreads();
// writing in global mem
if(tid == 0)
gout[blockIdx.x] = sdata[0];
}
int main()
{
int test_case, k=1, current_block, call=1;
long int i, N;
float *d_A, *h_A, *d_B, *h_B, *gin, *gout, ms, temp;
double result=0;
printf("\n Enter the number of test cases:");
scanf("%d", &test_case);
printf(" %d\n", test_case);
hipEvent_t startEvent, stopEvent;
RUN(hipSetDevice(0));
while(test_case)
{
RUN(hipEventCreate(&startEvent));
RUN(hipEventCreate(&stopEvent));
printf("\nRunning test case: %d",k);
		printf("\n Enter dimension of vectors:");
scanf("%ld", &N);
printf(" %ld\n", N);
h_A = (float *)malloc(N*sizeof(float));
h_B = (float *)malloc(N*sizeof(float));
printf("\n Enter entries of 1st vector A:\n");
for(i=0; i<N; i++)
{
scanf("%f", &h_A[i]);
}
		printf("\n Enter entries of 2nd vector B:\n");
for(i=0; i<N; i++)
{
scanf("%f", &h_B[i]);
}
RUN(hipMalloc((void **)&d_A, N*sizeof(float)));
RUN(hipMalloc((void **)&d_B, N*sizeof(float)));
RUN(hipMemcpy(d_A, h_A, N*sizeof(float), hipMemcpyHostToDevice));
RUN(hipMemcpy(d_B, h_B, N*sizeof(float), hipMemcpyHostToDevice));
if(N >= 1024)
{
current_block = N/(2*thread_per_block);
call = 1;
while(current_block > 1024)
{
current_block = current_block/(2*thread_per_block);
call = call +1;
}
current_block = N;
ms = 0;
for(i=1; i<=call; i++)
{
//printf("\n call : %d\n", call);
if(current_block%(2*thread_per_block) == 0)
{
current_block = current_block/(2*thread_per_block);
}
else
{
current_block = current_block/(2*thread_per_block);
current_block++;
}
//printf("\n current block : %d\n", current_block);
RUN(hipMalloc((void **)&gout, current_block*sizeof(float)));
dim3 grid(current_block, 1,1);
dim3 block(thread_per_block, 1,1);
RUN(hipEventRecord(startEvent,0));
hipLaunchKernelGGL(( dotproduct), dim3(grid), dim3(block), 0, 0, gin, gout, N, d_A, d_B, i, thread_per_block);
RUN(hipEventRecord(stopEvent,0));
RUN(hipEventSynchronize(stopEvent));
RUN(hipEventElapsedTime(&temp, startEvent, stopEvent));
ms = ms + temp;
if(i!=1)
{
hipFree(gin);
}
RUN(hipMalloc((void **)&gin, current_block*sizeof(float)));
RUN(hipMemcpy(gin, gout, current_block*sizeof(float), hipMemcpyDeviceToDevice));
hipFree(gout);
}
RUN(hipGetLastError());
//host code to calculate last partial sum
free(h_A);
h_A = (float *)malloc(current_block*sizeof(float));
			RUN(hipMemcpy(h_A, gin, current_block*sizeof(float), hipMemcpyDeviceToHost)); // thread_per_block == 1024
hipFree(gin);
for(i=0; i<current_block; i++)
{
result = result + h_A[i];
}
printf("\n Kernel launch complete \n time taken: %.6f ms\n", ms);
hipFree(d_A);
hipFree(d_B);
RUN(hipEventDestroy(startEvent));
RUN(hipEventDestroy(stopEvent));
}
else
{
for(i=0; i<N; i++)
{
result = result + h_A[i]*h_B[i];
}
}
printf("\nDot Product of given vectors: %.2f\n", result);
printf("\n End of test case: %d\n", k);
free(h_A);
free(h_B);
result = 0;
test_case = test_case -1;
k = k+1;
}
printf("\n All test cases complete\n");
return 0;
} | eda77454882683f4fb61381c360c76fe6c9e307c.cu | /* -----------------------------------------------------------------------------------------------
Name: Anand Jhunjhunwala
Roll No: 17EC30041
CUDA
Assignment 4: Parallel dotproduct implementation.
------------------------------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define thread_per_block 1024
__host__ void RUN(cudaError_t call)
{
cudaError_t err = call;
if(err != cudaSuccess)
{
fprintf(stderr, " Failed with error code %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
__device__ void wrapReduce(float *sdata, int tid, int blockSize)
{
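	// Final warp-stage reduction: only threads 0..31 reach this call, and
	// each step folds the upper half of the remaining range (32, 16, 8, 4,
	// 2, 1 elements) onto the lower half in shared memory.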
if(blockSize >= 64)
{
sdata[tid] = sdata[tid] + sdata[tid + 32];
__syncthreads();
}
if(blockSize >=32)
{
sdata[tid] += sdata[tid + 16];
__syncthreads();
}
if(blockSize >=16)
{
sdata[tid] += sdata[tid + 8];
__syncthreads();
}
if(blockSize >=8)
{
sdata[tid] += sdata[tid + 4];
__syncthreads();
}
if(blockSize >=4)
{
sdata[tid] += sdata[tid + 2];
__syncthreads();
}
if(blockSize >=2)
sdata[tid] += sdata[tid + 1];
}
__global__ void dotproduct(float *gin, float *gout, int N, float *d_A, float *d_B, int flag, int blockSize)
{
__shared__ float sdata[thread_per_block];
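	// Each thread combines two inputs (index i and i + blockDim.x) while
	// loading into shared memory, so a block of blockDim.x threads reduces
	// 2*blockDim.x values; on the first pass (flag == 1) the products
	// d_A[i]*d_B[i] are formed here, later passes just sum previous block sums.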
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
if(flag == 1)
{
if(i<N && (i + blockDim.x) < N)
{
sdata[tid] = d_A[i]*d_B[i] + d_A[i + blockDim.x]*d_B[i + blockDim.x];
}
else if(i<N)
{
sdata[tid] = d_A[i]*d_B[i];
}
else
{
sdata[tid] = 0;
}
}
else
{
if(i<N && (i + blockDim.x) < N)
{
sdata[tid] = gin[i] + gin[i + blockDim.x];
}
else if(i<N)
{
sdata[tid] = gin[i];
}
else
{
sdata[tid] = 0;
}
}
__syncthreads();
if(blockSize >= 1024){
if(tid < 512)
sdata[tid] = sdata[tid] + sdata[tid + 512];
__syncthreads();
}
if(blockSize >= 512){
if(tid < 256)
sdata[tid] = sdata[tid] + sdata[tid + 256];
__syncthreads();
}
if(blockSize >= 256){
if(tid < 128)
sdata[tid] = sdata[tid] + sdata[tid + 128];
__syncthreads();
}
if(blockSize >= 128){
if(tid < 64)
sdata[tid] = sdata[tid] + sdata[tid + 64];
__syncthreads();
}
if(tid < 32)
wrapReduce(sdata, tid, blockSize);
__syncthreads();
// writing in global mem
if(tid == 0)
gout[blockIdx.x] = sdata[0];
}
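// [Editor's sketch] The kernel above reduces partial products through shared memory over
// several launches. Below is a minimal single-pass alternative using warp shuffles and one
// atomicAdd per warp; the name dotShuffle and the single-float accumulator are illustrative
// assumptions and are not used by main() in this file. Assumes blockDim.x is a multiple of warpSize.
__global__ void dotShuffle(const float *A, const float *B, float *result, int n)
{
    float sum = 0.0f;
    for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < n; i += blockDim.x*gridDim.x)
        sum += A[i]*B[i];                                    // grid-stride partial products
    for (int offset = warpSize/2; offset > 0; offset >>= 1)
        sum += __shfl_down_sync(0xffffffff, sum, offset);    // reduce within each warp
    if ((threadIdx.x & (warpSize-1)) == 0)
        atomicAdd(result, sum);                              // one atomic add per warp
}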
int main()
{
int test_case, k=1, current_block, call=1;
long int i, N;
float *d_A, *h_A, *d_B, *h_B, *gin, *gout, ms, temp;
double result=0;
printf("\n Enter the number of test cases:");
scanf("%d", &test_case);
printf(" %d\n", test_case);
cudaEvent_t startEvent, stopEvent;
RUN(cudaSetDevice(0));
while(test_case)
{
RUN(cudaEventCreate(&startEvent));
RUN(cudaEventCreate(&stopEvent));
printf("\nRunning test case: %d",k);
printf("\n Enter dimention of vectors:");
scanf("%ld", &N);
printf(" %ld\n", N);
h_A = (float *)malloc(N*sizeof(float));
h_B = (float *)malloc(N*sizeof(float));
printf("\n Enter entries of 1st vector A:\n");
for(i=0; i<N; i++)
{
scanf("%f", &h_A[i]);
}
printf("\n Enter entries of 2st vector B:\n");
for(i=0; i<N; i++)
{
scanf("%f", &h_B[i]);
}
RUN(cudaMalloc((void **)&d_A, N*sizeof(float)));
RUN(cudaMalloc((void **)&d_B, N*sizeof(float)));
RUN(cudaMemcpy(d_A, h_A, N*sizeof(float), cudaMemcpyHostToDevice));
RUN(cudaMemcpy(d_B, h_B, N*sizeof(float), cudaMemcpyHostToDevice));
if(N >= 1024)
{
current_block = N/(2*thread_per_block);
call = 1;
while(current_block > 1024)
{
current_block = current_block/(2*thread_per_block);
call = call +1;
}
current_block = N;
ms = 0;
for(i=1; i<=call; i++)
{
//printf("\n call : %d\n", call);
if(current_block%(2*thread_per_block) == 0)
{
current_block = current_block/(2*thread_per_block);
}
else
{
current_block = current_block/(2*thread_per_block);
current_block++;
}
//printf("\n current block : %d\n", current_block);
RUN(cudaMalloc((void **)&gout, current_block*sizeof(float)));
dim3 grid(current_block, 1,1);
dim3 block(thread_per_block, 1,1);
RUN(cudaEventRecord(startEvent,0));
dotproduct<<<grid, block>>>(gin, gout, N, d_A, d_B, i, thread_per_block);
RUN(cudaEventRecord(stopEvent,0));
RUN(cudaEventSynchronize(stopEvent));
RUN(cudaEventElapsedTime(&temp, startEvent, stopEvent));
ms = ms + temp;
if(i!=1)
{
cudaFree(gin);
}
RUN(cudaMalloc((void **)&gin, current_block*sizeof(float)));
RUN(cudaMemcpy(gin, gout, current_block*sizeof(float), cudaMemcpyDeviceToDevice));
cudaFree(gout);
}
RUN(cudaGetLastError());
//host code to calculate last partial sum
free(h_A);
h_A = (float *)malloc(current_block*sizeof(float));
RUN(cudaMemcpy(h_A, gin, current_block*sizeof(float), cudaMemcpyDeviceToHost)); //thread_per_block == 1024
cudaFree(gin);
for(i=0; i<current_block; i++)
{
result = result + h_A[i];
}
printf("\n Kernel launch complete \n time taken: %.6f ms\n", ms);
cudaFree(d_A);
cudaFree(d_B);
RUN(cudaEventDestroy(startEvent));
RUN(cudaEventDestroy(stopEvent));
}
else
{
for(i=0; i<N; i++)
{
result = result + h_A[i]*h_B[i];
}
}
printf("\nDot Product of given vectors: %.2f\n", result);
printf("\n End of test case: %d\n", k);
free(h_A);
free(h_B);
result = 0;
test_case = test_case -1;
k = k+1;
}
printf("\n All test cases complete\n");
return 0;
} |
ff1c13f1e5867cdfd33d480fced65cf580dfad6c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "../include/core.cuh"
#include "../include/utils.cuh"
/******************************************************
* Functions for querying device properties
*******************************************************/
int VLMO_get_device_num(const bool verbose=false) {
/*
* Get the number of GPU device(s) on the machine
* Args
* verbose
*/
int num_devices;
cudaErrChk (hipGetDeviceCount (&num_devices));
if (verbose == true) {
printf("\n=================================================\n");
printf("The number of device(s) : %d\n", num_devices);
printf("=================================================\n\n");
}
return num_devices;
}
hipDeviceProp_t VLMO_get_device_properties(const int device_id, size_t* free, size_t* total, const bool verbose=false) {
/*
* Get properties of certain GPU device [device_id]
* Args
* device_id
* verbose
*/
// get device info
hipDeviceProp_t prop;
cudaErrChk ( hipSetDevice (device_id));
cudaErrChk ( hipGetDeviceProperties (&prop, device_id) );
// get memory info
if (free != NULL && total != NULL) {
hipDevice_t dev;
hipCtx_t ctx;
hipDeviceGet(&dev,device_id);
hipCtxCreate(&ctx, 0, dev);
cuMemGetInfo (free, total);
}
//
if (verbose == true) {
printf ("\n========================================================\n");
printf ("[System Environment]\n");
printf ("Device Number: %d\n", device_id);
printf (" Device name: %s\n", prop.name);
printf (" Device compute capability: %d.%d\n", prop.major, prop.minor);
printf (" Number of SM(s): %d\n", prop.multiProcessorCount);
printf (" Memory Clock Rate (GHz): %.2f\n",
((float)prop.memoryClockRate)/1.0e6);
printf (" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf (" Peak Memory Bandwidth (GB/s): %f\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf ("\n[Kernel size]\n");
printf (" Maximum size of a grid [%d, %d, %d]\n"
, prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf (" Maximum size of a block [%d]\n"
, prop.maxThreadsPerBlock);
if (free != NULL && total != NULL) {
printf ("\n[Global mem]\n");
printf (" Global memory size : %.3f GB\n", (float)(*total/1.0e9));
printf (" Free memory size : %.3f GB\n", (float)(*free/1.0e9));
}
printf ("\n[Shared mem]\n");
printf (" Shared memory size per block : %d KB\n", (int)(prop.sharedMemPerBlock/1.0e3));
printf ("\n========================================================\n");
}
return prop;
}
/******************************************************
* Functions for initiating program
*******************************************************/
void VLMO_init (VLMO_Operator_Descriptor_t& desc) {
}
/******************************************************
* Functions for managing device memory
*******************************************************/
void VLMO_malloc_device_mem (VLMO_Operator_Descriptor_t& desc, const bool verbose=false) {
// size_t total_size = sizeof(float)*desc.A_h*desc.A_w + sizeof(float)*desc.B_h*desc.B_w + sizeof(float)*desc.C_h*desc.C_w;
if (desc.flag_unified_mem == true) {
VLMO_malloc_device_mem_unified (desc, verbose);
return ;
} else {
VLMO_malloc_device_mem_patch (desc, verbose);
return ;
}
}
void VLMO_malloc_device_mem_unified (VLMO_Operator_Descriptor_t& desc, const bool verbose=false) {
// Allocate unified memory for A
if (desc.host_A != nullptr) {
cudaErrChk (hipMallocManaged (&desc.device_A[0], sizeof(float)*desc.A_h*desc.A_w));
memcpy (desc.device_A[0], desc.host_A, sizeof(float)*desc.A_h*desc.A_w);
free (desc.host_A);
desc.host_A = nullptr;
}
// Allocate unified memory for B
if (desc.host_B != nullptr) {
cudaErrChk (hipMallocManaged (&desc.device_B[0], sizeof(float)*desc.B_h*desc.B_w));
memcpy (desc.device_B[0], desc.host_B, sizeof(float)*desc.B_h*desc.B_w);
free (desc.host_B);
desc.host_B = nullptr;
}
// Allocate unified memory for C
if (desc.host_C != nullptr) {
cudaErrChk (hipMallocManaged (&desc.device_C[0], sizeof(float)*desc.C_h*desc.C_w));
memcpy (desc.device_C[0], desc.host_C, sizeof(float)*desc.C_h*desc.C_w);
free (desc.host_C);
desc.host_C = nullptr;
}
if (verbose == true) {
size_t total_size = sizeof(float)*desc.A_h*desc.A_w + sizeof(float)*desc.B_h*desc.B_w + sizeof(float)*desc.C_h*desc.C_w;
printf("[Mem] Unified memory allocation completed..\n");
printf(" mem usage : %.3f GB [free : %.3f GB]\n", total_size*1e-9, desc.mem_free_size*1e-9);
}
}
void VLMO_malloc_device_mem_patch (VLMO_Operator_Descriptor_t& desc, const bool verbose=false) {
desc.flag_double_buffering = true;
cudaErrChk (hipStreamCreate (&desc.streams[0]));
cudaErrChk (hipStreamCreate (&desc.streams[1]));
if (desc.patch_h == 0 || desc.patch_w == 0) {
get_maximum_size_patch (desc);
}
size_t total_size_patch = sizeof (float) * desc.patch_h * desc.patch_w;
// Allocate unified memory for A
if (desc.host_A != nullptr) {
cudaErrChk (hipMalloc (&(desc.device_A[0]), total_size_patch));
cudaErrChk (hipMalloc (&(desc.device_A[1]), total_size_patch));
}
// Allocate unified memory for B
if (desc.host_B != nullptr) {
cudaErrChk (hipMalloc (&(desc.device_B[0]), total_size_patch));
cudaErrChk (hipMalloc (&(desc.device_B[1]), total_size_patch));
}
// Allocate unified memory for C
if (desc.host_C != nullptr) {
cudaErrChk (hipMalloc (&(desc.device_C[0]), total_size_patch));
cudaErrChk (hipMalloc (&(desc.device_C[1]), total_size_patch));
}
cudaErrChk (hipStreamSynchronize (desc.streams[0]));
cudaErrChk (hipGetLastError ());
if (verbose == true) {
printf("[Mem] Patch memory allocation completed..\n");
printf(" mem usage : %.3f GB [free : %.3f GB]\n", 6*total_size_patch*1e-9, desc.mem_free_size*1e-9);
}
}
void VLMO_clear_all (VLMO_Operator_Descriptor_t& desc) {
if (desc.host_A != nullptr)
free (desc.host_A);
if (desc.device_A[0] != nullptr)
cudaErrChk (hipFree (desc.device_A[0]));
if (desc.device_A[1] != nullptr)
cudaErrChk (hipFree (desc.device_A[1]));
if (desc.host_B != nullptr)
free (desc.host_B);
if (desc.device_B[0] != nullptr)
cudaErrChk (hipFree (desc.device_B[0]));
if (desc.device_B[1] != nullptr)
cudaErrChk (hipFree (desc.device_B[1]));
if (desc.host_C != nullptr)
free (desc.host_C);
if (desc.device_C[0] != nullptr)
cudaErrChk (hipFree (desc.device_C[0]));
if (desc.device_C[1] != nullptr)
cudaErrChk (hipFree (desc.device_C[1]));
if (desc.flag_double_buffering == true) {
hipStreamDestroy(desc.streams[0]);
hipStreamDestroy(desc.streams[1]);
}
}
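// [Editor's sketch] With the unified-memory path above, desc.device_A[0], device_B[0] and
// device_C[0] hold whole operands directly addressable from a kernel. The element-wise kernel
// below only illustrates that usage; its name and the choice of C = A + B are assumptions,
// not part of the original VLMO interface.
__global__ void VLMO_example_add_kernel(const float* A, const float* B, float* C, size_t n)
{
    size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        C[i] = A[i] + B[i];   // one output element per thread
}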
| ff1c13f1e5867cdfd33d480fced65cf580dfad6c.cu | #include <cuda.h>
#include "../include/core.cuh"
#include "../include/utils.cuh"
/******************************************************
* Functions for querying device properties
*******************************************************/
int VLMO_get_device_num(const bool verbose=false) {
/*
* Get the number of GPU device(s) on the machine
* Args
* verbose
*/
int num_devices;
cudaErrChk (cudaGetDeviceCount (&num_devices));
if (verbose == true) {
printf("\n=================================================\n");
printf("The number of device(s) : %d\n", num_devices);
printf("=================================================\n\n");
}
return num_devices;
}
cudaDeviceProp VLMO_get_device_properties(const int device_id, size_t* free, size_t* total, const bool verbose=false) {
/*
* Get properties of certain GPU device [device_id]
* Args
* device_id
* verbose
*/
// get device info
cudaDeviceProp prop;
cudaErrChk ( cudaSetDevice (device_id));
cudaErrChk ( cudaGetDeviceProperties (&prop, device_id) );
// get memory info
if (free != NULL && total != NULL) {
CUdevice dev;
CUcontext ctx;
cuDeviceGet(&dev,device_id);
cuCtxCreate(&ctx, 0, dev);
cuMemGetInfo (free, total);
}
//
if (verbose == true) {
printf ("\n========================================================\n");
printf ("[System Environment]\n");
printf ("Device Number: %d\n", device_id);
printf (" Device name: %s\n", prop.name);
printf (" Device compute capability: %d.%d\n", prop.major, prop.minor);
printf (" Number of SM(s): %d\n", prop.multiProcessorCount);
printf (" Memory Clock Rate (GHz): %.2f\n",
((float)prop.memoryClockRate)/1.0e6);
printf (" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf (" Peak Memory Bandwidth (GB/s): %f\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf ("\n[Kernel size]\n");
printf (" Maximum size of a grid [%d, %d, %d]\n"
, prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf (" Maximum size of a block [%d]\n"
, prop.maxThreadsPerBlock);
if (free != NULL && total != NULL) {
printf ("\n[Global mem]\n");
printf (" Global memory size : %.3f GB\n", (float)(*total/1.0e9));
printf (" Free memory size : %.3f GB\n", (float)(*free/1.0e9));
}
printf ("\n[Shared mem]\n");
printf (" Shared memory size per block : %d KB\n", (int)(prop.sharedMemPerBlock/1.0e3));
printf ("\n========================================================\n");
}
return prop;
}
/******************************************************
* Functions for initiating program
*******************************************************/
void VLMO_init (VLMO_Operator_Descriptor_t& desc) {
}
/******************************************************
* Functions for managing device memory
*******************************************************/
void VLMO_malloc_device_mem (VLMO_Operator_Descriptor_t& desc, const bool verbose=false) {
// size_t total_size = sizeof(float)*desc.A_h*desc.A_w + sizeof(float)*desc.B_h*desc.B_w + sizeof(float)*desc.C_h*desc.C_w;
if (desc.flag_unified_mem == true) {
VLMO_malloc_device_mem_unified (desc, verbose);
return ;
} else {
VLMO_malloc_device_mem_patch (desc, verbose);
return ;
}
}
void VLMO_malloc_device_mem_unified (VLMO_Operator_Descriptor_t& desc, const bool verbose=false) {
// Allocate unified memory for A
if (desc.host_A != nullptr) {
cudaErrChk (cudaMallocManaged (&desc.device_A[0], sizeof(float)*desc.A_h*desc.A_w));
memcpy (desc.device_A[0], desc.host_A, sizeof(float)*desc.A_h*desc.A_w);
free (desc.host_A);
desc.host_A = nullptr;
}
// Allocate unified memory for B
if (desc.host_B != nullptr) {
cudaErrChk (cudaMallocManaged (&desc.device_B[0], sizeof(float)*desc.B_h*desc.B_w));
memcpy (desc.device_B[0], desc.host_B, sizeof(float)*desc.B_h*desc.B_w);
free (desc.host_B);
desc.host_B = nullptr;
}
// Allocate unified memory for C
if (desc.host_C != nullptr) {
cudaErrChk (cudaMallocManaged (&desc.device_C[0], sizeof(float)*desc.C_h*desc.C_w));
memcpy (desc.device_C[0], desc.host_C, sizeof(float)*desc.C_h*desc.C_w);
free (desc.host_C);
desc.host_C = nullptr;
}
if (verbose == true) {
size_t total_size = sizeof(float)*desc.A_h*desc.A_w + sizeof(float)*desc.B_h*desc.B_w + sizeof(float)*desc.C_h*desc.C_w;
printf("[Mem] Unified memory allocation completed..\n");
printf(" mem usage : %.3f GB [free : %.3f GB]\n", total_size*1e-9, desc.mem_free_size*1e-9);
}
}
void VLMO_malloc_device_mem_patch (VLMO_Operator_Descriptor_t& desc, const bool verbose=false) {
desc.flag_double_buffering = true;
cudaErrChk (cudaStreamCreate (&desc.streams[0]));
cudaErrChk (cudaStreamCreate (&desc.streams[1]));
if (desc.patch_h == 0 || desc.patch_w == 0) {
get_maximum_size_patch (desc);
}
size_t total_size_patch = sizeof (float) * desc.patch_h * desc.patch_w;
// Allocate unified memory for A
if (desc.host_A != nullptr) {
cudaErrChk (cudaMalloc (&(desc.device_A[0]), total_size_patch));
cudaErrChk (cudaMalloc (&(desc.device_A[1]), total_size_patch));
}
// Allocate unified memory for B
if (desc.host_B != nullptr) {
cudaErrChk (cudaMalloc (&(desc.device_B[0]), total_size_patch));
cudaErrChk (cudaMalloc (&(desc.device_B[1]), total_size_patch));
}
// Allocate unified memory for C
if (desc.host_C != nullptr) {
cudaErrChk (cudaMalloc (&(desc.device_C[0]), total_size_patch));
cudaErrChk (cudaMalloc (&(desc.device_C[1]), total_size_patch));
}
cudaErrChk (cudaStreamSynchronize (desc.streams[0]));
cudaErrChk (cudaGetLastError ());
if (verbose == true) {
printf("[Mem] Patch memory allocation completed..\n");
printf(" mem usage : %.3f GB [free : %.3f GB]\n", 6*total_size_patch*1e-9, desc.mem_free_size*1e-9);
}
}
void VLMO_clear_all (VLMO_Operator_Descriptor_t& desc) {
if (desc.host_A != nullptr)
free (desc.host_A);
if (desc.device_A[0] != nullptr)
cudaErrChk (cudaFree (desc.device_A[0]));
if (desc.device_A[1] != nullptr)
cudaErrChk (cudaFree (desc.device_A[1]));
if (desc.host_B != nullptr)
free (desc.host_B);
if (desc.device_B[0] != nullptr)
cudaErrChk (cudaFree (desc.device_B[0]));
if (desc.device_B[1] != nullptr)
cudaErrChk (cudaFree (desc.device_B[1]));
if (desc.host_C != nullptr)
free (desc.host_C);
if (desc.device_C[0] != nullptr)
cudaErrChk (cudaFree (desc.device_C[0]));
if (desc.device_C[1] != nullptr)
cudaErrChk (cudaFree (desc.device_C[1]));
if (desc.flag_double_buffering == true) {
cudaStreamDestroy(desc.streams[0]);
cudaStreamDestroy(desc.streams[1]);
}
}
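// [Editor's sketch] One possible call sequence for the helpers above. It assumes the caller has
// already filled the descriptor's problem sizes and host pointers; the function name
// VLMO_example_setup is an illustrative assumption and is not part of the original interface.
void VLMO_example_setup(VLMO_Operator_Descriptor_t& desc)
{
    int num_devices = VLMO_get_device_num(true);              // list available GPUs
    size_t free_mem = 0, total_mem = 0;
    if (num_devices > 0)
        VLMO_get_device_properties(0, &free_mem, &total_mem, true);
    desc.mem_free_size = free_mem;                            // used by the allocators for reporting
    VLMO_malloc_device_mem(desc, true);                       // unified or patch allocation per desc.flag_unified_mem
}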
|
acb3f1c6190de9b865f6c8e0aadfc48e4886fb5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include "im2col.h"
#include "hip/hip_runtime.h"
// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
// > Mixed precision kernel (templated)
template <typename T>
__global__ void im2col_gpu_kernel(int n, T* data_im, int height, int width, int ksize, int pad, int stride, int height_col, int width_col, T *data_col) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
T* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const T* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : T(0);
data_col_ptr += height_col * width_col;
}
}
}
}
// > Mixed precision kernel caller
template <typename T>
void im2col_gpu(T *im, int channels, int height, int width, int ksize, int stride, int pad, T *data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
hipLaunchKernelGGL(( im2col_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK), dim3(BLOCK), 0, 0, num_kernels, im, height, width,
ksize, pad, stride, height_col, width_col, data_col);
}
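// [Editor's sketch] Helper for sizing the data_col buffer that the wrapper above fills: each of
// the channels*height_col*width_col threads writes ksize*ksize values. The helper name is an
// illustrative assumption and is not part of the original Caffe-derived code.
inline size_t im2col_buffer_elems(int channels, int height, int width,
                                  int ksize, int stride, int pad)
{
    int height_col = (height + 2*pad - ksize) / stride + 1;
    int width_col  = (width  + 2*pad - ksize) / stride + 1;
    return (size_t)channels * ksize * ksize * height_col * width_col;
}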
void im2col_gpu(half_host *im, int channels, int height, int width, int ksize, int stride, int pad, half_host *data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
hipLaunchKernelGGL(( im2col_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK), dim3(BLOCK), 0, 0, num_kernels, (half_device*)im, height, width,
ksize, pad, stride, height_col, width_col, (half_device*)data_col);
}
template void im2col_gpu(float *im, int channels, int height, int width, int ksize, int stride, int pad, float *data_col);
template void im2col_gpu(double *im, int channels, int height, int width, int ksize, int stride, int pad, double *data_col); | acb3f1c6190de9b865f6c8e0aadfc48e4886fb5d.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include "im2col.h"
#include "cuda.h"
// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
// > Mixed precision kernel (templated)
template <typename T>
__global__ void im2col_gpu_kernel(int n, T* data_im, int height, int width, int ksize, int pad, int stride, int height_col, int width_col, T *data_col) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
T* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const T* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : T(0);
data_col_ptr += height_col * width_col;
}
}
}
}
// > Mixed precision kernel caller
template <typename T>
void im2col_gpu(T *im, int channels, int height, int width, int ksize, int stride, int pad, T *data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
im2col_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK, BLOCK>>>(num_kernels, im, height, width,
ksize, pad, stride, height_col, width_col, data_col);
}
void im2col_gpu(half_host *im, int channels, int height, int width, int ksize, int stride, int pad, half_host *data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
im2col_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK, BLOCK>>>(num_kernels, (half_device*)im, height, width,
ksize, pad, stride, height_col, width_col, (half_device*)data_col);
}
template void im2col_gpu(float *im, int channels, int height, int width, int ksize, int stride, int pad, float *data_col);
template void im2col_gpu(double *im, int channels, int height, int width, int ksize, int stride, int pad, double *data_col); |
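// [Editor's sketch] Typical use of im2col_gpu: unroll one image into a column buffer so the
// convolution becomes a single GEMM. The function below only shows the call and the implied
// shapes; the name conv_via_im2col_example and the omitted cuBLAS call are illustrative assumptions.
void conv_via_im2col_example(float* d_im, float* d_col,
                             int channels, int height, int width,
                             int ksize, int stride, int pad)
{
    // d_col must hold (channels*ksize*ksize) x (height_col*width_col) floats, where
    // height_col = (height + 2*pad - ksize)/stride + 1 (and similarly width_col).
    im2col_gpu(d_im, channels, height, width, ksize, stride, pad, d_col);
    // Multiplying a (num_filters x channels*ksize*ksize) weight matrix by d_col
    // (e.g. with cublasSgemm) then yields the (num_filters x height_col*width_col) output.
}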
1cf6ded8cee5e25ea1f3e6e254a5d2ef93579cd0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
sobel operation just uses a sobel operator to convolve a target matrix, so the key to improve performance
is how to tune the convolution. well, either you can write your own convolution algorithm or use cufft lib.
1. naive convolution algorithm. refer to this link: https://www.evl.uic.edu/sjames/cs525/final.html
2. cufft. This is a fine tuned algorithm, a little complicated to apply FFT to convolution.
a. Apply API hipfftExecR2C to kernel and target matrix;
b. Multiply the FFTed kernel and target matrix element-wise;
c. Inverse the result from step b.
d.
*/
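/* [Editor's sketch] Option 1 above (naive convolution), shown as a standalone kernel for
   reference; the name naiveConvolve and the float/zero-padding choices are illustrative
   assumptions and are not used by cudaSobel below. */
__global__ void naiveConvolve(const float* src, float* dst, const float* kern,
                              int height, int width, int ksize)
{
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (row >= height || col >= width) return;
    int r = ksize/2;
    float sum = 0.0f;
    for (int i = -r; i <= r; i++)
        for (int j = -r; j <= r; j++)
        {
            int y = row + i, x = col + j;
            if (y >= 0 && y < height && x >= 0 && x < width)   // zero padding at the borders
                sum += src[y*width + x] * kern[(i + r)*ksize + (j + r)];
        }
    dst[row*width + col] = sum;
}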
#include "..\cumath\cumath.cuh"
#define K_SIZE 3
#define TILE_H 32
#define TILE_W 32
#define RADIUS 8
//#define LOOP_UNROLLING
__constant__ int sobelKernelXC[K_SIZE][K_SIZE] = { { -1,0,1 },{ -2,0,2 },{ -1,0,1 } };
__constant__ int sobelKernelYC[K_SIZE][K_SIZE] = { { -1,-2,-1 },{ 0,0,0 },{ 1,2,1 } };
__global__ void sobel(int *input, int height, int width, int radius, int R, size_t pitch, int *output)
{
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ int shared[TILE_H][TILE_W+16]; // add extra 16 columns to eliminate bank conflicts, because there is a 2-way bank conflict if the shared memory is defined as __shared__ int shared[TILE_H][TILE_W]
if (row < height && col < width)
{
/*
actually, there will be some branch divergence when thread face if-else statement, which will affect the performance.
*/
// upper left corner in the block
if (row - R < 0 || col - R < 0)
shared[threadIdx.y][threadIdx.x] = 0;
else
shared[threadIdx.y][threadIdx.x] = *((int*)((char*)input + (row - R) * pitch) + (col - R));
// upper right
if (row - R < 0 || col + R > width - 1)
shared[threadIdx.y][threadIdx.x + blockDim.x] = 0;
else
shared[threadIdx.y][threadIdx.x + blockDim.x] = *((int*)((char*)input + (row - R) * pitch) + (col + R));
//bottom left
if (row + R > height - 1 || col - R < 0)
shared[threadIdx.y + blockDim.y][threadIdx.x] = 0;
else
shared[threadIdx.y + blockDim.y][threadIdx.x] = *((int*)((char*)input + (row + R) * pitch) + (col - R));
// bottom right
if (row + R > height - 1 || col + R > width - 1)
shared[threadIdx.y + blockDim.y][threadIdx.x + blockDim.x] = 0;
else
shared[threadIdx.y + blockDim.y][threadIdx.x + blockDim.x] = *((int*)((char*)input + (row + R) * pitch) + (col + R));
__syncthreads();
int sumx = 0, sumy = 0;
#ifdef LOOP_UNROLLING
// use loop unrolling to improve performance, it can avoid branching.
sumx = sobelKernelXC[radius - 1][radius - 1] * shared[threadIdx.y + RADIUS - 1][threadIdx.x + RADIUS - 1] +
sobelKernelXC[radius - 1][radius] * shared[threadIdx.y + RADIUS - 1][threadIdx.x + RADIUS] +
sobelKernelXC[radius - 1][radius + 1] * shared[threadIdx.y + RADIUS - 1][threadIdx.x + RADIUS + 1] +
sobelKernelXC[radius][radius - 1] * shared[threadIdx.y + RADIUS][threadIdx.x + RADIUS - 1] +
sobelKernelXC[radius][radius] * shared[threadIdx.y + RADIUS][threadIdx.x + RADIUS] +
sobelKernelXC[radius][radius + 1] * shared[threadIdx.y + RADIUS][threadIdx.x + RADIUS + 1] +
sobelKernelXC[radius + 1][radius - 1] * shared[threadIdx.y + RADIUS + 1][threadIdx.x + RADIUS - 1] +
sobelKernelXC[radius + 1][radius] * shared[threadIdx.y + RADIUS + 1][threadIdx.x + RADIUS] +
sobelKernelXC[radius + 1][radius + 1] * shared[threadIdx.y + RADIUS + 1][threadIdx.x + RADIUS + 1];
sumy = sobelKernelXC[radius - 1][radius - 1] * shared[threadIdx.y + RADIUS - 1][threadIdx.x + RADIUS - 1] +
sobelKernelYC[radius - 1][radius] * shared[threadIdx.y + RADIUS - 1][threadIdx.x + RADIUS] +
sobelKernelYC[radius - 1][radius + 1] * shared[threadIdx.y + RADIUS - 1][threadIdx.x + RADIUS + 1] +
sobelKernelYC[radius][radius - 1] * shared[threadIdx.y + RADIUS][threadIdx.x + RADIUS - 1] +
sobelKernelYC[radius][radius] * shared[threadIdx.y + RADIUS][threadIdx.x + RADIUS] +
sobelKernelYC[radius][radius + 1] * shared[threadIdx.y + RADIUS][threadIdx.x + RADIUS + 1] +
sobelKernelYC[radius + 1][radius - 1] * shared[threadIdx.y + RADIUS + 1][threadIdx.x + RADIUS - 1] +
sobelKernelYC[radius + 1][radius] * shared[threadIdx.y + RADIUS + 1][threadIdx.x + RADIUS] +
sobelKernelYC[radius + 1][radius + 1] * shared[threadIdx.y + RADIUS + 1][threadIdx.x + RADIUS + 1];
#else
for (int i = -radius; i <= radius; i++)
for (int j = -radius; j <= radius; j++)
{
sumx += sobelKernelXC[radius + i][radius + j] * shared[threadIdx.y + R - i][threadIdx.x + R - j];
sumy += sobelKernelYC[radius + i][radius + j] * shared[threadIdx.y + R - i][threadIdx.x + R - j];
}
#endif
//__syncthreads(); // wait current thread job done
int *out = (int*)((char*)output + row*pitch) + col;
*out = sqrtf(powf(sumx, 2) + powf(sumy, 2));
}
}
extern "C"
void cudaSobel(cv::Mat & input, cv::Mat & output)
{
input.convertTo(input, CV_32S);
output = cv::Mat(input.size(), CV_32S, cv::Scalar(0));
int *d_input, *d_output;
size_t pitch;
hipStream_t inputStream, outputStream;
CUDA_CALL(hipStreamCreate(&inputStream)); CUDA_CALL(hipStreamCreate(&outputStream));
CUDA_CALL(hipMallocPitch(&d_input, &pitch, sizeof(int)*input.cols, input.rows));
CUDA_CALL(hipMallocPitch(&d_output, &pitch, sizeof(int)*output.cols, output.rows));
CUDA_CALL(hipMemcpy2DAsync(d_input, pitch, input.data, sizeof(int)*input.cols, sizeof(int)*input.cols, input.rows, hipMemcpyHostToDevice, inputStream));
CUDA_CALL(hipMemcpy2DAsync(d_output, pitch, output.data, sizeof(int)*output.cols, sizeof(int)*output.cols, output.rows, hipMemcpyHostToDevice, outputStream));
dim3 threadSize(16, 16);
dim3 blockSize(input.cols / threadSize.x, input.rows / threadSize.y);
hipLaunchKernelGGL(( sobel), dim3(blockSize), dim3(threadSize), 0, 0, d_input, input.rows, input.cols, 1, 8, pitch, d_output);
CUDA_CALL(hipDeviceSynchronize());
// get data back
CUDA_CALL(hipMemcpy2D(output.data, sizeof(int)*output.cols, d_output, pitch, sizeof(int)*output.cols, output.rows, hipMemcpyDeviceToHost));
// resource releasing
hipFree(d_input); hipFree(d_output);
CUDA_CALL(hipStreamDestroy(inputStream)); CUDA_CALL(hipStreamDestroy(outputStream));
output.convertTo(output, CV_8U);
input.convertTo(input, CV_8U);
} | 1cf6ded8cee5e25ea1f3e6e254a5d2ef93579cd0.cu | /*
sobel operation just uses a sobel operator to convolve a target matrix, so the key to improve performance
is how to tune the convolution. well, either you can write your own convolution algorithm or use cufft lib.
1. naive convolution algorithm. refer to this link: https://www.evl.uic.edu/sjames/cs525/final.html
2. cufft. This is a fine tuned algorithm, a little complicated to apply FFT to convolution.
a. Apply API cufftExecR2C to kernel and target matrix;
b. Multiply the FFTed kernel and target matrix element-wise;
c. Inverse the result from step b.
d.
*/
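/* [Editor's sketch] Step (b) of option 2 above is an element-wise complex multiply of the two
   forward FFTs; scaling by 1/(width*height) is folded in because cuFFT inverse transforms are
   unnormalized. The include, the kernel name and the R2C element count n = height*(width/2 + 1)
   are illustrative assumptions and are not used elsewhere in this file. */
#include <cufft.h>
__global__ void complexPointwiseMulScale(cufftComplex* a, const cufftComplex* b, int n, float scale)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= n) return;
    cufftComplex x = a[i], y = b[i];
    cufftComplex r;
    r.x = (x.x*y.x - x.y*y.y) * scale;   // real part
    r.y = (x.x*y.y + x.y*y.x) * scale;   // imaginary part
    a[i] = r;
}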
#include "..\cumath\cumath.cuh"
#define K_SIZE 3
#define TILE_H 32
#define TILE_W 32
#define RADIUS 8
//#define LOOP_UNROLLING
__constant__ int sobelKernelXC[K_SIZE][K_SIZE] = { { -1,0,1 },{ -2,0,2 },{ -1,0,1 } };
__constant__ int sobelKernelYC[K_SIZE][K_SIZE] = { { -1,-2,-1 },{ 0,0,0 },{ 1,2,1 } };
__global__ void sobel(int *input, int height, int width, int radius, int R, size_t pitch, int *output)
{
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ int shared[TILE_H][TILE_W+16]; // add extra 16 columns to eliminate bank conflicts, because there is a 2-way bank conflict if the shared memory is defined as __shared__ int shared[TILE_H][TILE_W]
if (row < height && col < width)
{
/*
actually, there will be some branch divergence when thread face if-else statement, which will affect the performance.
*/
// upper left corner in the block
if (row - R < 0 || col - R < 0)
shared[threadIdx.y][threadIdx.x] = 0;
else
shared[threadIdx.y][threadIdx.x] = *((int*)((char*)input + (row - R) * pitch) + (col - R));
// upper right
if (row - R < 0 || col + R > width - 1)
shared[threadIdx.y][threadIdx.x + blockDim.x] = 0;
else
shared[threadIdx.y][threadIdx.x + blockDim.x] = *((int*)((char*)input + (row - R) * pitch) + (col + R));
//bottom left
if (row + R > height - 1 || col - R < 0)
shared[threadIdx.y + blockDim.y][threadIdx.x] = 0;
else
shared[threadIdx.y + blockDim.y][threadIdx.x] = *((int*)((char*)input + (row + R) * pitch) + (col - R));
// bottom right
if (row + R > height - 1 || col + R > width - 1)
shared[threadIdx.y + blockDim.y][threadIdx.x + blockDim.x] = 0;
else
shared[threadIdx.y + blockDim.y][threadIdx.x + blockDim.x] = *((int*)((char*)input + (row + R) * pitch) + (col + R));
__syncthreads();
int sumx = 0, sumy = 0;
#ifdef LOOP_UNROLLING
// use loop unrolling to improve performance, it can avoid branching.
sumx = sobelKernelXC[radius - 1][radius - 1] * shared[threadIdx.y + RADIUS - 1][threadIdx.x + RADIUS - 1] +
sobelKernelXC[radius - 1][radius] * shared[threadIdx.y + RADIUS - 1][threadIdx.x + RADIUS] +
sobelKernelXC[radius - 1][radius + 1] * shared[threadIdx.y + RADIUS - 1][threadIdx.x + RADIUS + 1] +
sobelKernelXC[radius][radius - 1] * shared[threadIdx.y + RADIUS][threadIdx.x + RADIUS - 1] +
sobelKernelXC[radius][radius] * shared[threadIdx.y + RADIUS][threadIdx.x + RADIUS] +
sobelKernelXC[radius][radius + 1] * shared[threadIdx.y + RADIUS][threadIdx.x + RADIUS + 1] +
sobelKernelXC[radius + 1][radius - 1] * shared[threadIdx.y + RADIUS + 1][threadIdx.x + RADIUS - 1] +
sobelKernelXC[radius + 1][radius] * shared[threadIdx.y + RADIUS + 1][threadIdx.x + RADIUS] +
sobelKernelXC[radius + 1][radius + 1] * shared[threadIdx.y + RADIUS + 1][threadIdx.x + RADIUS + 1];
sumy = sobelKernelXC[radius - 1][radius - 1] * shared[threadIdx.y + RADIUS - 1][threadIdx.x + RADIUS - 1] +
sobelKernelYC[radius - 1][radius] * shared[threadIdx.y + RADIUS - 1][threadIdx.x + RADIUS] +
sobelKernelYC[radius - 1][radius + 1] * shared[threadIdx.y + RADIUS - 1][threadIdx.x + RADIUS + 1] +
sobelKernelYC[radius][radius - 1] * shared[threadIdx.y + RADIUS][threadIdx.x + RADIUS - 1] +
sobelKernelYC[radius][radius] * shared[threadIdx.y + RADIUS][threadIdx.x + RADIUS] +
sobelKernelYC[radius][radius + 1] * shared[threadIdx.y + RADIUS][threadIdx.x + RADIUS + 1] +
sobelKernelYC[radius + 1][radius - 1] * shared[threadIdx.y + RADIUS + 1][threadIdx.x + RADIUS - 1] +
sobelKernelYC[radius + 1][radius] * shared[threadIdx.y + RADIUS + 1][threadIdx.x + RADIUS] +
sobelKernelYC[radius + 1][radius + 1] * shared[threadIdx.y + RADIUS + 1][threadIdx.x + RADIUS + 1];
#else
for (int i = -radius; i <= radius; i++)
for (int j = -radius; j <= radius; j++)
{
sumx += sobelKernelXC[radius + i][radius + j] * shared[threadIdx.y + R - i][threadIdx.x + R - j];
sumy += sobelKernelYC[radius + i][radius + j] * shared[threadIdx.y + R - i][threadIdx.x + R - j];
}
#endif
//__syncthreads(); // wait current thread job done
int *out = (int*)((char*)output + row*pitch) + col;
*out = sqrtf(powf(sumx, 2) + powf(sumy, 2));
}
}
extern "C"
void cudaSobel(cv::Mat & input, cv::Mat & output)
{
input.convertTo(input, CV_32S);
output = cv::Mat(input.size(), CV_32S, cv::Scalar(0));
int *d_input, *d_output;
size_t pitch;
cudaStream_t inputStream, outputStream;
CUDA_CALL(cudaStreamCreate(&inputStream)); CUDA_CALL(cudaStreamCreate(&outputStream));
CUDA_CALL(cudaMallocPitch(&d_input, &pitch, sizeof(int)*input.cols, input.rows));
CUDA_CALL(cudaMallocPitch(&d_output, &pitch, sizeof(int)*output.cols, output.rows));
CUDA_CALL(cudaMemcpy2DAsync(d_input, pitch, input.data, sizeof(int)*input.cols, sizeof(int)*input.cols, input.rows, cudaMemcpyHostToDevice, inputStream));
CUDA_CALL(cudaMemcpy2DAsync(d_output, pitch, output.data, sizeof(int)*output.cols, sizeof(int)*output.cols, output.rows, cudaMemcpyHostToDevice, outputStream));
dim3 threadSize(16, 16);
dim3 blockSize(input.cols / threadSize.x, input.rows / threadSize.y);
sobel<<<blockSize, threadSize>>>(d_input, input.rows, input.cols, 1, 8, pitch, d_output);
CUDA_CALL(cudaDeviceSynchronize());
// get data back
CUDA_CALL(cudaMemcpy2D(output.data, sizeof(int)*output.cols, d_output, pitch, sizeof(int)*output.cols, output.rows, cudaMemcpyDeviceToHost));
// resource releasing
cudaFree(d_input); cudaFree(d_output);
CUDA_CALL(cudaStreamDestroy(inputStream)); CUDA_CALL(cudaStreamDestroy(outputStream));
output.convertTo(output, CV_8U);
input.convertTo(input, CV_8U);
} |
cae1db298ccb53d38d7f26281cefc0b6b682c52b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __NVCC__
template <class U>
__global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K);
template <class T,class U>
__global__ void A_star_expand(int* off,int* edge,T* W,U* Hx,int* parent,volatile U* Cx,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,
int N,int E, int K,int dest,int* nVFlag,int* PQ_size,
int flagDiff,int* diff_off,int* diff_edge,unsigned int* diff_weight,int dE );
template <class U>
__global__ void keepHeapPQ(int* PQ_size,U* Cx,int N,int K);
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N);
template <class U>
__global__ void insertPQ(int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList);
template <class U>
__global__ void checkMIN(int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K);
template <class T, class U>
__global__ void propogateDel(int* delEdgesV,int delEdge, volatile U* Cx,
int* rev_offset,int* rev_edges,T* rev_weight,int N,int E,
U* Hx,volatile int* parent,int* parent_old,int* addFlag,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight,int dE);
template <class T, class U>
__global__ void propogateAdd(int* diff_off, int* diff_edges,T* diff_W,U* Hx,int* addFlag,
volatile U* Cx,int* lock, int* parent, int* parent_old, int N, int dE);
template <class T,class U>
__global__ void insert_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE);
template <class T,class U>
__global__ void delete_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* parent_old,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE,
int* rev_offset,int* rev_edges,T* rev_weight,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight);
template <class U>
__global__ void insertDest(int* PQ_size,U* Cx,int dest,int* openList);
template <class U>
__global__ void getCx(U* Cx,int dest,U* val);
///////////////////////////////////////////
#include "kernels/d_a_star_kernels.cu"
#include "d_a_star.cuh"
#ifdef DEBUG
#include <cstdio>
#endif
template <class T,class U>
GPU_D_A_Star<T,U>:: GPU_D_A_Star(GPU_Dynamic_Graph<T> *graph, unsigned int start,unsigned int end, unsigned int K )
{
this->graph = graph;
this->start_node = start;
this->end_node = end;
this->num_pq = K;
this->flag_end = 0;
this->flag_found = 0;
this->is_set_hx = false;
this->next_vertices_size = 0;
this->num_updated_paths = 0;
__alloc_cpu();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: set_huiristics(U* hx)
{
this->Hx = hx;
is_set_hx = true;
int N = this->graph->get_graph().get_num_nodes();
gpuErrchk ( hipMalloc(&d_Hx,sizeof(U)*N ) );
gpuErrchk ( hipMemcpy(d_Hx,Hx,sizeof(U)*N,hipMemcpyHostToDevice) );
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: __alloc_cpu()
{
int N = this->graph->get_graph().get_num_nodes();
int K = this->num_pq;
this->PQ = (unsigned int*)malloc(sizeof(unsigned int)*N );
this->PQ_size = (unsigned int*)malloc(sizeof(unsigned int)*K);
this->Cx = (U*)malloc(sizeof(U)*N);
this->Hx = (U*)malloc(sizeof(U)*N);
this->open_list = (int*)malloc(sizeof(int)*N);
this->parent = (int*)malloc(sizeof(int)*N);
this->parent_old = (int*)malloc(sizeof(int)*N);
this->next_vertices_flag = (int*)malloc(sizeof(int)*N);
this->next_vertices = (int*)malloc(sizeof(int)*N);
memset(this->parent,-1,sizeof(int)*N);
memset(this->parent_old,-1,sizeof(int)*N);
memset(this->open_list,-1,sizeof(int)*N);
memset(this->PQ_size,0,sizeof(int)*K);
memset(this->next_vertices_flag,-1,sizeof(int)*N);
//todo make it memset
for(int i=0;i<N;i++){
this->Cx[i] = INT_MAX;
}
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: __alloc_gpu()
{
int N = this->graph->get_graph().get_num_nodes();
gpuErrchk ( hipMalloc(&d_Cx,sizeof(U)*N ) );
gpuErrchk ( hipMalloc(&d_parent,sizeof(int)*N ) );
gpuErrchk ( hipMalloc(&d_parent_old,sizeof(int)*N ) );
gpuErrchk ( hipMalloc(&d_open_list,sizeof(int)*N ) );
gpuErrchk ( hipMalloc(&d_PQ,sizeof(unsigned int)*N ) );
gpuErrchk ( hipMalloc(&d_PQ_size,sizeof(unsigned int)*num_pq ) );
gpuErrchk ( hipMalloc(&d_lock,sizeof(int)*N) );
//for next set of vertices to add in PQ
gpuErrchk ( hipMalloc(&d_next_vertices,sizeof(int)*N) );
gpuErrchk ( hipMalloc(&d_next_vertices_size,sizeof(int)) );
gpuErrchk ( hipMalloc(&d_next_vertices_flag,sizeof(int)*N) );
//next nodes to expand
gpuErrchk ( hipMalloc(&d_expand_nodes,sizeof(int)*K) ); //changed to K
gpuErrchk ( hipMalloc(&d_expand_nodes_size,sizeof(int)) );
//flag to end search
gpuErrchk( hipMalloc(&d_flag_end,sizeof(int)) );
gpuErrchk( hipMalloc(&d_flag_found,sizeof(int)) );
gpuErrchk ( hipMemset(d_next_vertices_size,0,sizeof(int)) );
gpuErrchk ( hipMemset(d_expand_nodes_size,0,sizeof(int)) );
gpuErrchk ( hipMemset(d_lock,0,sizeof(int)*N) );
// gpuErrchk ( hipMemcpy(d_Cx,Cx,sizeof(U)*N,hipMemcpyHostToDevice) );
// gpuErrchk ( hipMemcpy(d_PQ_size,PQ_size,sizeof(unsigned int)*num_pq,hipMemcpyHostToDevice) );
// gpuErrchk ( hipMemcpy(d_parent,parent,sizeof(int)*N,hipMemcpyHostToDevice) );
// gpuErrchk ( hipMemcpy(d_open_list,open_list,sizeof(int)*N,hipMemcpyHostToDevice) );
}
#endif | cae1db298ccb53d38d7f26281cefc0b6b682c52b.cu | #ifdef __NVCC__
template <class U>
__global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K);
template <class T,class U>
__global__ void A_star_expand(int* off,int* edge,T* W,U* Hx,int* parent,volatile U* Cx,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,
int N,int E, int K,int dest,int* nVFlag,int* PQ_size,
int flagDiff,int* diff_off,int* diff_edge,unsigned int* diff_weight,int dE );
template <class U>
__global__ void keepHeapPQ(int* PQ_size,U* Cx,int N,int K);
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N);
template <class U>
__global__ void insertPQ(int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList);
template <class U>
__global__ void checkMIN(int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K);
template <class T, class U>
__global__ void propogateDel(int* delEdgesV,int delEdge, volatile U* Cx,
int* rev_offset,int* rev_edges,T* rev_weight,int N,int E,
U* Hx,volatile int* parent,int* parent_old,int* addFlag,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight,int dE);
template <class T, class U>
__global__ void propogateAdd(int* diff_off, int* diff_edges,T* diff_W,U* Hx,int* addFlag,
volatile U* Cx,int* lock, int* parent, int* parent_old, int N, int dE);
template <class T,class U>
__global__ void insert_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE);
template <class T,class U>
__global__ void delete_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* parent_old,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE,
int* rev_offset,int* rev_edges,T* rev_weight,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight);
template <class U>
__global__ void insertDest(int* PQ_size,U* Cx,int dest,int* openList);
template <class U>
__global__ void getCx(U* Cx,int dest,U* val);
///////////////////////////////////////////
#include "kernels/d_a_star_kernels.cu"
#include "d_a_star.cuh"
#ifdef DEBUG
#include <cstdio>
#endif
template <class T,class U>
GPU_D_A_Star<T,U>:: GPU_D_A_Star(GPU_Dynamic_Graph<T> *graph, unsigned int start,unsigned int end, unsigned int K )
{
this->graph = graph;
this->start_node = start;
this->end_node = end;
this->num_pq = K;
this->flag_end = 0;
this->flag_found = 0;
this->is_set_hx = false;
this->next_vertices_size = 0;
this->num_updated_paths = 0;
__alloc_cpu();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: set_huiristics(U* hx)
{
this->Hx = hx;
is_set_hx = true;
int N = this->graph->get_graph().get_num_nodes();
gpuErrchk ( cudaMalloc(&d_Hx,sizeof(U)*N ) );
gpuErrchk ( cudaMemcpy(d_Hx,Hx,sizeof(U)*N,cudaMemcpyHostToDevice) );
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: __alloc_cpu()
{
int N = this->graph->get_graph().get_num_nodes();
int K = this->num_pq;
this->PQ = (unsigned int*)malloc(sizeof(unsigned int)*N );
this->PQ_size = (unsigned int*)malloc(sizeof(unsigned int)*K);
this->Cx = (U*)malloc(sizeof(U)*N);
this->Hx = (U*)malloc(sizeof(U)*N);
this->open_list = (int*)malloc(sizeof(int)*N);
this->parent = (int*)malloc(sizeof(int)*N);
this->parent_old = (int*)malloc(sizeof(int)*N);
this->next_vertices_flag = (int*)malloc(sizeof(int)*N);
this->next_vertices = (int*)malloc(sizeof(int)*N);
memset(this->parent,-1,sizeof(int)*N);
memset(this->parent_old,-1,sizeof(int)*N);
memset(this->open_list,-1,sizeof(int)*N);
memset(this->PQ_size,0,sizeof(int)*K);
memset(this->next_vertices_flag,-1,sizeof(int)*N);
//todo make it memset
for(int i=0;i<N;i++){
this->Cx[i] = INT_MAX;
}
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: __alloc_gpu()
{
int N = this->graph->get_graph().get_num_nodes();
gpuErrchk ( cudaMalloc(&d_Cx,sizeof(U)*N ) );
gpuErrchk ( cudaMalloc(&d_parent,sizeof(int)*N ) );
gpuErrchk ( cudaMalloc(&d_parent_old,sizeof(int)*N ) );
gpuErrchk ( cudaMalloc(&d_open_list,sizeof(int)*N ) );
gpuErrchk ( cudaMalloc(&d_PQ,sizeof(unsigned int)*N ) );
gpuErrchk ( cudaMalloc(&d_PQ_size,sizeof(unsigned int)*num_pq ) );
gpuErrchk ( cudaMalloc(&d_lock,sizeof(int)*N) );
//for next set of vertices to add in PQ
gpuErrchk ( cudaMalloc(&d_next_vertices,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&d_next_vertices_size,sizeof(int)) );
gpuErrchk ( cudaMalloc(&d_next_vertices_flag,sizeof(int)*N) );
//next nodes to expand
gpuErrchk ( cudaMalloc(&d_expand_nodes,sizeof(int)*K) ); //changed to K
gpuErrchk ( cudaMalloc(&d_expand_nodes_size,sizeof(int)) );
//flag to end search
gpuErrchk( cudaMalloc(&d_flag_end,sizeof(int)) );
gpuErrchk( cudaMalloc(&d_flag_found,sizeof(int)) );
gpuErrchk ( cudaMemset(d_next_vertices_size,0,sizeof(int)) );
gpuErrchk ( cudaMemset(d_expand_nodes_size,0,sizeof(int)) );
gpuErrchk ( cudaMemset(d_lock,0,sizeof(int)*N) );
// gpuErrchk ( cudaMemcpy(d_Cx,Cx,sizeof(U)*N,cudaMemcpyHostToDevice) );
// gpuErrchk ( cudaMemcpy(d_PQ_size,PQ_size,sizeof(unsigned int)*num_pq,cudaMemcpyHostToDevice) );
// gpuErrchk ( cudaMemcpy(d_parent,parent,sizeof(int)*N,cudaMemcpyHostToDevice) );
// gpuErrchk ( cudaMemcpy(d_open_list,open_list,sizeof(int)*N,cudaMemcpyHostToDevice) );
}
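// [Editor's sketch] set_huiristics() above copies one U value per node to the device and keeps
// the host pointer. A trivial, always-admissible heuristic (all zeros) reduces A* to Dijkstra;
// the helper name and U = int are illustrative assumptions only.
static int* make_zero_heuristic(int num_nodes)
{
    int* hx = new int[num_nodes];
    for (int i = 0; i < num_nodes; i++)
        hx[i] = 0;            // zero lower bound for every node
    return hx;                // caller releases with delete[] once the search object is done with it
}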
#endif |
b7193bbd843e333b40c1f24ce096b653042f21d8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hipsparse.h>
#include <device_types.h>
#include <unistd.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
#include <omp.h>
#define SEED 1
#define THREADS 128
#define BLOCKS 1024
// MATRIX_DIM_M,MATRIX_DIM_N,XTILES,YTILES
__constant__ unsigned int symbolDIMS[4];
__global__ void multMV_kernel(double*,double*,double*);
double getclock();
int main(int argc, char** argv){
printf("\n\nStarting GPUfloatMV...\n");
unsigned int MATRIX_DIM_M = atoi(argv[1]);//rows
unsigned int MATRIX_DIM_N = atoi(argv[2]);//columns
double perturb=0.;//atof(argv[3]);
// Get device information
int count;
hipGetDeviceCount(&count);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop,0);
printf("Compute capability: %d.%d\n",prop.major,prop.minor);
printf("Number of GPUs: %d\n",count);
printf("Multiprocessor count: %d\n",prop.multiProcessorCount);
printf("Clock rate: %luKhz\n",prop.clockRate/1000);
printf("Total Global Memory: %luMB\n",(unsigned int)prop.totalGlobalMem/1000000);
printf("Total Constant Memory: %d\n",prop.totalConstMem);
printf("Shared memory per block: %d\n",prop.sharedMemPerBlock);
printf("1-D Texture Max size: %d\n",prop.maxTexture1D);
printf("Number of registers per block: %d\n",prop.regsPerBlock);
printf("Can I map host memory: %d\n",prop.canMapHostMemory);
printf("Max number of threads per block: %d\n",prop.maxThreadsPerBlock);
printf("Max number of blocks in a grid [0]: %d\n",prop.maxGridSize[0]);
printf("Max number of blocks in a grid [1]: %d\n",prop.maxGridSize[1]);
printf("Max number of blocks in a grid [2]: %d\n",prop.maxGridSize[2]);
printf("Max Texture dimensions 2D: %lu\n",prop.maxTexture2D[2]);
printf("Concurrent Kernels: %d\n",prop.concurrentKernels);
printf("Threads in a warp: %d\n",prop.warpSize);
// some general cpu info
printf("size of float (cpu): %d\n",sizeof(float));
printf("size of unsigned int (cpu): %d\n",sizeof(unsigned int));
printf("size of unsigned long (cpu): %d\n",sizeof(unsigned long));
srand(SEED);
int i,j;
double cs,ce;
hipError_t cudastatus0,cudastatus1,
cudastatus2,cudastatus3,
cudastatus4,cudastatus5,
cudastatus6,cudastatus7;
float elapsedtime;
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// allocate random Matrix
unsigned int memmatrixsize=sizeof(double)*MATRIX_DIM_M*MATRIX_DIM_N;
double* Matrix=(double*) malloc(memmatrixsize);
printf("\n\nmemsize of Matrix: %luKB\n",memmatrixsize/1000);
// Dense vector
unsigned int memvectorsize=MATRIX_DIM_N*sizeof(double);
double* vector=(double*)malloc(memvectorsize);
printf("memsize of vector: %luMB\n",memvectorsize/1000);
printf("Total data transfer to gpu: %luKB\n",(memvectorsize+memmatrixsize)/1000);
double* returnvector=(double*)malloc(MATRIX_DIM_M*sizeof(double));
// fill Matrix and vector
// #pragma omp parallel for private(i,j)
for(i=0;i<MATRIX_DIM_M;i++){
for(j=0;j<MATRIX_DIM_N;j++) Matrix[j*MATRIX_DIM_M+i]=(double) 1.0*(1.0/((rand()%100)+1));
}
for(i=0;i<MATRIX_DIM_N;i++)
vector[i]=(double) 1.0*(1.0/((rand()%100)+1))+perturb;
printf("sample vector[0]: %f\n",vector[0]);
printf("sample vector[1]: %f\n",vector[1]);
printf("sample vector[2]: %f\n",vector[2]);
printf("sample vector[3]: %f\n",vector[3]);
printf("sample vector[4]: %f\n",vector[4]);
printf("sample vector[5]: %f\n",vector[5]);
printf("sample vector[6]: %f\n",vector[6]);
// allocate GPU device memory
cs=getclock();
double* dev_vector;
cudastatus0=hipMalloc((void**)&dev_vector,MATRIX_DIM_N*sizeof(double));
cudastatus1=hipMemcpy(dev_vector,vector,MATRIX_DIM_N*sizeof(double),hipMemcpyHostToDevice);
if(cudastatus0!=hipSuccess|cudastatus1!=hipSuccess){
printf("Error in dev_vector memory allocation:\nstatus0: %s, status1: %s\nExiting...\n\n",
hipGetErrorString(cudastatus0),
hipGetErrorString(cudastatus1));
if(vector)free(vector);
if(Matrix)free(Matrix);
exit(EXIT_FAILURE);
}
double* dev_Matrix;
cudastatus2=hipMalloc((void**)&dev_Matrix,MATRIX_DIM_M*MATRIX_DIM_N*sizeof(double));
cudastatus3=hipMemcpy(dev_Matrix,Matrix,MATRIX_DIM_M*MATRIX_DIM_N*sizeof(double),hipMemcpyHostToDevice);
if(cudastatus2!=hipSuccess|cudastatus3!=hipSuccess){
printf("Error in dev_Matrix memory allocation:\nstatus2: %s, status3: %s.\nExiting...\n\n",
hipGetErrorString(cudastatus2),
hipGetErrorString(cudastatus3));
if(dev_vector) hipFree(dev_vector);
if(vector)free(vector);
if(Matrix)free(Matrix);
exit(EXIT_FAILURE);
}
double* dev_returnVector;
cudastatus4=hipMalloc((void**)&dev_returnVector,MATRIX_DIM_M*sizeof(double));
cudastatus5=hipMemset(dev_returnVector,0.0,MATRIX_DIM_M*sizeof(double));
if(cudastatus4!=hipSuccess|cudastatus5!=hipSuccess){
printf("Error in dev_returnVector memory allocation:\nstatus4: %s, status5: %s\nExiting...\n\n",
hipGetErrorString(cudastatus4),
hipGetErrorString(cudastatus5));
if(dev_vector) hipFree(dev_vector);
if(dev_Matrix) hipFree(dev_Matrix);
if(vector)free(vector);
if(Matrix)free(Matrix);
exit(EXIT_FAILURE);
}
// update constant memory
unsigned int ytiles=ceil((float)MATRIX_DIM_M/(float)BLOCKS);
unsigned int xtiles=ceil((float)MATRIX_DIM_N/(float)THREADS);
printf("ytiles: %d, xtiles: %d\n",ytiles,xtiles);
unsigned int matrixdims[4]={MATRIX_DIM_N,MATRIX_DIM_M,xtiles,ytiles};
cudastatus6=hipMemcpyToSymbol("symbolDIMS",matrixdims,4*sizeof(unsigned int));
if(cudastatus6!=hipSuccess){
printf("Error in symbol copy:\nstatus6: %s.\nExiting...\n\n",
hipGetErrorString(cudastatus6));
if(dev_vector) hipFree(dev_vector);
if(dev_Matrix) hipFree(dev_Matrix);
if(dev_returnVector) hipFree(dev_returnVector);
if(vector)free(vector);
if(Matrix)free(Matrix);
exit(EXIT_FAILURE);
}
ce=getclock();
// set thread grid layout
const int num_blocks=BLOCKS;
const int num_threads_per_block=THREADS;
printf("Set number of BLOCKS: %d, number of THREADS_PER_BLOCK: %d\n",num_blocks,num_threads_per_block);
printf("------------------------------------------------------------\n\n");
// start timer
hipEventRecord(start,0);
// call kernel
hipLaunchKernelGGL(( multMV_kernel), dim3(num_blocks),dim3(num_threads_per_block), 0, 0, dev_Matrix,dev_vector,dev_returnVector);
// end timer
hipEventRecord(stop,0);
hipEventSynchronize(stop); // barrier
// kernel time
hipEventElapsedTime(&elapsedtime,start,stop);
cudastatus7=hipMemcpy(returnvector,dev_returnVector,MATRIX_DIM_M*sizeof(double),hipMemcpyDeviceToHost);
if(cudastatus7!=hipSuccess){
printf("Error, kernel return status: %s\nExiting...\n\n",
hipGetErrorString(cudastatus7));
if(dev_vector) hipFree(dev_vector);
if(dev_Matrix) hipFree(dev_Matrix);
if(dev_returnVector) hipFree(dev_returnVector);
if(vector) free(vector);
if(Matrix) free(Matrix);
exit(EXIT_FAILURE);
}else{
//for(i=0;i<MATRIX_DIM_M;i++) printf("returnvector: %f\n",returnvector[i]);
printf("Kernel return successfully, elapsed time: %6.9lf sec.\n",elapsedtime/1000.0);
printf("Data set up time, elapsed time: %6.9lf sec.\n",ce-cs);
printf("Total gpu kernel elapsed time: %6.9lf sec.\n",(elapsedtime/1000.0)+(ce-cs));
printf("...............................................\n");
}
printf("Calculating with openmp on CPU with 8 cores...");
cs = getclock();
double* wvector = (double*) calloc(MATRIX_DIM_M,sizeof(double));
#pragma omp parallel for private(i,j)
for(i=0;i<MATRIX_DIM_M;i++){
for(j=0;j<MATRIX_DIM_N;j++){
wvector[i]+=Matrix[i*MATRIX_DIM_N+j]*vector[j];
}
}
ce = getclock();
printf("finished, elapsed time: %6.9lf sec.\n",ce-cs);
printf("Calculating on CPU in serial...");
free(wvector);
cs = getclock();
wvector = (double*) calloc(MATRIX_DIM_M,sizeof(double));
for(i=0;i<MATRIX_DIM_M;i++){
for(j=0;j<MATRIX_DIM_N;j++){
wvector[i]+=Matrix[i*MATRIX_DIM_N+j]*vector[j];
}
}
ce = getclock();
printf("finished, elapsed time: %6.9lf sec.\n",ce-cs);
// for(i=0;i<MATRIX_DIM_M;i++)printf("wvector[%d]: %f\n",i,wvector[i]);
double answer=0.;
// float cpu_ysqsum=0.;
// float gpu_ysqsum=0.;
unsigned int counter=0;
printf("\n\nVerifying correct answer from gpu.\n");
for(i=0;i<MATRIX_DIM_M;i++){
answer=wvector[i]-returnvector[i];
// printf("answer: %f\n",answer);
// printf("CPUvector: %f\n",wvector[i]);
// printf("GPUvector: %f\n",returnvector[i]);
// cpu_ysqsum+=(wvector[i]*wvector[i]);
// gpu_ysqsum+=(returnvector[i]*returnvector[i]);
if (answer!=0.0){
printf("Error, divergent value: %f for index: %d\n",answer,i);
printf("CPUvector (serial): %f\n",wvector[i]);
printf("GPUvector: %f\n",returnvector[i]);
counter++;
if(counter>0) break;
}
}
// printf("\nCPU ysqsum: %9.9lf\n",cpu_ysqsum);
// printf("GPU ysqsum: %9.9lf\n\n",gpu_ysqsum);
/*printf("Writing out to file...\n");
FILE* fp=fopen(outfile,"w");
for(i=0;i<MATRIX_DIM;i++){
fprintf(fp,"%f ",y[i]);
}
fclose(fp);*/
printf("Freeing memory...\n");
if(dev_vector) hipFree(dev_vector);
if(dev_Matrix) hipFree(dev_Matrix);
if(dev_returnVector)hipFree(dev_returnVector);
free(vector);
free(returnvector);
free(wvector);
free(Matrix);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("Done, exiting...\n\n\n");
exit(EXIT_SUCCESS);
}
// Y-BLOCK TILES
__global__ void multMV_kernel(double* M, double* v, double* w){
unsigned int i,j,tileId,offset,vindex,ypos;
__shared__ double vcache[THREADS];
__shared__ double wback[THREADS];
//symbolDIMS[0]== MATRIX_DIM_N (number of columns; see matrixdims[] in main)
//symbolDIMS[1]== MATRIX_DIM_M (number of rows)
//symbolDIMS[2]== xtiles
//symbolDIMS[3]== ytiles
for(i=0;i<symbolDIMS[2];i++){
vindex=i*blockDim.x+threadIdx.x;
if(vindex<symbolDIMS[0]) vcache[threadIdx.x]=v[vindex];
else vcache[threadIdx.x]=0.0;
//do memory accesses
for(j=0;j<symbolDIMS[3];j++){
ypos=j*gridDim.x+blockIdx.x;
tileId=symbolDIMS[0]*ypos+vindex;
if(tileId < symbolDIMS[0]*symbolDIMS[1])wback[threadIdx.x]=M[tileId]*vcache[threadIdx.x];
else wback[threadIdx.x]=0.;
__syncthreads();
// per block thread reduction
offset = blockDim.x/2;
while(offset>0){
if(threadIdx.x<offset)
wback[threadIdx.x]+=wback[threadIdx.x+offset];
__syncthreads();
offset/=2;
}//end while
// top level tile reduction
if(threadIdx.x==0) w[ypos]+=wback[0];
__syncthreads();
}//end Ytiles-for
}//end Xtiles-for
}//end function
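// [Editor's sketch] For comparison with the tiled kernel above: the simplest one-thread-per-row
// form of w = M*v, using the same row-major indexing (row length symbolDIMS[0]). The kernel name
// multMV_simple is an illustrative assumption and is not launched by main().
__global__ void multMV_simple(double* M, double* v, double* w)
{
    unsigned int row = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= symbolDIMS[1]) return;                         // symbolDIMS[1] holds the row count
    double sum = 0.0;
    for (unsigned int col = 0; col < symbolDIMS[0]; col++)    // symbolDIMS[0] holds the row length
        sum += M[row*symbolDIMS[0] + col] * v[col];
    w[row] = sum;
}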
double getclock(){
struct timezone tzp;
struct timeval tp;
gettimeofday (&tp, &tzp);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
| b7193bbd843e333b40c1f24ce096b653042f21d8.cu | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cusparse.h>
#include <device_types.h>
#include <unistd.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
#include <omp.h>
#define SEED 1
#define THREADS 128
#define BLOCKS 1024
// MATRIX_DIM_M,MATRIX_DIM_N,XTILES,YTILES
__constant__ unsigned int symbolDIMS[4];
__global__ void multMV_kernel(double*,double*,double*);
double getclock();
int main(int argc, char** argv){
printf("\n\nStarting GPUfloatMV...\n");
unsigned int MATRIX_DIM_M = atoi(argv[1]);//rows
unsigned int MATRIX_DIM_N = atoi(argv[2]);//columns
double perturb=0.;//atof(argv[3]);
// Get device information
int count;
cudaGetDeviceCount(&count);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,0);
printf("Compute capability: %d.%d\n",prop.major,prop.minor);
printf("Number of GPUs: %d\n",count);
printf("Multiprocessor count: %d\n",prop.multiProcessorCount);
printf("Clock rate: %luKhz\n",prop.clockRate/1000);
printf("Total Global Memory: %luMB\n",(unsigned int)prop.totalGlobalMem/1000000);
printf("Total Constant Memory: %d\n",prop.totalConstMem);
printf("Shared memory per block: %d\n",prop.sharedMemPerBlock);
printf("1-D Texture Max size: %d\n",prop.maxTexture1D);
printf("Number of registers per block: %d\n",prop.regsPerBlock);
printf("Can I map host memory: %d\n",prop.canMapHostMemory);
printf("Max number of threads per block: %d\n",prop.maxThreadsPerBlock);
printf("Max number of blocks in a grid [0]: %d\n",prop.maxGridSize[0]);
printf("Max number of blocks in a grid [1]: %d\n",prop.maxGridSize[1]);
printf("Max number of blocks in a grid [2]: %d\n",prop.maxGridSize[2]);
printf("Max Texture dimensions 2D: %lu\n",prop.maxTexture2D[2]);
printf("Concurrent Kernels: %d\n",prop.concurrentKernels);
printf("Threads in a warp: %d\n",prop.warpSize);
// some general cpu info
printf("size of float (cpu): %d\n",sizeof(float));
printf("size of unsigned int (cpu): %d\n",sizeof(unsigned int));
printf("size of unsigned long (cpu): %d\n",sizeof(unsigned long));
srand(SEED);
int i,j;
double cs,ce;
cudaError_t cudastatus0,cudastatus1,
cudastatus2,cudastatus3,
cudastatus4,cudastatus5,
cudastatus6,cudastatus7;
float elapsedtime;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// allocate random Matrix
unsigned int memmatrixsize=sizeof(double)*MATRIX_DIM_M*MATRIX_DIM_N;
double* Matrix=(double*) malloc(memmatrixsize);
printf("\n\nmemsize of Matrix: %luKB\n",memmatrixsize/1000);
// Dense vector
unsigned int memvectorsize=MATRIX_DIM_N*sizeof(double);
double* vector=(double*)malloc(memvectorsize);
printf("memsize of vector: %luMB\n",memvectorsize/1000);
printf("Total data transfer to gpu: %luKB\n",(memvectorsize+memmatrixsize)/1000);
double* returnvector=(double*)malloc(MATRIX_DIM_M*sizeof(double));
// fill Matrix and vector
// #pragma omp parallel for private(i,j)
for(i=0;i<MATRIX_DIM_M;i++){
for(j=0;j<MATRIX_DIM_N;j++) Matrix[j*MATRIX_DIM_M+i]=(double) 1.0*(1.0/((rand()%100)+1));
}
for(i=0;i<MATRIX_DIM_N;i++)
vector[i]=(double) 1.0*(1.0/((rand()%100)+1))+perturb;
printf("sample vector[0]: %f\n",vector[0]);
printf("sample vector[1]: %f\n",vector[1]);
printf("sample vector[2]: %f\n",vector[2]);
printf("sample vector[3]: %f\n",vector[3]);
printf("sample vector[4]: %f\n",vector[4]);
printf("sample vector[5]: %f\n",vector[5]);
printf("sample vector[6]: %f\n",vector[6]);
// allocate GPU device memory
cs=getclock();
double* dev_vector;
cudastatus0=cudaMalloc((void**)&dev_vector,MATRIX_DIM_N*sizeof(double));
cudastatus1=cudaMemcpy(dev_vector,vector,MATRIX_DIM_N*sizeof(double),cudaMemcpyHostToDevice);
if(cudastatus0!=cudaSuccess|cudastatus1!=cudaSuccess){
printf("Error in dev_vector memory allocation:\nstatus0: %s, status1: %s\nExiting...\n\n",
cudaGetErrorString(cudastatus0),
cudaGetErrorString(cudastatus1));
if(vector)free(vector);
if(Matrix)free(Matrix);
exit(EXIT_FAILURE);
}
double* dev_Matrix;
cudastatus2=cudaMalloc((void**)&dev_Matrix,MATRIX_DIM_M*MATRIX_DIM_N*sizeof(double));
cudastatus3=cudaMemcpy(dev_Matrix,Matrix,MATRIX_DIM_M*MATRIX_DIM_N*sizeof(double),cudaMemcpyHostToDevice);
if(cudastatus2!=cudaSuccess|cudastatus3!=cudaSuccess){
printf("Error in dev_Matrix memory allocation:\nstatus2: %s, status3: %s.\nExiting...\n\n",
cudaGetErrorString(cudastatus2),
cudaGetErrorString(cudastatus3));
if(dev_vector) cudaFree(dev_vector);
if(vector)free(vector);
if(Matrix)free(Matrix);
exit(EXIT_FAILURE);
}
double* dev_returnVector;
cudastatus4=cudaMalloc((void**)&dev_returnVector,MATRIX_DIM_M*sizeof(double));
  cudastatus5=cudaMemset(dev_returnVector,0,MATRIX_DIM_M*sizeof(double));
if(cudastatus4!=cudaSuccess|cudastatus5!=cudaSuccess){
printf("Error in dev_returnVector memory allocation:\nstatus4: %s, status5: %s\nExiting...\n\n",
cudaGetErrorString(cudastatus4),
cudaGetErrorString(cudastatus5));
if(dev_vector) cudaFree(dev_vector);
if(dev_Matrix) cudaFree(dev_Matrix);
if(vector)free(vector);
if(Matrix)free(Matrix);
exit(EXIT_FAILURE);
}
// update constant memory
unsigned int ytiles=ceil((float)MATRIX_DIM_M/(float)BLOCKS);
unsigned int xtiles=ceil((float)MATRIX_DIM_N/(float)THREADS);
printf("ytiles: %d, xtiles: %d\n",ytiles,xtiles);
unsigned int matrixdims[4]={MATRIX_DIM_N,MATRIX_DIM_M,xtiles,ytiles};
  cudastatus6=cudaMemcpyToSymbol(symbolDIMS,matrixdims,4*sizeof(unsigned int));
if(cudastatus6!=cudaSuccess){
printf("Error in symbol copy:\nstatus6: %s.\nExiting...\n\n",
cudaGetErrorString(cudastatus6));
if(dev_vector) cudaFree(dev_vector);
if(dev_Matrix) cudaFree(dev_Matrix);
if(dev_returnVector) cudaFree(dev_returnVector);
if(vector)free(vector);
if(Matrix)free(Matrix);
exit(EXIT_FAILURE);
}
ce=getclock();
// set thread grid layout
const int num_blocks=BLOCKS;
const int num_threads_per_block=THREADS;
printf("Set number of BLOCKS: %d, number of THREADS_PER_BLOCK: %d\n",num_blocks,num_threads_per_block);
printf("------------------------------------------------------------\n\n");
// start timer
cudaEventRecord(start,0);
// call kernel
multMV_kernel<<<num_blocks,num_threads_per_block>>>(dev_Matrix,dev_vector,dev_returnVector);
// end timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop); // barrier
// kernel time
cudaEventElapsedTime(&elapsedtime,start,stop);
cudastatus7=cudaMemcpy(returnvector,dev_returnVector,MATRIX_DIM_M*sizeof(double),cudaMemcpyDeviceToHost);
if(cudastatus7!=cudaSuccess){
printf("Error, kernel return status: %s\nExiting...\n\n",
cudaGetErrorString(cudastatus7));
if(dev_vector) cudaFree(dev_vector);
if(dev_Matrix) cudaFree(dev_Matrix);
if(dev_returnVector) cudaFree(dev_returnVector);
if(vector) free(vector);
if(Matrix) free(Matrix);
exit(EXIT_FAILURE);
}else{
//for(i=0;i<MATRIX_DIM_M;i++) printf("returnvector: %f\n",returnvector[i]);
printf("Kernel return successfully, elapsed time: %6.9lf sec.\n",elapsedtime/1000.0);
printf("Data set up time, elapsed time: %6.9lf sec.\n",ce-cs);
printf("Total gpu kernel elapsed time: %6.9lf sec.\n",(elapsedtime/1000.0)+(ce-cs));
printf("...............................................\n");
}
printf("Calculating with openmp on CPU with 8 cores...");
cs = getclock();
double* wvector = (double*) calloc(MATRIX_DIM_M,sizeof(double));
#pragma omp parallel for private(i,j)
for(i=0;i<MATRIX_DIM_M;i++){
for(j=0;j<MATRIX_DIM_N;j++){
wvector[i]+=Matrix[i*MATRIX_DIM_N+j]*vector[j];
}
}
ce = getclock();
printf("finished, elapsed time: %6.9lf sec.\n",ce-cs);
printf("Calculating on CPU in serial...");
free(wvector);
cs = getclock();
wvector = (double*) calloc(MATRIX_DIM_M,sizeof(double));
for(i=0;i<MATRIX_DIM_M;i++){
for(j=0;j<MATRIX_DIM_N;j++){
wvector[i]+=Matrix[i*MATRIX_DIM_N+j]*vector[j];
}
}
ce = getclock();
printf("finished, elapsed time: %6.9lf sec.\n",ce-cs);
// for(i=0;i<MATRIX_DIM_M;i++)printf("wvector[%d]: %f\n",i,wvector[i]);
double answer=0.;
// float cpu_ysqsum=0.;
// float gpu_ysqsum=0.;
unsigned int counter=0;
printf("\n\nVerifying correct answer from gpu.\n");
for(i=0;i<MATRIX_DIM_M;i++){
answer=wvector[i]-returnvector[i];
// printf("answer: %f\n",answer);
// printf("CPUvector: %f\n",wvector[i]);
// printf("GPUvector: %f\n",returnvector[i]);
// cpu_ysqsum+=(wvector[i]*wvector[i]);
// gpu_ysqsum+=(returnvector[i]*returnvector[i]);
if (answer!=0.0){
printf("Error, divergent value: %f for index: %d\n",answer,i);
printf("CPUvector (serial): %f\n",wvector[i]);
printf("GPUvector: %f\n",returnvector[i]);
counter++;
if(counter>0) break;
}
}
// printf("\nCPU ysqsum: %9.9lf\n",cpu_ysqsum);
// printf("GPU ysqsum: %9.9lf\n\n",gpu_ysqsum);
/*printf("Writing out to file...\n");
FILE* fp=fopen(outfile,"w");
for(i=0;i<MATRIX_DIM;i++){
fprintf(fp,"%f ",y[i]);
}
fclose(fp);*/
printf("Freeing memory...\n");
if(dev_vector) cudaFree(dev_vector);
if(dev_Matrix) cudaFree(dev_Matrix);
if(dev_returnVector)cudaFree(dev_returnVector);
free(vector);
free(returnvector);
free(wvector);
free(Matrix);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Done, exiting...\n\n\n");
exit(EXIT_SUCCESS);
}
// Y-BLOCK TILES
__global__ void multMV_kernel(double* M, double* v, double* w){
unsigned int i,j,tileId,offset,vindex,ypos;
__shared__ double vcache[THREADS];
__shared__ double wback[THREADS];
  //symbolDIMS[0]== N (matrix columns / input-vector length)
  //symbolDIMS[1]== M (matrix rows / result-vector length)
  //symbolDIMS[2]== xtiles
  //symbolDIMS[3]== ytiles
for(i=0;i<symbolDIMS[2];i++){
vindex=i*blockDim.x+threadIdx.x;
if(vindex<symbolDIMS[0]) vcache[threadIdx.x]=v[vindex];
else vcache[threadIdx.x]=0.0;
//do memory accesses
for(j=0;j<symbolDIMS[3];j++){
ypos=j*gridDim.x+blockIdx.x;
tileId=symbolDIMS[0]*ypos+vindex;
if(tileId < symbolDIMS[0]*symbolDIMS[1])wback[threadIdx.x]=M[tileId]*vcache[threadIdx.x];
else wback[threadIdx.x]=0.;
__syncthreads();
// per block thread reduction
offset = blockDim.x/2;
while(offset>0){
if(threadIdx.x<offset)
wback[threadIdx.x]+=wback[threadIdx.x+offset];
__syncthreads();
offset/=2;
}//end while
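      // Worked trace of the tree reduction above (editorial note, not in the original source):
      // with blockDim.x = 8 the offsets visited are 4, 2, 1, so
      //   pass 1: wback[t] += wback[t+4]  (t = 0..3)
      //   pass 2: wback[t] += wback[t+2]  (t = 0..1)
      //   pass 3: wback[0] += wback[1]
      // leaving the sum of all eight per-thread partial products in wback[0].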
// top level tile reduction
if(threadIdx.x==0) w[ypos]+=wback[0];
__syncthreads();
}//end Ytiles-for
}//end Xtiles-for
}//end function
double getclock(){
struct timezone tzp;
struct timeval tp;
gettimeofday (&tp, &tzp);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
|
7dc0040cc974cf3267952a9cc556ac0f5e7e1463.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zgesellcmmv.cu normal z -> s, Fri Jan 30 19:00:29 2015
*/
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "common_magma.h"
#include "sm_32_intrinsics.h"
#define PRECISION_s
//#define TEXTURE
/*
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_4_ldg(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
const float * __restrict__ dx,
float beta,
float * dy)
{
#if defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
dcolind += offset + ldx ;
dval += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = __ldg( dx+ i1 );
x2 = __ldg( dx+ i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = __ldg( dx + dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
#endif
}
*/
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
__global__ void
zgesellptmv2d_kernel_1(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// threads assigned to rows
int Idx = blockDim.x * blockIdx.x + threadIdx.x ;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
if(Idx < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++){
int col = dcolind [offset+ blocksize * n + threadIdx.x ];
float val = dval[offset+ blocksize * n + threadIdx.x];
if( val != 0){
dot=dot+val*dx[col];
}
}
dy[ Idx ] = dot * alpha + beta * dy [ Idx ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_4(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
dcolind += offset + ldx ;
dval += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = dx[ i1 ];
x2 = dx[ i2 ];
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = dx[ dcolind[ block*kk] ];
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_8(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
dcolind += offset + ldx ;
dval += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = dx[ i1 ];
x2 = dx[ i2 ];
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = dx[ dcolind[ block*kk] ];
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ){
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_16(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ){
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_32(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ){
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
/************************* same but using texture mem *************************/
#if defined(PRECISION_d) && defined(TEXTURE)
__inline__ __device__ float
read_from_tex( hipTextureObject_t texdx, const int& i){
int2 temp = tex1Dfetch<int2>( texdx, i );
return __hiloint2float(temp.y,temp.x);
}
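// Editorial note, not in the original source: MAGMA's double-precision counterpart of this
// helper fetches a double as an int2 through the texture path and reassembles it with
// __hiloint2double(hi, lo). In this generated single-precision file the surrounding block is
// guarded by "#if defined(PRECISION_d) && defined(TEXTURE)" while only PRECISION_s is defined,
// so this code (including the nonexistent __hiloint2float intrinsic) is never compiled.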
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_4_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
dcolind += offset + ldx ;
dval += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = read_from_tex( texdx, dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_8_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
dcolind += offset + ldx ;
dval += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = read_from_tex( texdx, dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ){
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_16_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ){
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_32_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ){
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
#endif
/********************* end of texture versions **************************/
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLP.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
                column indices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
                row pointer of SELLP
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
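/*
    Illustrative SELL-P layout (editorial sketch, not part of the original MAGMA source;
    the column index stored for zero padding is an assumption). Take a 4x4 matrix with
    blocksize = 2 rows per slice and alignment = 1:

        row0: [a 0 b 0]        slice 0 = rows {0,1}, widest row has 2 nonzeros
        row1: [0 c 0 0]
        row2: [d 0 0 e]        slice 1 = rows {2,3}, widest row has 2 nonzeros
        row3: [0 0 f 0]

    Values are stored column-major inside each slice, with short rows padded by explicit zeros:

        dval    = [ a c b 0 | d f e 0 ]
        dcolind = [ 0 1 2 0 | 0 2 3 0 ]
        drowptr = [ 0, 4, 8 ]

    In zgesellptmv2d_kernel_1, border = (drowptr[s+1]-drowptr[s])/blocksize = 2 columns per
    slice, and thread threadIdx.x of slice s walks one row, skipping the zero padding.
*/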
extern "C" magma_int_t
magma_sgesellpmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
float alpha,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaFloat_ptr dx,
float beta,
magmaFloat_ptr dy,
magma_queue_t queue )
{
// using a 2D thread grid
int num_threads = blocksize*alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( blocksize, alignment, 1);
int dimgrid1 = (int) sqrt( (float)slices );
int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_threads * sizeof( float );
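    // Illustrative launch configuration (editorial note, not in the original source): for a
    // hypothetical call with blocksize=32, alignment=8 and slices=10000 this yields a 32x8
    // thread block (256 threads), dimgrid1 = dimgrid2 = 100, and Ms = 256*sizeof(float) =
    // 1024 bytes of dynamic shared memory per block.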
#if defined(PRECISION_d) && defined(TEXTURE)
// Create channel.
hipChannelFormatDesc channel_desc;
channel_desc =
hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindSigned);
// Create resource descriptor.
struct hipResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = hipResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)dx;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m*sizeof(float);
// Specify texture object parameters.
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModePoint;
texDesc.readMode = hipReadModeElementType;
// Create texture object.
hipTextureObject_t texdx = 0;
hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
if ( alignment == 4)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex), dim3(grid), dim3(block), Ms, queue ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else if ( alignment == 8)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex), dim3(grid), dim3(block), Ms, queue ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else if ( alignment == 16)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex), dim3(grid), dim3(block), Ms, queue ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else if ( alignment == 32)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex), dim3(grid), dim3(block), Ms, queue ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else {
printf("error: alignment %d not supported.\n", alignment);
return MAGMA_ERR_NOT_SUPPORTED;
}
hipDestroyTextureObject(texdx);
#else
if ( alignment == 1)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_1), dim3(grid), dim3(block), Ms, queue ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else if ( alignment == 4)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_4), dim3(grid), dim3(block), Ms, queue ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else if ( alignment == 8)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_8), dim3(grid), dim3(block), Ms, queue ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else if ( alignment == 16)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_16), dim3(grid), dim3(block), Ms, queue ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else if ( alignment == 32)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_32), dim3(grid), dim3(block), Ms, queue ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else {
printf("error: alignment %d not supported.\n", alignment);
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
return MAGMA_SUCCESS;
}
| 7dc0040cc974cf3267952a9cc556ac0f5e7e1463.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zgesellcmmv.cu normal z -> s, Fri Jan 30 19:00:29 2015
*/
#include "cuda_runtime.h"
#include <stdio.h>
#include "common_magma.h"
#include "sm_32_intrinsics.h"
#define PRECISION_s
//#define TEXTURE
/*
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_4_ldg(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
const float * __restrict__ dx,
float beta,
float * dy)
{
#if defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
dcolind += offset + ldx ;
dval += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = __ldg( dx+ i1 );
x2 = __ldg( dx+ i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = __ldg( dx + dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
#endif
}
*/
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
__global__ void
zgesellptmv2d_kernel_1(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// threads assigned to rows
int Idx = blockDim.x * blockIdx.x + threadIdx.x ;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
if(Idx < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++){
int col = dcolind [offset+ blocksize * n + threadIdx.x ];
float val = dval[offset+ blocksize * n + threadIdx.x];
if( val != 0){
dot=dot+val*dx[col];
}
}
dy[ Idx ] = dot * alpha + beta * dy [ Idx ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_4(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
dcolind += offset + ldx ;
dval += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = dx[ i1 ];
x2 = dx[ i2 ];
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = dx[ dcolind[ block*kk] ];
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_8(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
dcolind += offset + ldx ;
dval += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = dx[ i1 ];
x2 = dx[ i2 ];
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = dx[ dcolind[ block*kk] ];
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ){
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_16(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ){
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_32(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ){
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
/************************* same but using texture mem *************************/
#if defined(PRECISION_d) && defined(TEXTURE)
__inline__ __device__ float
read_from_tex( cudaTextureObject_t texdx, const int& i){
int2 temp = tex1Dfetch<int2>( texdx, i );
return __hiloint2float(temp.y,temp.x);
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_4_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
dcolind += offset + ldx ;
dval += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = read_from_tex( texdx, dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_8_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
dcolind += offset + ldx ;
dval += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = read_from_tex( texdx, dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ){
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_16_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ){
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_32_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ){
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
#endif
/********************* end of texture versions **************************/
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLP.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
                column indices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
                row pointer of SELLP
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgesellpmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
float alpha,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaFloat_ptr dx,
float beta,
magmaFloat_ptr dy,
magma_queue_t queue )
{
// using a 2D thread grid
int num_threads = blocksize*alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( blocksize, alignment, 1);
int dimgrid1 = (int) sqrt( (float)slices );
int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_threads * sizeof( float );
#if defined(PRECISION_d) && defined(TEXTURE)
// Create channel.
cudaChannelFormatDesc channel_desc;
channel_desc =
cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindSigned);
// Create resource descriptor.
struct cudaResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = cudaResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)dx;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m*sizeof(float);
// Specify texture object parameters.
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
// Create texture object.
cudaTextureObject_t texdx = 0;
cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
if ( alignment == 4)
zgesellptmv2d_kernel_4_tex<<< grid, block, Ms, queue >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else if ( alignment == 8)
zgesellptmv2d_kernel_8_tex<<< grid, block, Ms, queue >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else if ( alignment == 16)
zgesellptmv2d_kernel_16_tex<<< grid, block, Ms, queue >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else if ( alignment == 32)
zgesellptmv2d_kernel_32_tex<<< grid, block, Ms, queue >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else {
printf("error: alignment %d not supported.\n", alignment);
return MAGMA_ERR_NOT_SUPPORTED;
}
cudaDestroyTextureObject(texdx);
#else
if ( alignment == 1)
zgesellptmv2d_kernel_1<<< grid, block, Ms, queue >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else if ( alignment == 4)
zgesellptmv2d_kernel_4<<< grid, block, Ms, queue >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else if ( alignment == 8)
zgesellptmv2d_kernel_8<<< grid, block, Ms, queue >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else if ( alignment == 16)
zgesellptmv2d_kernel_16<<< grid, block, Ms, queue >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else if ( alignment == 32)
zgesellptmv2d_kernel_32<<< grid, block, Ms, queue >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else {
printf("error: alignment %d not supported.\n", alignment);
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
return MAGMA_SUCCESS;
}
|
fc8a9f1cb7b9e9089ac7cb3f2594fe897c3adeb6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <vector>
#include <iostream>
#include <string>
#include <nvmatrix.cuh>
#include <nvmatrix_operators.cuh>
#include <matrix.h>
#include <convnet.cuh>
#include <util.cuh>
//using namespace std;
/*
* =======================
* ConvNet
* =======================
*/
ConvNet::ConvNet(PyListObject* layerParams, int minibatchSize, int deviceID) : Thread(false), _deviceID(deviceID), _data(NULL) {
try {
int numLayers = PyList_GET_SIZE(layerParams);
for (int i = 0; i < numLayers; i++) {
PyObject* paramsDict = PyList_GET_ITEM(layerParams, i);
string layerType = pyDictGetString(paramsDict, "type");
Layer* l = initLayer(layerType, paramsDict);
// Connect backward links in graph for this layer
intv* inputLayers = pyDictGetIntV(paramsDict, "inputs");
if (inputLayers != NULL) {
for (int i = 0; i < inputLayers->size(); i++) {
l->addPrev(&getLayer(inputLayers->at(i)));
}
}
delete inputLayers;
}
// Connect the forward links in the graph
for (int i = 0; i < _layers.size(); i++) {
vector<Layer*>& prev = _layers[i]->getPrev();
for (int j = 0; j < prev.size(); j++) {
prev[j]->addNext(_layers[i]);
}
}
// Execute post-initialization stuff
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->postInit();
}
_dp = new DataProvider(minibatchSize);
} catch (string& s) {
cout << "Error creating ConvNet: " << s << endl;
exit(1);
}
}
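/*
 * Editorial sketch (not in the original source, layer names hypothetical): for a definition
 * list data(0) -> fc(1, inputs=[0]) -> cost.logreg(2, inputs=[1]), the first loop above
 * records prev(1)={0} and prev(2)={1}, and the second loop then derives the forward links
 * next(0)={1} and next(1)={2} before postInit() runs on every layer.
 */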
/*
* Override this in derived classes
*/
Layer* ConvNet::initLayer(string& layerType, PyObject* paramsDict) {
if (layerType == "fc") {
_layers.push_back(new FCLayer(this, paramsDict));
} else if (layerType == "conv") {
_layers.push_back(new ConvLayer(this, paramsDict));
} else if (layerType == "local") {
_layers.push_back(new LocalUnsharedLayer(this, paramsDict));
} else if (layerType == "pool") {
_layers.push_back(&PoolLayer::makePoolLayer(this, paramsDict));
} else if (layerType == "rnorm") {
_layers.push_back(new ResponseNormLayer(this, paramsDict));
} else if (layerType == "cmrnorm") {
_layers.push_back(new CrossMapResponseNormLayer(this, paramsDict));
} else if (layerType == "cnorm") {
_layers.push_back(new ContrastNormLayer(this, paramsDict));
} else if (layerType == "softmax") {
_layers.push_back(new SoftmaxLayer(this, paramsDict));
} else if (layerType == "eltsum") {
_layers.push_back(new EltwiseSumLayer(this, paramsDict));
} else if (layerType == "eltmax") {
_layers.push_back(new EltwiseMaxLayer(this, paramsDict));
} else if (layerType == "neuron") {
_layers.push_back(new NeuronLayer(this, paramsDict));
} else if (layerType == "nailbed") {
_layers.push_back(new NailbedLayer(this, paramsDict));
} else if (layerType == "blur") {
_layers.push_back(new GaussianBlurLayer(this, paramsDict));
} else if (layerType == "resize") {
_layers.push_back(new ResizeLayer(this, paramsDict));
} else if (layerType == "rgb2yuv") {
_layers.push_back(new RGBToYUVLayer(this, paramsDict));
} else if (layerType == "rgb2lab") {
_layers.push_back(new RGBToLABLayer(this, paramsDict));
} else if (layerType == "data") {
DataLayer *d = new DataLayer(this, paramsDict);
_layers.push_back(d);
_dataLayers.push_back(d);
} else if (strncmp(layerType.c_str(), "cost.", 5) == 0) {
CostLayer *c = &CostLayer::makeCostLayer(this, layerType, paramsDict);
_layers.push_back(c);
_costs.push_back(c);
} else {
throw string("Unknown layer type ") + layerType;
}
return _layers.back();
}
/*
* This executes in a new CPU thread so it's OK to initialize CUDA stuff here.
*/
void ConvNet::initCuda() {
//hipSetDevice(_deviceID < 0 ? cutGetMaxGflopsDeviceId() : _deviceID);
hipSetDevice(_deviceID);
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
hipblasInit();
NVMatrix::initRandom(time(0));
copyToGPU();
}
void* ConvNet::run() {
initCuda();
while (true) {
Worker* worker = _workerQueue.dequeue();
worker->run();
delete worker;
}
return NULL;
}
Queue<Worker*>& ConvNet::getWorkerQueue() {
return _workerQueue;
}
Queue<WorkResult*>& ConvNet::getResultQueue() {
return _resultQueue;
}
DataProvider& ConvNet::getDataProvider() {
return *_dp;
}
Layer& ConvNet::operator[](int idx) {
return *_layers[idx];
}
Layer& ConvNet::getLayer(int idx) {
return *_layers[idx];
}
void ConvNet::copyToCPU() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->copyToCPU();
}
}
void ConvNet::copyToGPU() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->copyToGPU();
}
}
void ConvNet::updateWeights() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->updateWeights();
}
}
void ConvNet::reset() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->reset();
}
}
int ConvNet::getNumLayers() {
return _layers.size();
}
void ConvNet::bprop(PASS_TYPE passType) {
for (int i = 0; i < _costs.size(); i++) {
_costs[i]->bprop(passType);
}
reset();
}
void ConvNet::fprop(PASS_TYPE passType) {
assert(_data != NULL);
reset();
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->fprop(_data->getData(), passType);
}
}
void ConvNet::fprop(GPUData& data, PASS_TYPE passType) {
if (&data != _data) {
delete _data;
}
_data = &data;
fprop(passType);
}
void ConvNet::fprop(int miniIdx, PASS_TYPE passType) {
delete _data;
_data = &_dp->getMinibatch(miniIdx);
fprop(passType);
}
Cost& ConvNet::getCost() {
return *new Cost(_data->getNumCases(), _costs);
}
// Same as getCost() but adds results to given cost and returns it
Cost& ConvNet::getCost(Cost& cost) {
Cost& newCost = getCost();
cost += newCost;
delete &newCost;
return cost;
}
double ConvNet::getCostValue() {
Cost& cost = getCost();
double val = cost.getValue();
delete &cost;
return val;
}
/*
* Gradient checking stuff
*/
void ConvNet::checkGradients() {
_numFailures = 0;
_numTests = 0;
fprop(0, PASS_GC);
_baseErr = getCostValue();
bprop(PASS_GC);
for (vector<Layer*>::iterator it = _layers.begin(); it != _layers.end(); ++it) {
(*it)->checkGradients();
}
cout << "------------------------" << endl;
if (_numFailures > 0) {
cout << _numFailures << "/" << _numTests << " TESTS FAILED" << endl;
} else {
cout << "ALL " << _numTests << " TESTS PASSED" << endl;
}
}
/*
* name: weight matrix name
* eps: finite difference step
*/
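/*
 * Editorial note (not in the original source): the loop below forms a forward
 * finite-difference estimate of the gradient,
 *     numGrad(i,j) ~= ( E(W + eps*e_ij) - E(W) ) / (numCases * eps),
 * and compares it against the analytic gradient scaled by -1/numCases; the check
 * fails when ||numGrad - analGrad|| / ||analGrad|| >= GC_REL_ERR_THRESH.
 */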
bool ConvNet::checkGradient(const string& name, float eps, Weights& weights) {
Matrix numGrad(weights.getNumRows(), weights.getNumCols());
Matrix diff(numGrad);
numGrad.apply(Matrix::ZERO);
Matrix weightsCPU;
weights.getW().copyToHost(weightsCPU, true);
for(int i = 0; i < weights.getNumRows(); i++) {
for (int j = 0; j < weights.getNumCols(); j++) {
float v = weightsCPU(i,j);
weightsCPU(i,j) += eps;
weights.getW().copyFromHost(weightsCPU);
weightsCPU(i,j) = v;
fprop(PASS_GC);
double err = getCostValue();
numGrad(i,j) = (err - _baseErr) / (_data->getNumCases() * eps);
if (isnan(numGrad(i,j)) || isinf(numGrad(i,j))) {
cout << "Numerical computation produced nan or inf when checking '" << name << "': " << numGrad(i,j) << endl;
cout << "Consider reducing the sizes of the weights or finite difference steps." << endl;
cout << "Exiting." << endl;
exit(1);
}
weights.getW().copyFromHost(weightsCPU);
}
}
Matrix gradCPU;
weights.getGrad().copyToHost(gradCPU, true);
gradCPU.scale(-1.0 / _data->getNumCases());
float analNorm = gradCPU.norm();
float numNorm = numGrad.norm();
numGrad.subtract(gradCPU, diff);
float relErr = diff.norm() / analNorm;
bool fail = relErr >= GC_REL_ERR_THRESH;
if (fail || !GC_SUPPRESS_PASSES) {
cout << "========================" << endl;
printf("(%s) %s GRADIENT CHECK\n", fail ? "****FAIL****" : "PASS", name.c_str());
cout << "========================" << endl;
cout << "Analytic:" << endl;
gradCPU.print(6,4);
cout << "Numeric:" << endl;
numGrad.print(6,4);
printf("Analytic norm: %e\n", analNorm);
printf("Numeric norm: %e\n", numNorm);
printf("Relative error: %e\n", relErr);
}
_numTests++;
_numFailures += fail;
return fail;
}
| fc8a9f1cb7b9e9089ac7cb3f2594fe897c3adeb6.cu | /*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <vector>
#include <iostream>
#include <string>
#include <nvmatrix.cuh>
#include <nvmatrix_operators.cuh>
#include <matrix.h>
#include <convnet.cuh>
#include <util.cuh>
//using namespace std;
/*
* =======================
* ConvNet
* =======================
*/
ConvNet::ConvNet(PyListObject* layerParams, int minibatchSize, int deviceID) : Thread(false), _deviceID(deviceID), _data(NULL) {
try {
int numLayers = PyList_GET_SIZE(layerParams);
for (int i = 0; i < numLayers; i++) {
PyObject* paramsDict = PyList_GET_ITEM(layerParams, i);
string layerType = pyDictGetString(paramsDict, "type");
Layer* l = initLayer(layerType, paramsDict);
// Connect backward links in graph for this layer
intv* inputLayers = pyDictGetIntV(paramsDict, "inputs");
if (inputLayers != NULL) {
for (int i = 0; i < inputLayers->size(); i++) {
l->addPrev(&getLayer(inputLayers->at(i)));
}
}
delete inputLayers;
}
// Connect the forward links in the graph
for (int i = 0; i < _layers.size(); i++) {
vector<Layer*>& prev = _layers[i]->getPrev();
for (int j = 0; j < prev.size(); j++) {
prev[j]->addNext(_layers[i]);
}
}
// Execute post-initialization stuff
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->postInit();
}
_dp = new DataProvider(minibatchSize);
} catch (string& s) {
cout << "Error creating ConvNet: " << s << endl;
exit(1);
}
}
/*
* Override this in derived classes
*/
Layer* ConvNet::initLayer(string& layerType, PyObject* paramsDict) {
if (layerType == "fc") {
_layers.push_back(new FCLayer(this, paramsDict));
} else if (layerType == "conv") {
_layers.push_back(new ConvLayer(this, paramsDict));
} else if (layerType == "local") {
_layers.push_back(new LocalUnsharedLayer(this, paramsDict));
} else if (layerType == "pool") {
_layers.push_back(&PoolLayer::makePoolLayer(this, paramsDict));
} else if (layerType == "rnorm") {
_layers.push_back(new ResponseNormLayer(this, paramsDict));
} else if (layerType == "cmrnorm") {
_layers.push_back(new CrossMapResponseNormLayer(this, paramsDict));
} else if (layerType == "cnorm") {
_layers.push_back(new ContrastNormLayer(this, paramsDict));
} else if (layerType == "softmax") {
_layers.push_back(new SoftmaxLayer(this, paramsDict));
} else if (layerType == "eltsum") {
_layers.push_back(new EltwiseSumLayer(this, paramsDict));
} else if (layerType == "eltmax") {
_layers.push_back(new EltwiseMaxLayer(this, paramsDict));
} else if (layerType == "neuron") {
_layers.push_back(new NeuronLayer(this, paramsDict));
} else if (layerType == "nailbed") {
_layers.push_back(new NailbedLayer(this, paramsDict));
} else if (layerType == "blur") {
_layers.push_back(new GaussianBlurLayer(this, paramsDict));
} else if (layerType == "resize") {
_layers.push_back(new ResizeLayer(this, paramsDict));
} else if (layerType == "rgb2yuv") {
_layers.push_back(new RGBToYUVLayer(this, paramsDict));
} else if (layerType == "rgb2lab") {
_layers.push_back(new RGBToLABLayer(this, paramsDict));
} else if (layerType == "data") {
DataLayer *d = new DataLayer(this, paramsDict);
_layers.push_back(d);
_dataLayers.push_back(d);
} else if (strncmp(layerType.c_str(), "cost.", 5) == 0) {
CostLayer *c = &CostLayer::makeCostLayer(this, layerType, paramsDict);
_layers.push_back(c);
_costs.push_back(c);
} else {
throw string("Unknown layer type ") + layerType;
}
return _layers.back();
}
/*
* This executes in a new CPU thread so it's OK to initialize CUDA stuff here.
*/
void ConvNet::initCuda() {
//cudaSetDevice(_deviceID < 0 ? cutGetMaxGflopsDeviceId() : _deviceID);
cudaSetDevice(_deviceID);
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
cublasInit();
NVMatrix::initRandom(time(0));
copyToGPU();
}
void* ConvNet::run() {
initCuda();
while (true) {
Worker* worker = _workerQueue.dequeue();
worker->run();
delete worker;
}
return NULL;
}
Queue<Worker*>& ConvNet::getWorkerQueue() {
return _workerQueue;
}
Queue<WorkResult*>& ConvNet::getResultQueue() {
return _resultQueue;
}
DataProvider& ConvNet::getDataProvider() {
return *_dp;
}
Layer& ConvNet::operator[](int idx) {
return *_layers[idx];
}
Layer& ConvNet::getLayer(int idx) {
return *_layers[idx];
}
void ConvNet::copyToCPU() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->copyToCPU();
}
}
void ConvNet::copyToGPU() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->copyToGPU();
}
}
void ConvNet::updateWeights() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->updateWeights();
}
}
void ConvNet::reset() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->reset();
}
}
int ConvNet::getNumLayers() {
return _layers.size();
}
void ConvNet::bprop(PASS_TYPE passType) {
for (int i = 0; i < _costs.size(); i++) {
_costs[i]->bprop(passType);
}
reset();
}
void ConvNet::fprop(PASS_TYPE passType) {
assert(_data != NULL);
reset();
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->fprop(_data->getData(), passType);
}
}
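// This overload takes ownership of the supplied minibatch: the previously held GPUData
// (if different) is freed and the new pointer is retained until the next fprop call,
// so callers are expected to hand over heap-allocated data here.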
void ConvNet::fprop(GPUData& data, PASS_TYPE passType) {
if (&data != _data) {
delete _data;
}
_data = &data;
fprop(passType);
}
void ConvNet::fprop(int miniIdx, PASS_TYPE passType) {
delete _data;
_data = &_dp->getMinibatch(miniIdx);
fprop(passType);
}
Cost& ConvNet::getCost() {
return *new Cost(_data->getNumCases(), _costs);
}
// Same as getCost() but adds results to given cost and returns it
Cost& ConvNet::getCost(Cost& cost) {
Cost& newCost = getCost();
cost += newCost;
delete &newCost;
return cost;
}
double ConvNet::getCostValue() {
Cost& cost = getCost();
double val = cost.getValue();
delete &cost;
return val;
}
/*
* Gradient checking stuff
*/
void ConvNet::checkGradients() {
_numFailures = 0;
_numTests = 0;
fprop(0, PASS_GC);
_baseErr = getCostValue();
bprop(PASS_GC);
for (vector<Layer*>::iterator it = _layers.begin(); it != _layers.end(); ++it) {
(*it)->checkGradients();
}
cout << "------------------------" << endl;
if (_numFailures > 0) {
cout << _numFailures << "/" << _numTests << " TESTS FAILED" << endl;
} else {
cout << "ALL " << _numTests << " TESTS PASSED" << endl;
}
}
/*
* name: weight matrix name
* eps: finite difference step
*/
bool ConvNet::checkGradient(const string& name, float eps, Weights& weights) {
Matrix numGrad(weights.getNumRows(), weights.getNumCols());
Matrix diff(numGrad);
numGrad.apply(Matrix::ZERO);
Matrix weightsCPU;
weights.getW().copyToHost(weightsCPU, true);
for(int i = 0; i < weights.getNumRows(); i++) {
for (int j = 0; j < weights.getNumCols(); j++) {
float v = weightsCPU(i,j);
weightsCPU(i,j) += eps;
weights.getW().copyFromHost(weightsCPU);
weightsCPU(i,j) = v;
fprop(PASS_GC);
double err = getCostValue();
numGrad(i,j) = (err - _baseErr) / (_data->getNumCases() * eps);
if (isnan(numGrad(i,j)) || isinf(numGrad(i,j))) {
cout << "Numerical computation produced nan or inf when checking '" << name << "': " << numGrad(i,j) << endl;
cout << "Consider reducing the sizes of the weights or finite difference steps." << endl;
cout << "Exiting." << endl;
exit(1);
}
weights.getW().copyFromHost(weightsCPU);
}
}
Matrix gradCPU;
weights.getGrad().copyToHost(gradCPU, true);
gradCPU.scale(-1.0 / _data->getNumCases());
float analNorm = gradCPU.norm();
float numNorm = numGrad.norm();
numGrad.subtract(gradCPU, diff);
float relErr = diff.norm() / analNorm;
bool fail = relErr >= GC_REL_ERR_THRESH;
if (fail || !GC_SUPPRESS_PASSES) {
cout << "========================" << endl;
printf("(%s) %s GRADIENT CHECK\n", fail ? "****FAIL****" : "PASS", name.c_str());
cout << "========================" << endl;
cout << "Analytic:" << endl;
gradCPU.print(6,4);
cout << "Numeric:" << endl;
numGrad.print(6,4);
printf("Analytic norm: %e\n", analNorm);
printf("Numeric norm: %e\n", numNorm);
printf("Relative error: %e\n", relErr);
}
_numTests++;
_numFailures += fail;
return fail;
}
|
1539fbe775f8a7c185dc0c32b2e9ebcd7e33ea61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "arquivo3.h"
#include "comm/comm.h"
__global__ void compute3(int *d_buffer){
int ix = blockIdx.x*blockDim.x + threadIdx.x;
d_buffer[ix] = d_buffer[ix]*10;
__syncthreads();
}
extern "C" void funcao4(){
int N=500;
int *buffer,*d_buffer;
int i,j,sum;
dim3 grid, block;
block.x = 1024;
grid.x = (N + block.x - 1) / block.x;
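  // Note: with N = 500 this gives grid.x = 1, so the launch below covers only
  // grid.x*block.x = 1024 threads while d_buffer holds N*N = 250000 ints; compute3
  // therefore touches only the first 1024 elements. If full coverage is intended,
  // the grid would need to be sized from N*N instead of N.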
buffer = (int*) malloc(sizeof(int)*N*N);
hipMalloc(&d_buffer,sizeof(int)*N*N);
sum = 0;
for(i=0;i<11;i++){
receiveMessage("funcao4","funcao3", INT, (void*)d_buffer, N*N);
for(j=0;j<100000;j++){
hipLaunchKernelGGL(( compute3), dim3(grid),dim3(block), 0, 0, d_buffer);
}
//sendMessage("funcao4","funcao5", INT, (void*)d_buffer, N*N);
//hipMemcpy(buffer,d_buffer,N*N*sizeof(int),hipMemcpyDeviceToHost);
//for(j=0;j<N*N;j++){
// sum = sum + buffer[j];
//}
}
//printf("Soma Funo 3: %d\n",sum);
//printf("Recebendo Mensagem...\n");
//receiveMessage("funcao3","funcao1", INT, (void*)buffer, 10);
//for(i=0;i<10;i++){
// printf("Buffer[%d]: %d\n",i,buffer[i]);
//}
//printf("Mensagem recebida...\n");
}
| 1539fbe775f8a7c185dc0c32b2e9ebcd7e33ea61.cu | #include <stdio.h>
#include <stdlib.h>
#include "arquivo3.h"
#include "comm/comm.h"
__global__ void compute3(int *d_buffer){
int ix = blockIdx.x*blockDim.x + threadIdx.x;
d_buffer[ix] = d_buffer[ix]*10;
__syncthreads();
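   // The trailing __syncthreads() is not needed for correctness here: each thread reads
   // and writes only its own element of d_buffer.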
}
extern "C" void funcao4(){
int N=500;
int *buffer,*d_buffer;
int i,j,sum;
dim3 grid, block;
block.x = 1024;
grid.x = (N + block.x - 1) / block.x;
buffer = (int*) malloc(sizeof(int)*N*N);
cudaMalloc(&d_buffer,sizeof(int)*N*N);
sum = 0;
for(i=0;i<11;i++){
receiveMessage("funcao4","funcao3", INT, (void*)d_buffer, N*N);
for(j=0;j<100000;j++){
compute3<<<grid,block>>>(d_buffer);
}
//sendMessage("funcao4","funcao5", INT, (void*)d_buffer, N*N);
//cudaMemcpy(buffer,d_buffer,N*N*sizeof(int),cudaMemcpyDeviceToHost);
//for(j=0;j<N*N;j++){
// sum = sum + buffer[j];
//}
}
//printf("Soma Função 3: %d\n",sum);
//printf("Recebendo Mensagem...\n");
//receiveMessage("funcao3","funcao1", INT, (void*)buffer, 10);
//for(i=0;i<10;i++){
// printf("Buffer[%d]: %d\n",i,buffer[i]);
//}
//printf("Mensagem recebida...\n");
}
|
f0be759cd1624171d1d91f6f9fc82bc5a337e388.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <vector>
#include <stdio.h>
#include <algorithm>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand.h>
using namespace std;
#define EIGEN_USE_GPU
#define maxThreadsPerBlock 1024
__global__ void _qsgdreduceSumV2(float *g_odata, float *g_idata, unsigned int n)
{
extern __shared__ float sdata[];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int gridSize = blockDim.x * gridDim.x;
unsigned int i = blockIdx.x * blockDim.x + tid;
unsigned int blockSize = blockDim.x;
sdata[tid] = 0;
while (i < n) {
sdata[tid] += g_idata[i];// + g_idata[i + blockDim.x];
i += gridSize;
}
__syncthreads();
// in-place reduction and complete unroll
if (blockSize >= 1024) {
if (tid < 512) sdata[tid] += sdata[tid + 512];
__syncthreads();
}
if (blockSize >= 512) {
if (tid < 256) sdata[tid] += sdata[tid + 256];
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) sdata[tid] += sdata[tid + 128];
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) sdata[tid] += sdata[tid + 64];
__syncthreads();
}
// unrolling warp
if (tid < 32)
{
volatile float *vsmem = sdata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) {
g_odata[blockIdx.x] = sdata[0];
}
}
__global__ void _qsgdreduceClipThresholdV2(float *g_odata, float *g_idata, unsigned int n)
{
extern __shared__ float sdata[];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int gridSize = blockDim.x * gridDim.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int blockSize = blockDim.x;
sdata[tid] = 0;
while (i < n) {
if (isfinite(g_idata[i])) {
sdata[tid] += g_idata[i] * g_idata[i];// + g_idata[i + blockDim.x] * g_idata[i + blockDim.x];
}
i += gridSize;
}
__syncthreads();
// in-place reduction and complete unroll
if (blockSize >= 1024) {
if (tid < 512) sdata[tid] += sdata[tid + 512];
__syncthreads();
}
if (blockSize >= 512) {
if (tid < 256) sdata[tid] += sdata[tid + 256];
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) sdata[tid] += sdata[tid + 128];
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) sdata[tid] += sdata[tid + 64];
__syncthreads();
}
// unrolling warp
if (tid < 32)
{
volatile float *vsmem = sdata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) {
g_odata[blockIdx.x] = sdata[0];
}
}
__global__ void _qsgdreduceAbsMaxV2(float *g_odata, float *g_idata, unsigned int n)
{
extern __shared__ float sdata[];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int gridSize = blockDim.x * gridDim.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int blockSize = blockDim.x;
sdata[tid] = 0;
while (i < n) {
if (isfinite(g_idata[i]) && isfinite(sdata[tid]))
sdata[tid] = fmaxf(sdata[tid], fabsf(g_idata[i])); //fmaxf(fabsf(g_idata[i]), fabsf(g_idata[i + blockDim.x])));
else
sdata[tid] = nanf("123");
i += gridSize;
}
__syncthreads();
// in-place reduction and complete unroll
if (blockSize >= 1024) {
if (tid < 512) {
if (isfinite(sdata[tid]) && isfinite(sdata[tid + 512])) sdata[tid] = fmaxf(sdata[tid], sdata[tid + 512]);
else sdata[tid] = nanf("123");
}
__syncthreads();
}
if (blockSize >= 512) {
if (tid < 256) {
if (isfinite(sdata[tid]) && isfinite(sdata[tid + 256])) sdata[tid] = fmaxf(sdata[tid], sdata[tid + 256]);
else sdata[tid] = nanf("123");
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
if (isfinite(sdata[tid]) && isfinite(sdata[tid + 128])) sdata[tid] = fmaxf(sdata[tid], sdata[tid + 128]);
else sdata[tid] = nanf("123");
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
if (isfinite(sdata[tid]) && isfinite(sdata[tid + 64])) sdata[tid] = fmaxf(sdata[tid], sdata[tid + 64]);
else sdata[tid] = nanf("123");
}
__syncthreads();
}
// unrolling warp
if (tid < 32)
{
volatile float *vsmem = sdata;
if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 32]))
vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 32]);
else vsmem[tid] = nanf("123");
if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 16]))
vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 16]);
else vsmem[tid] = nanf("123");
if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 8]))
vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 8]);
else vsmem[tid] = nanf("123");
if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 4]))
vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 4]);
else vsmem[tid] = nanf("123");
if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 2]))
vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 2]);
else vsmem[tid] = nanf("123");
if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 1]))
vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 1]);
else vsmem[tid] = nanf("123");
}
// write result for this block to global mem
if (tid == 0) {
g_odata[blockIdx.x] = sdata[0];
}
}
__global__ void _qsgdcomputeSqrt(float *scaler)
{
*scaler = sqrt(*scaler);
//printf("l2 norm result: %f\n", *scaler);
//__syncthreads();
}
__global__ void _qsgdinitCURand(unsigned int len, unsigned int seed, hiprandState_t* states)
{
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
/* we have to initialize the state */
if (index < len)
hiprand_init(seed + index, /* the seed can be the same for each core, here we pass the time in from the CPU */
0, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[index]);
}
__global__ void _qsgdcompensateMemory(float *dst, const float *src, const float *local_mem, int len)
{
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int stride = gridDim.x * blockDim.x;
for (int i = index; i < len; i += stride){
if (isfinite(src[i])) {
//dst[i] = src[i]; // + local_mem[i]; //remove memory compensation for comparison purposes.
dst[i] = src[i] + local_mem[i];
}
else {
dst[i] = nanf("123");
}
//printf("CompensateMemory result: idx=%d, src=%f, mem=%f, dst=%f\n", i, src[i], local_mem[i], dst[i]);
//__syncthreads();
}
}
__global__ void _qsgdTernarizeValue(int8_t *dst, const float *src, float *scaler, float *local_mem, const int len, int level, hiprandState_t* states)
{
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int stride = gridDim.x * blockDim.x;
hiprandState_t local_state = states[index];
float norm_scaler = *scaler;
// The input tensor here has been clipped.
// Hence we have the ternarize formula: dst[i] = new_level[i] * sign(src[i])
for (int i = index; i < len; i += stride) {
if (isfinite(norm_scaler) && isfinite(src[i])) {
float rand_sample = hiprand_uniform(&local_state);
float level_float = (float)level / norm_scaler * fabsf(src[i]);
int8_t previous_level = floor(level_float);
if (rand_sample < level_float - previous_level) {
dst[i] = previous_level + 1; // 1 is required by qsgd
}
else {
dst[i] = previous_level;
}
if (src[i] < 0){
dst[i] = -dst[i];
}
// update local memory
local_mem[i] = src[i] - norm_scaler / (float)level * (float)dst[i]; // remove vanilla local memory update for comparison purposes.
}
else {
// encode value to the minimum for Inf or NaN
dst[i] = -128;
}
//printf("compressed result: idx=%d, scaler=%f, src=%f, dst=%d, update_mem=%f\n", i, *scaler, src[i], dst[i], local_mem[i]);
//__syncthreads();
}
}
// For qsgd allreduce
// __global__ void _qsgdDeternarizeValue(int len, float *dst, int8_t *src, float *scaler, int level)
// {
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// int stride = blockDim.x * gridDim.x;
// float norm_scaler = *scaler;
// for (int i = index; i < len; i += stride)
// {
// dst[i] = norm_scaler / (float)level * (float)src[i];
// }
// }
// For qsgd allgather
__global__ void _qsgdDeternarizeAndAdd(int len, float *dst, int8_t *src, float *scaler, int level)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
float norm_scaler = *scaler;
for (int i = index; i < len; i += stride) {
if (src[i] == -128) {
dst[i] = nanf("123");
}
else {
dst[i] += norm_scaler / (float)level * (float)src[i];
}
//printf("decompressed result: idx=%d, scaler=%f, src=%d, dst=%f\n", i, *scaler, src[i], dst[i]);
//__syncthreads();
}
}
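// _bucket_l2norm: grid-stride loop over buckets; for each bucket the sum of squares of
// its finite elements is accumulated in double precision and sqrt(sum) is written to
// dst[bucket]. The tail bucket of len % bucket_size elements is handled by the thread
// whose global index equals loop_times. dst must be zero-initialized by the caller
// (torch::zeros does this in qsgd_compress_cuda).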
__global__ void _bucket_l2norm(const int len, double *dst, float *src, const int bucket_size)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
const int loop_times = len / bucket_size;
const int remain_nums = len % bucket_size;
for (int i = index; i < loop_times; i += stride)
{
#pragma unroll
for (int j = 0; j < bucket_size; j ++){
if (isfinite(src[bucket_size*i+j])) {
dst[i] += (double)(src[bucket_size*i+j]) * (double)(src[bucket_size*i+j]);
}
}
dst[i] = sqrt(dst[i]);
}
if (remain_nums && index == loop_times){
#pragma unroll
for (int i = 0; i < remain_nums; i++){
if (isfinite(src[bucket_size*loop_times+i])) {
dst[loop_times] += (double)(src[bucket_size*loop_times+i]) * (double)(src[bucket_size*loop_times+i]);
}
}
dst[loop_times] = sqrt(dst[loop_times]);
}
}
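// _bucket_qsgdTernarizeValue: QSGD-style stochastic quantization. With s = level and
// norm = the bucket's L2 norm, each finite value x is encoded as an integer
//     q = floor(s * |x| / norm) + Bernoulli(frac(s * |x| / norm)),   signed by sign(x),
// so that (norm / s) * q is an unbiased estimate of x. Non-finite values (or values in a
// bucket whose norm is non-finite) are encoded as -128 and decoded back to NaN.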
__global__ void _bucket_qsgdTernarizeValue(int8_t *dst, const float *src, double *scaler, const int len, int level, const int bucket_size, unsigned int seed)
{
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int stride = gridDim.x * blockDim.x;
// hiprandState_t local_state = states[index];
hiprandState_t local_state;
// The input tensor here has been clipped.
// Hence we have the ternarize formula: dst[i] = new_level[i] * sign(src[i])
for (int i = index; i < len; i += stride) {
float norm_scaler = (float)(scaler[i/bucket_size]);
hiprand_init(seed + index, 0, 0, &local_state);
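        // Note: hiprand_init is re-run here on every loop iteration with the same
        // (seed + index), so hiprand_uniform below returns the same sample for every
        // element this thread processes; initializing the state once before the loop
        // would give independent samples per element.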
if (isfinite(norm_scaler) && isfinite(src[i])) {
float rand_sample = hiprand_uniform(&local_state);
float level_float = (float)level / norm_scaler * fabsf(src[i]);
int8_t previous_level = floor(level_float);
if (rand_sample < level_float - previous_level) {
dst[i] = previous_level + 1; // 1 is required by qsgd
}
else {
dst[i] = previous_level;
}
if (src[i] < 0){
dst[i] = -dst[i];
}
// update local memory
//local_mem[i] = src[i] - norm_scaler / (float)level * (float)dst[i]; // remove vanilla local memory update for comparison purposes.
}
else {
// encode value to the minimum for Inf or NaN
dst[i] = -128;
}
//printf("compressed result: idx=%d, scaler=%f, src=%f, dst=%d, update_mem=%f\n", i, *scaler, src[i], dst[i], local_mem[i]);
//__syncthreads();
}
}
// For qsgd allgather
__global__ void _bucket_qsgdDeternarizeAndAdd(int len, float *dst, int8_t *src, double *scaler, int level, const int bucket_size)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < len; i += stride) {
float norm_scaler = (float)(scaler[i/bucket_size]);
if (src[i] == -128) {
dst[i] = nanf("123");
}
else {
dst[i] = norm_scaler / (float)level * (float)src[i];
//atomicAdd(dst+i, norm_scaler / (float)level * (float)src[i]);
}
//printf("decompressed result: idx=%d, scaler=%f, src=%d, dst=%f\n", i, *scaler, src[i], dst[i]);
//__syncthreads();
}
}
/*----------------------------------- Reduce Wrapper --------------------------------------------*/
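// qsgdGPUReduce: multi-pass reduction. While more than maxThreadsPerBlock blocks of
// partial results would remain, it launches a block-per-segment pass and feeds the
// per-block partials back in; a final single-block, grid-stride pass then produces the
// scalar result in *result. whichKernel selects the reduction: 0 = sum, 1 = abs-max,
// 2 = sum of squares (switched to plain sum after the first pass so partials are not
// squared again).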
void qsgdGPUReduce(int len, float *d_out, float *d_intermediate_res, float *result, int whichKernel, hipStream_t stream) {
// d_intermediate_res holds the input
// setting up blocks
int numBlocks = (int) ceil(1.0 * len / maxThreadsPerBlock); //(len / maxThreadsPerBlock) + 1;
int prevNumBlocks = len;
// recursively reduce to get the result
while (numBlocks > maxThreadsPerBlock) {
// clear d_out
hipMemset(d_out, 0, numBlocks * sizeof(float));
switch (whichKernel) {
// reduce sum
case 0:
hipLaunchKernelGGL(( _qsgdreduceSumV2), dim3(numBlocks), dim3(maxThreadsPerBlock), maxThreadsPerBlock * sizeof(float), 0, d_out, d_intermediate_res, len);
break;
// reduce absmax
case 1:
hipLaunchKernelGGL(( _qsgdreduceAbsMaxV2), dim3(numBlocks), dim3(maxThreadsPerBlock), maxThreadsPerBlock * sizeof(float), 0, d_out, d_intermediate_res, len);
break;
// reduce clip threshold
case 2:
hipLaunchKernelGGL(( _qsgdreduceClipThresholdV2), dim3(numBlocks), dim3(maxThreadsPerBlock), maxThreadsPerBlock * sizeof(float), 0, d_out, d_intermediate_res, len);
// we don't need to square the intermediate results.
whichKernel = 0;
break;
default:
break;
}
// by now, d_out holds the intermediate result, copy it to intermedaite_res for the next run
hipMemcpy(d_intermediate_res, d_out, numBlocks * sizeof(float), hipMemcpyDeviceToDevice);
// compute reduced problem size
prevNumBlocks = numBlocks;
len = numBlocks;
numBlocks = (int) ceil(1.0 * numBlocks / maxThreadsPerBlock); //numBlocks / maxThreadsPerBlock + 1;
}
// use one block to compute the rest.
// clear d_out
hipMemset(d_out, 0, prevNumBlocks* sizeof(float));
switch (whichKernel) {
// reduce sum
case 0:
hipLaunchKernelGGL(( _qsgdreduceSumV2), dim3(1), dim3(maxThreadsPerBlock), maxThreadsPerBlock * sizeof(float), 0, d_out, d_intermediate_res, prevNumBlocks);
break;
// reduce absmax
case 1:
hipLaunchKernelGGL(( _qsgdreduceAbsMaxV2), dim3(1), dim3(maxThreadsPerBlock), maxThreadsPerBlock * sizeof(float), 0, d_out, d_intermediate_res, prevNumBlocks);
break;
// reduce clip threshold
case 2:
hipLaunchKernelGGL(( _qsgdreduceClipThresholdV2), dim3(1), dim3(maxThreadsPerBlock), maxThreadsPerBlock * sizeof(float), 0, d_out, d_intermediate_res, prevNumBlocks);
break;
default:
break;
}
// as we just use one block, just move the first element of d_out to result
hipMemcpy(result, d_out, sizeof(float), hipMemcpyDeviceToDevice);
}
/*----------------------------------- Kernel Launch Wrappers ------------------------------------*/
void GPUReduceL2Norm(float *array, int len, double *l2norm_scaler, const int bucket_size)
{
int blocksPerGrid = (int) ceil(1.0 * len / maxThreadsPerBlock);
hipLaunchKernelGGL(( _bucket_l2norm), dim3(blocksPerGrid), dim3(maxThreadsPerBlock), 0, 0, len, l2norm_scaler, array, bucket_size);
}
// void qsgdGPUInit_curand(int n, unsigned int seed, hiprandState_t* cuda_states)
// {
// int blocksPerGrid = (int) ceil(1.0 * n / maxThreadsPerBlock);
// _qsgdinitCURand<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(n, seed, cuda_states);
// }
// void qsgdGPUCompensateMemory(float *dst, const float *src, const float* local_mem, int len)
// {
// int blocksPerGrid = (int) ceil(1.0 * len / maxThreadsPerBlock);
// _qsgdcompensateMemory<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(dst, src, local_mem, len);
// }
void GPUTernarizeMultiLevelValue(int8_t *dst, const float *src, double *scaler, int len, int level, const int bucket_size)
{
int blocksPerGrid = (int) ceil(1.0 * ::min(len, 1024 * 1024 * 25) / maxThreadsPerBlock);
unsigned int seed = time(NULL);
hipLaunchKernelGGL(( _bucket_qsgdTernarizeValue), dim3(blocksPerGrid), dim3(maxThreadsPerBlock), 0, 0, dst, src, scaler, len, level, bucket_size, seed);
}
void GPUDeternarizeMultiLevelValue(int len, float *dst, int8_t *src, double *scaler, int level, const int bucket_size)
{
int blocksPerGrid = (int) ceil(1.0 * len / maxThreadsPerBlock);
hipLaunchKernelGGL(( _bucket_qsgdDeternarizeAndAdd), dim3(blocksPerGrid), dim3(maxThreadsPerBlock), 0, 0, len, dst, src, scaler, level, bucket_size);
}
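// Python-facing entry points (bound elsewhere via the usual pybind11 extension glue):
// qsgd_compress_cuda returns {int8 codes, per-bucket float64 L2 norms} for a float32
// CUDA tensor, and qsgd_decompress_cuda reverses the mapping. A minimal round-trip
// sketch, assuming the extension module is imported as `qsgd_cuda` (the module name is
// an assumption, not defined in this file):
//
//   codes, norms = qsgd_cuda.qsgd_compress_cuda(grad, level, bucket_size)
//   approx = qsgd_cuda.qsgd_decompress_cuda(codes, norms, level, bucket_size)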
std::vector<torch::Tensor> qsgd_compress_cuda(torch::Tensor input, int level, int bucket_size) {
int element_nums = input.numel();
int num_buckets = ceil((float)element_nums / bucket_size);
auto d_l2norm_scaler = torch::zeros(num_buckets, torch::TensorOptions().dtype(torch::kFloat64).device(input.device()));
auto buffer_data = torch::empty(element_nums, torch::TensorOptions().dtype(torch::kInt8).device(input.device()));
// hiprandState_t* cuda_states;
// cuda_states = (hiprandState_t*)torch::empty(element_nums, torch::TensorOptions().dtype(torch::kInt).device(input.device())).data_ptr();
// qsgdGPUInit_curand(element_nums, time(NULL), cuda_states);
GPUReduceL2Norm((float*)input.data_ptr(), element_nums, (double*)d_l2norm_scaler.data_ptr(), bucket_size);
GPUTernarizeMultiLevelValue((int8_t*)buffer_data.data_ptr(), (float*)input.data_ptr(), (double*)d_l2norm_scaler.data_ptr(),
element_nums, level, bucket_size);
return {buffer_data, d_l2norm_scaler};
}
torch::Tensor qsgd_decompress_cuda(torch::Tensor input, torch::Tensor d_l2norm_scaler, int level, int bucket_size) {
int element_nums = input.numel();
int num_buckets = ceil((float)element_nums / bucket_size);
auto buffer_data = torch::empty(element_nums, torch::TensorOptions().dtype(torch::kFloat32).device(input.device()));
GPUDeternarizeMultiLevelValue(element_nums, (float*)buffer_data.data_ptr(), (int8_t*)input.data_ptr(),
(double*)d_l2norm_scaler.data_ptr(), level, bucket_size);
return buffer_data;
} | f0be759cd1624171d1d91f6f9fc82bc5a337e388.cu | #include <torch/extension.h>
#include <vector>
#include <stdio.h>
#include <algorithm>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <curand.h>
using namespace std;
#define EIGEN_USE_GPU
#define maxThreadsPerBlock 1024
__global__ void _qsgdreduceSumV2(float *g_odata, float *g_idata, unsigned int n)
{
extern __shared__ float sdata[];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int gridSize = blockDim.x * gridDim.x;
unsigned int i = blockIdx.x * blockDim.x + tid;
unsigned int blockSize = blockDim.x;
sdata[tid] = 0;
while (i < n) {
sdata[tid] += g_idata[i];// + g_idata[i + blockDim.x];
i += gridSize;
}
__syncthreads();
// in-place reduction and complete unroll
if (blockSize >= 1024) {
if (tid < 512) sdata[tid] += sdata[tid + 512];
__syncthreads();
}
if (blockSize >= 512) {
if (tid < 256) sdata[tid] += sdata[tid + 256];
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) sdata[tid] += sdata[tid + 128];
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) sdata[tid] += sdata[tid + 64];
__syncthreads();
}
// unrolling warp
if (tid < 32)
{
volatile float *vsmem = sdata;
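        // Classic warp-synchronous unrolling: the volatile pointer keeps the shared-memory
        // accesses ordered within a warp without __syncthreads(). On architectures with
        // independent thread scheduling (Volta and newer) this pattern generally needs
        // __syncwarp() between steps to stay correct.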
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) {
g_odata[blockIdx.x] = sdata[0];
}
}
__global__ void _qsgdreduceClipThresholdV2(float *g_odata, float *g_idata, unsigned int n)
{
extern __shared__ float sdata[];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int gridSize = blockDim.x * gridDim.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int blockSize = blockDim.x;
sdata[tid] = 0;
while (i < n) {
if (isfinite(g_idata[i])) {
sdata[tid] += g_idata[i] * g_idata[i];// + g_idata[i + blockDim.x] * g_idata[i + blockDim.x];
}
i += gridSize;
}
__syncthreads();
// in-place reduction and complete unroll
if (blockSize >= 1024) {
if (tid < 512) sdata[tid] += sdata[tid + 512];
__syncthreads();
}
if (blockSize >= 512) {
if (tid < 256) sdata[tid] += sdata[tid + 256];
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) sdata[tid] += sdata[tid + 128];
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) sdata[tid] += sdata[tid + 64];
__syncthreads();
}
// unrolling warp
if (tid < 32)
{
volatile float *vsmem = sdata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) {
g_odata[blockIdx.x] = sdata[0];
}
}
__global__ void _qsgdreduceAbsMaxV2(float *g_odata, float *g_idata, unsigned int n)
{
extern __shared__ float sdata[];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int gridSize = blockDim.x * gridDim.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int blockSize = blockDim.x;
sdata[tid] = 0;
while (i < n) {
if (isfinite(g_idata[i]) && isfinite(sdata[tid]))
sdata[tid] = fmaxf(sdata[tid], fabsf(g_idata[i])); //fmaxf(fabsf(g_idata[i]), fabsf(g_idata[i + blockDim.x])));
else
sdata[tid] = nanf("123");
i += gridSize;
}
__syncthreads();
// in-place reduction and complete unroll
if (blockSize >= 1024) {
if (tid < 512) {
if (isfinite(sdata[tid]) && isfinite(sdata[tid + 512])) sdata[tid] = fmaxf(sdata[tid], sdata[tid + 512]);
else sdata[tid] = nanf("123");
}
__syncthreads();
}
if (blockSize >= 512) {
if (tid < 256) {
if (isfinite(sdata[tid]) && isfinite(sdata[tid + 256])) sdata[tid] = fmaxf(sdata[tid], sdata[tid + 256]);
else sdata[tid] = nanf("123");
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
if (isfinite(sdata[tid]) && isfinite(sdata[tid + 128])) sdata[tid] = fmaxf(sdata[tid], sdata[tid + 128]);
else sdata[tid] = nanf("123");
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
if (isfinite(sdata[tid]) && isfinite(sdata[tid + 64])) sdata[tid] = fmaxf(sdata[tid], sdata[tid + 64]);
else sdata[tid] = nanf("123");
}
__syncthreads();
}
// unrolling warp
if (tid < 32)
{
volatile float *vsmem = sdata;
if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 32]))
vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 32]);
else vsmem[tid] = nanf("123");
if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 16]))
vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 16]);
else vsmem[tid] = nanf("123");
if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 8]))
vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 8]);
else vsmem[tid] = nanf("123");
if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 4]))
vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 4]);
else vsmem[tid] = nanf("123");
if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 2]))
vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 2]);
else vsmem[tid] = nanf("123");
if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 1]))
vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 1]);
else vsmem[tid] = nanf("123");
}
// write result for this block to global mem
if (tid == 0) {
g_odata[blockIdx.x] = sdata[0];
}
}
__global__ void _qsgdcomputeSqrt(float *scaler)
{
*scaler = sqrt(*scaler);
//printf("l2 norm result: %f\n", *scaler);
//__syncthreads();
}
__global__ void _qsgdinitCURand(unsigned int len, unsigned int seed, curandState* states)
{
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
/* we have to initialize the state */
if (index < len)
curand_init(seed + index, /* the seed can be the same for each core, here we pass the time in from the CPU */
0, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[index]);
}
__global__ void _qsgdcompensateMemory(float *dst, const float *src, const float *local_mem, int len)
{
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int stride = gridDim.x * blockDim.x;
for (int i = index; i < len; i += stride){
if (isfinite(src[i])) {
//dst[i] = src[i]; // + local_mem[i]; //remove memory compensation for comparison purposes.
dst[i] = src[i] + local_mem[i];
}
else {
dst[i] = nanf("123");
}
//printf("CompensateMemory result: idx=%d, src=%f, mem=%f, dst=%f\n", i, src[i], local_mem[i], dst[i]);
//__syncthreads();
}
}
__global__ void _qsgdTernarizeValue(int8_t *dst, const float *src, float *scaler, float *local_mem, const int len, int level, curandState* states)
{
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int stride = gridDim.x * blockDim.x;
curandState local_state = states[index];
float norm_scaler = *scaler;
// The input tensor here has been clipped.
// Hence we have the ternarize formula: dst[i] = new_level[i] * sign(src[i])
for (int i = index; i < len; i += stride) {
if (isfinite(norm_scaler) && isfinite(src[i])) {
float rand_sample = curand_uniform(&local_state);
float level_float = (float)level / norm_scaler * fabsf(src[i]);
int8_t previous_level = floor(level_float);
if (rand_sample < level_float - previous_level) {
dst[i] = previous_level + 1; // 1 is required by qsgd
}
else {
dst[i] = previous_level;
}
if (src[i] < 0){
dst[i] = -dst[i];
}
// update local memory
local_mem[i] = src[i] - norm_scaler / (float)level * (float)dst[i]; // remove vanilla local memory update for comparison purposes.
}
else {
// encode value to the minimum for Inf or NaN
dst[i] = -128;
}
//printf("compressed result: idx=%d, scaler=%f, src=%f, dst=%d, update_mem=%f\n", i, *scaler, src[i], dst[i], local_mem[i]);
//__syncthreads();
}
}
// For qsgd allreduce
// __global__ void _qsgdDeternarizeValue(int len, float *dst, int8_t *src, float *scaler, int level)
// {
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// int stride = blockDim.x * gridDim.x;
// float norm_scaler = *scaler;
// for (int i = index; i < len; i += stride)
// {
// dst[i] = norm_scaler / (float)level * (float)src[i];
// }
// }
// For qsgd allgather
__global__ void _qsgdDeternarizeAndAdd(int len, float *dst, int8_t *src, float *scaler, int level)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
float norm_scaler = *scaler;
for (int i = index; i < len; i += stride) {
if (src[i] == -128) {
dst[i] = nanf("123");
}
else {
dst[i] += norm_scaler / (float)level * (float)src[i];
}
//printf("decompressed result: idx=%d, scaler=%f, src=%d, dst=%f\n", i, *scaler, src[i], dst[i]);
//__syncthreads();
}
}
__global__ void _bucket_l2norm(const int len, double *dst, float *src, const int bucket_size)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
const int loop_times = len / bucket_size;
const int remain_nums = len % bucket_size;
for (int i = index; i < loop_times; i += stride)
{
#pragma unroll
for (int j = 0; j < bucket_size; j ++){
if (isfinite(src[bucket_size*i+j])) {
dst[i] += (double)(src[bucket_size*i+j]) * (double)(src[bucket_size*i+j]);
}
}
dst[i] = sqrt(dst[i]);
}
if (remain_nums && index == loop_times){
#pragma unroll
for (int i = 0; i < remain_nums; i++){
if (isfinite(src[bucket_size*loop_times+i])) {
dst[loop_times] += (double)(src[bucket_size*loop_times+i]) * (double)(src[bucket_size*loop_times+i]);
}
}
dst[loop_times] = sqrt(dst[loop_times]);
}
}
__global__ void _bucket_qsgdTernarizeValue(int8_t *dst, const float *src, double *scaler, const int len, int level, const int bucket_size, unsigned int seed)
{
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int stride = gridDim.x * blockDim.x;
// curandState local_state = states[index];
curandState local_state;
// The input tensor here has been clipped.
// Hence we have the ternarize formula: dst[i] = new_level[i] * sign(src[i])
for (int i = index; i < len; i += stride) {
float norm_scaler = (float)(scaler[i/bucket_size]);
curand_init(seed + index, 0, 0, &local_state);
if (isfinite(norm_scaler) && isfinite(src[i])) {
float rand_sample = curand_uniform(&local_state);
float level_float = (float)level / norm_scaler * fabsf(src[i]);
int8_t previous_level = floor(level_float);
if (rand_sample < level_float - previous_level) {
dst[i] = previous_level + 1; // 1 is required by qsgd
}
else {
dst[i] = previous_level;
}
if (src[i] < 0){
dst[i] = -dst[i];
}
// update local memory
//local_mem[i] = src[i] - norm_scaler / (float)level * (float)dst[i]; // remove vanilla local memory update for comparison purposes.
}
else {
// encode value to the minimum for Inf or NaN
dst[i] = -128;
}
//printf("compressed result: idx=%d, scaler=%f, src=%f, dst=%d, update_mem=%f\n", i, *scaler, src[i], dst[i], local_mem[i]);
//__syncthreads();
}
}
// For qsgd allgather
__global__ void _bucket_qsgdDeternarizeAndAdd(int len, float *dst, int8_t *src, double *scaler, int level, const int bucket_size)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < len; i += stride) {
float norm_scaler = (float)(scaler[i/bucket_size]);
if (src[i] == -128) {
dst[i] = nanf("123");
}
else {
dst[i] = norm_scaler / (float)level * (float)src[i];
//atomicAdd(dst+i, norm_scaler / (float)level * (float)src[i]);
}
//printf("decompressed result: idx=%d, scaler=%f, src=%d, dst=%f\n", i, *scaler, src[i], dst[i]);
//__syncthreads();
}
}
/*----------------------------------- Reduce Wrapper --------------------------------------------*/
void qsgdGPUReduce(int len, float *d_out, float *d_intermediate_res, float *result, int whichKernel, cudaStream_t stream) {
// d_intermediate_res holds the input
// setting up blocks
int numBlocks = (int) ceil(1.0 * len / maxThreadsPerBlock); //(len / maxThreadsPerBlock) + 1;
int prevNumBlocks = len;
// recursively reduce to get the result
while (numBlocks > maxThreadsPerBlock) {
// clear d_out
cudaMemset(d_out, 0, numBlocks * sizeof(float));
switch (whichKernel) {
// reduce sum
case 0:
_qsgdreduceSumV2<<<numBlocks, maxThreadsPerBlock, maxThreadsPerBlock * sizeof(float)>>>(d_out, d_intermediate_res, len);
break;
// reduce absmax
case 1:
_qsgdreduceAbsMaxV2<<<numBlocks, maxThreadsPerBlock, maxThreadsPerBlock * sizeof(float)>>>(d_out, d_intermediate_res, len);
break;
// reduce clip threshold
case 2:
_qsgdreduceClipThresholdV2<<<numBlocks, maxThreadsPerBlock, maxThreadsPerBlock * sizeof(float)>>>(d_out, d_intermediate_res, len);
// we don't need to square the intermediate results.
whichKernel = 0;
break;
default:
break;
}
// by now, d_out holds the intermediate result, copy it to intermedaite_res for the next run
cudaMemcpy(d_intermediate_res, d_out, numBlocks * sizeof(float), cudaMemcpyDeviceToDevice);
// compute reduced problem size
prevNumBlocks = numBlocks;
len = numBlocks;
numBlocks = (int) ceil(1.0 * numBlocks / maxThreadsPerBlock); //numBlocks / maxThreadsPerBlock + 1;
}
// use one block to compute the rest.
// clear d_out
cudaMemset(d_out, 0, prevNumBlocks* sizeof(float));
switch (whichKernel) {
// reduce sum
case 0:
_qsgdreduceSumV2<<<1, maxThreadsPerBlock, maxThreadsPerBlock * sizeof(float)>>>(d_out, d_intermediate_res, prevNumBlocks);
break;
// reduce absmax
case 1:
_qsgdreduceAbsMaxV2<<<1, maxThreadsPerBlock, maxThreadsPerBlock * sizeof(float)>>>(d_out, d_intermediate_res, prevNumBlocks);
break;
// reduce clip threshold
case 2:
_qsgdreduceClipThresholdV2<<<1, maxThreadsPerBlock, maxThreadsPerBlock * sizeof(float)>>>(d_out, d_intermediate_res, prevNumBlocks);
break;
default:
break;
}
// as we just use one block, just move the first element of d_out to result
cudaMemcpy(result, d_out, sizeof(float), cudaMemcpyDeviceToDevice);
}
/*----------------------------------- Kernel Launch Wrappers ------------------------------------*/
void GPUReduceL2Norm(float *array, int len, double *l2norm_scaler, const int bucket_size)
{
int blocksPerGrid = (int) ceil(1.0 * len / maxThreadsPerBlock);
_bucket_l2norm<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(len, l2norm_scaler, array, bucket_size);
}
// void qsgdGPUInit_curand(int n, unsigned int seed, curandState* cuda_states)
// {
// int blocksPerGrid = (int) ceil(1.0 * n / maxThreadsPerBlock);
// _qsgdinitCURand<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(n, seed, cuda_states);
// }
// void qsgdGPUCompensateMemory(float *dst, const float *src, const float* local_mem, int len)
// {
// int blocksPerGrid = (int) ceil(1.0 * len / maxThreadsPerBlock);
// _qsgdcompensateMemory<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(dst, src, local_mem, len);
// }
void GPUTernarizeMultiLevelValue(int8_t *dst, const float *src, double *scaler, int len, int level, const int bucket_size)
{
int blocksPerGrid = (int) ceil(1.0 * std::min(len, 1024 * 1024 * 25) / maxThreadsPerBlock);
unsigned int seed = time(NULL);
_bucket_qsgdTernarizeValue<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(dst, src, scaler, len, level, bucket_size, seed);
}
void GPUDeternarizeMultiLevelValue(int len, float *dst, int8_t *src, double *scaler, int level, const int bucket_size)
{
int blocksPerGrid = (int) ceil(1.0 * len / maxThreadsPerBlock);
_bucket_qsgdDeternarizeAndAdd<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(len, dst, src, scaler, level, bucket_size);
}
std::vector<torch::Tensor> qsgd_compress_cuda(torch::Tensor input, int level, int bucket_size) {
int element_nums = input.numel();
int num_buckets = ceil((float)element_nums / bucket_size);
auto d_l2norm_scaler = torch::zeros(num_buckets, torch::TensorOptions().dtype(torch::kFloat64).device(input.device()));
auto buffer_data = torch::empty(element_nums, torch::TensorOptions().dtype(torch::kInt8).device(input.device()));
// curandState* cuda_states;
// cuda_states = (curandState*)torch::empty(element_nums, torch::TensorOptions().dtype(torch::kInt).device(input.device())).data_ptr();
// qsgdGPUInit_curand(element_nums, time(NULL), cuda_states);
GPUReduceL2Norm((float*)input.data_ptr(), element_nums, (double*)d_l2norm_scaler.data_ptr(), bucket_size);
GPUTernarizeMultiLevelValue((int8_t*)buffer_data.data_ptr(), (float*)input.data_ptr(), (double*)d_l2norm_scaler.data_ptr(),
element_nums, level, bucket_size);
return {buffer_data, d_l2norm_scaler};
}
torch::Tensor qsgd_decompress_cuda(torch::Tensor input, torch::Tensor d_l2norm_scaler, int level, int bucket_size) {
int element_nums = input.numel();
int num_buckets = ceil((float)element_nums / bucket_size);
auto buffer_data = torch::empty(element_nums, torch::TensorOptions().dtype(torch::kFloat32).device(input.device()));
GPUDeternarizeMultiLevelValue(element_nums, (float*)buffer_data.data_ptr(), (int8_t*)input.data_ptr(),
(double*)d_l2norm_scaler.data_ptr(), level, bucket_size);
return buffer_data;
} |
fc88e720517a75bf1b151e8d0b490eb868a363b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <iomanip>
#include <vector>
#include <string>
#include <fstream>
using namespace std;
void Linspace(double*, double, double, int);
void Uniform(double*, double, int);
__global__ void RungeKuttaStepOriginal(double*, double*, int);
__device__ void RightHandSide(double&, double, double);
template <class DataType>
DataType* AllocateHostMemory(int);
template <class DataType>
DataType* AllocateDeviceMemory(int);
int main()
{
// INITIAL SETUP ----------------------------------------------------------------------------------
int NumberOfProblems = 61440; // 92160
int BlockSize = 128;
hipSetDevice(1);
double* h_State = AllocateHostMemory<double>(NumberOfProblems);
double* h_Parameters = AllocateHostMemory<double>(NumberOfProblems);
double* d_State = AllocateDeviceMemory<double>(NumberOfProblems);
double* d_Parameters = AllocateDeviceMemory<double>(NumberOfProblems);
Linspace(h_Parameters, 0.1, 1.0, NumberOfProblems);
Uniform(h_State, -0.5, NumberOfProblems);
hipMemcpy(d_State, h_State, sizeof(double)*NumberOfProblems, hipMemcpyHostToDevice);
hipMemcpy(d_Parameters, h_Parameters, sizeof(double)*NumberOfProblems, hipMemcpyHostToDevice);
int GridSize = NumberOfProblems/BlockSize + (NumberOfProblems % BlockSize == 0 ? 0:1);
clock_t SimulationStart;
clock_t SimulationEnd;
SimulationStart = clock();
hipLaunchKernelGGL(( RungeKuttaStepOriginal), dim3(GridSize), dim3(BlockSize), 0, 0, d_State, d_Parameters, NumberOfProblems);
hipDeviceSynchronize();
SimulationEnd = clock();
cout << "Simulation time: " << 1000.0*(SimulationEnd-SimulationStart) / CLOCKS_PER_SEC << "ms" << endl << endl;
cout << "Simulation time / 1000 RK4 step: " << 1000.0*(SimulationEnd-SimulationStart) / CLOCKS_PER_SEC << "ms" << endl;
cout << "Ensemble size: " << NumberOfProblems << endl << endl;
hipMemcpy(h_State, d_State, sizeof(double)*NumberOfProblems, hipMemcpyDeviceToHost);
//for (int i=0; i<NumberOfProblems; i++)
// cout << "P: " << h_Parameters[i] << " Sates: " << h_State[i] << endl;
}
// AUXILIARY FUNCTION -----------------------------------------------------------------------------
void Linspace(double* x, double B, double E, int N)
{
double Increment;
x[0] = B;
if ( N>1 )
{
x[N-1] = E;
Increment = (E-B)/(N-1);
for (int i=1; i<N-1; i++)
{
x[i] = B + i*Increment;
}
}
}
void Uniform(double* x, double V, int N)
{
for (int i=0; i<N; i++)
{
x[i] = V;
}
}
__forceinline__ __device__ void RightHandSide(double& F, double X, double P)
{
F = X*X - P; // 1 FMA
}
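// Classical 4th-order Runge-Kutta integration of dX/dt = X*X - P with fixed step dT:
//   k1 = f(X), k2 = f(X + dT/2 * k1), k3 = f(X + dT/2 * k2), k4 = f(X + dT * k3),
//   X <- X + dT/6 * (k1 + 2*k2 + 2*k3 + k4).
// Each thread integrates one (initial state, parameter) pair for 1000 steps.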
__global__ void RungeKuttaStepOriginal(double* d_State, double* d_Parameters, int N)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N)
{
double X;
double P;
double k1;
double k2;
double k3;
double k4;
double x;
double dT = 0.01;
double dTp2 = 0.5*dT;
double dTp6 = dT * (1.0/6.0);
X = d_State[tid];
P = d_Parameters[tid];
for (int i=0; i<1000; i++)
{
// k1
RightHandSide(k1, X, P);
x = X + dTp2*k1;
RightHandSide(k2, x, P);
x = X + dTp2*k2;
RightHandSide(k3, x, P);
x = X + dT*k3;
RightHandSide(k4, x, P);
X = X + dTp6*( k1 + 2*k2 + 2*k3 + k4 );
}
d_State[tid] = X;
}
}
template <class DataType>
DataType* AllocateHostMemory(int N)
{
DataType* HostMemory = new (std::nothrow) DataType [N];
if (HostMemory == NULL)
{
std::cerr << "Failed to allocate Memory on the HOST!\n";
exit(EXIT_FAILURE);
}
return HostMemory;
}
template <class DataType>
DataType* AllocateDeviceMemory(int N)
{
hipError_t Error = hipSuccess;
DataType* MemoryAddressInDevice = NULL;
Error = hipMalloc((void**)&MemoryAddressInDevice, N * sizeof(DataType));
if (Error != hipSuccess)
{
std::cerr << "Failed to allocate Memory on the DEVICE!\n";
exit(EXIT_FAILURE);
}
return MemoryAddressInDevice;
} | fc88e720517a75bf1b151e8d0b490eb868a363b2.cu | #include <iostream>
#include <iomanip>
#include <vector>
#include <string>
#include <fstream>
using namespace std;
void Linspace(double*, double, double, int);
void Uniform(double*, double, int);
__global__ void RungeKuttaStepOriginal(double*, double*, int);
__device__ void RightHandSide(double&, double, double);
template <class DataType>
DataType* AllocateHostMemory(int);
template <class DataType>
DataType* AllocateDeviceMemory(int);
int main()
{
// INITIAL SETUP ----------------------------------------------------------------------------------
int NumberOfProblems = 61440; // 92160
int BlockSize = 128;
cudaSetDevice(1);
double* h_State = AllocateHostMemory<double>(NumberOfProblems);
double* h_Parameters = AllocateHostMemory<double>(NumberOfProblems);
double* d_State = AllocateDeviceMemory<double>(NumberOfProblems);
double* d_Parameters = AllocateDeviceMemory<double>(NumberOfProblems);
Linspace(h_Parameters, 0.1, 1.0, NumberOfProblems);
Uniform(h_State, -0.5, NumberOfProblems);
cudaMemcpy(d_State, h_State, sizeof(double)*NumberOfProblems, cudaMemcpyHostToDevice);
cudaMemcpy(d_Parameters, h_Parameters, sizeof(double)*NumberOfProblems, cudaMemcpyHostToDevice);
int GridSize = NumberOfProblems/BlockSize + (NumberOfProblems % BlockSize == 0 ? 0:1);
clock_t SimulationStart;
clock_t SimulationEnd;
SimulationStart = clock();
RungeKuttaStepOriginal<<<GridSize, BlockSize>>> (d_State, d_Parameters, NumberOfProblems);
cudaDeviceSynchronize();
SimulationEnd = clock();
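    // clock() gives a host-side timestamp; the cudaDeviceSynchronize() above ensures the
    // kernel has completed before this point, so the interval approximates the kernel's
    // execution time (CUDA events would give a more precise GPU-side measurement).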
cout << "Simulation time: " << 1000.0*(SimulationEnd-SimulationStart) / CLOCKS_PER_SEC << "ms" << endl << endl;
cout << "Simulation time / 1000 RK4 step: " << 1000.0*(SimulationEnd-SimulationStart) / CLOCKS_PER_SEC << "ms" << endl;
cout << "Ensemble size: " << NumberOfProblems << endl << endl;
cudaMemcpy(h_State, d_State, sizeof(double)*NumberOfProblems, cudaMemcpyDeviceToHost);
//for (int i=0; i<NumberOfProblems; i++)
// cout << "P: " << h_Parameters[i] << " Sates: " << h_State[i] << endl;
}
// AUXILIARY FUNCTION -----------------------------------------------------------------------------
void Linspace(double* x, double B, double E, int N)
{
double Increment;
x[0] = B;
if ( N>1 )
{
x[N-1] = E;
Increment = (E-B)/(N-1);
for (int i=1; i<N-1; i++)
{
x[i] = B + i*Increment;
}
}
}
void Uniform(double* x, double V, int N)
{
for (int i=0; i<N; i++)
{
x[i] = V;
}
}
__forceinline__ __device__ void RightHandSide(double& F, double X, double P)
{
F = X*X - P; // 1 FMA
}
__global__ void RungeKuttaStepOriginal(double* d_State, double* d_Parameters, int N)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N)
{
double X;
double P;
double k1;
double k2;
double k3;
double k4;
double x;
double dT = 0.01;
double dTp2 = 0.5*dT;
double dTp6 = dT * (1.0/6.0);
X = d_State[tid];
P = d_Parameters[tid];
for (int i=0; i<1000; i++)
{
// k1
RightHandSide(k1, X, P);
x = X + dTp2*k1;
RightHandSide(k2, x, P);
x = X + dTp2*k2;
RightHandSide(k3, x, P);
x = X + dT*k3;
RightHandSide(k4, x, P);
X = X + dTp6*( k1 + 2*k2 + 2*k3 + k4 );
}
d_State[tid] = X;
}
}
template <class DataType>
DataType* AllocateHostMemory(int N)
{
DataType* HostMemory = new (std::nothrow) DataType [N];
if (HostMemory == NULL)
{
std::cerr << "Failed to allocate Memory on the HOST!\n";
exit(EXIT_FAILURE);
}
return HostMemory;
}
template <class DataType>
DataType* AllocateDeviceMemory(int N)
{
cudaError_t Error = cudaSuccess;
DataType* MemoryAddressInDevice = NULL;
Error = cudaMalloc((void**)&MemoryAddressInDevice, N * sizeof(DataType));
if (Error != cudaSuccess)
{
std::cerr << "Failed to allocate Memory on the DEVICE!\n";
exit(EXIT_FAILURE);
}
return MemoryAddressInDevice;
} |
2fc07a953cad6b692c5e808444ab43e940d64bd4.hip | // !!! This is a file automatically generated by hipify!!!
/*
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include <ctime>
#include <time.h>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.hip"
#include "filter.h"
#include "callbacks.h"
#include "zone_map.h"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#define fseek(S, S1, S2) _fseeki64(S, S1, S2)
#include <windows.h>
#else
#include <unistd.h>
#endif
using namespace std;
using namespace thrust::placeholders;
size_t total_count = 0, total_max;
clock_t tot;
unsigned int total_segments = 0, old_segments;
size_t process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
bool verbose;
bool interactive, ssd, delta, star;
unsigned int prs;
void* d_v = nullptr;
void* s_v = nullptr;
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
string grp_val;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> op_nums_precision;
queue<string> col_aliases;
map<string, map<string, col_data> > data_dict;
map<unsigned int, map<unsigned long long int, size_t> > char_hash;
map<string, char*> index_buffers;
map<string, unsigned long long int*> idx_vals;
map<string, char*> buffers;
map<string, size_t> buffer_sizes;
size_t total_buffer_size;
queue<string> buffer_names;
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string, unsigned int> cpy_bits;
map<string, long long int> cpy_init_val;
char* readbuff = nullptr;
thrust::device_vector<unsigned int> rcol_matches;
thrust::device_vector<int_type> rcol_dev;
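// Comparison functors used with thrust algorithms: floating-point equality and ordering
// are evaluated with an EPSILON tolerance rather than exact comparison, so two values
// compare equal whenever their difference lies strictly inside (-EPSILON, EPSILON).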
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON) || ((x-y) < -EPSILON);
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
template <typename T>
struct power_functor : public thrust::unary_function<T,T>
{
unsigned int a;
__host__ __device__
power_functor(unsigned int a_) {
a = a_;
}
__host__ __device__
T operator()(T x)
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
struct is_zero
{
__host__ __device__
bool operator()(const int &x)
{
return x == 0;
}
};
int get_utc_offset() {
time_t zero = 24*60*60L;
struct tm * timeptr;
int gmtime_hours;
/* get the local time for Jan 2, 1900 00:00 UTC */
timeptr = localtime( &zero );
gmtime_hours = timeptr->tm_hour;
/* if the local time is the "day before" the UTC, subtract 24 hours
from the hours to get the UTC offset */
if( timeptr->tm_mday < 2 )
gmtime_hours -= 24;
return gmtime_hours;
}
/*
the utc analogue of mktime,
(much like timegm on some systems)
*/
time_t tm_to_time_t_utc( struct tm * timeptr ) {
/* gets the epoch time relative to the local time zone,
and then adds the appropriate number of seconds to make it UTC */
return mktime( timeptr ) + get_utc_offset() * 3600;
}
/*class power_functor {
unsigned int a;
public:
power_functor(unsigned int a_) { a = a_; }
__host__ __device__ int_type operator()(int_type x) const
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
*/
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t getFreeMem();
size_t getTotalSystemMemory();
void process_error(int severity, string err);
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
source = 1;
text_source = 1;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
: mColumnCount(0), mRecCount(0)
{
maxRecs = max;
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
source = 1;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(const size_t RecordCount, const unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> op_sel, const queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(string colname, size_t RecordCount)
{
if (type[colname] != 1 ) {
d_columns_int[colname].resize(RecordCount);
}
else
d_columns_float[colname].resize(RecordCount);
};
void CudaSet::resize_join(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else
h_columns_float[columnNames[i]].resize(mRecCount);
};
};
void CudaSet::resize(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else {
h_columns_float[columnNames[i]].resize(mRecCount);
}
};
};
void CudaSet::deAllocColumnOnDevice(string colname)
{
if (type[colname] != 1 && !d_columns_int.empty() && d_columns_int.find(colname) != d_columns_int.end()) {
if(d_columns_int[colname].size() > 0) {
d_columns_int[colname].resize(0);
d_columns_int[colname].shrink_to_fit();
};
}
else
if (type[colname] == 1 && !d_columns_float.empty()) {
if (d_columns_float[colname].size() > 0) {
d_columns_float[colname].resize(0);
d_columns_float[colname].shrink_to_fit();
};
};
};
void CudaSet::allocOnDevice(size_t RecordCount)
{
for(unsigned int i=0; i < columnNames.size(); i++)
allocColumnOnDevice(columnNames[i], RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i < columnNames.size(); i++) {
deAllocColumnOnDevice(columnNames[i]);
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
for (auto it=d_columns_int.begin(); it != d_columns_int.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
for (auto it=d_columns_float.begin(); it != d_columns_float.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
if(filtered) { // dealloc the source
if(varNames.find(source_name) != varNames.end()) {
varNames[source_name]->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(size_t RecCount, string colname)
{
if (type[colname] != 1) {
d_columns_int[colname].resize(RecCount);
}
else
d_columns_float[colname].resize(RecCount);
};
void CudaSet::resizeDevice(size_t RecCount)
{
for(unsigned int i=0; i < columnNames.size(); i++) {
resizeDeviceColumn(RecCount, columnNames[i]);
};
};
bool CudaSet::onDevice(string colname)
{
if (type[colname] != 1) {
if (!d_columns_int.empty() && d_columns_int[colname].size())
return 1;
}
else
if (!d_columns_float.empty() && d_columns_float[colname].size())
return 1;
return 0;
}
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
a->columnNames = columnNames;
a->ts_cols = ts_cols;
a->cols = cols;
a->type = type;
a->char_size = char_size;
a->decimal = decimal;
a->decimal_zeroes = decimal_zeroes;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(a->type[columnNames[i]] == 0) {
a->d_columns_int[columnNames[i]] = thrust::device_vector<int_type>();
a->h_columns_int[columnNames[i]] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >();
}
else
if(a->type[columnNames[i]] == 1) {
a->d_columns_float[columnNames[i]] = thrust::device_vector<float_type>();
a->h_columns_float[columnNames[i]] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >();
}
else {
a->h_columns_char[columnNames[i]] = nullptr;
a->d_columns_char[columnNames[i]] = nullptr;
};
};
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
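// Random-access read of a compressed column segment from disk for the records listed in
// prm_vh: the header (count, lower_val, bit width) is read first, then only the 4KB pages
// containing the requested positions are fetched and decoded into dest at offset.
// Returns the segment's stored lower_val so the caller can restore the full values.
// The ...FromFileR variant below does the same but writes into a plain host vector.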
int_type CudaSet::readSsdSegmentsFromFile(unsigned int segNum, string colname, size_t offset, thrust::host_vector<unsigned int>& prm_vh, CudaSet* dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
//cout << "lower_val bits " << lower_val << " " << bits << endl;
if(type[colname] == 0) {
//cout << "lower_val bits " << lower_val << " " << bits << endl;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(&val_c_r[0], 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_c_r[0];
}
else
if(bits == 16) {
fread(&val_s_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_s_r[0];
}
if(bits == 32) {
fread(&val_i_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_i_r[0];
}
				if(bits == 64) {
fread(&val_l_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest->h_columns_int[colname][i + offset] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest->h_columns_int[colname][i + offset] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest->h_columns_int[colname][i + offset] = val_i_r[prm_vh[i]-idx];
}
				if(bits == 64) {
dest->h_columns_int[colname][i + offset] = val_l_r[prm_vh[i]-idx];
}
};
};
}
else
if(type[colname] == 1) {
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
fread(val_c_r, 4096, 1, f);
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[0], bits/8);
}
else {
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[(prm_vh[i]-idx)*(bits/8)], bits/8);
};
};
}
else {
//no strings in fact tables
};
fclose(f);
return lower_val;
}
int_type CudaSet::readSsdSegmentsFromFileR(unsigned int segNum, string colname, thrust::host_vector<unsigned int>& prm_vh, thrust::host_vector<unsigned int>& dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(val_c_r, 4096, 1, f);
dest[i] = val_c_r[0];
}
else
if(bits == 16) {
fread(val_s_r, 4096, 1, f);
dest[i] = val_s_r[0];
}
if(bits == 32) {
fread(val_i_r, 4096, 1, f);
dest[i] = val_i_r[0];
}
			if(bits == 64) {
fread(val_l_r, 4096, 1, f);
dest[i] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest[i] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest[i] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest[i] = val_i_r[prm_vh[i]-idx];
}
			if(bits == 64) {
dest[i] = val_l_r[prm_vh[i]-idx];
}
};
};
fclose(f);
return lower_val;
}
std::clock_t tot_disk;
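// Loads one compressed segment of a column from disk into the host-side column arrays.
// In interactive mode the raw file is cached in pinned host buffers, evicting the oldest
// buffers when the cache would exceed system memory; otherwise the segment is read
// directly into h_columns_int / h_columns_float. Disk I/O time is accumulated in tot_disk.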
void CudaSet::readSegmentsFromFile(unsigned int segNum, string colname)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
if(type[colname] == 2)
f1 = f1 + ".idx";
std::clock_t start1 = std::clock();
if(interactive) { //check if data are in buffers
if(buffers.find(f1) == buffers.end()) { // add data to buffers
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
process_error(3, "Error opening " + string(f1) +" file " );
};
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
while(total_buffer_size + fileSize > getTotalSystemMemory() && !buffer_names.empty()) { //free some buffers
//delete [] buffers[buffer_names.front()];
hipHostFree(buffers[buffer_names.front()]);
total_buffer_size = total_buffer_size - buffer_sizes[buffer_names.front()];
buffer_sizes.erase(buffer_names.front());
buffers.erase(buffer_names.front());
buffer_names.pop();
};
fseek(f, 0, SEEK_SET);
char* buff;
hipHostMalloc((void**) &buff, fileSize,hipHostMallocDefault);
fread(buff, fileSize, 1, f);
fclose(f);
buffers[f1] = buff;
buffer_sizes[f1] = fileSize;
buffer_names.push(f1);
total_buffer_size = total_buffer_size + fileSize;
cout << "added buffer " << f1 << " " << fileSize << endl;
};
// get data from buffers
if(type[colname] != 1) {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
			if(cnt/8 + 10 > h_columns_int[colname].size())
				h_columns_int[colname].resize(cnt/8 + 10);
}
else {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
			if(cnt/8 + 10 > h_columns_float[colname].size())
				h_columns_float[colname].resize(cnt/8 + 10);
}
}
else {
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
if(type[colname] != 1) {
if(1 > h_columns_int[colname].size())
h_columns_int[colname].resize(1);
fread(h_columns_int[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_int[colname].data()))[0];
if(cnt/8+10 > h_columns_int[colname].size()) {
h_columns_int[colname].resize(cnt + 10);
};
size_t rr = fread((unsigned int*)(h_columns_int[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
process_error(3, string(buf));
};
}
else {
if(1 > h_columns_float[colname].size())
h_columns_float[colname].resize(1);
fread(h_columns_float[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_float[colname].data()))[0];
if(cnt/8+10 > h_columns_float[colname].size())
h_columns_float[colname].resize(cnt + 10);
size_t rr = fread((unsigned int*)(h_columns_float[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
process_error(3, string(buf));
};
}
fclose(f);
};
tot_disk = tot_disk + (std::clock() - start1);
};
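// Copies a single segment of a column to the GPU. Uncompressed data is copied straight
// from the host vectors; compressed data is loaded with readSegmentsFromFile and then
// decompressed on the device with pfor_decompress. When alloced_switch is set the
// destination is the scratch buffer alloced_tmp rather than the column's device vector.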
void CudaSet::CopyColumnToGpu(string colname, unsigned int segment, size_t offset)
{
if(not_compressed) {
// calculate how many records we need to copy
if(segment < segCount-1) {
mRecCount = maxRecs;
}
else {
mRecCount = hostRecCount - maxRecs*(segCount-1);
};
if(type[colname] != 1) {
if(!alloced_switch) {
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_columns_int[colname].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
else {
if(!alloced_switch) {
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_columns_float[colname].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
}
else {
readSegmentsFromFile(segment,colname);
if(!d_v)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
string f1;
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(segment) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(segment);
};
if(type[colname] != 1) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), buffers[f1], d_v, s_v, colname);
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
};
}
else {
if(decimal[colname]) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + offset));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin(), long_to_float());
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
//for(int i = 0; i < mRecCount;i++)
//cout << "DECOMP " << (float_type)(d_col_int[i]) << " " << d_col_float[i] << endl;
};
}
//else // uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
void CudaSet::CopyColumnToGpu(string colname) // copy all segments
{
if(not_compressed) {
if(type[colname] != 1)
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mRecCount, d_columns_int[colname].begin());
else
thrust::copy(h_columns_float[colname].begin(), h_columns_float[colname].begin() + mRecCount, d_columns_float[colname].begin());
}
else {
if(!d_v)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
size_t cnt = 0;
string f1;
for(unsigned int i = 0; i < segCount; i++) {
readSegmentsFromFile(i,colname);
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(i) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(i);
};
if(type[colname] == 0) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), buffers[f1], d_v, s_v, colname);
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin() + cnt, long_to_float());
};
}
// else uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
};
cnt = cnt + mRecCount;
//totalRecs = totals + mRecCount;
};
mRecCount = cnt;
};
}
void CudaSet::CopyColumnToHost(string colname, size_t offset, size_t RecCount)
{
if(type[colname] != 1) {
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin() + RecCount, h_columns_int[colname].begin() + offset);
}
else
thrust::copy(d_columns_float[colname].begin(), d_columns_float[colname].begin() + RecCount, h_columns_float[colname].begin() + offset);
}
void CudaSet::CopyColumnToHost(string colname)
{
CopyColumnToHost(colname, 0, mRecCount);
}
void CudaSet::CopyToHost(size_t offset, size_t count)
{
for(unsigned int i = 0; i < columnNames.size(); i++) {
CopyColumnToHost(columnNames[i], offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_float[name].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_int[name].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_float[name].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_int[name].data());
}
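// Marks group boundaries on (already sorted) data: for every grouping column, row i is
// compared with row i+1, the per-column results are OR-ed together, and the indices at
// which any column changes are gathered into grp (grp[0] is always 0). cpy_bits is
// consulted so packed 8/16/32-bit representations can be compared without unpacking.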
void CudaSet::GroupBy(stack<string> columnRef)
{
thrust::device_vector<bool> grp_dev(mRecCount);
thrust::fill(grp_dev.begin(), grp_dev.end(), 0);
if(scratch.size() < mRecCount)
scratch.resize(mRecCount*sizeof(bool));
thrust::device_ptr<bool> d_group((bool*)thrust::raw_pointer_cast(scratch.data()));
d_group[mRecCount-1] = 0;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
unsigned int bits;
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[columnRef.top()];
if(bits == 8) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
}
else
if(bits == 16) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
}
else
if(bits == 32) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
}
else {
thrust::transform(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount - 1,
d_columns_int[columnRef.top()].begin()+1, d_group, thrust::not_equal_to<int_type>());
};
thrust::transform(d_group, d_group+mRecCount, grp_dev.begin(), grp_dev.begin(), thrust::logical_or<bool>());
};
grp_count = thrust::count(grp_dev.begin(), grp_dev.end(), 1) + 1;
//cout << "grp count " << grp_count << endl;
grp.resize(grp_count);
if(grp_count > 1)
thrust::copy_if(thrust::make_counting_iterator((unsigned int)1), thrust::make_counting_iterator((unsigned int)grp_dev.size()),
grp_dev.begin(), grp.begin()+1, thrust::identity<bool>());
grp[0] = 0;
};
void CudaSet::addDeviceColumn(int_type* col, string colname, size_t recCount)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 0;
d_columns_int[colname] = thrust::device_vector<int_type>(recCount);
h_columns_int[colname] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >(recCount);
}
	else { // already exists, may need to resize it
if(d_columns_int[colname].size() < recCount) {
d_columns_int[colname].resize(recCount);
};
if(h_columns_int[colname].size() < recCount) {
h_columns_int[colname].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[colname].begin());
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin()+recCount, h_columns_int[colname].begin());
};
void CudaSet::addDeviceColumn(float_type* col, string colname, size_t recCount, bool is_decimal)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 1;
d_columns_float[colname] = thrust::device_vector<float_type>(recCount);
h_columns_float[colname] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(recCount);
}
	else { // already exists, may need to resize it
if(d_columns_float[colname].size() < recCount)
d_columns_float[colname].resize(recCount);
if(h_columns_float[colname].size() < recCount)
h_columns_float[colname].resize(recCount);
};
decimal[colname] = is_decimal;
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[colname].begin());
};
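// Builds a permutation that sorts the set by the columns in sf (ascending only).
// Numeric columns are reordered on the GPU via update_permutation; string columns fall
// back to the host-side update_permutation_char_host and the permutation is copied back.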
void CudaSet::gpu_perm(queue<string> sf, thrust::device_vector<unsigned int>& permutation) {
permutation.resize(mRecCount);
thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, mRecCount*8));
string sort_type = "ASC";
while(!sf.empty()) {
if (type[sf.front()] == 0) {
update_permutation(d_columns_int[sf.front()], raw_ptr, mRecCount, sort_type, (int_type*)temp, 64);
}
else
if (type[sf.front()] == 1) {
update_permutation(d_columns_float[sf.front()], raw_ptr, mRecCount, sort_type, (float_type*)temp, 64);
}
else {
thrust::host_vector<unsigned int> permutation_h = permutation;
char* temp1 = new char[char_size[sf.front()]*mRecCount];
update_permutation_char_host(h_columns_char[sf.front()], permutation_h.data(), mRecCount, sort_type, temp1, char_size[sf.front()]);
delete [] temp1;
permutation = permutation_h;
};
sf.pop();
};
hipFree(temp);
}
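// Compresses mCount records starting at offset and appends them to the table's binary
// files as one or more segments. If op_sort is set, the data is permuted first and split
// into partition_count partitions. Integer and decimal columns go through pfor_compress,
// plain floats are written uncompressed (comp_type 3), and string columns are
// dictionary-encoded via compress_char; column headers are rewritten when required.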
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount, const bool append)
{
string str(file_name);
thrust::device_vector<unsigned int> permutation;
long long int oldCount;
bool int_check = 0;
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, mCount*float_size));
total_count = total_count + mCount;
if (mCount > total_max && op_sort.empty()) {
total_max = mCount;
};
if(!total_segments && append) {
string s= file_name + "." + columnNames[0] + ".header";
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
binary_file.read((char *)&oldCount, 8);
binary_file.read((char *)&total_segments, 4);
binary_file.read((char *)&maxRecs, 4);
if(total_max < maxRecs)
total_max = maxRecs;
binary_file.close();
total_count = oldCount + mCount;
};
};
if(!op_sort.empty()) { //sort the segment
gpu_perm(op_sort, permutation);
};
// here we need to check for partitions and if partition_count > 0 -> create partitions
if(mCount < partition_count || partition_count == 0)
partition_count = 1;
unsigned int partition_recs = mCount/partition_count;
if(!op_sort.empty()) {
if(total_max < partition_recs)
total_max = partition_recs;
};
total_segments++;
old_segments = total_segments;
size_t new_offset;
for(unsigned int i = 0; i < columnNames.size(); i++) {
std::clock_t start1 = std::clock();
string colname = columnNames[i];
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
new_offset = 0;
if(type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_int[colname].begin(), d_col);
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1) {
pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[colname], 0);
};
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
if(!int_check) {
thrust::copy(h_columns_int[colname].begin() + offset, h_columns_int[colname].begin() + offset + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( thrust::raw_pointer_cast(d_columns_int[colname].data()), mCount*int_size, str, h_columns_int[colname], 0);
};
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[colname], 1);
else
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[colname], 1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_float[colname].begin() + offset, h_columns_float[colname].begin() + offset + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[colname], 1);
};
}
else { // do not compress -- float
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col+mRecCount, h_columns_float[colname].begin());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
unsigned int curr_cnt;
if (p < partition_count - 1)
curr_cnt = partition_recs;
else
curr_cnt = mCount - partition_recs*p;
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&curr_cnt, 4);
binary_file.write((char *)(h_columns_float[colname].data() + new_offset),curr_cnt*float_size);
new_offset = new_offset + partition_recs;
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[colname].data() + offset),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
};
}
else { //char
//populate char_hash
if(append && total_segments == 1) {
string s= file_name + "." + colname;
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
char* strings = new char[oldCount*char_size[colname]];
binary_file.read(strings, oldCount*char_size[colname]);
binary_file.close();
unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int z = 0 ; z < oldCount; z++) {
char_hash[ind][MurmurHash64A(&strings[z*char_size[colname]], char_size[colname], hash_seed)/2] = z;
};
delete [] strings;
};
};
if(!op_sort.empty()) {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[colname]*mRecCount];
apply_permutation_char_host(h_columns_char[colname], h_permutation, mRecCount, t, char_size[colname]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[colname]*mRecCount, h_columns_char[colname]);
delete [] t;
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
compress_char(str, colname, partition_recs, new_offset, total_segments-1);
else
compress_char(str, colname, mCount - partition_recs*p, new_offset, total_segments-1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
compress_char(str, colname, mCount, offset, total_segments-1);
};
};
if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
if(!op_sort.empty())
writeHeader(file_name, colname, total_segments-1);
else {
writeHeader(file_name, colname, total_segments);
};
};
total_segments = old_segments;
};
hipFree(d);
if(!op_sort.empty()) {
total_segments = (old_segments-1)+partition_count;
};
permutation.resize(0);
permutation.shrink_to_fit();
}
void CudaSet::writeHeader(string file_name, string colname, unsigned int tot_segs) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
//cout << "HEADER1 " << total_count << " " << tot_segs << " " << total_max << endl;
binary_file.close();
};
void CudaSet::reWriteHeader(string file_name, string colname, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&newRecs, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&maxRecs1, 4);
//cout << "HEADER2 " << newRecs << endl;
binary_file.close();
};
void CudaSet::writeSortHeader(string file_name)
{
string str(file_name);
unsigned int idx;
if(!op_sort.empty()) {
str += ".sort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_sort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_sort);
while(!os.empty()) {
if(verbose)
cout << "sorted on " << idx << endl;
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".sort";
remove(str.c_str());
};
str = file_name;
if(!op_presort.empty()) {
str += ".presort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_presort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_presort);
while(!os.empty()) {
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".presort";
remove(str.c_str());
};
}
using namespace mgpu;
void CudaSet::Display(unsigned int limit, bool binary, bool term)
{
#define MAXCOLS 128
#define MAXFIELDSIZE 1400
//-- This should/will be converted to an array holding pointers of malloced sized structures--
char bigbuf[MAXCOLS * MAXFIELDSIZE];
memset(bigbuf, 0, MAXCOLS * MAXFIELDSIZE);
char *fields[MAXCOLS];
const char *dcolumns[MAXCOLS];
size_t mCount; // num records in play
bool print_all = 0;
string ss, str;
int rows = 0;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
cout << "mRecCount=" << mRecCount << " mcount = " << mCount << " term " << term << " limit=" << limit << " print_all=" << print_all << endl;
unsigned int cc =0;
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
for(unsigned int i = 0; i < columnNames.size(); i++)
{
fields[cc] = &(bigbuf[cc*MAXFIELDSIZE]); // a hack to avoid malloc overheads - refine later
dcolumns[cc++] = columnNames[i].c_str();
if(string_map.find(columnNames[i]) != string_map.end()) {
auto s = string_map[columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
FILE *f;
f = fopen(string_map[columnNames[i]].c_str(), "rb");
file_map[string_map[columnNames[i]]] = f;
len_map[string_map[columnNames[i]]] = len;
};
};
// The goal here is to loop fast and avoid any double handling of outgoing data - pointers are good.
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) { // for each record
for(unsigned int j=0; j < columnNames.size(); j++) { // for each col
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
sprintf(fields[j], "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]])
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
//fprintf(file_pr, "%s", buffer);
//fprintf(file_pr, ".%d", rem);
sprintf(fields[j], "%s.%d", buffer,rem);
/*time_t tt = h_columns_int[columnNames[j]][i];
auto ti = localtime(&tt);
char buffer[10];
strftime(buffer,80,"%Y-%m-%d", ti);
sprintf(fields[j], "%s", buffer);
*/
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char **)dcolumns);
rows++;
};
}
else {
queue<string> op_vx;
for(unsigned int i = 0; i < columnNames.size(); i++)
op_vx.push(columnNames[i]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) { // if host arrays are empty
copyColumns(this, op_vx, curr_seg, cnt);
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount || print_all)
curr_count = mRecCount;
else
curr_count = mCount - sum_printed;
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end())
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char**)dcolumns);
rows++;
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
}; // end else
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
void CudaSet::Store(const string file_name, const char* sep, const unsigned int limit, const bool binary, const bool append, const bool term)
{
if (mRecCount == 0 && binary == 1 && !term) { // write tails
for(unsigned int j=0; j < columnNames.size(); j++) {
writeHeader(file_name, columnNames[j], total_segments);
};
return;
};
size_t mCount;
bool print_all = 0;
string str;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
if(binary == 0) {
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
string bf;
unsigned int max_len = 0;
for(unsigned int j=0; j < columnNames.size(); j++) {
if(string_map.find(columnNames[j]) != string_map.end()) {
auto s = string_map[columnNames[j]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if(len > max_len)
max_len = len;
FILE *f;
f = fopen(string_map[columnNames[j]].c_str(), "rb");
file_map[string_map[columnNames[j]]] = f;
len_map[string_map[columnNames[j]]] = len;
};
};
		bf.resize(max_len); // resize (not just reserve) so the freads below can safely write into &bf[0]
FILE *file_pr;
if(!term) {
file_pr = fopen(file_name.c_str(), "w");
			if (!file_pr) {
				cout << "Could not open file " << file_name << endl;
				return;
			};
}
else
file_pr = stdout;
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1 ) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
//fprintf(file_pr, "%.*s", string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].size(), string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].c_str());
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
}
};
if (i != mCount -1 )
fputs("\n",file_pr);
};
if(!term)
fclose(file_pr);
}
else {
queue<string> op_vx;
string ss;
for(unsigned int j=0; j < columnNames.size(); j++)
op_vx.push(columnNames[j]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
mRecCount = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
if(curr_seg == 0) {
if(limit != 0 && limit < mRecCount) {
mCount = limit;
print_all = 0;
}
else {
mCount = mRecCount;
print_all = 1;
};
};
// if host arrays are empty
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
//cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
if(sum_printed + mRecCount <= mCount || print_all) {
curr_count = mRecCount;
}
else {
curr_count = mCount - sum_printed;
};
}
else {
curr_count = mCount;
};
sum_printed = sum_printed + mRecCount;
//cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
};
};
if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
fputs("\n",file_pr);
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
if(!term) {
fclose(file_pr);
};
};
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
else {
//lets update the data dictionary
for(unsigned int j=0; j < columnNames.size(); j++) {
data_dict[file_name][columnNames[j]].col_type = type[columnNames[j]];
if(type[columnNames[j]] != 2) {
if(decimal[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = decimal_zeroes[columnNames[j]];
else
if (ts_cols[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = UINT_MAX;
else
data_dict[file_name][columnNames[j]].col_length = 0;
}
else
data_dict[file_name][columnNames[j]].col_length = char_size[columnNames[j]];
};
save_dict = 1;
if(text_source) { //writing a binary file using a text file as a source
compress(file_name, 0, 1, 0, mCount, append);
for(unsigned int i = 0; i< columnNames.size(); i++)
if(type[columnNames[i]] == 2)
deAllocColumnOnDevice(columnNames[i]);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
size_t offset = 0;
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for(unsigned int i = 0; i< columnNames.size(); i++) {
op_vx.push(columnNames[i]);
};
allocColumns(this, op_vx);
size_t oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
size_t cnt = 0;
copyColumns(this, op_vx, i, cnt);
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), mRecCount, append);
};
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = (mRecCount/process_count + 1);
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = mRecCount - (segCount-1)*process_count;
};
compress(file_name, offset, 0, z - (segCount-1), mCount, append);
offset = offset + mCount;
};
};
};
};
}
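// Dictionary-encodes one segment of a string column: each value is hashed with
// MurmurHash64A, previously unseen strings are appended to the shared dictionary file
// and given the next index, the per-row indices are pfor-compressed into the .idx file,
// and the raw hashes are written to the .hash file.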
void CudaSet::compress_char(const string file_name, const string colname, const size_t mCount, const size_t offset, const unsigned int segment)
{
unsigned int len = char_size[colname];
string h_name, i_name, file_no_seg = file_name.substr(0, file_name.find_last_of("."));
i_name = file_no_seg + "." + to_string(segment) + ".idx";
h_name = file_no_seg + "." + to_string(segment) + ".hash";
fstream b_file_str, loc_hashes;
fstream binary_file_h(h_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file_h.write((char *)&mCount, 4);
if(segment == 0) {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::trunc);
}
else {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::app);
};
if(h_columns_int.find(colname) == h_columns_int.end()) {
h_columns_int[colname] = thrust::host_vector<int_type >(mCount);
}
else {
if(h_columns_int[colname].size() < mCount)
h_columns_int[colname].resize(mCount);
};
if(d_columns_int.find(colname) == d_columns_int.end()) {
d_columns_int[colname] = thrust::device_vector<int_type >(mCount);
}
else {
if(d_columns_int[colname].size() < mCount)
d_columns_int[colname].resize(mCount);
};
size_t cnt;
long long int* hash_array = new long long int[mCount];
map<unsigned long long int, size_t>::iterator iter;
unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int i = 0 ; i < mCount; i++) {
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
iter = char_hash[ind].find(hash_array[i]);
if(iter == char_hash[ind].end()) {
cnt = char_hash[ind].size();
char_hash[ind][hash_array[i]] = cnt;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
h_columns_int[colname][i] = cnt;
}
else {
h_columns_int[colname][i] = iter->second;
};
};
binary_file_h.write((char *)hash_array, 8*mCount);
delete [] hash_array;
thrust::device_vector<int_type> d_col(mCount);
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mCount, d_col.begin());
pfor_compress(thrust::raw_pointer_cast(d_col.data()), mCount*int_size, i_name, h_columns_int[colname], 0);
binary_file_h.close();
b_file_str.close();
};
bool first_time = 1;
size_t rec_sz = 0;
size_t process_piece;
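// Streams a large delimited text file into the set in pieces that fit in GPU memory.
// Each piece is copied to the device, record boundaries are found by collecting newline
// positions with copy_if, fields are split by parse_functor, and type-specific parsers
// (gpu_atoll, gpu_atold, gpu_atof, gpu_date/gpu_tdate) convert them in place before the
// results are copied back to the host arrays. Returns true once the whole file is read.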
bool CudaSet::LoadBigFile(FILE* file_p, thrust::device_vector<char>& d_readbuff, thrust::device_vector<char*>& dest,
thrust::device_vector<unsigned int>& ind, thrust::device_vector<unsigned int>& dest_len)
{
const char* sep = separator.c_str();
unsigned int maxx = cols.rbegin()->first;
map<unsigned int, string>::iterator it;
bool done = 0;
std::clock_t start1 = std::clock();
vector<int> types;
vector<int> cl;
types.push_back(0);
for(int i = 0; i < maxx; i++) {
auto iter = cols.find(i+1);
if(iter != cols.end()) {
types.push_back(type[iter->second]);
cl.push_back(iter->first-1);
}
else
types.push_back(0);
};
if(first_time) {
if(process_count*4 > getFreeMem()) {
process_piece = getFreeMem()/4;
}
else
process_piece = process_count;
readbuff = new char[process_piece+1];
d_readbuff.resize(process_piece+1);
cout << "set a piece to " << process_piece << " " << getFreeMem() << endl;
};
thrust::device_vector<unsigned int> ind_cnt(1);
thrust::device_vector<char> sepp(1);
sepp[0] = *sep;
long long int total_processed = 0;
size_t recs_processed = 0;
bool finished = 0;
thrust::device_vector<long long int> dev_pos;
long long int offset;
unsigned int cnt = 1;
const unsigned int max_len = 23;
while(!done) {
auto rb = fread(readbuff, 1, process_piece, file_p);
if(rb < process_piece) {
done = 1;
finished = 1;
fclose(file_p);
};
if(total_processed >= process_count)
done = 1;
thrust::fill(d_readbuff.begin(), d_readbuff.end(),0);
thrust::copy(readbuff, readbuff+rb, d_readbuff.begin());
auto curr_cnt = thrust::count(d_readbuff.begin(), d_readbuff.begin() + rb, '\n') - 1;
if(recs_processed == 0 && first_time) {
rec_sz = curr_cnt;
if(finished)
rec_sz++;
total_max = curr_cnt;
};
if(first_time) {
for(unsigned int i=0; i < columnNames.size(); i++) {
auto colname = columnNames[i];
if (type[colname] == 0) {
d_columns_int[colname].resize(d_columns_int[colname].size() + rec_sz);
h_columns_int[colname].resize(h_columns_int[colname].size() + rec_sz);
}
else
if (type[colname] == 1) {
d_columns_float[colname].resize(d_columns_float[colname].size() + rec_sz);
h_columns_float[colname].resize(h_columns_float[colname].size() + rec_sz);
}
else {
char* c = new char[cnt*rec_sz*char_size[columnNames[i]]];
if(recs_processed > 0) {
memcpy(c, h_columns_char[columnNames[i]], recs_processed*char_size[columnNames[i]]);
delete [] h_columns_char[columnNames[i]];
};
h_columns_char[columnNames[i]] = c;
if(recs_processed == 0) {
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
hipMemset(temp,0,char_size[columnNames[i]]*rec_sz);
d_columns_char[columnNames[i]] = (char*)temp;
};
};
if(recs_processed == 0) {
ind[i] = cl[i];
void* temp;
if(type[columnNames[i]] != 2) {
if(!ts_cols[columnNames[i]]) {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, max_len*rec_sz));
dest_len[i] = max_len;
}
else {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, 23*rec_sz));
dest_len[i] = 23;
}
}
else {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
dest_len[i] = char_size[columnNames[i]];
};
dest[i] = (char*)temp;
};
};
};
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 2) {
hipMemset(dest[i],0,max_len*rec_sz);
}
else {
hipMemset(dest[i],0,char_size[columnNames[i]]*rec_sz);
};
};
if(dev_pos.size() < curr_cnt+1)
dev_pos.resize(curr_cnt+1); //avoiding the unnecessary allocs
dev_pos[0] = -1;
thrust::copy_if(thrust::make_counting_iterator((unsigned long long int)0), thrust::make_counting_iterator((unsigned long long int)rb-1),
d_readbuff.begin(), dev_pos.begin()+1, _1 == '\n');
if(!finished) {
if(curr_cnt < rec_sz) {
offset = (dev_pos[curr_cnt] - rb)+1;
//cout << "PATH 1 " << dev_pos[curr_cnt] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = curr_cnt;
}
else {
offset = (dev_pos[rec_sz] - rb)+1;
//cout << "PATH 2 " << dev_pos[rec_sz] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = rec_sz;
};
}
else {
mRecCount = curr_cnt + 1;
};
thrust::counting_iterator<unsigned int> begin(0);
ind_cnt[0] = mColumnCount;
parse_functor ff((const char*)thrust::raw_pointer_cast(d_readbuff.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sepp.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + mRecCount, ff);
ind_cnt[0] = max_len;
for(int i =0; i < mColumnCount; i++) {
if(type[columnNames[i]] == 0) { //int
thrust::device_ptr<char> p1((char*)dest[i]);
if(p1[4] == '-') { //date
if(!ts_cols[columnNames[i]]) {
gpu_date date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
else {
gpu_tdate date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
}
else { //int
if(decimal[columnNames[i]]) {
thrust::device_vector<unsigned int> scale(1);
scale[0] = decimal_zeroes[columnNames[i]];
gpu_atold atold((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(scale.data()));
thrust::for_each(begin, begin + mRecCount, atold);
}
else {
gpu_atoll atoll_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atoll_ff);
};
};
thrust::copy(d_columns_int[columnNames[i]].begin() + recs_processed, d_columns_int[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_int[columnNames[i]].begin() + recs_processed);
}
else
if(type[columnNames[i]] == 1) {
gpu_atof atof_ff((const char*)dest[i],(double*)thrust::raw_pointer_cast(d_columns_float[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atof_ff);
thrust::copy(d_columns_float[columnNames[i]].begin() + recs_processed, d_columns_float[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_float[columnNames[i]].begin() + recs_processed);
}
else {//char is already done
thrust::device_ptr<char> p1((char*)dest[i]);
hipMemcpy( h_columns_char[columnNames[i]] + char_size[columnNames[i]]*recs_processed, (void *)dest[i] , char_size[columnNames[i]]*mRecCount, hipMemcpyDeviceToHost);
};
};
recs_processed = recs_processed + mRecCount;
cnt++;
};
if(finished) {
for(int i =0; i < mColumnCount; i++) {
if(dest[i]) {
hipFree(dest[i]);
dest[i] = nullptr;
};
};
delete [] readbuff;
};
cout << "processed recs " << recs_processed << " " << getFreeMem() << endl;
first_time = 0;
mRecCount = recs_processed;
return finished;
};
void CudaSet::free() {
for(unsigned int i = 0; i < columnNames.size(); i++ ) {
if(type[columnNames[i]] == 0 && h_columns_int[columnNames[i]].size() ) {
h_columns_int[columnNames[i]].resize(0);
h_columns_int[columnNames[i]].shrink_to_fit();
}
else {
h_columns_float[columnNames[i]].resize(0);
h_columns_float[columnNames[i]].shrink_to_fit();
};
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
deAllocOnDevice();
};
void alloc_pool(unsigned int maxRecs) {
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, 8*maxRecs));
alloced_mem.push_back(temp);
};
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
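// The compare() overloads below evaluate a comparison (op_type: 1 '<', 2 '>', 4 '=',
// 5 '<=', 6 '>=', anything else '!=') and return a newly allocated device array of bools,
// one per record. Float comparisons use an EPSILON tolerance, and the int_type* overload
// can scale either operand by a power of ten (p1, p2) to align decimal precisions.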
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if(d<s)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if(d>=s)
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if(d<=s)
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if(d==s)
res = 1;
else
res = 0;
else // !=
if(d!=s)
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if ((s-d) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON))
res = 1;
else
res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
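// Integer column vs constant. p1/p2 carry decimal scales: the column is expanded by
// 10^p1 through power_functor and the constant by 10^p2 so both sides compare at the
// same precision.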
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if(p2)
d = d*(int_type)pow(10, p2);
if (op_type == 2) // >
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
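// Integer column vs integer column, with the same 10^p1 / 10^p2 decimal alignment
// applied to each side before the element-wise predicate.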
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
if(!p1 && !p2) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
}
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else // !=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
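// Arithmetic op() helpers. Each overload aligns its operands, takes a scratch buffer
// from the allocation pool for the result and returns its raw pointer; 'reverse'
// swaps operand order for the non-commutative MINUS and DIV cases.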
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // convert the integer column to float into the scratch buffer
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
//cout << "OP " << d << " " << op_type << " " << p1 << " " << p2 << endl;
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
unsigned int d1 = d;
if(p2)
d = d*(unsigned int)pow(10, p2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d1), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
};
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
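// Integer column vs integer column arithmetic; when only one operand carries a decimal
// scale the other is passed through unchanged.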
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
//cout << "OP " << op_type << " " << p1 << " " << p2 << " " << reverse << endl;
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else
if(p1 && p2) {
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::plus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::divides<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::divides<int_type>());
}
}
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return (float_type*)thrust::raw_pointer_cast(temp);
}
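// Loads one segment of a precomputed index from disk. In interactive mode the raw file
// is cached in pinned host memory (index_buffers) and reused across calls; otherwise it
// is re-read each time. Rebuilds idx_dictionary_int and uploads the packed values to
// idx_vals on the device.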
char CudaSet::loadIndex(const string index_name, const unsigned int segment)
{
FILE* f;
unsigned int bits_encoded, fit_count, vals_count, sz, real_count;
void* d_str;
string f1 = index_name + "." + to_string(segment);
char res;
//interactive = 0;
if(interactive) {
if(index_buffers.find(f1) == index_buffers.end()) {
f = fopen (f1.c_str(), "rb" );
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
char* buff;
hipHostMalloc(&buff, fileSize, hipHostMallocDefault);
fseek(f, 0, SEEK_SET);
fread(buff, fileSize, 1, f);
fclose(f);
index_buffers[f1] = buff;
};
sz = ((unsigned int*)index_buffers[f1])[0];
idx_dictionary_int[index_name].clear();
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][((int_type*)(index_buffers[f1]+4+8*i))[0]] = i;
};
vals_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[2];
real_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[3];
mRecCount = real_count;
if(idx_vals.count(index_name) == 0) {
hipMalloc((void **) &d_str, (vals_count+2)*int_size);
hipMemcpy( d_str, (void *) &((index_buffers[f1]+4 +8*sz)[0]), (vals_count+2)*int_size, hipMemcpyHostToDevice);
idx_vals[index_name] = (unsigned long long int*)d_str;
};
}
else {
f = fopen (f1.c_str(), "rb" );
fread(&sz, 4, 1, f);
int_type* d_array = new int_type[sz];
idx_dictionary_int[index_name].clear();
fread((void*)d_array, sz*int_size, 1, f);
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][d_array[i]] = i;
};
delete [] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
mRecCount = real_count;
unsigned long long int* int_array = new unsigned long long int[vals_count+2];
fseek ( f , -16 , SEEK_CUR );
fread((void*)int_array, 1, vals_count*8 + 16, f);
fread(&res, 1, 1, f);
fclose(f);
void* d_str;
hipMalloc((void **) &d_str, (vals_count+2)*int_size);
hipMemcpy( d_str, (void *) int_array, (vals_count+2)*int_size, hipMemcpyHostToDevice);
if(idx_vals.count(index_name))
hipFree(idx_vals[index_name]);
idx_vals[index_name] = (unsigned long long int*)d_str;
}
return res;
}
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
mColumnCount = (unsigned int)nameRef.size();
FILE* f;
string f1;
unsigned int cnt;
char buffer[4000];
string str;
not_compressed = 0;
mRecCount = Recs;
hostRecCount = Recs;
totalRecs = Recs;
load_file_name = file_name;
f1 = file_name + ".sort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
sorted_fields.push(str);
if(verbose)
cout << "segment sorted on " << str << endl;
};
fclose(f);
};
f1 = file_name + ".presort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
presorted_fields.push(str);
if(verbose)
cout << "presorted on " << str << endl;
};
fclose(f);
};
tmp_table = 0;
filtered = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
//f1 = file_name + "." + nameRef.front() + ".0";
//f = fopen (f1.c_str() , "rb" );
//fread((char *)&bytes, 4, 1, f); //need to read metadata such as type and length
//fclose(f);
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
f1 = file_name + "." + nameRef.front() + ".0";
f = fopen (f1.c_str() , "rb" );
if(!f) {
cout << "Couldn't find field " << nameRef.front() << endl;
exit(0);
};
for(unsigned int j = 0; j < 6; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
compTypes[nameRef.front()] = cnt;
};
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type >();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
string_map[nameRef.front()] = file_name + "." + nameRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
{
mColumnCount = (unsigned int)nameRef.size();
tmp_table = 0;
filtered = 0;
mRecCount = 0;
hostRecCount = Recs;
segCount = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type>();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(const size_t RecordCount, const unsigned int ColumnCount)
{
mRecCount = RecordCount;
hostRecCount = RecordCount;
mColumnCount = ColumnCount;
filtered = 0;
};
void CudaSet::initialize(queue<string> op_sel, const queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = (unsigned int)op_sel.size();
segCount = 1;
not_compressed = 1;
filtered = 0;
col_aliases = op_sel_as;
unsigned int i = 0;
CudaSet *a;
while(!op_sel.empty()) {
for(auto it = varNames.begin(); it != varNames.end(); it++) {
a = it->second;
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end())
break;
};
type[op_sel.front()] = a->type[op_sel.front()];
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
columnNames.push_back(op_sel.front());
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
//h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type>();
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
//h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type>();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
};
i++;
op_sel.pop();
};
}
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = 0;
queue<string> q_cnt(op_sel);
unsigned int i = 0;
set<string> field_names;
while(!q_cnt.empty()) {
if( std::find(a->columnNames.begin(), a->columnNames.end(), q_cnt.front()) != a->columnNames.end() ||
std::find(b->columnNames.begin(), b->columnNames.end(), q_cnt.front()) != b->columnNames.end()) {
field_names.insert(q_cnt.front());
};
q_cnt.pop();
}
mColumnCount = (unsigned int)field_names.size();
maxRecs = b->maxRecs;
segCount = 1;
filtered = 0;
not_compressed = 1;
col_aliases = op_sel_as;
i = 0;
while(!op_sel.empty()) {
if(std::find(columnNames.begin(), columnNames.end(), op_sel.front()) == columnNames.end()) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end()) {
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
columnNames.push_back(op_sel.front());
type[op_sel.front()] = a->type[op_sel.front()];
ts_cols[op_sel.front()] = a->ts_cols[op_sel.front()];
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(a->string_map.find(op_sel.front()) != a->string_map.end()) {
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
i++;
}
else
if(std::find(b->columnNames.begin(), b->columnNames.end(), op_sel.front()) != b->columnNames.end()) {
columnNames.push_back(op_sel.front());
cols[i] = op_sel.front();
decimal[op_sel.front()] = b->decimal[op_sel.front()];
type[op_sel.front()] = b->type[op_sel.front()];
ts_cols[op_sel.front()] = b->ts_cols[op_sel.front()];
if (b->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(b->string_map.find(op_sel.front()) != b->string_map.end()) {
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
decimal[op_sel.front()] = b->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = b->decimal_zeroes[op_sel.front()];
}
else
if (b->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = b->char_size[op_sel.front()];
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
i++;
}
}
op_sel.pop();
};
};
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 1;
else
if (op_type == 1) // <
return 2;
else
if (op_type == 6) // >=
return 5;
else
if (op_type == 5) // <=
return 6;
else
return op_type;
}
size_t getFreeMem()
{
size_t available, total;
hipMemGetInfo(&available, &total);
return available;
} ;
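// For filtered sets, (re)allocates the shared staging buffer alloced_tmp sized to the
// source table's largest segment; otherwise allocates each requested column on the device.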
void allocColumns(CudaSet* a, queue<string> fields)
{
if(a->filtered) {
CudaSet* t;
if(a->filtered)
t = varNames[a->source_name];
else
t = a;
if(int_size*t->maxRecs > alloced_sz) {
if(alloced_sz) {
hipFree(alloced_tmp);
};
hipMalloc((void **) &alloced_tmp, int_size*t->maxRecs);
alloced_sz = int_size*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(var_exists(a, fields.front()) && !a->onDevice(fields.front())) {
a->allocColumnOnDevice(fields.front(), a->maxRecs);
}
fields.pop();
};
};
}
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
if(!a->onDevice(field)) {
a->allocColumnOnDevice(field, a->maxRecs);
};
if(a->prm_index == 'R') {
mygather(field, a, t, count, a->mRecCount);
}
else {
mycopy(field, a, t, count, t->mRecCount);
a->mRecCount = t->mRecCount;
};
}
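// Second phase of a compressed copy: widens the 8/16/32-bit values gathered by
// copyColumns back to 64 bits and adds the per-column base value from cpy_init_val;
// float columns are then rebuilt from their scaled integer representation.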
void copyFinalize(CudaSet* a, queue<string> fields, bool ts)
{
set<string> uniques;
if(scratch.size() < a->mRecCount*8)
scratch.resize(a->mRecCount*8);
thrust::device_ptr<int_type> tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front()) && cpy_bits.find(fields.front()) != cpy_bits.end() && (!a->ts_cols[fields.front()] || ts)) {
if(cpy_bits[fields.front()] == 8) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
}
else {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
};
}
else
if(cpy_bits[fields.front()] == 16) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
}
else {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
};
}
else
if(cpy_bits[fields.front()] == 32) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
}
else {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
};
}
else {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
};
};
thrust::constant_iterator<int_type> iter(cpy_init_val[fields.front()]);
if(a->type[fields.front()] != 1) {
thrust::transform(tmp, tmp + a->mRecCount, iter, a->d_columns_int[fields.front()].begin(), thrust::plus<int_type>());
}
else {
thrust::device_ptr<int_type> dest((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(tmp, tmp + a->mRecCount, iter, dest, thrust::plus<int_type>());
thrust::transform(dest, dest+a->mRecCount, a->d_columns_float[fields.front()].begin(), long_to_float());
};
};
uniques.insert(fields.front());
fields.pop();
};
}
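// Copies the requested columns of one segment to the device. For filtered sets the
// filter is applied first and matching rows are gathered; cpy_bits/cpy_init_val record
// the packed bit width and base so copyFinalize() can expand the data afterwards.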
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
//std::clock_t start1 = std::clock();
set<string> uniques;
if(a->filtered) { //filter the segment
if(flt) {
filter_op(a->fil_s, a->fil_f, segment);
};
if(rsz && a->mRecCount) {
queue<string> fields1(fields);
while(!fields1.empty()) {
a->resizeDeviceColumn(a->devRecCount + a->mRecCount, fields1.front());
fields1.pop();
};
a->devRecCount = a->devRecCount + a->mRecCount;
};
};
cpy_bits.clear();
cpy_init_val.clear();
auto f(fields);
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
if(a->filtered) {
if(a->mRecCount) {
CudaSet *t = varNames[a->source_name];
alloced_switch = 1;
t->CopyColumnToGpu(fields.front(), segment);
gatherColumns(a, t, fields.front(), segment, count);
alloced_switch = 0;
};
}
else {
if(a->mRecCount) {
a->CopyColumnToGpu(fields.front(), segment, count);
};
};
uniques.insert(fields.front());
};
fields.pop();
};
//std::cout<< "copy time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
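// Gathers the rows selected by prm_d from the staging buffer into the destination
// column, honouring the packed width (8/16/32/64 bits) recorded in cpy_bits.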
void mygather(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1 ) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
};
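// Straight copy (no row selection) from the staging buffer into the destination column,
// again at the packed width recorded in cpy_bits.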
void mycopy(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<short int> d_col_source((short int*)alloced_tmp);
thrust::device_ptr<short int> d_col_dest((short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<short int> d_col_source((short int*)alloced_tmp);
thrust::device_ptr<short int> d_col_dest((short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[colname].begin() + offset);
};
};
};
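// Loads the requested columns of the right-hand join table for the given segment range
// and returns the total number of rows brought onto the device.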
size_t load_queue(queue<string> c1, CudaSet* right, string f2, size_t &rcount,
unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
queue<string> cc;
while(!c1.empty()) {
if(std::find(right->columnNames.begin(), right->columnNames.end(), c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() ) {
cc.push(c1.front());
};
};
c1.pop();
};
if(std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
cc.push(f2);
};
if(right->filtered) {
allocColumns(right, cc);
};
rcount = right->maxRecs;
queue<string> ct(cc);
while(!ct.empty()) {
if(right->filtered && rsz) {
right->mRecCount = 0;
}
else {
right->allocColumnOnDevice(ct.front(), rcount*right->segCount);
};
ct.pop();
};
size_t cnt_r = 0;
right->devRecCount = 0;
for(unsigned int i = start_segment; i < end_segment; i++) {
if(!right->filtered)
copyColumns(right, cc, i, cnt_r, rsz, 0);
else
copyColumns(right, cc, i, cnt_r, rsz, flt);
cnt_r = cnt_r + right->mRecCount;
};
right->mRecCount = cnt_r;
return cnt_r;
}
size_t max_char(CudaSet* a)
{
size_t max_char1 = 8;
for(unsigned int i = 0; i < a->columnNames.size(); i++) {
if(a->type[a->columnNames[i]] == 2) {
if (a->char_size[a->columnNames[i]] > max_char1)
max_char1 = a->char_size[a->columnNames[i]];
}
else
if(a->type[a->columnNames[i]] == 0 && a->string_map.find(a->columnNames[i]) != a->string_map.end()) {
auto s = a->string_map[a->columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if (len > max_char1)
max_char1 = len;
};
};
return max_char1;
};
size_t max_char(CudaSet* a, queue<string> field_names)
{
size_t max_char = 8;
while (!field_names.empty()) {
if (a->type[field_names.front()] == 2) {
if (a->char_size[field_names.front()] > max_char)
max_char = a->char_size[field_names.front()];
};
field_names.pop();
};
return max_char;
};
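// Splits a table into segments when its estimated uncompressed size would exceed about
// a third of the currently free device memory.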
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
size_t tot_sz = 0;
while(!cols.empty()) {
if(a->type[cols.front()] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[cols.front()];
cols.pop();
};
if(a->mRecCount*tot_sz > mem_available/3) { //default is 3
a->segCount = (a->mRecCount*tot_sz)/(mem_available/5) + 1;
a->maxRecs = (a->mRecCount/a->segCount)+1;
};
};
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
if (SortType.compare("DESC") == 0 )
str_sort_host(tmp, RecCount, permutation, 1, len);
else
str_sort_host(tmp, RecCount, permutation, 0, len);
}
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
// copy keys to temporary vector
hipMemcpy( (void*)tmp, (void*) key, RecCount*len, hipMemcpyDeviceToDevice);
// permute the keys
str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
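// Applies filter 's' to table 'f' for one segment. The zone map is checked first:
// 'A' keeps the whole segment, 'R' evaluates the full predicate and stores matching row
// numbers in prm_d, and any other result drops the segment.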
void filter_op(const char *s, const char *f, unsigned int segment)
{
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
//std::clock_t start1 = std::clock();
if(a->mRecCount == 0 && !a->filtered) {
b = new CudaSet(0,1);
}
else {
if(verbose)
cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
b = varNames[s];
b->name = s;
b->string_map = a->string_map;
size_t cnt = 0;
b->sorted_fields = a->sorted_fields;
b->ts_cols = a->ts_cols;
allocColumns(a, b->fil_value);
if (b->prm_d.size() == 0) {
b->prm_d.resize(a->maxRecs);
};
cout << endl << "MAP CHECK start " << segment << endl;
char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
cout << endl << "MAP CHECK segment " << segment << " " << map_check << endl;
if(map_check == 'R') {
auto old_ph = phase_copy;
phase_copy = 0;
copyColumns(a, b->fil_value, segment, cnt);
phase_copy = old_ph;
bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
thrust::device_ptr<bool> bp((bool*)res);
b->prm_index = 'R';
b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, b->prm_d.begin(), thrust::identity<bool>());
hipFree(res);
}
else {
b->prm_index = map_check;
if(map_check == 'A')
b->mRecCount = a->mRecCount;
else
b->mRecCount = 0;
};
if(segment == a->segCount-1)
a->deAllocOnDevice();
}
if(verbose)
cout << endl << "filter result " << b->mRecCount << endl;
}
size_t load_right(CudaSet* right, string f2, queue<string> op_g, queue<string> op_alt, size_t& rcount, unsigned int start_seg, unsigned int end_seg) {
size_t cnt_r = 0;
//if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
// need to allocate all right columns
if(right->not_compressed) {
queue<string> op_alt1;
op_alt1.push(f2);
cnt_r = load_queue(op_alt1, right, "", rcount, start_seg, end_seg, 1, 1);
queue<string> op_alt2;
while(!op_alt.empty()) {
if(f2.compare(op_alt.front())) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) {
op_alt2.push(op_alt.front());
};
};
op_alt.pop();
};
if(!op_alt2.empty())
cnt_r = load_queue(op_alt2, right, "", rcount, start_seg, end_seg, 0, 0);
}
else {
cnt_r = load_queue(op_alt, right, f2, rcount, start_seg, end_seg, 1, 1);
};
return cnt_r;
};
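// Appends the records of set 's' to set 'f'. Disk-to-disk inserts copy compressed
// segment files directly and merge string dictionaries; memory-to-memory inserts append
// to the host vectors; memory-to-disk inserts compress and write new segments.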
void insert_records(const char* f, const char* s) {
char buf[4096];
size_t size, maxRecs, cnt = 0;
string str_s, str_d;
if(varNames.find(s) == varNames.end()) {
process_error(3, "couldn't find " + string(s) );
};
CudaSet *a;
a = varNames.find(s)->second;
a->name = s;
if(varNames.find(f) == varNames.end()) {
process_error(3, "couldn't find " + string(f) );
};
CudaSet *b;
b = varNames.find(f)->second;
b->name = f;
// if both source and destination are on disk
cout << "SOURCES " << a->source << ":" << b->source << endl;
if(a->source && b->source) {
for(unsigned int i = 0; i < a->segCount; i++) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
if(a->type[a->columnNames[z]] != 2) {
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str_d = b->load_file_name + "." + a->columnNames[z] + "." + to_string(b->segCount + i);
cout << str_s << " " << str_d << endl;
FILE* source = fopen(str_s.c_str(), "rb");
FILE* dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) {
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
}
else { //merge strings
//read b's strings
str_s = b->load_file_name + "." + b->columnNames[z];
FILE* dest = fopen(str_s.c_str(), "rb");
auto len = b->char_size[b->columnNames[z]];
map<string, unsigned long long int> map_d;
buf[len] = 0;
unsigned long long cnt = 0;
while (fread(buf, len, 1, dest)) {
map_d[buf] = cnt;
cnt++;
};
fclose(dest);
unsigned long long int cct = cnt;
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i) + ".hash";
str_d = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".hash";
FILE* source = fopen(str_s.c_str(), "rb");
dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) {
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
str_s = a->load_file_name + "." + a->columnNames[z];
source = fopen(str_s.c_str(), "rb");
map<unsigned long long int, string> map_s;
buf[len] = 0;
cnt = 0;
while (fread(buf, len, 1, source)) {
map_s[cnt] = buf;
cnt++;
};
fclose(source);
queue<string> op_vx;
op_vx.push(a->columnNames[z]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->CopyColumnToGpu(a->columnNames[z], z, 0);
a->CopyColumnToHost(a->columnNames[z]);
str_d = b->load_file_name + "." + b->columnNames[z];
fstream f_file;
f_file.open(str_d.c_str(), ios::out|ios::app|ios::binary);
for(auto j = 0; j < a->mRecCount; j++) {
auto ss = map_s[a->h_columns_int[a->columnNames[z]][j]];
if(map_d.find(ss) == map_d.end()) { //add
f_file.write((char *)ss.c_str(), len);
a->h_columns_int[a->columnNames[z]][j] = cct;
cct++;
}
else {
a->h_columns_int[a->columnNames[z]][j] = map_d[ss];
};
};
f_file.close();
thrust::device_vector<int_type> d_col(a->mRecCount);
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, d_col.begin());
auto i_name = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".idx";
pfor_compress(thrust::raw_pointer_cast(d_col.data()), a->mRecCount*int_size, i_name, a->h_columns_int[a->columnNames[z]], 0);
};
};
};
if(a->maxRecs > b->maxRecs)
maxRecs = a->maxRecs;
else
maxRecs = b->maxRecs;
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->reWriteHeader(b->load_file_name, b->columnNames[i], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
};
}
else
if(!a->source && !b->source) { //if both source and destination are in memory
size_t oldCount = b->mRecCount;
b->resize(a->mRecCount);
for(unsigned int z = 0; z< b->mColumnCount; z++) {
if(b->type[a->columnNames[z]] == 0) {
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_int[b->columnNames[z]].begin() + oldCount);
}
else
if(b->type[a->columnNames[z]] == 1) {
thrust::copy(a->h_columns_float[a->columnNames[z]].begin(), a->h_columns_float[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_float[b->columnNames[z]].begin() + oldCount);
}
else {
hipMemcpy(b->h_columns_char[b->columnNames[z]] + b->char_size[b->columnNames[z]]*oldCount, a->h_columns_char[a->columnNames[z]], a->char_size[a->columnNames[z]]*a->mRecCount, hipMemcpyHostToHost);
};
};
}
else
if(!a->source && b->source) {
total_segments = b->segCount;
total_count = b->mRecCount;
total_max = b->maxRecs;
queue<string> op_vx;
for(unsigned int i=0; i < a->columnNames.size(); i++)
op_vx.push(a->columnNames[i]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
for(unsigned int i = 0; i < a->segCount; i++) {
if (a->filtered) {
copyColumns(a, op_vx, i, cnt);
a->CopyToHost(0, a->mRecCount);
};
a->compress(b->load_file_name, 0, 1, i - (a->segCount-1), a->mRecCount, 0);
};
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->writeHeader(b->load_file_name, b->columnNames[i], total_segments);
};
};
};
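// Deletes the rows that match the current filter (op_type/op_value/op_nums) from a disk based
// table. Every segment whose zone map may contain matches is decompressed, the filter is applied
// in inverted form so that only the surviving rows are kept, and those rows are recompressed into
// renumbered segments; leftover segment files are removed and the column headers are rewritten.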
void delete_records(const char* f) {
CudaSet *a;
a = varNames.find(f)->second;
a->name = f;
size_t totalRemoved = 0;
size_t maxRecs = 0;
if(!a->keep) { // temporary variable
process_error(2, "Delete operator is only applicable to disk based sets\nfor deleting records from derived sets please use filter operator ");
}
else { // read matching segments, delete, compress and write on a disk replacing the original segments
string str, str_old;
queue<string> op_vx;
size_t cnt;
for ( auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
op_vx.push((*it).first);
if (std::find(a->columnNames.begin(), a->columnNames.end(), (*it).first) == a->columnNames.end()) {
if ((*it).second.col_type == 0) {
a->type[(*it).first] = 0;
a->decimal[(*it).first] = 0;
//a->h_columns_int[(*it).first] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
a->h_columns_int[(*it).first] = thrust::host_vector<int_type>();
a->d_columns_int[(*it).first] = thrust::device_vector<int_type>();
}
else
if((*it).second.col_type == 1) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 0;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else
if ((*it).second.col_type == 3) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 1;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else {
a->type[(*it).first] = 2;
a->decimal[(*it).first] = 0;
a->h_columns_char[(*it).first] = nullptr;
a->d_columns_char[(*it).first] = nullptr;
a->char_size[(*it).first] = (*it).second.col_length;
};
a->columnNames.push_back((*it).first);
}
};
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->prm_d.resize(a->maxRecs);
size_t cc = a->mRecCount;
size_t tmp;
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, a->maxRecs*float_size));
unsigned int new_seg_count = 0;
char map_check;
for(unsigned int i = 0; i < a->segCount; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
if(verbose)
cout << "MAP CHECK segment " << i << " " << map_check << endl;
if(map_check != 'N') {
cnt = 0;
copyColumns(a, op_vx, i, cnt);
tmp = a->mRecCount;
if(a->mRecCount) {
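// filter() marks the rows that match the delete predicate; copy_if with logical_not
// collects the indices of the rows that do NOT match, i.e. the rows to keep.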
bool* res = filter(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
thrust::device_ptr<bool> bp((bool*)res);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, a->prm_d.begin(), thrust::logical_not<bool>());
a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
hipFree(res);
// cout << "Remained recs count " << a->mRecCount << endl;
if(a->mRecCount > maxRecs)
maxRecs = a->mRecCount;
if (a->mRecCount) {
totalRemoved = totalRemoved + (tmp - a->mRecCount);
if (a->mRecCount == tmp) { //none deleted
if(new_seg_count != i) {
for (auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
auto colname = (*it).first;
str_old = a->load_file_name + "." + colname + "." + to_string(i);
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
}
else { //some deleted
//cout << "writing segment " << new_seg_count << endl;
map<string, col_data> s = data_dict[a->load_file_name];
for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
string colname = (*it).first;
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
if(a->type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[colname], 0);
}
else
if(a->type[colname] == 1) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(a->decimal[colname]) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[colname], 1);
}
else {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[colname].begin());
fstream binary_file(str.c_str(),ios::out|ios::binary);
binary_file.write((char *)&a->mRecCount, 4);
binary_file.write((char *)(a->h_columns_float[colname].data()),a->mRecCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str + ".hash", a->h_columns_int[colname], 0);
};
};
new_seg_count++;
};
}
else {
totalRemoved = totalRemoved + tmp;
};
}
}
else {
if(new_seg_count != i) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str_old = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str = a->load_file_name + "." + a->columnNames[z] + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
maxRecs = a->maxRecs;
};
};
if (new_seg_count < a->segCount) {
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
//cout << "delete segment " << i << endl;
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str = a->load_file_name + "." + a->columnNames[z];
str += "." + to_string(i);
remove(str.c_str());
};
};
};
for(unsigned int i = 0; i < a->columnNames.size(); i++) { // rewrite every column header with the new segment and record counts
a->reWriteHeader(a->load_file_name, a->columnNames[i], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
};
a->mRecCount = cc;
a->prm_d.resize(0);
a->segCount = new_seg_count;
a->deAllocOnDevice();
hipFree(d);
};
};
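// Serializes the global data dictionary to a binary file: the number of tables, then for every
// table its name and its columns, and for every column its type code and length. String fields
// are written length-prefixed; counts use 8 bytes, type and length fields use 4 bytes each.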
void save_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len;
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
size_t len = data_dict.size();
binary_file.write((char *)&len, 8);
for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) {
str_len = (*it).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*it).first.data(), str_len);
map<string, col_data> s = (*it).second;
size_t len1 = s.size();
binary_file.write((char *)&len1, 8);
for (auto sit=s.begin() ; sit != s.end(); ++sit ) {
str_len = (*sit).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*sit).first.data(), str_len);
binary_file.write((char *)&(*sit).second.col_type, 4);
binary_file.write((char *)&(*sit).second.col_length, 4);
};
};
binary_file.close();
}
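// Reads the dictionary written by save_col_data back into data_dict, using the same
// length-prefixed layout. Note the fixed 4000 byte name buffer below - table and column
// names are assumed to be shorter than that.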
void load_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len, recs, len1;
string str1, str2;
char buffer[4000];
unsigned int col_type, col_length;
fstream binary_file;
binary_file.open(file_name.c_str(),ios::in|ios::binary);
if(binary_file.is_open()) {
binary_file.read((char*)&recs, 8);
for(unsigned int i = 0; i < recs; i++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str1.assign(buffer, str_len);
binary_file.read((char*)&len1, 8);
for(unsigned int j = 0; j < len1; j++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str2.assign(buffer, str_len);
binary_file.read((char*)&col_type, 4);
binary_file.read((char*)&col_length, 4);
data_dict[str1][str2].col_type = col_type;
data_dict[str1][str2].col_length = col_length;
//cout << "data DICT " << str1 << " " << str2 << " " << col_type << " " << col_length << endl;
};
};
binary_file.close();
}
else {
cout << "Couldn't open data dictionary" << endl;
};
}
bool var_exists(CudaSet* a, string name) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), name) != a->columnNames.end())
return 1;
else
return 0;
}
int file_exist (const char *filename)
{
std::ifstream infile(filename);
return infile.good();
}
bool check_bitmap_file_exist(CudaSet* left, CudaSet* right)
{
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 0;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
return bitmaps_exist;
}
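// Same check as above, but when the join bitmap files do exist the right table's filter
// (types, values and constants) is moved onto the left table, with column references rewritten
// to the bitmap file prefix and an extra AND appended if the left table already had a filter.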
bool check_bitmaps_exist(CudaSet* left, CudaSet* right)
{
//check if there are join bitmap indexes
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 1;
return 1;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
if(bitmaps_exist) {
while(!right->fil_nums.empty() ) {
left->fil_nums.push(right->fil_nums.front());
right->fil_nums.pop();
};
while(!right->fil_nums_precision.empty() ) {
left->fil_nums_precision.push(right->fil_nums_precision.front());
right->fil_nums_precision.pop();
};
while(!right->fil_nums_f.empty() ) {
left->fil_nums_f.push(right->fil_nums_f.front());
right->fil_nums_f.pop();
};
while(!right->fil_value.empty() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), right->fil_value.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + right->fil_value.front();
left->fil_value.push(fname);
}
else
left->fil_value.push(right->fil_value.front());
right->fil_value.pop();
};
bool add_and = 1;
if(left->fil_type.empty())
add_and = 0;
while(!right->fil_type.empty() ) {
left->fil_type.push(right->fil_type.front());
right->fil_type.pop();
};
if(add_and) {
left->fil_type.push("AND");
};
return 1;
}
else {
return 0;
};
}
void check_sort(const string str, const char* rtable, const char* rid)
{
CudaSet* right = varNames.find(rtable)->second;
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::app);
binary_file.write((char *)&right->sort_check, 1);
binary_file.close();
}
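// Re-sorts a permutation by the actual string values of a dictionary encoded column: the encoded
// indices are gathered through the current permutation, the referenced strings are read from the
// dictionary file into h_columns_char, and str_sort / str_sort_host updates the permutation on the
// device or on the host.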
void update_char_permutation(CudaSet* a, string colname, unsigned int* raw_ptr, string ord, void* temp, bool host)
{
auto s = a->string_map[colname];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
a->h_columns_char[colname] = new char[a->mRecCount*len];
memset(a->h_columns_char[colname], 0, a->mRecCount*len);
thrust::device_ptr<unsigned int> perm(raw_ptr);
thrust::device_ptr<int_type> temp_int((int_type*)temp);
thrust::gather(perm, perm+a->mRecCount, a->d_columns_int[colname].begin(), temp_int);
//for(int z = 0 ; z < a->mRecCount; z++) {
//cout << "Init vals " << a->d_columns_int[colname][z] << " " << perm[z] << " " << temp_int[z] << endl;
//};
//cout << "sz " << a->h_columns_int[colname].size() << " " << a->d_columns_int[colname].size() << " " << len << endl;
hipMemcpy(thrust::raw_pointer_cast(a->h_columns_int[colname].data()), temp, 8*a->mRecCount, hipMemcpyDeviceToHost);
FILE *f;
f = fopen(a->string_map[colname].c_str(), "rb");
for(int z = 0 ; z < a->mRecCount; z++) {
fseek(f, a->h_columns_int[colname][z] * len, SEEK_SET);
fread(a->h_columns_char[colname] + z*len, 1, len, f);
};
fclose(f);
if(!host) {
void *d;
hipMalloc((void **) &d, a->mRecCount*len);
a->d_columns_char[colname] = (char*)d;
hipMemcpy(a->d_columns_char[colname], a->h_columns_char[colname], len*a->mRecCount, hipMemcpyHostToDevice);
if (ord.compare("DESC") == 0 )
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
hipFree(d);
}
else {
if (ord.compare("DESC") == 0 )
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
};
}
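// Dictionary plus fixed width bit packing for an integer column. The distinct values are collected
// and ordered, every value is replaced by its dictionary position, and the positions are packed
// into 64-bit words. File layout: dictionary size, the dictionary values, values per 64-bit word
// (fit_count), bits per value, number of packed words, number of values, then the packed words.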
void compress_int(const string file_name, const thrust::host_vector<int_type>& res)
{
std::vector<unsigned int> dict_val;
unsigned int bits_encoded;
set<int_type> dict_s;
map<int_type, unsigned int> d_ordered;
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_s.insert(f);
};
unsigned int i = 0;
for (auto it = dict_s.begin(); it != dict_s.end(); it++) {
d_ordered[*it] = i++;
};
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_val.push_back(d_ordered[f]);
};
bits_encoded = (unsigned int)ceil(log2(double(d_ordered.size()+1)));
//cout << "bits " << bits_encoded << endl;
unsigned int sz = (unsigned int)d_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&sz, 4);
for (auto it = d_ordered.begin(); it != d_ordered.end(); it++) {
binary_file.write((char*)(&(it->first)), int_size);
};
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, int_size);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
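// Helpers for expression evaluation: an operand is either a named column of the set or an
// intermediate result sitting on top of the evaluation stack (in which case it is popped).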
int_type* get_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->get_int_by_name(s1_val);
else {
t = exe_vectors.top();
exe_vectors.pop();
}
return t;
};
int_type* get_host_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) {
t = a->get_host_int_by_name(s1_val);
}
else {
t = exe_vectors.top();
thrust::device_ptr<int_type> st1((int_type*)t);
for(int z = 0; z < 10; z++)
cout << "RESVEC " << st1[z] << endl;
exe_vectors.pop();
}
return t;
};
unsigned int get_decimals(CudaSet* a, string s1_val, stack<unsigned int>& exe_precision) {
unsigned int t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->decimal_zeroes[s1_val];
else {
t = exe_precision.top();
exe_precision.pop();
}
return t;
};
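// Physical RAM size in bytes, used to decide how much data may be kept in the host buffer cache.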
#ifdef _WIN64
size_t getTotalSystemMemory()
{
MEMORYSTATUSEX status;
status.dwLength = sizeof(status);
GlobalMemoryStatusEx(&status);
return status.ullTotalPhys;
}
#else
size_t getTotalSystemMemory()
{
long pages = sysconf(_SC_PHYS_PAGES);
long page_size = sysconf(_SC_PAGE_SIZE);
return pages * page_size;
}
#endif
| 2fc07a953cad6b692c5e808444ab43e940d64bd4.cu | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include <ctime>
#include <time.h>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.cu"
#include "filter.h"
#include "callbacks.h"
#include "zone_map.h"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#define fseek(S, S1, S2) _fseeki64(S, S1, S2)
#include <windows.h>
#else
#include <unistd.h>
#endif
using namespace std;
using namespace thrust::placeholders;
size_t total_count = 0, total_max;
clock_t tot;
unsigned int total_segments = 0, old_segments;
size_t process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
bool verbose;
bool interactive, ssd, delta, star;
unsigned int prs;
void* d_v = nullptr;
void* s_v = nullptr;
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
string grp_val;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> op_nums_precision;
queue<string> col_aliases;
map<string, map<string, col_data> > data_dict;
map<unsigned int, map<unsigned long long int, size_t> > char_hash;
map<string, char*> index_buffers;
map<string, unsigned long long int*> idx_vals;
map<string, char*> buffers;
map<string, size_t> buffer_sizes;
size_t total_buffer_size;
queue<string> buffer_names;
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string, unsigned int> cpy_bits;
map<string, long long int> cpy_init_val;
char* readbuff = nullptr;
thrust::device_vector<unsigned int> rcol_matches;
thrust::device_vector<int_type> rcol_dev;
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON) || ((x-y) < -EPSILON);
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
template <typename T>
struct power_functor : public thrust::unary_function<T,T>
{
unsigned int a;
__host__ __device__
power_functor(unsigned int a_) {
a = a_;
}
__host__ __device__
T operator()(T x)
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
struct is_zero
{
__host__ __device__
bool operator()(const int &x)
{
return x == 0;
}
};
int get_utc_offset() {
time_t zero = 24*60*60L;
struct tm * timeptr;
int gmtime_hours;
/* get the local time for Jan 2, 1900 00:00 UTC */
timeptr = localtime( &zero );
gmtime_hours = timeptr->tm_hour;
/* if the local time is the "day before" the UTC, subtract 24 hours
from the hours to get the UTC offset */
if( timeptr->tm_mday < 2 )
gmtime_hours -= 24;
return gmtime_hours;
}
/*
the utc analogue of mktime,
(much like timegm on some systems)
*/
time_t tm_to_time_t_utc( struct tm * timeptr ) {
/* gets the epoch time relative to the local time zone,
and then adds the appropriate number of seconds to make it UTC */
return mktime( timeptr ) + get_utc_offset() * 3600;
}
/*class power_functor {
unsigned int a;
public:
power_functor(unsigned int a_) { a = a_; }
__host__ __device__ int_type operator()(int_type x) const
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
*/
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t getFreeMem();
size_t getTotalSystemMemory();
void process_error(int severity, string err);
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
source = 1;
text_source = 1;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
: mColumnCount(0), mRecCount(0)
{
maxRecs = max;
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
source = 1;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(const size_t RecordCount, const unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> op_sel, const queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(string colname, size_t RecordCount)
{
if (type[colname] != 1 ) {
d_columns_int[colname].resize(RecordCount);
}
else
d_columns_float[colname].resize(RecordCount);
};
void CudaSet::resize_join(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else
h_columns_float[columnNames[i]].resize(mRecCount);
};
};
void CudaSet::resize(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else {
h_columns_float[columnNames[i]].resize(mRecCount);
}
};
};
void CudaSet::deAllocColumnOnDevice(string colname)
{
if (type[colname] != 1 && !d_columns_int.empty() && d_columns_int.find(colname) != d_columns_int.end()) {
if(d_columns_int[colname].size() > 0) {
d_columns_int[colname].resize(0);
d_columns_int[colname].shrink_to_fit();
};
}
else
if (type[colname] == 1 && !d_columns_float.empty()) {
if (d_columns_float[colname].size() > 0) {
d_columns_float[colname].resize(0);
d_columns_float[colname].shrink_to_fit();
};
};
};
void CudaSet::allocOnDevice(size_t RecordCount)
{
for(unsigned int i=0; i < columnNames.size(); i++)
allocColumnOnDevice(columnNames[i], RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i < columnNames.size(); i++) {
deAllocColumnOnDevice(columnNames[i]);
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
for (auto it=d_columns_int.begin(); it != d_columns_int.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
for (auto it=d_columns_float.begin(); it != d_columns_float.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
if(filtered) { // dealloc the source
if(varNames.find(source_name) != varNames.end()) {
varNames[source_name]->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(size_t RecCount, string colname)
{
if (type[colname] != 1) {
d_columns_int[colname].resize(RecCount);
}
else
d_columns_float[colname].resize(RecCount);
};
void CudaSet::resizeDevice(size_t RecCount)
{
for(unsigned int i=0; i < columnNames.size(); i++) {
resizeDeviceColumn(RecCount, columnNames[i]);
};
};
bool CudaSet::onDevice(string colname)
{
if (type[colname] != 1) {
if (!d_columns_int.empty() && d_columns_int[colname].size())
return 1;
}
else
if (!d_columns_float.empty() && d_columns_float[colname].size())
return 1;
return 0;
}
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
a->columnNames = columnNames;
a->ts_cols = ts_cols;
a->cols = cols;
a->type = type;
a->char_size = char_size;
a->decimal = decimal;
a->decimal_zeroes = decimal_zeroes;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(a->type[columnNames[i]] == 0) {
a->d_columns_int[columnNames[i]] = thrust::device_vector<int_type>();
a->h_columns_int[columnNames[i]] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >();
}
else
if(a->type[columnNames[i]] == 1) {
a->d_columns_float[columnNames[i]] = thrust::device_vector<float_type>();
a->h_columns_float[columnNames[i]] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >();
}
else {
a->h_columns_char[columnNames[i]] = nullptr;
a->d_columns_char[columnNames[i]] = nullptr;
};
};
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
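// Reads only the rows listed in prm_vh from one compressed segment file, without decompressing
// the whole segment: the per-value bit width is taken from the segment metadata and 4KB blocks
// are read on demand as the requested row indices move forward. Returns the lower_val base value
// stored in the segment header.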
int_type CudaSet::readSsdSegmentsFromFile(unsigned int segNum, string colname, size_t offset, thrust::host_vector<unsigned int>& prm_vh, CudaSet* dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
//cout << "lower_val bits " << lower_val << " " << bits << endl;
if(type[colname] == 0) {
//cout << "lower_val bits " << lower_val << " " << bits << endl;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(&val_c_r[0], 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_c_r[0];
}
else
if(bits == 16) {
fread(&val_s_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_s_r[0];
}
if(bits == 32) {
fread(&val_i_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_i_r[0];
}
if(bits == 64) {
fread(&val_l_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest->h_columns_int[colname][i + offset] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest->h_columns_int[colname][i + offset] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest->h_columns_int[colname][i + offset] = val_i_r[prm_vh[i]-idx];
}
if(bits == 64) {
dest->h_columns_int[colname][i + offset] = val_l_r[prm_vh[i]-idx];
}
};
};
}
else
if(type[colname] == 1) {
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
fread(val_c_r, 4096, 1, f);
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[0], bits/8);
}
else {
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[(prm_vh[i]-idx)*(bits/8)], bits/8);
};
};
}
else {
//no strings in fact tables
};
fclose(f);
return lower_val;
}
int_type CudaSet::readSsdSegmentsFromFileR(unsigned int segNum, string colname, thrust::host_vector<unsigned int>& prm_vh, thrust::host_vector<unsigned int>& dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(val_c_r, 4096, 1, f);
dest[i] = val_c_r[0];
}
else
if(bits == 16) {
fread(val_s_r, 4096, 1, f);
dest[i] = val_s_r[0];
}
if(bits == 32) {
fread(val_i_r, 4096, 1, f);
dest[i] = val_i_r[0];
}
if(bits == 64) {
fread(val_l_r, 4096, 1, f);
dest[i] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest[i] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest[i] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest[i] = val_i_r[prm_vh[i]-idx];
}
if(bits == 64) {
dest[i] = val_l_r[prm_vh[i]-idx];
}
};
};
fclose(f);
return lower_val;
}
std::clock_t tot_disk;
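// Brings the raw compressed segment of a column into host memory. In interactive mode the file is
// kept in a pinned-memory buffer cache (oldest buffers are evicted when the cache would exceed
// physical RAM); otherwise it is read straight into the column's host vector.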
void CudaSet::readSegmentsFromFile(unsigned int segNum, string colname)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
if(type[colname] == 2)
f1 = f1 + ".idx";
std::clock_t start1 = std::clock();
if(interactive) { //check if data are in buffers
if(buffers.find(f1) == buffers.end()) { // add data to buffers
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
process_error(3, "Error opening " + string(f1) +" file " );
};
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
while(total_buffer_size + fileSize > getTotalSystemMemory() && !buffer_names.empty()) { //free some buffers
//delete [] buffers[buffer_names.front()];
cudaFreeHost(buffers[buffer_names.front()]);
total_buffer_size = total_buffer_size - buffer_sizes[buffer_names.front()];
buffer_sizes.erase(buffer_names.front());
buffers.erase(buffer_names.front());
buffer_names.pop();
};
fseek(f, 0, SEEK_SET);
char* buff;
cudaHostAlloc((void**) &buff, fileSize,cudaHostAllocDefault);
fread(buff, fileSize, 1, f);
fclose(f);
buffers[f1] = buff;
buffer_sizes[f1] = fileSize;
buffer_names.push(f1);
total_buffer_size = total_buffer_size + fileSize;
cout << "added buffer " << f1 << " " << fileSize << endl;
};
// get data from buffers
if(type[colname] != 1) {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt/8 + 10 > h_columns_int[colname].size())
h_columns_int[colname].resize(cnt/8 + 10);
}
else {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt/8 + 10 > h_columns_float[colname].size())
h_columns_float[colname].resize(cnt/8 + 10);
}
}
else {
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
if(type[colname] != 1) {
if(1 > h_columns_int[colname].size())
h_columns_int[colname].resize(1);
fread(h_columns_int[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_int[colname].data()))[0];
if(cnt/8+10 > h_columns_int[colname].size()) {
h_columns_int[colname].resize(cnt + 10);
};
size_t rr = fread((unsigned int*)(h_columns_int[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
process_error(3, string(buf));
};
}
else {
if(1 > h_columns_float[colname].size())
h_columns_float[colname].resize(1);
fread(h_columns_float[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_float[colname].data()))[0];
if(cnt/8+10 > h_columns_float[colname].size())
h_columns_float[colname].resize(cnt + 10);
size_t rr = fread((unsigned int*)(h_columns_float[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
process_error(3, string(buf));
};
}
fclose(f);
};
tot_disk = tot_disk + (std::clock() - start1);
};
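// Copies one segment of a column to the GPU at the given offset. Uncompressed data is copied
// directly from the host vectors; compressed data is decompressed on the device with
// pfor_decompress, either into the column's device vector or into the scratch buffer
// alloced_tmp when alloced_switch is set.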
void CudaSet::CopyColumnToGpu(string colname, unsigned int segment, size_t offset)
{
if(not_compressed) {
// calculate how many records we need to copy
if(segment < segCount-1) {
mRecCount = maxRecs;
}
else {
mRecCount = hostRecCount - maxRecs*(segCount-1);
};
if(type[colname] != 1) {
if(!alloced_switch) {
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_columns_int[colname].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
else {
if(!alloced_switch) {
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_columns_float[colname].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
}
else {
readSegmentsFromFile(segment,colname);
if(!d_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
string f1;
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(segment) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(segment);
};
if(type[colname] != 1) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), buffers[f1], d_v, s_v, colname);
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
};
}
else {
if(decimal[colname]) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + offset));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin(), long_to_float());
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
//for(int i = 0; i < mRecCount;i++)
//cout << "DECOMP " << (float_type)(d_col_int[i]) << " " << d_col_float[i] << endl;
};
}
//else // uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
void CudaSet::CopyColumnToGpu(string colname) // copy all segments
{
if(not_compressed) {
if(type[colname] != 1)
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mRecCount, d_columns_int[colname].begin());
else
thrust::copy(h_columns_float[colname].begin(), h_columns_float[colname].begin() + mRecCount, d_columns_float[colname].begin());
}
else {
if(!d_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
size_t cnt = 0;
string f1;
for(unsigned int i = 0; i < segCount; i++) {
readSegmentsFromFile(i,colname);
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(i) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(i);
};
if(type[colname] == 0) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), buffers[f1], d_v, s_v, colname);
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin() + cnt, long_to_float());
};
}
// else uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
};
cnt = cnt + mRecCount;
//totalRecs = totals + mRecCount;
};
mRecCount = cnt;
};
}
void CudaSet::CopyColumnToHost(string colname, size_t offset, size_t RecCount)
{
if(type[colname] != 1) {
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin() + RecCount, h_columns_int[colname].begin() + offset);
}
else
thrust::copy(d_columns_float[colname].begin(), d_columns_float[colname].begin() + RecCount, h_columns_float[colname].begin() + offset);
}
void CudaSet::CopyColumnToHost(string colname)
{
CopyColumnToHost(colname, 0, mRecCount);
}
void CudaSet::CopyToHost(size_t offset, size_t count)
{
for(unsigned int i = 0; i < columnNames.size(); i++) {
CopyColumnToHost(columnNames[i], offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_float[name].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_int[name].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_float[name].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_int[name].data());
}
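// Marks group boundaries for the columns on the stack: a row starts a new group when it differs
// from the preceding row in any grouping column. The per-column boundary flags are OR-ed together
// and the resulting boundary positions are collected into grp (grp[0] is always 0).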
void CudaSet::GroupBy(stack<string> columnRef)
{
thrust::device_vector<bool> grp_dev(mRecCount);
thrust::fill(grp_dev.begin(), grp_dev.end(), 0);
if(scratch.size() < mRecCount)
scratch.resize(mRecCount*sizeof(bool));
thrust::device_ptr<bool> d_group((bool*)thrust::raw_pointer_cast(scratch.data()));
d_group[mRecCount-1] = 0;
for(; !columnRef.empty(); columnRef.pop()) {
unsigned int bits;
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[columnRef.top()];
if(bits == 8) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
}
else
if(bits == 16) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
}
else
if(bits == 32) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
}
else {
thrust::transform(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount - 1,
d_columns_int[columnRef.top()].begin()+1, d_group, thrust::not_equal_to<int_type>());
};
thrust::transform(d_group, d_group+mRecCount, grp_dev.begin(), grp_dev.begin(), thrust::logical_or<bool>());
};
grp_count = thrust::count(grp_dev.begin(), grp_dev.end(), 1) + 1;
//cout << "grp count " << grp_count << endl;
grp.resize(grp_count);
if(grp_count > 1)
thrust::copy_if(thrust::make_counting_iterator((unsigned int)1), thrust::make_counting_iterator((unsigned int)grp_dev.size()),
grp_dev.begin(), grp.begin()+1, thrust::identity<bool>());
grp[0] = 0;
};
void CudaSet::addDeviceColumn(int_type* col, string colname, size_t recCount)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 0;
d_columns_int[colname] = thrust::device_vector<int_type>(recCount);
h_columns_int[colname] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_int[colname].size() < recCount) {
d_columns_int[colname].resize(recCount);
};
if(h_columns_int[colname].size() < recCount) {
h_columns_int[colname].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[colname].begin());
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin()+recCount, h_columns_int[colname].begin());
};
void CudaSet::addDeviceColumn(float_type* col, string colname, size_t recCount, bool is_decimal)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 1;
d_columns_float[colname] = thrust::device_vector<float_type>(recCount);
h_columns_float[colname] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_float[colname].size() < recCount)
d_columns_float[colname].resize(recCount);
if(h_columns_float[colname].size() < recCount)
h_columns_float[colname].resize(recCount);
};
decimal[colname] = is_decimal;
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[colname].begin());
};
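// Builds a permutation that sorts the set by the columns in sf (ascending), applying
// update_permutation for each column in the order they appear in the queue; integer and float
// columns are handled on the device, char columns on the host.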
void CudaSet::gpu_perm(queue<string> sf, thrust::device_vector<unsigned int>& permutation) {
permutation.resize(mRecCount);
thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, mRecCount*8));
string sort_type = "ASC";
while(!sf.empty()) {
if (type[sf.front()] == 0) {
update_permutation(d_columns_int[sf.front()], raw_ptr, mRecCount, sort_type, (int_type*)temp, 64);
}
else
if (type[sf.front()] == 1) {
update_permutation(d_columns_float[sf.front()], raw_ptr, mRecCount, sort_type, (float_type*)temp, 64);
}
else {
thrust::host_vector<unsigned int> permutation_h = permutation;
char* temp1 = new char[char_size[sf.front()]*mRecCount];
update_permutation_char_host(h_columns_char[sf.front()], permutation_h.data(), mRecCount, sort_type, temp1, char_size[sf.front()]);
delete [] temp1;
permutation = permutation_h;
};
sf.pop();
};
cudaFree(temp);
}
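// Writes mCount records starting at offset to disk as compressed segments. If op_sort is set the
// chunk is first permuted by the sort columns and may be split into partition_count partitions,
// each becoming its own segment; integers and decimals use pfor compression, plain floats are
// written raw, and char columns go through the dictionary writer compress_char.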
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount, const bool append)
{
string str(file_name);
thrust::device_vector<unsigned int> permutation;
long long int oldCount;
bool int_check = 0;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, mCount*float_size));
total_count = total_count + mCount;
if (mCount > total_max && op_sort.empty()) {
total_max = mCount;
};
if(!total_segments && append) {
string s= file_name + "." + columnNames[0] + ".header";
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
binary_file.read((char *)&oldCount, 8);
binary_file.read((char *)&total_segments, 4);
binary_file.read((char *)&maxRecs, 4);
if(total_max < maxRecs)
total_max = maxRecs;
binary_file.close();
total_count = oldCount + mCount;
};
};
if(!op_sort.empty()) { //sort the segment
gpu_perm(op_sort, permutation);
};
// here we need to check for partitions and if partition_count > 0 -> create partitions
if(mCount < partition_count || partition_count == 0)
partition_count = 1;
unsigned int partition_recs = mCount/partition_count;
if(!op_sort.empty()) {
if(total_max < partition_recs)
total_max = partition_recs;
};
total_segments++;
old_segments = total_segments;
size_t new_offset;
for(unsigned int i = 0; i < columnNames.size(); i++) {
std::clock_t start1 = std::clock();
string colname = columnNames[i];
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
new_offset = 0;
if(type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_int[colname].begin(), d_col);
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1) {
pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[colname], 0);
};
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
if(!int_check) {
thrust::copy(h_columns_int[colname].begin() + offset, h_columns_int[colname].begin() + offset + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( thrust::raw_pointer_cast(d_columns_int[colname].data()), mCount*int_size, str, h_columns_int[colname], 0);
};
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[colname], 1);
else
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[colname], 1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_float[colname].begin() + offset, h_columns_float[colname].begin() + offset + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[colname], 1);
};
}
else { // do not compress -- float
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col+mRecCount, h_columns_float[colname].begin());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
unsigned int curr_cnt;
if (p < partition_count - 1)
curr_cnt = partition_recs;
else
curr_cnt = mCount - partition_recs*p;
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&curr_cnt, 4);
binary_file.write((char *)(h_columns_float[colname].data() + new_offset),curr_cnt*float_size);
new_offset = new_offset + partition_recs;
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[colname].data() + offset),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
};
}
else { //char
//populate char_hash
if(append && total_segments == 1) {
string s= file_name + "." + colname;
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
char* strings = new char[oldCount*char_size[colname]];
binary_file.read(strings, oldCount*char_size[colname]);
binary_file.close();
unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int z = 0 ; z < oldCount; z++) {
char_hash[ind][MurmurHash64A(&strings[z*char_size[colname]], char_size[colname], hash_seed)/2] = z;
};
delete [] strings;
};
};
if(!op_sort.empty()) {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[colname]*mRecCount];
apply_permutation_char_host(h_columns_char[colname], h_permutation, mRecCount, t, char_size[colname]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[colname]*mRecCount, h_columns_char[colname]);
delete [] t;
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
compress_char(str, colname, partition_recs, new_offset, total_segments-1);
else
compress_char(str, colname, mCount - partition_recs*p, new_offset, total_segments-1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
compress_char(str, colname, mCount, offset, total_segments-1);
};
};
if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
if(!op_sort.empty())
writeHeader(file_name, colname, total_segments-1);
else {
writeHeader(file_name, colname, total_segments);
};
};
total_segments = old_segments;
};
cudaFree(d);
if(!op_sort.empty()) {
total_segments = (old_segments-1)+partition_count;
};
permutation.resize(0);
permutation.shrink_to_fit();
}
void CudaSet::writeHeader(string file_name, string colname, unsigned int tot_segs) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
//cout << "HEADER1 " << total_count << " " << tot_segs << " " << total_max << endl;
binary_file.close();
};
void CudaSet::reWriteHeader(string file_name, string colname, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&newRecs, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&maxRecs1, 4);
//cout << "HEADER2 " << newRecs << endl;
binary_file.close();
};
void CudaSet::writeSortHeader(string file_name)
{
string str(file_name);
unsigned int idx;
if(!op_sort.empty()) {
str += ".sort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_sort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_sort);
while(!os.empty()) {
if(verbose)
cout << "sorted on " << idx << endl;
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".sort";
remove(str.c_str());
};
str = file_name;
if(!op_presort.empty()) {
str += ".presort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_presort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_presort);
while(!os.empty()) {
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".presort";
remove(str.c_str());
};
}
using namespace mgpu;
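// Prints up to 'limit' records through the row_cb callback. Dictionary encoded strings are
// resolved by seeking into their dictionary files; decimal and timestamp columns are formatted
// from their integer representation. Works both on fully resident data and on filtered or
// compressed sets, in which case segments are copied to the GPU and back one at a time.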
void CudaSet::Display(unsigned int limit, bool binary, bool term)
{
#define MAXCOLS 128
#define MAXFIELDSIZE 1400
//-- This should/will be converted to an array holding pointers of malloced sized structures--
char bigbuf[MAXCOLS * MAXFIELDSIZE];
memset(bigbuf, 0, MAXCOLS * MAXFIELDSIZE);
char *fields[MAXCOLS];
const char *dcolumns[MAXCOLS];
size_t mCount; // num records in play
bool print_all = 0;
string ss, str;
int rows = 0;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
cout << "mRecCount=" << mRecCount << " mcount = " << mCount << " term " << term << " limit=" << limit << " print_all=" << print_all << endl;
unsigned int cc =0;
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
for(unsigned int i = 0; i < columnNames.size(); i++)
{
fields[cc] = &(bigbuf[cc*MAXFIELDSIZE]); // a hack to avoid malloc overheads - refine later
dcolumns[cc++] = columnNames[i].c_str();
if(string_map.find(columnNames[i]) != string_map.end()) {
auto s = string_map[columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
FILE *f;
f = fopen(string_map[columnNames[i]].c_str(), "rb");
file_map[string_map[columnNames[i]]] = f;
len_map[string_map[columnNames[i]]] = len;
};
};
// The goal here is to loop fast and avoid any double handling of outgoing data - pointers are good.
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) { // for each record
for(unsigned int j=0; j < columnNames.size(); j++) { // for each col
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
sprintf(fields[j], "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]])
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
//fprintf(file_pr, "%s", buffer);
//fprintf(file_pr, ".%d", rem);
sprintf(fields[j], "%s.%d", buffer,rem);
/*time_t tt = h_columns_int[columnNames[j]][i];
auto ti = localtime(&tt);
char buffer[10];
strftime(buffer,80,"%Y-%m-%d", ti);
sprintf(fields[j], "%s", buffer);
*/
};
};
}
else {
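                    // string columns hold dictionary indices; fetch the fixed-width entry from the dictionary file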
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char **)dcolumns);
rows++;
};
}
else {
queue<string> op_vx;
for(unsigned int i = 0; i < columnNames.size(); i++)
op_vx.push(columnNames[i]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) { // if host arrays are empty
copyColumns(this, op_vx, curr_seg, cnt);
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount || print_all)
curr_count = mRecCount;
else
curr_count = mCount - sum_printed;
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end())
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char**)dcolumns);
rows++;
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
}; // end else
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
void CudaSet::Store(const string file_name, const char* sep, const unsigned int limit, const bool binary, const bool append, const bool term)
{
if (mRecCount == 0 && binary == 1 && !term) { // write tails
for(unsigned int j=0; j < columnNames.size(); j++) {
writeHeader(file_name, columnNames[j], total_segments);
};
return;
};
size_t mCount;
bool print_all = 0;
string str;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
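    // binary == 0 writes a delimited text file; otherwise the set is compressed into segmented binary column files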
if(binary == 0) {
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
string bf;
unsigned int max_len = 0;
for(unsigned int j=0; j < columnNames.size(); j++) {
if(string_map.find(columnNames[j]) != string_map.end()) {
auto s = string_map[columnNames[j]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if(len > max_len)
max_len = len;
FILE *f;
f = fopen(string_map[columnNames[j]].c_str(), "rb");
file_map[string_map[columnNames[j]]] = f;
len_map[string_map[columnNames[j]]] = len;
};
};
        bf.resize(max_len); // resize (not reserve) so the fread calls below can safely write into the buffer
FILE *file_pr;
if(!term) {
file_pr = fopen(file_name.c_str(), "w");
if (!file_pr)
cout << "Could not open file " << file_name << endl;
}
else
file_pr = stdout;
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1 ) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
//fprintf(file_pr, "%.*s", string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].size(), string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].c_str());
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
}
};
if (i != mCount -1 )
fputs("\n",file_pr);
};
if(!term)
fclose(file_pr);
}
else {
queue<string> op_vx;
string ss;
for(unsigned int j=0; j < columnNames.size(); j++)
op_vx.push(columnNames[j]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
mRecCount = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
if(curr_seg == 0) {
if(limit != 0 && limit < mRecCount) {
mCount = limit;
print_all = 0;
}
else {
mCount = mRecCount;
print_all = 1;
};
};
// if host arrays are empty
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
//cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
if(sum_printed + mRecCount <= mCount || print_all) {
curr_count = mRecCount;
}
else {
curr_count = mCount - sum_printed;
};
}
else {
curr_count = mCount;
};
sum_printed = sum_printed + mRecCount;
//cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
};
};
if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
fputs("\n",file_pr);
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
if(!term) {
fclose(file_pr);
};
};
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
else {
//lets update the data dictionary
for(unsigned int j=0; j < columnNames.size(); j++) {
data_dict[file_name][columnNames[j]].col_type = type[columnNames[j]];
if(type[columnNames[j]] != 2) {
if(decimal[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = decimal_zeroes[columnNames[j]];
else
if (ts_cols[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = UINT_MAX;
else
data_dict[file_name][columnNames[j]].col_length = 0;
}
else
data_dict[file_name][columnNames[j]].col_length = char_size[columnNames[j]];
};
save_dict = 1;
if(text_source) { //writing a binary file using a text file as a source
compress(file_name, 0, 1, 0, mCount, append);
for(unsigned int i = 0; i< columnNames.size(); i++)
if(type[columnNames[i]] == 2)
deAllocColumnOnDevice(columnNames[i]);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
size_t offset = 0;
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for(unsigned int i = 0; i< columnNames.size(); i++) {
op_vx.push(columnNames[i]);
};
allocColumns(this, op_vx);
size_t oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
size_t cnt = 0;
copyColumns(this, op_vx, i, cnt);
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), mRecCount, append);
};
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = (mRecCount/process_count + 1);
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = mRecCount - (segCount-1)*process_count;
};
compress(file_name, offset, 0, z - (segCount-1), mCount, append);
offset = offset + mCount;
};
};
};
};
}
void CudaSet::compress_char(const string file_name, const string colname, const size_t mCount, const size_t offset, const unsigned int segment)
{
unsigned int len = char_size[colname];
string h_name, i_name, file_no_seg = file_name.substr(0, file_name.find_last_of("."));
i_name = file_no_seg + "." + to_string(segment) + ".idx";
h_name = file_no_seg + "." + to_string(segment) + ".hash";
fstream b_file_str, loc_hashes;
fstream binary_file_h(h_name.c_str(),ios::out|ios::binary|ios::trunc);
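    // .hash segment layout: 4-byte record count followed by one 64-bit hash per value (written after encoding below)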
binary_file_h.write((char *)&mCount, 4);
if(segment == 0) {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::trunc);
}
else {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::app);
};
if(h_columns_int.find(colname) == h_columns_int.end()) {
h_columns_int[colname] = thrust::host_vector<int_type >(mCount);
}
else {
if(h_columns_int[colname].size() < mCount)
h_columns_int[colname].resize(mCount);
};
if(d_columns_int.find(colname) == d_columns_int.end()) {
d_columns_int[colname] = thrust::device_vector<int_type >(mCount);
}
else {
if(d_columns_int[colname].size() < mCount)
d_columns_int[colname].resize(mCount);
};
size_t cnt;
long long int* hash_array = new long long int[mCount];
map<unsigned long long int, size_t>::iterator iter;
unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
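    // dictionary-encode the column: hash each fixed-width string, appending unseen values to the dictionary file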
for (unsigned int i = 0 ; i < mCount; i++) {
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
iter = char_hash[ind].find(hash_array[i]);
if(iter == char_hash[ind].end()) {
cnt = char_hash[ind].size();
char_hash[ind][hash_array[i]] = cnt;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
h_columns_int[colname][i] = cnt;
}
else {
h_columns_int[colname][i] = iter->second;
};
};
binary_file_h.write((char *)hash_array, 8*mCount);
delete [] hash_array;
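    // pfor-compress the resulting integer codes and write them to the .idx segment file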
thrust::device_vector<int_type> d_col(mCount);
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mCount, d_col.begin());
pfor_compress(thrust::raw_pointer_cast(d_col.data()), mCount*int_size, i_name, h_columns_int[colname], 0);
binary_file_h.close();
b_file_str.close();
};
bool first_time = 1;
size_t rec_sz = 0;
size_t process_piece;
bool CudaSet::LoadBigFile(FILE* file_p, thrust::device_vector<char>& d_readbuff, thrust::device_vector<char*>& dest,
thrust::device_vector<unsigned int>& ind, thrust::device_vector<unsigned int>& dest_len)
{
const char* sep = separator.c_str();
unsigned int maxx = cols.rbegin()->first;
map<unsigned int, string>::iterator it;
bool done = 0;
std::clock_t start1 = std::clock();
vector<int> types;
vector<int> cl;
types.push_back(0);
for(int i = 0; i < maxx; i++) {
auto iter = cols.find(i+1);
if(iter != cols.end()) {
types.push_back(type[iter->second]);
cl.push_back(iter->first-1);
}
else
types.push_back(0);
};
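    // size the staging buffer once: use the configured piece unless free GPU memory forces a smaller one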
if(first_time) {
if(process_count*4 > getFreeMem()) {
process_piece = getFreeMem()/4;
}
else
process_piece = process_count;
readbuff = new char[process_piece+1];
d_readbuff.resize(process_piece+1);
cout << "set a piece to " << process_piece << " " << getFreeMem() << endl;
};
thrust::device_vector<unsigned int> ind_cnt(1);
thrust::device_vector<char> sepp(1);
sepp[0] = *sep;
long long int total_processed = 0;
size_t recs_processed = 0;
bool finished = 0;
thrust::device_vector<long long int> dev_pos;
long long int offset;
unsigned int cnt = 1;
const unsigned int max_len = 23;
while(!done) {
auto rb = fread(readbuff, 1, process_piece, file_p);
if(rb < process_piece) {
done = 1;
finished = 1;
fclose(file_p);
};
if(total_processed >= process_count)
done = 1;
thrust::fill(d_readbuff.begin(), d_readbuff.end(),0);
thrust::copy(readbuff, readbuff+rb, d_readbuff.begin());
auto curr_cnt = thrust::count(d_readbuff.begin(), d_readbuff.begin() + rb, '\n') - 1;
if(recs_processed == 0 && first_time) {
rec_sz = curr_cnt;
if(finished)
rec_sz++;
total_max = curr_cnt;
};
if(first_time) {
for(unsigned int i=0; i < columnNames.size(); i++) {
auto colname = columnNames[i];
if (type[colname] == 0) {
d_columns_int[colname].resize(d_columns_int[colname].size() + rec_sz);
h_columns_int[colname].resize(h_columns_int[colname].size() + rec_sz);
}
else
if (type[colname] == 1) {
d_columns_float[colname].resize(d_columns_float[colname].size() + rec_sz);
h_columns_float[colname].resize(h_columns_float[colname].size() + rec_sz);
}
else {
char* c = new char[cnt*rec_sz*char_size[columnNames[i]]];
if(recs_processed > 0) {
memcpy(c, h_columns_char[columnNames[i]], recs_processed*char_size[columnNames[i]]);
delete [] h_columns_char[columnNames[i]];
};
h_columns_char[columnNames[i]] = c;
if(recs_processed == 0) {
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
cudaMemset(temp,0,char_size[columnNames[i]]*rec_sz);
d_columns_char[columnNames[i]] = (char*)temp;
};
};
if(recs_processed == 0) {
ind[i] = cl[i];
void* temp;
if(type[columnNames[i]] != 2) {
if(!ts_cols[columnNames[i]]) {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, max_len*rec_sz));
dest_len[i] = max_len;
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, 23*rec_sz));
dest_len[i] = 23;
}
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
dest_len[i] = char_size[columnNames[i]];
};
dest[i] = (char*)temp;
};
};
};
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 2) {
cudaMemset(dest[i],0,max_len*rec_sz);
}
else {
cudaMemset(dest[i],0,char_size[columnNames[i]]*rec_sz);
};
};
if(dev_pos.size() < curr_cnt+1)
dev_pos.resize(curr_cnt+1); //avoiding the unnecessary allocs
dev_pos[0] = -1;
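        // record the byte offset of every newline so each record can be parsed in place on the GPU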
thrust::copy_if(thrust::make_counting_iterator((unsigned long long int)0), thrust::make_counting_iterator((unsigned long long int)rb-1),
d_readbuff.begin(), dev_pos.begin()+1, _1 == '\n');
if(!finished) {
if(curr_cnt < rec_sz) {
offset = (dev_pos[curr_cnt] - rb)+1;
//cout << "PATH 1 " << dev_pos[curr_cnt] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = curr_cnt;
}
else {
offset = (dev_pos[rec_sz] - rb)+1;
//cout << "PATH 2 " << dev_pos[rec_sz] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = rec_sz;
};
}
else {
mRecCount = curr_cnt + 1;
};
thrust::counting_iterator<unsigned int> begin(0);
ind_cnt[0] = mColumnCount;
parse_functor ff((const char*)thrust::raw_pointer_cast(d_readbuff.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sepp.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + mRecCount, ff);
ind_cnt[0] = max_len;
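        // convert every parsed text field into its typed device column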
for(int i =0; i < mColumnCount; i++) {
if(type[columnNames[i]] == 0) { //int
thrust::device_ptr<char> p1((char*)dest[i]);
if(p1[4] == '-') { //date
if(!ts_cols[columnNames[i]]) {
gpu_date date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
else {
gpu_tdate date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
}
else { //int
if(decimal[columnNames[i]]) {
thrust::device_vector<unsigned int> scale(1);
scale[0] = decimal_zeroes[columnNames[i]];
gpu_atold atold((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(scale.data()));
thrust::for_each(begin, begin + mRecCount, atold);
}
else {
gpu_atoll atoll_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atoll_ff);
};
};
thrust::copy(d_columns_int[columnNames[i]].begin() + recs_processed, d_columns_int[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_int[columnNames[i]].begin() + recs_processed);
}
else
if(type[columnNames[i]] == 1) {
gpu_atof atof_ff((const char*)dest[i],(double*)thrust::raw_pointer_cast(d_columns_float[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atof_ff);
thrust::copy(d_columns_float[columnNames[i]].begin() + recs_processed, d_columns_float[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_float[columnNames[i]].begin() + recs_processed);
}
else {//char is already done
thrust::device_ptr<char> p1((char*)dest[i]);
cudaMemcpy( h_columns_char[columnNames[i]] + char_size[columnNames[i]]*recs_processed, (void *)dest[i] , char_size[columnNames[i]]*mRecCount, cudaMemcpyDeviceToHost);
};
};
recs_processed = recs_processed + mRecCount;
cnt++;
};
if(finished) {
for(int i =0; i < mColumnCount; i++) {
if(dest[i]) {
cudaFree(dest[i]);
dest[i] = nullptr;
};
};
delete [] readbuff;
};
cout << "processed recs " << recs_processed << " " << getFreeMem() << endl;
first_time = 0;
mRecCount = recs_processed;
return finished;
};
void CudaSet::free() {
for(unsigned int i = 0; i < columnNames.size(); i++ ) {
if(type[columnNames[i]] == 0 && h_columns_int[columnNames[i]].size() ) {
h_columns_int[columnNames[i]].resize(0);
h_columns_int[columnNames[i]].shrink_to_fit();
}
else {
h_columns_float[columnNames[i]].resize(0);
h_columns_float[columnNames[i]].shrink_to_fit();
};
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
deAllocOnDevice();
};
void alloc_pool(unsigned int maxRecs) {
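    // reserve an 8-byte-per-record scratch buffer that the arithmetic operators reuse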
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, 8*maxRecs));
alloced_mem.push_back(temp);
};
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if(d<s)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if(d>=s)
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if(d<=s)
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if(d==s)
res = 1;
else
res = 0;
else // !=
if(d!=s)
res = 1;
else
res = 0;
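    // broadcast the scalar comparison result to every record (step 0 makes sequence behave like fill)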
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if ((s-d) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON))
res = 1;
else
res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if(p2)
d = d*(int_type)pow(10, p2);
if (op_type == 2) // >
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
if(!p1 && !p2) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
}
else
if(p1 && p2)
                    thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater<int_type>());
else
                    thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else
if(p1 && p2)
                    thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less<int_type>());
else
                    thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else
if(p1 && p2)
                    thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater_equal<int_type>());
else
                    thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else
if(p1 && p2)
                    thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less_equal<int_type>());
else
                    thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else
if(p1 && p2)
                    thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::equal_to<int_type>());
else
                    thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else // !=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
if(p1 && p2)
                    thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
                    thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr(column1);
    thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // convert the int column to float in the scratch buffer
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
//cout << "OP " << d << " " << op_type << " " << p1 << " " << p2 << endl;
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
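    // d1 keeps the unscaled literal for multiplication; d is scaled by pow(10, p2) for the additive operators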
unsigned int d1 = d;
if(p2)
d = d*(unsigned int)pow(10, p2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d1), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<int_type>()); // d was already scaled by pow(10, p2) above
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<int_type>()); // d was already scaled by pow(10, p2) above
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
}
else {
if(!p1)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<int_type>()); // d was already scaled by pow(10, p2) above
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
};
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
//cout << "OP " << op_type << " " << p1 << " " << p2 << " " << reverse << endl;
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else
if(p1 && p2) {
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::plus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::divides<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::divides<int_type>());
}
}
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return (float_type*)thrust::raw_pointer_cast(temp);
}
char CudaSet::loadIndex(const string index_name, const unsigned int segment)
{
FILE* f;
unsigned int bits_encoded, fit_count, vals_count, sz, real_count;
void* d_str;
string f1 = index_name + "." + to_string(segment);
    char res = 0; // initialise so the interactive path, which never reads the trailing byte, returns a defined value
//interactive = 0;
if(interactive) {
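        // interactive mode keeps the whole index file in a pinned host buffer and reuses it across queries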
if(index_buffers.find(f1) == index_buffers.end()) {
f = fopen (f1.c_str(), "rb" );
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
char* buff;
cudaHostAlloc(&buff, fileSize, cudaHostAllocDefault);
fseek(f, 0, SEEK_SET);
fread(buff, fileSize, 1, f);
fclose(f);
index_buffers[f1] = buff;
};
sz = ((unsigned int*)index_buffers[f1])[0];
idx_dictionary_int[index_name].clear();
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][((int_type*)(index_buffers[f1]+4+8*i))[0]] = i;
};
vals_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[2];
real_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[3];
mRecCount = real_count;
if(idx_vals.count(index_name) == 0) {
cudaMalloc((void **) &d_str, (vals_count+2)*int_size);
cudaMemcpy( d_str, (void *) &((index_buffers[f1]+4 +8*sz)[0]), (vals_count+2)*int_size, cudaMemcpyHostToDevice);
idx_vals[index_name] = (unsigned long long int*)d_str;
};
}
else {
f = fopen (f1.c_str(), "rb" );
fread(&sz, 4, 1, f);
int_type* d_array = new int_type[sz];
idx_dictionary_int[index_name].clear();
fread((void*)d_array, sz*int_size, 1, f);
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][d_array[i]] = i;
};
delete [] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
mRecCount = real_count;
unsigned long long int* int_array = new unsigned long long int[vals_count+2];
fseek ( f , -16 , SEEK_CUR );
fread((void*)int_array, 1, vals_count*8 + 16, f);
fread(&res, 1, 1, f);
fclose(f);
void* d_str;
cudaMalloc((void **) &d_str, (vals_count+2)*int_size);
cudaMemcpy( d_str, (void *) int_array, (vals_count+2)*int_size, cudaMemcpyHostToDevice);
if(idx_vals.count(index_name))
cudaFree(idx_vals[index_name]);
idx_vals[index_name] = (unsigned long long int*)d_str;
}
return res;
}
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
mColumnCount = (unsigned int)nameRef.size();
FILE* f;
string f1;
unsigned int cnt;
char buffer[4000];
string str;
not_compressed = 0;
mRecCount = Recs;
hostRecCount = Recs;
totalRecs = Recs;
load_file_name = file_name;
f1 = file_name + ".sort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
sorted_fields.push(str);
if(verbose)
cout << "segment sorted on " << str << endl;
};
fclose(f);
};
f1 = file_name + ".presort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
presorted_fields.push(str);
if(verbose)
cout << "presorted on " << str << endl;
};
fclose(f);
};
tmp_table = 0;
filtered = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
//f1 = file_name + "." + nameRef.front() + ".0";
//f = fopen (f1.c_str() , "rb" );
//fread((char *)&bytes, 4, 1, f); //need to read metadata such as type and length
//fclose(f);
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
f1 = file_name + "." + nameRef.front() + ".0";
f = fopen (f1.c_str() , "rb" );
if(!f) {
cout << "Couldn't find field " << nameRef.front() << endl;
exit(0);
};
for(unsigned int j = 0; j < 6; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
compTypes[nameRef.front()] = cnt;
};
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type >();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
string_map[nameRef.front()] = file_name + "." + nameRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
{
mColumnCount = (unsigned int)nameRef.size();
tmp_table = 0;
filtered = 0;
mRecCount = 0;
hostRecCount = Recs;
segCount = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type>();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(const size_t RecordCount, const unsigned int ColumnCount)
{
mRecCount = RecordCount;
hostRecCount = RecordCount;
mColumnCount = ColumnCount;
filtered = 0;
};
void CudaSet::initialize(queue<string> op_sel, const queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = (unsigned int)op_sel.size();
segCount = 1;
not_compressed = 1;
filtered = 0;
col_aliases = op_sel_as;
unsigned int i = 0;
CudaSet *a;
while(!op_sel.empty()) {
for(auto it = varNames.begin(); it != varNames.end(); it++) {
a = it->second;
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end())
break;
};
type[op_sel.front()] = a->type[op_sel.front()];
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
columnNames.push_back(op_sel.front());
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
//h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type>();
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
//h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type>();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
};
i++;
op_sel.pop();
};
}
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = 0;
queue<string> q_cnt(op_sel);
unsigned int i = 0;
set<string> field_names;
while(!q_cnt.empty()) {
if( std::find(a->columnNames.begin(), a->columnNames.end(), q_cnt.front()) != a->columnNames.end() ||
std::find(b->columnNames.begin(), b->columnNames.end(), q_cnt.front()) != b->columnNames.end()) {
field_names.insert(q_cnt.front());
};
q_cnt.pop();
}
mColumnCount = (unsigned int)field_names.size();
maxRecs = b->maxRecs;
segCount = 1;
filtered = 0;
not_compressed = 1;
col_aliases = op_sel_as;
i = 0;
while(!op_sel.empty()) {
if(std::find(columnNames.begin(), columnNames.end(), op_sel.front()) == columnNames.end()) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end()) {
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
columnNames.push_back(op_sel.front());
type[op_sel.front()] = a->type[op_sel.front()];
ts_cols[op_sel.front()] = a->ts_cols[op_sel.front()];
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(a->string_map.find(op_sel.front()) != a->string_map.end()) {
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
i++;
}
else
if(std::find(b->columnNames.begin(), b->columnNames.end(), op_sel.front()) != b->columnNames.end()) {
columnNames.push_back(op_sel.front());
cols[i] = op_sel.front();
decimal[op_sel.front()] = b->decimal[op_sel.front()];
type[op_sel.front()] = b->type[op_sel.front()];
ts_cols[op_sel.front()] = b->ts_cols[op_sel.front()];
if (b->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(b->string_map.find(op_sel.front()) != b->string_map.end()) {
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
decimal[op_sel.front()] = b->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = b->decimal_zeroes[op_sel.front()];
}
else
if (b->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = b->char_size[op_sel.front()];
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
i++;
}
}
op_sel.pop();
};
};
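// Return the comparison operator with its operands swapped: > becomes <, >= becomes <= and vice versa.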
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 1;
else
if (op_type == 1) // <
return 2;
else
if (op_type == 6) // >=
return 5;
else
if (op_type == 5) // <=
return 6;
else
return op_type;
}
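// Return the number of bytes of free GPU memory.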
size_t getFreeMem()
{
size_t available, total;
cudaMemGetInfo(&available, &total);
return available;
} ;
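// For filtered sets, grow the global staging buffer (alloced_tmp) so it can hold the source table's largest segment; otherwise allocate each requested column on the device.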
void allocColumns(CudaSet* a, queue<string> fields)
{
if(a->filtered) {
CudaSet* t;
if(a->filtered)
t = varNames[a->source_name];
else
t = a;
if(int_size*t->maxRecs > alloced_sz) {
if(alloced_sz) {
cudaFree(alloced_tmp);
};
cudaMalloc((void **) &alloced_tmp, int_size*t->maxRecs);
alloced_sz = int_size*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(var_exists(a, fields.front()) && !a->onDevice(fields.front())) {
a->allocColumnOnDevice(fields.front(), a->maxRecs);
}
fields.pop();
};
};
}
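// Materialize one column of a filtered segment: gather only the rows listed in prm_d, or copy the whole segment when prm_index says every row qualifies.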
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
if(!a->onDevice(field)) {
a->allocColumnOnDevice(field, a->maxRecs);
};
if(a->prm_index == 'R') {
mygather(field, a, t, count, a->mRecCount);
}
else {
mycopy(field, a, t, count, t->mRecCount);
a->mRecCount = t->mRecCount;
};
}
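// Finish on-device decompression of bit-packed columns: widen 8/16/32-bit payloads to 64 bits, add back the per-column offset stored in cpy_init_val, and convert the result back for float columns.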
void copyFinalize(CudaSet* a, queue<string> fields, bool ts)
{
set<string> uniques;
if(scratch.size() < a->mRecCount*8)
scratch.resize(a->mRecCount*8);
thrust::device_ptr<int_type> tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front()) && cpy_bits.find(fields.front()) != cpy_bits.end() && (!a->ts_cols[fields.front()] || ts)) {
if(cpy_bits[fields.front()] == 8) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
}
else {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
};
}
else
if(cpy_bits[fields.front()] == 16) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
}
else {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
};
}
else
if(cpy_bits[fields.front()] == 32) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
}
else {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
};
}
else {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
};
};
thrust::constant_iterator<int_type> iter(cpy_init_val[fields.front()]);
if(a->type[fields.front()] != 1) {
thrust::transform(tmp, tmp + a->mRecCount, iter, a->d_columns_int[fields.front()].begin(), thrust::plus<int_type>());
}
else {
thrust::device_ptr<int_type> dest((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(tmp, tmp + a->mRecCount, iter, dest, thrust::plus<int_type>());
thrust::transform(dest, dest+a->mRecCount, a->d_columns_float[fields.front()].begin(), long_to_float());
};
};
uniques.insert(fields.front());
fields.pop();
};
}
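// Bring the listed columns of one segment onto the device; for filtered sets this first evaluates the filter and gathers only the qualifying rows from the source table.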
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
//std::clock_t start1 = std::clock();
set<string> uniques;
if(a->filtered) { //filter the segment
if(flt) {
filter_op(a->fil_s, a->fil_f, segment);
};
if(rsz && a->mRecCount) {
queue<string> fields1(fields);
while(!fields1.empty()) {
a->resizeDeviceColumn(a->devRecCount + a->mRecCount, fields1.front());
fields1.pop();
};
a->devRecCount = a->devRecCount + a->mRecCount;
};
};
cpy_bits.clear();
cpy_init_val.clear();
auto f(fields);
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
if(a->filtered) {
if(a->mRecCount) {
CudaSet *t = varNames[a->source_name];
alloced_switch = 1;
t->CopyColumnToGpu(fields.front(), segment);
gatherColumns(a, t, fields.front(), segment, count);
alloced_switch = 0;
};
}
else {
if(a->mRecCount) {
a->CopyColumnToGpu(fields.front(), segment, count);
};
};
uniques.insert(fields.front());
};
fields.pop();
};
//std::cout<< "copy time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
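// Gather the rows selected by prm_d from the staged column in alloced_tmp into the destination column at 'offset', honouring the packed element width recorded in cpy_bits.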
void mygather(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1 ) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
};
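// Copy a whole staged segment from alloced_tmp into the destination column at 'offset', honouring the packed element width recorded in cpy_bits.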
void mycopy(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<short int> d_col_source((short int*)alloced_tmp);
thrust::device_ptr<short int> d_col_dest((short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()+offset));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<short int> d_col_source((short int*)alloced_tmp);
thrust::device_ptr<short int> d_col_dest((short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()+offset));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[colname].begin() + offset);
};
};
};
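// Load the requested right-table columns (join key pushed last) for the given segment range onto the device and return the total number of rows copied.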
size_t load_queue(queue<string> c1, CudaSet* right, string f2, size_t &rcount,
unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
queue<string> cc;
while(!c1.empty()) {
if(std::find(right->columnNames.begin(), right->columnNames.end(), c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() ) {
cc.push(c1.front());
};
};
c1.pop();
};
if(std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
cc.push(f2);
};
if(right->filtered) {
allocColumns(right, cc);
};
rcount = right->maxRecs;
queue<string> ct(cc);
while(!ct.empty()) {
if(right->filtered && rsz) {
right->mRecCount = 0;
}
else {
right->allocColumnOnDevice(ct.front(), rcount*right->segCount);
};
ct.pop();
};
size_t cnt_r = 0;
right->devRecCount = 0;
for(unsigned int i = start_segment; i < end_segment; i++) {
if(!right->filtered)
copyColumns(right, cc, i, cnt_r, rsz, 0);
else
copyColumns(right, cc, i, cnt_r, rsz, flt);
cnt_r = cnt_r + right->mRecCount;
};
right->mRecCount = cnt_r;
return cnt_r;
}
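// Return the widest character column (or string dictionary entry) in the set, never less than 8 bytes.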
size_t max_char(CudaSet* a)
{
size_t max_char1 = 8;
for(unsigned int i = 0; i < a->columnNames.size(); i++) {
if(a->type[a->columnNames[i]] == 2) {
if (a->char_size[a->columnNames[i]] > max_char1)
max_char1 = a->char_size[a->columnNames[i]];
}
else
if(a->type[a->columnNames[i]] == 0 && a->string_map.find(a->columnNames[i]) != a->string_map.end()) {
auto s = a->string_map[a->columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if (len > max_char1)
max_char1 = len;
};
};
return max_char1;
};
size_t max_char(CudaSet* a, queue<string> field_names)
{
size_t max_char = 8;
while (!field_names.empty()) {
if (a->type[field_names.front()] == 2) {
if (a->char_size[field_names.front()] > max_char)
max_char = a->char_size[field_names.front()];
};
field_names.pop();
};
return max_char;
};
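// Split a set into segments when the listed columns would otherwise use more than about a third of the free GPU memory.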
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
size_t tot_sz = 0;
while(!cols.empty()) {
if(a->type[cols.front()] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[cols.front()];
cols.pop();
};
if(a->mRecCount*tot_sz > mem_available/3) { //default is 3
a->segCount = (a->mRecCount*tot_sz)/(mem_available/5) + 1;
a->maxRecs = (a->mRecCount/a->segCount)+1;
};
};
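// Gather a host-side character key through the current permutation and re-sort the permutation by it, ascending or descending.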
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
if (SortType.compare("DESC") == 0 )
str_sort_host(tmp, RecCount, permutation, 1, len);
else
str_sort_host(tmp, RecCount, permutation, 0, len);
}
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
// copy keys to temporary vector
cudaMemcpy( (void*)tmp, (void*) key, RecCount*len, cudaMemcpyDeviceToDevice);
// permute the keys
str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
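// Evaluate the filter of derived set s against one segment of its source set f: a zone-map check decides whether the whole segment passes, fails, or needs the row-level predicate, and qualifying row indices are stored in prm_d.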
void filter_op(const char *s, const char *f, unsigned int segment)
{
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
//std::clock_t start1 = std::clock();
if(a->mRecCount == 0 && !a->filtered) {
b = new CudaSet(0,1);
}
else {
if(verbose)
cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
b = varNames[s];
b->name = s;
b->string_map = a->string_map;
size_t cnt = 0;
b->sorted_fields = a->sorted_fields;
b->ts_cols = a->ts_cols;
allocColumns(a, b->fil_value);
if (b->prm_d.size() == 0) {
b->prm_d.resize(a->maxRecs);
};
cout << endl << "MAP CHECK start " << segment << endl;
char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
cout << endl << "MAP CHECK segment " << segment << " " << map_check << endl;
if(map_check == 'R') {
auto old_ph = phase_copy;
phase_copy = 0;
copyColumns(a, b->fil_value, segment, cnt);
phase_copy = old_ph;
bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
thrust::device_ptr<bool> bp((bool*)res);
b->prm_index = 'R';
b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, b->prm_d.begin(), thrust::identity<bool>());
cudaFree(res);
}
else {
b->prm_index = map_check;
if(map_check == 'A')
b->mRecCount = a->mRecCount;
else
b->mRecCount = 0;
};
if(segment == a->segCount-1)
a->deAllocOnDevice();
}
if(verbose)
cout << endl << "filter result " << b->mRecCount << endl;
}
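// Load the right table's join column f2 (and any other referenced columns) for the given segment range; uncompressed tables load the key first and the remaining columns in a second pass.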
size_t load_right(CudaSet* right, string f2, queue<string> op_g, queue<string> op_alt, size_t& rcount, unsigned int start_seg, unsigned int end_seg) {
size_t cnt_r = 0;
//if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
// need to allocate all right columns
if(right->not_compressed) {
queue<string> op_alt1;
op_alt1.push(f2);
cnt_r = load_queue(op_alt1, right, "", rcount, start_seg, end_seg, 1, 1);
queue<string> op_alt2;
while(!op_alt.empty()) {
if(f2.compare(op_alt.front())) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) {
op_alt2.push(op_alt.front());
};
};
op_alt.pop();
};
if(!op_alt2.empty())
cnt_r = load_queue(op_alt2, right, "", rcount, start_seg, end_seg, 0, 0);
}
else {
cnt_r = load_queue(op_alt, right, f2, rcount, start_seg, end_seg, 1, 1);
};
return cnt_r;
};
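// Append the records of set s to set f: disk-to-disk copies and renumbers segment files (merging string dictionaries), in-memory sets are concatenated, and an in-memory source is compressed into new on-disk segments.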
void insert_records(const char* f, const char* s) {
char buf[4096];
size_t size, maxRecs, cnt = 0;
string str_s, str_d;
if(varNames.find(s) == varNames.end()) {
process_error(3, "couldn't find " + string(s) );
};
CudaSet *a;
a = varNames.find(s)->second;
a->name = s;
if(varNames.find(f) == varNames.end()) {
process_error(3, "couldn't find " + string(f) );
};
CudaSet *b;
b = varNames.find(f)->second;
b->name = f;
// if both source and destination are on disk
cout << "SOURCES " << a->source << ":" << b->source << endl;
if(a->source && b->source) {
for(unsigned int i = 0; i < a->segCount; i++) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
if(a->type[a->columnNames[z]] != 2) {
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str_d = b->load_file_name + "." + a->columnNames[z] + "." + to_string(b->segCount + i);
cout << str_s << " " << str_d << endl;
FILE* source = fopen(str_s.c_str(), "rb");
FILE* dest = fopen(str_d.c_str(), "wb");
                while ((size = fread(buf, 1, sizeof(buf), source)) > 0) { // read in sizeof(buf) chunks so the copy cannot overflow buf
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
}
else { //merge strings
//read b's strings
str_s = b->load_file_name + "." + b->columnNames[z];
FILE* dest = fopen(str_s.c_str(), "rb");
auto len = b->char_size[b->columnNames[z]];
map<string, unsigned long long int> map_d;
buf[len] = 0;
unsigned long long cnt = 0;
while (fread(buf, len, 1, dest)) {
map_d[buf] = cnt;
cnt++;
};
fclose(dest);
unsigned long long int cct = cnt;
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i) + ".hash";
str_d = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".hash";
FILE* source = fopen(str_s.c_str(), "rb");
dest = fopen(str_d.c_str(), "wb");
                while ((size = fread(buf, 1, sizeof(buf), source)) > 0) { // read in sizeof(buf) chunks so the copy cannot overflow buf
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
str_s = a->load_file_name + "." + a->columnNames[z];
source = fopen(str_s.c_str(), "rb");
map<unsigned long long int, string> map_s;
buf[len] = 0;
cnt = 0;
while (fread(buf, len, 1, source)) {
map_s[cnt] = buf;
cnt++;
};
fclose(source);
queue<string> op_vx;
op_vx.push(a->columnNames[z]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->CopyColumnToGpu(a->columnNames[z], z, 0);
a->CopyColumnToHost(a->columnNames[z]);
str_d = b->load_file_name + "." + b->columnNames[z];
fstream f_file;
f_file.open(str_d.c_str(), ios::out|ios::app|ios::binary);
for(auto j = 0; j < a->mRecCount; j++) {
auto ss = map_s[a->h_columns_int[a->columnNames[z]][j]];
if(map_d.find(ss) == map_d.end()) { //add
f_file.write((char *)ss.c_str(), len);
a->h_columns_int[a->columnNames[z]][j] = cct;
cct++;
}
else {
a->h_columns_int[a->columnNames[z]][j] = map_d[ss];
};
};
f_file.close();
thrust::device_vector<int_type> d_col(a->mRecCount);
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, d_col.begin());
auto i_name = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".idx";
pfor_compress(thrust::raw_pointer_cast(d_col.data()), a->mRecCount*int_size, i_name, a->h_columns_int[a->columnNames[z]], 0);
};
};
};
if(a->maxRecs > b->maxRecs)
maxRecs = a->maxRecs;
else
maxRecs = b->maxRecs;
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->reWriteHeader(b->load_file_name, b->columnNames[i], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
};
}
else
if(!a->source && !b->source) { //if both source and destination are in memory
size_t oldCount = b->mRecCount;
b->resize(a->mRecCount);
for(unsigned int z = 0; z< b->mColumnCount; z++) {
if(b->type[a->columnNames[z]] == 0) {
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_int[b->columnNames[z]].begin() + oldCount);
}
else
if(b->type[a->columnNames[z]] == 1) {
thrust::copy(a->h_columns_float[a->columnNames[z]].begin(), a->h_columns_float[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_float[b->columnNames[z]].begin() + oldCount);
}
else {
cudaMemcpy(b->h_columns_char[b->columnNames[z]] + b->char_size[b->columnNames[z]]*oldCount, a->h_columns_char[a->columnNames[z]], a->char_size[a->columnNames[z]]*a->mRecCount, cudaMemcpyHostToHost);
};
};
}
else
if(!a->source && b->source) {
total_segments = b->segCount;
total_count = b->mRecCount;
        total_max = b->maxRecs;
queue<string> op_vx;
for(unsigned int i=0; i < a->columnNames.size(); i++)
op_vx.push(a->columnNames[i]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
for(unsigned int i = 0; i < a->segCount; i++) {
if (a->filtered) {
copyColumns(a, op_vx, i, cnt);
a->CopyToHost(0, a->mRecCount);
};
a->compress(b->load_file_name, 0, 1, i - (a->segCount-1), a->mRecCount, 0);
};
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->writeHeader(b->load_file_name, b->columnNames[i], total_segments);
};
};
};
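// Delete the rows of a disk-based set that match the current filter: each affected segment is re-filtered, the surviving rows are recompressed and written back, and emptied segments are removed.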
void delete_records(const char* f) {
CudaSet *a;
a = varNames.find(f)->second;
a->name = f;
size_t totalRemoved = 0;
size_t maxRecs = 0;
if(!a->keep) { // temporary variable
process_error(2, "Delete operator is only applicable to disk based sets\nfor deleting records from derived sets please use filter operator ");
}
else { // read matching segments, delete, compress and write on a disk replacing the original segments
string str, str_old;
queue<string> op_vx;
size_t cnt;
for ( auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
op_vx.push((*it).first);
if (std::find(a->columnNames.begin(), a->columnNames.end(), (*it).first) == a->columnNames.end()) {
if ((*it).second.col_type == 0) {
a->type[(*it).first] = 0;
a->decimal[(*it).first] = 0;
//a->h_columns_int[(*it).first] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
a->h_columns_int[(*it).first] = thrust::host_vector<int_type>();
a->d_columns_int[(*it).first] = thrust::device_vector<int_type>();
}
else
if((*it).second.col_type == 1) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 0;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else
if ((*it).second.col_type == 3) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 1;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else {
a->type[(*it).first] = 2;
a->decimal[(*it).first] = 0;
a->h_columns_char[(*it).first] = nullptr;
a->d_columns_char[(*it).first] = nullptr;
a->char_size[(*it).first] = (*it).second.col_length;
};
a->columnNames.push_back((*it).first);
}
};
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->prm_d.resize(a->maxRecs);
size_t cc = a->mRecCount;
size_t tmp;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, a->maxRecs*float_size));
unsigned int new_seg_count = 0;
char map_check;
for(unsigned int i = 0; i < a->segCount; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
if(verbose)
cout << "MAP CHECK segment " << i << " " << map_check << endl;
if(map_check != 'N') {
cnt = 0;
copyColumns(a, op_vx, i, cnt);
tmp = a->mRecCount;
if(a->mRecCount) {
bool* res = filter(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
thrust::device_ptr<bool> bp((bool*)res);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, a->prm_d.begin(), thrust::logical_not<bool>());
a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
cudaFree(res);
// cout << "Remained recs count " << a->mRecCount << endl;
if(a->mRecCount > maxRecs)
maxRecs = a->mRecCount;
if (a->mRecCount) {
totalRemoved = totalRemoved + (tmp - a->mRecCount);
if (a->mRecCount == tmp) { //none deleted
if(new_seg_count != i) {
for (auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
auto colname = (*it).first;
str_old = a->load_file_name + "." + colname + "." + to_string(i);
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
}
else { //some deleted
//cout << "writing segment " << new_seg_count << endl;
map<string, col_data> s = data_dict[a->load_file_name];
for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
string colname = (*it).first;
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
if(a->type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[colname], 0);
}
else
if(a->type[colname] == 1) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(a->decimal[colname]) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[colname], 1);
}
else {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[colname].begin());
fstream binary_file(str.c_str(),ios::out|ios::binary);
binary_file.write((char *)&a->mRecCount, 4);
binary_file.write((char *)(a->h_columns_float[colname].data()),a->mRecCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str + ".hash", a->h_columns_int[colname], 0);
};
};
new_seg_count++;
};
}
else {
totalRemoved = totalRemoved + tmp;
};
}
}
else {
if(new_seg_count != i) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str_old = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str = a->load_file_name + "." + a->columnNames[z] + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
maxRecs = a->maxRecs;
};
};
if (new_seg_count < a->segCount) {
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
//cout << "delete segment " << i << endl;
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str = a->load_file_name + "." + a->columnNames[z];
str += "." + to_string(i);
remove(str.c_str());
};
};
};
        for(unsigned int i = 0; i < a->columnNames.size(); i++) {
a->reWriteHeader(a->load_file_name, a->columnNames[i], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
};
a->mRecCount = cc;
a->prm_d.resize(0);
a->segCount = new_seg_count;
a->deAllocOnDevice();
cudaFree(d);
};
};
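// Persist the table/column data dictionary (names, types, lengths) to a binary file.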
void save_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len;
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
size_t len = data_dict.size();
binary_file.write((char *)&len, 8);
for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) {
str_len = (*it).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*it).first.data(), str_len);
map<string, col_data> s = (*it).second;
size_t len1 = s.size();
binary_file.write((char *)&len1, 8);
for (auto sit=s.begin() ; sit != s.end(); ++sit ) {
str_len = (*sit).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*sit).first.data(), str_len);
binary_file.write((char *)&(*sit).second.col_type, 4);
binary_file.write((char *)&(*sit).second.col_length, 4);
};
};
binary_file.close();
}
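// Load the table/column data dictionary written by save_col_data back into memory.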
void load_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len, recs, len1;
string str1, str2;
char buffer[4000];
unsigned int col_type, col_length;
fstream binary_file;
binary_file.open(file_name.c_str(),ios::in|ios::binary);
if(binary_file.is_open()) {
binary_file.read((char*)&recs, 8);
for(unsigned int i = 0; i < recs; i++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str1.assign(buffer, str_len);
binary_file.read((char*)&len1, 8);
for(unsigned int j = 0; j < len1; j++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str2.assign(buffer, str_len);
binary_file.read((char*)&col_type, 4);
binary_file.read((char*)&col_length, 4);
data_dict[str1][str2].col_type = col_type;
data_dict[str1][str2].col_length = col_length;
//cout << "data DICT " << str1 << " " << str2 << " " << col_type << " " << col_length << endl;
};
};
binary_file.close();
}
else {
cout << "Couldn't open data dictionary" << endl;
};
}
bool var_exists(CudaSet* a, string name) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), name) != a->columnNames.end())
return 1;
else
return 0;
}
int file_exist (const char *filename)
{
std::ifstream infile(filename);
return infile.good();
}
bool check_bitmap_file_exist(CudaSet* left, CudaSet* right)
{
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 0;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
return bitmaps_exist;
}
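// If every filter column of the right table has a join bitmap file on disk, move the right table's filter terms onto the left table (rewriting column references to the bitmap file names) and report success.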
bool check_bitmaps_exist(CudaSet* left, CudaSet* right)
{
//check if there are join bitmap indexes
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 1;
return 1;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
if(bitmaps_exist) {
while(!right->fil_nums.empty() ) {
left->fil_nums.push(right->fil_nums.front());
right->fil_nums.pop();
};
while(!right->fil_nums_precision.empty() ) {
left->fil_nums_precision.push(right->fil_nums_precision.front());
right->fil_nums_precision.pop();
};
while(!right->fil_nums_f.empty() ) {
left->fil_nums_f.push(right->fil_nums_f.front());
right->fil_nums_f.pop();
};
while(!right->fil_value.empty() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), right->fil_value.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + right->fil_value.front();
left->fil_value.push(fname);
}
else
left->fil_value.push(right->fil_value.front());
right->fil_value.pop();
};
bool add_and = 1;
if(left->fil_type.empty())
add_and = 0;
while(!right->fil_type.empty() ) {
left->fil_type.push(right->fil_type.front());
right->fil_type.pop();
};
if(add_and) {
left->fil_type.push("AND");
};
return 1;
}
else {
return 0;
};
}
void check_sort(const string str, const char* rtable, const char* rid)
{
CudaSet* right = varNames.find(rtable)->second;
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::app);
binary_file.write((char *)&right->sort_check, 1);
binary_file.close();
}
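// Refine a sort permutation by a dictionary-encoded string column: fetch each row's string from the dictionary file in permutation order, then sort those strings (on device or host) to update the permutation.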
void update_char_permutation(CudaSet* a, string colname, unsigned int* raw_ptr, string ord, void* temp, bool host)
{
auto s = a->string_map[colname];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
a->h_columns_char[colname] = new char[a->mRecCount*len];
memset(a->h_columns_char[colname], 0, a->mRecCount*len);
thrust::device_ptr<unsigned int> perm(raw_ptr);
thrust::device_ptr<int_type> temp_int((int_type*)temp);
thrust::gather(perm, perm+a->mRecCount, a->d_columns_int[colname].begin(), temp_int);
//for(int z = 0 ; z < a->mRecCount; z++) {
//cout << "Init vals " << a->d_columns_int[colname][z] << " " << perm[z] << " " << temp_int[z] << endl;
//};
//cout << "sz " << a->h_columns_int[colname].size() << " " << a->d_columns_int[colname].size() << " " << len << endl;
cudaMemcpy(thrust::raw_pointer_cast(a->h_columns_int[colname].data()), temp, 8*a->mRecCount, cudaMemcpyDeviceToHost);
FILE *f;
f = fopen(a->string_map[colname].c_str(), "rb");
for(int z = 0 ; z < a->mRecCount; z++) {
fseek(f, a->h_columns_int[colname][z] * len, SEEK_SET);
fread(a->h_columns_char[colname] + z*len, 1, len, f);
};
fclose(f);
if(!host) {
void *d;
cudaMalloc((void **) &d, a->mRecCount*len);
a->d_columns_char[colname] = (char*)d;
cudaMemcpy(a->d_columns_char[colname], a->h_columns_char[colname], len*a->mRecCount, cudaMemcpyHostToDevice);
if (ord.compare("DESC") == 0 )
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
cudaFree(d);
}
else {
if (ord.compare("DESC") == 0 )
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
};
}
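// Dictionary-encode an integer column and bit-pack the ordered codes into 64-bit words, writing the dictionary, packing parameters and packed values to file_name.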
void compress_int(const string file_name, const thrust::host_vector<int_type>& res)
{
std::vector<unsigned int> dict_val;
unsigned int bits_encoded;
set<int_type> dict_s;
map<int_type, unsigned int> d_ordered;
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_s.insert(f);
};
unsigned int i = 0;
for (auto it = dict_s.begin(); it != dict_s.end(); it++) {
d_ordered[*it] = i++;
};
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_val.push_back(d_ordered[f]);
};
bits_encoded = (unsigned int)ceil(log2(double(d_ordered.size()+1)));
//cout << "bits " << bits_encoded << endl;
unsigned int sz = (unsigned int)d_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&sz, 4);
for (auto it = d_ordered.begin(); it != d_ordered.end(); it++) {
binary_file.write((char*)(&(it->first)), int_size);
};
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, int_size);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
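// Return the named integer column of the set, or pop the most recent intermediate result from the expression stack.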
int_type* get_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->get_int_by_name(s1_val);
else {
t = exe_vectors.top();
exe_vectors.pop();
}
return t;
};
int_type* get_host_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) {
t = a->get_host_int_by_name(s1_val);
}
else {
t = exe_vectors.top();
thrust::device_ptr<int_type> st1((int_type*)t);
for(int z = 0; z < 10; z++)
cout << "RESVEC " << st1[z] << endl;
exe_vectors.pop();
}
return t;
};
unsigned int get_decimals(CudaSet* a, string s1_val, stack<unsigned int>& exe_precision) {
unsigned int t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->decimal_zeroes[s1_val];
else {
t = exe_precision.top();
exe_precision.pop();
}
return t;
};
#ifdef _WIN64
size_t getTotalSystemMemory()
{
MEMORYSTATUSEX status;
status.dwLength = sizeof(status);
GlobalMemoryStatusEx(&status);
return status.ullTotalPhys;
}
#else
size_t getTotalSystemMemory()
{
long pages = sysconf(_SC_PHYS_PAGES);
long page_size = sysconf(_SC_PAGE_SIZE);
return pages * page_size;
}
#endif
|
90e1b23b11237a7d93a5bd48996b601a6bd6dd20.hip | // !!! This is a file automatically generated by hipify!!!
/* This program is writen by qp09.
* usually just for fun.
* Mon December 14 2015
*/
#include "../../third_party/cuda/helper_cuda.h"
#include "../../gpu_utils/mem_op.h"
#include "GArrayNeurons.h"
int cudaAllocArray(void *pCpu, void *pGpu, int num)
{
GArrayNeurons *pGpuNeurons = (GArrayNeurons*)pGpu;
GArrayNeurons *p = (GArrayNeurons*)pCpu;
pGpuNeurons->p_start = copyToGPU<int>(p->p_start, num);
	pGpuNeurons->p_end = copyToGPU<int>(p->p_end, num);
pGpuNeurons->p_fire_time = copyToGPU<int>(p->p_fire_time, p->p_end[num-1]);
return 0;
}
int hipFreeArray(void *pGpu)
{
GArrayNeurons *pGpuNeurons = (GArrayNeurons*)pGpu;
//TODO delete fire_time arrays
gpuFree(pGpuNeurons->p_start);
gpuFree(pGpuNeurons->p_end);
gpuFree(pGpuNeurons->p_fire_time);
return 0;
}
| 90e1b23b11237a7d93a5bd48996b601a6bd6dd20.cu | /* This program is writen by qp09.
* usually just for fun.
* Mon December 14 2015
*/
#include "../../third_party/cuda/helper_cuda.h"
#include "../../gpu_utils/mem_op.h"
#include "GArrayNeurons.h"
int cudaAllocArray(void *pCpu, void *pGpu, int num)
{
GArrayNeurons *pGpuNeurons = (GArrayNeurons*)pGpu;
GArrayNeurons *p = (GArrayNeurons*)pCpu;
pGpuNeurons->p_start = copyToGPU<int>(p->p_start, num);
	pGpuNeurons->p_end = copyToGPU<int>(p->p_end, num);
pGpuNeurons->p_fire_time = copyToGPU<int>(p->p_fire_time, p->p_end[num-1]);
return 0;
}
int cudaFreeArray(void *pGpu)
{
GArrayNeurons *pGpuNeurons = (GArrayNeurons*)pGpu;
//TODO delete fire_time arrays
gpuFree(pGpuNeurons->p_start);
gpuFree(pGpuNeurons->p_end);
gpuFree(pGpuNeurons->p_fire_time);
return 0;
}
|
d61e031c5d1cfcf8a235c2b35c248dffab9f1663.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector_td_utilities.h>
#include "cuSmallConvOperator.h"
#include "cudaDeviceManager.h"
using namespace Gadgetron;
static inline
void setup_grid( unsigned int number_of_elements, dim3 *blockDim, dim3* gridDim)
{
int cur_device = cudaDeviceManager::Instance()->getCurrentDevice();
//int maxGridDim = cudaDeviceManager::Instance()->max_griddim(cur_device);
int maxBlockDim = cudaDeviceManager::Instance()->max_blockdim(cur_device);
int maxGridDim = 65535;
// The default one-dimensional block dimension is...
*blockDim = dim3(256);
*gridDim = dim3((number_of_elements+blockDim->x-1)/blockDim->x);
// Extend block/grid dimensions if we exceeded the maximum grid dimension
if( gridDim->x > maxGridDim){
blockDim->x = maxBlockDim;
gridDim->x = (number_of_elements+blockDim->x-1)/blockDim->x;
}
if( gridDim->x > maxGridDim ){
gridDim->x = (unsigned int)::floor(std::sqrt(float(number_of_elements)/float(blockDim->x)));
unsigned int num_elements_1d = blockDim->x*gridDim->x;
gridDim->y *= ((number_of_elements+num_elements_1d-1)/num_elements_1d);
}
if( gridDim->x > maxGridDim || gridDim->y > maxGridDim){
// If this ever becomes an issue, there is an additional grid dimension to explore for compute models >= 2.0.
throw cuda_error("setup_grid(): too many elements requested.");
}
}
template<class T, unsigned int D, int STENCIL_SIZE, int DIM > __global__ static void tensorframeletKernel(const T* __restrict__ in, T* __restrict__ out, vector_td<vector_td<float,STENCIL_SIZE>,STENCIL_SIZE> stencil, vector_td<int,D> dims,int stride, bool accumulate){
const int elements = prod(dims);
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if (idx < prod(dims) ){
vector_td<T,STENCIL_SIZE> result(0);
auto co = idx_to_co(idx,dims);
co[DIM] = (co[DIM]-stride*(STENCIL_SIZE/2+1)+dims[DIM])%dims[DIM];
for (int i = 0; i < STENCIL_SIZE; i++){
co[DIM] = (co[DIM]+stride+dims[DIM])%dims[DIM];
T element = in[co_to_idx(co,dims)];
for (int k = 0; k < STENCIL_SIZE; k++)
result[k] += element*stencil[k][i];
}
if (accumulate)
for (int i = 0; i < STENCIL_SIZE; i++)
out[idx+i*elements] += result[i];
else
for (int i = 0; i < STENCIL_SIZE; i++)
out[idx+i*elements] = result[i];
}
};
template<class T, unsigned int D, int STENCIL_SIZE, int DIM> static void tensorFramelet(cuNDArray<T>* in, cuNDArray<T>* out,
vector_td<T,STENCIL_SIZE> stencil,int stride,
bool accumulate){
auto dims = *in->get_dimensions();
auto vdims = vector_td<int,D>(from_std_vector<size_t,D>(dims));
const size_t elements_per_batch = prod(vdims);
const size_t elements_total = in->get_number_of_elements();
dim3 grid,block;
setup_grid(elements_per_batch,&block,&grid);
for (int i = 0; i < elements_total/elements_per_batch; i++){
hipLaunchKernelGGL(( smallConvKernel<T,D,STENCIL_SIZE,DIM>), dim3(grid),dim3(block), 0, 0, in->get_data_ptr()+i*elements_per_batch,
out->get_data_ptr()+i*elements_per_batch, stencil, vdims,stride,accumulate);
}
}
template<class T, unsigned int D, unsigned int STENCIL_SIZE> void cuSmallConvOperator<T,D,STENCIL_SIZE>::mult_M(cuNDArray<T> *in, cuNDArray<T> *out,
bool accumulate) {
switch(dim) {
case 0:
smallConv<T,D,STENCIL_SIZE,0>(in,out,stencil,this->stride,accumulate);
break;
case 1:
smallConv<T,D,STENCIL_SIZE,1>(in,out,stencil,this->stride,accumulate);
break;
case 2:
smallConv<T,D,STENCIL_SIZE,2>(in,out,stencil,this->stride,accumulate);
break;
case 3:
smallConv<T,D,STENCIL_SIZE,3>(in,out,stencil,this->stride,accumulate);
break;
default:
throw std::runtime_error("Unsupported dimension");
}
}
template<class T, unsigned int D,unsigned int STENCIL_SIZE> void cuSmallConvOperator<T,D,STENCIL_SIZE>::mult_MH(cuNDArray<T> *in, cuNDArray<T> *out,
bool accumulate) {
switch(dim) {
case 0:
smallConv<T,D,STENCIL_SIZE,0>(in,out,reverse_stencil,this->stride,accumulate);
break;
case 1:
smallConv<T,D,STENCIL_SIZE,1>(in,out,reverse_stencil,this->stride,accumulate);
break;
case 2:
smallConv<T,D,STENCIL_SIZE,2>(in,out,reverse_stencil,this->stride,accumulate);
break;
case 3:
smallConv<T,D,STENCIL_SIZE,3>(in,out,reverse_stencil,this->stride,accumulate);
break;
default:
throw std::runtime_error("Unsupported dimension");
}
}
template class cuSmallConvOperator<float,1,3>;
template class cuSmallConvOperator<float,2,3>;
template class cuSmallConvOperator<float,3,3>;
template class cuSmallConvOperator<float,4,3>; | d61e031c5d1cfcf8a235c2b35c248dffab9f1663.cu | #include <vector_td_utilities.h>
#include "cuSmallConvOperator.h"
#include "cudaDeviceManager.h"
using namespace Gadgetron;
static inline
void setup_grid( unsigned int number_of_elements, dim3 *blockDim, dim3* gridDim)
{
int cur_device = cudaDeviceManager::Instance()->getCurrentDevice();
//int maxGridDim = cudaDeviceManager::Instance()->max_griddim(cur_device);
int maxBlockDim = cudaDeviceManager::Instance()->max_blockdim(cur_device);
int maxGridDim = 65535;
// The default one-dimensional block dimension is...
*blockDim = dim3(256);
*gridDim = dim3((number_of_elements+blockDim->x-1)/blockDim->x);
// Extend block/grid dimensions if we exceeded the maximum grid dimension
if( gridDim->x > maxGridDim){
blockDim->x = maxBlockDim;
gridDim->x = (number_of_elements+blockDim->x-1)/blockDim->x;
}
if( gridDim->x > maxGridDim ){
gridDim->x = (unsigned int)std::floor(std::sqrt(float(number_of_elements)/float(blockDim->x)));
unsigned int num_elements_1d = blockDim->x*gridDim->x;
gridDim->y *= ((number_of_elements+num_elements_1d-1)/num_elements_1d);
}
if( gridDim->x > maxGridDim || gridDim->y > maxGridDim){
// If this ever becomes an issue, there is an additional grid dimension to explore for compute models >= 2.0.
throw cuda_error("setup_grid(): too many elements requested.");
}
}
template<class T, unsigned int D, int STENCIL_SIZE, int DIM > __global__ static void tensorframeletKernel(const T* __restrict__ in, T* __restrict__ out, vector_td<vector_td<float,STENCIL_SIZE>,STENCIL_SIZE> stencil, vector_td<int,D> dims,int stride, bool accumulate){
const int elements = prod(dims);
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if (idx < prod(dims) ){
vector_td<T,STENCIL_SIZE> result(0);
auto co = idx_to_co(idx,dims);
co[DIM] = (co[DIM]-stride*(STENCIL_SIZE/2+1)+dims[DIM])%dims[DIM];
for (int i = 0; i < STENCIL_SIZE; i++){
co[DIM] = (co[DIM]+stride+dims[DIM])%dims[DIM];
T element = in[co_to_idx(co,dims)];
for (int k = 0; k < STENCIL_SIZE; k++)
result[k] += element*stencil[k][i];
}
if (accumulate)
for (int i = 0; i < STENCIL_SIZE; i++)
out[idx+i*elements] += result[i];
else
for (int i = 0; i < STENCIL_SIZE; i++)
out[idx+i*elements] = result[i];
}
};
template<class T, unsigned int D, int STENCIL_SIZE, int DIM> static void tensorFramelet(cuNDArray<T>* in, cuNDArray<T>* out,
vector_td<T,STENCIL_SIZE> stencil,int stride,
bool accumulate){
auto dims = *in->get_dimensions();
auto vdims = vector_td<int,D>(from_std_vector<size_t,D>(dims));
const size_t elements_per_batch = prod(vdims);
const size_t elements_total = in->get_number_of_elements();
dim3 grid,block;
setup_grid(elements_per_batch,&block,&grid);
for (int i = 0; i < elements_total/elements_per_batch; i++){
smallConvKernel<T,D,STENCIL_SIZE,DIM><<<grid,block>>>(in->get_data_ptr()+i*elements_per_batch,
out->get_data_ptr()+i*elements_per_batch, stencil, vdims,stride,accumulate);
}
}
template<class T, unsigned int D, unsigned int STENCIL_SIZE> void cuSmallConvOperator<T,D,STENCIL_SIZE>::mult_M(cuNDArray<T> *in, cuNDArray<T> *out,
bool accumulate) {
switch(dim) {
case 0:
smallConv<T,D,STENCIL_SIZE,0>(in,out,stencil,this->stride,accumulate);
break;
case 1:
smallConv<T,D,STENCIL_SIZE,1>(in,out,stencil,this->stride,accumulate);
break;
case 2:
smallConv<T,D,STENCIL_SIZE,2>(in,out,stencil,this->stride,accumulate);
break;
case 3:
smallConv<T,D,STENCIL_SIZE,3>(in,out,stencil,this->stride,accumulate);
break;
default:
throw std::runtime_error("Unsupported dimension");
}
}
template<class T, unsigned int D,unsigned int STENCIL_SIZE> void cuSmallConvOperator<T,D,STENCIL_SIZE>::mult_MH(cuNDArray<T> *in, cuNDArray<T> *out,
bool accumulate) {
switch(dim) {
case 0:
smallConv<T,D,STENCIL_SIZE,0>(in,out,reverse_stencil,this->stride,accumulate);
break;
case 1:
smallConv<T,D,STENCIL_SIZE,1>(in,out,reverse_stencil,this->stride,accumulate);
break;
case 2:
smallConv<T,D,STENCIL_SIZE,2>(in,out,reverse_stencil,this->stride,accumulate);
break;
case 3:
smallConv<T,D,STENCIL_SIZE,3>(in,out,reverse_stencil,this->stride,accumulate);
break;
default:
throw std::runtime_error("Unsupported dimension");
}
}
template class cuSmallConvOperator<float,1,3>;
template class cuSmallConvOperator<float,2,3>;
template class cuSmallConvOperator<float,3,3>;
template class cuSmallConvOperator<float,4,3>; |
5b572992753d712baa21d72b58d3eead6c03dc94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void jacobi_init( const int x_inner, const int y_inner, const int halo_depth, const double* density, const double* energy, const double rx, const double ry, double* kx, double* ky, double* u0, double* u, const int coefficient)
{
const int gid = threadIdx.x+blockIdx.x*blockDim.x;
if(gid >= x_inner*y_inner) return;
const int x = x_inner + 2*halo_depth;
const int col = gid % x_inner;
const int row = gid / x_inner;
const int off0 = halo_depth*(x + 1);
const int index = off0 + col + row*x;
const double u_temp = energy[index]*density[index];
u0[index] = u_temp;
u[index] = u_temp;
if(row == 0 || col == 0) return;
double density_center;
double density_left;
double density_down;
if(coefficient == CONDUCTIVITY)
{
density_center = density[index];
density_left = density[index-1];
density_down = density[index-x];
}
else if(coefficient == RECIP_CONDUCTIVITY)
{
density_center = 1.0/density[index];
density_left = 1.0/density[index-1];
density_down = 1.0/density[index-x];
}
kx[index] = rx*(density_left+density_center) /
(2.0*density_left*density_center);
ky[index] = ry*(density_down+density_center) /
(2.0*density_down*density_center);
} | 5b572992753d712baa21d72b58d3eead6c03dc94.cu | #include "includes.h"
__global__ void jacobi_init( const int x_inner, const int y_inner, const int halo_depth, const double* density, const double* energy, const double rx, const double ry, double* kx, double* ky, double* u0, double* u, const int coefficient)
{
const int gid = threadIdx.x+blockIdx.x*blockDim.x;
if(gid >= x_inner*y_inner) return;
const int x = x_inner + 2*halo_depth;
const int col = gid % x_inner;
const int row = gid / x_inner;
const int off0 = halo_depth*(x + 1);
const int index = off0 + col + row*x;
const double u_temp = energy[index]*density[index];
u0[index] = u_temp;
u[index] = u_temp;
if(row == 0 || col == 0) return;
double density_center;
double density_left;
double density_down;
if(coefficient == CONDUCTIVITY)
{
density_center = density[index];
density_left = density[index-1];
density_down = density[index-x];
}
else if(coefficient == RECIP_CONDUCTIVITY)
{
density_center = 1.0/density[index];
density_left = 1.0/density[index-1];
density_down = 1.0/density[index-x];
}
kx[index] = rx*(density_left+density_center) /
(2.0*density_left*density_center);
ky[index] = ry*(density_down+density_center) /
(2.0*density_down*density_center);
} |
638ae3da175e5f7e5a9ac65cc08f7c0adb7a3565.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
/*
COMPILE --> nvcc 2DstencilGPUSharedMemoryBlankBorderTimeSpaceSharingOpencv.cu -o go `pkg-config --cflags --libs opencv` -w
EXECUTE --> ./main.exe
*/
//**********
//**OPENCV**
//**********
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdio.h>
#include <opencv2/imgcodecs.hpp>
#include <math.h>
#include <string>
#define JAN_OFFSET 0
using namespace cv;
using namespace std;
void CallBackFunc(int event, int x, int y, int flags, void* userdata)
{
Mat *img = (Mat*)userdata;
if ( event == EVENT_LBUTTONDOWN )
{
cout << "Left button of the mouse is clicked - position (" << x << ", " << y << ")" << endl;
img->at<uchar>(Point(x,y)) = 16;
}
else if ( event == EVENT_RBUTTONDOWN )
{
// cout << "Right button of the mouse is clicked - position (" << x << ", " << y << ")" << endl;
}
else if ( event == EVENT_MBUTTONDOWN )
{
//cout << "Middle button of the mouse is clicked - position (" << x << ", " << y << ")" << endl;
}
else if ( event == EVENT_MOUSEMOVE )
{
// cout << "Mouse move over the window - position (" << x << ", " << y << ")" << endl;
}
}
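// Thin wrapper around an OpenCV window: creates and positions it, and hooks the mouse callback so a left click paints into the bound image.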
class Window
{
char *m_name;
public:
Window(char *name, int tam_ja, int x, int y,Mat *img = NULL)
{
m_name = name;
        namedWindow(m_name, WINDOW_NORMAL | CV_GUI_NORMAL);
moveWindow(m_name, tam_ja * x + JAN_OFFSET, tam_ja * y + JAN_OFFSET);
resizeWindow(m_name, tam_ja, tam_ja);
setMouseCallback(m_name, CallBackFunc, img);
}
void imshow(Mat img)
{
cv::imshow(m_name, img);
}
void createTrackbar(char *trackName, int *var, int max_val)
{
cv::createTrackbar(trackName, m_name, var, max_val);
}
};
//**********
//**OPENCV**
//**********
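// Apply a cross-shaped stencil of radius k/2 around (x,y) of the X-wide input buffer and store the result, clamped to 255, at (Gx,Gy) of the GX-wide output buffer.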
__device__ void _2Dstencil_(int *d_e,int *d_r,float* c_coeff,int X,int Y,int k, int x, int y,int GX,int Gx,int Gy)
{
int h_e_i;
int h_r_i = x + ( y * (X) );
h_e_i = h_r_i;
int temp = d_e[h_r_i];
temp *= c_coeff[0];
for(int lk =1;lk<(k/2)+1;lk++)
{
h_e_i = (x+lk) + ( (y) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
h_e_i = (x-lk) + ( (y) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
h_e_i = (x) + ( (y+lk) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
h_e_i = (x) + ( (y-lk) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
}
h_r_i = Gx + ( (Gy) * (GX) );
if(temp < 255)
d_r[h_r_i] = temp;
else
d_r[h_r_i] = 255;
}
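// Tiled stencil kernel: stage the block plus a halo of k/2*times cells in shared memory, run times-1 sweeps entirely in shared memory, then write the final sweep to global memory.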
__global__ void _2Dstencil_global(int *d_e,int *d_r,float *c_coeff,int X,int Y,int k,int times){
int x,y;//,h_e_i,h_r_i,Xs,Ys,Dx,Dy;
x = threadIdx.x + (blockIdx.x*blockDim.x);
y = threadIdx.y + (blockIdx.y*blockDim.y);
int k2 = k/2*times;
extern __shared__ int shared[];
int blockThreadIndex = threadIdx.x + threadIdx.y*blockDim.x;
// Xs = threadIdx.x;
// Ys = threadIdx.y;
int Dx = blockDim.x+(k*times);
int Dy = blockDim.y+(k*times);
int sharedTam = Dx*Dy;
int * sharedRes = &shared[sharedTam];
for(int stride=blockThreadIndex;stride<sharedTam;stride+=(blockDim.x*blockDim.y))
{
int globalIdx = (blockIdx.x*blockDim.x)-k2+stride%Dx + ((blockIdx.y*blockDim.y)-k2+stride/Dx)*X;
if(globalIdx > 0 && (blockIdx.x*blockDim.x)-k2+stride%Dx < X && ((blockIdx.y*blockDim.y)-k2+stride/Dx)<Y)
shared[stride] = d_e[globalIdx];
else
shared[stride] = 0;
}
__syncthreads();
for(int t=times-1;t>0;t--)
{
//_2Dstencil_(shared,sharedRes,c_coeff,Dx,Dy,k,threadIdx.x+k2,threadIdx.y+k2,Dx,threadIdx.x+k2,threadIdx.y+k2);
int tDx = blockDim.x+(t*k);
int tDy = blockDim.y+(t*k);
int tk2 = (times-t)*k/2;
// int tDx = blockDim.x+(1*k);
// int tDy = blockDim.y+(1*k);
// int tk2 = (1)*k/2;
int tSharedTam = tDx * tDy;
for(int stride=blockThreadIndex;stride<tSharedTam;stride+=(blockDim.x*blockDim.y))
{
_2Dstencil_(shared,sharedRes,c_coeff,Dx,Dy,k,(stride%tDx)+tk2,(stride/tDx)+tk2,Dx,(stride%tDx)+tk2,(stride/tDx)+tk2);
}
__syncthreads();
for(int stride=blockThreadIndex;stride<sharedTam;stride+=(blockDim.x*blockDim.y))
{
shared[stride]=sharedRes[stride];
}
__syncthreads();
}
_2Dstencil_(shared,d_r,c_coeff,Dx,Dy,k,threadIdx.x+k2,threadIdx.y+k2,X,x,y);
// for(int stride=blockThreadIndex;stride<sharedTam;stride+=(blockDim.x*blockDim.y))
// {
// int globalIdx = (blockIdx.x*blockDim.x)-k2+stride%Dx + ((blockIdx.y*blockDim.y)-k2+stride/Dx)*X;
// if(globalIdx > 0 && (blockIdx.x*blockDim.x)-k2+stride%Dx < X && ((blockIdx.y*blockDim.y)-k2+stride/Dx)<Y)
// d_r[globalIdx] = sharedRes[stride];
// }
}
int main(int argc, char* argv[]) {
int *h_e,*h_r;
int *d_e, *d_r;
int size,tam,sharedSize,sharedTam;
int X=32;
int Y=32;
int k=4;
int times = 1;
int BX=32;
int BY=32;
int GX=1;
int GY=1;
float *c_coeff,*d_c_coeff;
if(argc > 1)
{
X = atoi(argv[1]);
Y = X;
}
if(argc > 2)
{
k = atoi(argv[2]);
}
if(argc > 3)
{
times = atoi(argv[3]);
}
if(X>32)
{
GX = ceil((float)X/(float)32);
BX = 32;
}
if(Y>32)
{
GY = ceil((float)Y/(float)32);
BY = 32;
}
dim3 block_dim(BX,BY,1);
dim3 grid_dim(GX,GY,1);
//sharedSize = ((block_dim.x+k)*(block_dim.y+k))*sizeof(int);
sharedSize = ((block_dim.x+(k*times))*(block_dim.y+(k*times)))*sizeof(int)*2;
//sharedTam = ((block_dim.x+(k*2))*(block_dim.y+(k*2)));
size = X * Y * sizeof(int);
tam = X * Y;
h_e = (int*) malloc(size);
h_r = (int*) malloc(size);
c_coeff = (float*)malloc((k/2+1)*sizeof(float));
hipMalloc(&d_e, size);
hipMalloc(&d_r, size);
hipMalloc(&d_c_coeff,(k/2+1)*sizeof(float));
printf("\n coefs \n");
for(int i=0;i<(k/2+1);i++)
{
c_coeff[i]=(float)((k/2+1)-i)/(float)(k/2+1);
}
//c_coeff[0] = 0.0;
for(int i=0;i<(k/2+1);i++)
{
printf(" %f",c_coeff[i]);
}
printf("\n coefs \n");
//**********
//**OPENCV**
//**********
if (argc < 2)
{
printf("\nespecifique a imagem\n");
return -1;
}
//Mat orig = Mat::zeros(1024,1024,)//imread("doidera2.PNG"); //imread(argv[1]);
Mat orig = Mat::zeros(X,Y, CV_8U);
Mat result = Mat::zeros(X,Y, CV_8U);
//Window original("orig", 600, 0, 0,&orig);
Window resultado("result", 600, 2, 0,&result);
//**********
//**OPENCV**
//**********
// FILE *arq;
// arq = fopen("entrada.txt", "rt");
// for(int i=0;i<X;i++)
// for(int j=0;j<Y;j++)
// {
// //fscanf(arq," %d",&h_e[i+j*X]);
// h_e[i+j*X] = 0;
// if(i > 100 && i < 150 && j > 100 && j < 150 )
// h_e[i+j*X] = 2;
// orig.at<uchar>(Point(i,j)) = h_e[i+j*X];
// }
// //Mat orig = Mat(1024,1024, CV_8U, h_e);
// fclose(arq);
while (true)
{
for(int i=0;i<X;i++)
for(int j=0;j<Y;j++)
{
h_e[i+j*X] = (int)result.at<uchar>(Point(i,j));
}
/* Copy vectors from host memory to device memory */
hipMemcpy(d_e, h_e, size, hipMemcpyHostToDevice);
hipMemcpy(d_c_coeff, c_coeff, (k/2+1)*sizeof(float), hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate (&start);
hipEventCreate (&stop);
hipEventRecord (start, 0);
/******************
*** Kernel Call ***
*******************/
//_3Dstencil_global<<<blks,th_p_blk>>>(d_e,d_r,X,Y,Z);
hipLaunchKernelGGL(( _2Dstencil_global), dim3(grid_dim),dim3(block_dim),sharedSize, 0, d_e,d_r,d_c_coeff,X,Y,k,times);
hipError_t err = hipSuccess;
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch _3Dstencil_global kernel (error code %s)!\n", hipGetErrorString(err));
}
/******************
*** Kernel Call ***
*******************/
hipDeviceSynchronize();
hipEventRecord (stop, 0);
hipEventSynchronize (stop);
float elapsedTime;
hipEventElapsedTime (&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
//printf("X %d || Y %d \nBX %d || BY %d \n",X,Y,BX,BY);
printf ("[%d,%.5f],\n", tam,elapsedTime);
hipMemcpy(h_r, d_r, size, hipMemcpyDeviceToHost);
for(int i=0;i<X;i++)
for(int j=0;j<Y;j++)
result.at<uchar>(Point(i,j)) = (uchar)h_r[i+j*X];
//original.imshow(orig);
resultado.imshow(result);
int *temp = h_e;
h_e = h_r;
h_r = temp;
// Wait until a key is pressed, then break the loop
if (waitKey(0) == 27) //ESC == 27
{
break;
}
}
hipFree(d_e);
hipFree(d_r);
hipFree(d_c_coeff);
std::free(h_e);
std::free(h_r);
std::free(c_coeff);
return 0;
} /* main */
| 638ae3da175e5f7e5a9ac65cc08f7c0adb7a3565.cu | #include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
/*
COMPILE --> nvcc 2DstencilGPUSharedMemoryBlankBorderTimeSpaceSharingOpencv.cu -o go `pkg-config --cflags --libs opencv` -w
EXECUTE --> ./go
*/
//**********
//**OPENCV**
//**********
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdio.h>
#include <opencv2/imgcodecs.hpp>
#include <math.h>
#include <string>
#define JAN_OFFSET 0
using namespace cv;
using namespace std;
void CallBackFunc(int event, int x, int y, int flags, void* userdata)
{
Mat *img = (Mat*)userdata;
if ( event == EVENT_LBUTTONDOWN )
{
cout << "Left button of the mouse is clicked - position (" << x << ", " << y << ")" << endl;
img->at<uchar>(Point(x,y)) = 16;
}
else if ( event == EVENT_RBUTTONDOWN )
{
// cout << "Right button of the mouse is clicked - position (" << x << ", " << y << ")" << endl;
}
else if ( event == EVENT_MBUTTONDOWN )
{
//cout << "Middle button of the mouse is clicked - position (" << x << ", " << y << ")" << endl;
}
else if ( event == EVENT_MOUSEMOVE )
{
// cout << "Mouse move over the window - position (" << x << ", " << y << ")" << endl;
}
}
class Window
{
char *m_name;
public:
Window(char *name, int tam_ja, int x, int y,Mat *img = NULL)
{
m_name = name;
namedWindow(m_name, WINDOW_NORMAL & CV_GUI_NORMAL);
moveWindow(m_name, tam_ja * x + JAN_OFFSET, tam_ja * y + JAN_OFFSET);
resizeWindow(m_name, tam_ja, tam_ja);
setMouseCallback(m_name, CallBackFunc, img);
}
void imshow(Mat img)
{
cv::imshow(m_name, img);
}
void createTrackbar(char *trackName, int *var, int max_val)
{
cv::createTrackbar(trackName, m_name, var, max_val);
}
};
//**********
//**OPENCV**
//**********
__device__ void _2Dstencil_(int *d_e,int *d_r,float* c_coeff,int X,int Y,int k, int x, int y,int GX,int Gx,int Gy)
{
int h_e_i;
int h_r_i = x + ( y * (X) );
h_e_i = h_r_i;
int temp = d_e[h_r_i];
temp *= c_coeff[0];
for(int lk =1;lk<(k/2)+1;lk++)
{
h_e_i = (x+lk) + ( (y) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
h_e_i = (x-lk) + ( (y) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
h_e_i = (x) + ( (y+lk) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
h_e_i = (x) + ( (y-lk) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
}
h_r_i = Gx + ( (Gy) * (GX) );
if(temp < 255)
d_r[h_r_i] = temp;
else
d_r[h_r_i] = 255;
}
__global__ void _2Dstencil_global(int *d_e,int *d_r,float *c_coeff,int X,int Y,int k,int times){
int x,y;//,h_e_i,h_r_i,Xs,Ys,Dx,Dy;
x = threadIdx.x + (blockIdx.x*blockDim.x);
y = threadIdx.y + (blockIdx.y*blockDim.y);
int k2 = k/2*times;
extern __shared__ int shared[];
int blockThreadIndex = threadIdx.x + threadIdx.y*blockDim.x;
// Xs = threadIdx.x;
// Ys = threadIdx.y;
int Dx = blockDim.x+(k*times);
int Dy = blockDim.y+(k*times);
int sharedTam = Dx*Dy;
int * sharedRes = &shared[sharedTam];
for(int stride=blockThreadIndex;stride<sharedTam;stride+=(blockDim.x*blockDim.y))
{
int globalIdx = (blockIdx.x*blockDim.x)-k2+stride%Dx + ((blockIdx.y*blockDim.y)-k2+stride/Dx)*X;
if(globalIdx > 0 && (blockIdx.x*blockDim.x)-k2+stride%Dx < X && ((blockIdx.y*blockDim.y)-k2+stride/Dx)<Y)
shared[stride] = d_e[globalIdx];
else
shared[stride] = 0;
}
__syncthreads();
for(int t=times-1;t>0;t--)
{
//_2Dstencil_(shared,sharedRes,c_coeff,Dx,Dy,k,threadIdx.x+k2,threadIdx.y+k2,Dx,threadIdx.x+k2,threadIdx.y+k2);
int tDx = blockDim.x+(t*k);
int tDy = blockDim.y+(t*k);
int tk2 = (times-t)*k/2;
// int tDx = blockDim.x+(1*k);
// int tDy = blockDim.y+(1*k);
// int tk2 = (1)*k/2;
int tSharedTam = tDx * tDy;
for(int stride=blockThreadIndex;stride<tSharedTam;stride+=(blockDim.x*blockDim.y))
{
_2Dstencil_(shared,sharedRes,c_coeff,Dx,Dy,k,(stride%tDx)+tk2,(stride/tDx)+tk2,Dx,(stride%tDx)+tk2,(stride/tDx)+tk2);
}
__syncthreads();
for(int stride=blockThreadIndex;stride<sharedTam;stride+=(blockDim.x*blockDim.y))
{
shared[stride]=sharedRes[stride];
}
__syncthreads();
}
_2Dstencil_(shared,d_r,c_coeff,Dx,Dy,k,threadIdx.x+k2,threadIdx.y+k2,X,x,y);
// for(int stride=blockThreadIndex;stride<sharedTam;stride+=(blockDim.x*blockDim.y))
// {
// int globalIdx = (blockIdx.x*blockDim.x)-k2+stride%Dx + ((blockIdx.y*blockDim.y)-k2+stride/Dx)*X;
// if(globalIdx > 0 && (blockIdx.x*blockDim.x)-k2+stride%Dx < X && ((blockIdx.y*blockDim.y)-k2+stride/Dx)<Y)
// d_r[globalIdx] = sharedRes[stride];
// }
}
int main(int argc, char* argv[]) {
int *h_e,*h_r;
int *d_e, *d_r;
int size,tam,sharedSize,sharedTam;
int X=32;
int Y=32;
int k=4;
int times = 1;
int BX=32;
int BY=32;
int GX=1;
int GY=1;
float *c_coeff,*d_c_coeff;
if(argc > 1)
{
X = atoi(argv[1]);
Y = X;
}
if(argc > 2)
{
k = atoi(argv[2]);
}
if(argc > 3)
{
times = atoi(argv[3]);
}
if(X>32)
{
GX = ceil((float)X/(float)32);
BX = 32;
}
if(Y>32)
{
GY = ceil((float)Y/(float)32);
BY = 32;
}
dim3 block_dim(BX,BY,1);
dim3 grid_dim(GX,GY,1);
//sharedSize = ((block_dim.x+k)*(block_dim.y+k))*sizeof(int);
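// Twice the tile footprint: the kernel splits the dynamic shared memory into
// two buffers (shared and sharedRes) of (blockDim + k*times)^2 ints each.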
sharedSize = ((block_dim.x+(k*times))*(block_dim.y+(k*times)))*sizeof(int)*2;
//sharedTam = ((block_dim.x+(k*2))*(block_dim.y+(k*2)));
size = X * Y * sizeof(int);
tam = X * Y;
h_e = (int*) malloc(size);
h_r = (int*) malloc(size);
c_coeff = (float*)malloc((k/2+1)*sizeof(float));
cudaMalloc(&d_e, size);
cudaMalloc(&d_r, size);
cudaMalloc(&d_c_coeff,(k/2+1)*sizeof(float));
printf("\n coefs \n");
for(int i=0;i<(k/2+1);i++)
{
c_coeff[i]=(float)((k/2+1)-i)/(float)(k/2+1);
}
//c_coeff[0] = 0.0;
for(int i=0;i<(k/2+1);i++)
{
printf(" %f",c_coeff[i]);
}
printf("\n coefs \n");
//**********
//**OPENCV**
//**********
if (argc < 2)
{
printf("\nespecifique a imagem\n");
return -1;
}
//Mat orig = Mat::zeros(1024,1024,)//imread("doidera2.PNG"); //imread(argv[1]);
Mat orig = Mat::zeros(X,Y, CV_8U);
Mat result = Mat::zeros(X,Y, CV_8U);
//Window original("orig", 600, 0, 0,&orig);
Window resultado("result", 600, 2, 0,&result);
//**********
//**OPENCV**
//**********
// FILE *arq;
// arq = fopen("entrada.txt", "rt");
// for(int i=0;i<X;i++)
// for(int j=0;j<Y;j++)
// {
// //fscanf(arq," %d",&h_e[i+j*X]);
// h_e[i+j*X] = 0;
// if(i > 100 && i < 150 && j > 100 && j < 150 )
// h_e[i+j*X] = 2;
// orig.at<uchar>(Point(i,j)) = h_e[i+j*X];
// }
// //Mat orig = Mat(1024,1024, CV_8U, h_e);
// fclose(arq);
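// Interactive loop: left-clicks (see CallBackFunc) paint the value 16 into
// `result`; each pass copies that image into h_e, runs the stencil on the
// GPU, and displays the diffused frame until ESC is pressed.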
while (true)
{
for(int i=0;i<X;i++)
for(int j=0;j<Y;j++)
{
h_e[i+j*X] = (int)result.at<uchar>(Point(i,j));
}
/* Copy vectors from host memory to device memory */
cudaMemcpy(d_e, h_e, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_c_coeff, c_coeff, (k/2+1)*sizeof(float), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start, 0);
/******************
*** Kernel Call ***
*******************/
//_3Dstencil_global<<<blks,th_p_blk>>>(d_e,d_r,X,Y,Z);
_2Dstencil_global<<<grid_dim,block_dim,sharedSize>>>(d_e,d_r,d_c_coeff,X,Y,k,times);
cudaError_t err = cudaSuccess;
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch _3Dstencil_global kernel (error code %s)!\n", cudaGetErrorString(err));
}
/******************
*** Kernel Call ***
*******************/
cudaDeviceSynchronize();
cudaEventRecord (stop, 0);
cudaEventSynchronize (stop);
float elapsedTime;
cudaEventElapsedTime (&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//printf("X %d || Y %d \nBX %d || BY %d \n",X,Y,BX,BY);
printf ("[%d,%.5f],\n", tam,elapsedTime);
cudaMemcpy(h_r, d_r, size, cudaMemcpyDeviceToHost);
for(int i=0;i<X;i++)
for(int j=0;j<Y;j++)
result.at<uchar>(Point(i,j)) = (uchar)h_r[i+j*X];
//original.imshow(orig);
resultado.imshow(result);
int *temp = h_e;
h_e = h_r;
h_r = temp;
// Wait until a key is pressed, then break the loop
if (waitKey(0) == 27) //ESC == 27
{
break;
}
}
cudaFree(d_e);
cudaFree(d_r);
cudaFree(d_c_coeff);
std::free(h_e);
std::free(h_r);
std::free(c_coeff);
return 0;
} /* main */
|
3fe5758fc8e2f6f8ef9a99d301fa86b697662c09.hip | // !!! This is a file automatically generated by hipify!!!
#include <cupy/complex.cuh>
#include <hipcub/hipcub.hpp>
#include <cub/device/device_segmented_reduce.cuh>
#include <cub/device/device_spmv.cuh>
#include <hipcub/hipcub.hpp>
#include "cupy_cub.h"
#include <stdexcept>
#if __CUDACC_VER_MAJOR__ >= 9 && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
#include <hip/hip_fp16.h>
#endif
using namespace cub;
/* ------------------------------------ Minimum boilerplate to support complex numbers ------------------------------------ */
// - This works only because all data fields in the *Traits struct are not
// used in <hipcub/hipcub.hpp>.
// - The Max() and Lowest() below are chosen to comply with NumPy's lexical
// ordering; note that std::numeric_limits<T> does not support complex
// numbers as in general the comparison is ill defined.
// - DO NOT USE THIS STUB for supporting CUB sorting!!!!!!
template <>
struct FpLimits<complex<float>>
{
static __host__ __device__ __forceinline__ complex<float> Max() {
return (complex<float>(FLT_MAX, FLT_MAX));
}
static __host__ __device__ __forceinline__ complex<float> Lowest() {
return (complex<float>(FLT_MAX * float(-1), FLT_MAX * float(-1)));
}
};
template <>
struct FpLimits<complex<double>>
{
static __host__ __device__ __forceinline__ complex<double> Max() {
return (complex<double>(DBL_MAX, DBL_MAX));
}
static __host__ __device__ __forceinline__ complex<double> Lowest() {
return (complex<double>(DBL_MAX * double(-1), DBL_MAX * double(-1)));
}
};
template <> struct NumericTraits<complex<float>> : BaseTraits<FLOATING_POINT, true, false, unsigned int, complex<float>> {};
template <> struct NumericTraits<complex<double>> : BaseTraits<FLOATING_POINT, true, false, unsigned long long, complex<double>> {};
/* ------------------------------------ end of boilerplate ------------------------------------ */
/* ------------------------------------ "Patches" to CUB ------------------------------------
These stubs are needed because CUB does not handle NaNs properly, while NumPy has certain
behaviors with which we must comply.
*/
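// For reference, the NumPy behaviour being matched (illustrative values):
//   np.max([1.0, np.nan, 2.0])    -> nan
//   np.argmax([1.0, np.nan, 2.0]) -> 1   (the index of the NaN)
// i.e. a NaN operand always wins the comparison, and ties keep the smaller index.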
#if __CUDACC_VER_MAJOR__ >= 9 && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
__host__ __device__ __forceinline__ bool half_isnan(const __half& x) {
#ifdef __CUDA_ARCH__
return __hisnan(x);
#else
// TODO: avoid cast to float
return isnan(__half2float(x));
#endif
}
__host__ __device__ __forceinline__ bool half_less(const __half& l, const __half& r) {
#ifdef __CUDA_ARCH__
return l < r;
#else
// TODO: avoid cast to float
return __half2float(l) < __half2float(r);
#endif
}
__host__ __device__ __forceinline__ bool half_equal(const __half& l, const __half& r) {
#ifdef __CUDA_ARCH__
return l == r;
#else
// TODO: avoid cast to float
return __half2float(l) == __half2float(r);
#endif
}
#endif
//
// Max()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ float Max::operator()(const float &a, const float &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MAX(a, b);}
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ double Max::operator()(const double &a, const double &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MAX(a, b);}
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<float> Max::operator()(const complex<float> &a, const complex<float> &b) const
{
// - TODO(leofang): just call max() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and max() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return max(a, b);}
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<double> Max::operator()(const complex<double> &a, const complex<double> &b) const
{
// - TODO(leofang): just call max() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and max() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return max(a, b);}
}
#if __CUDACC_VER_MAJOR__ >= 9 && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ __half Max::operator()(const __half &a, const __half &b) const
{
// NumPy behavior: NaN is always chosen!
if (half_isnan(a)) {return a;}
else if (half_isnan(b)) {return b;}
else if (half_less(a, b)) {return b;}
else {return a;}
}
#endif
//
// Min()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ float Min::operator()(const float &a, const float &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MIN(a, b);}
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ double Min::operator()(const double &a, const double &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MIN(a, b);}
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<float> Min::operator()(const complex<float> &a, const complex<float> &b) const
{
// - TODO(leofang): just call min() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and min() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return min(a, b);}
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<double> Min::operator()(const complex<double> &a, const complex<double> &b) const
{
// - TODO(leofang): just call min() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and min() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return min(a, b);}
}
#if __CUDACC_VER_MAJOR__ >= 9 && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ __half Min::operator()(const __half &a, const __half &b) const
{
// NumPy behavior: NaN is always chosen!
if (half_isnan(a)) {return a;}
else if (half_isnan(b)) {return b;}
else if (half_less(a, b)) {return a;}
else {return b;}
}
#endif
//
// ArgMax()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMax::operator()(
const KeyValuePair<int, float> &a,
const KeyValuePair<int, float> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMax::operator()(
const KeyValuePair<int, double> &a,
const KeyValuePair<int, double> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<float>> ArgMax::operator()(
const KeyValuePair<int, complex<float>> &a,
const KeyValuePair<int, complex<float>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMax::operator()(
const KeyValuePair<int, complex<double>> &a,
const KeyValuePair<int, complex<double>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
#if __CUDACC_VER_MAJOR__ >= 9 && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, __half> ArgMax::operator()(
const KeyValuePair<int, __half> &a,
const KeyValuePair<int, __half> &b) const
{
if (half_isnan(a.value))
return a;
else if (half_isnan(b.value))
return b;
else if ((half_less(a.value, b.value)) ||
(half_equal(a.value, b.value) && (b.key < a.key)))
return b;
else
return a;
}
#endif
//
// ArgMin()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMin::operator()(
const KeyValuePair<int, float> &a,
const KeyValuePair<int, float> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMin::operator()(
const KeyValuePair<int, double> &a,
const KeyValuePair<int, double> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<float>> ArgMin::operator()(
const KeyValuePair<int, complex<float>> &a,
const KeyValuePair<int, complex<float>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMin::operator()(
const KeyValuePair<int, complex<double>> &a,
const KeyValuePair<int, complex<double>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
#if __CUDACC_VER_MAJOR__ >= 9 && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, __half> ArgMin::operator()(
const KeyValuePair<int, __half> &a,
const KeyValuePair<int, __half> &b) const
{
if (half_isnan(a.value))
return a;
else if (half_isnan(b.value))
return b;
else if ((half_less(b.value, a.value)) ||
(half_equal(a.value, b.value) && (b.key < a.key)))
return b;
else
return a;
}
#endif
/* ------------------------------------ End of "patches" ------------------------------------ */
//
// **** dtype_dispatcher ****
//
// This is implemented with reference to the following implementation.
// https://github.com/rapidsai/cudf/blob/branch-0.6/cpp/src/utilities/type_dispatcher.hpp
//
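// It maps a runtime dtype ID onto a compile-time instantiation of the functor
// f; e.g. dtype_dispatcher(CUPY_CUB_FLOAT32, _cub_reduce_sum(), ...) ends up
// calling DeviceReduce::Sum with T = float.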
template <class functor_t, typename... Ts>
void dtype_dispatcher(int dtype_id, functor_t f, Ts&&... args)
{
switch (dtype_id) {
case CUPY_CUB_INT8: return f.template operator()<char>(std::forward<Ts>(args)...);
case CUPY_CUB_INT16: return f.template operator()<short>(std::forward<Ts>(args)...);
case CUPY_CUB_INT32: return f.template operator()<int>(std::forward<Ts>(args)...);
case CUPY_CUB_INT64: return f.template operator()<long>(std::forward<Ts>(args)...);
case CUPY_CUB_UINT8: return f.template operator()<unsigned char>(std::forward<Ts>(args)...);
case CUPY_CUB_UINT16: return f.template operator()<unsigned short>(std::forward<Ts>(args)...);
case CUPY_CUB_UINT32: return f.template operator()<unsigned int>(std::forward<Ts>(args)...);
case CUPY_CUB_UINT64: return f.template operator()<unsigned long>(std::forward<Ts>(args)...);
#if __CUDACC_VER_MAJOR__ >= 9 && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
case CUPY_CUB_FLOAT16: return f.template operator()<__half>(std::forward<Ts>(args)...);
#endif
case CUPY_CUB_FLOAT32: return f.template operator()<float>(std::forward<Ts>(args)...);
case CUPY_CUB_FLOAT64: return f.template operator()<double>(std::forward<Ts>(args)...);
case CUPY_CUB_COMPLEX64: return f.template operator()<complex<float>>(std::forward<Ts>(args)...);
case CUPY_CUB_COMPLEX128: return f.template operator()<complex<double>>(std::forward<Ts>(args)...);
default:
throw std::runtime_error("Unsupported dtype ID");
}
}
//
// **** CUB Sum ****
//
struct _cub_reduce_sum {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t s)
{
DeviceReduce::Sum(workspace, workspace_size, static_cast<T*>(x),
static_cast<T*>(y), num_items, s);
}
};
struct _cub_segmented_reduce_sum {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_segments, void* offset_start, void* offset_end, hipStream_t s)
{
DeviceSegmentedReduce::Sum(workspace, workspace_size,
static_cast<T*>(x), static_cast<T*>(y), num_segments,
static_cast<int*>(offset_start),
static_cast<int*>(offset_end), s);
}
};
//
// **** CUB Min ****
//
struct _cub_reduce_min {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t s)
{
DeviceReduce::Min(workspace, workspace_size, static_cast<T*>(x),
static_cast<T*>(y), num_items, s);
}
};
struct _cub_segmented_reduce_min {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_segments, void* offset_start, void* offset_end, hipStream_t s)
{
DeviceSegmentedReduce::Min(workspace, workspace_size,
static_cast<T*>(x), static_cast<T*>(y), num_segments,
static_cast<int*>(offset_start),
static_cast<int*>(offset_end), s);
}
};
//
// **** CUB Max ****
//
struct _cub_reduce_max {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t s)
{
DeviceReduce::Max(workspace, workspace_size, static_cast<T*>(x),
static_cast<T*>(y), num_items, s);
}
};
struct _cub_segmented_reduce_max {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_segments, void* offset_start, void* offset_end, hipStream_t s)
{
DeviceSegmentedReduce::Max(workspace, workspace_size,
static_cast<T*>(x), static_cast<T*>(y), num_segments,
static_cast<int*>(offset_start),
static_cast<int*>(offset_end), s);
}
};
//
// **** CUB ArgMin ****
//
struct _cub_reduce_argmin {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t s)
{
DeviceReduce::ArgMin(workspace, workspace_size, static_cast<T*>(x),
static_cast<KeyValuePair<int, T>*>(y), num_items, s);
}
};
// TODO(leofang): add _cub_segmented_reduce_argmin
//
// **** CUB ArgMax ****
//
struct _cub_reduce_argmax {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t s)
{
DeviceReduce::ArgMax(workspace, workspace_size, static_cast<T*>(x),
static_cast<KeyValuePair<int, T>*>(y), num_items, s);
}
};
// TODO(leofang): add _cub_segmented_reduce_argmax
//
// **** CUB SpMV ****
//
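// DeviceSpmv::CsrMV computes y = A * x for a CSR matrix described by
// (values, row_offsets, column_indices) with the given
// num_rows / num_cols / num_nonzeros.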
struct _cub_device_spmv {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* values,
void* row_offsets, void* column_indices, void* x, void* y,
int num_rows, int num_cols, int num_nonzeros, hipStream_t stream)
{
DeviceSpmv::CsrMV(workspace, workspace_size, static_cast<T*>(values),
static_cast<int*>(row_offsets), static_cast<int*>(column_indices),
static_cast<T*>(x), static_cast<T*>(y), num_rows, num_cols,
num_nonzeros, stream);
}
};
//
// **** CUB InclusiveSum ****
//
struct _cub_inclusive_sum {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
int num_items, hipStream_t s)
{
DeviceScan::InclusiveSum(workspace, workspace_size, static_cast<T*>(input),
static_cast<T*>(output), num_items, s);
}
};
//
// **** CUB inclusive product ****
//
struct _cub_inclusive_product {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
int num_items, hipStream_t s)
{
_multiply product_op;
DeviceScan::InclusiveScan(workspace, workspace_size, static_cast<T*>(input),
static_cast<T*>(output), product_op, num_items, s);
}
// product functor
struct _multiply
{
template <typename T>
__host__ __device__ __forceinline__
T operator()(const T &a, const T &b) const {
return a * b;
}
};
};
//
// APIs exposed to CuPy
//
/* -------- device reduce -------- */
void cub_device_reduce(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t stream, int op, int dtype_id)
{
switch(op) {
case CUPY_CUB_SUM: return dtype_dispatcher(dtype_id, _cub_reduce_sum(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_MIN: return dtype_dispatcher(dtype_id, _cub_reduce_min(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_MAX: return dtype_dispatcher(dtype_id, _cub_reduce_max(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_ARGMIN: return dtype_dispatcher(dtype_id, _cub_reduce_argmin(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_ARGMAX: return dtype_dispatcher(dtype_id, _cub_reduce_argmax(),
workspace, workspace_size, x, y, num_items, stream);
default: throw std::runtime_error("Unsupported operation");
}
}
size_t cub_device_reduce_get_workspace_size(void* x, void* y, int num_items,
hipStream_t stream, int op, int dtype_id)
{
size_t workspace_size = 0;
cub_device_reduce(NULL, workspace_size, x, y, num_items, stream,
op, dtype_id);
return workspace_size;
}
/* -------- device segmented reduce -------- */
void cub_device_segmented_reduce(void* workspace, size_t& workspace_size,
void* x, void* y, int num_segments, void* offset_start, void* offset_end,
hipStream_t stream, int op, int dtype_id)
{
switch(op) {
case CUPY_CUB_SUM:
return dtype_dispatcher(dtype_id, _cub_segmented_reduce_sum(),
workspace, workspace_size, x, y, num_segments, offset_start,
offset_end, stream);
case CUPY_CUB_MIN:
return dtype_dispatcher(dtype_id, _cub_segmented_reduce_min(),
workspace, workspace_size, x, y, num_segments, offset_start,
offset_end, stream);
case CUPY_CUB_MAX:
return dtype_dispatcher(dtype_id, _cub_segmented_reduce_max(),
workspace, workspace_size, x, y, num_segments, offset_start,
offset_end, stream);
default:
throw std::runtime_error("Unsupported operation");
}
}
size_t cub_device_segmented_reduce_get_workspace_size(void* x, void* y,
int num_segments, void* offset_start, void* offset_end,
hipStream_t stream, int op, int dtype_id)
{
size_t workspace_size = 0;
cub_device_segmented_reduce(NULL, workspace_size, x, y, num_segments,
offset_start, offset_end, stream,
op, dtype_id);
return workspace_size;
}
/*--------- device spmv (sparse-matrix dense-vector multiply) ---------*/
void cub_device_spmv(void* workspace, size_t& workspace_size, void* values,
void* row_offsets, void* column_indices, void* x, void* y, int num_rows,
int num_cols, int num_nonzeros, hipStream_t stream,
int dtype_id)
{
return dtype_dispatcher(dtype_id, _cub_device_spmv(),
workspace, workspace_size, values, row_offsets,
column_indices, x, y, num_rows, num_cols,
num_nonzeros, stream);
}
size_t cub_device_spmv_get_workspace_size(void* values, void* row_offsets,
void* column_indices, void* x, void* y, int num_rows, int num_cols,
int num_nonzeros, hipStream_t stream, int dtype_id)
{
size_t workspace_size = 0;
cub_device_spmv(NULL, workspace_size, values, row_offsets, column_indices,
x, y, num_rows, num_cols, num_nonzeros, stream, dtype_id);
return workspace_size;
}
/* -------- device scan -------- */
void cub_device_scan(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t stream, int op, int dtype_id)
{
switch(op) {
case CUPY_CUB_CUMSUM:
return dtype_dispatcher(dtype_id, _cub_inclusive_sum(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_CUMPROD:
return dtype_dispatcher(dtype_id, _cub_inclusive_product(),
workspace, workspace_size, x, y, num_items, stream);
default:
throw std::runtime_error("Unsupported operation");
}
}
size_t cub_device_scan_get_workspace_size(void* x, void* y, int num_items,
hipStream_t stream, int op, int dtype_id)
{
size_t workspace_size = 0;
cub_device_scan(NULL, workspace_size, x, y, num_items, stream,
op, dtype_id);
return workspace_size;
}
| 3fe5758fc8e2f6f8ef9a99d301fa86b697662c09.cu | #include <cupy/complex.cuh>
#include <cub/device/device_reduce.cuh>
#include <cub/device/device_segmented_reduce.cuh>
#include <cub/device/device_spmv.cuh>
#include <cub/device/device_scan.cuh>
#include "cupy_cub.h"
#include <stdexcept>
#if __CUDACC_VER_MAJOR__ >= 9 && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
#include <cuda_fp16.h>
#endif
using namespace cub;
/* ------------------------------------ Minimum boilerplate to support complex numbers ------------------------------------ */
// - This works only because all data fields in the *Traits struct are not
// used in <cub/device/device_reduce.cuh>.
// - The Max() and Lowest() below are chosen to comply with NumPy's lexical
// ordering; note that std::numeric_limits<T> does not support complex
// numbers as in general the comparison is ill defined.
// - DO NOT USE THIS STUB for supporting CUB sorting!!!!!!
template <>
struct FpLimits<complex<float>>
{
static __host__ __device__ __forceinline__ complex<float> Max() {
return (complex<float>(FLT_MAX, FLT_MAX));
}
static __host__ __device__ __forceinline__ complex<float> Lowest() {
return (complex<float>(FLT_MAX * float(-1), FLT_MAX * float(-1)));
}
};
template <>
struct FpLimits<complex<double>>
{
static __host__ __device__ __forceinline__ complex<double> Max() {
return (complex<double>(DBL_MAX, DBL_MAX));
}
static __host__ __device__ __forceinline__ complex<double> Lowest() {
return (complex<double>(DBL_MAX * double(-1), DBL_MAX * double(-1)));
}
};
template <> struct NumericTraits<complex<float>> : BaseTraits<FLOATING_POINT, true, false, unsigned int, complex<float>> {};
template <> struct NumericTraits<complex<double>> : BaseTraits<FLOATING_POINT, true, false, unsigned long long, complex<double>> {};
/* ------------------------------------ end of boilerplate ------------------------------------ */
/* ------------------------------------ "Patches" to CUB ------------------------------------
These stubs are needed because CUB does not handle NaNs properly, while NumPy has certain
behaviors with which we must comply.
*/
#if __CUDACC_VER_MAJOR__ >= 9 && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
__host__ __device__ __forceinline__ bool half_isnan(const __half& x) {
#ifdef __CUDA_ARCH__
return __hisnan(x);
#else
// TODO: avoid cast to float
return isnan(__half2float(x));
#endif
}
__host__ __device__ __forceinline__ bool half_less(const __half& l, const __half& r) {
#ifdef __CUDA_ARCH__
return l < r;
#else
// TODO: avoid cast to float
return __half2float(l) < __half2float(r);
#endif
}
__host__ __device__ __forceinline__ bool half_equal(const __half& l, const __half& r) {
#ifdef __CUDA_ARCH__
return l == r;
#else
// TODO: avoid cast to float
return __half2float(l) == __half2float(r);
#endif
}
#endif
//
// Max()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ float Max::operator()(const float &a, const float &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MAX(a, b);}
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ double Max::operator()(const double &a, const double &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MAX(a, b);}
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<float> Max::operator()(const complex<float> &a, const complex<float> &b) const
{
// - TODO(leofang): just call max() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and max() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return max(a, b);}
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<double> Max::operator()(const complex<double> &a, const complex<double> &b) const
{
// - TODO(leofang): just call max() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and max() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return max(a, b);}
}
#if __CUDACC_VER_MAJOR__ >= 9 && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ __half Max::operator()(const __half &a, const __half &b) const
{
// NumPy behavior: NaN is always chosen!
if (half_isnan(a)) {return a;}
else if (half_isnan(b)) {return b;}
else if (half_less(a, b)) {return b;}
else {return a;}
}
#endif
//
// Min()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ float Min::operator()(const float &a, const float &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MIN(a, b);}
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ double Min::operator()(const double &a, const double &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MIN(a, b);}
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<float> Min::operator()(const complex<float> &a, const complex<float> &b) const
{
// - TODO(leofang): just call min() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and min() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return min(a, b);}
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<double> Min::operator()(const complex<double> &a, const complex<double> &b) const
{
// - TODO(leofang): just call min() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and min() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return min(a, b);}
}
#if __CUDACC_VER_MAJOR__ >= 9 && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ __half Min::operator()(const __half &a, const __half &b) const
{
// NumPy behavior: NaN is always chosen!
if (half_isnan(a)) {return a;}
else if (half_isnan(b)) {return b;}
else if (half_less(a, b)) {return a;}
else {return b;}
}
#endif
//
// ArgMax()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMax::operator()(
const KeyValuePair<int, float> &a,
const KeyValuePair<int, float> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMax::operator()(
const KeyValuePair<int, double> &a,
const KeyValuePair<int, double> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<float>> ArgMax::operator()(
const KeyValuePair<int, complex<float>> &a,
const KeyValuePair<int, complex<float>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMax::operator()(
const KeyValuePair<int, complex<double>> &a,
const KeyValuePair<int, complex<double>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
#if __CUDACC_VER_MAJOR__ >= 9 && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, __half> ArgMax::operator()(
const KeyValuePair<int, __half> &a,
const KeyValuePair<int, __half> &b) const
{
if (half_isnan(a.value))
return a;
else if (half_isnan(b.value))
return b;
else if ((half_less(a.value, b.value)) ||
(half_equal(a.value, b.value) && (b.key < a.key)))
return b;
else
return a;
}
#endif
//
// ArgMin()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMin::operator()(
const KeyValuePair<int, float> &a,
const KeyValuePair<int, float> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMin::operator()(
const KeyValuePair<int, double> &a,
const KeyValuePair<int, double> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<float>> ArgMin::operator()(
const KeyValuePair<int, complex<float>> &a,
const KeyValuePair<int, complex<float>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMin::operator()(
const KeyValuePair<int, complex<double>> &a,
const KeyValuePair<int, complex<double>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
#if __CUDACC_VER_MAJOR__ >= 9 && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, __half> ArgMin::operator()(
const KeyValuePair<int, __half> &a,
const KeyValuePair<int, __half> &b) const
{
if (half_isnan(a.value))
return a;
else if (half_isnan(b.value))
return b;
else if ((half_less(b.value, a.value)) ||
(half_equal(a.value, b.value) && (b.key < a.key)))
return b;
else
return a;
}
#endif
/* ------------------------------------ End of "patches" ------------------------------------ */
//
// **** dtype_dispatcher ****
//
// This is implemented with reference to the following implementation.
// https://github.com/rapidsai/cudf/blob/branch-0.6/cpp/src/utilities/type_dispatcher.hpp
//
template <class functor_t, typename... Ts>
void dtype_dispatcher(int dtype_id, functor_t f, Ts&&... args)
{
switch (dtype_id) {
case CUPY_CUB_INT8: return f.template operator()<char>(std::forward<Ts>(args)...);
case CUPY_CUB_INT16: return f.template operator()<short>(std::forward<Ts>(args)...);
case CUPY_CUB_INT32: return f.template operator()<int>(std::forward<Ts>(args)...);
case CUPY_CUB_INT64: return f.template operator()<long>(std::forward<Ts>(args)...);
case CUPY_CUB_UINT8: return f.template operator()<unsigned char>(std::forward<Ts>(args)...);
case CUPY_CUB_UINT16: return f.template operator()<unsigned short>(std::forward<Ts>(args)...);
case CUPY_CUB_UINT32: return f.template operator()<unsigned int>(std::forward<Ts>(args)...);
case CUPY_CUB_UINT64: return f.template operator()<unsigned long>(std::forward<Ts>(args)...);
#if __CUDACC_VER_MAJOR__ >= 9 && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
case CUPY_CUB_FLOAT16: return f.template operator()<__half>(std::forward<Ts>(args)...);
#endif
case CUPY_CUB_FLOAT32: return f.template operator()<float>(std::forward<Ts>(args)...);
case CUPY_CUB_FLOAT64: return f.template operator()<double>(std::forward<Ts>(args)...);
case CUPY_CUB_COMPLEX64: return f.template operator()<complex<float>>(std::forward<Ts>(args)...);
case CUPY_CUB_COMPLEX128: return f.template operator()<complex<double>>(std::forward<Ts>(args)...);
default:
throw std::runtime_error("Unsupported dtype ID");
}
}
//
// **** CUB Sum ****
//
struct _cub_reduce_sum {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t s)
{
DeviceReduce::Sum(workspace, workspace_size, static_cast<T*>(x),
static_cast<T*>(y), num_items, s);
}
};
struct _cub_segmented_reduce_sum {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_segments, void* offset_start, void* offset_end, cudaStream_t s)
{
DeviceSegmentedReduce::Sum(workspace, workspace_size,
static_cast<T*>(x), static_cast<T*>(y), num_segments,
static_cast<int*>(offset_start),
static_cast<int*>(offset_end), s);
}
};
//
// **** CUB Min ****
//
struct _cub_reduce_min {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t s)
{
DeviceReduce::Min(workspace, workspace_size, static_cast<T*>(x),
static_cast<T*>(y), num_items, s);
}
};
struct _cub_segmented_reduce_min {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_segments, void* offset_start, void* offset_end, cudaStream_t s)
{
DeviceSegmentedReduce::Min(workspace, workspace_size,
static_cast<T*>(x), static_cast<T*>(y), num_segments,
static_cast<int*>(offset_start),
static_cast<int*>(offset_end), s);
}
};
//
// **** CUB Max ****
//
struct _cub_reduce_max {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t s)
{
DeviceReduce::Max(workspace, workspace_size, static_cast<T*>(x),
static_cast<T*>(y), num_items, s);
}
};
struct _cub_segmented_reduce_max {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_segments, void* offset_start, void* offset_end, cudaStream_t s)
{
DeviceSegmentedReduce::Max(workspace, workspace_size,
static_cast<T*>(x), static_cast<T*>(y), num_segments,
static_cast<int*>(offset_start),
static_cast<int*>(offset_end), s);
}
};
//
// **** CUB ArgMin ****
//
struct _cub_reduce_argmin {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t s)
{
DeviceReduce::ArgMin(workspace, workspace_size, static_cast<T*>(x),
static_cast<KeyValuePair<int, T>*>(y), num_items, s);
}
};
// TODO(leofang): add _cub_segmented_reduce_argmin
//
// **** CUB ArgMax ****
//
struct _cub_reduce_argmax {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t s)
{
DeviceReduce::ArgMax(workspace, workspace_size, static_cast<T*>(x),
static_cast<KeyValuePair<int, T>*>(y), num_items, s);
}
};
// TODO(leofang): add _cub_segmented_reduce_argmax
//
// **** CUB SpMV ****
//
struct _cub_device_spmv {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* values,
void* row_offsets, void* column_indices, void* x, void* y,
int num_rows, int num_cols, int num_nonzeros, cudaStream_t stream)
{
DeviceSpmv::CsrMV(workspace, workspace_size, static_cast<T*>(values),
static_cast<int*>(row_offsets), static_cast<int*>(column_indices),
static_cast<T*>(x), static_cast<T*>(y), num_rows, num_cols,
num_nonzeros, stream);
}
};
//
// **** CUB InclusiveSum ****
//
struct _cub_inclusive_sum {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
int num_items, cudaStream_t s)
{
DeviceScan::InclusiveSum(workspace, workspace_size, static_cast<T*>(input),
static_cast<T*>(output), num_items, s);
}
};
//
// **** CUB inclusive product ****
//
struct _cub_inclusive_product {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
int num_items, cudaStream_t s)
{
_multiply product_op;
DeviceScan::InclusiveScan(workspace, workspace_size, static_cast<T*>(input),
static_cast<T*>(output), product_op, num_items, s);
}
// product functor
struct _multiply
{
template <typename T>
__host__ __device__ __forceinline__
T operator()(const T &a, const T &b) const {
return a * b;
}
};
};
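// Scanning with the _multiply functor yields a cumulative product,
// e.g. an input of [1, 2, 3, 4] scans (inclusively) to [1, 2, 6, 24].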
//
// APIs exposed to CuPy
//
/* -------- device reduce -------- */
void cub_device_reduce(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t stream, int op, int dtype_id)
{
switch(op) {
case CUPY_CUB_SUM: return dtype_dispatcher(dtype_id, _cub_reduce_sum(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_MIN: return dtype_dispatcher(dtype_id, _cub_reduce_min(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_MAX: return dtype_dispatcher(dtype_id, _cub_reduce_max(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_ARGMIN: return dtype_dispatcher(dtype_id, _cub_reduce_argmin(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_ARGMAX: return dtype_dispatcher(dtype_id, _cub_reduce_argmax(),
workspace, workspace_size, x, y, num_items, stream);
default: throw std::runtime_error("Unsupported operation");
}
}
size_t cub_device_reduce_get_workspace_size(void* x, void* y, int num_items,
cudaStream_t stream, int op, int dtype_id)
{
size_t workspace_size = 0;
cub_device_reduce(NULL, workspace_size, x, y, num_items, stream,
op, dtype_id);
return workspace_size;
}
/* -------- device segmented reduce -------- */
void cub_device_segmented_reduce(void* workspace, size_t& workspace_size,
void* x, void* y, int num_segments, void* offset_start, void* offset_end,
cudaStream_t stream, int op, int dtype_id)
{
switch(op) {
case CUPY_CUB_SUM:
return dtype_dispatcher(dtype_id, _cub_segmented_reduce_sum(),
workspace, workspace_size, x, y, num_segments, offset_start,
offset_end, stream);
case CUPY_CUB_MIN:
return dtype_dispatcher(dtype_id, _cub_segmented_reduce_min(),
workspace, workspace_size, x, y, num_segments, offset_start,
offset_end, stream);
case CUPY_CUB_MAX:
return dtype_dispatcher(dtype_id, _cub_segmented_reduce_max(),
workspace, workspace_size, x, y, num_segments, offset_start,
offset_end, stream);
default:
throw std::runtime_error("Unsupported operation");
}
}
size_t cub_device_segmented_reduce_get_workspace_size(void* x, void* y,
int num_segments, void* offset_start, void* offset_end,
cudaStream_t stream, int op, int dtype_id)
{
size_t workspace_size = 0;
cub_device_segmented_reduce(NULL, workspace_size, x, y, num_segments,
offset_start, offset_end, stream,
op, dtype_id);
return workspace_size;
}
/*--------- device spmv (sparse-matrix dense-vector multiply) ---------*/
void cub_device_spmv(void* workspace, size_t& workspace_size, void* values,
void* row_offsets, void* column_indices, void* x, void* y, int num_rows,
int num_cols, int num_nonzeros, cudaStream_t stream,
int dtype_id)
{
return dtype_dispatcher(dtype_id, _cub_device_spmv(),
workspace, workspace_size, values, row_offsets,
column_indices, x, y, num_rows, num_cols,
num_nonzeros, stream);
}
size_t cub_device_spmv_get_workspace_size(void* values, void* row_offsets,
void* column_indices, void* x, void* y, int num_rows, int num_cols,
int num_nonzeros, cudaStream_t stream, int dtype_id)
{
size_t workspace_size = 0;
cub_device_spmv(NULL, workspace_size, values, row_offsets, column_indices,
x, y, num_rows, num_cols, num_nonzeros, stream, dtype_id);
return workspace_size;
}
/* -------- device scan -------- */
void cub_device_scan(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t stream, int op, int dtype_id)
{
switch(op) {
case CUPY_CUB_CUMSUM:
return dtype_dispatcher(dtype_id, _cub_inclusive_sum(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_CUMPROD:
return dtype_dispatcher(dtype_id, _cub_inclusive_product(),
workspace, workspace_size, x, y, num_items, stream);
default:
throw std::runtime_error("Unsupported operation");
}
}
size_t cub_device_scan_get_workspace_size(void* x, void* y, int num_items,
cudaStream_t stream, int op, int dtype_id)
{
size_t workspace_size = 0;
cub_device_scan(NULL, workspace_size, x, y, num_items, stream,
op, dtype_id);
return workspace_size;
}
|
bdb4a03b7d16c354202c97fbad756dcc7c1afad4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "Static/TriangleCounting/triangle2.cuh"
namespace hornets_nest {
TriangleCounting2::TriangleCounting2(HornetGraph& hornet) :
StaticAlgorithm(hornet)
{
}
TriangleCounting2::~TriangleCounting2(){
release();
}
struct OPERATOR_InitTriangleCounts {
triangle_t *d_triPerVertex;
OPERATOR (Vertex &vertex) {
d_triPerVertex[vertex.id()] = 0;
}
};
/*
* Naive intersection operator
* Assumption: access to entire adjacencies of v1 and v2 required
*/
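// The adjacency lists are assumed to be sorted by vertex id, so the
// intersection below is a two-pointer merge: advance whichever side holds the
// smaller id and count a common neighbour whenever the two ids match.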
struct OPERATOR_AdjIntersectionCount {
triangle_t* d_triPerVertex;
OPERATOR(Vertex& v1, Vertex& v2, int flag) {
triangle_t count = 0;
int deg1 = v1.degree();
int deg2 = v2.degree();
vid_t* ui_begin = v1.neighbor_ptr();
vid_t* vi_begin = v2.neighbor_ptr();
vid_t* ui_end = ui_begin+deg1-1;
vid_t* vi_end = vi_begin+deg2-1;
int comp_equals, comp1, comp2;
while (vi_begin <= vi_end && ui_begin <= ui_end) {
comp_equals = (*ui_begin == *vi_begin);
count += comp_equals;
comp1 = (*ui_begin >= *vi_begin);
comp2 = (*ui_begin <= *vi_begin);
vi_begin += comp1;
ui_begin += comp2;
// early termination
if ((vi_begin > vi_end) || (ui_begin > ui_end))
break;
}
atomicAdd(d_triPerVertex+v1.id(), count);
atomicAdd(d_triPerVertex+v2.id(), count);
}
};
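// Load-balanced variant used with forAllAdjUnions: each call only covers the
// [ui_begin, ui_end] x [vi_begin, vi_end] slice it is handed. With FLAG == 0
// it runs the same two-pointer merge as above; otherwise it binary-searches
// every element of the u-range inside the v-range. Counts are accumulated
// atomically because several threads may contribute to the same vertex pair.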
struct OPERATOR_AdjIntersectionCountBalanced {
triangle_t* d_triPerVertex;
OPERATOR(Vertex &u, Vertex& v, vid_t* ui_begin, vid_t* ui_end, vid_t* vi_begin, vid_t* vi_end, int FLAG) {
int count = 0;
if (!FLAG) {
int comp_equals, comp1, comp2, ui_bound, vi_bound;
//printf("Intersecting %d, %d: %d -> %d, %d -> %d\n", u.id(), v.id(), *ui_begin, *ui_end, *vi_begin, *vi_end);
while (vi_begin <= vi_end && ui_begin <= ui_end) {
comp_equals = (*ui_begin == *vi_begin);
count += comp_equals;
comp1 = (*ui_begin >= *vi_begin);
comp2 = (*ui_begin <= *vi_begin);
ui_bound = (ui_begin == ui_end);
vi_bound = (vi_begin == vi_end);
// early termination
if ((ui_bound && comp2) || (vi_bound && comp1))
break;
if ((comp1 && !vi_bound) || ui_bound)
vi_begin += 1;
if ((comp2 && !ui_bound) || vi_bound)
ui_begin += 1;
}
} else {
vid_t vi_low, vi_high, vi_mid;
while (ui_begin <= ui_end) {
auto search_val = *ui_begin;
vi_low = 0;
vi_high = vi_end-vi_begin;
while (vi_low <= vi_high) {
vi_mid = (vi_low+vi_high)/2;
auto comp = (*(vi_begin+vi_mid) - search_val);
if (!comp) {
count += 1;
break;
}
if (comp > 0) {
vi_high = vi_mid-1;
} else if (comp < 0) {
vi_low = vi_mid+1;
}
}
ui_begin += 1;
}
}
atomicAdd(d_triPerVertex+u.id(), count);
atomicAdd(d_triPerVertex+v.id(), count);
}
};
triangle_t TriangleCounting2::countTriangles(){
triangle_t* h_triPerVertex;
host::allocate(h_triPerVertex, hornet.nV());
gpu::copyToHost(triPerVertex, hornet.nV(), h_triPerVertex);
triangle_t sum=0;
for(int i=0; i<hornet.nV(); i++){
// printf("%d %ld\n", i,outputArray[i]);
sum+=h_triPerVertex[i];
}
free(h_triPerVertex);
//triangle_t sum=gpu::reduce(hd_triangleData().triPerVertex, hd_triangleData().nv+1);
return sum;
}
void TriangleCounting2::reset(){
//printf("Inside reset()\n");
forAllVertices(hornet, OPERATOR_InitTriangleCounts { triPerVertex });
}
void TriangleCounting2::run(){
//printf("Inside run()\n");
forAllAdjUnions(hornet, OPERATOR_AdjIntersectionCountBalanced { triPerVertex });
//forAllAdjUnions(hornet, OPERATOR_AdjIntersectionCount { triPerVertex });
}
void TriangleCounting2::release(){
//printf("Inside release\n");
gpu::free(triPerVertex);
triPerVertex = nullptr;
}
void TriangleCounting2::init(){
//printf("Inside init. Printing hornet.nV(): %d\n", hornet.nV());
gpu::allocate(triPerVertex, hornet.nV());
reset();
}
} // namespace hornets_nest
| bdb4a03b7d16c354202c97fbad756dcc7c1afad4.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "Static/TriangleCounting/triangle2.cuh"
namespace hornets_nest {
TriangleCounting2::TriangleCounting2(HornetGraph& hornet) :
StaticAlgorithm(hornet)
{
}
TriangleCounting2::~TriangleCounting2(){
release();
}
struct OPERATOR_InitTriangleCounts {
triangle_t *d_triPerVertex;
OPERATOR (Vertex &vertex) {
d_triPerVertex[vertex.id()] = 0;
}
};
/*
* Naive intersection operator
* Assumption: access to entire adjacencies of v1 and v2 required
*/
struct OPERATOR_AdjIntersectionCount {
triangle_t* d_triPerVertex;
OPERATOR(Vertex& v1, Vertex& v2, int flag) {
triangle_t count = 0;
int deg1 = v1.degree();
int deg2 = v2.degree();
vid_t* ui_begin = v1.neighbor_ptr();
vid_t* vi_begin = v2.neighbor_ptr();
vid_t* ui_end = ui_begin+deg1-1;
vid_t* vi_end = vi_begin+deg2-1;
int comp_equals, comp1, comp2;
while (vi_begin <= vi_end && ui_begin <= ui_end) {
comp_equals = (*ui_begin == *vi_begin);
count += comp_equals;
comp1 = (*ui_begin >= *vi_begin);
comp2 = (*ui_begin <= *vi_begin);
vi_begin += comp1;
ui_begin += comp2;
// early termination
if ((vi_begin > vi_end) || (ui_begin > ui_end))
break;
}
atomicAdd(d_triPerVertex+v1.id(), count);
atomicAdd(d_triPerVertex+v2.id(), count);
}
};
struct OPERATOR_AdjIntersectionCountBalanced {
triangle_t* d_triPerVertex;
OPERATOR(Vertex &u, Vertex& v, vid_t* ui_begin, vid_t* ui_end, vid_t* vi_begin, vid_t* vi_end, int FLAG) {
int count = 0;
if (!FLAG) {
int comp_equals, comp1, comp2, ui_bound, vi_bound;
//printf("Intersecting %d, %d: %d -> %d, %d -> %d\n", u.id(), v.id(), *ui_begin, *ui_end, *vi_begin, *vi_end);
while (vi_begin <= vi_end && ui_begin <= ui_end) {
comp_equals = (*ui_begin == *vi_begin);
count += comp_equals;
comp1 = (*ui_begin >= *vi_begin);
comp2 = (*ui_begin <= *vi_begin);
ui_bound = (ui_begin == ui_end);
vi_bound = (vi_begin == vi_end);
// early termination
if ((ui_bound && comp2) || (vi_bound && comp1))
break;
if ((comp1 && !vi_bound) || ui_bound)
vi_begin += 1;
if ((comp2 && !ui_bound) || vi_bound)
ui_begin += 1;
}
} else {
vid_t vi_low, vi_high, vi_mid;
while (ui_begin <= ui_end) {
auto search_val = *ui_begin;
vi_low = 0;
vi_high = vi_end-vi_begin;
while (vi_low <= vi_high) {
vi_mid = (vi_low+vi_high)/2;
auto comp = (*(vi_begin+vi_mid) - search_val);
if (!comp) {
count += 1;
break;
}
if (comp > 0) {
vi_high = vi_mid-1;
} else if (comp < 0) {
vi_low = vi_mid+1;
}
}
ui_begin += 1;
}
}
atomicAdd(d_triPerVertex+u.id(), count);
atomicAdd(d_triPerVertex+v.id(), count);
}
};
triangle_t TriangleCounting2::countTriangles(){
triangle_t* h_triPerVertex;
host::allocate(h_triPerVertex, hornet.nV());
gpu::copyToHost(triPerVertex, hornet.nV(), h_triPerVertex);
triangle_t sum=0;
for(int i=0; i<hornet.nV(); i++){
// printf("%d %ld\n", i,outputArray[i]);
sum+=h_triPerVertex[i];
}
free(h_triPerVertex);
//triangle_t sum=gpu::reduce(hd_triangleData().triPerVertex, hd_triangleData().nv+1);
return sum;
}
void TriangleCounting2::reset(){
//printf("Inside reset()\n");
forAllVertices(hornet, OPERATOR_InitTriangleCounts { triPerVertex });
}
void TriangleCounting2::run(){
//printf("Inside run()\n");
forAllAdjUnions(hornet, OPERATOR_AdjIntersectionCountBalanced { triPerVertex });
//forAllAdjUnions(hornet, OPERATOR_AdjIntersectionCount { triPerVertex });
}
void TriangleCounting2::release(){
//printf("Inside release\n");
gpu::free(triPerVertex);
triPerVertex = nullptr;
}
void TriangleCounting2::init(){
//printf("Inside init. Printing hornet.nV(): %d\n", hornet.nV());
gpu::allocate(triPerVertex, hornet.nV());
reset();
}
} // namespace hornets_nest
|
f9c38a7e8048adb2138ab92383ed6312185468e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#define PI 3.1415926535897932384626433832795029f
#define PIx2 6.2831853071795864769252867665590058f
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define K_ELEMS_PER_GRID 2048
#define KERNEL_PHI_MAG_THREADS_PER_BLOCK 512
#define KERNEL_Q_THREADS_PER_BLOCK 256
#define KERNEL_Q_K_ELEMS_PER_GRID 1024
#define CUDA_ERRCK \
{hipError_t err; \
if ((err = hipGetLastError()) != hipSuccess) { \
fprintf(stderr, "CUDA error on line %d: %s\n", __LINE__, hipGetErrorString(err)); \
exit(-1); \
} \
}
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
/* Values in the k-space coordinate system are stored in constant memory
* on the GPU */
__constant__ __device__ kValues ck[KERNEL_Q_K_ELEMS_PER_GRID];
__global__ void
ComputePhiMag_GPU(float* phiR, float* phiI, float* phiMag, int numK) {
int indexK = blockIdx.x*KERNEL_PHI_MAG_THREADS_PER_BLOCK + threadIdx.x;
if (indexK < numK) {
float real = phiR[indexK];
float imag = phiI[indexK];
phiMag[indexK] = real*real + imag*imag;
}
}
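/*
 * Each thread owns one sample point (x, y, z, Qr, Qi). The K-space values for
 * the current grid are staged in the constant-memory array ck[]; when numK is
 * odd the first element is peeled off, and the remaining elements are consumed
 * two per iteration so the accumulation loop is effectively unrolled by 2.
 */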
__global__ void
ComputeQ_GPU(int numK, int kGlobalIndex,
float* x, float* y, float* z, float* Qr , float* Qi)
{
float sX;
float sY;
float sZ;
float sQr;
float sQi;
// Determine the element of the X arrays computed by this thread
int xIndex = blockIdx.x*KERNEL_Q_THREADS_PER_BLOCK + threadIdx.x;
// Read this thread's sample values from global memory into registers
sX = x[xIndex];
sY = y[xIndex];
sZ = z[xIndex];
sQr = Qr[xIndex];
sQi = Qi[xIndex];
// Loop over all elements of K in constant mem to compute a partial value
// for X.
int kIndex = 0;
if (numK % 2) {
float expArg = PIx2 * (ck[0].Kx * sX + ck[0].Ky * sY + ck[0].Kz * sZ);
sQr += ck[0].PhiMag * cos(expArg);
sQi += ck[0].PhiMag * sin(expArg);
kIndex++;
kGlobalIndex++;
}
for (; (kIndex < KERNEL_Q_K_ELEMS_PER_GRID) && (kGlobalIndex < numK);
kIndex += 2, kGlobalIndex += 2) {
float expArg = PIx2 * (ck[kIndex].Kx * sX +
ck[kIndex].Ky * sY +
ck[kIndex].Kz * sZ);
sQr += ck[kIndex].PhiMag * cos(expArg);
sQi += ck[kIndex].PhiMag * sin(expArg);
int kIndex1 = kIndex + 1;
float expArg1 = PIx2 * (ck[kIndex1].Kx * sX +
ck[kIndex1].Ky * sY +
ck[kIndex1].Kz * sZ);
sQr += ck[kIndex1].PhiMag * cos(expArg1);
sQi += ck[kIndex1].PhiMag * sin(expArg1);
}
Qr[xIndex] = sQr;
Qi[xIndex] = sQi;
} | f9c38a7e8048adb2138ab92383ed6312185468e3.cu | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#define PI 3.1415926535897932384626433832795029f
#define PIx2 6.2831853071795864769252867665590058f
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define K_ELEMS_PER_GRID 2048
#define KERNEL_PHI_MAG_THREADS_PER_BLOCK 512
#define KERNEL_Q_THREADS_PER_BLOCK 256
#define KERNEL_Q_K_ELEMS_PER_GRID 1024
#define CUDA_ERRCK \
{cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
fprintf(stderr, "CUDA error on line %d: %s\n", __LINE__, cudaGetErrorString(err)); \
exit(-1); \
} \
}
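/* CUDA_ERRCK is intended to be placed right after a kernel launch or API call:
 * it polls cudaGetLastError() and aborts with the failing source line when an
 * error is pending. */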
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
/* Values in the k-space coordinate system are stored in constant memory
* on the GPU */
__constant__ __device__ kValues ck[KERNEL_Q_K_ELEMS_PER_GRID];
__global__ void
ComputePhiMag_GPU(float* phiR, float* phiI, float* phiMag, int numK) {
int indexK = blockIdx.x*KERNEL_PHI_MAG_THREADS_PER_BLOCK + threadIdx.x;
if (indexK < numK) {
float real = phiR[indexK];
float imag = phiI[indexK];
phiMag[indexK] = real*real + imag*imag;
}
}
__global__ void
ComputeQ_GPU(int numK, int kGlobalIndex,
float* x, float* y, float* z, float* Qr , float* Qi)
{
float sX;
float sY;
float sZ;
float sQr;
float sQi;
// Determine the element of the X arrays computed by this thread
int xIndex = blockIdx.x*KERNEL_Q_THREADS_PER_BLOCK + threadIdx.x;
// Read this thread's sample values from global memory into registers
sX = x[xIndex];
sY = y[xIndex];
sZ = z[xIndex];
sQr = Qr[xIndex];
sQi = Qi[xIndex];
// Loop over all elements of K in constant mem to compute a partial value
// for X.
int kIndex = 0;
if (numK % 2) {
float expArg = PIx2 * (ck[0].Kx * sX + ck[0].Ky * sY + ck[0].Kz * sZ);
sQr += ck[0].PhiMag * cos(expArg);
sQi += ck[0].PhiMag * sin(expArg);
kIndex++;
kGlobalIndex++;
}
for (; (kIndex < KERNEL_Q_K_ELEMS_PER_GRID) && (kGlobalIndex < numK);
kIndex += 2, kGlobalIndex += 2) {
float expArg = PIx2 * (ck[kIndex].Kx * sX +
ck[kIndex].Ky * sY +
ck[kIndex].Kz * sZ);
sQr += ck[kIndex].PhiMag * cos(expArg);
sQi += ck[kIndex].PhiMag * sin(expArg);
int kIndex1 = kIndex + 1;
float expArg1 = PIx2 * (ck[kIndex1].Kx * sX +
ck[kIndex1].Ky * sY +
ck[kIndex1].Kz * sZ);
sQr += ck[kIndex1].PhiMag * cos(expArg1);
sQi += ck[kIndex1].PhiMag * sin(expArg1);
}
Qr[xIndex] = sQr;
Qi[xIndex] = sQi;
} |
3d44d3f941424962642db17e8c757e26bf6959f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _IMAGEFILTER_KERNEL_H_
#define _IMAGEFILTER_KERNEL_H_
#define BOUND 4
#define THREAD_PER_WARP 32
#define SQUARE_WIDTH 120
__global__ void imageFilterKernelPartA(char3* inputPixels, char3* outputPixels, uint width, uint height, int pxls_per_thrd)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int idx = 0; idx < pxls_per_thrd; idx ++){
int currIdx = index * pxls_per_thrd + idx;
int idx_x = currIdx % width;
int idx_y = currIdx / width;
int3 sum = {0, 0, 0};
int count = 0;
for(int i = -BOUND; i <= BOUND; i++)
{
for(int j = -BOUND; j <= BOUND; j++)
{
if(((idx_x + i) >= 0) && ((idx_x + i) < width) && ((idx_y + j) >= 0) && ((idx_y + j) < height))
{
int target = currIdx + j * width + i;
sum.x += (int)inputPixels[target].x;
sum.y += (int)inputPixels[target].y;
sum.z += (int)inputPixels[target].z;
count++;
}
}
}
outputPixels[currIdx].x = sum.x/count;
outputPixels[currIdx].y = sum.y/count;
outputPixels[currIdx].z = sum.z/count;
}
}
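// Part B applies the same 9x9 box average as Part A but maps pixels to threads
// in a strided layout: thread t processes pixels t, t + num_thread,
// t + 2*num_thread, ..., whereas Part A gives each thread a contiguous run of
// pxls_per_thrd pixels.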
__global__ void imageFilterKernelPartB(char3* inputPixels, char3* outputPixels, uint width, uint height, int pxls_per_thrd, int num_thread)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int idx = 0; idx < pxls_per_thrd; idx++)
{
int currIdx = idx * num_thread + index;
int3 sum = {0, 0, 0};
int count = 0;
int idx_x = currIdx % width;
int idx_y = currIdx / width;
for(int i = -BOUND; i <= BOUND; i++)
{
for(int j = -BOUND; j <= BOUND; j++)
{
if(((idx_x + i) >= 0) && ((idx_x + i) < width) && ((idx_y + j) >= 0) && ((idx_y + j) < height))
{
int target = currIdx + j * width + i;
sum.x += (int)inputPixels[target].x;
sum.y += (int)inputPixels[target].y;
sum.z += (int)inputPixels[target].z;
count++;
}
}
}
outputPixels[currIdx].x = sum.x/count;
outputPixels[currIdx].y = sum.y/count;
outputPixels[currIdx].z = sum.z/count;
}
}
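// Part C tiles the image: each block stages a 128x128 window (a SQUARE_WIDTH
// tile of 120 pixels plus the 4-pixel halo required by the filter) in shared
// memory, averages interior pixels from the tile, and falls back to global
// memory for pixels that sit in the image border. A block walks over several
// such tiles (num_loops iterations).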
__global__ void imageFilterKernelPartC(char3* inputPixels, char3* outputPixels, uint width, uint height, int blocks_row, int blocks_col, int num_loops)
{
__shared__ char3 tile[128 * 128];
int shared_x = threadIdx.x % THREAD_PER_WARP;
int shared_y = threadIdx.x / THREAD_PER_WARP;
for(int i = 0; i < num_loops; i++)
{
int global_x = (blockIdx.x + i * 12) % blocks_row;
int global_y = (blockIdx.x + i * 12) / blocks_row;
for(int k = 0; k < BOUND; k++)
{
for(int j = 0; j < BOUND; j++)
{
int idx = (global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) * width + global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP;
int shared_idx = (shared_y + k * THREAD_PER_WARP) * 128 + shared_x + j * THREAD_PER_WARP;
if( ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) >=0)
&& ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) < height)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) >= 0)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) < width))
{
tile[shared_idx] = inputPixels[idx];
}
}
}
__syncthreads();
for(int k = 0; k < BOUND; k++)
{
for(int j = 0; j < BOUND; j++)
{
if((shared_x + j * THREAD_PER_WARP >= BOUND) && (shared_x + j * THREAD_PER_WARP <= 123) && (shared_y + k * THREAD_PER_WARP >= BOUND) && (shared_y + k * THREAD_PER_WARP <= 123))
{
int3 sum = {0, 0, 0};
int count = 0;
for(int dx = -BOUND; dx <= BOUND; dx++)
{
for(int dy = -BOUND; dy <= BOUND; dy++)
{
sum.x += (int)tile[(shared_y + k * THREAD_PER_WARP + dy) * 128 + (shared_x + dx) + j * THREAD_PER_WARP].x;
sum.y += (int)tile[(shared_y + k * THREAD_PER_WARP + dy) * 128 + (shared_x + dx) + j * THREAD_PER_WARP].y;
sum.z += (int)tile[(shared_y + k * THREAD_PER_WARP + dy) * 128 + (shared_x + dx) + j * THREAD_PER_WARP].z;
count++;
}
}
int out_idx = (global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) * width + global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP;
if( ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) >=0)
&& ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) < height)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) >= 0)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) < width))
{
outputPixels[out_idx].x = sum.x / count;
outputPixels[out_idx].y = sum.y / count;
outputPixels[out_idx].z = sum.z / count;
}
}
/* Handle pixels that lie within the 4-pixel-wide image border; their neighborhood is read directly from global memory */
if( (global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) <= 3
|| ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) >= height-BOUND && (global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) < height)
|| (global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) <= 3
|| ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) >= width-BOUND && (global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) < width))
{
int3 sum = {0, 0, 0};
int count = 0;
for(int dx = -BOUND; dx <= BOUND; dx++)
{
for(int dy = -BOUND; dy <= BOUND; dy++)
{
if( ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP + dy) >=0)
&& ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP + dy) < height)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP + dx) >= 0)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP + dx) < width))
{
sum.x += (int)inputPixels[(global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP + dy) * width + (global_x * SQUARE_WIDTH + shared_x + dx) + j * THREAD_PER_WARP].x;
sum.y += (int)inputPixels[(global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP + dy) * width + (global_x * SQUARE_WIDTH + shared_x + dx) + j * THREAD_PER_WARP].y;
sum.z += (int)inputPixels[(global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP + dy) * width + (global_x * SQUARE_WIDTH + shared_x + dx) + j * THREAD_PER_WARP].z;
count++;
}
}
}
int out_boundary = (global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) * width + global_x * SQUARE_WIDTH + shared_x + j * 32;
if( ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) >=0)
&& ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) < height)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) >= 0)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) < width))
{
outputPixels[out_boundary].x = sum.x / count;
outputPixels[out_boundary].y = sum.y / count;
outputPixels[out_boundary].z = sum.z / count;
}
}
}
}
__syncthreads();
}
}
#endif // _IMAGEFILTER_KERNEL_H_
| 3d44d3f941424962642db17e8c757e26bf6959f0.cu | #ifndef _IMAGEFILTER_KERNEL_H_
#define _IMAGEFILTER_KERNEL_H_
#define BOUND 4
#define THREAD_PER_WARP 32
#define SQUARE_WIDTH 120
__global__ void imageFilterKernelPartA(char3* inputPixels, char3* outputPixels, uint width, uint height, int pxls_per_thrd)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int idx = 0; idx < pxls_per_thrd; idx ++){
int currIdx = index * pxls_per_thrd + idx;
int idx_x = currIdx % width;
int idx_y = currIdx / width;
int3 sum = {0, 0, 0};
int count = 0;
for(int i = -BOUND; i <= BOUND; i++)
{
for(int j = -BOUND; j <= BOUND; j++)
{
if(((idx_x + i) >= 0) && ((idx_x + i) < width) && ((idx_y + j) >= 0) && ((idx_y + j) < height))
{
int target = currIdx + j * width + i;
sum.x += (int)inputPixels[target].x;
sum.y += (int)inputPixels[target].y;
sum.z += (int)inputPixels[target].z;
count++;
}
}
}
outputPixels[currIdx].x = sum.x/count;
outputPixels[currIdx].y = sum.y/count;
outputPixels[currIdx].z = sum.z/count;
}
}
__global__ void imageFilterKernelPartB(char3* inputPixels, char3* outputPixels, uint width, uint height, int pxls_per_thrd, int num_thread)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int idx = 0; idx < pxls_per_thrd; idx++)
{
int currIdx = idx * num_thread + index;
int3 sum = {0, 0, 0};
int count = 0;
int idx_x = currIdx % width;
int idx_y = currIdx / width;
for(int i = -BOUND; i <= BOUND; i++)
{
for(int j = -BOUND; j <= BOUND; j++)
{
if(((idx_x + i) >= 0) && ((idx_x + i) < width) && ((idx_y + j) >= 0) && ((idx_y + j) < height))
{
int target = currIdx + j * width + i;
sum.x += (int)inputPixels[target].x;
sum.y += (int)inputPixels[target].y;
sum.z += (int)inputPixels[target].z;
count++;
}
}
}
outputPixels[currIdx].x = sum.x/count;
outputPixels[currIdx].y = sum.y/count;
outputPixels[currIdx].z = sum.z/count;
}
}
__global__ void imageFilterKernelPartC(char3* inputPixels, char3* outputPixels, uint width, uint height, int blocks_row, int blocks_col, int num_loops)
{
__shared__ char3 tile[128 * 128];
int shared_x = threadIdx.x % THREAD_PER_WARP;
int shared_y = threadIdx.x / THREAD_PER_WARP;
for(int i = 0; i < num_loops; i++)
{
int global_x = (blockIdx.x + i * 12) % blocks_row;
int global_y = (blockIdx.x + i * 12) / blocks_row;
for(int k = 0; k < BOUND; k++)
{
for(int j = 0; j < BOUND; j++)
{
int idx = (global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) * width + global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP;
int shared_idx = (shared_y + k * THREAD_PER_WARP) * 128 + shared_x + j * THREAD_PER_WARP;
if( ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) >=0)
&& ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) < height)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) >= 0)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) < width))
{
tile[shared_idx] = inputPixels[idx];
}
}
}
__syncthreads();
for(int k = 0; k < BOUND; k++)
{
for(int j = 0; j < BOUND; j++)
{
if((shared_x + j * THREAD_PER_WARP >= BOUND) && (shared_x + j * THREAD_PER_WARP <= 123) && (shared_y + k * THREAD_PER_WARP >= BOUND) && (shared_y + k * THREAD_PER_WARP <= 123))
{
int3 sum = {0, 0, 0};
int count = 0;
for(int dx = -BOUND; dx <= BOUND; dx++)
{
for(int dy = -BOUND; dy <= BOUND; dy++)
{
sum.x += (int)tile[(shared_y + k * THREAD_PER_WARP + dy) * 128 + (shared_x + dx) + j * THREAD_PER_WARP].x;
sum.y += (int)tile[(shared_y + k * THREAD_PER_WARP + dy) * 128 + (shared_x + dx) + j * THREAD_PER_WARP].y;
sum.z += (int)tile[(shared_y + k * THREAD_PER_WARP + dy) * 128 + (shared_x + dx) + j * THREAD_PER_WARP].z;
count++;
}
}
int out_idx = (global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) * width + global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP;
if( ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) >=0)
&& ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) < height)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) >= 0)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) < width))
{
outputPixels[out_idx].x = sum.x / count;
outputPixels[out_idx].y = sum.y / count;
outputPixels[out_idx].z = sum.z / count;
}
}
/* Handle pixels that lie within the 4-pixel-wide image border; their neighborhood is read directly from global memory */
if( (global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) <= 3
|| ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) >= height-BOUND && (global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) < height)
|| (global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) <= 3
|| ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) >= width-BOUND && (global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) < width))
{
int3 sum = {0, 0, 0};
int count = 0;
for(int dx = -BOUND; dx <= BOUND; dx++)
{
for(int dy = -BOUND; dy <= BOUND; dy++)
{
if( ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP + dy) >=0)
&& ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP + dy) < height)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP + dx) >= 0)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP + dx) < width))
{
sum.x += (int)inputPixels[(global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP + dy) * width + (global_x * SQUARE_WIDTH + shared_x + dx) + j * THREAD_PER_WARP].x;
sum.y += (int)inputPixels[(global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP + dy) * width + (global_x * SQUARE_WIDTH + shared_x + dx) + j * THREAD_PER_WARP].y;
sum.z += (int)inputPixels[(global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP + dy) * width + (global_x * SQUARE_WIDTH + shared_x + dx) + j * THREAD_PER_WARP].z;
count++;
}
}
}
int out_boundary = (global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) * width + global_x * SQUARE_WIDTH + shared_x + j * 32;
if( ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) >=0)
&& ((global_y * SQUARE_WIDTH + shared_y + k * THREAD_PER_WARP) < height)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) >= 0)
&& ((global_x * SQUARE_WIDTH + shared_x + j * THREAD_PER_WARP) < width))
{
outputPixels[out_boundary].x = sum.x / count;
outputPixels[out_boundary].y = sum.y / count;
outputPixels[out_boundary].z = sum.z / count;
}
}
}
}
__syncthreads();
}
}
#endif // _IMAGEFILTER_KERNEL_H_
|
acf2f717d4e410e35c1a3aeddc9848938fabb6ce.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
//#include <time.h>
//#include <cutil.h>
using namespace std;
# define r 40
# define M 1000 // number of items
# define N 90 // number of transactions
# define alpha 1 // represents the weight of the support in the first fitness function
# define Beta 1 // represents the weight of the confidence in the first fitness function
# define k 15 // number of bees
struct ligne {int trans[N]; int nb;} *lg;
struct bee {int solution[N]; float cost;} *be;
/**************prototype declaration*******/
void read_trans(ligne T[]);// this function reads the transactional database and inserts it into the dataset vector
void display_dataset(ligne T[]); //this function displays the transactional database
void display_solution(bee S); // this function displays the current solution with its cost
float support_rule(ligne T[], int s[]); // this function calculates the support of the entire solution s
float support_antecedent(ligne T[], int s[]); // this function computes the support of the antecedent of the solution s
float confidence(int sr, int sa); // it calculates the confidence of the rule
float fitness1(int sr, int sa); // computes the fitness of a given solution s
void create_Sref(bee *s, ligne V[]); // here we create the reference solution sref and initialize it randomly
bee neighborhood_computation(bee S, bee *V, ligne *D);// this function explores the local region for each bee
void search_area1(bee s, bee *T, int iteration, ligne V [],int flip); //determines the search area for each bee using the first strategy
void search_area2(bee s, bee *T, int iteration, ligne V[], int flip); //determines the search area for each bee using the second strategy
void search_area3(bee s, bee *T, int iteration, ligne V[], int distance); //determines the search area for each bee using the third strategy
int W(int t[]); // indicates the weight of the solution represented by vector t; this function is used in search_area3()
void copy(int t[], int v[]); // it copies the vector t in the vector v
int best_dance(bee *T); // return the best dance after the exploration of search region of each bee
void parallel_fitness(bee *V, ligne *D); // parallelize solution computing
void display_bees(bee T[]); // display solutions
/*************************************************************************************/
__global__ void KernelSupport_rules(bee *N_List_GPU, int **compt_GPU, struct ligne *dataset_GPU){
int thread_idx ;
thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
bool appartient=true;
int indice=blockIdx.x*1000;
indice=thread_idx-indice;
int j=1;
while (j<N){
if (N_List_GPU[blockIdx.x].solution[indice]!=0){
int l=0;
bool existe=false;
while (l< dataset_GPU[thread_idx].nb && existe==false){
if (dataset_GPU[thread_idx].trans[l]==j){
existe=true;
}
l++;
}
if (existe==false){
appartient=false;
}
}
j++;
}
if (appartient==true){
//compt_GPU[blockIdx.x][thread_idx]=1;
}
// }
}
__global__ void KernelSupport_antecedent(bee *N_List_GPU, int **compt_GPU, struct ligne *dataset_GPU){
int thread_idx ;
thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
bool appartient=true;
int indice=blockIdx.x*M;
indice=thread_idx-indice;
int j=1;
while (j<N){
if (N_List_GPU[blockIdx.x].solution[indice]==1){
int l=0;
bool existe=false;
while (l< dataset_GPU[thread_idx].nb && existe==false){
if (dataset_GPU[thread_idx].trans[l]==j){
existe=true;
}
l++;
}
if (existe==false){
appartient=false;
}
}
j++;
}
if (appartient==true){
//compt_GPU[blockIdx.x][thread_idx]=1;
}
//}
}
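/* Host-side driver of the bee swarm optimization for association rule mining:
 * load the transactional database, build a random reference solution Sref,
 * derive k search areas from it (search_area1), and for IMAX iterations let
 * every bee explore its neighborhood; the fitness alpha*support +
 * Beta*confidence (fitness1) is meant to be evaluated on the GPU through
 * parallel_fitness(), although most of that path is commented out here. */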
int main(void){
FILE *f=NULL;
f=fopen("/home/ydjenouri/mesprog/resultat1.txt","a");
struct ligne *dataset_CPU, *dataset_GPU;
struct bee *T_Dance;
struct bee *N_List_CPU;
struct bee Sref;
struct bee best;
int flip=1, distance, IMAX=1;
// int k=5;
hipEvent_t start, stop;
float elapsedTime;
int j;
/*****************************parallel program***********************/
//allocate memory on the CPU (host)
dataset_CPU = (ligne *) malloc(M * sizeof(ligne)) ;
T_Dance = (bee *) malloc(k * sizeof(bee)) ;
N_List_CPU=(bee *) malloc(k *sizeof(bee));
////allocate memory on the GPU (device)
hipMalloc( (void**) &dataset_GPU, M*sizeof(ligne));
//read transactional database and insert in the dataset_CPU
read_trans(dataset_CPU);
hipMemcpy(dataset_GPU, dataset_CPU, M * sizeof(ligne), hipMemcpyHostToDevice);
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start, 0 ) ;
create_Sref(&Sref, dataset_GPU); // create a reference solution
//display_solution(Sref);
search_area1(Sref, T_Dance, IMAX, dataset_GPU, flip);
printf("hello");
// display_bees(T_Dance);
// for ( int k=5; k<=15;k=k+5)
for ( int i=0; i<=IMAX;i++)
{
for ( j=0;j<k;j++) // neighborhood computation for all the solution in tab
{
T_Dance[j]=neighborhood_computation(T_Dance[j], N_List_CPU, dataset_GPU);
}
/*j=best_dance(T_Dance,k);
copy(T_Dance[j].solution,Sref.solution);
Sref.cost=T_Dance[j].cost;
if (Sref.cost > best.cost)// note: maximization, keep the larger cost
{
copy(Sref.solution, best.solution);
best.cost=Sref.cost;
}
*/
//display_bees(T_Dance);
// //average=best.cost+average;
//printf("\nk="+b.k+" IMAX="+b.IMAX+" average fitness="+average);
search_area1(Sref,T_Dance, i, dataset_GPU, flip);
} //Bso ending
hipEventRecord( stop, 0 ) ;
hipEventSynchronize( stop ) ;
hipEventElapsedTime( &elapsedTime,start, stop ) ;
printf("K=%d IMAX=%d Execution Time in GPU : %3.1f ms\n", k,IMAX, elapsedTime );
// fprintf(f,"K=%d IMAX=%d flip=%d Execution Time in GPU : %3.1f ms\n", k,flip,IMAX, elapsedTime );
printf("Yes\n");
hipEventDestroy( start );
hipEventDestroy( stop );
//}// end loop IMAX
//} // end loop flip
//} // end loop k
//fclose(f);
hipFree(dataset_GPU);
return 0;
}
/**********************copy t in v********/
void copy(int t[], int v[])
{
for (int i=0;i<N; i++)
{
v[i]=t[i];
}
}
/*******read the transactional database and insert it into the dataset structure********************************/
void read_trans(ligne T[]){
char c='4';
char t[100];
int j;
int i=0;
int l=0;
FILE *f=NULL;
f=fopen("/home/ydjenouri/mesprog/T_90_1000.txt","r");
if (f!=NULL) {
//cout<<"the file is succefully opened"<<endl;
j=0;
while (c!=EOF){
c=fgetc(f);
if (c==' '){
t[j]='\0';
T[i].trans[l]=atoi(t);
l++;
j=0;
}
if (c=='\n'){
T[i].nb=l;
l=0;
i++;
j=0;
}
if (c!=' ' && c!='\n'){
t[j]=c;
j++;
}
}
fclose(f);
}
}
/*************************compute the support of the solution s**********/
float support_rule(ligne T[], int s[])
{
float compt=0;
for (int i=0; i<M; i++)
{
bool appartient=true;
int j=1;
while (j<N)
{
if (s[j]!=0)
{
int l=0;
bool existe=false;
while (l< T[i].nb && existe==false)
{
if (T[i].trans[l]==j)
{existe=true;}
l++;
}
if (existe==false){appartient=false;}
}
j++;
}
if (appartient==true) {compt++;}
}
compt=compt/M;
return compt;
}
/*****************************support antecedent computing*****************************/
float support_antecedent(ligne T[], int s[])
{
float compt=0;
for (int i=0; i<M; i++)
{
bool appartient=true;
int j=1;
while (j<N)
{
if (s[j]==1 ||s[j]==2)
{
int l=0;
bool existe=false;
while (l< T[i].nb && existe==false)
{
if (T[i].trans[l]==j)
{existe=true;}
l++;
}
if (existe==false){appartient=false;}
}
j++;
}
if (appartient==true) {compt++;}
}
compt=compt/M;
//if(compt!=0)System.out.println("antecedent"+compt);
return compt;
}
/****************************confidence computing**************************/
float confidence(int sr, int sa)
{
float conf=1;
conf=(float)sr/sa;
return conf;
}
/***********************evaluation of the solution s******/
float fitness1(int sr, int sa)
{
float cost=0;
//if (support_rule(sol)<Minsup || confidence(sol)<Minconf){cout=-1;}
float x=(float)alpha*(sr/M);
float y=(float)Beta*confidence(sr,sa);
cost=x+y;
return cost;
}
/**************************display_solution*****************/
void display_solution(bee S)
{
for (int i=0;i<N;i++)
{
printf("%d ", S.solution[i]);
}
printf ("cost is:%f",S.cost);
printf("\n");
}
/*********************create a solution reference Sref******************************************/
void create_Sref(bee *s, ligne V[])
{
for (int i=0;i<N;i++){
if (rand() % 2==0){
(*s).solution[i]=0 ;
}
else {
if (rand() % 2==0){
(*s).solution[i]=0;
}
else {
(*s).solution[i]=rand() % 3;
}
}
}
//parallel_fitness(s, V);
}
/***********************************neighborhood computation************************/
bee neighborhood_computation(bee S, bee *V, ligne *D)
{
bee s;
int indice=0;
int i=0;
bee neighbor, best_neighbor;
float best_cost=0;
//copy(S.solution,best_neighbor);
copy(S.solution,neighbor.solution);
while (i<k)
{
if (neighbor.solution[indice]==0)
{
if (rand()%2==0)
{neighbor.solution[indice]=1;}
else{neighbor.solution[indice]=2;}
}
else {
if (neighbor.solution[indice]==1)
{
if (rand()%2==0)
neighbor.solution[indice]=0;
else {
neighbor.solution[indice]=2;
}
}
else {
if (neighbor.solution[indice]==2)
{
if (rand()%2==0)
neighbor.solution[indice]=0;
else {
neighbor.solution[indice]=1;
}
}
}
}
indice++;
if (indice>=N){indice=0;}
copy(neighbor.solution,V[i].solution);
i++;
/*if (neighbor.cost>best_cost){copy(neighbor.solution,best_neighbor.solution);
best_cost=neighbor.cost;}*/
}
parallel_fitness(V, D);
//copy(best_neighbor.solution, s.solution);
//s.cost=best_cost;
s.cost=0;
return s;
}
/************************determination of search area********************/
void search_area1(bee s, bee *T, int iteration, ligne V[],int flip)
{
int indice=iteration % N;
int i=0;
while (i<k)
{
for (int j=0;j<N;j++)
{
T[i].solution[j]=s.solution[j];
}
if (T[i].solution[indice]==0)
{
if (iteration%4==0)
{T[i].solution[indice]=1;}
else{T[i].solution[indice]=2;}
// }
}
else{
if (T[i].solution[indice]==1)
{ if (iteration%3==0)
{T[i].solution[indice]=0;}
else{T[i].solution[indice]=2;}
}
else{
if (iteration%2==0)
{
T[i].solution[indice]=1;}
else{
T[i].solution[indice]=0;}
}
}
indice=indice+flip;
if (indice>=N){indice=0;}
parallel_fitness(&T[i], V);
//T_Dance[i].cost=fitness1(T_Dance[i].solution);//evaluer solution
i++;
}
}
/**************search 2*********************/
void search_area2(bee s, bee *T, int iteration, ligne V[], int flip)
{
int i=0;
int Nb_sol=0;
bool stop=false;
while (i<N && stop==false)
{
for (int j=0;j<N;j++)
{
T[Nb_sol].solution[j]=s.solution[j];
}
for (int l=i;l<(i+flip)%N;l++)
{
if ( T[Nb_sol].solution[l]==0)
{
if (rand()%2==1)
{ T[Nb_sol].solution[l]=1;}
else{T[Nb_sol].solution[l]=2;}
}
else {
if (T[Nb_sol].solution[l]==1)
{
if (rand()%2==1)
{T[Nb_sol].solution[l]=0;}
else{T[Nb_sol].solution[l]=2;}
}
else {
if (T[Nb_sol].solution[l]==2)
{
if (rand()%2==0)
{T[Nb_sol].solution[l]=0;}
else{T[Nb_sol].solution[l]=1;}
}
}
}
}
parallel_fitness(&T[i], V);
//T_Dance[Nb_sol].cost=fitness1(T_Dance[Nb_sol].solution); //evaluates the solution
Nb_sol++;
if (Nb_sol==k){stop=true;}
}
}
/********search3***************************/
int W(int t[])
{
int w=0;
for (int i=0;i<N; i++)
{
w=w+t[i];
}
return w;
}
/*******search 3 continued****************************/
void search_area3(bee s, bee *T, int iteration, ligne V[], int distance)
{
int Nb_sol=0;
while (Nb_sol!=k)
{
for (int j=0;j<N;j++)
{
T[Nb_sol].solution[j]=s.solution[j];
}
int l=0;
int cpt=0;
while (cpt<distance)
{
if (T[Nb_sol].solution[l]==0)
{
if (rand()%2==1)
{T[Nb_sol].solution[l]=1; cpt++;}
else{T[Nb_sol].solution[l]=2;cpt=cpt+2;}
}
else {
if (T[Nb_sol].solution[l]==1)
{
if (rand()%2==0)
{T[Nb_sol].solution[l]=0;cpt++;}
else{T[Nb_sol].solution[l]=2;cpt++;}
}
else {
if (T[Nb_sol].solution[l]==2)
{
if (rand()%2==0)
{T[Nb_sol].solution[l]=0;cpt=cpt+2;}
else{T[Nb_sol].solution[l]=1;cpt=cpt+1;}
}
}
}
l=(l+1)%N;
} //end the small while
//parallel_fitness(&T[Nb_sol], V);
//T_Dance[Nb_sol].cost=fitness1(T_Dance[Nb_sol].solution);//assecees the solution
Nb_sol++;
} // end the big while
}
/********************************best dance********************/
int best_dance(bee *T)
{
float max=T[0].cost;
int indice=0;
for (int i=1;i<k;i++)
{
if (T[i].cost>max)
{
max=T[i].cost;
indice=i;
}
}
return indice;
}
/***********************parallelize solution computing*******/
void parallel_fitness(bee *N_List_CPU, ligne V[])
{
bee *N_List_GPU;
//int **compt;
//compt = (int **) malloc(k*M*sizeof(int));
/*for (int i=0;i<k;i++){
for (int j=0;i<M;j++){
compt[i][j]=0;
}
}*/
int **compt_GPU;
// hipEventCreate( &start );
// hipEventCreate( &stop );
// hipEventRecord( start, 0 ) ;
hipMalloc((void**) &N_List_GPU, k*sizeof(bee));
//hipMalloc( (void**) &compt_GPU, k*M* sizeof(int));
hipMemcpy(N_List_GPU, N_List_CPU, k *sizeof(bee),hipMemcpyHostToDevice);
//hipMemcpy(compt_GPU, compt, k*M *sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( KernelSupport_rules), dim3(20*N),dim3(M), 0, 0, N_List_GPU, compt_GPU, V);
//hipMemcpy(compt, compt_GPU, k*M*sizeof(int),hipMemcpyDeviceToHost);
/*int sr=0;
for (int i=0;i<M;i++){
sr=sr+compt[i];
}
KernelSupport_antecedent<<<20*N,M>>>(s_GPU, compt_GPU, V);
hipMemcpy(compt, compt_GPU, M*sizeof(int),hipMemcpyDeviceToHost);
int sa=0;
for (int i=0;i<M;i++){
sa=sa+compt[i];
}
(*sol).cost=fitness1(sr,sa);*/
}
/*****************************display T_dance************/
void display_bees(bee T[])
{
//FILE *f=NULL;
//f=fopen("/home/ydjenouri/mesprog/resultat1.txt","a");
//if (f!=NULL) {
for (int i=0;i<k;i++)
{
for (int j=0;j<N;j++)
{
printf ("%d ",T[i].solution[j]);
}
printf("%f", T[i].cost);
printf("\n");
}
//fclose(f);
//}
}
| acf2f717d4e410e35c1a3aeddc9848938fabb6ce.cu | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
//#include <time.h>
//#include <cutil.h>
using namespace std;
# define r 40
# define M 1000 // number of items
# define N 90 // number of transactions
# define alpha 1 // represents the weight of the support in the first fitness function
# define Beta 1 // represents the weight of the confidence in the first fitness function
# define k 15 // number of bees
struct ligne {int trans[N]; int nb;} *lg;
struct bee {int solution[N]; float cost;} *be;
/**************prototype declaration*******/
void read_trans(ligne T[]);// this function reads the transactional database and inserts it into the dataset vector
void display_dataset(ligne T[]); //this function displays the transactional database
void display_solution(bee S); // this function displays the current solution with its cost
float support_rule(ligne T[], int s[]); // this function calculates the support of the entire solution s
float support_antecedent(ligne T[], int s[]); // this function computes the support of the antecedent of the solution s
float confidence(int sr, int sa); // it calculates the confidence of the rule
float fitness1(int sr, int sa); // computes the fitness of a given solution s
void create_Sref(bee *s, ligne V[]); // here we create the reference solution sref and initialize it randomly
bee neighborhood_computation(bee S, bee *V, ligne *D);// this function explores the local region for each bee
void search_area1(bee s, bee *T, int iteration, ligne V [],int flip); //determines the search area for each bee using the first strategy
void search_area2(bee s, bee *T, int iteration, ligne V[], int flip); //determines the search area for each bee using the second strategy
void search_area3(bee s, bee *T, int iteration, ligne V[], int distance); //determines the search area for each bee using the third strategy
int W(int t[]); // indicates the weight of the solution represented by vector t; this function is used in search_area3()
void copy(int t[], int v[]); // it copies the vector t in the vector v
int best_dance(bee *T); // return the best dance after the exploration of search region of each bee
void parallel_fitness(bee *V, ligne *D); // parallelize solution computing
void display_bees(bee T[]); // display solutions
/*************************************************************************************/
__global__ void KernelSupport_rules(bee *N_List_GPU, int **compt_GPU, struct ligne *dataset_GPU){
int thread_idx ;
thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
bool appartient=true;
int indice=blockIdx.x*1000;
indice=thread_idx-indice;
int j=1;
while (j<N){
if (N_List_GPU[blockIdx.x].solution[indice]!=0){
int l=0;
bool existe=false;
while (l< dataset_GPU[thread_idx].nb && existe==false){
if (dataset_GPU[thread_idx].trans[l]==j){
existe=true;
}
l++;
}
if (existe==false){
appartient=false;
}
}
j++;
}
if (appartient==true){
//compt_GPU[blockIdx.x][thread_idx]=1;
}
// }
}
__global__ void KernelSupport_antecedent(bee *N_List_GPU, int **compt_GPU, struct ligne *dataset_GPU){
int thread_idx ;
thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
bool appartient=true;
int indice=blockIdx.x*M;
indice=thread_idx-indice;
int j=1;
while (j<N){
if (N_List_GPU[blockIdx.x].solution[indice]==1){
int l=0;
bool existe=false;
while (l< dataset_GPU[thread_idx].nb && existe==false){
if (dataset_GPU[thread_idx].trans[l]==j){
existe=true;
}
l++;
}
if (existe==false){
appartient=false;
}
}
j++;
}
if (appartient==true){
//compt_GPU[blockIdx.x][thread_idx]=1;
}
//}
}
int main(void){
FILE *f=NULL;
f=fopen("/home/ydjenouri/mesprog/resultat1.txt","a");
struct ligne *dataset_CPU, *dataset_GPU;
struct bee *T_Dance;
struct bee *N_List_CPU;
struct bee Sref;
struct bee best;
int flip=1, distance, IMAX=1;
// int k=5;
cudaEvent_t start, stop;
float elapsedTime;
int j;
/*****************************parallel program***********************/
//allocate memory on the CPU (host)
dataset_CPU = (ligne *) malloc(M * sizeof(ligne)) ;
T_Dance = (bee *) malloc(k * sizeof(bee)) ;
N_List_CPU=(bee *) malloc(k *sizeof(bee));
////allocate memory on the GPU (device)
cudaMalloc( (void**) &dataset_GPU, M*sizeof(ligne));
//read transactional database and insert in the dataset_CPU
read_trans(dataset_CPU);
cudaMemcpy(dataset_GPU, dataset_CPU, M * sizeof(ligne), cudaMemcpyHostToDevice);
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start, 0 ) ;
create_Sref(&Sref, dataset_GPU); // create a reference solution
//display_solution(Sref);
search_area1(Sref, T_Dance, IMAX, dataset_GPU, flip);
printf("hello");
// display_bees(T_Dance);
// for ( int k=5; k<=15;k=k+5)
for ( int i=0; i<=IMAX;i++)
{
for ( j=0;j<k;j++) // neighborhood computation for all the solution in tab
{
T_Dance[j]=neighborhood_computation(T_Dance[j], N_List_CPU, dataset_GPU);
}
/*j=best_dance(T_Dance,k);
copy(T_Dance[j].solution,Sref.solution);
Sref.cost=T_Dance[j].cost;
if (Sref.cost > best.cost)// note: maximization, keep the larger cost
{
copy(Sref.solution, best.solution);
best.cost=Sref.cost;
}
*/
//display_bees(T_Dance);
// //average=best.cost+average;
//printf("\nk="+b.k+" IMAX="+b.IMAX+" average fitness="+average);
search_area1(Sref,T_Dance, i, dataset_GPU, flip);
} //Bso ending
cudaEventRecord( stop, 0 ) ;
cudaEventSynchronize( stop ) ;
cudaEventElapsedTime( &elapsedTime,start, stop ) ;
printf("K=%d IMAX=%d Execution Time in GPU : %3.1f ms\n", k,IMAX, elapsedTime );
// fprintf(f,"K=%d IMAX=%d flip=%d Execution Time in GPU : %3.1f ms\n", k,flip,IMAX, elapsedTime );
printf("Yes\n");
cudaEventDestroy( start );
cudaEventDestroy( stop );
//}// end loop IMAX
//} // end loop flip
//} // end loop k
//fclose(f);
cudaFree(dataset_GPU);
return 0;
}
/**********************copy t in v********/
void copy(int t[], int v[])
{
for (int i=0;i<N; i++)
{
v[i]=t[i];
}
}
/*******read the transactional database and insert it into the dataset structure********************************/
void read_trans(ligne T[]){
char c='4';
char t[100];
int j;
int i=0;
int l=0;
FILE *f=NULL;
f=fopen("/home/ydjenouri/mesprog/T_90_1000.txt","r");
if (f!=NULL) {
//cout<<"the file is succefully opened"<<endl;
j=0;
while (c!=EOF){
c=fgetc(f);
if (c==' '){
t[j]='\0';
T[i].trans[l]=atoi(t);
l++;
j=0;
}
if (c=='\n'){
T[i].nb=l;
l=0;
i++;
j=0;
}
if (c!=' ' && c!='\n'){
t[j]=c;
j++;
}
}
fclose(f);
}
}
/*************************compute the support of the solution s**********/
float support_rule(ligne T[], int s[])
{
float compt=0;
for (int i=0; i<M; i++)
{
bool appartient=true;
int j=1;
while (j<N)
{
if (s[j]!=0)
{
int l=0;
bool existe=false;
while (l< T[i].nb && existe==false)
{
if (T[i].trans[l]==j)
{existe=true;}
l++;
}
if (existe==false){appartient=false;}
}
j++;
}
if (appartient==true) {compt++;}
}
compt=compt/M;
return compt;
}
/*****************************support antecedent computing*****************************/
float support_antecedent(ligne T[], int s[])
{
float compt=0;
for (int i=0; i<M; i++)
{
bool appartient=true;
int j=1;
while (j<N)
{
if (s[j]==1 ||s[j]==2)
{
int l=0;
bool existe=false;
while (l< T[i].nb && existe==false)
{
if (T[i].trans[l]==j)
{existe=true;}
l++;
}
if (existe==false){appartient=false;}
}
j++;
}
if (appartient==true) {compt++;}
}
compt=compt/M;
//if(compt!=0)System.out.println("antecedent"+compt);
return compt;
}
/****************************confidence computing**************************/
float confidence(int sr, int sa)
{
float conf=1;
conf=(float)sr/sa;
return conf;
}
/***********************evaluation of the solution s******/
float fitness1(int sr, int sa)
{
float cost=0;
//if (support_rule(sol)<Minsup || confidence(sol)<Minconf){cout=-1;}
float x=(float)alpha*(sr/M);
float y=(float)Beta*confidence(sr,sa);
cost=x+y;
return cost;
}
/**************************display_solution*****************/
void display_solution(bee S)
{
for (int i=0;i<N;i++)
{
printf("%d ", S.solution[i]);
}
printf ("cost is:%f",S.cost);
printf("\n");
}
/*********************create a solution reference Sref******************************************/
void create_Sref(bee *s, ligne V[])
{
for (int i=0;i<N;i++){
if (rand() % 2==0){
(*s).solution[i]=0 ;
}
else {
if (rand() % 2==0){
(*s).solution[i]=0;
}
else {
(*s).solution[i]=rand() % 3;
}
}
}
//parallel_fitness(s, V);
}
/***********************************neighborhood computation************************/
bee neighborhood_computation(bee S, bee *V, ligne *D)
{
bee s;
int indice=0;
int i=0;
bee neighbor, best_neighbor;
float best_cost=0;
//copy(S.solution,best_neighbor);
copy(S.solution,neighbor.solution);
while (i<k)
{
if (neighbor.solution[indice]==0)
{
if (rand()%2==0)
{neighbor.solution[indice]=1;}
else{neighbor.solution[indice]=2;}
}
else {
if (neighbor.solution[indice]==1)
{
if (rand()%2==0)
neighbor.solution[indice]=0;
else {
neighbor.solution[indice]=2;
}
}
else {
if (neighbor.solution[indice]==2)
{
if (rand()%2==0)
neighbor.solution[indice]=0;
else {
neighbor.solution[indice]=1;
}
}
}
}
indice++;
if (indice>=N){indice=0;}
copy(neighbor.solution,V[i].solution);
i++;
/*if (neighbor.cost>best_cost){copy(neighbor.solution,best_neighbor.solution);
best_cost=neighbor.cost;}*/
}
parallel_fitness(V, D);
//copy(best_neighbor.solution, s.solution);
//s.cost=best_cost;
s.cost=0;
return s;
}
/************************determination of search area********************/
void search_area1(bee s, bee *T, int iteration, ligne V[],int flip)
{
int indice=iteration % N;
int i=0;
while (i<k)
{
for (int j=0;j<N;j++)
{
T[i].solution[j]=s.solution[j];
}
if (T[i].solution[indice]==0)
{
if (iteration%4==0)
{T[i].solution[indice]=1;}
else{T[i].solution[indice]=2;}
// }
}
else{
if (T[i].solution[indice]==1)
{ if (iteration%3==0)
{T[i].solution[indice]=0;}
else{T[i].solution[indice]=2;}
}
else{
if (iteration%2==0)
{
T[i].solution[indice]=1;}
else{
T[i].solution[indice]=0;}
}
}
indice=indice+flip;
if (indice>=N){indice=0;}
parallel_fitness(&T[i], V);
//T_Dance[i].cost=fitness1(T_Dance[i].solution);//evaluer solution
i++;
}
}
/**************search 2*********************/
void search_area2(bee s, bee *T, int iteration, ligne V[], int flip)
{
int i=0;
int Nb_sol=0;
bool stop=false;
while (i<N && stop==false)
{
for (int j=0;j<N;j++)
{
T[Nb_sol].solution[j]=s.solution[j];
}
for (int l=i;l<(i+flip)%N;l++)
{
if ( T[Nb_sol].solution[l]==0)
{
if (rand()%2==1)
{ T[Nb_sol].solution[l]=1;}
else{T[Nb_sol].solution[l]=2;}
}
else {
if (T[Nb_sol].solution[l]==1)
{
if (rand()%2==1)
{T[Nb_sol].solution[l]=0;}
else{T[Nb_sol].solution[l]=2;}
}
else {
if (T[Nb_sol].solution[l]==2)
{
if (rand()%2==0)
{T[Nb_sol].solution[l]=0;}
else{T[Nb_sol].solution[l]=1;}
}
}
}
}
parallel_fitness(&T[i], V);
//T_Dance[Nb_sol].cost=fitness1(T_Dance[Nb_sol].solution); //evaluates the solution
Nb_sol++;
if (Nb_sol==k){stop=true;}
}
}
/********search3***************************/
int W(int t[])
{
int w=0;
for (int i=0;i<N; i++)
{
w=w+t[i];
}
return w;
}
/*******search 3 continued****************************/
void search_area3(bee s, bee *T, int iteration, ligne V[], int distance)
{
int Nb_sol=0;
while (Nb_sol!=k)
{
for (int j=0;j<N;j++)
{
T[Nb_sol].solution[j]=s.solution[j];
}
int l=0;
int cpt=0;
while (cpt<distance)
{
if (T[Nb_sol].solution[l]==0)
{
if (rand()%2==1)
{T[Nb_sol].solution[l]=1; cpt++;}
else{T[Nb_sol].solution[l]=2;cpt=cpt+2;}
}
else {
if (T[Nb_sol].solution[l]==1)
{
if (rand()%2==0)
{T[Nb_sol].solution[l]=0;cpt++;}
else{T[Nb_sol].solution[l]=2;cpt++;}
}
else {
if (T[Nb_sol].solution[l]==2)
{
if (rand()%2==0)
{T[Nb_sol].solution[l]=0;cpt=cpt+2;}
else{T[Nb_sol].solution[l]=1;cpt=cpt+1;}
}
}
}
l=(l+1)%N;
} //end the small while
//parallel_fitness(&T[Nb_sol], V);
//T_Dance[Nb_sol].cost=fitness1(T_Dance[Nb_sol].solution);//assecees the solution
Nb_sol++;
} // end the big while
}
/********************************best dance********************/
int best_dance(bee *T)
{
float max=T[0].cost;
int indice=0;
for (int i=1;i<k;i++)
{
if (T[i].cost>max)
{
max=T[i].cost;
indice=i;
}
}
return indice;
}
/***********************parallelize solution computing*******/
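/* Offloads the fitness evaluation of the k candidate solutions: the bee list is
 * copied to the device and KernelSupport_rules is launched over it. Note that
 * in this version the cudaMalloc for compt_GPU is commented out, the reduction
 * of the per-transaction counters is disabled, and N_List_GPU is never freed. */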
void parallel_fitness(bee *N_List_CPU, ligne V[])
{
bee *N_List_GPU;
//int **compt;
//compt = (int **) malloc(k*M*sizeof(int));
/*for (int i=0;i<k;i++){
for (int j=0;i<M;j++){
compt[i][j]=0;
}
}*/
int **compt_GPU;
// cudaEventCreate( &start );
// cudaEventCreate( &stop );
// cudaEventRecord( start, 0 ) ;
cudaMalloc((void**) &N_List_GPU, k*sizeof(bee));
//cudaMalloc( (void**) &compt_GPU, k*M* sizeof(int));
cudaMemcpy(N_List_GPU, N_List_CPU, k *sizeof(bee),cudaMemcpyHostToDevice);
//cudaMemcpy(compt_GPU, compt, k*M *sizeof(int),cudaMemcpyHostToDevice);
KernelSupport_rules<<<20*N,M>>>(N_List_GPU, compt_GPU, V);
//cudaMemcpy(compt, compt_GPU, k*M*sizeof(int),cudaMemcpyDeviceToHost);
/*int sr=0;
for (int i=0;i<M;i++){
sr=sr+compt[i];
}
KernelSupport_antecedent<<<20*N,M>>>(s_GPU, compt_GPU, V);
cudaMemcpy(compt, compt_GPU, M*sizeof(int),cudaMemcpyDeviceToHost);
int sa=0;
for (int i=0;i<M;i++){
sa=sa+compt[i];
}
(*sol).cost=fitness1(sr,sa);*/
}
/*****************************display T_dance************/
void display_bees(bee T[])
{
//FILE *f=NULL;
//f=fopen("/home/ydjenouri/mesprog/resultat1.txt","a");
//if (f!=NULL) {
for (int i=0;i<k;i++)
{
for (int j=0;j<N;j++)
{
printf ("%d ",T[i].solution[j]);
}
printf("%f", T[i].cost);
printf("\n");
}
//fclose(f);
//}
}
|
461f3aab0b1acbc8f33e34d29d73432ee0769e42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// nvcc prof.cu
// nvprof --query-events
// nvprof --query-metrics
// nvprof -e divergent_branch,global_store_transaction,l1_shared_bank_conflict,l1_local_load_hit -m sm_efficiency ./a.out
//
// divergent_branch - thread divergence within a warp
// global_store_transaction - number of store transactions to global memory
// l1_shared_bank_conflict - number of shared-memory bank conflicts
// l1_local_load_hit - variables spilled from registers into local memory
// sm_efficiency - multiprocessor utilization
__global__ void kernel(float *src, float *dst, int n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < n && idy < n)
dst[idx * n + idy] = src[idy * n + idx];
}
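// Shared-memory variant: a 32x32 tile is staged in shared memory and written
// back with swapped indices so that both the global load and the global store
// are coalesced; the extra padding column (32 + 1) keeps the transposed reads
// free of shared-memory bank conflicts.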
__global__ void kernel_shared(float *src, float *dst, int n) {
__shared__ float buff[32][32 + 1];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < n && idy < n)
buff[threadIdx.x][threadIdx.y] = src[idy * n + idx];
__syncthreads();
idx = blockIdx.x * blockDim.x + threadIdx.y;
idy = blockIdx.y * blockDim.y + threadIdx.x;
if (idx < n && idy < n)
dst[idx * n + idy] = buff[threadIdx.y][threadIdx.x];
}
int main() {
int i, n = 1024;
float *src = (float *)malloc(sizeof(float) * n * n);
float *dst = (float *)malloc(sizeof(float) * n * n);
for(i = 0; i < n * n; i++)
src[i] = i;
float *dev_src, *dev_dst;
hipMalloc(&dev_src, sizeof(float) * n * n);
hipMalloc(&dev_dst, sizeof(float) * n * n);
hipMemcpy(dev_src, src, sizeof(float) * n * n, hipMemcpyHostToDevice);
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( kernel_shared), dim3(dim3(32, 32)), dim3(dim3(32, 32)) , 0, 0, dev_src, dev_dst, n);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
fprintf(stderr, "time = %f\n", time);
hipEventDestroy(stop);
hipEventDestroy(start);
hipMemcpy(dst, dev_dst, sizeof(float) * n * n, hipMemcpyDeviceToHost);
hipFree(dev_src);
hipFree(dev_dst);
for(i = 0; i < n * n; i++)
if (src[i] != dst[(i % n) * n + (i / n)]) {
fprintf(stderr, "Error\n");
break;
}
free(src);
free(dst);
return 0;
}
| 461f3aab0b1acbc8f33e34d29d73432ee0769e42.cu | #include <stdio.h>
// nvcc prof.cu
// nvprof --query-events
// nvprof --query-metrics
// nvprof -e divergent_branch,global_store_transaction,l1_shared_bank_conflict,l1_local_load_hit -m sm_efficiency ./a.out
//
// divergent_branch - thread divergence within a warp
// global_store_transaction - number of store transactions to global memory
// l1_shared_bank_conflict - number of shared-memory bank conflicts
// l1_local_load_hit - variables spilled from registers into local memory
// sm_efficiency - multiprocessor utilization
__global__ void kernel(float *src, float *dst, int n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < n && idy < n)
dst[idx * n + idy] = src[idy * n + idx];
}
__global__ void kernel_shared(float *src, float *dst, int n) {
__shared__ float buff[32][32 + 1];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < n && idy < n)
buff[threadIdx.x][threadIdx.y] = src[idy * n + idx];
__syncthreads();
idx = blockIdx.x * blockDim.x + threadIdx.y;
idy = blockIdx.y * blockDim.y + threadIdx.x;
if (idx < n && idy < n)
dst[idx * n + idy] = buff[threadIdx.y][threadIdx.x];
}
int main() {
int i, n = 1024;
float *src = (float *)malloc(sizeof(float) * n * n);
float *dst = (float *)malloc(sizeof(float) * n * n);
for(i = 0; i < n * n; i++)
src[i] = i;
float *dev_src, *dev_dst;
cudaMalloc(&dev_src, sizeof(float) * n * n);
cudaMalloc(&dev_dst, sizeof(float) * n * n);
cudaMemcpy(dev_src, src, sizeof(float) * n * n, cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
kernel_shared<<< dim3(32, 32), dim3(32, 32) >>>(dev_src, dev_dst, n);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
fprintf(stderr, "time = %f\n", time);
cudaEventDestroy(stop);
cudaEventDestroy(start);
cudaMemcpy(dst, dev_dst, sizeof(float) * n * n, cudaMemcpyDeviceToHost);
cudaFree(dev_src);
cudaFree(dev_dst);
for(i = 0; i < n * n; i++)
if (src[i] != dst[(i % n) * n + (i / n)]) {
fprintf(stderr, "Error\n");
break;
}
free(src);
free(dst);
return 0;
}
|
87ebaf2bcdc51ab880d489a33df59493d83fe386.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void arrayTest(int n, long *factor, long *arr, long *result, int *const_arr1, long *const_arr2)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i == 0) {
/*
printf("In ArrayTest n=%d factor=%p arr=%p result=%p \n",n,factor,arr,result);
printf("In const %d %d %d\n",const_arr1[0],const_arr1[1],const_arr1[2]);
printf("In const %ld %ld %ld\n",const_arr2[0],const_arr2[1],const_arr2[2]);
*/
}
if (i<n)
{
int idx = i * 3;
result[idx]=arr[idx] * factor[i];
result[idx + 1]=arr[idx + 1] * factor[i];
result[idx + 2]=arr[idx + 2] * factor[i];
/*
printf("ArrayTest [%ld] * [%ld %ld %ld] = [%ld %ld %ld] \n", factor[i],
arr[idx],arr[idx+1],arr[idx+2],
result[idx],result[idx+1],result[idx+2]);
*/
}
} | 87ebaf2bcdc51ab880d489a33df59493d83fe386.cu | #include "includes.h"
__global__ void arrayTest(int n, long *factor, long *arr, long *result, int *const_arr1, long *const_arr2)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i == 0) {
/*
printf("In ArrayTest n=%d factor=%p arr=%p result=%p \n",n,factor,arr,result);
printf("In const %d %d %d\n",const_arr1[0],const_arr1[1],const_arr1[2]);
printf("In const %ld %ld %ld\n",const_arr2[0],const_arr2[1],const_arr2[2]);
*/
}
if (i<n)
{
int idx = i * 3;
result[idx]=arr[idx] * factor[i];
result[idx + 1]=arr[idx + 1] * factor[i];
result[idx + 2]=arr[idx + 2] * factor[i];
/*
printf("ArrayTest [%ld] * [%ld %ld %ld] = [%ld %ld %ld] \n", factor[i],
arr[idx],arr[idx+1],arr[idx+2],
result[idx],result[idx+1],result[idx+2]);
*/
}
} |
ce77bb90bb9c51fe98bd5b9852d60fe769689e51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <random>
// #include "3rdparty/cub-1.8.0/hipcub/hipcub.hpp"
#include "common.h"
#include "gptKernels.h"
#include "transformerKernels.h"
/**
@file
Implemented the CUDA kernel functions and their launchers
required by the GPT model.
Currently, fp16 and fp32 versions are provided
*/
namespace lightseq {
namespace cuda {
/**
@brief: ker_gpt_embedding
for encoder, look up token embedding, add position embedding
@thread
gridDim.x = batch_size
gridDim.y = token_seq_len
blockDim.x = hidden_size
@param
token_emb: [vocab_size, hidden_size]
pos_emb: [max_step, hidden_size]
token_id: input token id, [batch_size, token_seq_len]
output: result, [batch_size, token_seq_len, hidden_size]
real_seq_len: record seq len exclude padding, [batch_size]
padding_id, the padding_id, default 0
pos_offset: offset added to recover the real position when decoding, in which case gridDim.y=1
*/
template <typename T>
__global__ void ker_gpt_embedding(const T* token_emb, const T* pos_emb,
const int* token_id, T* output,
int* real_seq_len, int padding_id,
int pos_offset) {
int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
int tid = token_id[target_pos];
if (tid == padding_id) {
// for padding id
output[target_pos * blockDim.x + threadIdx.x] = 0.f;
return;
}
if (threadIdx.x == 0) {
atomicAdd(real_seq_len + blockIdx.x, 1);
}
output[target_pos * blockDim.x + threadIdx.x] =
token_emb[tid * blockDim.x + threadIdx.x] +
pos_emb[(blockIdx.y + pos_offset) * blockDim.x + threadIdx.x];
}
/* fp16 version */
template <>
__global__ void ker_gpt_embedding<__half>(const __half* token_emb,
const __half* pos_emb,
const int* token_id, __half* output,
int* real_seq_len, int padding_id,
int pos_offset) {
int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
int tid = token_id[target_pos];
half2* output_h = (half2*)output;
if (tid == padding_id) {
// for padding id
output_h[target_pos * blockDim.x + threadIdx.x] = __float2half2_rn(0.f);
return;
}
if (threadIdx.x == 0) {
atomicAdd(real_seq_len + blockIdx.x, 1);
}
float2 te =
__half22float2(((const half2*)token_emb)[tid * blockDim.x + threadIdx.x]);
float2 pe = __half22float2(
((const half2*)
pos_emb)[(blockIdx.y + pos_offset) * blockDim.x + threadIdx.x]);
te.x += pe.x;
te.y += pe.y;
output_h[target_pos * blockDim.x + threadIdx.x] = __float22half2_rn(te);
}
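/* The fp16 specialization above reinterprets the embeddings as half2, so each
 * thread handles two hidden units at once; accordingly, the __half launcher
 * specialization further below starts the kernel with hidden_size / 2 threads
 * per block. */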
template <typename T>
void ker_gpt_embedding_launcher(int batch_size, int batch_seq_len,
int hidden_size, hipStream_t stream,
const T* token_emb, const T* pos_emb,
const int* token_id, T* output,
int* real_seq_len, int padding_id,
int pos_offset) {
hipLaunchKernelGGL(( ker_gpt_embedding<T>)
, dim3(dim3(batch_size, batch_seq_len)), dim3(hidden_size), 0, stream,
token_emb, pos_emb, token_id, output, real_seq_len, padding_id,
pos_offset);
}
template <>
void ker_gpt_embedding_launcher<__half>(
int batch_size, int batch_seq_len, int hidden_size, hipStream_t stream,
const __half* token_emb, const __half* pos_emb, const int* token_id,
__half* output, int* real_seq_len, int padding_id, int pos_offset) {
hipLaunchKernelGGL(( ker_gpt_embedding<__half>)
, dim3(dim3(batch_size, batch_seq_len)), dim3(hidden_size / 2), 0, stream,
token_emb, pos_emb, token_id, output, real_seq_len, padding_id,
pos_offset);
}
template void ker_gpt_embedding_launcher<float>(
int batch_size, int batch_seq_len, int hidden_size, hipStream_t stream,
const float* token_emb, const float* pos_emb, const int* token_id,
float* output, int* real_seq_len, int padding_id, int pos_offset);
template void ker_gpt_embedding_launcher<__half>(
int batch_size, int batch_seq_len, int hidden_size, hipStream_t stream,
const __half* token_emb, const __half* pos_emb, const int* token_id,
__half* output, int* real_seq_len, int padding_id, int pos_offset);
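/* Illustrative usage sketch of the launcher above (not taken from the original
   call sites): the batch shape, padding id and the d_* buffer names are
   assumptions for this example. hidden_size is used as blockDim.x, so it must
   not exceed 1024, and d_real_seq_len is expected to be zeroed beforehand
   because the kernel accumulates into it with atomicAdd. */
inline void example_gpt_embedding_launch(const float* d_token_emb,
                                         const float* d_pos_emb,
                                         const int* d_token_id, float* d_output,
                                         int* d_real_seq_len,
                                         hipStream_t stream) {
  const int batch_size = 8, batch_seq_len = 32, hidden_size = 1024;
  const int padding_id = 0, pos_offset = 0;  // full-prompt pass, no decode offset
  ker_gpt_embedding_launcher<float>(batch_size, batch_seq_len, hidden_size,
                                    stream, d_token_emb, d_pos_emb, d_token_id,
                                    d_output, d_real_seq_len, padding_id,
                                    pos_offset);
}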
/**
@brief: ker_correlation_softmax_gpt
query-key correlation softmax for encoder self attention
@thread
gridDim.x = batch_size
gridDim.y = head_num * batch_seq_len
blockDim.x = batch_seq_len
@param
correlation: [batch_size, head_num, batch_seq_len, batch_seq_len]
real_seq_len: [batch_size]
*/
template <typename T>
__global__ void ker_correlation_softmax_gpt(T* correlation,
const int* real_seq_len,
const int batch_seq_len) {
int query_token_pos = blockIdx.y % batch_seq_len;
if (query_token_pos >= real_seq_len[blockIdx.x]) {
return;
}
int mask = 0; // can see the token when mask=0
if (threadIdx.x > query_token_pos || threadIdx.x >= batch_seq_len) {
mask = 1; // Can only see the token on the left side of it
}
int idx = (blockIdx.x * gridDim.y + blockIdx.y) * batch_seq_len + threadIdx.x;
float val = threadIdx.x < batch_seq_len ? (float)correlation[idx]
: CUDA_FLOAT_INF_NEG;
float max_val = blockReduceMax<float>(mask ? CUDA_FLOAT_INF_NEG : val);
__shared__ float smax;
if (threadIdx.x == 0) smax = max_val;
__syncthreads();
val = mask ? 0.f : expf(val - smax);
float rsum = blockReduceSum<float>(val);
__shared__ float ssum;
if (threadIdx.x == 0) ssum = rsum;
__syncthreads();
if (threadIdx.x < batch_seq_len) correlation[idx] = (T)(val / ssum);
}
template <typename T>
void ker_correlation_softmax_gpt_launcher(int batch_size, int batch_seq_len,
int head_num, hipStream_t stream,
T* correlation,
const int* real_seq_len) {
int block_dim = batch_seq_len;
if (batch_seq_len < 1024) {
block_dim = (batch_seq_len + 31) >> 5;
block_dim *= 32;
}
hipLaunchKernelGGL(( ker_correlation_softmax_gpt<T>)
, dim3(dim3(batch_size, head_num * batch_seq_len)), dim3(block_dim), 0, stream,
correlation, real_seq_len, batch_seq_len);
}
template void ker_correlation_softmax_gpt_launcher<float>(
int batch_size, int batch_seq_len, int head_num, hipStream_t stream,
float* correlation, const int* real_seq_len);
template void ker_correlation_softmax_gpt_launcher<__half>(
int batch_size, int batch_seq_len, int head_num, hipStream_t stream,
__half* correlation, const int* real_seq_len);
/**
@brief: ker_attention_mask_weights
query-key correlation softmax for encoder self attention
@thread
gridDim.x = batch_size
gridDim.y = head_num * dst_seq_len
blockDim.x = src_seq_len
@param
correlation: [batch_size, head_num, dst_seq_len, src_seq_len]
real_seq_len: [batch_size]
*/
template <typename T>
__global__ void ker_attention_mask_weights(T* correlation,
const int* real_seq_len,
int dst_seq_len, int src_seq_len) {
int query_token_pos = blockIdx.y % dst_seq_len + src_seq_len - dst_seq_len;
if (query_token_pos >= real_seq_len[blockIdx.x]) {
return;
}
int mask = 0; // can see the token when mask=0
if (threadIdx.x > query_token_pos) {
mask = 1; // Can only see the token on the left side of it
}
int idx = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
float val = (float)correlation[idx];
float max_val = blockReduceMax<float>(mask ? CUDA_FLOAT_INF_NEG : val);
__shared__ float smax;
if (threadIdx.x == 0) smax = max_val;
__syncthreads();
val = mask ? 0.f : expf(fmaxf(logit_thresh_min, val - smax));
float rsum = blockReduceSum<float>(val);
__shared__ float ssum;
if (threadIdx.x == 0) ssum = rsum;
__syncthreads();
correlation[idx] = (T)(val / (ssum + epsilon));
}
template <typename T>
void ker_attention_mask_weights_launcher(int batch_size, int dst_seq_len,
int src_seq_len, int head_num,
hipStream_t stream, T* correlation,
const int* real_seq_len) {
hipLaunchKernelGGL(( ker_attention_mask_weights<T>)
, dim3(dim3(batch_size, head_num * dst_seq_len)), dim3(src_seq_len), 0, stream,
correlation, real_seq_len, dst_seq_len, src_seq_len);
}
template void ker_attention_mask_weights_launcher<float>(
int batch_size, int dst_seq_len, int src_seq_len, int head_num,
hipStream_t stream, float* correlation, const int* real_seq_len);
template void ker_attention_mask_weights_launcher<__half>(
int batch_size, int dst_seq_len, int src_seq_len, int head_num,
hipStream_t stream, __half* correlation, const int* real_seq_len);
/**
@brief: ker_arrange_qkv_with_cache
split and reshape ori_qkv matrix into new_q, new_k, new_v during encoder
self-attention
ori_qkv is the result of gemm
@thread
gridDim.x = batch_size * batch_seq_len
gridDim.y = 3
blockDim.x = hidden_size
@param
ori_qkv: [batch_size, 1, 3, hidden_size]
qkv_bias: [3, hidden_size]
new_q: [batch_size, head_num, 1, dim_per_head]
max_batch_dim: max_batch_size * max_seq_len * hidden_size
batch_seq_len: the sequence length of the current batch
dim_per_head: dim of one head in multi-head attention
head_num: head number in multi-head attention
*/
template <typename T>
__global__ void ker_arrange_qkv_with_cache(const T* ori_qkv, const T* qkv_bias,
T* new_q, T* new_k, T* k_cache,
T* new_v, T* v_cache,
int max_batch_dim, int batch_seq_len,
int dim_per_head, int head_num) {
int batch_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
int head_id = threadIdx.x / dim_per_head;
int dim_id = threadIdx.x % dim_per_head;
int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head);
T new_val;
if (token_id < batch_seq_len - 1) {
int old_target_id =
targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len - 1, dim_per_head);
if (blockIdx.y == 0) return;
if (blockIdx.y == 1) new_val = k_cache[old_target_id];
if (blockIdx.y == 2) new_val = v_cache[old_target_id];
} else {
new_val = ori_qkv[(batch_id * gridDim.y + blockIdx.y) * blockDim.x +
threadIdx.x] +
__ldg(&qkv_bias[blockIdx.y * blockDim.x + threadIdx.x]);
if (blockIdx.y == 0) {
target_id = targetid_4dim(batch_id, head_id, 0, dim_id, head_num, 1,
dim_per_head);
}
}
if (blockIdx.y == 0) new_q[target_id] = new_val;
if (blockIdx.y == 1) new_k[target_id] = new_val;
if (blockIdx.y == 2) new_v[target_id] = new_val;
}
template <>
__global__ void ker_arrange_qkv_with_cache<__half>(
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* k_cache, __half* new_v, __half* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num) {
int batch_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
int head_id = threadIdx.x / dim_per_head;
int dim_id = threadIdx.x % dim_per_head;
int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head);
half2 new_val;
const half2* p_ori_qkv = (const half2*)ori_qkv;
const half2* p_bias = (const half2*)qkv_bias;
const half2* p_k_cache = (const half2*)k_cache;
const half2* p_v_cache = (const half2*)v_cache;
half2* p_new_q = (half2*)new_q;
half2* p_new_k = (half2*)new_k;
half2* p_new_v = (half2*)new_v;
if (token_id < batch_seq_len - 1) {
int old_target_id =
targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len - 1, dim_per_head);
if (blockIdx.y == 0) return;
if (blockIdx.y == 1) new_val = p_k_cache[old_target_id];
if (blockIdx.y == 2) new_val = p_v_cache[old_target_id];
} else {
new_val =
__hadd2(p_ori_qkv[(batch_id * gridDim.y + blockIdx.y) * blockDim.x +
threadIdx.x],
__ldg(&p_bias[blockIdx.y * blockDim.x + threadIdx.x]));
if (blockIdx.y == 0) {
target_id = targetid_4dim(batch_id, head_id, 0, dim_id, head_num, 1,
dim_per_head);
}
}
if (blockIdx.y == 0) p_new_q[target_id] = new_val;
if (blockIdx.y == 1) p_new_k[target_id] = new_val;
if (blockIdx.y == 2) p_new_v[target_id] = new_val;
}
template <typename T>
void ker_arrange_qkv_with_cache_launcher(int batch_token_num, int hidden_size,
hipStream_t stream, const T* ori_qkv,
const T* qkv_bias, T* new_q, T* new_k,
T* k_cache, T* new_v, T* v_cache,
int max_batch_dim, int batch_seq_len,
int dim_per_head, int head_num) {
hipLaunchKernelGGL(( ker_arrange_qkv_with_cache<T>)
, dim3(dim3(batch_token_num, 3)), dim3(hidden_size), 0, stream,
ori_qkv, qkv_bias, new_q, new_k, k_cache, new_v, v_cache,
max_batch_dim, batch_seq_len, dim_per_head, head_num);
}
template <>
void ker_arrange_qkv_with_cache_launcher<__half>(
int batch_token_num, int hidden_size, hipStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* k_cache, __half* new_v, __half* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num) {
hipLaunchKernelGGL(( ker_arrange_qkv_with_cache<__half>)
, dim3(dim3(batch_token_num, 3)), dim3(hidden_size / 2), 0, stream,
ori_qkv, qkv_bias, new_q, new_k, k_cache, new_v, v_cache,
max_batch_dim / 2, batch_seq_len, dim_per_head / 2, head_num);
}
template void ker_arrange_qkv_with_cache_launcher<float>(
int batch_token_num, int hidden_size, hipStream_t stream,
const float* ori_qkv, const float* qkv_bias, float* new_q, float* new_k,
float* k_cache, float* new_v, float* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num);
template void ker_arrange_qkv_with_cache_launcher<__half>(
int batch_token_num, int hidden_size, hipStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* k_cache, __half* new_v, __half* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num);
/**
@brief: ker_ppl
compute ppl from logit
ppl = - (1 / n) * sum(log(i|i-1...))
one thread block compute log probability for the given token
@thread
gridDim.x = batch_size
gridDim.y = batch_seq_len
blockDim.x = max_thread_per_block
@param
logits: [batch_size, batch_seq_len, vocab_size]
input_ids: [batch_size, batch_seq_len]
real_seq_len: [batch_size]
ppl: [batch_size]
*/
template <typename T>
__global__ void ker_ppl(const T* logits, const int* input_ids,
const int* real_seq_len, float* ppl, int vocab_size) {
int seq_len = real_seq_len[blockIdx.x]; // remove "eos"
if (blockIdx.y >= seq_len) {
// will not contribute to ppl
return;
}
int token_idx_in_batch = blockIdx.x * gridDim.y + blockIdx.y;
int left_logit_idx = token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (token_idx_in_batch + 1) * vocab_size;
/*
step 1. find max logit over the whole vocab
*/
float max_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
max_logit = fmaxf(max_logit, (float)logits[idx]);
}
max_logit = blockReduceMax(max_logit);
__shared__ float s_max_logit;
if (threadIdx.x == 0) {
s_max_logit = max_logit;
}
__syncthreads();
/*
step 2. compute the log probability for the given token,
add it to the sequence's ppl
*/
float sum_exp_logit = 0.f;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float lgt = fmaxf((float)logits[idx] - s_max_logit, logit_thresh_min);
sum_exp_logit += expf(lgt);
}
sum_exp_logit = blockReduceSum(sum_exp_logit);
if (threadIdx.x == 0) {
int token_id = input_ids[token_idx_in_batch + 1];
float log_prob =
((float)logits[token_idx_in_batch * vocab_size + token_id] -
s_max_logit - logf(sum_exp_logit)) /
(float)seq_len;
atomicAdd(ppl + blockIdx.x, -log_prob);
}
}
template <typename T>
void ker_ppl_launcher(int batch_size, int batch_seq_len,
int max_thread_per_block, hipStream_t stream,
const T* logits, const int* input_ids,
const int* real_seq_len, float* ppl, int vocab_size) {
hipLaunchKernelGGL(( ker_ppl<T>)
, dim3(dim3(batch_size, batch_seq_len)), dim3(max_thread_per_block), 0, stream,
logits, input_ids, real_seq_len, ppl, vocab_size);
}
template void ker_ppl_launcher<float>(int batch_size, int batch_seq_len,
int max_thread_per_block,
hipStream_t stream, const float* logits,
const int* input_ids,
const int* real_seq_len, float* ppl,
int vocab_size);
template void ker_ppl_launcher<__half>(
int batch_size, int batch_seq_len, int max_thread_per_block,
hipStream_t stream, const __half* logits, const int* input_ids,
const int* real_seq_len, float* ppl, int vocab_size);
/**
@brief: ker_topk_sample
@thread
gridDim.x = batch_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, logits_seq_len, vocab_size]
old_input_ids: [batch_size, batch_seq_len]
new_input_ids: [batch_size, batch_seq_len+1]
real_seq_len: [batch_size]
unfinished: [1]
curandstate: [batch_size]
*/
template <typename T, int k>
__global__ void ker_topk_sample(const T* logits, int* old_input_ids,
int* new_input_ids, const int* real_seq_len,
const int vocab_size, const int batch_seq_len,
int logits_seq_len, int* unfinished,
hiprandState_t* curandstate, int eos_id) {
int last_token_idx_in_batch = blockIdx.x * batch_seq_len + batch_seq_len - 1;
/* add EOS to end if last token is EOS */
if (old_input_ids[last_token_idx_in_batch] == eos_id) {
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
// blockIdx.x * (batch_seq_len+1) + batch_seq_len
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = eos_id;
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = eos_id;
}
return;
}
int logits_token_idx_in_batch =
blockIdx.x * logits_seq_len + logits_seq_len - 1;
int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size;
/*
step1. find max logit and rough Kth logit over the whole vocab
*/
__shared__ float s_max_logit, s_topk_logit;
float rough_top_kth_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
rough_top_kth_logit = fmaxf(rough_top_kth_logit, (float)logits[idx]);
}
float max_logit = blockReduceMax(rough_top_kth_logit);
rough_top_kth_logit = blockRoughTopK<float, k>(rough_top_kth_logit);
if (threadIdx.x == 0) {
s_topk_logit = rough_top_kth_logit;
s_max_logit = max_logit;
}
__syncthreads();
__shared__ int s_tid;
if (k != 1) {
/* step2 hold one logit per thread which larger than Kth logit and sample
* from them */
float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG;
int topk_tid = vocab_size;
int test_num = 0;
__shared__ float s_topk_exp_sum;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit = (float)logits[idx];
float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min));
if (logit >= s_topk_logit) test_num++;
if (logit >= s_topk_logit && logit_exp > topk_exp) {
topk_exp = logit_exp;
topk_tid = idx - left_logit_idx + threadIdx.x;
}
}
test_num = blockReduceSum(test_num);
if (topk_tid == vocab_size) topk_exp = 0;
topk_exp_sum = blockReduceSum(topk_exp);
if (threadIdx.x == 0) {
s_topk_exp_sum = topk_exp_sum;
}
__syncthreads();
/* calculate cumulative probability */
float topk_prob = topk_exp / s_topk_exp_sum;
float prefix_sum_prob;
typedef hipcub::BlockScan<float, 1024> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
BlockScan(temp_storage).InclusiveSum(topk_prob, prefix_sum_prob);
__shared__ float random_x;
if (threadIdx.x == 0) {
random_x = hiprand_uniform(curandstate + blockIdx.x);
}
__syncthreads();
if (threadIdx.x == 0) {
s_tid = vocab_size;
}
__syncthreads();
int threadID = threadIdx.x;
__shared__ int s_threadID;
__shared__ float s_max_prob;
if (random_x > prefix_sum_prob) threadID = blockDim.x;
threadID = blockReduceMin(threadID);
float max_prob = blockReduceMax(topk_prob);
if (threadIdx.x == 0) {
s_threadID = threadID;
s_max_prob = max_prob;
}
__syncthreads();
if (threadIdx.x == s_threadID) {
s_tid = topk_tid;
}
__syncthreads();
if (s_tid == vocab_size && topk_prob == s_max_prob) {
s_tid = topk_tid;
}
__syncthreads();
} else {
s_tid = vocab_size;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit = (float)logits[idx];
if (logit == s_max_logit) {
s_tid = idx - left_logit_idx + threadIdx.x;
}
}
__syncthreads();
}
/* if new sampled tid is not EOS, set unfinish TRUE */
if (threadIdx.x == 0) {
if (s_tid != eos_id) unfinished[0] = 1;
}
/* step3 copy old_input_ids to new_input_ids and add new sampled ids */
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = s_tid;
// save the newly sampled ids to old_input_ids for next step inputs
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = s_tid;
}
}
template <typename T>
void ker_topk_sample_launcher(int batch_size, int batch_seq_len,
int logits_seq_len, int max_thread_per_block,
hipStream_t stream, const T* logits,
int* old_input_ids, int* new_input_ids,
const int* real_seq_len, const int vocab_size,
const int k, int* unfinished,
hiprandState_t* curandstate, int eos_id) {
if (k == 1)
hipLaunchKernelGGL(( ker_topk_sample<T, 1>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 2)
hipLaunchKernelGGL(( ker_topk_sample<T, 2>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 4)
hipLaunchKernelGGL(( ker_topk_sample<T, 4>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 8)
hipLaunchKernelGGL(( ker_topk_sample<T, 8>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 16)
hipLaunchKernelGGL(( ker_topk_sample<T, 16>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 32)
hipLaunchKernelGGL(( ker_topk_sample<T, 32>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else {
throw std::invalid_argument("topk argument should be in [1,2,4,8,16,32]");
}
}
template void ker_topk_sample_launcher<float>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, hipStream_t stream, const float* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const int k, int* unfinished,
hiprandState_t* curandstate, int eos_id);
template void ker_topk_sample_launcher<__half>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, hipStream_t stream, const __half* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const int k, int* unfinished,
hiprandState_t* curandstate, int eos_id);
/**
@brief: ker_topp_sample
@thread
gridDim.x = batch_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, logits_seq_len, vocab_size]
old_input_ids: [batch_size, batch_seq_len]
new_input_ids: [batch_size, batch_seq_len+1]
real_seq_len: [batch_size]
unfinished: [1]
curandstate: [batch_size]
*/
template <typename T>
__global__ void ker_topp_sample(const T* logits, int* old_input_ids,
int* new_input_ids, const int* real_seq_len,
const int vocab_size, const int batch_seq_len,
int logits_seq_len, int* unfinished, float p,
hiprandState_t* curandstate, int eos_id) {
int token_idx_in_batch = blockIdx.x * batch_seq_len + batch_seq_len - 1;
/* add EOS to end if last token is EOS */
if (old_input_ids[token_idx_in_batch] == eos_id) {
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = eos_id;
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = eos_id;
}
return;
}
int logits_token_idx_in_batch =
blockIdx.x * logits_seq_len + logits_seq_len - 1;
int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size;
/*
step1. find max logit in each thread and sample from these probs with nucleus
sampling
*/
__shared__ float s_max_logit;
float max_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
max_logit = fmaxf(max_logit, (float)logits[idx]);
}
float max_logit_array[1];
max_logit_array[0] = max_logit;
  typedef hipcub::BlockRadixSort<float, 1024, 1> BlockRadixSort;
__shared__ typename BlockRadixSort::TempStorage sort_temp_storage;
BlockRadixSort(sort_temp_storage).SortDescending(max_logit_array);
float presum_max_logit_exp;
max_logit = max_logit_array[0];
float block_max_logit = blockReduceMax(max_logit);
if (threadIdx.x == 0) {
s_max_logit = block_max_logit;
}
__syncthreads();
float biased_logit_exp =
expf(fmaxf(max_logit - s_max_logit, logit_thresh_min));
typedef hipcub::BlockScan<float, 1024> BlockScan;
__shared__ typename BlockScan::TempStorage presum_temp_storage;
BlockScan(presum_temp_storage)
.InclusiveSum(biased_logit_exp, presum_max_logit_exp);
float topp_exp_threshold;
if (threadIdx.x == blockDim.x - 1) {
topp_exp_threshold = p * presum_max_logit_exp;
}
__shared__ float s_presum_logit_exp_threshold;
if (presum_max_logit_exp > topp_exp_threshold) {
presum_max_logit_exp = CUDA_FLOAT_INF_NEG;
}
float logit_exp_threshold = blockReduceMax(presum_max_logit_exp);
if (threadIdx.x == 0) {
s_presum_logit_exp_threshold = logit_exp_threshold;
}
__syncthreads();
__shared__ float s_logit_threshold;
if (presum_max_logit_exp == s_presum_logit_exp_threshold) {
s_logit_threshold = max_logit;
}
__syncthreads();
/* step2 hold one logit per thread and sample
* from them */
float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG;
int topk_tid = vocab_size;
int test_num = 0;
__shared__ float s_topk_exp_sum;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit = (float)logits[idx];
float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min));
if (logit >= s_logit_threshold) test_num++;
if (logit >= s_logit_threshold && logit_exp > topk_exp) {
topk_exp = logit_exp;
topk_tid = idx - left_logit_idx + threadIdx.x;
}
}
test_num = blockReduceSum(test_num);
if (topk_tid == vocab_size) topk_exp = 0;
topk_exp_sum = blockReduceSum(topk_exp);
if (threadIdx.x == 0) {
s_topk_exp_sum = topk_exp_sum;
}
__syncthreads();
/* calculate cumulative probability */
float topk_prob = topk_exp / s_topk_exp_sum;
float prefix_sum_prob;
BlockScan(presum_temp_storage).InclusiveSum(topk_prob, prefix_sum_prob);
__shared__ float random_x;
if (threadIdx.x == 0) {
random_x = hiprand_uniform(curandstate + blockIdx.x);
}
__syncthreads();
__shared__ int s_tid;
if (threadIdx.x == 0) {
s_tid = vocab_size;
}
__syncthreads();
int threadID = threadIdx.x;
__shared__ int s_threadID;
__shared__ float s_max_prob;
if (random_x > prefix_sum_prob) threadID = blockDim.x;
threadID = blockReduceMin(threadID);
float max_prob = blockReduceMax(topk_prob);
if (threadIdx.x == 0) {
s_threadID = threadID;
s_max_prob = max_prob;
}
__syncthreads();
if (threadIdx.x == s_threadID) {
s_tid = topk_tid;
}
__syncthreads();
if (s_tid == vocab_size && topk_prob == s_max_prob) {
s_tid = topk_tid;
}
__syncthreads();
/* if new sampled tid is not EOS, set unfinish TRUE */
if (threadIdx.x == 0) {
if (s_tid != eos_id) unfinished[0] = 1;
}
/* step3 copy old_input_ids to new_input_ids and add new sampled ids */
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = s_tid;
// save the newly sampled ids to old_input_ids for next step inputs
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = s_tid;
}
}
template <typename T>
void ker_topp_sample_launcher(int batch_size, int batch_seq_len,
int logits_seq_len, int max_thread_per_block,
hipStream_t stream, const T* logits,
int* old_input_ids, int* new_input_ids,
const int* real_seq_len, const int vocab_size,
const float p, int* unfinished,
hiprandState_t* curandstate, int eos_id) {
hipLaunchKernelGGL(( ker_topp_sample<T>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, p, curandstate, eos_id);
}
template void ker_topp_sample_launcher<float>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, hipStream_t stream, const float* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const float p, int* unfinished,
hiprandState_t* curandstate, int eos_id);
template void ker_topp_sample_launcher<__half>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, hipStream_t stream, const __half* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const float p, int* unfinished,
hiprandState_t* curandstate, int eos_id);
} // namespace cuda
} // namespace lightseq
| ce77bb90bb9c51fe98bd5b9852d60fe769689e51.cu | #include <random>
// #include "3rdparty/cub-1.8.0/cub/cub.cuh"
#include "common.h"
#include "gptKernels.h"
#include "transformerKernels.h"
/**
@file
Implemented the cuda kernel function and its launcher
that required by GPT model.
Currently, fp16 and fp32 versions are provided
*/
namespace lightseq {
namespace cuda {
/**
@brief: ker_gpt_embedding
for encoder, look up token embedding, add position embedding
@thread
gridDim.x = batch_size
gridDim.y = token_seq_len
blockDim.x = hidden_size
@param
token_emb: [vocab_size, hidden_size]
pos_emb: [max_step, hidden_size]
token_id: input token id, [batch_size, token_seq_len]
output: result, [batch_size, token_seq_len, hidden_size]
real_seq_len: record seq len exclude padding, [batch_size]
padding_id, the padding_id, default 0
pos_offset: get real pos when decoding which gridDim.y=1
*/
template <typename T>
__global__ void ker_gpt_embedding(const T* token_emb, const T* pos_emb,
const int* token_id, T* output,
int* real_seq_len, int padding_id,
int pos_offset) {
int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
int tid = token_id[target_pos];
if (tid == padding_id) {
// for padding id
output[target_pos * blockDim.x + threadIdx.x] = 0.f;
return;
}
if (threadIdx.x == 0) {
atomicAdd(real_seq_len + blockIdx.x, 1);
}
output[target_pos * blockDim.x + threadIdx.x] =
token_emb[tid * blockDim.x + threadIdx.x] +
pos_emb[(blockIdx.y + pos_offset) * blockDim.x + threadIdx.x];
}
/* fp16 version */
template <>
__global__ void ker_gpt_embedding<__half>(const __half* token_emb,
const __half* pos_emb,
const int* token_id, __half* output,
int* real_seq_len, int padding_id,
int pos_offset) {
int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
int tid = token_id[target_pos];
half2* output_h = (half2*)output;
if (tid == padding_id) {
// for padding id
output_h[target_pos * blockDim.x + threadIdx.x] = __float2half2_rn(0.f);
return;
}
if (threadIdx.x == 0) {
atomicAdd(real_seq_len + blockIdx.x, 1);
}
float2 te =
__half22float2(((const half2*)token_emb)[tid * blockDim.x + threadIdx.x]);
float2 pe = __half22float2(
((const half2*)
pos_emb)[(blockIdx.y + pos_offset) * blockDim.x + threadIdx.x]);
te.x += pe.x;
te.y += pe.y;
output_h[target_pos * blockDim.x + threadIdx.x] = __float22half2_rn(te);
}
template <typename T>
void ker_gpt_embedding_launcher(int batch_size, int batch_seq_len,
int hidden_size, cudaStream_t stream,
const T* token_emb, const T* pos_emb,
const int* token_id, T* output,
int* real_seq_len, int padding_id,
int pos_offset) {
ker_gpt_embedding<T>
<<<dim3(batch_size, batch_seq_len), hidden_size, 0, stream>>>(
token_emb, pos_emb, token_id, output, real_seq_len, padding_id,
pos_offset);
}
template <>
void ker_gpt_embedding_launcher<__half>(
int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream,
const __half* token_emb, const __half* pos_emb, const int* token_id,
__half* output, int* real_seq_len, int padding_id, int pos_offset) {
ker_gpt_embedding<__half>
<<<dim3(batch_size, batch_seq_len), hidden_size / 2, 0, stream>>>(
token_emb, pos_emb, token_id, output, real_seq_len, padding_id,
pos_offset);
}
template void ker_gpt_embedding_launcher<float>(
int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream,
const float* token_emb, const float* pos_emb, const int* token_id,
float* output, int* real_seq_len, int padding_id, int pos_offset);
template void ker_gpt_embedding_launcher<__half>(
int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream,
const __half* token_emb, const __half* pos_emb, const int* token_id,
__half* output, int* real_seq_len, int padding_id, int pos_offset);
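/* Illustrative usage sketch of the launcher above (not taken from the original
   call sites): the batch shape, padding id and the d_* buffer names are
   assumptions for this example. hidden_size is used as blockDim.x, so it must
   not exceed 1024, and d_real_seq_len is expected to be zeroed beforehand
   because the kernel accumulates into it with atomicAdd. */
inline void example_gpt_embedding_launch(const float* d_token_emb,
                                         const float* d_pos_emb,
                                         const int* d_token_id, float* d_output,
                                         int* d_real_seq_len,
                                         cudaStream_t stream) {
  const int batch_size = 8, batch_seq_len = 32, hidden_size = 1024;
  const int padding_id = 0, pos_offset = 0;  // full-prompt pass, no decode offset
  ker_gpt_embedding_launcher<float>(batch_size, batch_seq_len, hidden_size,
                                    stream, d_token_emb, d_pos_emb, d_token_id,
                                    d_output, d_real_seq_len, padding_id,
                                    pos_offset);
}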
/**
@brief: ker_correlation_softmax_gpt
query-key correlation softmax for encoder self attention
@thread
gridDim.x = batch_size
gridDim.y = head_num * batch_seq_len
blockDim.x = batch_seq_len
@param
correlation: [batch_size, head_num, batch_seq_len, batch_seq_len]
real_seq_len: [batch_size]
*/
template <typename T>
__global__ void ker_correlation_softmax_gpt(T* correlation,
const int* real_seq_len,
const int batch_seq_len) {
int query_token_pos = blockIdx.y % batch_seq_len;
if (query_token_pos >= real_seq_len[blockIdx.x]) {
return;
}
int mask = 0; // can see the token when mask=0
if (threadIdx.x > query_token_pos || threadIdx.x >= batch_seq_len) {
mask = 1; // Can only see the token on the left side of it
}
int idx = (blockIdx.x * gridDim.y + blockIdx.y) * batch_seq_len + threadIdx.x;
float val = threadIdx.x < batch_seq_len ? (float)correlation[idx]
: CUDA_FLOAT_INF_NEG;
float max_val = blockReduceMax<float>(mask ? CUDA_FLOAT_INF_NEG : val);
__shared__ float smax;
if (threadIdx.x == 0) smax = max_val;
__syncthreads();
val = mask ? 0.f : expf(val - smax);
float rsum = blockReduceSum<float>(val);
__shared__ float ssum;
if (threadIdx.x == 0) ssum = rsum;
__syncthreads();
if (threadIdx.x < batch_seq_len) correlation[idx] = (T)(val / ssum);
}
template <typename T>
void ker_correlation_softmax_gpt_launcher(int batch_size, int batch_seq_len,
int head_num, cudaStream_t stream,
T* correlation,
const int* real_seq_len) {
int block_dim = batch_seq_len;
if (batch_seq_len < 1024) {
block_dim = (batch_seq_len + 31) >> 5;
block_dim *= 32;
}
ker_correlation_softmax_gpt<T>
<<<dim3(batch_size, head_num * batch_seq_len), block_dim, 0, stream>>>(
correlation, real_seq_len, batch_seq_len);
}
template void ker_correlation_softmax_gpt_launcher<float>(
int batch_size, int batch_seq_len, int head_num, cudaStream_t stream,
float* correlation, const int* real_seq_len);
template void ker_correlation_softmax_gpt_launcher<__half>(
int batch_size, int batch_seq_len, int head_num, cudaStream_t stream,
__half* correlation, const int* real_seq_len);
/**
@brief: ker_attention_mask_weights
query-key correlation softmax for encoder self attention
@thread
gridDim.x = batch_size
gridDim.y = head_num * dst_seq_len
blockDim.x = src_seq_len
@param
correlation: [batch_size, head_num, dst_seq_len, src_seq_len]
real_seq_len: [batch_size]
*/
template <typename T>
__global__ void ker_attention_mask_weights(T* correlation,
const int* real_seq_len,
int dst_seq_len, int src_seq_len) {
int query_token_pos = blockIdx.y % dst_seq_len + src_seq_len - dst_seq_len;
if (query_token_pos >= real_seq_len[blockIdx.x]) {
return;
}
int mask = 0; // can see the token when mask=0
if (threadIdx.x > query_token_pos) {
mask = 1; // Can only see the token on the left side of it
}
int idx = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
float val = (float)correlation[idx];
float max_val = blockReduceMax<float>(mask ? CUDA_FLOAT_INF_NEG : val);
__shared__ float smax;
if (threadIdx.x == 0) smax = max_val;
__syncthreads();
val = mask ? 0.f : expf(fmaxf(logit_thresh_min, val - smax));
float rsum = blockReduceSum<float>(val);
__shared__ float ssum;
if (threadIdx.x == 0) ssum = rsum;
__syncthreads();
correlation[idx] = (T)(val / (ssum + epsilon));
}
template <typename T>
void ker_attention_mask_weights_launcher(int batch_size, int dst_seq_len,
int src_seq_len, int head_num,
cudaStream_t stream, T* correlation,
const int* real_seq_len) {
ker_attention_mask_weights<T>
<<<dim3(batch_size, head_num * dst_seq_len), src_seq_len, 0, stream>>>(
correlation, real_seq_len, dst_seq_len, src_seq_len);
}
template void ker_attention_mask_weights_launcher<float>(
int batch_size, int dst_seq_len, int src_seq_len, int head_num,
cudaStream_t stream, float* correlation, const int* real_seq_len);
template void ker_attention_mask_weights_launcher<__half>(
int batch_size, int dst_seq_len, int src_seq_len, int head_num,
cudaStream_t stream, __half* correlation, const int* real_seq_len);
/**
@brief: ker_arrange_qkv_with_cache
split and reshape ori_qkv matrix into new_q, new_k, new_v during encoder
self-attention
ori_qkv is the result of gemm
@thread
gridDim.x = batch_size * batch_seq_len
gridDim.y = 3
blockDim.x = hidden_size
@param
ori_qkv: [batch_size, 1, 3, hidden_size]
qkv_bias: [3, hidden_size]
new_q: [batch_size, head_num, 1, dim_per_head]
max_batch_dim: max_batch_size * max_seq_len * hidden_size
batch_seq_len: the sequence length of the current batch
dim_per_head: dim of one head in multi-head attention
head_num: head number in multi-head attention
*/
template <typename T>
__global__ void ker_arrange_qkv_with_cache(const T* ori_qkv, const T* qkv_bias,
T* new_q, T* new_k, T* k_cache,
T* new_v, T* v_cache,
int max_batch_dim, int batch_seq_len,
int dim_per_head, int head_num) {
int batch_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
int head_id = threadIdx.x / dim_per_head;
int dim_id = threadIdx.x % dim_per_head;
int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head);
T new_val;
if (token_id < batch_seq_len - 1) {
int old_target_id =
targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len - 1, dim_per_head);
if (blockIdx.y == 0) return;
if (blockIdx.y == 1) new_val = k_cache[old_target_id];
if (blockIdx.y == 2) new_val = v_cache[old_target_id];
} else {
new_val = ori_qkv[(batch_id * gridDim.y + blockIdx.y) * blockDim.x +
threadIdx.x] +
__ldg(&qkv_bias[blockIdx.y * blockDim.x + threadIdx.x]);
if (blockIdx.y == 0) {
target_id = targetid_4dim(batch_id, head_id, 0, dim_id, head_num, 1,
dim_per_head);
}
}
if (blockIdx.y == 0) new_q[target_id] = new_val;
if (blockIdx.y == 1) new_k[target_id] = new_val;
if (blockIdx.y == 2) new_v[target_id] = new_val;
}
template <>
__global__ void ker_arrange_qkv_with_cache<__half>(
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* k_cache, __half* new_v, __half* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num) {
int batch_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
int head_id = threadIdx.x / dim_per_head;
int dim_id = threadIdx.x % dim_per_head;
int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head);
half2 new_val;
const half2* p_ori_qkv = (const half2*)ori_qkv;
const half2* p_bias = (const half2*)qkv_bias;
const half2* p_k_cache = (const half2*)k_cache;
const half2* p_v_cache = (const half2*)v_cache;
half2* p_new_q = (half2*)new_q;
half2* p_new_k = (half2*)new_k;
half2* p_new_v = (half2*)new_v;
if (token_id < batch_seq_len - 1) {
int old_target_id =
targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len - 1, dim_per_head);
if (blockIdx.y == 0) return;
if (blockIdx.y == 1) new_val = p_k_cache[old_target_id];
if (blockIdx.y == 2) new_val = p_v_cache[old_target_id];
} else {
new_val =
__hadd2(p_ori_qkv[(batch_id * gridDim.y + blockIdx.y) * blockDim.x +
threadIdx.x],
__ldg(&p_bias[blockIdx.y * blockDim.x + threadIdx.x]));
if (blockIdx.y == 0) {
target_id = targetid_4dim(batch_id, head_id, 0, dim_id, head_num, 1,
dim_per_head);
}
}
if (blockIdx.y == 0) p_new_q[target_id] = new_val;
if (blockIdx.y == 1) p_new_k[target_id] = new_val;
if (blockIdx.y == 2) p_new_v[target_id] = new_val;
}
template <typename T>
void ker_arrange_qkv_with_cache_launcher(int batch_token_num, int hidden_size,
cudaStream_t stream, const T* ori_qkv,
const T* qkv_bias, T* new_q, T* new_k,
T* k_cache, T* new_v, T* v_cache,
int max_batch_dim, int batch_seq_len,
int dim_per_head, int head_num) {
ker_arrange_qkv_with_cache<T>
<<<dim3(batch_token_num, 3), hidden_size, 0, stream>>>(
ori_qkv, qkv_bias, new_q, new_k, k_cache, new_v, v_cache,
max_batch_dim, batch_seq_len, dim_per_head, head_num);
}
template <>
void ker_arrange_qkv_with_cache_launcher<__half>(
int batch_token_num, int hidden_size, cudaStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* k_cache, __half* new_v, __half* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num) {
ker_arrange_qkv_with_cache<__half>
<<<dim3(batch_token_num, 3), hidden_size / 2, 0, stream>>>(
ori_qkv, qkv_bias, new_q, new_k, k_cache, new_v, v_cache,
max_batch_dim / 2, batch_seq_len, dim_per_head / 2, head_num);
}
template void ker_arrange_qkv_with_cache_launcher<float>(
int batch_token_num, int hidden_size, cudaStream_t stream,
const float* ori_qkv, const float* qkv_bias, float* new_q, float* new_k,
float* k_cache, float* new_v, float* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num);
template void ker_arrange_qkv_with_cache_launcher<__half>(
int batch_token_num, int hidden_size, cudaStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* k_cache, __half* new_v, __half* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num);
/**
@brief: ker_ppl
compute ppl from logit
ppl = - (1 / n) * sum(log(i|i-1...))
one thread block compute log probability for the given token
@thread
gridDim.x = batch_size
gridDim.y = batch_seq_len
blockDim.x = max_thread_per_block
@param
logits: [batch_size, batch_seq_len, vocab_size]
input_ids: [batch_size, batch_seq_len]
real_seq_len: [batch_size]
ppl: [batch_size]
*/
template <typename T>
__global__ void ker_ppl(const T* logits, const int* input_ids,
const int* real_seq_len, float* ppl, int vocab_size) {
int seq_len = real_seq_len[blockIdx.x]; // remove "eos"
if (blockIdx.y >= seq_len) {
// will not contribute to ppl
return;
}
int token_idx_in_batch = blockIdx.x * gridDim.y + blockIdx.y;
int left_logit_idx = token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (token_idx_in_batch + 1) * vocab_size;
/*
step 1. find max logit over the whole vocab
*/
float max_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
max_logit = fmaxf(max_logit, (float)logits[idx]);
}
max_logit = blockReduceMax(max_logit);
__shared__ float s_max_logit;
if (threadIdx.x == 0) {
s_max_logit = max_logit;
}
__syncthreads();
/*
step 2. compute the log probability for the given token,
add it to the sequence's ppl
*/
float sum_exp_logit = 0.f;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float lgt = fmaxf((float)logits[idx] - s_max_logit, logit_thresh_min);
sum_exp_logit += expf(lgt);
}
sum_exp_logit = blockReduceSum(sum_exp_logit);
if (threadIdx.x == 0) {
int token_id = input_ids[token_idx_in_batch + 1];
float log_prob =
((float)logits[token_idx_in_batch * vocab_size + token_id] -
s_max_logit - logf(sum_exp_logit)) /
(float)seq_len;
atomicAdd(ppl + blockIdx.x, -log_prob);
}
}
template <typename T>
void ker_ppl_launcher(int batch_size, int batch_seq_len,
int max_thread_per_block, cudaStream_t stream,
const T* logits, const int* input_ids,
const int* real_seq_len, float* ppl, int vocab_size) {
ker_ppl<T>
<<<dim3(batch_size, batch_seq_len), max_thread_per_block, 0, stream>>>(
logits, input_ids, real_seq_len, ppl, vocab_size);
}
template void ker_ppl_launcher<float>(int batch_size, int batch_seq_len,
int max_thread_per_block,
cudaStream_t stream, const float* logits,
const int* input_ids,
const int* real_seq_len, float* ppl,
int vocab_size);
template void ker_ppl_launcher<__half>(
int batch_size, int batch_seq_len, int max_thread_per_block,
cudaStream_t stream, const __half* logits, const int* input_ids,
const int* real_seq_len, float* ppl, int vocab_size);
/**
@brief: ker_topk_sample
@thread
gridDim.x = batch_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, logits_seq_len, vocab_size]
old_input_ids: [batch_size, batch_seq_len]
new_input_ids: [batch_size, batch_seq_len+1]
real_seq_len: [batch_size]
unfinished: [1]
curandstate: [batch_size]
*/
template <typename T, int k>
__global__ void ker_topk_sample(const T* logits, int* old_input_ids,
int* new_input_ids, const int* real_seq_len,
const int vocab_size, const int batch_seq_len,
int logits_seq_len, int* unfinished,
curandState* curandstate, int eos_id) {
int last_token_idx_in_batch = blockIdx.x * batch_seq_len + batch_seq_len - 1;
/* add EOS to end if last token is EOS */
if (old_input_ids[last_token_idx_in_batch] == eos_id) {
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
// blockIdx.x * (batch_seq_len+1) + batch_seq_len
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = eos_id;
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = eos_id;
}
return;
}
int logits_token_idx_in_batch =
blockIdx.x * logits_seq_len + logits_seq_len - 1;
int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size;
/*
step1. find max logit and rough Kth logit over the whole vocab
*/
__shared__ float s_max_logit, s_topk_logit;
float rough_top_kth_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
rough_top_kth_logit = fmaxf(rough_top_kth_logit, (float)logits[idx]);
}
float max_logit = blockReduceMax(rough_top_kth_logit);
rough_top_kth_logit = blockRoughTopK<float, k>(rough_top_kth_logit);
if (threadIdx.x == 0) {
s_topk_logit = rough_top_kth_logit;
s_max_logit = max_logit;
}
__syncthreads();
__shared__ int s_tid;
if (k != 1) {
/* step2 hold one logit per thread which larger than Kth logit and sample
* from them */
float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG;
int topk_tid = vocab_size;
int test_num = 0;
__shared__ float s_topk_exp_sum;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit = (float)logits[idx];
float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min));
if (logit >= s_topk_logit) test_num++;
if (logit >= s_topk_logit && logit_exp > topk_exp) {
topk_exp = logit_exp;
topk_tid = idx - left_logit_idx + threadIdx.x;
}
}
test_num = blockReduceSum(test_num);
if (topk_tid == vocab_size) topk_exp = 0;
topk_exp_sum = blockReduceSum(topk_exp);
if (threadIdx.x == 0) {
s_topk_exp_sum = topk_exp_sum;
}
__syncthreads();
/* calculate cumulative probability */
float topk_prob = topk_exp / s_topk_exp_sum;
float prefix_sum_prob;
typedef cub::BlockScan<float, 1024> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
BlockScan(temp_storage).InclusiveSum(topk_prob, prefix_sum_prob);
__shared__ float random_x;
if (threadIdx.x == 0) {
random_x = curand_uniform(curandstate + blockIdx.x);
}
__syncthreads();
if (threadIdx.x == 0) {
s_tid = vocab_size;
}
__syncthreads();
int threadID = threadIdx.x;
__shared__ int s_threadID;
__shared__ float s_max_prob;
if (random_x > prefix_sum_prob) threadID = blockDim.x;
threadID = blockReduceMin(threadID);
float max_prob = blockReduceMax(topk_prob);
if (threadIdx.x == 0) {
s_threadID = threadID;
s_max_prob = max_prob;
}
__syncthreads();
if (threadIdx.x == s_threadID) {
s_tid = topk_tid;
}
__syncthreads();
if (s_tid == vocab_size && topk_prob == s_max_prob) {
s_tid = topk_tid;
}
__syncthreads();
} else {
s_tid = vocab_size;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit = (float)logits[idx];
if (logit == s_max_logit) {
s_tid = idx - left_logit_idx + threadIdx.x;
}
}
__syncthreads();
}
/* if new sampled tid is not EOS, set unfinish TRUE */
if (threadIdx.x == 0) {
if (s_tid != eos_id) unfinished[0] = 1;
}
/* step3 copy old_input_ids to new_input_ids and add new sampled ids */
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = s_tid;
// save the newly sampled ids to old_input_ids for next step inputs
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = s_tid;
}
}
template <typename T>
void ker_topk_sample_launcher(int batch_size, int batch_seq_len,
int logits_seq_len, int max_thread_per_block,
cudaStream_t stream, const T* logits,
int* old_input_ids, int* new_input_ids,
const int* real_seq_len, const int vocab_size,
const int k, int* unfinished,
curandState* curandstate, int eos_id) {
if (k == 1)
ker_topk_sample<T, 1><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 2)
ker_topk_sample<T, 2><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 4)
ker_topk_sample<T, 4><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 8)
ker_topk_sample<T, 8><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 16)
ker_topk_sample<T, 16><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 32)
ker_topk_sample<T, 32><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else {
throw std::invalid_argument("topk argument should be in [1,2,4,8,16,32]");
}
}
template void ker_topk_sample_launcher<float>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, cudaStream_t stream, const float* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const int k, int* unfinished,
curandState* curandstate, int eos_id);
template void ker_topk_sample_launcher<__half>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, cudaStream_t stream, const __half* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const int k, int* unfinished,
curandState* curandstate, int eos_id);
/**
@brief: ker_topp_sample
@thread
gridDim.x = batch_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, logits_seq_len, vocab_size]
old_input_ids: [batch_size, batch_seq_len]
new_input_ids: [batch_size, batch_seq_len+1]
real_seq_len: [batch_size]
unfinished: [1]
curandstate: [batch_size]
*/
template <typename T>
__global__ void ker_topp_sample(const T* logits, int* old_input_ids,
int* new_input_ids, const int* real_seq_len,
const int vocab_size, const int batch_seq_len,
int logits_seq_len, int* unfinished, float p,
curandState* curandstate, int eos_id) {
int token_idx_in_batch = blockIdx.x * batch_seq_len + batch_seq_len - 1;
/* add EOS to end if last token is EOS */
if (old_input_ids[token_idx_in_batch] == eos_id) {
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = eos_id;
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = eos_id;
}
return;
}
int logits_token_idx_in_batch =
blockIdx.x * logits_seq_len + logits_seq_len - 1;
int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size;
/*
step1. find max logit in each thread and sample from these probs with nucleus
sampling
*/
__shared__ float s_max_logit;
float max_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
max_logit = fmaxf(max_logit, (float)logits[idx]);
}
float max_logit_array[1];
max_logit_array[0] = max_logit;
typedef cub::BlockRadixSort<float, 1024, 1> BlockRadixSort;
__shared__ typename BlockRadixSort::TempStorage sort_temp_storage;
BlockRadixSort(sort_temp_storage).SortDescending(max_logit_array);
float presum_max_logit_exp;
max_logit = max_logit_array[0];
float block_max_logit = blockReduceMax(max_logit);
if (threadIdx.x == 0) {
s_max_logit = block_max_logit;
}
__syncthreads();
float biased_logit_exp =
expf(fmaxf(max_logit - s_max_logit, logit_thresh_min));
typedef cub::BlockScan<float, 1024> BlockScan;
__shared__ typename BlockScan::TempStorage presum_temp_storage;
BlockScan(presum_temp_storage)
.InclusiveSum(biased_logit_exp, presum_max_logit_exp);
float topp_exp_threshold;
if (threadIdx.x == blockDim.x - 1) {
topp_exp_threshold = p * presum_max_logit_exp;
}
__shared__ float s_presum_logit_exp_threshold;
if (presum_max_logit_exp > topp_exp_threshold) {
presum_max_logit_exp = CUDA_FLOAT_INF_NEG;
}
float logit_exp_threshold = blockReduceMax(presum_max_logit_exp);
if (threadIdx.x == 0) {
s_presum_logit_exp_threshold = logit_exp_threshold;
}
__syncthreads();
__shared__ float s_logit_threshold;
if (presum_max_logit_exp == s_presum_logit_exp_threshold) {
s_logit_threshold = max_logit;
}
__syncthreads();
/* step2 hold one logit per thread and sample
* from them */
float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG;
int topk_tid = vocab_size;
int test_num = 0;
__shared__ float s_topk_exp_sum;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit = (float)logits[idx];
float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min));
if (logit >= s_logit_threshold) test_num++;
if (logit >= s_logit_threshold && logit_exp > topk_exp) {
topk_exp = logit_exp;
topk_tid = idx - left_logit_idx + threadIdx.x;
}
}
test_num = blockReduceSum(test_num);
if (topk_tid == vocab_size) topk_exp = 0;
topk_exp_sum = blockReduceSum(topk_exp);
if (threadIdx.x == 0) {
s_topk_exp_sum = topk_exp_sum;
}
__syncthreads();
/* calculate cumulative probability */
float topk_prob = topk_exp / s_topk_exp_sum;
float prefix_sum_prob;
BlockScan(presum_temp_storage).InclusiveSum(topk_prob, prefix_sum_prob);
__shared__ float random_x;
if (threadIdx.x == 0) {
random_x = curand_uniform(curandstate + blockIdx.x);
}
__syncthreads();
__shared__ int s_tid;
if (threadIdx.x == 0) {
s_tid = vocab_size;
}
__syncthreads();
int threadID = threadIdx.x;
__shared__ int s_threadID;
__shared__ float s_max_prob;
if (random_x > prefix_sum_prob) threadID = blockDim.x;
threadID = blockReduceMin(threadID);
float max_prob = blockReduceMax(topk_prob);
if (threadIdx.x == 0) {
s_threadID = threadID;
s_max_prob = max_prob;
}
__syncthreads();
if (threadIdx.x == s_threadID) {
s_tid = topk_tid;
}
__syncthreads();
if (s_tid == vocab_size && topk_prob == s_max_prob) {
s_tid = topk_tid;
}
__syncthreads();
/* if new sampled tid is not EOS, set unfinish TRUE */
if (threadIdx.x == 0) {
if (s_tid != eos_id) unfinished[0] = 1;
}
/* step3 copy old_input_ids to new_input_ids and add new sampled ids */
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = s_tid;
// save the newly sampled ids to old_input_ids for next step inputs
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = s_tid;
}
}
template <typename T>
void ker_topp_sample_launcher(int batch_size, int batch_seq_len,
int logits_seq_len, int max_thread_per_block,
cudaStream_t stream, const T* logits,
int* old_input_ids, int* new_input_ids,
const int* real_seq_len, const int vocab_size,
const float p, int* unfinished,
curandState* curandstate, int eos_id) {
ker_topp_sample<T><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, p, curandstate, eos_id);
}
template void ker_topp_sample_launcher<float>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, cudaStream_t stream, const float* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const float p, int* unfinished,
curandState* curandstate, int eos_id);
template void ker_topp_sample_launcher<__half>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, cudaStream_t stream, const __half* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const float p, int* unfinished,
curandState* curandstate, int eos_id);
} // namespace cuda
} // namespace lightseq
|
c8973d7730d60d1d6f038f79192914b5698e2e02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel(unsigned char *ptr, int ticks){
// map from threadIdx/BlockIdx to pixel positions
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x - DIM/2;
float fy = y - DIM/2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char grey = (unsigned char) (128.0f + 127.0f * cos(d/10.0f - ticks / 7.0f) / (d / 10.0f + 1.0f));
ptr[offset*4 + 0] = grey;
ptr[offset*4 + 1] = grey;
ptr[offset*4 + 2] = grey;
ptr[offset*4 + 3] = 255;
} | c8973d7730d60d1d6f038f79192914b5698e2e02.cu | #include "includes.h"
__global__ void kernel(unsigned char *ptr, int ticks){
// map from threadIdx/BlockIdx to pixel positions
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x - DIM/2;
float fy = y - DIM/2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char grey = (unsigned char) (128.0f + 127.0f * cos(d/10.0f - ticks / 7.0f) / (d / 10.0f + 1.0f));
ptr[offset*4 + 0] = grey;
ptr[offset*4 + 1] = grey;
ptr[offset*4 + 2] = grey;
ptr[offset*4 + 3] = 255;
} |
2d2a29881a68e4dd6abaec6115ea7c55863f8d0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void transposeFineGrained(float *odata, float *idata, int width, int height)
{
__shared__ float block[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + (yIndex)*width;
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS)
{
block[threadIdx.y+i][threadIdx.x] = idata[index+i*width];
}
__syncthreads();
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS)
{
odata[index+i*height] = block[threadIdx.x][threadIdx.y+i];
}
} | 2d2a29881a68e4dd6abaec6115ea7c55863f8d0a.cu | #include "includes.h"
__global__ void transposeFineGrained(float *odata, float *idata, int width, int height)
{
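// pad the shared-memory tile by one column so the column-wise reads below do not all hit the same bank (avoids shared-memory bank conflicts)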
__shared__ float block[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + (yIndex)*width;
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS)
{
block[threadIdx.y+i][threadIdx.x] = idata[index+i*width];
}
__syncthreads();
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS)
{
odata[index+i*height] = block[threadIdx.x][threadIdx.y+i];
}
} |
85de104191e06b46a68d6a262735cc2573a430b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "copySharedMem.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *odata = NULL;
hipMalloc(&odata, XSIZE*YSIZE);
float *idata = NULL;
hipMalloc(&idata, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
copySharedMem), dim3(gridBlock),dim3(threadBlock), 0, 0, odata,idata,width,height);
hipDeviceSynchronize();
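// warm-up launches so the timed loop below is not skewed by one-time initialization overhead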
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
copySharedMem), dim3(gridBlock),dim3(threadBlock), 0, 0, odata,idata,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
copySharedMem), dim3(gridBlock),dim3(threadBlock), 0, 0, odata,idata,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 85de104191e06b46a68d6a262735cc2573a430b8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "copySharedMem.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *odata = NULL;
cudaMalloc(&odata, XSIZE*YSIZE);
float *idata = NULL;
cudaMalloc(&idata, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
copySharedMem<<<gridBlock,threadBlock>>>(odata,idata,width,height);
cudaDeviceSynchronize();
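// warm-up launches so the timed loop below is not skewed by one-time initialization overhead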
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
copySharedMem<<<gridBlock,threadBlock>>>(odata,idata,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
copySharedMem<<<gridBlock,threadBlock>>>(odata,idata,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
8768b19260157f3ea786f31bea1c2f5bd95701e5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <sys/time.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include "../separable-convolution/separable_convolution.h"
#include "../motion-tracking/motion_tracking.h"
#include "../edge-detect/edge_detect.h"
#include "../helper/helper_cuda.h"
using namespace cv;
#define TX 16
#define TY 16
void compare_naive_separable_convolution(const char **images, int num_images) {
// Benchmark serial (CPU) performance of naive convolution versus separated convolution on a fixed filter and randomly-valued input
for (int image_index = 0; image_index < num_images; image_index++) {
Mat image = imread(images[image_index], 0);
int input_width = image.cols;
int input_height = image.rows;
int *out = (int *)malloc((input_width + 2) * (input_height + 2) * sizeof(int));
int h[9] = {1, 2, 1, 2, 4, 2, 1, 2, 1};
int h_horizontal[3] = {1, 2, 1};
int h_vertical[3] = {1, 2, 1};
int *x = (int *)malloc(input_width * input_height * sizeof(int));
for (int i = 0; i < image.rows; i++) {
for (int j = 0; j < image.cols; j++) {
x[i * image.cols + j] = image.at<uchar>(i, j);
}
}
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
serial_naive_convolve(out, x, h, input_width, input_height, 3, 3);
gettimeofday(&tv2, NULL);
double naive_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
gettimeofday(&tv1, NULL);
serial_separable_convolve(out, x, h_horizontal, h_vertical, input_width, input_height, 3, 3, 1.0);
gettimeofday(&tv2, NULL);
double separable_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
printf("Test image: %s\n", images[image_index]);
printf("Serial naive convolution execution time: %f\n", naive_computation_time);
printf("Serial separable convolution execution time: %f\n", separable_computation_time);
}
}
void compare_separable_convolution_speedup(const char **images, int num_images) {
// Benchmark parallel speedup of Gaussian filter - serial CPU separated convolution vs. parallel GPU separated convolution
for (int image_index = 0; image_index < num_images; image_index++) {
Mat image = imread(images[image_index], 0);
int input_width = image.cols;
int input_height = image.rows;
int *out = (int *)malloc((input_width + 2) * (input_height + 2) * sizeof(int));
int h_horizontal[3] = {1, 2, 1};
int h_vertical[3] = {1, 2, 1};
int *x = (int *)malloc(input_width * input_height * sizeof(int));
for (int i = 0; i < image.rows; i++) {
for (int j = 0; j < image.cols; j++) {
x[i * image.cols + j] = image.at<uchar>(i, j);
}
}
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
separable_convolve(out, x, input_width, input_height, h_horizontal, h_vertical, 3, 1.0);
gettimeofday(&tv2, NULL);
double parallel_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
gettimeofday(&tv1, NULL);
serial_separable_convolve(out, x, h_horizontal, h_vertical, input_width, input_height, 3, 3, 1.0);
gettimeofday(&tv2, NULL);
double serial_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
double estimated_speedup = serial_computation_time/parallel_computation_time;
printf("Test image: %s\n", images[image_index]);
printf("Parallel separable convolution execution time: %f\n", parallel_computation_time);
printf("Serial separable convolution execution time: %f\n", serial_computation_time);
printf("Estimated parallelization speedup: %f\n", estimated_speedup);
}
}
void non_maximum_suppression_selective_thresholding_speedup(const char **images, int num_images) {
int high_threshold = 70;
int low_threshold = 50;
for (int image_index = 0; image_index < num_images; image_index++) {
Mat raw_image = imread(images[image_index], 0);
int *image = (int *)malloc(raw_image.cols * raw_image.rows * sizeof(int));
for (int i = 0; i < raw_image.rows; i++) {
for (int j = 0; j < raw_image.cols; j++) {
image[i * raw_image.cols + j] = raw_image.at<uchar>(i, j);
}
}
int *gx_out = (int *)malloc((raw_image.cols + 4) * (raw_image.rows + 4) * sizeof(int));
int *gy_out = (int *)malloc((raw_image.cols + 4) * (raw_image.rows + 4) * sizeof(int));
int *edges_out = (int *)malloc((raw_image.cols + 4) * (raw_image.rows + 4) * sizeof(int));
int kernel_size = 3;
int sobel_out_width = raw_image.cols + kernel_size - 1;
int sobel_out_height = raw_image.rows + kernel_size - 1;
dim3 block_size(TX, TY);
int bx = sobel_out_width/block_size.x;
int by = sobel_out_height/block_size.y;
dim3 grid_size = dim3(bx, by);
// Horizontal direction
int gx_horizontal[3] = {1, 0, -1};
int gx_vertical[3] = {1, 2, 1};
separable_convolve(gx_out, image, sobel_out_width, sobel_out_height, gx_horizontal, gx_vertical, 3, 1);
// Vertical direction
int gy_horizontal[3] = {1, 2, 1};
int gy_vertical[3] = {1, 0, -1};
separable_convolve(gy_out, image, sobel_out_width, sobel_out_height, gy_horizontal, gy_vertical, 3, 1);
int *dev_edges, *dev_gx, *dev_gy;
double *dev_magnitude, *dev_angle;
// Allocate GPU memory space for partial derivatives
checkCudaErrors(hipMalloc(&dev_magnitude, sobel_out_width * sobel_out_height * sizeof(double)));
checkCudaErrors(hipMalloc(&dev_angle, sobel_out_width * sobel_out_height * sizeof(double)));
checkCudaErrors(hipMalloc(&dev_edges, sobel_out_width * sobel_out_height * sizeof(int)));
checkCudaErrors(hipMalloc(&dev_gx, sobel_out_width * sobel_out_height * sizeof(int)));
checkCudaErrors(hipMalloc(&dev_gy, sobel_out_width * sobel_out_height * sizeof(int)));
checkCudaErrors(hipMemcpy(dev_gx, gx_out, sobel_out_width * sobel_out_height * sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dev_gy, gy_out, sobel_out_width * sobel_out_height * sizeof(int), hipMemcpyHostToDevice));
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
gradient_magnitude_angle_thresholding_and_suppresion(dev_magnitude, dev_angle, sobel_out_width, sobel_out_height, dev_gx, dev_gy, dev_edges, edges_out, high_threshold, low_threshold, grid_size, block_size);
gettimeofday(&tv2, NULL);
double parallel_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
gettimeofday(&tv1, NULL);
serial_thresholding_and_suppression(edges_out, sobel_out_width, sobel_out_height, gx_out, gy_out, high_threshold, low_threshold);
gettimeofday(&tv2, NULL);
double serial_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
checkCudaErrors(hipFree(dev_edges));
checkCudaErrors(hipFree(dev_gx));
checkCudaErrors(hipFree(dev_gy));
checkCudaErrors(hipFree(dev_magnitude));
checkCudaErrors(hipFree(dev_angle));
printf("Test image: %s\n", images[image_index]);
printf("Parallel non-maximum suppression and selective thresholding execution time: %f seconds\n", parallel_computation_time);
printf("Serial non-maximum suppression and selective thresholding execution time: %f seconds\n", serial_computation_time);
printf("Estimated parallelization speedup: %f\n", serial_computation_time/parallel_computation_time);
}
}
void motion_area_estimation_speedup(const char **images, int num_images) {
// Uses random matrices for the difference
double movement_threshold = 5.0;
int motion_threshold = 4;
int horizontal_divisions = 5;
int vertical_divisions = 5;
struct timeval tv1, tv2;
for (int image_index = 0; image_index < num_images; image_index++) {
Mat raw_image = imread(images[image_index], 0);
int *motion_area = (int *)malloc(raw_image.cols * raw_image.rows * sizeof(int));
int *difference = (int *)malloc(raw_image.cols * raw_image.rows * sizeof(int));
int *edges_1 = (int *)malloc(raw_image.cols * raw_image.rows * sizeof(int));
int *edges_2 = (int *)malloc(raw_image.cols * raw_image.rows * sizeof(int));
for (int i = 0; i < raw_image.cols * raw_image.rows; i++) {
edges_1[i] = rand() % 2;
edges_2[i] = rand() % 2;
}
gettimeofday(&tv1, NULL);
motion_detect(motion_area, difference, edges_1, edges_2, raw_image.cols, raw_image.rows, movement_threshold, motion_threshold, horizontal_divisions, vertical_divisions);
gettimeofday(&tv2, NULL);
double parallel_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
gettimeofday(&tv1, NULL);
serial_motion_detect(motion_area, difference, edges_1, edges_2, raw_image.cols, raw_image.rows, movement_threshold, motion_threshold, horizontal_divisions, vertical_divisions);
gettimeofday(&tv2, NULL);
double serial_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
printf("Test image: %s\n", images[image_index]);
printf("Parallel motion area estimation execution time: %f seconds\n", parallel_computation_time);
printf("Serial motion area estimation execution time: %f seconds\n", serial_computation_time);
printf("Estimated parallelization speedup: %f\n", serial_computation_time/parallel_computation_time);
}
}
int main() {
const char *images[9];
images[0] = "../../images/city_100_100.jpg";
images[1] = "../../images/city_500_500.jpg";
images[2] = "../../images/city_1000_1000.jpg";
images[3] = "../../images/city_2000_2000.jpg";
images[4] = "../../images/city_3000_3000.jpg";
images[5] = "../../images/city_4000_4000.jpg";
images[6] = "../../images/city_5000_5000.jpg";
images[7] = "../../images/city_6000_6000.jpg";
images[8] = "../../images/city_7500_7500.jpg";
srand(time(NULL));
printf("==========SERIAL NAIVE VS SEPARABLE CONVOLUTION COMPARISON==========\n");
compare_naive_separable_convolution(images, 9);
printf("==========CPU VS GPU SEPARABLE CONVOLUTION SPEEDUP==========\n");
compare_separable_convolution_speedup(images, 9);
printf("==========CPU VS GPU NON-MAXIMUM SUPPRESSION AND SELECTIVE THRESHOLDING SPEEDUP==========\n");
non_maximum_suppression_selective_thresholding_speedup(images, 8);
printf("==========CPU VS GPU MOTION AREA ESTIMATION SPEEDUP==========\n");
motion_area_estimation_speedup(images, 9);
return 0;
}
| 8768b19260157f3ea786f31bea1c2f5bd95701e5.cu | #include <stdio.h>
#include <sys/time.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include "../separable-convolution/separable_convolution.h"
#include "../motion-tracking/motion_tracking.h"
#include "../edge-detect/edge_detect.h"
#include "../helper/helper_cuda.h"
using namespace cv;
#define TX 16
#define TY 16
void compare_naive_separable_convolution(const char **images, int num_images) {
// Benchmark serial (CPU) performance of naive convolution versus separated convolution on a fixed 3x3 filter and image-derived input
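// The 3x3 Gaussian kernel h below is the outer product of the two 1-D kernels, so the separable path does two k-tap passes per pixel instead of one k*k-tap pass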
for (int image_index = 0; image_index < num_images; image_index++) {
Mat image = imread(images[image_index], 0);
int input_width = image.cols;
int input_height = image.rows;
int *out = (int *)malloc((input_width + 2) * (input_height + 2) * sizeof(int));
int h[9] = {1, 2, 1, 2, 4, 2, 1, 2, 1};
int h_horizontal[3] = {1, 2, 1};
int h_vertical[3] = {1, 2, 1};
int *x = (int *)malloc(input_width * input_height * sizeof(int));
for (int i = 0; i < image.rows; i++) {
for (int j = 0; j < image.cols; j++) {
x[i * image.cols + j] = image.at<uchar>(i, j);
}
}
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
serial_naive_convolve(out, x, h, input_width, input_height, 3, 3);
gettimeofday(&tv2, NULL);
double naive_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
gettimeofday(&tv1, NULL);
serial_separable_convolve(out, x, h_horizontal, h_vertical, input_width, input_height, 3, 3, 1.0);
gettimeofday(&tv2, NULL);
double separable_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
printf("Test image: %s\n", images[image_index]);
printf("Serial naive convolution execution time: %f\n", naive_computation_time);
printf("Serial separable convolution execution time: %f\n", separable_computation_time);
}
}
void compare_separable_convolution_speedup(const char **images, int num_images) {
// Benchmark parallel speedup of Gaussian filter - serial CPU separated convolution vs. parallel GPU separated convolution
for (int image_index = 0; image_index < num_images; image_index++) {
Mat image = imread(images[image_index], 0);
int input_width = image.cols;
int input_height = image.rows;
int *out = (int *)malloc((input_width + 2) * (input_height + 2) * sizeof(int));
int h_horizontal[3] = {1, 2, 1};
int h_vertical[3] = {1, 2, 1};
int *x = (int *)malloc(input_width * input_height * sizeof(int));
for (int i = 0; i < image.rows; i++) {
for (int j = 0; j < image.cols; j++) {
x[i * image.cols + j] = image.at<uchar>(i, j);
}
}
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
separable_convolve(out, x, input_width, input_height, h_horizontal, h_vertical, 3, 1.0);
gettimeofday(&tv2, NULL);
double parallel_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
gettimeofday(&tv1, NULL);
serial_separable_convolve(out, x, h_horizontal, h_vertical, input_width, input_height, 3, 3, 1.0);
gettimeofday(&tv2, NULL);
double serial_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
double estimated_speedup = serial_computation_time/parallel_computation_time;
printf("Test image: %s\n", images[image_index]);
printf("Parallel separable convolution execution time: %f\n", parallel_computation_time);
printf("Serial separable convolution execution time: %f\n", serial_computation_time);
printf("Estimated parallelization speedup: %f\n", estimated_speedup);
}
}
void non_maximum_suppression_selective_thresholding_speedup(const char **images, int num_images) {
int high_threshold = 70;
int low_threshold = 50;
for (int image_index = 0; image_index < num_images; image_index++) {
Mat raw_image = imread(images[image_index], 0);
int *image = (int *)malloc(raw_image.cols * raw_image.rows * sizeof(int));
for (int i = 0; i < raw_image.rows; i++) {
for (int j = 0; j < raw_image.cols; j++) {
image[i * raw_image.cols + j] = raw_image.at<uchar>(i, j);
}
}
int *gx_out = (int *)malloc((raw_image.cols + 4) * (raw_image.rows + 4) * sizeof(int));
int *gy_out = (int *)malloc((raw_image.cols + 4) * (raw_image.rows + 4) * sizeof(int));
int *edges_out = (int *)malloc((raw_image.cols + 4) * (raw_image.rows + 4) * sizeof(int));
int kernel_size = 3;
int sobel_out_width = raw_image.cols + kernel_size - 1;
int sobel_out_height = raw_image.rows + kernel_size - 1;
dim3 block_size(TX, TY);
int bx = sobel_out_width/block_size.x;
int by = sobel_out_height/block_size.y;
dim3 grid_size = dim3(bx, by);
// Horizontal direction
int gx_horizontal[3] = {1, 0, -1};
int gx_vertical[3] = {1, 2, 1};
separable_convolve(gx_out, image, sobel_out_width, sobel_out_height, gx_horizontal, gx_vertical, 3, 1);
// Vertical direction
int gy_horizontal[3] = {1, 2, 1};
int gy_vertical[3] = {1, 0, -1};
separable_convolve(gy_out, image, sobel_out_width, sobel_out_height, gy_horizontal, gy_vertical, 3, 1);
int *dev_edges, *dev_gx, *dev_gy;
double *dev_magnitude, *dev_angle;
// Allocate GPU memory space for partial derivatives
checkCudaErrors(cudaMalloc(&dev_magnitude, sobel_out_width * sobel_out_height * sizeof(double)));
checkCudaErrors(cudaMalloc(&dev_angle, sobel_out_width * sobel_out_height * sizeof(double)));
checkCudaErrors(cudaMalloc(&dev_edges, sobel_out_width * sobel_out_height * sizeof(int)));
checkCudaErrors(cudaMalloc(&dev_gx, sobel_out_width * sobel_out_height * sizeof(int)));
checkCudaErrors(cudaMalloc(&dev_gy, sobel_out_width * sobel_out_height * sizeof(int)));
checkCudaErrors(cudaMemcpy(dev_gx, gx_out, sobel_out_width * sobel_out_height * sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dev_gy, gy_out, sobel_out_width * sobel_out_height * sizeof(int), cudaMemcpyHostToDevice));
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
gradient_magnitude_angle_thresholding_and_suppresion(dev_magnitude, dev_angle, sobel_out_width, sobel_out_height, dev_gx, dev_gy, dev_edges, edges_out, high_threshold, low_threshold, grid_size, block_size);
gettimeofday(&tv2, NULL);
double parallel_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
gettimeofday(&tv1, NULL);
serial_thresholding_and_suppression(edges_out, sobel_out_width, sobel_out_height, gx_out, gy_out, high_threshold, low_threshold);
gettimeofday(&tv2, NULL);
double serial_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
checkCudaErrors(cudaFree(dev_edges));
checkCudaErrors(cudaFree(dev_gx));
checkCudaErrors(cudaFree(dev_gy));
checkCudaErrors(cudaFree(dev_magnitude));
checkCudaErrors(cudaFree(dev_angle));
printf("Test image: %s\n", images[image_index]);
printf("Parallel non-maximum suppression and selective thresholding execution time: %f seconds\n", parallel_computation_time);
printf("Serial non-maximum suppression and selective thresholding execution time: %f seconds\n", serial_computation_time);
printf("Estimated parallelization speedup: %f\n", serial_computation_time/parallel_computation_time);
}
}
void motion_area_estimation_speedup(const char **images, int num_images) {
// Uses random matrices for the difference
double movement_threshold = 5.0;
int motion_threshold = 4;
int horizontal_divisions = 5;
int vertical_divisions = 5;
struct timeval tv1, tv2;
for (int image_index = 0; image_index < num_images; image_index++) {
Mat raw_image = imread(images[image_index], 0);
int *motion_area = (int *)malloc(raw_image.cols * raw_image.rows * sizeof(int));
int *difference = (int *)malloc(raw_image.cols * raw_image.rows * sizeof(int));
int *edges_1 = (int *)malloc(raw_image.cols * raw_image.rows * sizeof(int));
int *edges_2 = (int *)malloc(raw_image.cols * raw_image.rows * sizeof(int));
for (int i = 0; i < raw_image.cols * raw_image.rows; i++) {
edges_1[i] = rand() % 2;
edges_2[i] = rand() % 2;
}
gettimeofday(&tv1, NULL);
motion_detect(motion_area, difference, edges_1, edges_2, raw_image.cols, raw_image.rows, movement_threshold, motion_threshold, horizontal_divisions, vertical_divisions);
gettimeofday(&tv2, NULL);
double parallel_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
gettimeofday(&tv1, NULL);
serial_motion_detect(motion_area, difference, edges_1, edges_2, raw_image.cols, raw_image.rows, movement_threshold, motion_threshold, horizontal_divisions, vertical_divisions);
gettimeofday(&tv2, NULL);
double serial_computation_time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
printf("Test image: %s\n", images[image_index]);
printf("Parallel motion area estimation execution time: %f seconds\n", parallel_computation_time);
printf("Serial motion area estimation execution time: %f seconds\n", serial_computation_time);
printf("Estimated parallelization speedup: %f\n", serial_computation_time/parallel_computation_time);
}
}
int main() {
const char *images[9];
images[0] = "../../images/city_100_100.jpg";
images[1] = "../../images/city_500_500.jpg";
images[2] = "../../images/city_1000_1000.jpg";
images[3] = "../../images/city_2000_2000.jpg";
images[4] = "../../images/city_3000_3000.jpg";
images[5] = "../../images/city_4000_4000.jpg";
images[6] = "../../images/city_5000_5000.jpg";
images[7] = "../../images/city_6000_6000.jpg";
images[8] = "../../images/city_7500_7500.jpg";
srand(time(NULL));
printf("==========SERIAL NAIVE VS SEPARABLE CONVOLUTION COMPARISON==========\n");
compare_naive_separable_convolution(images, 9);
printf("==========CPU VS GPU SEPARABLE CONVOLUTION SPEEDUP==========\n");
compare_separable_convolution_speedup(images, 9);
printf("==========CPU VS GPU NON-MAXIMUM SUPPRESSION AND SELECTIVE THRESHOLDING SPEEDUP==========\n");
non_maximum_suppression_selective_thresholding_speedup(images, 8);
printf("==========CPU VS GPU MOTION AREA ESTIMATION SPEEDUP==========\n");
motion_area_estimation_speedup(images, 9);
return 0;
}
|
afb3702a0f0b06a8ab618bac5fbe02465afca662.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel3_minus_4_a;
int xdim0_update_halo_kernel3_minus_4_a_h = -1;
__constant__ int ydim0_update_halo_kernel3_minus_4_a;
int ydim0_update_halo_kernel3_minus_4_a_h = -1;
__constant__ int xdim1_update_halo_kernel3_minus_4_a;
int xdim1_update_halo_kernel3_minus_4_a_h = -1;
__constant__ int ydim1_update_halo_kernel3_minus_4_a;
int ydim1_update_halo_kernel3_minus_4_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel3_minus_4_a * (y) + \
xdim0_update_halo_kernel3_minus_4_a * ydim0_update_halo_kernel3_minus_4_a * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel3_minus_4_a * (y) + \
xdim1_update_halo_kernel3_minus_4_a * ydim1_update_halo_kernel3_minus_4_a * \
(z))
// user function
__device__
inline void
update_halo_kernel3_minus_4_a_gpu(double *vol_flux_x, double *mass_flux_x,
const int *fields) {
if (fields[FIELD_VOL_FLUX_X] == 1)
vol_flux_x[OPS_ACC0(0, 0, 0)] = -(vol_flux_x[OPS_ACC0(4, 0, 0)]);
if (fields[FIELD_MASS_FLUX_X] == 1)
mass_flux_x[OPS_ACC1(0, 0, 0)] = -(mass_flux_x[OPS_ACC1(4, 0, 0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel3_minus_4_a(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_minus_4_a +
idx_z * 1 * 1 * xdim0_update_halo_kernel3_minus_4_a *
ydim0_update_halo_kernel3_minus_4_a;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_minus_4_a +
idx_z * 1 * 1 * xdim1_update_halo_kernel3_minus_4_a *
ydim1_update_halo_kernel3_minus_4_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel3_minus_4_a_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel3_minus_4_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel3_minus_4_a_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 64))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(64, "update_halo_kernel3_minus_4_a");
OPS_kernels[64].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel3_minus_4_a_h ||
ydim0 != ydim0_update_halo_kernel3_minus_4_a_h ||
xdim1 != xdim1_update_halo_kernel3_minus_4_a_h ||
ydim1 != ydim1_update_halo_kernel3_minus_4_a_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel3_minus_4_a, &xdim0,
sizeof(int));
xdim0_update_halo_kernel3_minus_4_a_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel3_minus_4_a, &ydim0,
sizeof(int));
ydim0_update_halo_kernel3_minus_4_a_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel3_minus_4_a, &xdim1,
sizeof(int));
xdim1_update_halo_kernel3_minus_4_a_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel3_minus_4_a, &ydim1,
sizeof(int));
ydim1_update_halo_kernel3_minus_4_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[64].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel3_minus_4_a), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[64].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[64].mpi_time += t2 - t1;
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel3_minus_4_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 64;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 64;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel3_minus_4_a_execute;
if (OPS_diags > 1) {
ops_timing_realloc(64, "update_halo_kernel3_minus_4_a");
}
ops_enqueue_kernel(desc);
}
#endif
| afb3702a0f0b06a8ab618bac5fbe02465afca662.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel3_minus_4_a;
int xdim0_update_halo_kernel3_minus_4_a_h = -1;
__constant__ int ydim0_update_halo_kernel3_minus_4_a;
int ydim0_update_halo_kernel3_minus_4_a_h = -1;
__constant__ int xdim1_update_halo_kernel3_minus_4_a;
int xdim1_update_halo_kernel3_minus_4_a_h = -1;
__constant__ int ydim1_update_halo_kernel3_minus_4_a;
int ydim1_update_halo_kernel3_minus_4_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel3_minus_4_a * (y) + \
xdim0_update_halo_kernel3_minus_4_a * ydim0_update_halo_kernel3_minus_4_a * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel3_minus_4_a * (y) + \
xdim1_update_halo_kernel3_minus_4_a * ydim1_update_halo_kernel3_minus_4_a * \
(z))
// user function
__device__
inline void
update_halo_kernel3_minus_4_a_gpu(double *vol_flux_x, double *mass_flux_x,
const int *fields) {
if (fields[FIELD_VOL_FLUX_X] == 1)
vol_flux_x[OPS_ACC0(0, 0, 0)] = -(vol_flux_x[OPS_ACC0(4, 0, 0)]);
if (fields[FIELD_MASS_FLUX_X] == 1)
mass_flux_x[OPS_ACC1(0, 0, 0)] = -(mass_flux_x[OPS_ACC1(4, 0, 0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel3_minus_4_a(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_minus_4_a +
idx_z * 1 * 1 * xdim0_update_halo_kernel3_minus_4_a *
ydim0_update_halo_kernel3_minus_4_a;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_minus_4_a +
idx_z * 1 * 1 * xdim1_update_halo_kernel3_minus_4_a *
ydim1_update_halo_kernel3_minus_4_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel3_minus_4_a_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel3_minus_4_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel3_minus_4_a_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 64))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(64, "update_halo_kernel3_minus_4_a");
OPS_kernels[64].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel3_minus_4_a_h ||
ydim0 != ydim0_update_halo_kernel3_minus_4_a_h ||
xdim1 != xdim1_update_halo_kernel3_minus_4_a_h ||
ydim1 != ydim1_update_halo_kernel3_minus_4_a_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel3_minus_4_a, &xdim0,
sizeof(int));
xdim0_update_halo_kernel3_minus_4_a_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel3_minus_4_a, &ydim0,
sizeof(int));
ydim0_update_halo_kernel3_minus_4_a_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel3_minus_4_a, &xdim1,
sizeof(int));
xdim1_update_halo_kernel3_minus_4_a_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel3_minus_4_a, &ydim1,
sizeof(int));
ydim1_update_halo_kernel3_minus_4_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[64].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel3_minus_4_a<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[64].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[64].mpi_time += t2 - t1;
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel3_minus_4_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 64;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 64;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel3_minus_4_a_execute;
if (OPS_diags > 1) {
ops_timing_realloc(64, "update_halo_kernel3_minus_4_a");
}
ops_enqueue_kernel(desc);
}
#endif
|
7e3ac3e3520c0d71dc873fe52e299b9ee5a7d569.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
__global__ void broadcast_fw_dev(
const half *px,
std::uint32_t skip1, std::uint32_t skip2, std::uint32_t size,
half *py) {
const std::uint32_t i = IDX;
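// i % skip1 is the offset below the broadcast axis and i / skip2 the offset above it, so every position along that axis reads the same source element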
if (i < size) py[i] = px[i % skip1 + (i / skip2) * skip1];
}
} // namespace
namespace primitiv {
namespace devices {
void CUDA16::broadcast_fw_impl(
const Tensor &x, std::uint32_t dim, std::uint32_t size, Tensor &y) {
const std::uint32_t skip1 = y.shape().lower_volume(dim);
const std::uint32_t skip2 = skip1 * size;
const std::uint32_t total = y.shape().size();
const std::uint32_t g1 = GRID_SIZE(total, dim1_x_);
CUDA_CALL(::hipSetDevice(dev_id_));
hipLaunchKernelGGL(( ::broadcast_fw_dev), dim3(g1), dim3(dim1_x_), 0, 0,
CDATA(half, x), skip1, skip2, total, MDATA(half, y));
}
} // namespace devices
} // namespace primitiv
| 7e3ac3e3520c0d71dc873fe52e299b9ee5a7d569.cu | #include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
__global__ void broadcast_fw_dev(
const half *px,
std::uint32_t skip1, std::uint32_t skip2, std::uint32_t size,
half *py) {
const std::uint32_t i = IDX;
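// i % skip1 is the offset below the broadcast axis and i / skip2 the offset above it, so every position along that axis reads the same source element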
if (i < size) py[i] = px[i % skip1 + (i / skip2) * skip1];
}
} // namespace
namespace primitiv {
namespace devices {
void CUDA16::broadcast_fw_impl(
const Tensor &x, std::uint32_t dim, std::uint32_t size, Tensor &y) {
const std::uint32_t skip1 = y.shape().lower_volume(dim);
const std::uint32_t skip2 = skip1 * size;
const std::uint32_t total = y.shape().size();
const std::uint32_t g1 = GRID_SIZE(total, dim1_x_);
CUDA_CALL(::cudaSetDevice(dev_id_));
::broadcast_fw_dev<<<g1, dim1_x_>>>(
CDATA(half, x), skip1, skip2, total, MDATA(half, y));
}
} // namespace devices
} // namespace primitiv
|
13e2b1d0a79deb050a33f23d9d722428bc223ae3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
//host
extern float *Hy, coe_Hy, dt, dz;
extern int size_space, size_Hy;
const float PI = 3.141592653589793f;
const float mu = (4 * PI)*1e-7f;
//device
extern float *dev_Hy, *dev_Ex;
void Hy_init_malloc(int );
void Hy_init_assignValue(int );
void Hy_checkout(int );
void Hy_transfer_host_device(int );
void Hy_transfer_device_host(int );
void Hy_init(int space_size)
{
size_Hy = space_size;
Hy_init_malloc(size_Hy);
Hy_init_assignValue(size_Hy);
}
void Hy_init_malloc(int size)
{
//host
Hy = (float *)malloc(size * sizeof(float));
//device
hipMalloc(&dev_Hy, size * sizeof(float));
}
void Hy_init_assignValue(int size)
{
int i;
for ( i = 0; i < size; i++){
Hy[i] = 0.f;
}
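// Hy update coefficient from the 1-D FDTD form of Faraday's law: Hy -= (dt / (mu * dz)) * (Ex[i+1] - Ex[i])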
coe_Hy = dt / (mu * dz);
}
void Hy_checkout(int size)
{
cout << "Hy: size = " << size << endl;
cout << "coe_Hy = " << coe_Hy;
cout << "Hy: ";
for (int i = 0; i < size; i++)
{
cout << Hy[i] << "\t";
}
cout << endl;
}
void Hy_transfer_host_device(int size_Hy)
{
hipMemcpy(dev_Hy, Hy, size_Hy * sizeof(float), hipMemcpyHostToDevice);
}
void Hy_transfer_device_host(int size_Hy)
{
hipMemcpy(Hy, dev_Hy, size_Hy * sizeof(float), hipMemcpyDeviceToHost);
}
__global__ void Hy_cmp_kernel(float* dev_Hy, float * dev_Ex, float coe_Hy, int size_space)
{
int i;
for (i = 0; i < size_space; i++){
dev_Hy[i] = dev_Hy[i] - (coe_Hy)*(dev_Ex[i + 1] - dev_Ex[i]);
//test
//dev_Hy[i] = i*10.0;
}
} | 13e2b1d0a79deb050a33f23d9d722428bc223ae3.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
//host
extern float *Hy, coe_Hy, dt, dz;
extern int size_space, size_Hy;
const float PI = 3.141592653589793f;
const float mu = (4 * PI)*1e-7f;
//device
extern float *dev_Hy, *dev_Ex;
void Hy_init_malloc(int );
void Hy_init_assignValue(int );
void Hy_checkout(int );
void Hy_transfer_host_device(int );
void Hy_transfer_device_host(int );
void Hy_init(int space_size)
{
size_Hy = space_size;
Hy_init_malloc(size_Hy);
Hy_init_assignValue(size_Hy);
}
void Hy_init_malloc(int size)
{
//host
Hy = (float *)malloc(size * sizeof(float));
//device
cudaMalloc(&dev_Hy, size * sizeof(float));
}
void Hy_init_assignValue(int size)
{
int i;
for ( i = 0; i < size; i++){
Hy[i] = 0.f;
}
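// Hy update coefficient from the 1-D FDTD form of Faraday's law: Hy -= (dt / (mu * dz)) * (Ex[i+1] - Ex[i])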
coe_Hy = dt / (mu * dz);
}
void Hy_checkout(int size)
{
cout << "Hy: size = " << size << endl;
cout << "coe_Hy = " << coe_Hy;
cout << "Hy: ";
for (int i = 0; i < size; i++)
{
cout << Hy[i] << "\t";
}
cout << endl;
}
void Hy_transfer_host_device(int size_Hy)
{
cudaMemcpy(dev_Hy, Hy, size_Hy * sizeof(float), cudaMemcpyHostToDevice);
}
void Hy_transfer_device_host(int size_Hy)
{
cudaMemcpy(Hy, dev_Hy, size_Hy * sizeof(float), cudaMemcpyDeviceToHost);
}
__global__ void Hy_cmp_kernel(float* dev_Hy, float * dev_Ex, float coe_Hy, int size_space)
{
int i;
for (i = 0; i < size_space; i++){
dev_Hy[i] = dev_Hy[i] - (coe_Hy)*(dev_Ex[i + 1] - dev_Ex[i]);
//test
//dev_Hy[i] = i*10.0;
}
} |
3d9dc1805b2ef66c67aad6be8e94a61496baa10d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define MAX_CELL_PER_THREAD 1
__device__ void gpu_swap(void **a, void **b) {
void *temp = *a;
*a = *b;
*b = temp;
}
__global__ void kernel_compute_gen_singleblock_1(uint8_t *matrix_src, uint8_t *matrix_dst, uint32_t dim_m1, uint32_t cols, uint32_t iterations) {
uint32_t cell = threadIdx.x;
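// wrap-around uses bitwise AND with cols-1 / dim_m1, which assumes the row width and total grid size are powers of two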
uint32_t x1 = cell & (cols-1); //% cols;
uint32_t x0 = (x1 - 1) & (cols-1); //% cols;
uint32_t x2 = (x1 + 1) & (cols-1); //% cols;
uint32_t y1 = cell - x1;
uint32_t y0 = (y1 - cols) & dim_m1; //% dim;
uint32_t y2 = (y1 + cols) & dim_m1; //% dim;
for (uint32_t iter = 0; iter < iterations; iter++){
uint8_t aliveCells = matrix_src[x0 + y0] + matrix_src[x1 + y0] + matrix_src[x2 + y0] + matrix_src[x0 + y1] +
matrix_src[x2 + y1] + matrix_src[x0 + y2] + matrix_src[x1 + y2] + matrix_src[x2 + y2];
matrix_dst[y1 + x1] = (aliveCells == 3 || (aliveCells == 2 && matrix_src[y1 + x1])) ? 1 : 0;
gpu_swap((void**)&matrix_dst,(void**)&matrix_src);
__syncthreads();
}
} | 3d9dc1805b2ef66c67aad6be8e94a61496baa10d.cu | #include "includes.h"
#define MAX_CELL_PER_THREAD 1
__device__ void gpu_swap(void **a, void **b) {
void *temp = *a;
*a = *b;
*b = temp;
}
__global__ void kernel_compute_gen_singleblock_1(uint8_t *matrix_src, uint8_t *matrix_dst, uint32_t dim_m1, uint32_t cols, uint32_t iterations) {
uint32_t cell = threadIdx.x;
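// wrap-around uses bitwise AND with cols-1 / dim_m1, which assumes the row width and total grid size are powers of two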
uint32_t x1 = cell & (cols-1); //% cols;
uint32_t x0 = (x1 - 1) & (cols-1); //% cols;
uint32_t x2 = (x1 + 1) & (cols-1); //% cols;
uint32_t y1 = cell - x1;
uint32_t y0 = (y1 - cols) & dim_m1; //% dim;
uint32_t y2 = (y1 + cols) & dim_m1; //% dim;
for (uint32_t iter = 0; iter < iterations; iter++){
uint8_t aliveCells = matrix_src[x0 + y0] + matrix_src[x1 + y0] + matrix_src[x2 + y0] + matrix_src[x0 + y1] +
matrix_src[x2 + y1] + matrix_src[x0 + y2] + matrix_src[x1 + y2] + matrix_src[x2 + y2];
matrix_dst[y1 + x1] = (aliveCells == 3 || (aliveCells == 2 && matrix_src[y1 + x1])) ? 1 : 0;
gpu_swap((void**)&matrix_dst,(void**)&matrix_src);
__syncthreads();
}
} |
d41dd0ebd39225a57cfba6a7917e443485c20061.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2017, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/types.hpp>
#include <cudf/cudf.h>
#include <rmm/rmm.h>
#include <utilities/column_utils.hpp>
#include <utilities/error_utils.hpp>
#include <utilities/type_dispatcher.hpp>
#include <utilities/nvtx/nvtx_utils.h>
#include <string/nvcategory_util.hpp>
#include <nvstrings/NVCategory.h>
#include <copying/gather.hpp>
#include "joining.h"
#include <limits>
#include <set>
#include <vector>
// Size limit due to use of int32 as join output.
// FIXME: upgrade to 64-bit
using output_index_type = gdf_index_type;
constexpr output_index_type MAX_JOIN_SIZE{std::numeric_limits<output_index_type>::max()};
/* --------------------------------------------------------------------------*/
/**
* @brief Computes the Join result between two tables using the hash-based implementation.
*
* @param[in] num_cols The number of columns to join
* @param[in] leftcol The left set of columns to join
* @param[in] rightcol The right set of columns to join
* @param[out] l_result The join computed indices of the left table
* @param[out] r_result The join computed indices of the right table
* @tparam join_type The type of join to be performed
*
* @returns Upon successful computation, returns GDF_SUCCESS. Otherwise returns appropriate error code
*/
/* ----------------------------------------------------------------------------*/
template <JoinType join_type>
gdf_error hash_join(gdf_size_type num_cols, gdf_column **leftcol, gdf_column **rightcol,
gdf_column *l_result, gdf_column *r_result)
{
cudf::table left_table{leftcol, num_cols};
cudf::table right_table{rightcol, num_cols};
return join_hash<join_type, output_index_type>(left_table, right_table,
l_result, r_result);
}
/* --------------------------------------------------------------------------*/
/**
* @brief Allocates a buffer and fills it with a repeated value
*
* @param[in,out] buffer Address of the buffer to be allocated
* @param[in] buffer_length Amount of memory to be allocated
* @param[in] value The value to be filled into the buffer
* @tparam data_type The data type to be used for the buffer
*
* @returns GDF_SUCCESS upon succesful completion
*/
/* ----------------------------------------------------------------------------*/
template <typename data_type>
gdf_error allocValueBuffer(data_type ** buffer,
const gdf_size_type buffer_length,
const data_type value)
{
RMM_TRY( RMM_ALLOC((void**)buffer, buffer_length*sizeof(data_type), 0) );
thrust::fill(thrust::device, *buffer, *buffer + buffer_length, value);
return GDF_SUCCESS;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Allocates a buffer and fills it with a sequence
*
* @param[in,out] buffer Address of the buffer to be allocated
* @param[in] buffer_length Amount of memory to be allocated
* @tparam data_type The data type to be used for the buffer
*
* @returns GDF_SUCCESS upon succesful completion
*/
/* ----------------------------------------------------------------------------*/
template <typename data_type>
gdf_error allocSequenceBuffer(data_type ** buffer,
const gdf_size_type buffer_length)
{
RMM_TRY( RMM_ALLOC((void**)buffer, buffer_length*sizeof(data_type), 0) );
thrust::sequence(thrust::device, *buffer, *buffer + buffer_length);
return GDF_SUCCESS;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Trivially computes full join of two tables if one of the tables
* are empty
*
* @param[in] left_size The size of the left table
* @param[in] right_size The size of the right table
* @param[in] rightcol The right set of columns to join
* @param[out] left_result The join computed indices of the left table
* @param[out] right_result The join computed indices of the right table
*
* @returns GDF_SUCCESS upon succesfull compute, otherwise returns appropriate error code
*/
/* ----------------------------------------------------------------------------*/
gdf_error trivial_full_join(
const gdf_size_type left_size,
const gdf_size_type right_size,
gdf_column *left_result,
gdf_column *right_result) {
// Deduce the type of the output gdf_columns
gdf_dtype dtype;
switch(sizeof(output_index_type))
{
case 1 : dtype = GDF_INT8; break;
case 2 : dtype = GDF_INT16; break;
case 4 : dtype = GDF_INT32; break;
case 8 : dtype = GDF_INT64; break;
}
output_index_type *l_ptr{nullptr};
output_index_type *r_ptr{nullptr};
gdf_size_type result_size{0};
if ((left_size == 0) && (right_size == 0)) {
return GDF_DATASET_EMPTY;
}
if (left_size == 0) {
allocValueBuffer(&l_ptr, right_size,
static_cast<output_index_type>(-1));
allocSequenceBuffer(&r_ptr, right_size);
result_size = right_size;
} else if (right_size == 0) {
allocValueBuffer(&r_ptr, left_size,
static_cast<output_index_type>(-1));
allocSequenceBuffer(&l_ptr, left_size);
result_size = left_size;
}
gdf_column_view( left_result, l_ptr, nullptr, result_size, dtype);
gdf_column_view(right_result, r_ptr, nullptr, result_size, dtype);
CUDA_CHECK_LAST();
return GDF_SUCCESS;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes the join operation between two sets of columns
*
* @param[in] num_cols The number of columns to join
* @param[in] leftcol The left set of columns to join
* @param[in] rightcol The right set of columns to join
* @param[out] left_result The join computed indices of the left table
* @param[out] right_result The join computed indices of the right table
* @param[in] join_context A structure that determines various run parameters, such as
* whether to perform a hash or sort based join
* @tparam join_type The type of join to be performed
*
* @returns GDF_SUCCESS upon succesfull compute, otherwise returns appropriate error code
*/
/* ----------------------------------------------------------------------------*/
template <JoinType join_type>
gdf_error join_call( int num_cols, gdf_column **leftcol, gdf_column **rightcol,
gdf_column *left_result, gdf_column *right_result,
gdf_context *join_context)
{
GDF_REQUIRE( 0 != num_cols, GDF_DATASET_EMPTY);
GDF_REQUIRE( nullptr != leftcol, GDF_DATASET_EMPTY);
GDF_REQUIRE( nullptr != rightcol, GDF_DATASET_EMPTY);
GDF_REQUIRE( nullptr != join_context, GDF_INVALID_API_CALL);
const auto left_col_size = leftcol[0]->size;
const auto right_col_size = rightcol[0]->size;
GDF_REQUIRE( left_col_size < MAX_JOIN_SIZE, GDF_COLUMN_SIZE_TOO_BIG);
GDF_REQUIRE( right_col_size < MAX_JOIN_SIZE, GDF_COLUMN_SIZE_TOO_BIG);
// If both frames are empty, return immediately
if((0 == left_col_size ) && (0 == right_col_size)) {
return GDF_SUCCESS;
}
// If left join and the left table is empty, return immediately
if( (JoinType::LEFT_JOIN == join_type) && (0 == left_col_size)){
return GDF_SUCCESS;
}
// If Inner Join and either table is empty, return immediately
if( (JoinType::INNER_JOIN == join_type) &&
((0 == left_col_size) || (0 == right_col_size)) ){
return GDF_SUCCESS;
}
// If Full Join and either table is empty, compute trivial full join
if( (JoinType::FULL_JOIN == join_type) &&
((0 == left_col_size) || (0 == right_col_size)) ){
return trivial_full_join(left_col_size, right_col_size, left_result, right_result);
}
// check that the columns data are not null, have matching types,
// and the same number of rows
for (int i = 0; i < num_cols; i++) {
if((right_col_size > 0) && (nullptr == rightcol[i]->data)){
return GDF_DATASET_EMPTY;
}
if((left_col_size > 0) && (nullptr == leftcol[i]->data)){
return GDF_DATASET_EMPTY;
}
if(rightcol[i]->dtype != leftcol[i]->dtype) return GDF_DTYPE_MISMATCH;
if(left_col_size != leftcol[i]->size) return GDF_COLUMN_SIZE_MISMATCH;
if(right_col_size != rightcol[i]->size) return GDF_COLUMN_SIZE_MISMATCH;
// Ensure GDF_TIMESTAMP columns have the same resolution
if (GDF_TIMESTAMP == rightcol[i]->dtype) {
GDF_REQUIRE(
rightcol[i]->dtype_info.time_unit == leftcol[i]->dtype_info.time_unit,
GDF_TIMESTAMP_RESOLUTION_MISMATCH);
}
}
gdf_method join_method = join_context->flag_method;
gdf_error gdf_error_code{GDF_SUCCESS};
PUSH_RANGE("LIBGDF_JOIN", JOIN_COLOR);
switch(join_method)
{
case GDF_HASH:
{
gdf_error_code = hash_join<join_type>(num_cols, leftcol, rightcol, left_result, right_result);
break;
}
case GDF_SORT:
{
// Sort based joins only support single column joins
if(1 == num_cols)
{
gdf_error_code = sort_join<join_type, output_index_type>(leftcol[0], rightcol[0], left_result, right_result);
}
else
{
gdf_error_code = GDF_JOIN_TOO_MANY_COLUMNS;
}
break;
}
default:
gdf_error_code = GDF_UNSUPPORTED_METHOD;
}
POP_RANGE();
return gdf_error_code;
}
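/* --------------------------------------------------------------------------*/
/**
 * @brief Constructs the joined output dataframe: allocates the result
 * columns and gathers rows from the left, right, and joined-on columns using
 * the previously computed left/right join indices.
 *
 * @returns GDF_SUCCESS upon successful completion, otherwise an appropriate error code
 */
/* ----------------------------------------------------------------------------*/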
template <JoinType join_type, typename index_type>
gdf_error construct_join_output_df(
std::vector<gdf_column*>& ljoincol,
std::vector<gdf_column*>& rjoincol,
gdf_column **left_cols,
int num_left_cols,
int left_join_cols[],
gdf_column **right_cols,
int num_right_cols,
int right_join_cols[],
int num_cols_to_join,
int result_num_cols,
gdf_column ** result_cols,
gdf_column * left_indices,
gdf_column * right_indices) {
PUSH_RANGE("LIBGDF_JOIN_OUTPUT", JOIN_COLOR);
//create left and right input table with columns not joined on
std::vector<gdf_column*> lnonjoincol;
std::vector<gdf_column*> rnonjoincol;
std::set<int> l_join_indices, r_join_indices;
for (int i = 0; i < num_cols_to_join; ++i) {
l_join_indices.insert(left_join_cols[i]);
r_join_indices.insert(right_join_cols[i]);
}
for (int i = 0; i < num_left_cols; ++i) {
if (l_join_indices.find(i) == l_join_indices.end()) {
lnonjoincol.push_back(left_cols[i]);
}
}
for (int i = 0; i < num_right_cols; ++i) {
if (r_join_indices.find(i) == r_join_indices.end()) {
rnonjoincol.push_back(right_cols[i]);
}
}
//TODO : Invalid api
gdf_size_type join_size = left_indices->size;
int left_table_end = num_left_cols - num_cols_to_join;
int right_table_begin = num_left_cols;
//create left and right output column data buffers
for (int i = 0; i < left_table_end; ++i) {
gdf_column_view(result_cols[i], nullptr, nullptr, join_size, lnonjoincol[i]->dtype);
int col_width = cudf::byte_width(*(result_cols[i]));
RMM_TRY( RMM_ALLOC((void**)&(result_cols[i]->data), col_width * join_size, 0) ); // TODO: non-default stream?
RMM_TRY( RMM_ALLOC((void**)&(result_cols[i]->valid), sizeof(gdf_valid_type)*gdf_valid_allocation_size(join_size), 0) );
CUDA_TRY( hipMemset(result_cols[i]->valid, 0, sizeof(gdf_valid_type)*gdf_valid_allocation_size(join_size)) );
CHECK_STREAM(0);
}
for (int i = right_table_begin; i < result_num_cols; ++i) {
gdf_column_view(result_cols[i], nullptr, nullptr, join_size, rnonjoincol[i - right_table_begin]->dtype);
int col_width = cudf::byte_width(*(result_cols[i]));
RMM_TRY( RMM_ALLOC((void**)&(result_cols[i]->data), col_width * join_size, 0) ); // TODO: non-default stream?
RMM_TRY( RMM_ALLOC((void**)&(result_cols[i]->valid), sizeof(gdf_valid_type)*gdf_valid_allocation_size(join_size), 0) );
CUDA_TRY( hipMemset(result_cols[i]->valid, 0, sizeof(gdf_valid_type)*gdf_valid_allocation_size(join_size)) );
CHECK_STREAM(0);
}
//create joined output column data buffers
for (int join_index = 0; join_index < num_cols_to_join; ++join_index) {
int i = left_table_end + join_index;
gdf_column_view(result_cols[i], nullptr, nullptr, join_size, left_cols[left_join_cols[join_index]]->dtype);
int col_width = cudf::byte_width(*(result_cols[i]));
RMM_TRY( RMM_ALLOC((void**)&(result_cols[i]->data), col_width * join_size, 0) ); // TODO: non-default stream?
RMM_TRY( RMM_ALLOC((void**)&(result_cols[i]->valid), sizeof(gdf_valid_type)*gdf_valid_allocation_size(join_size), 0) );
CUDA_TRY( hipMemset(result_cols[i]->valid, 0, sizeof(gdf_valid_type)*gdf_valid_allocation_size(join_size)) );
CHECK_STREAM(0);
}
// If the join_type is an outer join, then indices for non-matches will be
// -1, requiring bounds checking when gathering the result table
bool const check_bounds{ join_type != JoinType::INNER_JOIN };
// Construct the left columns
if (0 != lnonjoincol.size()) {
cudf::table left_source_table(lnonjoincol.data(), lnonjoincol.size());
cudf::table left_destination_table(result_cols,
num_left_cols - num_cols_to_join);
cudf::detail::gather(&left_source_table,
static_cast<index_type const *>(left_indices->data),
&left_destination_table, check_bounds);
CHECK_STREAM(0);
gdf_error update_err = nvcategory_gather_table(left_source_table,left_destination_table);
CHECK_STREAM(0);
GDF_REQUIRE(update_err == GDF_SUCCESS,update_err);
}
// Construct the right columns
if (0 != rnonjoincol.size()) {
cudf::table right_source_table(rnonjoincol.data(), rnonjoincol.size());
cudf::table right_destination_table(result_cols + right_table_begin,
num_right_cols - num_cols_to_join);
cudf::detail::gather(&right_source_table,
static_cast<index_type const *>(right_indices->data),
&right_destination_table, check_bounds);
CHECK_STREAM(0);
gdf_error update_err = nvcategory_gather_table(right_source_table,right_destination_table);
CHECK_STREAM(0);
GDF_REQUIRE(update_err == GDF_SUCCESS,update_err);
}
// Construct the joined columns
if (0 != ljoincol.size()) {
cudf::table join_source_table(ljoincol.data(), ljoincol.size());
cudf::table join_destination_table(result_cols + left_table_end,
num_cols_to_join);
// Gather valid rows from the right table
// TODO: Revisit this, because it probably can be done more efficiently
if (JoinType::FULL_JOIN == join_type) {
cudf::table right_source_table(rjoincol.data(), rjoincol.size());
cudf::detail::gather(
&right_source_table,
static_cast<index_type const *>(right_indices->data),
&join_destination_table, check_bounds);
CHECK_STREAM(0);
}
cudf::detail::gather(&join_source_table,
static_cast<index_type const *>(left_indices->data),
&join_destination_table, check_bounds);
CHECK_STREAM(0);
gdf_error update_err = nvcategory_gather_table(join_source_table,join_destination_table);
CHECK_STREAM(0);
GDF_REQUIRE(update_err == GDF_SUCCESS,update_err);
}
POP_RANGE();
return GDF_SUCCESS;
}
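/* --------------------------------------------------------------------------*/
/**
 * @brief Validates the inputs, makes GDF_STRING_CATEGORY dictionaries
 * comparable when present, computes the join indices, and optionally
 * constructs the joined output dataframe.
 *
 * @returns GDF_SUCCESS upon successful completion, otherwise an appropriate error code
 */
/* ----------------------------------------------------------------------------*/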
template <JoinType join_type, typename index_type>
gdf_error join_call_compute_df(
gdf_column **left_cols,
int num_left_cols,
int left_join_cols[],
gdf_column **right_cols,
int num_right_cols,
int right_join_cols[],
int num_cols_to_join,
int result_num_cols,
gdf_column **result_cols,
gdf_column * left_indices,
gdf_column * right_indices,
gdf_context *join_context) {
GDF_REQUIRE(nullptr != left_cols, GDF_DATASET_EMPTY);
GDF_REQUIRE(nullptr != right_cols, GDF_DATASET_EMPTY);
GDF_REQUIRE(0 != num_cols_to_join, GDF_SUCCESS);
GDF_REQUIRE(nullptr != left_join_cols, GDF_DATASET_EMPTY);
GDF_REQUIRE(nullptr != right_join_cols, GDF_DATASET_EMPTY);
GDF_REQUIRE(nullptr != join_context, GDF_INVALID_API_CALL);
for(int column_index = 0; column_index < num_left_cols; column_index++){
GDF_REQUIRE(left_cols[column_index]->dtype != GDF_invalid,GDF_UNSUPPORTED_DTYPE);
}
for(int column_index = 0; column_index < num_right_cols; column_index++){
GDF_REQUIRE(right_cols[column_index]->dtype != GDF_invalid,GDF_UNSUPPORTED_DTYPE);
}
// Determine if requested output is the indices of matching rows, the fully
// constructed output dataframe, or both
bool const construct_output_dataframe{nullptr != result_cols};
bool const return_output_indices{(nullptr != left_indices) and
(nullptr != right_indices)};
GDF_REQUIRE(construct_output_dataframe or return_output_indices,
GDF_INVALID_API_CALL);
auto const left_col_size = left_cols[0]->size;
auto const right_col_size = right_cols[0]->size;
// If the inputs are empty, immediately return
if ((0 == left_col_size) && (0 == right_col_size)) {
return GDF_SUCCESS;
}
// If left join and the left table is empty, return immediately
if ((JoinType::LEFT_JOIN == join_type) && (0 == left_col_size)) {
return GDF_SUCCESS;
}
// If Inner Join and either table is empty, return immediately
if ((JoinType::INNER_JOIN == join_type) &&
((0 == left_col_size) || (0 == right_col_size))) {
return GDF_SUCCESS;
}
//if the inputs are nvcategory we need to make the dictionaries comparable
bool at_least_one_category_column = false;
for(int join_column_index = 0; join_column_index < num_cols_to_join; join_column_index++){
at_least_one_category_column |= left_cols[left_join_cols[join_column_index]]->dtype == GDF_STRING_CATEGORY;
}
std::vector<gdf_column*> new_left_cols(left_cols, left_cols + num_left_cols);
std::vector<gdf_column*> new_right_cols(right_cols, right_cols + num_right_cols);
std::vector<gdf_column *> temp_columns_to_free;
if(at_least_one_category_column){
for(int join_column_index = 0; join_column_index < num_cols_to_join; join_column_index++){
if(left_cols[left_join_cols[join_column_index]]->dtype == GDF_STRING_CATEGORY){
GDF_REQUIRE(right_cols[right_join_cols[join_column_index]]->dtype == GDF_STRING_CATEGORY, GDF_DTYPE_MISMATCH);
gdf_column * left_original_column = new_left_cols[left_join_cols[join_column_index]];
gdf_column * right_original_column = new_right_cols[right_join_cols[join_column_index]];
gdf_column * new_left_column_ptr = new gdf_column{};
gdf_column * new_right_column_ptr = new gdf_column{};
temp_columns_to_free.push_back(new_left_column_ptr);
temp_columns_to_free.push_back(new_right_column_ptr);
gdf_column * input_join_columns_merge[2] = {left_original_column, right_original_column};
gdf_column * new_join_columns[2] = {new_left_column_ptr,
new_right_column_ptr};
gdf_column_view(new_left_column_ptr, nullptr, nullptr, left_original_column->size, GDF_STRING_CATEGORY);
gdf_column_view(new_right_column_ptr, nullptr, nullptr, right_original_column->size, GDF_STRING_CATEGORY);
int col_width = cudf::byte_width(*new_left_column_ptr);
RMM_TRY( RMM_ALLOC(&(new_left_column_ptr->data), col_width * left_original_column->size, 0) ); // TODO: non-default stream?
if(left_original_column->valid != nullptr){
RMM_TRY( RMM_ALLOC(&(new_left_column_ptr->valid), sizeof(gdf_valid_type)*gdf_valid_allocation_size(left_original_column->size), 0) );
CUDA_TRY( hipMemcpy(new_left_column_ptr->valid, left_original_column->valid, sizeof(gdf_valid_type)*gdf_num_bitmask_elements(left_original_column->size),hipMemcpyDeviceToDevice) );
}else{
new_left_column_ptr->valid = nullptr;
}
new_left_column_ptr->null_count = left_original_column->null_count;
RMM_TRY( RMM_ALLOC(&(new_right_column_ptr->data), col_width * right_original_column->size, 0) ); // TODO: non-default stream?
if(right_original_column->valid != nullptr){
RMM_TRY( RMM_ALLOC(&(new_right_column_ptr->valid), sizeof(gdf_valid_type)*gdf_valid_allocation_size(right_original_column->size), 0) );
CUDA_TRY( hipMemcpy(new_right_column_ptr->valid, right_original_column->valid, sizeof(gdf_valid_type)*gdf_num_bitmask_elements(right_original_column->size),hipMemcpyDeviceToDevice) );
}else{
new_right_column_ptr->valid = nullptr;
}
new_right_column_ptr->null_count = right_original_column->null_count;
gdf_error err = sync_column_categories(input_join_columns_merge,
new_join_columns,
2);
GDF_REQUIRE(GDF_SUCCESS == err, err);
new_left_cols[left_join_cols[join_column_index]] = new_join_columns[0];
new_right_cols[right_join_cols[join_column_index]] = new_join_columns[1];
CHECK_STREAM(0);
}
}
left_cols = new_left_cols.data();
right_cols = new_right_cols.data();
}
// If index outputs are not requested, create columns to store them
// for computing combined join output
gdf_column *left_index_out = left_indices;
gdf_column *right_index_out = right_indices;
using gdf_col_pointer =
typename std::unique_ptr<gdf_column, std::function<void(gdf_column *)>>;
auto gdf_col_deleter = [](gdf_column *col) {
col->size = 0;
if (col->data) {
RMM_FREE(col->data, 0);
}
if (col->valid) {
RMM_FREE(col->valid, 0);
}
};
gdf_col_pointer l_index_temp, r_index_temp;
if (nullptr == left_indices) {
l_index_temp = {new gdf_column{}, gdf_col_deleter};
left_index_out = l_index_temp.get();
}
if (nullptr == right_indices) {
r_index_temp = {new gdf_column{}, gdf_col_deleter};
right_index_out = r_index_temp.get();
}
//get column pointers to join on
std::vector<gdf_column*> ljoincol;
std::vector<gdf_column*> rjoincol;
for (int i = 0; i < num_cols_to_join; ++i) {
ljoincol.push_back(left_cols[ left_join_cols[i] ]);
rjoincol.push_back(right_cols[ right_join_cols[i] ]);
}
gdf_error join_err = join_call<join_type>(num_cols_to_join,
ljoincol.data(), rjoincol.data(),
left_index_out, right_index_out,
join_context);
CHECK_STREAM(0);
GDF_REQUIRE(GDF_SUCCESS == join_err, join_err);
//If construct_output_dataframe is false then left_index_out or right_index_out
//was not dynamically allocated.
if (not construct_output_dataframe) {
return join_err;
}
gdf_error df_err =
construct_join_output_df<join_type, index_type>(
ljoincol, rjoincol,
left_cols, num_left_cols, left_join_cols,
right_cols, num_right_cols, right_join_cols,
num_cols_to_join, result_num_cols, result_cols,
left_index_out, right_index_out);
CHECK_STREAM(0);
l_index_temp.reset(nullptr);
r_index_temp.reset(nullptr);
//freeing up the temp columns used to sync categories between the join columns
for(unsigned int column_to_free = 0; column_to_free < temp_columns_to_free.size(); column_to_free++){
gdf_column_free(temp_columns_to_free[column_to_free]);
delete temp_columns_to_free[column_to_free];
}
CHECK_STREAM(0);
return df_err;
}
gdf_error gdf_left_join(
gdf_column **left_cols,
int num_left_cols,
int left_join_cols[],
gdf_column **right_cols,
int num_right_cols,
int right_join_cols[],
int num_cols_to_join,
int result_num_cols,
gdf_column **result_cols,
gdf_column * left_indices,
gdf_column * right_indices,
gdf_context *join_context) {
return join_call_compute_df<JoinType::LEFT_JOIN, output_index_type>(
left_cols,
num_left_cols,
left_join_cols,
right_cols,
num_right_cols,
right_join_cols,
num_cols_to_join,
result_num_cols,
result_cols,
left_indices,
right_indices,
join_context);
}
gdf_error gdf_inner_join(
gdf_column **left_cols,
int num_left_cols,
int left_join_cols[],
gdf_column **right_cols,
int num_right_cols,
int right_join_cols[],
int num_cols_to_join,
int result_num_cols,
gdf_column **result_cols,
gdf_column * left_indices,
gdf_column * right_indices,
gdf_context *join_context) {
return join_call_compute_df<JoinType::INNER_JOIN, output_index_type>(
left_cols,
num_left_cols,
left_join_cols,
right_cols,
num_right_cols,
right_join_cols,
num_cols_to_join,
result_num_cols,
result_cols,
left_indices,
right_indices,
join_context);
}
gdf_error gdf_full_join(
gdf_column **left_cols,
int num_left_cols,
int left_join_cols[],
gdf_column **right_cols,
int num_right_cols,
int right_join_cols[],
int num_cols_to_join,
int result_num_cols,
gdf_column **result_cols,
gdf_column * left_indices,
gdf_column * right_indices,
gdf_context *join_context) {
return join_call_compute_df<JoinType::FULL_JOIN, output_index_type>(
left_cols,
num_left_cols,
left_join_cols,
right_cols,
num_right_cols,
right_join_cols,
num_cols_to_join,
result_num_cols,
result_cols,
left_indices,
right_indices,
join_context);
}
| d41dd0ebd39225a57cfba6a7917e443485c20061.cu | /*
* Copyright (c) 2017, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/types.hpp>
#include <cudf/cudf.h>
#include <rmm/rmm.h>
#include <utilities/column_utils.hpp>
#include <utilities/error_utils.hpp>
#include <utilities/type_dispatcher.hpp>
#include <utilities/nvtx/nvtx_utils.h>
#include <string/nvcategory_util.hpp>
#include <nvstrings/NVCategory.h>
#include <copying/gather.hpp>
#include "joining.h"
#include <limits>
#include <set>
#include <vector>
// Size limit due to use of int32 as join output.
// FIXME: upgrade to 64-bit
using output_index_type = gdf_index_type;
constexpr output_index_type MAX_JOIN_SIZE{std::numeric_limits<output_index_type>::max()};
/* --------------------------------------------------------------------------*/
/**
* @brief Computes the Join result between two tables using the hash-based implementation.
*
* @param[in] num_cols The number of columns to join
* @param[in] leftcol The left set of columns to join
* @param[in] rightcol The right set of columns to join
* @param[out] l_result The join computed indices of the left table
* @param[out] r_result The join computed indices of the right table
* @tparam join_type The type of join to be performed
*
* @returns Upon successful computation, returns GDF_SUCCESS. Otherwise returns appropriate error code
*/
/* ----------------------------------------------------------------------------*/
template <JoinType join_type>
gdf_error hash_join(gdf_size_type num_cols, gdf_column **leftcol, gdf_column **rightcol,
gdf_column *l_result, gdf_column *r_result)
{
cudf::table left_table{leftcol, num_cols};
cudf::table right_table{rightcol, num_cols};
return join_hash<join_type, output_index_type>(left_table, right_table,
l_result, r_result);
}
/* --------------------------------------------------------------------------*/
/**
* @brief Allocates a buffer and fills it with a repeated value
*
* @param[in,out] buffer Address of the buffer to be allocated
* @param[in] buffer_length Amount of memory to be allocated
* @param[in] value The value to be filled into the buffer
* @tparam data_type The data type to be used for the buffer
*
 * @returns GDF_SUCCESS upon successful completion
*/
/* ----------------------------------------------------------------------------*/
template <typename data_type>
gdf_error allocValueBuffer(data_type ** buffer,
const gdf_size_type buffer_length,
const data_type value)
{
RMM_TRY( RMM_ALLOC((void**)buffer, buffer_length*sizeof(data_type), 0) );
thrust::fill(thrust::device, *buffer, *buffer + buffer_length, value);
return GDF_SUCCESS;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Allocates a buffer and fills it with a sequence
*
* @param[in,out] buffer Address of the buffer to be allocated
* @param[in] buffer_length Amount of memory to be allocated
* @tparam data_type The data type to be used for the buffer
*
* @returns GDF_SUCCESS upon succesful completion
*/
/* ----------------------------------------------------------------------------*/
template <typename data_type>
gdf_error allocSequenceBuffer(data_type ** buffer,
const gdf_size_type buffer_length)
{
RMM_TRY( RMM_ALLOC((void**)buffer, buffer_length*sizeof(data_type), 0) );
thrust::sequence(thrust::device, *buffer, *buffer + buffer_length);
return GDF_SUCCESS;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Trivially computes full join of two tables if one of the tables
 * is empty
*
* @param[in] left_size The size of the left table
* @param[in] right_size The size of the right table
* @param[out] left_result The join computed indices of the left table
* @param[out] right_result The join computed indices of the right table
*
 * @returns GDF_SUCCESS upon successful computation, otherwise returns an appropriate error code
*/
/* ----------------------------------------------------------------------------*/
gdf_error trivial_full_join(
const gdf_size_type left_size,
const gdf_size_type right_size,
gdf_column *left_result,
gdf_column *right_result) {
// Deduce the type of the output gdf_columns
gdf_dtype dtype;
switch(sizeof(output_index_type))
{
case 1 : dtype = GDF_INT8; break;
case 2 : dtype = GDF_INT16; break;
case 4 : dtype = GDF_INT32; break;
case 8 : dtype = GDF_INT64; break;
}
output_index_type *l_ptr{nullptr};
output_index_type *r_ptr{nullptr};
gdf_size_type result_size{0};
if ((left_size == 0) && (right_size == 0)) {
return GDF_DATASET_EMPTY;
}
if (left_size == 0) {
allocValueBuffer(&l_ptr, right_size,
static_cast<output_index_type>(-1));
allocSequenceBuffer(&r_ptr, right_size);
result_size = right_size;
} else if (right_size == 0) {
allocValueBuffer(&r_ptr, left_size,
static_cast<output_index_type>(-1));
allocSequenceBuffer(&l_ptr, left_size);
result_size = left_size;
}
gdf_column_view( left_result, l_ptr, nullptr, result_size, dtype);
gdf_column_view(right_result, r_ptr, nullptr, result_size, dtype);
CUDA_CHECK_LAST();
return GDF_SUCCESS;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes the join operation between two sets of columns
*
* @param[in] num_cols The number of columns to join
* @param[in] leftcol The left set of columns to join
* @param[in] rightcol The right set of columns to join
* @param[out] left_result The join computed indices of the left table
* @param[out] right_result The join computed indices of the right table
* @param[in] join_context A structure that determines various run parameters, such as
* whether to perform a hash or sort based join
* @tparam join_type The type of join to be performed
*
 * @returns GDF_SUCCESS upon successful computation, otherwise returns an appropriate error code
*/
/* ----------------------------------------------------------------------------*/
template <JoinType join_type>
gdf_error join_call( int num_cols, gdf_column **leftcol, gdf_column **rightcol,
gdf_column *left_result, gdf_column *right_result,
gdf_context *join_context)
{
GDF_REQUIRE( 0 != num_cols, GDF_DATASET_EMPTY);
GDF_REQUIRE( nullptr != leftcol, GDF_DATASET_EMPTY);
GDF_REQUIRE( nullptr != rightcol, GDF_DATASET_EMPTY);
GDF_REQUIRE( nullptr != join_context, GDF_INVALID_API_CALL);
const auto left_col_size = leftcol[0]->size;
const auto right_col_size = rightcol[0]->size;
GDF_REQUIRE( left_col_size < MAX_JOIN_SIZE, GDF_COLUMN_SIZE_TOO_BIG);
GDF_REQUIRE( right_col_size < MAX_JOIN_SIZE, GDF_COLUMN_SIZE_TOO_BIG);
// If both frames are empty, return immediately
if((0 == left_col_size ) && (0 == right_col_size)) {
return GDF_SUCCESS;
}
// If left join and the left table is empty, return immediately
if( (JoinType::LEFT_JOIN == join_type) && (0 == left_col_size)){
return GDF_SUCCESS;
}
// If Inner Join and either table is empty, return immediately
if( (JoinType::INNER_JOIN == join_type) &&
((0 == left_col_size) || (0 == right_col_size)) ){
return GDF_SUCCESS;
}
// If Full Join and either table is empty, compute trivial full join
if( (JoinType::FULL_JOIN == join_type) &&
((0 == left_col_size) || (0 == right_col_size)) ){
return trivial_full_join(left_col_size, right_col_size, left_result, right_result);
}
// check that the columns data are not null, have matching types,
// and the same number of rows
for (int i = 0; i < num_cols; i++) {
if((right_col_size > 0) && (nullptr == rightcol[i]->data)){
return GDF_DATASET_EMPTY;
}
if((left_col_size > 0) && (nullptr == leftcol[i]->data)){
return GDF_DATASET_EMPTY;
}
if(rightcol[i]->dtype != leftcol[i]->dtype) return GDF_DTYPE_MISMATCH;
if(left_col_size != leftcol[i]->size) return GDF_COLUMN_SIZE_MISMATCH;
if(right_col_size != rightcol[i]->size) return GDF_COLUMN_SIZE_MISMATCH;
// Ensure GDF_TIMESTAMP columns have the same resolution
if (GDF_TIMESTAMP == rightcol[i]->dtype) {
GDF_REQUIRE(
rightcol[i]->dtype_info.time_unit == leftcol[i]->dtype_info.time_unit,
GDF_TIMESTAMP_RESOLUTION_MISMATCH);
}
}
gdf_method join_method = join_context->flag_method;
gdf_error gdf_error_code{GDF_SUCCESS};
PUSH_RANGE("LIBGDF_JOIN", JOIN_COLOR);
switch(join_method)
{
case GDF_HASH:
{
gdf_error_code = hash_join<join_type>(num_cols, leftcol, rightcol, left_result, right_result);
break;
}
case GDF_SORT:
{
// Sort based joins only support single column joins
if(1 == num_cols)
{
gdf_error_code = sort_join<join_type, output_index_type>(leftcol[0], rightcol[0], left_result, right_result);
}
else
{
gdf_error_code = GDF_JOIN_TOO_MANY_COLUMNS;
}
break;
}
default:
gdf_error_code = GDF_UNSUPPORTED_METHOD;
}
POP_RANGE();
return gdf_error_code;
}
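/* --------------------------------------------------------------------------*/
/**
 * @brief Constructs the joined output dataframe: allocates the result
 * columns and gathers rows from the left, right, and joined-on columns using
 * the previously computed left/right join indices.
 *
 * @returns GDF_SUCCESS upon successful completion, otherwise an appropriate error code
 */
/* ----------------------------------------------------------------------------*/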
template <JoinType join_type, typename index_type>
gdf_error construct_join_output_df(
std::vector<gdf_column*>& ljoincol,
std::vector<gdf_column*>& rjoincol,
gdf_column **left_cols,
int num_left_cols,
int left_join_cols[],
gdf_column **right_cols,
int num_right_cols,
int right_join_cols[],
int num_cols_to_join,
int result_num_cols,
gdf_column ** result_cols,
gdf_column * left_indices,
gdf_column * right_indices) {
PUSH_RANGE("LIBGDF_JOIN_OUTPUT", JOIN_COLOR);
//create left and right input table with columns not joined on
std::vector<gdf_column*> lnonjoincol;
std::vector<gdf_column*> rnonjoincol;
std::set<int> l_join_indices, r_join_indices;
for (int i = 0; i < num_cols_to_join; ++i) {
l_join_indices.insert(left_join_cols[i]);
r_join_indices.insert(right_join_cols[i]);
}
for (int i = 0; i < num_left_cols; ++i) {
if (l_join_indices.find(i) == l_join_indices.end()) {
lnonjoincol.push_back(left_cols[i]);
}
}
for (int i = 0; i < num_right_cols; ++i) {
if (r_join_indices.find(i) == r_join_indices.end()) {
rnonjoincol.push_back(right_cols[i]);
}
}
//TODO : Invalid api
gdf_size_type join_size = left_indices->size;
int left_table_end = num_left_cols - num_cols_to_join;
int right_table_begin = num_left_cols;
//create left and right output column data buffers
for (int i = 0; i < left_table_end; ++i) {
gdf_column_view(result_cols[i], nullptr, nullptr, join_size, lnonjoincol[i]->dtype);
int col_width = cudf::byte_width(*(result_cols[i]));
RMM_TRY( RMM_ALLOC((void**)&(result_cols[i]->data), col_width * join_size, 0) ); // TODO: non-default stream?
RMM_TRY( RMM_ALLOC((void**)&(result_cols[i]->valid), sizeof(gdf_valid_type)*gdf_valid_allocation_size(join_size), 0) );
CUDA_TRY( cudaMemset(result_cols[i]->valid, 0, sizeof(gdf_valid_type)*gdf_valid_allocation_size(join_size)) );
CHECK_STREAM(0);
}
for (int i = right_table_begin; i < result_num_cols; ++i) {
gdf_column_view(result_cols[i], nullptr, nullptr, join_size, rnonjoincol[i - right_table_begin]->dtype);
int col_width = cudf::byte_width(*(result_cols[i]));
RMM_TRY( RMM_ALLOC((void**)&(result_cols[i]->data), col_width * join_size, 0) ); // TODO: non-default stream?
RMM_TRY( RMM_ALLOC((void**)&(result_cols[i]->valid), sizeof(gdf_valid_type)*gdf_valid_allocation_size(join_size), 0) );
CUDA_TRY( cudaMemset(result_cols[i]->valid, 0, sizeof(gdf_valid_type)*gdf_valid_allocation_size(join_size)) );
CHECK_STREAM(0);
}
//create joined output column data buffers
for (int join_index = 0; join_index < num_cols_to_join; ++join_index) {
int i = left_table_end + join_index;
gdf_column_view(result_cols[i], nullptr, nullptr, join_size, left_cols[left_join_cols[join_index]]->dtype);
int col_width = cudf::byte_width(*(result_cols[i]));
RMM_TRY( RMM_ALLOC((void**)&(result_cols[i]->data), col_width * join_size, 0) ); // TODO: non-default stream?
RMM_TRY( RMM_ALLOC((void**)&(result_cols[i]->valid), sizeof(gdf_valid_type)*gdf_valid_allocation_size(join_size), 0) );
CUDA_TRY( cudaMemset(result_cols[i]->valid, 0, sizeof(gdf_valid_type)*gdf_valid_allocation_size(join_size)) );
CHECK_STREAM(0);
}
// If the join_type is an outer join, then indices for non-matches will be
// -1, requiring bounds checking when gathering the result table
bool const check_bounds{ join_type != JoinType::INNER_JOIN };
// Construct the left columns
if (0 != lnonjoincol.size()) {
cudf::table left_source_table(lnonjoincol.data(), lnonjoincol.size());
cudf::table left_destination_table(result_cols,
num_left_cols - num_cols_to_join);
cudf::detail::gather(&left_source_table,
static_cast<index_type const *>(left_indices->data),
&left_destination_table, check_bounds);
CHECK_STREAM(0);
gdf_error update_err = nvcategory_gather_table(left_source_table,left_destination_table);
CHECK_STREAM(0);
GDF_REQUIRE(update_err == GDF_SUCCESS,update_err);
}
// Construct the right columns
if (0 != rnonjoincol.size()) {
cudf::table right_source_table(rnonjoincol.data(), rnonjoincol.size());
cudf::table right_destination_table(result_cols + right_table_begin,
num_right_cols - num_cols_to_join);
cudf::detail::gather(&right_source_table,
static_cast<index_type const *>(right_indices->data),
&right_destination_table, check_bounds);
CHECK_STREAM(0);
gdf_error update_err = nvcategory_gather_table(right_source_table,right_destination_table);
CHECK_STREAM(0);
GDF_REQUIRE(update_err == GDF_SUCCESS,update_err);
}
// Construct the joined columns
if (0 != ljoincol.size()) {
cudf::table join_source_table(ljoincol.data(), ljoincol.size());
cudf::table join_destination_table(result_cols + left_table_end,
num_cols_to_join);
// Gather valid rows from the right table
// TODO: Revisit this, because it probably can be done more efficiently
if (JoinType::FULL_JOIN == join_type) {
cudf::table right_source_table(rjoincol.data(), rjoincol.size());
cudf::detail::gather(
&right_source_table,
static_cast<index_type const *>(right_indices->data),
&join_destination_table, check_bounds);
CHECK_STREAM(0);
}
cudf::detail::gather(&join_source_table,
static_cast<index_type const *>(left_indices->data),
&join_destination_table, check_bounds);
CHECK_STREAM(0);
gdf_error update_err = nvcategory_gather_table(join_source_table,join_destination_table);
CHECK_STREAM(0);
GDF_REQUIRE(update_err == GDF_SUCCESS,update_err);
}
POP_RANGE();
return GDF_SUCCESS;
}
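/* --------------------------------------------------------------------------*/
/**
 * @brief Validates the inputs, makes GDF_STRING_CATEGORY dictionaries
 * comparable when present, computes the join indices, and optionally
 * constructs the joined output dataframe.
 *
 * @returns GDF_SUCCESS upon successful completion, otherwise an appropriate error code
 */
/* ----------------------------------------------------------------------------*/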
template <JoinType join_type, typename index_type>
gdf_error join_call_compute_df(
gdf_column **left_cols,
int num_left_cols,
int left_join_cols[],
gdf_column **right_cols,
int num_right_cols,
int right_join_cols[],
int num_cols_to_join,
int result_num_cols,
gdf_column **result_cols,
gdf_column * left_indices,
gdf_column * right_indices,
gdf_context *join_context) {
GDF_REQUIRE(nullptr != left_cols, GDF_DATASET_EMPTY);
GDF_REQUIRE(nullptr != right_cols, GDF_DATASET_EMPTY);
GDF_REQUIRE(0 != num_cols_to_join, GDF_SUCCESS);
GDF_REQUIRE(nullptr != left_join_cols, GDF_DATASET_EMPTY);
GDF_REQUIRE(nullptr != right_join_cols, GDF_DATASET_EMPTY);
GDF_REQUIRE(nullptr != join_context, GDF_INVALID_API_CALL);
for(int column_index = 0; column_index < num_left_cols; column_index++){
GDF_REQUIRE(left_cols[column_index]->dtype != GDF_invalid,GDF_UNSUPPORTED_DTYPE);
}
for(int column_index = 0; column_index < num_right_cols; column_index++){
GDF_REQUIRE(right_cols[column_index]->dtype != GDF_invalid,GDF_UNSUPPORTED_DTYPE);
}
// Determine if requested output is the indices of matching rows, the fully
// constructed output dataframe, or both
bool const construct_output_dataframe{nullptr != result_cols};
bool const return_output_indices{(nullptr != left_indices) and
(nullptr != right_indices)};
GDF_REQUIRE(construct_output_dataframe or return_output_indices,
GDF_INVALID_API_CALL);
auto const left_col_size = left_cols[0]->size;
auto const right_col_size = right_cols[0]->size;
// If the inputs are empty, immediately return
if ((0 == left_col_size) && (0 == right_col_size)) {
return GDF_SUCCESS;
}
// If left join and the left table is empty, return immediately
if ((JoinType::LEFT_JOIN == join_type) && (0 == left_col_size)) {
return GDF_SUCCESS;
}
// If Inner Join and either table is empty, return immediately
if ((JoinType::INNER_JOIN == join_type) &&
((0 == left_col_size) || (0 == right_col_size))) {
return GDF_SUCCESS;
}
//if the inputs are nvcategory we need to make the dictionaries comparable
bool at_least_one_category_column = false;
for(int join_column_index = 0; join_column_index < num_cols_to_join; join_column_index++){
at_least_one_category_column |= left_cols[left_join_cols[join_column_index]]->dtype == GDF_STRING_CATEGORY;
}
std::vector<gdf_column*> new_left_cols(left_cols, left_cols + num_left_cols);
std::vector<gdf_column*> new_right_cols(right_cols, right_cols + num_right_cols);
std::vector<gdf_column *> temp_columns_to_free;
if(at_least_one_category_column){
for(int join_column_index = 0; join_column_index < num_cols_to_join; join_column_index++){
if(left_cols[left_join_cols[join_column_index]]->dtype == GDF_STRING_CATEGORY){
GDF_REQUIRE(right_cols[right_join_cols[join_column_index]]->dtype == GDF_STRING_CATEGORY, GDF_DTYPE_MISMATCH);
gdf_column * left_original_column = new_left_cols[left_join_cols[join_column_index]];
gdf_column * right_original_column = new_right_cols[right_join_cols[join_column_index]];
gdf_column * new_left_column_ptr = new gdf_column{};
gdf_column * new_right_column_ptr = new gdf_column{};
temp_columns_to_free.push_back(new_left_column_ptr);
temp_columns_to_free.push_back(new_right_column_ptr);
gdf_column * input_join_columns_merge[2] = {left_original_column, right_original_column};
gdf_column * new_join_columns[2] = {new_left_column_ptr,
new_right_column_ptr};
gdf_column_view(new_left_column_ptr, nullptr, nullptr, left_original_column->size, GDF_STRING_CATEGORY);
gdf_column_view(new_right_column_ptr, nullptr, nullptr, right_original_column->size, GDF_STRING_CATEGORY);
int col_width = cudf::byte_width(*new_left_column_ptr);
RMM_TRY( RMM_ALLOC(&(new_left_column_ptr->data), col_width * left_original_column->size, 0) ); // TODO: non-default stream?
if(left_original_column->valid != nullptr){
RMM_TRY( RMM_ALLOC(&(new_left_column_ptr->valid), sizeof(gdf_valid_type)*gdf_valid_allocation_size(left_original_column->size), 0) );
CUDA_TRY( cudaMemcpy(new_left_column_ptr->valid, left_original_column->valid, sizeof(gdf_valid_type)*gdf_num_bitmask_elements(left_original_column->size),cudaMemcpyDeviceToDevice) );
}else{
new_left_column_ptr->valid = nullptr;
}
new_left_column_ptr->null_count = left_original_column->null_count;
RMM_TRY( RMM_ALLOC(&(new_right_column_ptr->data), col_width * right_original_column->size, 0) ); // TODO: non-default stream?
if(right_original_column->valid != nullptr){
RMM_TRY( RMM_ALLOC(&(new_right_column_ptr->valid), sizeof(gdf_valid_type)*gdf_valid_allocation_size(right_original_column->size), 0) );
CUDA_TRY( cudaMemcpy(new_right_column_ptr->valid, right_original_column->valid, sizeof(gdf_valid_type)*gdf_num_bitmask_elements(right_original_column->size),cudaMemcpyDeviceToDevice) );
}else{
new_right_column_ptr->valid = nullptr;
}
new_right_column_ptr->null_count = right_original_column->null_count;
gdf_error err = sync_column_categories(input_join_columns_merge,
new_join_columns,
2);
GDF_REQUIRE(GDF_SUCCESS == err, err);
new_left_cols[left_join_cols[join_column_index]] = new_join_columns[0];
new_right_cols[right_join_cols[join_column_index]] = new_join_columns[1];
CHECK_STREAM(0);
}
}
left_cols = new_left_cols.data();
right_cols = new_right_cols.data();
}
// If index outputs are not requested, create columns to store them
// for computing combined join output
gdf_column *left_index_out = left_indices;
gdf_column *right_index_out = right_indices;
using gdf_col_pointer =
typename std::unique_ptr<gdf_column, std::function<void(gdf_column *)>>;
auto gdf_col_deleter = [](gdf_column *col) {
col->size = 0;
if (col->data) {
RMM_FREE(col->data, 0);
}
if (col->valid) {
RMM_FREE(col->valid, 0);
}
};
gdf_col_pointer l_index_temp, r_index_temp;
if (nullptr == left_indices) {
l_index_temp = {new gdf_column{}, gdf_col_deleter};
left_index_out = l_index_temp.get();
}
if (nullptr == right_indices) {
r_index_temp = {new gdf_column{}, gdf_col_deleter};
right_index_out = r_index_temp.get();
}
//get column pointers to join on
std::vector<gdf_column*> ljoincol;
std::vector<gdf_column*> rjoincol;
for (int i = 0; i < num_cols_to_join; ++i) {
ljoincol.push_back(left_cols[ left_join_cols[i] ]);
rjoincol.push_back(right_cols[ right_join_cols[i] ]);
}
gdf_error join_err = join_call<join_type>(num_cols_to_join,
ljoincol.data(), rjoincol.data(),
left_index_out, right_index_out,
join_context);
CHECK_STREAM(0);
GDF_REQUIRE(GDF_SUCCESS == join_err, join_err);
//If construct_output_dataframe is false then left_index_out or right_index_out
//was not dynamically allocated.
if (not construct_output_dataframe) {
return join_err;
}
gdf_error df_err =
construct_join_output_df<join_type, index_type>(
ljoincol, rjoincol,
left_cols, num_left_cols, left_join_cols,
right_cols, num_right_cols, right_join_cols,
num_cols_to_join, result_num_cols, result_cols,
left_index_out, right_index_out);
CHECK_STREAM(0);
l_index_temp.reset(nullptr);
r_index_temp.reset(nullptr);
//freeing up the temp columns used to sync categories between the join columns
for(unsigned int column_to_free = 0; column_to_free < temp_columns_to_free.size(); column_to_free++){
gdf_column_free(temp_columns_to_free[column_to_free]);
delete temp_columns_to_free[column_to_free];
}
CHECK_STREAM(0);
return df_err;
}
gdf_error gdf_left_join(
gdf_column **left_cols,
int num_left_cols,
int left_join_cols[],
gdf_column **right_cols,
int num_right_cols,
int right_join_cols[],
int num_cols_to_join,
int result_num_cols,
gdf_column **result_cols,
gdf_column * left_indices,
gdf_column * right_indices,
gdf_context *join_context) {
return join_call_compute_df<JoinType::LEFT_JOIN, output_index_type>(
left_cols,
num_left_cols,
left_join_cols,
right_cols,
num_right_cols,
right_join_cols,
num_cols_to_join,
result_num_cols,
result_cols,
left_indices,
right_indices,
join_context);
}
gdf_error gdf_inner_join(
gdf_column **left_cols,
int num_left_cols,
int left_join_cols[],
gdf_column **right_cols,
int num_right_cols,
int right_join_cols[],
int num_cols_to_join,
int result_num_cols,
gdf_column **result_cols,
gdf_column * left_indices,
gdf_column * right_indices,
gdf_context *join_context) {
return join_call_compute_df<JoinType::INNER_JOIN, output_index_type>(
left_cols,
num_left_cols,
left_join_cols,
right_cols,
num_right_cols,
right_join_cols,
num_cols_to_join,
result_num_cols,
result_cols,
left_indices,
right_indices,
join_context);
}
gdf_error gdf_full_join(
gdf_column **left_cols,
int num_left_cols,
int left_join_cols[],
gdf_column **right_cols,
int num_right_cols,
int right_join_cols[],
int num_cols_to_join,
int result_num_cols,
gdf_column **result_cols,
gdf_column * left_indices,
gdf_column * right_indices,
gdf_context *join_context) {
return join_call_compute_df<JoinType::FULL_JOIN, output_index_type>(
left_cols,
num_left_cols,
left_join_cols,
right_cols,
num_right_cols,
right_join_cols,
num_cols_to_join,
result_num_cols,
result_cols,
left_indices,
right_indices,
join_context);
}
|
aa39c115e1e4debb4ccbf5ed4293156201830ec2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust\device_vector.h>
#include <thrust\host_vector.h>
#include <stdio.h>
#include <iostream>
using namespace std;
__global__ void kernel(int *a, const int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
a[i] = a[i] + N;
}
}
void showArray(int *a, const int N)
{
for (int i = 0; i < N; i++)
{
cout << a[i] << ", ";
}
cout << endl;
}
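// Demonstrates zero-copy (mapped) host memory: h_a is allocated with
// hipHostMallocMapped, d_a is the device-visible alias obtained via
// hipHostGetDevicePointer, and the kernel updates host memory in place.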
void run()
{
const int N = 20;
int *h_a, *d_a;
int size = N * sizeof(int);
dim3 blocks(10);
dim3 threads(2);
int count;
hipGetDeviceCount(&count);
hipDeviceProp_t prop;
if (count == 1)
{
hipGetDeviceProperties(&prop, count - 1);
}
if (!prop.canMapHostMemory)
{
// cudaHostMalloc(&h_a, size, hipHostMallocMapped);
printf("cannot use map memory");
return;
}
hipHostMalloc(&h_a, size, hipHostMallocMapped);
// must be initialized after calling hipHostMalloc()
for (size_t i = 0; i < N; i++)
{
h_a[i] = i;
}
hipHostGetDevicePointer(&d_a, h_a, 0);
hipLaunchKernelGGL(( kernel) , dim3(blocks), dim3(threads), 0, 0, d_a, N);
hipDeviceSynchronize();
cout << "result: " << endl;
showArray(h_a, N);
system("pause");
hipHostFree(h_a);
}
int main()
{
run();
return 0;
} | aa39c115e1e4debb4ccbf5ed4293156201830ec2.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust\device_vector.h>
#include <thrust\host_vector.h>
#include <stdio.h>
#include <iostream>
using namespace std;
__global__ void kernel(int *a, const int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
a[i] = a[i] + N;
}
}
void showArray(int *a, const int N)
{
for (int i = 0; i < N; i++)
{
cout << a[i] << ", ";
}
cout << endl;
}
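// Demonstrates zero-copy (mapped) host memory: h_a is allocated with
// cudaHostAllocMapped, d_a is the device-visible alias obtained via
// cudaHostGetDevicePointer, and the kernel updates host memory in place.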
void run()
{
const int N = 20;
int *h_a, *d_a;
int size = N * sizeof(int);
dim3 blocks(10);
dim3 threads(2);
int count;
cudaGetDeviceCount(&count);
cudaDeviceProp prop;
if (count == 1)
{
cudaGetDeviceProperties(&prop, count - 1);
}
if (!prop.canMapHostMemory)
{
// cudaHostMalloc(&h_a, size, cudaHostAllocMapped);
printf("cannot use map memory");
return;
}
cudaHostAlloc(&h_a, size, cudaHostAllocMapped);
// must be initialized after calling cudaHostAlloc()
for (size_t i = 0; i < N; i++)
{
h_a[i] = i;
}
cudaHostGetDevicePointer(&d_a, h_a, 0);
kernel <<<blocks, threads>>>(d_a, N);
cudaDeviceSynchronize();
cout << "result: " << endl;
showArray(h_a, N);
system("pause");
cudaFreeHost(h_a);
}
int main()
{
run();
return 0;
} |
c1ca0bf25f75463971cac077c9e992e55a3fa291.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "weighted_sigmoid_cross_entropy_loss_op.h"
namespace caffe2 {
namespace {
__global__ void ElementwiseMaxKernel(const int n, float* data, const float a) {
CUDA_1D_KERNEL_LOOP(index, n) {
data[index] = (data[index] > a) ? data[index] : a;
}
}
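// Per-element weighted sigmoid (binary) cross-entropy with logits.
// Elements with a negative target are ignored: their loss and count are set
// to zero so they do not contribute to the normalizer.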
__global__ void WeightedSigmoidCrossEntropyLossKernel(
const int n,
const float pos_weight,
const float neg_weight,
const float* logits,
const int* targets,
float* losses,
float* counts) {
CUDA_1D_KERNEL_LOOP(index, n) {
if (targets[index] < 0. ) {
losses[index] = 0.;
counts[index] = 0.;
} else {
float c1 = targets[index];
float c2 = 1. - targets[index];
// p = 1. / (1. + expf(-x))
float p = 1. / (1. + expf(-logits[index]));
// pos_weight * log(p)
float term1 = pos_weight * logf(max(p, FLT_MIN));
// neg_weight * log(1 - p), computed in a numerically stable form
float term2 = neg_weight *
(-1. * logits[index] * (logits[index] >= 0) - logf(1. +
expf(logits[index] - 2. * logits[index] * (logits[index] >= 0))));
losses[index] = - (c1 * term1 + c2 * term2);
counts[index] = 1.;
}
}
}
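// Gradient of the weighted sigmoid cross-entropy loss with respect to the
// logits; ignored elements (negative targets) receive a zero gradient.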
__global__ void WeightedSigmoidCrossEntropyLossGradientKernel(
const int n,
const float pos_weight,
const float neg_weight,
const float* logits,
const int* targets,
float* d_logits,
float* counts) {
CUDA_1D_KERNEL_LOOP(index, n) {
if (targets[index] < 0.) {
d_logits[index] = 0.;
counts[index] = 0.;
} else {
float c1 = targets[index];
float c2 = (1. - targets[index]);
float p = 1. / (1. + expf(-logits[index]));
// d/d(logit) of pos_weight * log(p) is pos_weight * (1 - p)
float term1 = pos_weight * (1. - p);
// d/d(logit) of neg_weight * log(1 - p) is neg_weight * (-p)
float term2 = neg_weight * ( - p);
d_logits[index] = -(c1 * term1 + c2 * term2);
counts[index] = 1.;
}
}
}
} // namespace
template <>
bool WeightedSigmoidCrossEntropyLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& T = Input(1);
auto* avg_loss = Output(0);
CAFFE_ENFORCE(
X.size() == T.size(),
"Logit and target must have the same size",
"(",
X.size(),
" vs. ",
T.size(),
")");
avg_loss->Resize(vector<TIndex>());
counts_.ResizeLike(X);
losses_.ResizeLike(X);
normalizer_.Resize(vector<TIndex>());
hipLaunchKernelGGL(( WeightedSigmoidCrossEntropyLossKernel),
dim3(CAFFE_GET_BLOCKS(X.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X.size(),
pos_weight_,
neg_weight_,
X.data<float>(),
T.data<int>(),
losses_.mutable_data<float>(),
counts_.mutable_data<float>());
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_);
if (normalize_) {
float* normalizer_data = normalizer_.mutable_data<float>();
math::Sum<float, CUDAContext>(
counts_.size(), counts_.data<float>(), normalizer_data, &context_);
// Prevent division by zero if all counts are zero
hipLaunchKernelGGL(( ElementwiseMaxKernel),
dim3(CAFFE_GET_BLOCKS(normalizer_.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), normalizer_.size(), normalizer_data, 1e-5);
math::Div<float, CUDAContext>(
1, avg_loss_data, normalizer_data, avg_loss_data, &context_);
}
math::Scale<float, CUDAContext>(
1, scale_, avg_loss_data, avg_loss_data, &context_);
return true;
}
template <>
bool WeightedSigmoidCrossEntropyLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& T = Input(1);
auto& d_avg_loss = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
counts_.ResizeLike(X);
normalizer_.Resize(vector<TIndex>());
hipLaunchKernelGGL(( WeightedSigmoidCrossEntropyLossGradientKernel),
dim3(CAFFE_GET_BLOCKS(X.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X.size(),
pos_weight_,
neg_weight_,
X.data<float>(),
T.data<int>(),
dX->mutable_data<float>(),
counts_.mutable_data<float>());
if (normalize_) {
float* normalizer_data = normalizer_.mutable_data<float>();
math::Sum<float, CUDAContext>(
counts_.size(), counts_.data<float>(), normalizer_data, &context_);
// Prevent division by zero if all counts are zero
hipLaunchKernelGGL(( ElementwiseMaxKernel),
dim3(CAFFE_GET_BLOCKS(normalizer_.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), normalizer_.size(), normalizer_data, 1e-5);
math::Div<float, CUDAContext>(
1,
d_avg_loss.data<float>(),
normalizer_data,
normalizer_data,
&context_);
math::Scale<float, CUDAContext>(
1, scale_, normalizer_data, normalizer_data, &context_);
math::Scale<float, CUDAContext>(
dX->size(),
normalizer_data,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
} else {
math::Scale<float, CUDAContext>(
dX->size(),
scale_,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
math::Scale<float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
return true;
}
REGISTER_CUDA_OPERATOR(
WeightedSigmoidCrossEntropyLoss,
WeightedSigmoidCrossEntropyLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
WeightedSigmoidCrossEntropyLossGradient,
WeightedSigmoidCrossEntropyLossGradientOp<float, CUDAContext>);
} // namespace caffe2
| c1ca0bf25f75463971cac077c9e992e55a3fa291.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "weighted_sigmoid_cross_entropy_loss_op.h"
namespace caffe2 {
namespace {
__global__ void ElementwiseMaxKernel(const int n, float* data, const float a) {
CUDA_1D_KERNEL_LOOP(index, n) {
data[index] = (data[index] > a) ? data[index] : a;
}
}
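// Per-element weighted sigmoid (binary) cross-entropy with logits.
// Elements with a negative target are ignored: their loss and count are set
// to zero so they do not contribute to the normalizer.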
__global__ void WeightedSigmoidCrossEntropyLossKernel(
const int n,
const float pos_weight,
const float neg_weight,
const float* logits,
const int* targets,
float* losses,
float* counts) {
CUDA_1D_KERNEL_LOOP(index, n) {
if (targets[index] < 0. ) {
losses[index] = 0.;
counts[index] = 0.;
} else {
float c1 = targets[index];
float c2 = 1. - targets[index];
// p = 1. / (1. + expf(-x))
float p = 1. / (1. + expf(-logits[index]));
// pos_weight * log(p)
float term1 = pos_weight * logf(max(p, FLT_MIN));
// neg_weight * log(1 - p), computed in a numerically stable form
float term2 = neg_weight *
(-1. * logits[index] * (logits[index] >= 0) - logf(1. +
expf(logits[index] - 2. * logits[index] * (logits[index] >= 0))));
losses[index] = - (c1 * term1 + c2 * term2);
counts[index] = 1.;
}
}
}
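// Gradient of the weighted sigmoid cross-entropy loss with respect to the
// logits; ignored elements (negative targets) receive a zero gradient.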
__global__ void WeightedSigmoidCrossEntropyLossGradientKernel(
const int n,
const float pos_weight,
const float neg_weight,
const float* logits,
const int* targets,
float* d_logits,
float* counts) {
CUDA_1D_KERNEL_LOOP(index, n) {
if (targets[index] < 0.) {
d_logits[index] = 0.;
counts[index] = 0.;
} else {
float c1 = targets[index];
float c2 = (1. - targets[index]);
float p = 1. / (1. + expf(-logits[index]));
// d/d(logit) of pos_weight * log(p) is pos_weight * (1 - p)
float term1 = pos_weight * (1. - p);
// d/d(logit) of neg_weight * log(1 - p) is neg_weight * (-p)
float term2 = neg_weight * ( - p);
d_logits[index] = -(c1 * term1 + c2 * term2);
counts[index] = 1.;
}
}
}
} // namespace
template <>
bool WeightedSigmoidCrossEntropyLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& T = Input(1);
auto* avg_loss = Output(0);
CAFFE_ENFORCE(
X.size() == T.size(),
"Logit and target must have the same size",
"(",
X.size(),
" vs. ",
T.size(),
")");
avg_loss->Resize(vector<TIndex>());
counts_.ResizeLike(X);
losses_.ResizeLike(X);
normalizer_.Resize(vector<TIndex>());
WeightedSigmoidCrossEntropyLossKernel<<<
CAFFE_GET_BLOCKS(X.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.size(),
pos_weight_,
neg_weight_,
X.data<float>(),
T.data<int>(),
losses_.mutable_data<float>(),
counts_.mutable_data<float>());
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_);
if (normalize_) {
float* normalizer_data = normalizer_.mutable_data<float>();
math::Sum<float, CUDAContext>(
counts_.size(), counts_.data<float>(), normalizer_data, &context_);
// Prevent division by zero if all counts are zero
ElementwiseMaxKernel<<<
CAFFE_GET_BLOCKS(normalizer_.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(normalizer_.size(), normalizer_data, 1e-5);
math::Div<float, CUDAContext>(
1, avg_loss_data, normalizer_data, avg_loss_data, &context_);
}
math::Scale<float, CUDAContext>(
1, scale_, avg_loss_data, avg_loss_data, &context_);
return true;
}
template <>
bool WeightedSigmoidCrossEntropyLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& T = Input(1);
auto& d_avg_loss = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
counts_.ResizeLike(X);
normalizer_.Resize(vector<TIndex>());
WeightedSigmoidCrossEntropyLossGradientKernel<<<
CAFFE_GET_BLOCKS(X.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.size(),
pos_weight_,
neg_weight_,
X.data<float>(),
T.data<int>(),
dX->mutable_data<float>(),
counts_.mutable_data<float>());
if (normalize_) {
float* normalizer_data = normalizer_.mutable_data<float>();
math::Sum<float, CUDAContext>(
counts_.size(), counts_.data<float>(), normalizer_data, &context_);
// Prevent division by zero if all counts are zero
ElementwiseMaxKernel<<<
CAFFE_GET_BLOCKS(normalizer_.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(normalizer_.size(), normalizer_data, 1e-5);
math::Div<float, CUDAContext>(
1,
d_avg_loss.data<float>(),
normalizer_data,
normalizer_data,
&context_);
math::Scale<float, CUDAContext>(
1, scale_, normalizer_data, normalizer_data, &context_);
math::Scale<float, CUDAContext>(
dX->size(),
normalizer_data,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
} else {
math::Scale<float, CUDAContext>(
dX->size(),
scale_,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
math::Scale<float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
return true;
}
REGISTER_CUDA_OPERATOR(
WeightedSigmoidCrossEntropyLoss,
WeightedSigmoidCrossEntropyLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
WeightedSigmoidCrossEntropyLossGradient,
WeightedSigmoidCrossEntropyLossGradientOp<float, CUDAContext>);
} // namespace caffe2
|
633a4067b4f03bca810448149e0d8a1cdbbe949b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <ctime>
#include <iostream>
using std::cout;
using std::endl;
__inline__ __device__ void swap(float &a, float &b) {
float tmp = a;
a = b;
b = tmp;
};
__global__ void update(float *A, float *B, int N) {
float slot[4];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N * N) {
int ix = idx / N, iy = idx % N;
if (ix > 0 && ix < N - 1 && iy > 0 && iy < N - 1) {
slot[0] = A[idx - N - 1], slot[1] = A[idx - N + 1];
slot[2] = A[idx + N - 1], slot[3] = A[idx + N + 1];
if (slot[1] < slot[0]) swap(slot[0], slot[1]);
if (slot[3] < slot[2]) swap(slot[2], slot[3]);
B[idx] = A[idx] + (slot[0] < slot[2] ? fmin(slot[1], slot[2])
: fmin(slot[0], slot[3]));
}
}
}
__global__ void reduceSmemDyn(float *A, float *S, int size) {
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
// initialize dynamic shared memory
if (i < size)
sdata[tid] = A[i];
else
sdata[tid] = 0;
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) sdata[tid] += sdata[tid + s];
__syncthreads();
}
if (tid < 32) { // unrolling warp
volatile float *vsmem = sdata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0)
S[blockIdx.x] = sdata[0]; // each block has its sum of threads within
};
// template <unsigned int GRID_X, unsigned int BLOCK_X>
__global__ void parent(float *A, float *B, int N, int GRID_X, int BLOCK_X) {
int p1 = N / 2 * N + N / 2, p2 = 37 * N + 47;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx == 0) {
for (int i = 0; i < 5; ++i) {
hipLaunchKernelGGL(( update), dim3(GRID_X), dim3(BLOCK_X), 0, 0, A, B, N);
hipLaunchKernelGGL(( update), dim3(GRID_X), dim3(BLOCK_X), 0, 0, B, A, N);
}
// store results to B
B[p1] = A[p1];
B[p2] = A[p2];
for (int numToSum = N * N, numBlock; numToSum > 1; numToSum = numBlock) {
numBlock = (numToSum + BLOCK_X - 1) / BLOCK_X;
hipLaunchKernelGGL(( reduceSmemDyn), dim3(numBlock), dim3(BLOCK_X), BLOCK_X * sizeof(float), 0, A, A,
numToSum);
// __syncthreads();
}
}
}
void matrix_update(int N, int BLOCK_X = 128) {
int NN{N * N};
size_t nBytes = NN * sizeof(float);
float *A = (float *)malloc(nBytes);
float *B = (float *)malloc(nBytes);
float res[3] = {0, 0, 0};
int p1 = N / 2 * N + N / 2, p2 = 37 * N + 47;
// initialize
for (int k = NN - 1; k >= 0; --k) {
int i{k / N}, j{k % N};
A[k] = (1 + cos(2 * i) + sin(j)), A[k] *= A[k];
}
float *d_A, *d_B;
hipMalloc((float **)&d_A, nBytes);
hipMalloc((float **)&d_B, nBytes);
hipMemcpy(d_A, A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, A, nBytes, hipMemcpyHostToDevice);
// block size BLOCK_X x 1, grid size
dim3 block(BLOCK_X, 1);
dim3 grid((NN + BLOCK_X - 1) / BLOCK_X, 1);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// start the timer
hipEventRecord(start);
hipLaunchKernelGGL(( parent), dim3(1), dim3(block.x), 0, 0, d_A, d_B, N, grid.x, block.x);
// stop the timer
hipEventRecord(stop);
hipEventSynchronize(stop);
float millisecond = 0;
hipEventElapsedTime(&millisecond, start, stop);
hipMemcpy(&res[0], &d_A[0], sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&res[1], &d_B[p1], sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&res[2], &d_B[p2], sizeof(float), hipMemcpyDeviceToHost);
/* end timing */
cout << "grid " << grid.x << " block " << block.x << " calculation time "
<< millisecond << " sum = " << res[0] << " A[N / 2][N / 2] " << res[1]
<< " A[37][47] " << res[2] << endl;
hipFree(d_A);
hipFree(d_B);
free(A);
free(B);
}
int main(int argc, char **argv) {
int N = atoi(argv[1]); // problem size
int BLOCK_X = atoi(argv[2]); // block size
matrix_update(N, BLOCK_X);
}
| 633a4067b4f03bca810448149e0d8a1cdbbe949b.cu | #include <cmath>
#include <ctime>
#include <iostream>
using std::cout;
using std::endl;
__inline__ __device__ void swap(float &a, float &b) {
float tmp = a;
a = b;
b = tmp;
};
__global__ void update(float *A, float *B, int N) {
float slot[4];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N * N) {
int ix = idx / N, iy = idx % N;
if (ix > 0 && ix < N - 1 && iy > 0 && iy < N - 1) {
slot[0] = A[idx - N - 1], slot[1] = A[idx - N + 1];
slot[2] = A[idx + N - 1], slot[3] = A[idx + N + 1];
if (slot[1] < slot[0]) swap(slot[0], slot[1]);
if (slot[3] < slot[2]) swap(slot[2], slot[3]);
B[idx] = A[idx] + (slot[0] < slot[2] ? fmin(slot[1], slot[2])
: fmin(slot[0], slot[3]));
}
}
}
__global__ void reduceSmemDyn(float *A, float *S, int size) {
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
// initialize dynamic shared memory
if (i < size)
sdata[tid] = A[i];
else
sdata[tid] = 0;
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) sdata[tid] += sdata[tid + s];
__syncthreads();
}
if (tid < 32) { // unrolling warp
volatile float *vsmem = sdata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0)
S[blockIdx.x] = sdata[0]; // each block has its sum of threads within
};
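// Illustrative alternative (not used by the kernels in this file): the partial
// sums held by the final warp can also be combined with warp shuffles instead
// of volatile shared memory. This sketch assumes a 32-wide warp and CUDA 9+ for
// __shfl_down_sync; the helper name is an addition here, not original code.
__inline__ __device__ float warpReduceSum(float val) {
  for (int offset = 16; offset > 0; offset >>= 1)
    val += __shfl_down_sync(0xffffffff, val, offset);  // add the value held by lane (tid + offset)
  return val;
}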
// template <unsigned int GRID_X, unsigned int BLOCK_X>
__global__ void parent(float *A, float *B, int N, int GRID_X, int BLOCK_X) {
int p1 = N / 2 * N + N / 2, p2 = 37 * N + 47;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx == 0) {
for (int i = 0; i < 5; ++i) {
update<<<GRID_X, BLOCK_X>>>(A, B, N);
update<<<GRID_X, BLOCK_X>>>(B, A, N);
}
// store results to B
B[p1] = A[p1];
B[p2] = A[p2];
for (int numToSum = N * N, numBlock; numToSum > 1; numToSum = numBlock) {
numBlock = (numToSum + BLOCK_X - 1) / BLOCK_X;
reduceSmemDyn<<<numBlock, BLOCK_X, BLOCK_X * sizeof(float)>>>(A, A,
numToSum);
// __syncthreads();
}
}
}
void matrix_update(int N, int BLOCK_X = 128) {
int NN{N * N};
size_t nBytes = NN * sizeof(float);
float *A = (float *)malloc(nBytes);
float *B = (float *)malloc(nBytes);
float res[3] = {0, 0, 0};
int p1 = N / 2 * N + N / 2, p2 = 37 * N + 47;
// initialize
for (int k = NN - 1; k >= 0; --k) {
int i{k / N}, j{k % N};
A[k] = (1 + cos(2 * i) + sin(j)), A[k] *= A[k];
}
float *d_A, *d_B;
cudaMalloc((float **)&d_A, nBytes);
cudaMalloc((float **)&d_B, nBytes);
cudaMemcpy(d_A, A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, A, nBytes, cudaMemcpyHostToDevice);
  // block size BLOCK_X x 1, grid size (NN + BLOCK_X - 1) / BLOCK_X x 1
dim3 block(BLOCK_X, 1);
dim3 grid((NN + BLOCK_X - 1) / BLOCK_X, 1);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// start the timer
cudaEventRecord(start);
parent<<<1, block.x>>>(d_A, d_B, N, grid.x, block.x);
// stop the timer
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float millisecond = 0;
cudaEventElapsedTime(&millisecond, start, stop);
cudaMemcpy(&res[0], &d_A[0], sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&res[1], &d_B[p1], sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&res[2], &d_B[p2], sizeof(float), cudaMemcpyDeviceToHost);
/* end timing */
cout << "grid " << grid.x << " block " << block.x << " calculation time "
<< millisecond << " sum = " << res[0] << " A[N / 2][N / 2] " << res[1]
<< " A[37][47] " << res[2] << endl;
cudaFree(d_A);
cudaFree(d_B);
free(A);
free(B);
}
int main(int argc, char **argv) {
int N = atoi(argv[1]); // problem size
int BLOCK_X = atoi(argv[2]); // block size
matrix_update(N, BLOCK_X);
}
|
cc576cdfc2b40c374a5f96282762ff121558e1a2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#include "lsq.h"
#include "sub.h"
#define NB 16
#define COLUMNS 256
/*
static inline int updiv(int n, int d) {
return (n+d-1)/d;
}
*/
__device__ __inline__ int dmin(int a, int b) {
if(a>b) {
return b;
} else {
return a;
}
}
// As of 5:03pm on 5/8 this version calculates dR and dA correctly. However, currently dWeights and dY are not outputting correctly.
// This also causes dSSERR to output incorrectly because of incorrect inputs. It also appears that dD and dRHS are not currently calculated correctly.
// Update as of 11:45am 5/9 this version calculates all values correctly when NB = Matrix Size. Now working on cases when NB < Matrix Size.
__global__ void includGPU(int rows, int cols, double* dA, double* dY, double* dD, double* dR, double* dRHS, double* dSSERR, double* dWeights, int r_dim) {
//__shared__ double dXblock[(NB)*COLUMNS];
extern __shared__ double dXblock[];
__shared__ double sD[COLUMNS];
__shared__ double sRHS[COLUMNS];
const int idx = blockIdx.x*blockDim.x+threadIdx.x; // Maps to rows
const int jdx = blockIdx.y*blockDim.y+threadIdx.y; // Maps to columns
double vsmall = 2.225e-307;
    int perRow = dmin(COLUMNS, cols); // NOTE: this will not work correctly if COLUMNS < cols
double w = 0.0, xk = 0.00, di = 0.00, cbar = 0.00, sbar = 0.00, xi = 0.00, tempR = 0.00, RHSi = 0.0, xy = 0.00, yi = 0.00;
if(idx >= blockDim.x || jdx >= blockDim.y ) return;
if(threadIdx.x == 0) {
for(int i=threadIdx.y; i<perRow; i+=blockDim.y) {
sD[i] = 0.f;
sRHS[i] = 0.f;
}
}
for(int i=threadIdx.x; i<rows; i+=blockDim.x) { // i<rows
for(int j=threadIdx.y; j<perRow; j+=blockDim.y) {
dXblock[threadIdx.x*perRow+j] = dA[i*cols+j];
}
int rowsLeft = dmin(NB, rows-i+threadIdx.x);
int nextr = 0;
bool smallW = false;
w = dWeights[i];
yi = dY[i];
for(int j=0; j<cols; j++) { // j < cols
__syncthreads();
if(fabs(w) < vsmall) {
dWeights[i] = w;
dY[i] = yi;
smallW = true;
} else {
di = sD[j];
RHSi = sRHS[j];
// di = dD[j];
// RHSi = dRHS[j];
}
// for(int k=i-threadIdx.x; k<i+1; k++) {
for(int k=0; k<threadIdx.x+1; k++) {
xi = dXblock[k*perRow+j];
w = dWeights[k+i-threadIdx.x];
if(fabs(xi) >= vsmall && !smallW && fabs(w) >= vsmall) {
yi = dY[k+i-threadIdx.x];
cbar = di/(di+w*xi*xi);
sbar = w*xi/(di+w*xi*xi);
di = di+w*xi*xi;
for(int colBlock=jdx; colBlock<perRow; colBlock+=blockDim.y) {
if(colBlock > j) {
tempR = dR[nextr+colBlock-j-1];
xk = dXblock[k*perRow+colBlock];
if(k == threadIdx.x) {
dXblock[k*perRow+colBlock] = xk-xi*tempR;
dR[nextr+colBlock-j-1] = cbar*tempR+sbar*xk;
}
tempR = cbar*tempR+sbar*xk;
}
}
w = cbar*w;
xy = yi;
yi = xy-xi*RHSi;
RHSi = cbar*RHSi+sbar*xy;
}
__syncthreads();
}
nextr = nextr+cols-j-1;
for(int rowBlock=0; rowBlock<NB; rowBlock++) {
if(!smallW) {
for(int l=jdx; l<perRow; l+=blockDim.y) {
if(l == j && rowBlock == threadIdx.x) {
sD[l] = di;
// dD[l] = di;
sRHS[l] = RHSi;
//dRHS[l] = RHSi;
}
}
}
}
for(int colBlock=threadIdx.y; colBlock<perRow; colBlock+=blockDim.y) {
if(colBlock == perRow-1 && fabs(xi) >= vsmall) {
dWeights[i] = w;
dY[i] = yi;
}
}
}
for(int colBlock=threadIdx.y; colBlock<perRow; colBlock+=blockDim.y) {
dA[i*cols+colBlock] = dXblock[threadIdx.x*perRow+colBlock];
}
// This will move the values stored in the shared state to the global variables, that I will need later!
for(int j=threadIdx.y; j<perRow; j+=blockDim.y) {
dD[j] = sD[j];
dRHS[j] = sRHS[j];
}
}
__syncthreads();
/* // Used to test accuracy of the parallel code
if(idx==0 && jdx == 0) {
for(int i=0; i<r_dim; i++) {
printf("dR[%d]=%f\n", i, dR[i]);
}
for(int i=0; i<cols; i++) {
printf("D[%d]=%f rhs[%d]=%f\n", i, dD[i], i, dRHS[i]);
}
}
*/
if(jdx==0 && idx==0) { // Have to sequentially add the dSSERR values because atomic_dadd doesn't seem to work in CUDA.
for(int i=0; i<rows; i++) {
dSSERR[0] = dSSERR[0]+dWeights[i]*dY[i]*dY[i];
// printf("dSSERR[%d]=%f dWeights[%d]=%f dY[%d]=%f\n", i, dSSERR[0], i, dWeights[i], i, dY[i]);
}
}
}
void gpu_lsq(double* A, double* weights, double* y, int rows, int cols, int nbest, int max_size, double** ress, int** lopt, double* bound) {
int nvar = cols-1, r_dim = cols*(cols-1)/2;
double sserr[1], rss[cols], rhs[cols], work[cols], tol[cols], D[cols], r[r_dim];
int vorder[cols], row_ptr[cols], ifault[1], ier[1];
bool lindep[cols], tol_set[1], rss_set[1];
// double total_sumsq;
sserr[0] = 0.0;
tol_set[0] = false;
rss_set[0] = false;
for(int i=0; i<cols; i++) {
vorder[i] = i;
}
row_ptr[0] = 0;
for(int i=1; i<cols-1; i++) {
row_ptr[i] = row_ptr[i-1]+cols-i;
}
row_ptr[cols-1] = 0;
double* dA = NULL;
double* dY = NULL;
double* dD = NULL; // cols
double* dR = NULL; //r_dim
double* dRHS = NULL; //cols
double* dSSERR = NULL; // cols
double* dWeights = NULL; //rows
hipMalloc((void **)&dA, rows*(cols+1)*sizeof(double));
hipMalloc((void **)&dY, rows*sizeof(double));
hipMalloc((void **)&dD, cols*sizeof(double));
hipMalloc((void **)&dR, r_dim*sizeof(double));
hipMalloc((void **)&dRHS, cols*sizeof(double));
hipMalloc((void **)&dSSERR, sizeof(double));
hipMalloc((void **)&dWeights, rows*sizeof(double));
hipMemcpy(dA, A, rows*(cols+1)*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dY, y, rows*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dWeights, weights, rows*sizeof(double), hipMemcpyHostToDevice); // May want to consider just assuming 1 for now if this takes too long :/.
hipMemset(dD, 0.00, cols*sizeof(double));
hipMemset(dR, 0.00, r_dim*sizeof(double));
hipMemset(dRHS, 0.00, cols*sizeof(double));
hipMemset(dSSERR, 0.00, sizeof(double));
dim3 threadsPerBlock(NB,NB);
dim3 blocks(1, 1);
    int shared_size = (cols + 1) * NB * sizeof(double); // dynamic shared memory is sized in bytes; dXblock holds NB rows of doubles
hipDeviceSynchronize();
hipLaunchKernelGGL(( includGPU), dim3(blocks), dim3(threadsPerBlock), shared_size, 0, rows, cols, dA, dY, dD, dR, dRHS, dSSERR, dWeights, r_dim);
hipDeviceSynchronize();
// Transfer results back to CPU from GPU!
hipMemcpy(D, dD, cols*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(r, dR, r_dim*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(rhs, dRHS, cols*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(sserr, dSSERR, sizeof(double), hipMemcpyDeviceToHost);
    // Then the rest of the code runs on the CPU. Since includ was the most time-consuming step (>90% of the computation), this should be ok.
/****************************************************************
// This part gets translated into CUDA device code.
for(int i=0; i<rows; i++) {
xrow[0] = 1.0;
for(int j=1; j<cols; j++) {
xrow[j] = A[i*cols+j-1];
}
includ(weights[i], xrow, y[i], cols, D, r, rhs, sserr);
}
*****************************************************************/
/* //Used to determine when values are accurate
for(int i=0; i<cols; i++) {
printf("D[%d]=%f rhs[%d]=%f\n", i, D[i], i, rhs[i]);
}
for(int i=0; i<r_dim; i++) {
printf("r[%d]=%f\n", i, r[i]);
}
printf("sserr=%f\n", sserr[0]);
*/
// std::cout << "sserr = " << sserr[0] << std::endl;
sing(lindep, ifault, cols, D, tol_set, r, tol, row_ptr, rhs, sserr, work);
if(ifault[0] == 0) {
std::cout << "QR-factorization is not singular" << std::endl;
} else {
for(int i=0; i<nvar; i++) {
if(lindep[i])
std::cout << vorder[i] << " is exactly linearly related to earlier variables" << std::endl;
}
}
ss(cols, sserr, rss, rss_set, D, rhs);
// Set tolerances and test for singularities
tolset(cols, work, r, tol, tol_set);
sing(lindep, ier, cols, D, tol_set, r, tol, row_ptr, rhs, sserr, work);
if(ier[0] != 0) {
std::cout << ier[0] << " singularities detected in predictor variables" << std::endl;
std::cout << "These variables are linearly related to earlier ones:" << std::endl;
for(int i=0; i<cols; i++) {
if(lindep[i]) {
for(int j=0; j<nvar; j++) {
if(lindep[j]) {
std::cout << vorder[j] << std::endl;
}
}
break;
}
}
}
// Not sure if these three need to be called again here...
tolset(cols, work, r, tol, tol_set);
sing(lindep, ier, cols, D, tol_set, r, tol, row_ptr, rhs, sserr, work);
ss(cols, sserr, rss, rss_set, D, rhs);
for(int i=0; i<max_size; i++) {
report(i, rss[i], max_size, bound, nbest, ress, vorder, lopt);
}
// total_sumsq = rss[0];
int first = 1;
int last = cols;
// The next part is that I will need to implement the different subset selection techniques, pick a few
// Forward selection
// double startForwrd = CycleTimer::currentSeconds();
forwrd(first, last, ifault, cols, max_size, D, rhs, r, nbest, rss, bound, ress, vorder, lopt, rss_set, sserr, row_ptr, tol);
// double endForwrd = CycleTimer::currentSeconds();
// std::cout << "Forwrd took " << 1000.f*(endForwrd-startForwrd) << std::endl;
for(int i=first; i<max_size; i++) {
std::cout << "Best subsets found of " << i << " variables" << std::endl;
std::cout << " R.S.S. Variable numbers" << std::endl;
int pos = (i*i+i)/2;
for(int j=0; j<nbest; j++) {
std::cout << ress[i][j] << " ";
for(int k=pos; k<pos+i+1; k++) {
std::cout << lopt[j][k] << " ";
}
std::cout << std::endl;
}
}
}
| cc576cdfc2b40c374a5f96282762ff121558e1a2.cu | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#include "lsq.h"
#include "sub.h"
#define NB 16
#define COLUMNS 256
/*
static inline int updiv(int n, int d) {
return (n+d-1)/d;
}
*/
__device__ __inline__ int dmin(int a, int b) {
if(a>b) {
return b;
} else {
return a;
}
}
// As of 5:03pm on 5/8 this version calculates dR and dA correctly. However, currently dWeights and dY are not outputting correctly.
// This also causes dSSERR to output incorrectly because of incorrect inputs. It also appears that dD and dRHS are not currently calculated correctly.
// Update as of 11:45am 5/9 this version calculates all values correctly when NB = Matrix Size. Now working on cases when NB < Matrix Size.
__global__ void includGPU(int rows, int cols, double* dA, double* dY, double* dD, double* dR, double* dRHS, double* dSSERR, double* dWeights, int r_dim) {
//__shared__ double dXblock[(NB)*COLUMNS];
extern __shared__ double dXblock[];
__shared__ double sD[COLUMNS];
__shared__ double sRHS[COLUMNS];
const int idx = blockIdx.x*blockDim.x+threadIdx.x; // Maps to rows
const int jdx = blockIdx.y*blockDim.y+threadIdx.y; // Maps to columns
double vsmall = 2.225e-307;
    int perRow = dmin(COLUMNS, cols); // NOTE: this will not work correctly if COLUMNS < cols
double w = 0.0, xk = 0.00, di = 0.00, cbar = 0.00, sbar = 0.00, xi = 0.00, tempR = 0.00, RHSi = 0.0, xy = 0.00, yi = 0.00;
if(idx >= blockDim.x || jdx >= blockDim.y ) return;
if(threadIdx.x == 0) {
for(int i=threadIdx.y; i<perRow; i+=blockDim.y) {
sD[i] = 0.f;
sRHS[i] = 0.f;
}
}
for(int i=threadIdx.x; i<rows; i+=blockDim.x) { // i<rows
for(int j=threadIdx.y; j<perRow; j+=blockDim.y) {
dXblock[threadIdx.x*perRow+j] = dA[i*cols+j];
}
int rowsLeft = dmin(NB, rows-i+threadIdx.x);
int nextr = 0;
bool smallW = false;
w = dWeights[i];
yi = dY[i];
for(int j=0; j<cols; j++) { // j < cols
__syncthreads();
if(fabs(w) < vsmall) {
dWeights[i] = w;
dY[i] = yi;
smallW = true;
} else {
di = sD[j];
RHSi = sRHS[j];
// di = dD[j];
// RHSi = dRHS[j];
}
// for(int k=i-threadIdx.x; k<i+1; k++) {
for(int k=0; k<threadIdx.x+1; k++) {
xi = dXblock[k*perRow+j];
w = dWeights[k+i-threadIdx.x];
if(fabs(xi) >= vsmall && !smallW && fabs(w) >= vsmall) {
yi = dY[k+i-threadIdx.x];
cbar = di/(di+w*xi*xi);
sbar = w*xi/(di+w*xi*xi);
di = di+w*xi*xi;
for(int colBlock=jdx; colBlock<perRow; colBlock+=blockDim.y) {
if(colBlock > j) {
tempR = dR[nextr+colBlock-j-1];
xk = dXblock[k*perRow+colBlock];
if(k == threadIdx.x) {
dXblock[k*perRow+colBlock] = xk-xi*tempR;
dR[nextr+colBlock-j-1] = cbar*tempR+sbar*xk;
}
tempR = cbar*tempR+sbar*xk;
}
}
w = cbar*w;
xy = yi;
yi = xy-xi*RHSi;
RHSi = cbar*RHSi+sbar*xy;
}
__syncthreads();
}
nextr = nextr+cols-j-1;
for(int rowBlock=0; rowBlock<NB; rowBlock++) {
if(!smallW) {
for(int l=jdx; l<perRow; l+=blockDim.y) {
if(l == j && rowBlock == threadIdx.x) {
sD[l] = di;
// dD[l] = di;
sRHS[l] = RHSi;
//dRHS[l] = RHSi;
}
}
}
}
for(int colBlock=threadIdx.y; colBlock<perRow; colBlock+=blockDim.y) {
if(colBlock == perRow-1 && fabs(xi) >= vsmall) {
dWeights[i] = w;
dY[i] = yi;
}
}
}
for(int colBlock=threadIdx.y; colBlock<perRow; colBlock+=blockDim.y) {
dA[i*cols+colBlock] = dXblock[threadIdx.x*perRow+colBlock];
}
// This will move the values stored in the shared state to the global variables, that I will need later!
for(int j=threadIdx.y; j<perRow; j+=blockDim.y) {
dD[j] = sD[j];
dRHS[j] = sRHS[j];
}
}
__syncthreads();
/* // Used to test accuracy of the parallel code
if(idx==0 && jdx == 0) {
for(int i=0; i<r_dim; i++) {
printf("dR[%d]=%f\n", i, dR[i]);
}
for(int i=0; i<cols; i++) {
printf("D[%d]=%f rhs[%d]=%f\n", i, dD[i], i, dRHS[i]);
}
}
*/
    if(jdx==0 && idx==0) { // Have to sequentially add the dSSERR values because atomic_dadd doesn't seem to work in CUDA; a CAS-based double atomicAdd is sketched after this kernel as one alternative.
for(int i=0; i<rows; i++) {
dSSERR[0] = dSSERR[0]+dWeights[i]*dY[i]*dY[i];
// printf("dSSERR[%d]=%f dWeights[%d]=%f dY[%d]=%f\n", i, dSSERR[0], i, dWeights[i], i, dY[i]);
}
}
}
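// Sketch of one possible replacement for the sequential dSSERR accumulation in
// includGPU above: double-precision atomicAdd emulated with atomicCAS on the
// 64-bit pattern (native double atomicAdd needs compute capability >= 6.0).
// This helper is illustrative only and is not called anywhere in this file.
__device__ double atomicAddDouble(double *address, double val) {
    unsigned long long int *address_as_ull = (unsigned long long int *)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old); // retry if another thread updated the value in between
    return __longlong_as_double(old);
}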
void gpu_lsq(double* A, double* weights, double* y, int rows, int cols, int nbest, int max_size, double** ress, int** lopt, double* bound) {
int nvar = cols-1, r_dim = cols*(cols-1)/2;
double sserr[1], rss[cols], rhs[cols], work[cols], tol[cols], D[cols], r[r_dim];
int vorder[cols], row_ptr[cols], ifault[1], ier[1];
bool lindep[cols], tol_set[1], rss_set[1];
// double total_sumsq;
sserr[0] = 0.0;
tol_set[0] = false;
rss_set[0] = false;
for(int i=0; i<cols; i++) {
vorder[i] = i;
}
row_ptr[0] = 0;
for(int i=1; i<cols-1; i++) {
row_ptr[i] = row_ptr[i-1]+cols-i;
}
row_ptr[cols-1] = 0;
double* dA = NULL;
double* dY = NULL;
double* dD = NULL; // cols
double* dR = NULL; //r_dim
double* dRHS = NULL; //cols
double* dSSERR = NULL; // cols
double* dWeights = NULL; //rows
cudaMalloc((void **)&dA, rows*(cols+1)*sizeof(double));
cudaMalloc((void **)&dY, rows*sizeof(double));
cudaMalloc((void **)&dD, cols*sizeof(double));
cudaMalloc((void **)&dR, r_dim*sizeof(double));
cudaMalloc((void **)&dRHS, cols*sizeof(double));
cudaMalloc((void **)&dSSERR, sizeof(double));
cudaMalloc((void **)&dWeights, rows*sizeof(double));
cudaMemcpy(dA, A, rows*(cols+1)*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dY, y, rows*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dWeights, weights, rows*sizeof(double), cudaMemcpyHostToDevice); // May want to consider just assuming 1 for now if this takes too long :/.
cudaMemset(dD, 0.00, cols*sizeof(double));
cudaMemset(dR, 0.00, r_dim*sizeof(double));
cudaMemset(dRHS, 0.00, cols*sizeof(double));
cudaMemset(dSSERR, 0.00, sizeof(double));
dim3 threadsPerBlock(NB,NB);
dim3 blocks(1, 1);
    int shared_size = (cols + 1) * NB * sizeof(double); // dynamic shared memory is sized in bytes; dXblock holds NB rows of doubles
cudaDeviceSynchronize();
includGPU<<<blocks, threadsPerBlock, shared_size>>>(rows, cols, dA, dY, dD, dR, dRHS, dSSERR, dWeights, r_dim);
cudaDeviceSynchronize();
// Transfer results back to CPU from GPU!
cudaMemcpy(D, dD, cols*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(r, dR, r_dim*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(rhs, dRHS, cols*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(sserr, dSSERR, sizeof(double), cudaMemcpyDeviceToHost);
    // Then the rest of the code runs on the CPU. Since includ was the most time-consuming step (>90% of the computation), this should be ok.
/****************************************************************
// This part gets translated into CUDA device code.
for(int i=0; i<rows; i++) {
xrow[0] = 1.0;
for(int j=1; j<cols; j++) {
xrow[j] = A[i*cols+j-1];
}
includ(weights[i], xrow, y[i], cols, D, r, rhs, sserr);
}
*****************************************************************/
/* //Used to determine when values are accurate
for(int i=0; i<cols; i++) {
printf("D[%d]=%f rhs[%d]=%f\n", i, D[i], i, rhs[i]);
}
for(int i=0; i<r_dim; i++) {
printf("r[%d]=%f\n", i, r[i]);
}
printf("sserr=%f\n", sserr[0]);
*/
// std::cout << "sserr = " << sserr[0] << std::endl;
sing(lindep, ifault, cols, D, tol_set, r, tol, row_ptr, rhs, sserr, work);
if(ifault[0] == 0) {
std::cout << "QR-factorization is not singular" << std::endl;
} else {
for(int i=0; i<nvar; i++) {
if(lindep[i])
std::cout << vorder[i] << " is exactly linearly related to earlier variables" << std::endl;
}
}
ss(cols, sserr, rss, rss_set, D, rhs);
// Set tolerances and test for singularities
tolset(cols, work, r, tol, tol_set);
sing(lindep, ier, cols, D, tol_set, r, tol, row_ptr, rhs, sserr, work);
if(ier[0] != 0) {
std::cout << ier[0] << " singularities detected in predictor variables" << std::endl;
std::cout << "These variables are linearly related to earlier ones:" << std::endl;
for(int i=0; i<cols; i++) {
if(lindep[i]) {
for(int j=0; j<nvar; j++) {
if(lindep[j]) {
std::cout << vorder[j] << std::endl;
}
}
break;
}
}
}
// Not sure if these three need to be called again here...
tolset(cols, work, r, tol, tol_set);
sing(lindep, ier, cols, D, tol_set, r, tol, row_ptr, rhs, sserr, work);
ss(cols, sserr, rss, rss_set, D, rhs);
for(int i=0; i<max_size; i++) {
report(i, rss[i], max_size, bound, nbest, ress, vorder, lopt);
}
// total_sumsq = rss[0];
int first = 1;
int last = cols;
// The next part is that I will need to implement the different subset selection techniques, pick a few
// Forward selection
// double startForwrd = CycleTimer::currentSeconds();
forwrd(first, last, ifault, cols, max_size, D, rhs, r, nbest, rss, bound, ress, vorder, lopt, rss_set, sserr, row_ptr, tol);
// double endForwrd = CycleTimer::currentSeconds();
// std::cout << "Forwrd took " << 1000.f*(endForwrd-startForwrd) << std::endl;
for(int i=first; i<max_size; i++) {
std::cout << "Best subsets found of " << i << " variables" << std::endl;
std::cout << " R.S.S. Variable numbers" << std::endl;
int pos = (i*i+i)/2;
for(int j=0; j<nbest; j++) {
std::cout << ress[i][j] << " ";
for(int k=pos; k<pos+i+1; k++) {
std::cout << lopt[j][k] << " ";
}
std::cout << std::endl;
}
}
}
|
51ce3d6dd6ee0850e6fbf6b949ff0cd0fcc85104.hip | // !!! This is a file automatically generated by hipify!!!
/*
* * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
* *
* * See COPYRIGHT for license information
* */
#define NVSHMEMI_HOST_ONLY
#include "nvshmem_api.h"
#include "nvshmem_internal.h"
#include "nvshmemx_error.h"
#include "amo_kernel_entrypoints.cuh"
template <typename T>
int nvshmemi_p2p_amo_base(amo_verb_t verb, hipStream_t custrm, T *targetptr, T *retptr, T *curetptr,
T *valptr, T *cmpptr, amo_bytesdesc_t bytesdesc, const void *handle) {
int status = 0;
T val = 0, cmp = 0, ret = 0;
if (verb.is_val) {
val = *valptr;
if (verb.is_cmp) {
cmp = *cmpptr;
}
}
void *args[] = {&targetptr, &curetptr, &val, &cmp};
status = cudaLaunchKernel(handle, 1, 1, args, 0, custrm);
if (status) {
NZ_ERROR_JMP(status, NVSHMEMX_ERROR_INTERNAL, out, "cudaLaunchKernel() failed\n");
}
if (verb.is_fetch) {
status = cuMemcpyDtoHAsync(&ret, (hipDeviceptr_t)curetptr, bytesdesc.elembytes,
custrm); /*XXX:replace by GDRcopy*/
if (status) {
NZ_ERROR_JMP(status, NVSHMEMX_ERROR_INTERNAL, out, "cuMemcpyDtoHAsync() failed\n");
}
status = hipStreamSynchronize(custrm);
if (status) {
NZ_ERROR_JMP(status, NVSHMEMX_ERROR_INTERNAL, out, "hipStreamSynchronize() failed\n");
}
*retptr = ret;
}
out:
return status;
}
static int nvshmemi_p2p_amo_bitwise(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc, const void **handles) {
int status = 0;
switch (bytesdesc.name_type) {
case UINT:
status = nvshmemi_p2p_amo_base<unsigned int>(
verb, custrm, (unsigned int *)targetptr, (unsigned int *)retptr,
(unsigned int *)curetptr, (unsigned int *)valptr, (unsigned int *)cmpptr, bytesdesc,
handles[UINT]);
break;
case ULONG:
status = nvshmemi_p2p_amo_base<unsigned long>(
verb, custrm, (unsigned long *)targetptr, (unsigned long *)retptr,
(unsigned long *)curetptr, (unsigned long *)valptr, (unsigned long *)cmpptr,
bytesdesc, handles[ULONG]);
break;
case ULONGLONG:
status = nvshmemi_p2p_amo_base<unsigned long long>(
verb, custrm, (unsigned long long *)targetptr, (unsigned long long *)retptr,
(unsigned long long *)curetptr, (unsigned long long *)valptr,
(unsigned long long *)cmpptr, bytesdesc, handles[ULONGLONG]);
break;
case INT32:
status = nvshmemi_p2p_amo_base<int32_t>(
verb, custrm, (int32_t *)targetptr, (int32_t *)retptr, (int32_t *)curetptr,
(int32_t *)valptr, (int32_t *)cmpptr, bytesdesc, handles[INT32]);
break;
case INT64:
status = nvshmemi_p2p_amo_base<int64_t>(
verb, custrm, (int64_t *)targetptr, (int64_t *)retptr, (int64_t *)curetptr,
(int64_t *)valptr, (int64_t *)cmpptr, bytesdesc, handles[INT64]);
break;
case UINT32:
status = nvshmemi_p2p_amo_base<uint32_t>(
verb, custrm, (uint32_t *)targetptr, (uint32_t *)retptr, (uint32_t *)curetptr,
(uint32_t *)valptr, (uint32_t *)cmpptr, bytesdesc, handles[UINT32]);
break;
case UINT64:
status = nvshmemi_p2p_amo_base<uint64_t>(
verb, custrm, (uint64_t *)targetptr, (uint64_t *)retptr, (uint64_t *)curetptr,
(uint64_t *)valptr, (uint64_t *)cmpptr, bytesdesc, handles[UINT64]);
break;
default:
status = NVSHMEMX_ERROR_INTERNAL;
fprintf(stderr, "[%d] Invalid AMO type %d\n", nvshmem_state->mype, bytesdesc.name_type);
}
return status;
}
static int nvshmemi_p2p_amo_standard(amo_verb_t verb, hipStream_t custrm, void *targetptr,
void *retptr, void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc, const void **handles) {
int status = 0;
switch (bytesdesc.name_type) {
case INT:
status = nvshmemi_p2p_amo_base<int>(verb, custrm, (int *)targetptr, (int *)retptr,
(int *)curetptr, (int *)valptr, (int *)cmpptr,
bytesdesc, handles[INT]);
break;
case LONG:
status = nvshmemi_p2p_amo_base<long>(verb, custrm, (long *)targetptr, (long *)retptr,
(long *)curetptr, (long *)valptr, (long *)cmpptr,
bytesdesc, handles[LONG]);
break;
case LONGLONG:
status = nvshmemi_p2p_amo_base<long long>(
verb, custrm, (long long *)targetptr, (long long *)retptr, (long long *)curetptr,
(long long *)valptr, (long long *)cmpptr, bytesdesc, handles[LONGLONG]);
break;
case SIZE:
status = nvshmemi_p2p_amo_base<size_t>(
verb, custrm, (size_t *)targetptr, (size_t *)retptr, (size_t *)curetptr,
(size_t *)valptr, (size_t *)cmpptr, bytesdesc, handles[SIZE]);
break;
case PTRDIFF:
status = nvshmemi_p2p_amo_base<ptrdiff_t>(
verb, custrm, (ptrdiff_t *)targetptr, (ptrdiff_t *)retptr, (ptrdiff_t *)curetptr,
(ptrdiff_t *)valptr, (ptrdiff_t *)cmpptr, bytesdesc, handles[PTRDIFF]);
break;
default:
status = nvshmemi_p2p_amo_bitwise(verb, custrm, targetptr, retptr, curetptr, valptr,
cmpptr, bytesdesc, handles);
}
return status;
}
static int nvshmemi_p2p_amo_extended(amo_verb_t verb, hipStream_t custrm, void *targetptr,
void *retptr, void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc, const void **handles) {
int status = 0;
if (bytesdesc.name_type == FLOAT) {
status = nvshmemi_p2p_amo_base<float>(verb, custrm, (float *)targetptr, (float *)retptr,
(float *)curetptr, (float *)valptr, (float *)cmpptr,
bytesdesc, handles[FLOAT]);
} else if (bytesdesc.name_type == DOUBLE) {
status = nvshmemi_p2p_amo_base<double>(verb, custrm, (double *)targetptr, (double *)retptr,
(double *)curetptr, (double *)valptr,
(double *)cmpptr, bytesdesc, handles[DOUBLE]);
} else {
        status = nvshmemi_p2p_amo_standard(verb, custrm, targetptr, retptr, curetptr, valptr,
                                           cmpptr, bytesdesc, handles);
}
return status;
}
static int nvshmemi_p2p_amo_set(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicSetKernel<unsigned int>,
(void *)AtomicSetKernel<unsigned long, unsigned long long>,
(void *)AtomicSetKernel<unsigned long long>,
(void *)AtomicSetKernel<int32_t, int>,
(void *)AtomicSetKernel<int64_t, unsigned long long int>,
(void *)AtomicSetKernel<uint32_t, unsigned int>,
(void *)AtomicSetKernel<uint64_t, unsigned long long int>,
(void *)AtomicSetKernel<int>,
(void *)AtomicSetKernel<long, int>,
(void *)AtomicSetKernel<long long, unsigned long long int>,
(void *)AtomicSetKernel<size_t, unsigned long long int>,
(void *)AtomicSetKernel<ptrdiff_t, unsigned long long int>,
(void *)AtomicSetKernel<float, unsigned int>,
(void *)AtomicSetKernel<double, unsigned long long int>};
return nvshmemi_p2p_amo_extended(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_inc(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
/*XXX not implemented types : long long, ptrdiff_t, int64_t*/
const void *handles[] = {
(void *)AtomicIncKernel<unsigned int>,
(void *)AtomicIncKernel<unsigned long, unsigned long long>,
(void *)AtomicIncKernel<unsigned long long>,
(void *)AtomicIncKernel<int32_t, int>,
0 /*AtomicIncKernel<int64_t>*/,
(void *)AtomicIncKernel<uint32_t, unsigned int>,
(void *)AtomicIncKernel<uint64_t, unsigned long long int>,
(void *)AtomicIncKernel<int>,
(void *)AtomicIncKernel<long, int>,
0 /*AtomicIncKernel<long long>*/,
(void *)AtomicIncKernel<size_t, unsigned long long int>,
0 /*AtomicIncKernel<ptrdiff_t>*/
};
return nvshmemi_p2p_amo_standard(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_add(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
/*XXX not implemented types : long long, ptrdiff_t, int64_t*/
const void *handles[] = {
(void *)AtomicAddKernel<unsigned int>,
(void *)AtomicAddKernel<unsigned long, unsigned long long>,
(void *)AtomicAddKernel<unsigned long long>,
(void *)AtomicAddKernel<int32_t, int>,
0 /*AtomicAddKernel<int64_t>*/,
(void *)AtomicAddKernel<uint32_t, unsigned int>,
(void *)AtomicAddKernel<uint64_t, unsigned long long int>,
(void *)AtomicAddKernel<int>,
(void *)AtomicAddKernel<long, int>,
0 /*AtomicAddKernel<long long>*/,
(void *)AtomicAddKernel<size_t, unsigned long long int>,
0 /*AtomicAddKernel<ptrdiff_t>*/
};
return nvshmemi_p2p_amo_standard(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_and(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicAndKernel<unsigned int>,
(void *)AtomicAndKernel<unsigned long, unsigned long long>,
(void *)AtomicAndKernel<unsigned long long>,
(void *)AtomicAndKernel<int32_t, int>,
(void *)AtomicAndKernel<int64_t, unsigned long long int>,
(void *)AtomicAndKernel<uint32_t, unsigned int>,
(void *)AtomicAndKernel<uint64_t, unsigned long long int>};
return nvshmemi_p2p_amo_bitwise(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_or(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicOrKernel<unsigned int>,
(void *)AtomicOrKernel<unsigned long, unsigned long long>,
(void *)AtomicOrKernel<unsigned long long>,
(void *)AtomicOrKernel<int32_t, int>,
(void *)AtomicOrKernel<int64_t, unsigned long long int>,
(void *)AtomicOrKernel<uint32_t, unsigned int>,
(void *)AtomicOrKernel<uint64_t, unsigned long long int>};
return nvshmemi_p2p_amo_bitwise(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_xor(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicXorKernel<unsigned int>,
(void *)AtomicXorKernel<unsigned long, unsigned long long>,
(void *)AtomicXorKernel<unsigned long long>,
(void *)AtomicXorKernel<int32_t, int>,
(void *)AtomicXorKernel<int64_t, unsigned long long int>,
(void *)AtomicXorKernel<uint32_t, unsigned int>,
(void *)AtomicXorKernel<uint64_t, unsigned long long int>};
return nvshmemi_p2p_amo_bitwise(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_fetch(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicFetchKernel<unsigned int>,
(void *)AtomicFetchKernel<unsigned long, unsigned long long>,
(void *)AtomicFetchKernel<unsigned long long>,
(void *)AtomicFetchKernel<int32_t, int>,
(void *)AtomicFetchKernel<int64_t, unsigned long long int>,
(void *)AtomicFetchKernel<uint32_t, unsigned int>,
(void *)AtomicFetchKernel<uint64_t, unsigned long long int>,
(void *)AtomicFetchKernel<int>,
(void *)AtomicFetchKernel<long, int>,
(void *)AtomicFetchKernel<long long, unsigned long long int>,
(void *)AtomicFetchKernel<size_t, unsigned long long int>,
(void *)AtomicFetchKernel<ptrdiff_t, unsigned long long int>,
(void *)AtomicFetchKernel<float, unsigned int>,
(void *)AtomicFetchKernel<double, unsigned long long int>};
return nvshmemi_p2p_amo_extended(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_fetch_inc(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
/*XXX not implemented types : long long, ptrdiff_t, int64_t*/
const void *handles[] = {
(void *)AtomicFincKernel<unsigned int>,
(void *)AtomicFincKernel<unsigned long, unsigned long long>,
(void *)AtomicFincKernel<unsigned long long>,
(void *)AtomicFincKernel<int32_t, int>,
0 /*AtomicFincKernel<int64_t>*/,
(void *)AtomicFincKernel<uint32_t, unsigned int>,
(void *)AtomicFincKernel<uint64_t, unsigned long long int>,
(void *)AtomicFincKernel<int>,
(void *)AtomicFincKernel<long, int>,
0 /*AtomicFincKernel<long long>*/,
(void *)AtomicFincKernel<size_t, unsigned long long int>,
0 /*AtomicFincKernel<ptrdiff_t>*/
};
return nvshmemi_p2p_amo_standard(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_fetch_add(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
/*XXX not implemented types : long long, ptrdiff_t, int64_t*/
const void *handles[] = {
(void *)AtomicFaddKernel<unsigned int>,
(void *)AtomicFaddKernel<unsigned long, unsigned long long>,
(void *)AtomicFaddKernel<unsigned long long>,
(void *)AtomicFaddKernel<int32_t, int>,
0 /*AtomicFaddKernel<int64_t>*/,
(void *)AtomicFaddKernel<uint32_t, unsigned int>,
(void *)AtomicFaddKernel<uint64_t, unsigned long long int>,
(void *)AtomicFaddKernel<int>,
(void *)AtomicFaddKernel<long, int>,
0 /*AtomicFaddKernel<long long>*/,
(void *)AtomicFaddKernel<size_t, unsigned long long int>,
0 /*AtomicFaddKernel<ptrdiff_t>*/
};
return nvshmemi_p2p_amo_standard(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_swap(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicSwapKernel<unsigned int>,
(void *)AtomicSwapKernel<unsigned long, unsigned long long>,
(void *)AtomicSwapKernel<unsigned long long>,
(void *)AtomicSwapKernel<int32_t, int>,
(void *)AtomicSwapKernel<int64_t, unsigned long long int>,
(void *)AtomicSwapKernel<uint32_t, unsigned int>,
(void *)AtomicSwapKernel<uint64_t, unsigned long long int>,
(void *)AtomicSwapKernel<int>,
(void *)AtomicSwapKernel<long, int>,
(void *)AtomicSwapKernel<long long, unsigned long long int>,
(void *)AtomicSwapKernel<size_t, unsigned long long int>,
(void *)AtomicSwapKernel<ptrdiff_t, unsigned long long int>,
(void *)AtomicSwapKernel<float, unsigned int>,
(void *)AtomicSwapKernel<double, unsigned long long int>};
return nvshmemi_p2p_amo_extended(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_compare_swap(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicCswapKernel<unsigned int>,
(void *)AtomicCswapKernel<unsigned long, unsigned long long>,
(void *)AtomicCswapKernel<unsigned long long>,
(void *)AtomicCswapKernel<int32_t, int>,
(void *)AtomicCswapKernel<int64_t, unsigned long long int>,
(void *)AtomicCswapKernel<uint32_t, unsigned int>,
(void *)AtomicCswapKernel<uint64_t, unsigned long long int>,
(void *)AtomicCswapKernel<int>,
(void *)AtomicCswapKernel<long, int>,
(void *)AtomicCswapKernel<long long, unsigned long long int>,
(void *)AtomicCswapKernel<size_t, unsigned long long int>,
(void *)AtomicCswapKernel<ptrdiff_t, unsigned long long int>};
return nvshmemi_p2p_amo_standard(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_fetch_and(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicFandKernel<unsigned int>,
(void *)AtomicFandKernel<unsigned long, unsigned long long>,
(void *)AtomicFandKernel<unsigned long long>,
(void *)AtomicFandKernel<int32_t, int>,
(void *)AtomicFandKernel<int64_t, unsigned long long int>,
(void *)AtomicFandKernel<uint32_t, unsigned int>,
(void *)AtomicFandKernel<uint64_t, unsigned long long int>};
return nvshmemi_p2p_amo_bitwise(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_fetch_or(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicForKernel<unsigned int>,
(void *)AtomicForKernel<unsigned long, unsigned long long>,
(void *)AtomicForKernel<unsigned long long>,
(void *)AtomicForKernel<int32_t, int>,
(void *)AtomicForKernel<int64_t, unsigned long long int>,
(void *)AtomicForKernel<uint32_t, unsigned int>,
(void *)AtomicForKernel<uint64_t, unsigned long long int>};
return nvshmemi_p2p_amo_bitwise(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_fetch_xor(amo_verb_t verb, hipStream_t custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicFxorKernel<unsigned int>,
(void *)AtomicFxorKernel<unsigned long, unsigned long long>,
(void *)AtomicFxorKernel<unsigned long long>,
(void *)AtomicFxorKernel<int32_t, int>,
(void *)AtomicFxorKernel<int64_t, unsigned long long int>,
(void *)AtomicFxorKernel<uint32_t, unsigned int>,
(void *)AtomicFxorKernel<uint64_t, unsigned long long int>};
return nvshmemi_p2p_amo_bitwise(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo(hipStream_t custrm, hipEvent_t cuev, void *curetptr, amo_verb_t verb,
amo_memdesc_t target, amo_bytesdesc_t bytesdesc) {
int status = 0;
switch (verb.desc) {
/*ret NULL*/
case NVSHMEMI_AMO_SET:
status = nvshmemi_p2p_amo_set(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_INC:
status =
nvshmemi_p2p_amo_inc(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*val, cmp NULL*/
break;
case NVSHMEMI_AMO_ADD:
status = nvshmemi_p2p_amo_add(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_AND:
status = nvshmemi_p2p_amo_and(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_OR:
status = nvshmemi_p2p_amo_or(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_XOR:
status = nvshmemi_p2p_amo_xor(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
/*ret !NULL*/
case NVSHMEMI_AMO_FETCH:
status =
nvshmemi_p2p_amo_fetch(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*val, cmp NULL*/
break;
case NVSHMEMI_AMO_FETCH_INC:
status =
nvshmemi_p2p_amo_fetch_inc(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*val, cmp NULL*/
break;
case NVSHMEMI_AMO_FETCH_ADD:
status = nvshmemi_p2p_amo_fetch_add(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_SWAP:
status = nvshmemi_p2p_amo_swap(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_COMPARE_SWAP:
status = nvshmemi_p2p_amo_compare_swap(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc);
break;
case NVSHMEMI_AMO_FETCH_AND:
status = nvshmemi_p2p_amo_fetch_and(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_FETCH_OR:
status = nvshmemi_p2p_amo_fetch_or(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_FETCH_XOR:
status = nvshmemi_p2p_amo_fetch_xor(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
}
return status;
}
static void nvshmemi_prepare_and_post_amo(nvshmemi_amo_t desc, void *targetptr, void *retptr,
void *valptr, void *cmpptr, size_t elembytes, int pe,
int nameoftype, const char *apiname) {
int status = 0;
amo_verb_t verb;
amo_memdesc_t target;
amo_bytesdesc_t bytesdesc;
verb.desc = desc;
switch (desc) {
case NVSHMEMI_AMO_INC:
verb.is_val = 0;
verb.is_cmp = 0;
verb.is_fetch = 0;
break;
case NVSHMEMI_AMO_SET:
case NVSHMEMI_AMO_ADD:
case NVSHMEMI_AMO_AND:
case NVSHMEMI_AMO_OR:
case NVSHMEMI_AMO_XOR:
verb.is_val = 1;
verb.is_cmp = 0;
verb.is_fetch = 0;
break;
case NVSHMEMI_AMO_FETCH:
case NVSHMEMI_AMO_FETCH_INC:
verb.is_val = 0;
verb.is_cmp = 0;
verb.is_fetch = 1;
break;
case NVSHMEMI_AMO_SWAP:
case NVSHMEMI_AMO_FETCH_ADD:
case NVSHMEMI_AMO_FETCH_AND:
case NVSHMEMI_AMO_FETCH_OR:
case NVSHMEMI_AMO_FETCH_XOR:
verb.is_val = 1;
verb.is_cmp = 0;
verb.is_fetch = 1;
break;
case NVSHMEMI_AMO_COMPARE_SWAP:
verb.is_val = 1;
verb.is_cmp = 1;
verb.is_fetch = 1;
break;
}
bytesdesc.elembytes = elembytes;
bytesdesc.name_type = nameoftype;
volatile void *targetptr_actual =
(volatile void *)((char *)(nvshmem_state->peer_heap_base[pe]) +
((char *)targetptr - (char *)(nvshmem_state->heap_base)));
target.ptr = (void *)targetptr_actual;
target.retptr = retptr;
target.valptr = valptr;
target.cmpptr = cmpptr;
void *curetptr = (void *)nvshmem_state->curets[pe];
if (targetptr_actual) {
hipStream_t custrm = nvshmem_state->custreams[pe % MAX_PEER_STREAMS];
hipEvent_t cuev = nvshmem_state->cuevents[pe % MAX_PEER_STREAMS];
if (nvshmem_state
->p2p_attrib_native_atomic_support[pe]) { /*AMO not supported for P2P over PCIE*/
status = nvshmemi_p2p_amo(custrm, cuev, curetptr, verb, target,
bytesdesc); /*bypass transport for P2P*/
} else {
ERROR_PRINT("[%d] %s to PE %d does not have P2P path\n", nvshmem_state->mype, apiname,
pe);
}
} else {
int t = nvshmem_state->selected_transport_for_amo[pe];
if (t < 0) {
ERROR_EXIT("[%d] amo not supported on transport to pe: %d \n", nvshmem_state->mype, pe);
}
nvshmemt_ep_t ep;
int tcount = nvshmem_state->transport_count;
struct nvshmem_transport *tcurr = nvshmem_state->transports[t];
int ep_offset = pe * tcurr->ep_count;
ep = tcurr->ep[ep_offset];
nvshmem_mem_handle_t *handles = nvshmem_state->handles;
target.handle = handles[pe * tcount + t];
status = nvshmem_state->amo[pe](ep, curetptr, verb, target, bytesdesc);
}
if (status) {
ERROR_EXIT("[%d] aborting due to error in %s \n", nvshmem_state->mype, apiname);
}
}
#define NVSHMEM_TYPE_INC(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_inc(TYPE *target, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_INC, (void *)target, 0, 0, 0, sizeof(TYPE), pe, NameIdx, \
"nvshmem_" #Name "_atomic_inc"); \
}
#define NVSHMEM_TYPE_INC_NOT_IMPLEMENTED(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_inc(TYPE *target, int pe) { \
ERROR_PRINT("[%d] nvshmem_" #Name "_atomic_inc() not implemented", nvshmem_state->mype); \
}
NVSHMEM_TYPE_INC(uint, UINT, unsigned int)
NVSHMEM_TYPE_INC(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_INC(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_INC(int32, INT32, int32_t)
NVSHMEM_TYPE_INC(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_INC_NOT_IMPLEMENTED(int64, INT64, int64_t) /*XXX:not implemented*/
NVSHMEM_TYPE_INC(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_INC(int, INT, int)
NVSHMEM_TYPE_INC(long, LONG, long)
NVSHMEM_TYPE_INC_NOT_IMPLEMENTED(longlong, LONGLONG, long long) /*XXX:not implemented*/
NVSHMEM_TYPE_INC(size, SIZE, size_t)
NVSHMEM_TYPE_INC_NOT_IMPLEMENTED(ptrdiff, PTRDIFF, ptrdiff_t) /*XXX:not implemented*/
#define NVSHMEM_TYPE_ADD(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_add(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_ADD, (void *)target, 0, &value, 0, sizeof(TYPE), pe, \
NameIdx, "nvshmem_" #Name "_atomic_add"); \
}
#define NVSHMEM_TYPE_ADD_NOT_IMPLEMENTED(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_add(TYPE *target, TYPE value, int pe) { \
ERROR_PRINT("[%d] nvshmem_" #Name "_atomic_add() not implemented", nvshmem_state->mype); \
}
NVSHMEM_TYPE_ADD(uint, UINT, unsigned int)
NVSHMEM_TYPE_ADD(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_ADD(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_ADD(int32, INT32, int32_t)
NVSHMEM_TYPE_ADD(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_ADD_NOT_IMPLEMENTED(int64, INT64, int64_t) /*XXX:not implemented*/
NVSHMEM_TYPE_ADD(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_ADD(int, INT, int)
NVSHMEM_TYPE_ADD(long, LONG, long)
NVSHMEM_TYPE_ADD_NOT_IMPLEMENTED(longlong, LONGLONG, long long) /*XXX:not implemented*/
NVSHMEM_TYPE_ADD(size, SIZE, size_t)
NVSHMEM_TYPE_ADD_NOT_IMPLEMENTED(ptrdiff, PTRDIFF, ptrdiff_t) /*XXX:not implemented*/
#define NVSHMEM_TYPE_SET(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_set(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_SET, (void *)target, 0, &value, 0, sizeof(TYPE), pe, \
NameIdx, "nvshmem_" #Name "_atomic_set"); \
}
NVSHMEM_TYPE_SET(uint, UINT, unsigned int)
NVSHMEM_TYPE_SET(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_SET(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_SET(int32, INT32, int32_t)
NVSHMEM_TYPE_SET(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_SET(int64, INT64, int64_t)
NVSHMEM_TYPE_SET(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_SET(int, INT, int)
NVSHMEM_TYPE_SET(long, LONG, long)
NVSHMEM_TYPE_SET(longlong, LONGLONG, long long)
NVSHMEM_TYPE_SET(size, SIZE, size_t)
NVSHMEM_TYPE_SET(ptrdiff, PTRDIFF, ptrdiff_t)
NVSHMEM_TYPE_SET(float, FLOAT, float)
NVSHMEM_TYPE_SET(double, DOUBLE, double)
#define NVSHMEM_TYPE_AND(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_and(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_AND, (void *)target, 0, &value, 0, sizeof(TYPE), pe, \
NameIdx, "nvshmem_" #Name "_atomic_and"); \
}
NVSHMEM_TYPE_AND(uint, UINT, unsigned int)
NVSHMEM_TYPE_AND(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_AND(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_AND(int32, INT32, int32_t)
NVSHMEM_TYPE_AND(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_AND(int64, INT64, int64_t)
NVSHMEM_TYPE_AND(uint64, UINT64, uint64_t)
#define NVSHMEM_TYPE_OR(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_or(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_OR, (void *)target, 0, &value, 0, sizeof(TYPE), pe, NameIdx, \
"nvshmem_" #Name "_atomic_or"); \
}
NVSHMEM_TYPE_OR(uint, UINT, unsigned int)
NVSHMEM_TYPE_OR(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_OR(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_OR(int32, INT32, int32_t)
NVSHMEM_TYPE_OR(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_OR(int64, INT64, int64_t)
NVSHMEM_TYPE_OR(uint64, UINT64, uint64_t)
#define NVSHMEM_TYPE_XOR(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_xor(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_XOR, (void *)target, 0, &value, 0, sizeof(TYPE), pe, \
NameIdx, "nvshmem_" #Name "_atomic_xor"); \
}
NVSHMEM_TYPE_XOR(uint, UINT, unsigned int)
NVSHMEM_TYPE_XOR(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_XOR(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_XOR(int32, INT32, int32_t)
NVSHMEM_TYPE_XOR(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_XOR(int64, INT64, int64_t)
NVSHMEM_TYPE_XOR(uint64, UINT64, uint64_t)
#define NVSHMEM_TYPE_FETCH(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch(TYPE *target, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_FETCH, (void *)target, (void *)&ret, 0, 0, sizeof(TYPE), pe, \
NameIdx, "nvshmem_" #Name "_atomic_fetch"); \
return ret; \
}
NVSHMEM_TYPE_FETCH(uint, UINT, unsigned int)
NVSHMEM_TYPE_FETCH(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_FETCH(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_FETCH(int32, INT32, int32_t)
NVSHMEM_TYPE_FETCH(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_FETCH(int64, INT64, int64_t)
NVSHMEM_TYPE_FETCH(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_FETCH(int, INT, int)
NVSHMEM_TYPE_FETCH(long, LONG, long)
NVSHMEM_TYPE_FETCH(longlong, LONGLONG, long long)
NVSHMEM_TYPE_FETCH(size, SIZE, size_t)
NVSHMEM_TYPE_FETCH(ptrdiff, PTRDIFF, ptrdiff_t)
NVSHMEM_TYPE_FETCH(float, FLOAT, float)
NVSHMEM_TYPE_FETCH(double, DOUBLE, double)
#define NVSHMEM_TYPE_FETCH_INC(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch_inc(TYPE *target, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_FETCH_INC, (void *)target, (void *)&ret, 0, 0, sizeof(TYPE), \
pe, NameIdx, "nvshmem_" #Name "_atomic_fetch_inc"); \
return ret; \
}
#define NVSHMEM_TYPE_FETCH_INC_NOT_IMPLEMENTED(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch_inc(TYPE *target, int pe) { \
ERROR_PRINT("[%d] nvshmem_" #Name "_atomic_fetch_inc() not implemented", nvshmem_state->mype); \
return 0; \
}
NVSHMEM_TYPE_FETCH_INC(uint, UINT, unsigned int)
NVSHMEM_TYPE_FETCH_INC(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_FETCH_INC(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_FETCH_INC(int32, INT32, int32_t)
NVSHMEM_TYPE_FETCH_INC(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_FETCH_INC_NOT_IMPLEMENTED(int64, INT64, int64_t) /*XXX:not implemented*/
NVSHMEM_TYPE_FETCH_INC(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_FETCH_INC(int, INT, int)
NVSHMEM_TYPE_FETCH_INC(long, LONG, long)
NVSHMEM_TYPE_FETCH_INC_NOT_IMPLEMENTED(longlong, LONGLONG, long long) /*XXX:not implemented*/
NVSHMEM_TYPE_FETCH_INC(size, SIZE, size_t)
NVSHMEM_TYPE_FETCH_INC_NOT_IMPLEMENTED(ptrdiff, PTRDIFF, ptrdiff_t) /*XXX:not implemented*/
#define NVSHMEM_TYPE_FETCH_ADD(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch_add(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_FETCH_ADD, (void *)target, (void *)&ret, &value, 0, \
sizeof(TYPE), pe, NameIdx, "nvshmem_" #Name "_atomic_fetch_add"); \
return ret; \
}
#define NVSHMEM_TYPE_FETCH_ADD_NOT_IMPLEMENTED(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch_add(TYPE *target, TYPE value, int pe) { \
ERROR_PRINT("[%d] nvshmem_" #Name "_atomic_fadd() not implemented", nvshmem_state->mype); \
return 0; \
}
NVSHMEM_TYPE_FETCH_ADD(uint, UINT, unsigned int)
NVSHMEM_TYPE_FETCH_ADD(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_FETCH_ADD(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_FETCH_ADD(int32, INT32, int32_t)
NVSHMEM_TYPE_FETCH_ADD(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_FETCH_ADD_NOT_IMPLEMENTED(int64, INT64, int64_t) /*XXX:not implemented*/
NVSHMEM_TYPE_FETCH_ADD(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_FETCH_ADD(int, INT, int)
NVSHMEM_TYPE_FETCH_ADD(long, LONG, long)
NVSHMEM_TYPE_FETCH_ADD_NOT_IMPLEMENTED(longlong, LONGLONG, long long) /*XXX:not implemented*/
NVSHMEM_TYPE_FETCH_ADD(size, SIZE, size_t)
NVSHMEM_TYPE_FETCH_ADD_NOT_IMPLEMENTED(ptrdiff, PTRDIFF, ptrdiff_t) /*XXX:not implemented*/
#define NVSHMEM_TYPE_SWAP(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_swap(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_SWAP, (void *)target, (void *)&ret, &value, 0, sizeof(TYPE), \
pe, NameIdx, "nvshmem_" #Name "_atomic_swap"); \
return ret; \
}
NVSHMEM_TYPE_SWAP(uint, UINT, unsigned int)
NVSHMEM_TYPE_SWAP(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_SWAP(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_SWAP(int32, INT32, int32_t)
NVSHMEM_TYPE_SWAP(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_SWAP(int64, INT64, int64_t)
NVSHMEM_TYPE_SWAP(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_SWAP(int, INT, int)
NVSHMEM_TYPE_SWAP(long, LONG, long)
NVSHMEM_TYPE_SWAP(longlong, LONGLONG, long long)
NVSHMEM_TYPE_SWAP(size, SIZE, size_t)
NVSHMEM_TYPE_SWAP(ptrdiff, PTRDIFF, ptrdiff_t)
NVSHMEM_TYPE_SWAP(float, FLOAT, float)
NVSHMEM_TYPE_SWAP(double, DOUBLE, double)
#define NVSHMEM_TYPE_COMPARE_SWAP(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_compare_swap(TYPE *target, TYPE cond, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_COMPARE_SWAP, (void *)target, (void *)&ret, &value, &cond, \
                                      sizeof(TYPE), pe, NameIdx, "nvshmem_" #Name "_atomic_compare_swap"); \
return ret; \
}
NVSHMEM_TYPE_COMPARE_SWAP(uint, UINT, unsigned int)
NVSHMEM_TYPE_COMPARE_SWAP(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_COMPARE_SWAP(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_COMPARE_SWAP(int32, INT32, int32_t)
NVSHMEM_TYPE_COMPARE_SWAP(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_COMPARE_SWAP(int64, INT64, int64_t)
NVSHMEM_TYPE_COMPARE_SWAP(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_COMPARE_SWAP(int, INT, int)
NVSHMEM_TYPE_COMPARE_SWAP(long, LONG, long)
NVSHMEM_TYPE_COMPARE_SWAP(longlong, LONGLONG, long long)
NVSHMEM_TYPE_COMPARE_SWAP(size, SIZE, size_t)
NVSHMEM_TYPE_COMPARE_SWAP(ptrdiff, PTRDIFF, ptrdiff_t)
#define NVSHMEM_TYPE_FETCH_AND(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch_and(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_FETCH_AND, (void *)target, (void *)&ret, &value, 0, \
sizeof(TYPE), pe, NameIdx, "nvshmem_" #Name "_atomic_fetch_and"); \
return ret; \
}
NVSHMEM_TYPE_FETCH_AND(uint, UINT, unsigned int)
NVSHMEM_TYPE_FETCH_AND(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_FETCH_AND(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_FETCH_AND(int32, INT32, int32_t)
NVSHMEM_TYPE_FETCH_AND(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_FETCH_AND(int64, INT64, int64_t)
NVSHMEM_TYPE_FETCH_AND(uint64, UINT64, uint64_t)
#define NVSHMEM_TYPE_FETCH_OR(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch_or(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_FETCH_OR, (void *)target, (void *)&ret, &value, 0, \
sizeof(TYPE), pe, NameIdx, "nvshmem_" #Name "_atomic_fetch_or"); \
return ret; \
}
NVSHMEM_TYPE_FETCH_OR(uint, UINT, unsigned int)
NVSHMEM_TYPE_FETCH_OR(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_FETCH_OR(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_FETCH_OR(int32, INT32, int32_t)
NVSHMEM_TYPE_FETCH_OR(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_FETCH_OR(int64, INT64, int64_t)
NVSHMEM_TYPE_FETCH_OR(uint64, UINT64, uint64_t)
#define NVSHMEM_TYPE_FETCH_XOR(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch_xor(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_FETCH_XOR, (void *)target, (void *)&ret, &value, 0, \
sizeof(TYPE), pe, NameIdx, "nvshmem_" #Name "_atomic_fetch_xor"); \
return ret; \
}
NVSHMEM_TYPE_FETCH_XOR(uint, UINT, unsigned int)
NVSHMEM_TYPE_FETCH_XOR(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_FETCH_XOR(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_FETCH_XOR(int32, INT32, int32_t)
NVSHMEM_TYPE_FETCH_XOR(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_FETCH_XOR(int64, INT64, int64_t)
NVSHMEM_TYPE_FETCH_XOR(uint64, UINT64, uint64_t)
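/* Example host-side usage of the atomic API generated above (illustrative only;
 * peer_pe is a placeholder and ctr must be allocated from the symmetric heap):
 *
 *   int *ctr = (int *)nvshmem_malloc(sizeof(int));
 *   nvshmem_int_atomic_add(ctr, 1, peer_pe);                  // add 1 to ctr on PE peer_pe
 *   int prev = nvshmem_int_atomic_fetch_add(ctr, 2, peer_pe); // returns the old value
 */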
| 51ce3d6dd6ee0850e6fbf6b949ff0cd0fcc85104.cu | /*
* * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
* *
* * See COPYRIGHT for license information
* */
#define NVSHMEMI_HOST_ONLY
#include "nvshmem_api.h"
#include "nvshmem_internal.h"
#include "nvshmemx_error.h"
#include "amo_kernel_entrypoints.cuh"
template <typename T>
int nvshmemi_p2p_amo_base(amo_verb_t verb, CUstream custrm, T *targetptr, T *retptr, T *curetptr,
T *valptr, T *cmpptr, amo_bytesdesc_t bytesdesc, const void *handle) {
int status = 0;
T val = 0, cmp = 0, ret = 0;
if (verb.is_val) {
val = *valptr;
if (verb.is_cmp) {
cmp = *cmpptr;
}
}
void *args[] = {&targetptr, &curetptr, &val, &cmp};
status = cudaLaunchKernel(handle, 1, 1, args, 0, custrm);
if (status) {
NZ_ERROR_JMP(status, NVSHMEMX_ERROR_INTERNAL, out, "cudaLaunchKernel() failed\n");
}
if (verb.is_fetch) {
status = cuMemcpyDtoHAsync(&ret, (CUdeviceptr)curetptr, bytesdesc.elembytes,
custrm); /*XXX:replace by GDRcopy*/
if (status) {
NZ_ERROR_JMP(status, NVSHMEMX_ERROR_INTERNAL, out, "cuMemcpyDtoHAsync() failed\n");
}
status = cuStreamSynchronize(custrm);
if (status) {
NZ_ERROR_JMP(status, NVSHMEMX_ERROR_INTERNAL, out, "cuStreamSynchronize() failed\n");
}
*retptr = ret;
}
out:
return status;
}
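// Illustrative only: the real entry points (AtomicAddKernel, AtomicFaddKernel,
// ...) referenced by the handle tables in this file come from
// amo_kernel_entrypoints.cuh, which is not shown here. A fetch-and-add entry
// point compatible with the {&targetptr, &curetptr, &val, &cmp} argument list
// prepared above could be a single-thread kernel roughly like this sketch; the
// name and template parameters are assumptions, not the actual header contents.
template <typename T>
__global__ void ExampleFaddEntryPoint(T *target, T *ret, T val, T cmp) {
    (void)cmp;                      // the compare value is unused for fetch-and-add
    *ret = atomicAdd(target, val);  // atomic update on the peer-mapped target
}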
static int nvshmemi_p2p_amo_bitwise(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc, const void **handles) {
int status = 0;
switch (bytesdesc.name_type) {
case UINT:
status = nvshmemi_p2p_amo_base<unsigned int>(
verb, custrm, (unsigned int *)targetptr, (unsigned int *)retptr,
(unsigned int *)curetptr, (unsigned int *)valptr, (unsigned int *)cmpptr, bytesdesc,
handles[UINT]);
break;
case ULONG:
status = nvshmemi_p2p_amo_base<unsigned long>(
verb, custrm, (unsigned long *)targetptr, (unsigned long *)retptr,
(unsigned long *)curetptr, (unsigned long *)valptr, (unsigned long *)cmpptr,
bytesdesc, handles[ULONG]);
break;
case ULONGLONG:
status = nvshmemi_p2p_amo_base<unsigned long long>(
verb, custrm, (unsigned long long *)targetptr, (unsigned long long *)retptr,
(unsigned long long *)curetptr, (unsigned long long *)valptr,
(unsigned long long *)cmpptr, bytesdesc, handles[ULONGLONG]);
break;
case INT32:
status = nvshmemi_p2p_amo_base<int32_t>(
verb, custrm, (int32_t *)targetptr, (int32_t *)retptr, (int32_t *)curetptr,
(int32_t *)valptr, (int32_t *)cmpptr, bytesdesc, handles[INT32]);
break;
case INT64:
status = nvshmemi_p2p_amo_base<int64_t>(
verb, custrm, (int64_t *)targetptr, (int64_t *)retptr, (int64_t *)curetptr,
(int64_t *)valptr, (int64_t *)cmpptr, bytesdesc, handles[INT64]);
break;
case UINT32:
status = nvshmemi_p2p_amo_base<uint32_t>(
verb, custrm, (uint32_t *)targetptr, (uint32_t *)retptr, (uint32_t *)curetptr,
(uint32_t *)valptr, (uint32_t *)cmpptr, bytesdesc, handles[UINT32]);
break;
case UINT64:
status = nvshmemi_p2p_amo_base<uint64_t>(
verb, custrm, (uint64_t *)targetptr, (uint64_t *)retptr, (uint64_t *)curetptr,
(uint64_t *)valptr, (uint64_t *)cmpptr, bytesdesc, handles[UINT64]);
break;
default:
status = NVSHMEMX_ERROR_INTERNAL;
fprintf(stderr, "[%d] Invalid AMO type %d\n", nvshmem_state->mype, bytesdesc.name_type);
}
return status;
}
static int nvshmemi_p2p_amo_standard(amo_verb_t verb, CUstream custrm, void *targetptr,
void *retptr, void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc, const void **handles) {
int status = 0;
switch (bytesdesc.name_type) {
case INT:
status = nvshmemi_p2p_amo_base<int>(verb, custrm, (int *)targetptr, (int *)retptr,
(int *)curetptr, (int *)valptr, (int *)cmpptr,
bytesdesc, handles[INT]);
break;
case LONG:
status = nvshmemi_p2p_amo_base<long>(verb, custrm, (long *)targetptr, (long *)retptr,
(long *)curetptr, (long *)valptr, (long *)cmpptr,
bytesdesc, handles[LONG]);
break;
case LONGLONG:
status = nvshmemi_p2p_amo_base<long long>(
verb, custrm, (long long *)targetptr, (long long *)retptr, (long long *)curetptr,
(long long *)valptr, (long long *)cmpptr, bytesdesc, handles[LONGLONG]);
break;
case SIZE:
status = nvshmemi_p2p_amo_base<size_t>(
verb, custrm, (size_t *)targetptr, (size_t *)retptr, (size_t *)curetptr,
(size_t *)valptr, (size_t *)cmpptr, bytesdesc, handles[SIZE]);
break;
case PTRDIFF:
status = nvshmemi_p2p_amo_base<ptrdiff_t>(
verb, custrm, (ptrdiff_t *)targetptr, (ptrdiff_t *)retptr, (ptrdiff_t *)curetptr,
(ptrdiff_t *)valptr, (ptrdiff_t *)cmpptr, bytesdesc, handles[PTRDIFF]);
break;
default:
status = nvshmemi_p2p_amo_bitwise(verb, custrm, targetptr, retptr, curetptr, valptr,
cmpptr, bytesdesc, handles);
}
return status;
}
static int nvshmemi_p2p_amo_extended(amo_verb_t verb, CUstream custrm, void *targetptr,
void *retptr, void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc, const void **handles) {
int status = 0;
if (bytesdesc.name_type == FLOAT) {
status = nvshmemi_p2p_amo_base<float>(verb, custrm, (float *)targetptr, (float *)retptr,
(float *)curetptr, (float *)valptr, (float *)cmpptr,
bytesdesc, handles[FLOAT]);
} else if (bytesdesc.name_type == DOUBLE) {
status = nvshmemi_p2p_amo_base<double>(verb, custrm, (double *)targetptr, (double *)retptr,
(double *)curetptr, (double *)valptr,
(double *)cmpptr, bytesdesc, handles[DOUBLE]);
} else {
nvshmemi_p2p_amo_standard(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
return status;
}
static int nvshmemi_p2p_amo_set(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicSetKernel<unsigned int>,
(void *)AtomicSetKernel<unsigned long, unsigned long long>,
(void *)AtomicSetKernel<unsigned long long>,
(void *)AtomicSetKernel<int32_t, int>,
(void *)AtomicSetKernel<int64_t, unsigned long long int>,
(void *)AtomicSetKernel<uint32_t, unsigned int>,
(void *)AtomicSetKernel<uint64_t, unsigned long long int>,
(void *)AtomicSetKernel<int>,
(void *)AtomicSetKernel<long, int>,
(void *)AtomicSetKernel<long long, unsigned long long int>,
(void *)AtomicSetKernel<size_t, unsigned long long int>,
(void *)AtomicSetKernel<ptrdiff_t, unsigned long long int>,
(void *)AtomicSetKernel<float, unsigned int>,
(void *)AtomicSetKernel<double, unsigned long long int>};
return nvshmemi_p2p_amo_extended(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_inc(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
/*XXX not implemented types : long long, ptrdiff_t, int64_t*/
const void *handles[] = {
(void *)AtomicIncKernel<unsigned int>,
(void *)AtomicIncKernel<unsigned long, unsigned long long>,
(void *)AtomicIncKernel<unsigned long long>,
(void *)AtomicIncKernel<int32_t, int>,
0 /*AtomicIncKernel<int64_t>*/,
(void *)AtomicIncKernel<uint32_t, unsigned int>,
(void *)AtomicIncKernel<uint64_t, unsigned long long int>,
(void *)AtomicIncKernel<int>,
(void *)AtomicIncKernel<long, int>,
0 /*AtomicIncKernel<long long>*/,
(void *)AtomicIncKernel<size_t, unsigned long long int>,
0 /*AtomicIncKernel<ptrdiff_t>*/
};
return nvshmemi_p2p_amo_standard(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_add(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
/*XXX not implemented types : long long, ptrdiff_t, int64_t*/
const void *handles[] = {
(void *)AtomicAddKernel<unsigned int>,
(void *)AtomicAddKernel<unsigned long, unsigned long long>,
(void *)AtomicAddKernel<unsigned long long>,
(void *)AtomicAddKernel<int32_t, int>,
0 /*AtomicAddKernel<int64_t>*/,
(void *)AtomicAddKernel<uint32_t, unsigned int>,
(void *)AtomicAddKernel<uint64_t, unsigned long long int>,
(void *)AtomicAddKernel<int>,
(void *)AtomicAddKernel<long, int>,
0 /*AtomicAddKernel<long long>*/,
(void *)AtomicAddKernel<size_t, unsigned long long int>,
0 /*AtomicAddKernel<ptrdiff_t>*/
};
return nvshmemi_p2p_amo_standard(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_and(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicAndKernel<unsigned int>,
(void *)AtomicAndKernel<unsigned long, unsigned long long>,
(void *)AtomicAndKernel<unsigned long long>,
(void *)AtomicAndKernel<int32_t, int>,
(void *)AtomicAndKernel<int64_t, unsigned long long int>,
(void *)AtomicAndKernel<uint32_t, unsigned int>,
(void *)AtomicAndKernel<uint64_t, unsigned long long int>};
return nvshmemi_p2p_amo_bitwise(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_or(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicOrKernel<unsigned int>,
(void *)AtomicOrKernel<unsigned long, unsigned long long>,
(void *)AtomicOrKernel<unsigned long long>,
(void *)AtomicOrKernel<int32_t, int>,
(void *)AtomicOrKernel<int64_t, unsigned long long int>,
(void *)AtomicOrKernel<uint32_t, unsigned int>,
(void *)AtomicOrKernel<uint64_t, unsigned long long int>};
return nvshmemi_p2p_amo_bitwise(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_xor(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicXorKernel<unsigned int>,
(void *)AtomicXorKernel<unsigned long, unsigned long long>,
(void *)AtomicXorKernel<unsigned long long>,
(void *)AtomicXorKernel<int32_t, int>,
(void *)AtomicXorKernel<int64_t, unsigned long long int>,
(void *)AtomicXorKernel<uint32_t, unsigned int>,
(void *)AtomicXorKernel<uint64_t, unsigned long long int>};
return nvshmemi_p2p_amo_bitwise(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_fetch(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicFetchKernel<unsigned int>,
(void *)AtomicFetchKernel<unsigned long, unsigned long long>,
(void *)AtomicFetchKernel<unsigned long long>,
(void *)AtomicFetchKernel<int32_t, int>,
(void *)AtomicFetchKernel<int64_t, unsigned long long int>,
(void *)AtomicFetchKernel<uint32_t, unsigned int>,
(void *)AtomicFetchKernel<uint64_t, unsigned long long int>,
(void *)AtomicFetchKernel<int>,
(void *)AtomicFetchKernel<long, int>,
(void *)AtomicFetchKernel<long long, unsigned long long int>,
(void *)AtomicFetchKernel<size_t, unsigned long long int>,
(void *)AtomicFetchKernel<ptrdiff_t, unsigned long long int>,
(void *)AtomicFetchKernel<float, unsigned int>,
(void *)AtomicFetchKernel<double, unsigned long long int>};
return nvshmemi_p2p_amo_extended(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_fetch_inc(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
/*XXX not implemented types : long long, ptrdiff_t, int64_t*/
const void *handles[] = {
(void *)AtomicFincKernel<unsigned int>,
(void *)AtomicFincKernel<unsigned long, unsigned long long>,
(void *)AtomicFincKernel<unsigned long long>,
(void *)AtomicFincKernel<int32_t, int>,
0 /*AtomicFincKernel<int64_t>*/,
(void *)AtomicFincKernel<uint32_t, unsigned int>,
(void *)AtomicFincKernel<uint64_t, unsigned long long int>,
(void *)AtomicFincKernel<int>,
(void *)AtomicFincKernel<long, int>,
0 /*AtomicFincKernel<long long>*/,
(void *)AtomicFincKernel<size_t, unsigned long long int>,
0 /*AtomicFincKernel<ptrdiff_t>*/
};
return nvshmemi_p2p_amo_standard(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_fetch_add(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
/*XXX not implemented types : long long, ptrdiff_t, int64_t*/
const void *handles[] = {
(void *)AtomicFaddKernel<unsigned int>,
(void *)AtomicFaddKernel<unsigned long, unsigned long long>,
(void *)AtomicFaddKernel<unsigned long long>,
(void *)AtomicFaddKernel<int32_t, int>,
0 /*AtomicFaddKernel<int64_t>*/,
(void *)AtomicFaddKernel<uint32_t, unsigned int>,
(void *)AtomicFaddKernel<uint64_t, unsigned long long int>,
(void *)AtomicFaddKernel<int>,
(void *)AtomicFaddKernel<long, int>,
0 /*AtomicFaddKernel<long long>*/,
(void *)AtomicFaddKernel<size_t, unsigned long long int>,
0 /*AtomicFaddKernel<ptrdiff_t>*/
};
return nvshmemi_p2p_amo_standard(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_swap(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicSwapKernel<unsigned int>,
(void *)AtomicSwapKernel<unsigned long, unsigned long long>,
(void *)AtomicSwapKernel<unsigned long long>,
(void *)AtomicSwapKernel<int32_t, int>,
(void *)AtomicSwapKernel<int64_t, unsigned long long int>,
(void *)AtomicSwapKernel<uint32_t, unsigned int>,
(void *)AtomicSwapKernel<uint64_t, unsigned long long int>,
(void *)AtomicSwapKernel<int>,
(void *)AtomicSwapKernel<long, int>,
(void *)AtomicSwapKernel<long long, unsigned long long int>,
(void *)AtomicSwapKernel<size_t, unsigned long long int>,
(void *)AtomicSwapKernel<ptrdiff_t, unsigned long long int>,
(void *)AtomicSwapKernel<float, unsigned int>,
(void *)AtomicSwapKernel<double, unsigned long long int>};
return nvshmemi_p2p_amo_extended(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_compare_swap(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicCswapKernel<unsigned int>,
(void *)AtomicCswapKernel<unsigned long, unsigned long long>,
(void *)AtomicCswapKernel<unsigned long long>,
(void *)AtomicCswapKernel<int32_t, int>,
(void *)AtomicCswapKernel<int64_t, unsigned long long int>,
(void *)AtomicCswapKernel<uint32_t, unsigned int>,
(void *)AtomicCswapKernel<uint64_t, unsigned long long int>,
(void *)AtomicCswapKernel<int>,
(void *)AtomicCswapKernel<long, int>,
(void *)AtomicCswapKernel<long long, unsigned long long int>,
(void *)AtomicCswapKernel<size_t, unsigned long long int>,
(void *)AtomicCswapKernel<ptrdiff_t, unsigned long long int>};
return nvshmemi_p2p_amo_standard(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_fetch_and(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicFandKernel<unsigned int>,
(void *)AtomicFandKernel<unsigned long, unsigned long long>,
(void *)AtomicFandKernel<unsigned long long>,
(void *)AtomicFandKernel<int32_t, int>,
(void *)AtomicFandKernel<int64_t, unsigned long long int>,
(void *)AtomicFandKernel<uint32_t, unsigned int>,
(void *)AtomicFandKernel<uint64_t, unsigned long long int>};
return nvshmemi_p2p_amo_bitwise(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_fetch_or(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicForKernel<unsigned int>,
(void *)AtomicForKernel<unsigned long, unsigned long long>,
(void *)AtomicForKernel<unsigned long long>,
(void *)AtomicForKernel<int32_t, int>,
(void *)AtomicForKernel<int64_t, unsigned long long int>,
(void *)AtomicForKernel<uint32_t, unsigned int>,
(void *)AtomicForKernel<uint64_t, unsigned long long int>};
return nvshmemi_p2p_amo_bitwise(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo_fetch_xor(amo_verb_t verb, CUstream custrm, void *targetptr, void *retptr,
void *curetptr, void *valptr, void *cmpptr,
amo_bytesdesc_t bytesdesc) {
const void *handles[] = {(void *)AtomicFxorKernel<unsigned int>,
(void *)AtomicFxorKernel<unsigned long, unsigned long long>,
(void *)AtomicFxorKernel<unsigned long long>,
(void *)AtomicFxorKernel<int32_t, int>,
(void *)AtomicFxorKernel<int64_t, unsigned long long int>,
(void *)AtomicFxorKernel<uint32_t, unsigned int>,
(void *)AtomicFxorKernel<uint64_t, unsigned long long int>};
return nvshmemi_p2p_amo_bitwise(verb, custrm, targetptr, retptr, curetptr, valptr, cmpptr,
bytesdesc, handles);
}
static int nvshmemi_p2p_amo(CUstream custrm, CUevent cuev, void *curetptr, amo_verb_t verb,
amo_memdesc_t target, amo_bytesdesc_t bytesdesc) {
int status = 0;
switch (verb.desc) {
/*ret NULL*/
case NVSHMEMI_AMO_SET:
status = nvshmemi_p2p_amo_set(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_INC:
status =
nvshmemi_p2p_amo_inc(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*val, cmp NULL*/
break;
case NVSHMEMI_AMO_ADD:
status = nvshmemi_p2p_amo_add(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_AND:
status = nvshmemi_p2p_amo_and(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_OR:
status = nvshmemi_p2p_amo_or(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_XOR:
status = nvshmemi_p2p_amo_xor(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
/*ret !NULL*/
case NVSHMEMI_AMO_FETCH:
status =
nvshmemi_p2p_amo_fetch(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*val, cmp NULL*/
break;
case NVSHMEMI_AMO_FETCH_INC:
status =
nvshmemi_p2p_amo_fetch_inc(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*val, cmp NULL*/
break;
case NVSHMEMI_AMO_FETCH_ADD:
status = nvshmemi_p2p_amo_fetch_add(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_SWAP:
status = nvshmemi_p2p_amo_swap(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_COMPARE_SWAP:
status = nvshmemi_p2p_amo_compare_swap(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc);
break;
case NVSHMEMI_AMO_FETCH_AND:
status = nvshmemi_p2p_amo_fetch_and(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_FETCH_OR:
status = nvshmemi_p2p_amo_fetch_or(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
case NVSHMEMI_AMO_FETCH_XOR:
status = nvshmemi_p2p_amo_fetch_xor(verb, custrm, target.ptr, target.retptr, curetptr,
target.valptr, target.cmpptr, bytesdesc); /*cmp NULL*/
break;
}
return status;
}
static void nvshmemi_prepare_and_post_amo(nvshmemi_amo_t desc, void *targetptr, void *retptr,
void *valptr, void *cmpptr, size_t elembytes, int pe,
int nameoftype, const char *apiname) {
int status = 0;
amo_verb_t verb;
amo_memdesc_t target;
amo_bytesdesc_t bytesdesc;
verb.desc = desc;
switch (desc) {
case NVSHMEMI_AMO_INC:
verb.is_val = 0;
verb.is_cmp = 0;
verb.is_fetch = 0;
break;
case NVSHMEMI_AMO_SET:
case NVSHMEMI_AMO_ADD:
case NVSHMEMI_AMO_AND:
case NVSHMEMI_AMO_OR:
case NVSHMEMI_AMO_XOR:
verb.is_val = 1;
verb.is_cmp = 0;
verb.is_fetch = 0;
break;
case NVSHMEMI_AMO_FETCH:
case NVSHMEMI_AMO_FETCH_INC:
verb.is_val = 0;
verb.is_cmp = 0;
verb.is_fetch = 1;
break;
case NVSHMEMI_AMO_SWAP:
case NVSHMEMI_AMO_FETCH_ADD:
case NVSHMEMI_AMO_FETCH_AND:
case NVSHMEMI_AMO_FETCH_OR:
case NVSHMEMI_AMO_FETCH_XOR:
verb.is_val = 1;
verb.is_cmp = 0;
verb.is_fetch = 1;
break;
case NVSHMEMI_AMO_COMPARE_SWAP:
verb.is_val = 1;
verb.is_cmp = 1;
verb.is_fetch = 1;
break;
}
bytesdesc.elembytes = elembytes;
bytesdesc.name_type = nameoftype;
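    /* Translate the symmetric-heap address into the peer's mapping: the offset of
     * targetptr within the local heap is applied to peer_heap_base[pe]. This relies on
     * the symmetric heap having the same layout on every PE. A non-NULL result means
     * pe is reachable over P2P; otherwise the AMO falls back to the selected remote
     * transport further below. */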
volatile void *targetptr_actual =
(volatile void *)((char *)(nvshmem_state->peer_heap_base[pe]) +
((char *)targetptr - (char *)(nvshmem_state->heap_base)));
target.ptr = (void *)targetptr_actual;
target.retptr = retptr;
target.valptr = valptr;
target.cmpptr = cmpptr;
void *curetptr = (void *)nvshmem_state->curets[pe];
if (targetptr_actual) {
CUstream custrm = nvshmem_state->custreams[pe % MAX_PEER_STREAMS];
CUevent cuev = nvshmem_state->cuevents[pe % MAX_PEER_STREAMS];
if (nvshmem_state
->p2p_attrib_native_atomic_support[pe]) { /*AMO not supported for P2P over PCIE*/
status = nvshmemi_p2p_amo(custrm, cuev, curetptr, verb, target,
bytesdesc); /*bypass transport for P2P*/
} else {
ERROR_PRINT("[%d] %s to PE %d does not have P2P path\n", nvshmem_state->mype, apiname,
pe);
}
} else {
int t = nvshmem_state->selected_transport_for_amo[pe];
if (t < 0) {
ERROR_EXIT("[%d] amo not supported on transport to pe: %d \n", nvshmem_state->mype, pe);
}
nvshmemt_ep_t ep;
int tcount = nvshmem_state->transport_count;
struct nvshmem_transport *tcurr = nvshmem_state->transports[t];
int ep_offset = pe * tcurr->ep_count;
ep = tcurr->ep[ep_offset];
nvshmem_mem_handle_t *handles = nvshmem_state->handles;
target.handle = handles[pe * tcount + t];
status = nvshmem_state->amo[pe](ep, curetptr, verb, target, bytesdesc);
}
if (status) {
ERROR_EXIT("[%d] aborting due to error in %s \n", nvshmem_state->mype, apiname);
}
}
#define NVSHMEM_TYPE_INC(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_inc(TYPE *target, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_INC, (void *)target, 0, 0, 0, sizeof(TYPE), pe, NameIdx, \
"nvshmem_" #Name "_atomic_inc"); \
}
#define NVSHMEM_TYPE_INC_NOT_IMPLEMENTED(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_inc(TYPE *target, int pe) { \
ERROR_PRINT("[%d] nvshmem_" #Name "_atomic_inc() not implemented", nvshmem_state->mype); \
}
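/* NVSHMEM_TYPE_INC(Name, ...) generates the real host entry point, while the
 * _NOT_IMPLEMENTED variant only logs an error; it is used for the types (int64,
 * long long, ptrdiff_t) for which no device increment kernel is instantiated in
 * the handle tables above (the 0 entries in nvshmemi_p2p_amo_inc). */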
NVSHMEM_TYPE_INC(uint, UINT, unsigned int)
NVSHMEM_TYPE_INC(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_INC(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_INC(int32, INT32, int32_t)
NVSHMEM_TYPE_INC(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_INC_NOT_IMPLEMENTED(int64, INT64, int64_t) /*XXX:not implemented*/
NVSHMEM_TYPE_INC(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_INC(int, INT, int)
NVSHMEM_TYPE_INC(long, LONG, long)
NVSHMEM_TYPE_INC_NOT_IMPLEMENTED(longlong, LONGLONG, long long) /*XXX:not implemented*/
NVSHMEM_TYPE_INC(size, SIZE, size_t)
NVSHMEM_TYPE_INC_NOT_IMPLEMENTED(ptrdiff, PTRDIFF, ptrdiff_t) /*XXX:not implemented*/
#define NVSHMEM_TYPE_ADD(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_add(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_ADD, (void *)target, 0, &value, 0, sizeof(TYPE), pe, \
NameIdx, "nvshmem_" #Name "_atomic_add"); \
}
#define NVSHMEM_TYPE_ADD_NOT_IMPLEMENTED(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_add(TYPE *target, TYPE value, int pe) { \
ERROR_PRINT("[%d] nvshmem_" #Name "_atomic_add() not implemented", nvshmem_state->mype); \
}
NVSHMEM_TYPE_ADD(uint, UINT, unsigned int)
NVSHMEM_TYPE_ADD(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_ADD(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_ADD(int32, INT32, int32_t)
NVSHMEM_TYPE_ADD(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_ADD_NOT_IMPLEMENTED(int64, INT64, int64_t) /*XXX:not implemented*/
NVSHMEM_TYPE_ADD(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_ADD(int, INT, int)
NVSHMEM_TYPE_ADD(long, LONG, long)
NVSHMEM_TYPE_ADD_NOT_IMPLEMENTED(longlong, LONGLONG, long long) /*XXX:not implemented*/
NVSHMEM_TYPE_ADD(size, SIZE, size_t)
NVSHMEM_TYPE_ADD_NOT_IMPLEMENTED(ptrdiff, PTRDIFF, ptrdiff_t) /*XXX:not implemented*/
#define NVSHMEM_TYPE_SET(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_set(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_SET, (void *)target, 0, &value, 0, sizeof(TYPE), pe, \
NameIdx, "nvshmem_" #Name "_atomic_set"); \
}
NVSHMEM_TYPE_SET(uint, UINT, unsigned int)
NVSHMEM_TYPE_SET(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_SET(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_SET(int32, INT32, int32_t)
NVSHMEM_TYPE_SET(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_SET(int64, INT64, int64_t)
NVSHMEM_TYPE_SET(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_SET(int, INT, int)
NVSHMEM_TYPE_SET(long, LONG, long)
NVSHMEM_TYPE_SET(longlong, LONGLONG, long long)
NVSHMEM_TYPE_SET(size, SIZE, size_t)
NVSHMEM_TYPE_SET(ptrdiff, PTRDIFF, ptrdiff_t)
NVSHMEM_TYPE_SET(float, FLOAT, float)
NVSHMEM_TYPE_SET(double, DOUBLE, double)
#define NVSHMEM_TYPE_AND(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_and(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_AND, (void *)target, 0, &value, 0, sizeof(TYPE), pe, \
NameIdx, "nvshmem_" #Name "_atomic_and"); \
}
NVSHMEM_TYPE_AND(uint, UINT, unsigned int)
NVSHMEM_TYPE_AND(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_AND(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_AND(int32, INT32, int32_t)
NVSHMEM_TYPE_AND(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_AND(int64, INT64, int64_t)
NVSHMEM_TYPE_AND(uint64, UINT64, uint64_t)
#define NVSHMEM_TYPE_OR(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_or(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_OR, (void *)target, 0, &value, 0, sizeof(TYPE), pe, NameIdx, \
"nvshmem_" #Name "_atomic_or"); \
}
NVSHMEM_TYPE_OR(uint, UINT, unsigned int)
NVSHMEM_TYPE_OR(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_OR(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_OR(int32, INT32, int32_t)
NVSHMEM_TYPE_OR(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_OR(int64, INT64, int64_t)
NVSHMEM_TYPE_OR(uint64, UINT64, uint64_t)
#define NVSHMEM_TYPE_XOR(Name, NameIdx, TYPE) \
void nvshmem_##Name##_atomic_xor(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_XOR, (void *)target, 0, &value, 0, sizeof(TYPE), pe, \
NameIdx, "nvshmem_" #Name "_atomic_xor"); \
}
NVSHMEM_TYPE_XOR(uint, UINT, unsigned int)
NVSHMEM_TYPE_XOR(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_XOR(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_XOR(int32, INT32, int32_t)
NVSHMEM_TYPE_XOR(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_XOR(int64, INT64, int64_t)
NVSHMEM_TYPE_XOR(uint64, UINT64, uint64_t)
#define NVSHMEM_TYPE_FETCH(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch(TYPE *target, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_FETCH, (void *)target, (void *)&ret, 0, 0, sizeof(TYPE), pe, \
NameIdx, "nvshmem_" #Name "_atomic_fetch"); \
return ret; \
}
NVSHMEM_TYPE_FETCH(uint, UINT, unsigned int)
NVSHMEM_TYPE_FETCH(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_FETCH(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_FETCH(int32, INT32, int32_t)
NVSHMEM_TYPE_FETCH(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_FETCH(int64, INT64, int64_t)
NVSHMEM_TYPE_FETCH(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_FETCH(int, INT, int)
NVSHMEM_TYPE_FETCH(long, LONG, long)
NVSHMEM_TYPE_FETCH(longlong, LONGLONG, long long)
NVSHMEM_TYPE_FETCH(size, SIZE, size_t)
NVSHMEM_TYPE_FETCH(ptrdiff, PTRDIFF, ptrdiff_t)
NVSHMEM_TYPE_FETCH(float, FLOAT, float)
NVSHMEM_TYPE_FETCH(double, DOUBLE, double)
#define NVSHMEM_TYPE_FETCH_INC(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch_inc(TYPE *target, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_FETCH_INC, (void *)target, (void *)&ret, 0, 0, sizeof(TYPE), \
pe, NameIdx, "nvshmem_" #Name "_atomic_fetch_inc"); \
return ret; \
}
#define NVSHMEM_TYPE_FETCH_INC_NOT_IMPLEMENTED(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch_inc(TYPE *target, int pe) { \
ERROR_PRINT("[%d] nvshmem_" #Name "_atomic_fetch_inc() not implemented", nvshmem_state->mype); \
return 0; \
}
NVSHMEM_TYPE_FETCH_INC(uint, UINT, unsigned int)
NVSHMEM_TYPE_FETCH_INC(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_FETCH_INC(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_FETCH_INC(int32, INT32, int32_t)
NVSHMEM_TYPE_FETCH_INC(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_FETCH_INC_NOT_IMPLEMENTED(int64, INT64, int64_t) /*XXX:not implemented*/
NVSHMEM_TYPE_FETCH_INC(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_FETCH_INC(int, INT, int)
NVSHMEM_TYPE_FETCH_INC(long, LONG, long)
NVSHMEM_TYPE_FETCH_INC_NOT_IMPLEMENTED(longlong, LONGLONG, long long) /*XXX:not implemented*/
NVSHMEM_TYPE_FETCH_INC(size, SIZE, size_t)
NVSHMEM_TYPE_FETCH_INC_NOT_IMPLEMENTED(ptrdiff, PTRDIFF, ptrdiff_t) /*XXX:not implemented*/
#define NVSHMEM_TYPE_FETCH_ADD(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch_add(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_FETCH_ADD, (void *)target, (void *)&ret, &value, 0, \
sizeof(TYPE), pe, NameIdx, "nvshmem_" #Name "_atomic_fetch_add"); \
return ret; \
}
#define NVSHMEM_TYPE_FETCH_ADD_NOT_IMPLEMENTED(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch_add(TYPE *target, TYPE value, int pe) { \
ERROR_PRINT("[%d] nvshmem_" #Name "_atomic_fadd() not implemented", nvshmem_state->mype); \
return 0; \
}
NVSHMEM_TYPE_FETCH_ADD(uint, UINT, unsigned int)
NVSHMEM_TYPE_FETCH_ADD(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_FETCH_ADD(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_FETCH_ADD(int32, INT32, int32_t)
NVSHMEM_TYPE_FETCH_ADD(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_FETCH_ADD_NOT_IMPLEMENTED(int64, INT64, int64_t) /*XXX:not implemented*/
NVSHMEM_TYPE_FETCH_ADD(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_FETCH_ADD(int, INT, int)
NVSHMEM_TYPE_FETCH_ADD(long, LONG, long)
NVSHMEM_TYPE_FETCH_ADD_NOT_IMPLEMENTED(longlong, LONGLONG, long long) /*XXX:not implemented*/
NVSHMEM_TYPE_FETCH_ADD(size, SIZE, size_t)
NVSHMEM_TYPE_FETCH_ADD_NOT_IMPLEMENTED(ptrdiff, PTRDIFF, ptrdiff_t) /*XXX:not implemented*/
#define NVSHMEM_TYPE_SWAP(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_swap(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_SWAP, (void *)target, (void *)&ret, &value, 0, sizeof(TYPE), \
pe, NameIdx, "nvshmem_" #Name "_atomic_swap"); \
return ret; \
}
NVSHMEM_TYPE_SWAP(uint, UINT, unsigned int)
NVSHMEM_TYPE_SWAP(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_SWAP(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_SWAP(int32, INT32, int32_t)
NVSHMEM_TYPE_SWAP(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_SWAP(int64, INT64, int64_t)
NVSHMEM_TYPE_SWAP(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_SWAP(int, INT, int)
NVSHMEM_TYPE_SWAP(long, LONG, long)
NVSHMEM_TYPE_SWAP(longlong, LONGLONG, long long)
NVSHMEM_TYPE_SWAP(size, SIZE, size_t)
NVSHMEM_TYPE_SWAP(ptrdiff, PTRDIFF, ptrdiff_t)
NVSHMEM_TYPE_SWAP(float, FLOAT, float)
NVSHMEM_TYPE_SWAP(double, DOUBLE, double)
#define NVSHMEM_TYPE_COMPARE_SWAP(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_compare_swap(TYPE *target, TYPE cond, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_COMPARE_SWAP, (void *)target, (void *)&ret, &value, &cond, \
sizeof(TYPE), pe, NameIdx, "nvshmem_" #Name "atomic_compare_swap"); \
return ret; \
}
NVSHMEM_TYPE_COMPARE_SWAP(uint, UINT, unsigned int)
NVSHMEM_TYPE_COMPARE_SWAP(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_COMPARE_SWAP(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_COMPARE_SWAP(int32, INT32, int32_t)
NVSHMEM_TYPE_COMPARE_SWAP(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_COMPARE_SWAP(int64, INT64, int64_t)
NVSHMEM_TYPE_COMPARE_SWAP(uint64, UINT64, uint64_t)
NVSHMEM_TYPE_COMPARE_SWAP(int, INT, int)
NVSHMEM_TYPE_COMPARE_SWAP(long, LONG, long)
NVSHMEM_TYPE_COMPARE_SWAP(longlong, LONGLONG, long long)
NVSHMEM_TYPE_COMPARE_SWAP(size, SIZE, size_t)
NVSHMEM_TYPE_COMPARE_SWAP(ptrdiff, PTRDIFF, ptrdiff_t)
#define NVSHMEM_TYPE_FETCH_AND(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch_and(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_FETCH_AND, (void *)target, (void *)&ret, &value, 0, \
sizeof(TYPE), pe, NameIdx, "nvshmem_" #Name "_atomic_fetch_and"); \
return ret; \
}
NVSHMEM_TYPE_FETCH_AND(uint, UINT, unsigned int)
NVSHMEM_TYPE_FETCH_AND(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_FETCH_AND(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_FETCH_AND(int32, INT32, int32_t)
NVSHMEM_TYPE_FETCH_AND(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_FETCH_AND(int64, INT64, int64_t)
NVSHMEM_TYPE_FETCH_AND(uint64, UINT64, uint64_t)
#define NVSHMEM_TYPE_FETCH_OR(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch_or(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_FETCH_OR, (void *)target, (void *)&ret, &value, 0, \
sizeof(TYPE), pe, NameIdx, "nvshmem_" #Name "_atomic_fetch_or"); \
return ret; \
}
NVSHMEM_TYPE_FETCH_OR(uint, UINT, unsigned int)
NVSHMEM_TYPE_FETCH_OR(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_FETCH_OR(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_FETCH_OR(int32, INT32, int32_t)
NVSHMEM_TYPE_FETCH_OR(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_FETCH_OR(int64, INT64, int64_t)
NVSHMEM_TYPE_FETCH_OR(uint64, UINT64, uint64_t)
#define NVSHMEM_TYPE_FETCH_XOR(Name, NameIdx, TYPE) \
TYPE nvshmem_##Name##_atomic_fetch_xor(TYPE *target, TYPE value, int pe) { \
NVSHMEM_CHECK_STATE_AND_INIT(); \
TYPE ret; \
nvshmemi_prepare_and_post_amo(NVSHMEMI_AMO_FETCH_XOR, (void *)target, (void *)&ret, &value, 0, \
sizeof(TYPE), pe, NameIdx, "nvshmem_" #Name "_atomic_fetch_xor"); \
return ret; \
}
NVSHMEM_TYPE_FETCH_XOR(uint, UINT, unsigned int)
NVSHMEM_TYPE_FETCH_XOR(ulong, ULONG, unsigned long)
NVSHMEM_TYPE_FETCH_XOR(ulonglong, ULONGLONG, unsigned long long)
NVSHMEM_TYPE_FETCH_XOR(int32, INT32, int32_t)
NVSHMEM_TYPE_FETCH_XOR(uint32, UINT32, uint32_t)
NVSHMEM_TYPE_FETCH_XOR(int64, INT64, int64_t)
NVSHMEM_TYPE_FETCH_XOR(uint64, UINT64, uint64_t)
|
bac426075b92993a65c9643df13d1b49c46f0593.hip | // !!! This is a file automatically generated by hipify!!!
// To Compile it
// gcc -m32 -o test.out test.c sac.a
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <unistd.h>
#include <math.h>
#include "crsmex.h"
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hip/hip_complex.h>
extern "C"{
#include <sacio.h>
#include <sac.h>
}
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %dn", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/* Define the maximum length of the data array */
#define MAX_ARRAY 100000
#define NSAC 20
#define N_FILENAME 100
#define MAX_PATH 100
#define GRID_SIZE 1
#define BLOCK_SIZE 4
//char *strstrip(char *s);    // Deletes trailing characters when reading filenames. Similar to .rstrip() in Python.
void usage(); // Show usage
void print_array(float *array, int nsac, int npts, int step);
void print_array(float **array, int M, int N);
void print_fft( hipfftComplex *fft, int batch, int size_fft);
void check_gpu_card_type(void);
void plot_array(float **array, int M, int N);
void plot_fft(int N);
void run_unit_test();
const char CONFIG_FILENAME[]="config.conf";
__global__ void find_repeaters(float *data, int npts);
int main(int argc, char **argv)
{
/* Define variables to be used in the call to rsac1() */
float yarray[MAX_ARRAY];
float beg, del;
int nlen, nerr, max = MAX_ARRAY, opt = 0;
float *data;
char kname[ N_FILENAME ] ;
char infilename[ N_FILENAME ] ;
FILE *fid;
size_t len=0;
int count=0;
int win_size=512;
hipfftReal *device_data;
char *line;
size_t line_size = 100;
/* Filtering variables */
struct config_filter configstruct;
configstruct = get_config(CONFIG_FILENAME);
/* CUDA configuration */
int grdSize = GRID_SIZE;
int blockSize = BLOCK_SIZE;
dim3 dimGrid(grdSize, grdSize, grdSize);
dim3 dimBlock(blockSize, blockSize, blockSize);
/*
printf("Low(int) = %f\n",configstruct.low);
printf("High(int) = %f\n",configstruct.high);
printf("Attenuation(int) = %f\n",configstruct.attenuation);
printf("Transition Band(int) = %f\n",configstruct.transition_band);
printf("Npoles = %d\n",configstruct.npoles);
printf("passes = %d\n",configstruct.passes);
*/
if( argc == 1 ) {
usage();
exit(-1);
}
    // Check if a GPU card is available.
check_gpu_card_type();
// Retrieve input parameters
while((opt = getopt(argc, argv, "f:t")) != -1){
switch(opt){
case 't':
run_unit_test();
exit(-1);
break;
case 'f':
strncpy(infilename, optarg, MAX_PATH);
break;
default:
fprintf(stderr, "Unknown option %c\n\n",opt);
usage();
exit(-1);
}
}
line = (char *)malloc(line_size * sizeof(char));
// for (int i=0; i<NSAC; i++)
// data[i] = (float *)malloc( MAX_ARRAY * sizeof(float));
data = (float *)malloc(NSAC * MAX_ARRAY * sizeof(float));
// Read input filenames.
fid = fopen(infilename,"r");
if (fid == NULL){
fprintf(stderr,"Couldn't open file %s\n",infilename);
exit(-1);
}
// Read sac files into host memory.
while (getline(&line, &len, fid) != -1)
{
line = strstrip(line);
strcpy ( kname ,line ) ;
rsac1( kname, yarray, &nlen, &beg, &del, &max, &nerr, strlen( kname ) ) ;
if ( nerr != 0 ) {
fprintf(stderr, "Error reading in SAC file: %s\n", kname);
exit ( nerr ) ;
}
else {
fprintf(stderr,"Reading SUCCESS: %s\n",kname);
fprintf(stderr,"Number of samples read: %d\n\n",nlen);
}
/* START - FILTERING */
/* Call xapiir ( Apply a IIR Filter )
* - yarray - Original Data
* - nlen - Number of points in yarray
* - proto - Prototype of Filter
* - SAC_FILTER_BUTTERWORK - Butterworth
* - SAC_FILTER_BESSEL - Bessel
* - SAC_FILTER_CHEBYSHEV_TYPE_I - Chebyshev Type I
* - SAC_FILTER_CHEBYSHEV_TYPE_II - Chebyshev Type II
* - transition_bandwidth (Only for Chebyshev Filter)
* - Bandwidth as a fraction of the lowpass prototype
* cutoff frequency
* - attenuation (Only for Chebyshev Filter)
* - Attenuation factor, equals amplitude reached at
     *                                   stopband edge
* - order - Number of poles or order of the analog prototype
* 4 - 5 should be ample
* Cannot exceed 10
* - type - Type of Filter
* - SAC_FILTER_BANDPASS
* - SAC_FILTER_BANDREJECT
* - SAC_FILTER_LOWPASS
* - SAC_FILTER_HIGHPASS
* - low - Low Frequency Cutoff [ Hertz ]
* Ignored on SAC_FILTER_LOWPASS
* - high - High Frequency Cutoff [ Hertz ]
* Ignored on SAC_FILTER_HIGHPASS
* - delta - Sampling Interval [ seconds ]
* - passes - Number of passes
* - 1 Forward filter only
* - 2 Forward and reverse (i.e. zero-phase) filtering
*/
/*
xapiir(yarray, nlen, (char *)SAC_BUTTERWORTH,
configstruct.transition_band, configstruct.attenuation,
configstruct.npoles,
(char *)SAC_HIGHPASS,
configstruct.low, configstruct.high,
del, configstruct.passes);
END */
memcpy(&data[count*MAX_ARRAY], yarray, nlen*sizeof(float));
count++;
}
/* CUDA FFT */
hipfftHandle plan;
hipfftComplex *fft_data;
cuFloatComplex *fft_data_conj;
hipfftComplex *hostOutputFFT;
int rank = 1; // --- 1D FFTs
int n[] = { nlen }; // --- Size of the Fourier transform
int istride = 1, ostride = 1; // --- Distance between two successive input/output elements
int idist = MAX_ARRAY, odist = (nlen / 2 + 1); // --- Distance between batches
int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms)
int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms)
int size_fft = (win_size / 2 + 1);
int batch = count; // --- Number of batched executions
printf(" ********** CONFG *********\n");
printf(" rank = %d\n", rank );
printf(" n[0] = %d\n", n[0] );
printf(" inembed = %d\n", inembed[0] );
printf(" istride = %d\n", istride );
printf(" onembed = %d\n", onembed[0] );
printf(" ostride = %d\n", ostride );
printf(" odist = %d\n", odist );
printf(" batch = %d\n", batch );
printf(" count = %d\n", count );
printf(" size_fft = %d\n", size_fft );
printf(" **************************\n");
    // Initializing device data for fft processing
gpuErrchk(hipMalloc((void**)&device_data, MAX_ARRAY * count * sizeof(hipfftReal )));
gpuErrchk(hipMalloc((void**)&fft_data, size_fft * count * sizeof(hipfftComplex)));
gpuErrchk(hipMalloc((void**)&fft_data_conj, size_fft * count * sizeof(hipfftComplex)));
//fft_data_conj = cuConjf(fft_data);
hostOutputFFT = (hipfftComplex*)malloc( size_fft * count * sizeof(hipfftComplex));
gpuErrchk(hipMemcpy(device_data, data, MAX_ARRAY * count * sizeof(float) , hipMemcpyHostToDevice));
hipfftPlanMany(&plan, rank, n,
inembed, istride, idist,
onembed, ostride, odist, HIPFFT_R2C, batch);
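    /* Batched 1D R2C plan using the advanced data layout: batch = count transforms of
     * length nlen each. Input trace b starts at device_data[b * idist] with unit stride
     * (idist = MAX_ARRAY matches how the traces were packed on the host), and output b
     * starts at fft_data[b * odist] with odist = nlen/2 + 1 complex bins per transform. */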
hipfftExecR2C(plan, device_data, fft_data);
hipfftDestroy(plan);
gpuErrchk(hipMemcpy(hostOutputFFT, fft_data, size_fft * count * sizeof(hipfftComplex), hipMemcpyDeviceToHost));
printf(" %f %f\n", hostOutputFFT[0].x,hostOutputFFT[0].y );
print_fft(hostOutputFFT, batch, size_fft);
plot_fft(batch);
//print_array(data,count,nlen);
/*
hipMemcpy2DToArray(device_data,
0,
0,
data,
MAX_ARRAY * sizeof(float),
nlen * sizeof(float),
count * sizeof(float), hipMemcpyHostToDevice);
*/
printf("n = %d\n", n[0]);
    hipLaunchKernelGGL(find_repeaters, dim3(count), dim3(nlen), 0, 0, device_data, nlen);
/* Closing */
gpuErrchk(hipFree(device_data));
gpuErrchk(hipFree(fft_data));
hipfftDestroy(plan);
free(data);
fclose(fid);
if (line)
free(line);
hipDeviceReset();
return EXIT_SUCCESS;
}
__global__ void find_repeaters(float *data,int npts){
__shared__ float* trace;
trace = (float *)malloc(npts*sizeof(float));
for(int currentBlockOfPoints = 0; currentBlockOfPoints < gridDim.x; currentBlockOfPoints++)
trace[threadIdx.x] = data[threadIdx.x + currentBlockOfPoints*npts];
}
/*
// Strips trailing characters
char *strstrip(char *s)
{
size_t size;
char *end;
size = strlen(s);
if (!size)
return s;
end = s + size - 1;
while (end >= s && isspace(*end))
end--;
*(end + 1) = '\0';
while (*s && isspace(*s))
s++;
return s;
}
*/
void usage(){
fprintf(stderr,"\nCUDA CRSMEX - Characteristic Repeating Earthquakes Code \n\n");
fprintf(stderr," This program looks for characteristic repeating earthquakes using GPU/CUDA\n");
fprintf(stderr," Required options:\n");
fprintf(stderr," -f filenames.dat - filenames.dat must containt a list of all files to be analyzed.\n\n");
fprintf(stderr," Author: Luis A. Dominguez - [email protected]\n\n");
}
void print_fft(hipfftComplex *fft, int batch, int size_fft)
{
FILE *fout;
// print out individual files
char filename[] = "outputX.dat";
for (int i = 0; i < batch; i++){
filename[6] = i + '0';
printf("Writting file: %s\n", filename);
fout = fopen(filename, "w");
fprintf(stdout, "data size = %d\n", size_fft);
for (int j = 0; j < size_fft; j++){
fprintf(fout, "%f %f %f \n", fft[i*size_fft + j].x, fft[i*size_fft + j].y,
sqrt(fft[i*size_fft + j].x*fft[i*size_fft + j].x + fft[i*size_fft + j].y*fft[i*size_fft + j].y));
}
fclose(fout);
}
}
//fclose(fout);
void plot_fft(int N)
{
FILE *gnuplot = NULL;
char filename[] = "outputX.dat";
gnuplot=popen("gnuplot","w");
fprintf(gnuplot,"set term postscript eps enhanced color\n");
for(int i=0; i<N; i++ ){
filename[6] = i + '0';
fprintf(stdout, "Plot array using gnuplot - %s\n", filename);
fprintf(gnuplot, "set logscale xz\n");
fprintf(gnuplot, "set output 'graphics_fft_%i.eps'\n", i);
fprintf(gnuplot, "plot '%s' u 3 with lines\n", filename);
fprintf(gnuplot, "set output\n");
fflush(gnuplot);
}
pclose(gnuplot);
}
void print_array(float **array, int M, int N)
{
FILE *fout;
fprintf(stdout,"M = %d\n",M);
fprintf(stdout,"N = %d\n",N);
fout = fopen("data.dat","w");
for (int i = 0; i < M; i++){
for (int j = 0; j < N; j++)
fprintf(fout,"%8.3f ",array[i][j]);
fprintf(fout,"\n");
}
fprintf(stdout, "Writing file data.dat\n");
fclose(fout);
}
void print_array(float *array, int nsac, int npts, int step)
{
FILE *fout;
fprintf(stdout,"nsac = %d\n", nsac);
fprintf(stdout,"npts = %d\n", npts);
fout = fopen("data.dat","w");
for (int i = 0; i < npts; i++){
for (int j = 0; j < nsac; j++)
fprintf(fout,"%8.3f ",array[j*step + i]);
fprintf(fout,"\n");
}
fprintf(stdout, "Writing file data.dat\n");
fclose(fout);
}
void run_unit_test(){
float *data;
int nsac = 0;
int npts = 0;
int N = 3; // Only memory for three waveforms is reserved.
char filename_test[]="./unit_test/unit_test.dat";
struct config_filter configstruct;
int win_size = 512;
configstruct = get_config(CONFIG_FILENAME);
fprintf(stdout,"\n*** RUNING TEST UNIT ***\n\n");
data = (float *)malloc(N * MAX_ARRAY * sizeof(float));
load_sac_in_host_memory(data, filename_test, &nsac, &npts, win_size, true, configstruct);
print_array(data, nsac, npts, MAX_ARRAY);
}
void check_gpu_card_type()
{
int nDevices;
hipGetDeviceCount(&nDevices);
if (nDevices == 0){
fprintf(stderr,"ERROR - No GPU card detected.\n");
exit(-1);
}
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf(" Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
| bac426075b92993a65c9643df13d1b49c46f0593.cu | // To Compile it
// gcc -m32 -o test.out test.c sac.a
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <unistd.h>
#include <math.h>
#include "crsmex.h"
#include <cuda.h>
#include <cufft.h>
#include <cuComplex.h>
extern "C"{
#include <sacio.h>
#include <sac.h>
}
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %dn", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/* Define the maximum length of the data array */
#define MAX_ARRAY 100000
#define NSAC 20
#define N_FILENAME 100
#define MAX_PATH 100
#define GRID_SIZE 1
#define BLOCK_SIZE 4
//char *strstrip(char *s);    // Deletes trailing characters when reading filenames. Similar to .rstrip() in Python.
void usage(); // Show usage
void print_array(float *array, int nsac, int npts, int step);
void print_array(float **array, int M, int N);
void print_fft( cufftComplex *fft, int batch, int size_fft);
void check_gpu_card_type(void);
void plot_array(float **array, int M, int N);
void plot_fft(int N);
void run_unit_test();
const char CONFIG_FILENAME[]="config.conf";
__global__ void find_repeaters(float *data, int npts);
int main(int argc, char **argv)
{
/* Define variables to be used in the call to rsac1() */
float yarray[MAX_ARRAY];
float beg, del;
int nlen, nerr, max = MAX_ARRAY, opt = 0;
float *data;
char kname[ N_FILENAME ] ;
char infilename[ N_FILENAME ] ;
FILE *fid;
size_t len=0;
int count=0;
int win_size=512;
cufftReal *device_data;
char *line;
size_t line_size = 100;
/* Filtering variables */
struct config_filter configstruct;
configstruct = get_config(CONFIG_FILENAME);
/* CUDA configuration */
int grdSize = GRID_SIZE;
int blockSize = BLOCK_SIZE;
dim3 dimGrid(grdSize, grdSize, grdSize);
dim3 dimBlock(blockSize, blockSize, blockSize);
/*
printf("Low(int) = %f\n",configstruct.low);
printf("High(int) = %f\n",configstruct.high);
printf("Attenuation(int) = %f\n",configstruct.attenuation);
printf("Transition Band(int) = %f\n",configstruct.transition_band);
printf("Npoles = %d\n",configstruct.npoles);
printf("passes = %d\n",configstruct.passes);
*/
if( argc == 1 ) {
usage();
exit(-1);
}
    // Check if a GPU card is available.
check_gpu_card_type();
// Retrieve input parameters
while((opt = getopt(argc, argv, "f:t")) != -1){
switch(opt){
case 't':
run_unit_test();
exit(-1);
break;
case 'f':
strncpy(infilename, optarg, MAX_PATH);
break;
default:
fprintf(stderr, "Unknown option %c\n\n",opt);
usage();
exit(-1);
}
}
line = (char *)malloc(line_size * sizeof(char));
// for (int i=0; i<NSAC; i++)
// data[i] = (float *)malloc( MAX_ARRAY * sizeof(float));
data = (float *)malloc(NSAC * MAX_ARRAY * sizeof(float));
// Read input filenames.
fid = fopen(infilename,"r");
if (fid == NULL){
fprintf(stderr,"Couldn't open file %s\n",infilename);
exit(-1);
}
// Read sac files into host memory.
while (getline(&line, &len, fid) != -1)
{
line = strstrip(line);
strcpy ( kname ,line ) ;
rsac1( kname, yarray, &nlen, &beg, &del, &max, &nerr, strlen( kname ) ) ;
if ( nerr != 0 ) {
fprintf(stderr, "Error reading in SAC file: %s\n", kname);
exit ( nerr ) ;
}
else {
fprintf(stderr,"Reading SUCCESS: %s\n",kname);
fprintf(stderr,"Number of samples read: %d\n\n",nlen);
}
/* START - FILTERING */
/* Call xapiir ( Apply a IIR Filter )
* - yarray - Original Data
* - nlen - Number of points in yarray
* - proto - Prototype of Filter
* - SAC_FILTER_BUTTERWORK - Butterworth
* - SAC_FILTER_BESSEL - Bessel
* - SAC_FILTER_CHEBYSHEV_TYPE_I - Chebyshev Type I
* - SAC_FILTER_CHEBYSHEV_TYPE_II - Chebyshev Type II
* - transition_bandwidth (Only for Chebyshev Filter)
* - Bandwidth as a fraction of the lowpass prototype
* cutoff frequency
* - attenuation (Only for Chebyshev Filter)
* - Attenuation factor, equals amplitude reached at
     *                                   stopband edge
* - order - Number of poles or order of the analog prototype
* 4 - 5 should be ample
* Cannot exceed 10
* - type - Type of Filter
* - SAC_FILTER_BANDPASS
* - SAC_FILTER_BANDREJECT
* - SAC_FILTER_LOWPASS
* - SAC_FILTER_HIGHPASS
* - low - Low Frequency Cutoff [ Hertz ]
* Ignored on SAC_FILTER_LOWPASS
* - high - High Frequency Cutoff [ Hertz ]
* Ignored on SAC_FILTER_HIGHPASS
* - delta - Sampling Interval [ seconds ]
* - passes - Number of passes
* - 1 Forward filter only
* - 2 Forward and reverse (i.e. zero-phase) filtering
*/
/*
xapiir(yarray, nlen, (char *)SAC_BUTTERWORTH,
configstruct.transition_band, configstruct.attenuation,
configstruct.npoles,
(char *)SAC_HIGHPASS,
configstruct.low, configstruct.high,
del, configstruct.passes);
END */
memcpy(&data[count*MAX_ARRAY], yarray, nlen*sizeof(float));
count++;
}
/* CUDA FFT */
cufftHandle plan;
cufftComplex *fft_data;
cuFloatComplex *fft_data_conj;
cufftComplex *hostOutputFFT;
int rank = 1; // --- 1D FFTs
int n[] = { nlen }; // --- Size of the Fourier transform
int istride = 1, ostride = 1; // --- Distance between two successive input/output elements
int idist = MAX_ARRAY, odist = (nlen / 2 + 1); // --- Distance between batches
int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms)
int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms)
int size_fft = (win_size / 2 + 1);
int batch = count; // --- Number of batched executions
printf(" ********** CONFG *********\n");
printf(" rank = %d\n", rank );
printf(" n[0] = %d\n", n[0] );
printf(" inembed = %d\n", inembed[0] );
printf(" istride = %d\n", istride );
printf(" onembed = %d\n", onembed[0] );
printf(" ostride = %d\n", ostride );
printf(" odist = %d\n", odist );
printf(" batch = %d\n", batch );
printf(" count = %d\n", count );
printf(" size_fft = %d\n", size_fft );
printf(" **************************\n");
    // Initializing device data for fft processing
gpuErrchk(cudaMalloc((void**)&device_data, MAX_ARRAY * count * sizeof(cufftReal )));
gpuErrchk(cudaMalloc((void**)&fft_data, size_fft * count * sizeof(cufftComplex)));
gpuErrchk(cudaMalloc((void**)&fft_data_conj, size_fft * count * sizeof(cufftComplex)));
//fft_data_conj = cuConjf(fft_data);
hostOutputFFT = (cufftComplex*)malloc( size_fft * count * sizeof(cufftComplex));
gpuErrchk(cudaMemcpy(device_data, data, MAX_ARRAY * count * sizeof(float) , cudaMemcpyHostToDevice));
cufftPlanMany(&plan, rank, n,
inembed, istride, idist,
onembed, ostride, odist, CUFFT_R2C, batch);
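    /* A real-to-complex FFT of nlen samples yields only nlen/2 + 1 complex bins (the rest
     * are conjugate-symmetric), hence odist above. Note that the device/host result buffers
     * here are sized with size_fft = win_size/2 + 1, which lines up with odist only when the
     * trace length nlen equals win_size. cufftExecR2C processes all `batch` traces in one call. */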
cufftExecR2C(plan, device_data, fft_data);
cufftDestroy(plan);
gpuErrchk(cudaMemcpy(hostOutputFFT, fft_data, size_fft * count * sizeof(cufftComplex), cudaMemcpyDeviceToHost));
printf(" %f %f\n", hostOutputFFT[0].x,hostOutputFFT[0].y );
print_fft(hostOutputFFT, batch, size_fft);
plot_fft(batch);
//print_array(data,count,nlen);
/*
cudaMemcpy2DToArray(device_data,
0,
0,
data,
MAX_ARRAY * sizeof(float),
nlen * sizeof(float),
count * sizeof(float), cudaMemcpyHostToDevice);
*/
printf("n = %d\n", n[0]);
find_repeaters<<<count, nlen >>> (device_data, nlen);
/* Closing */
gpuErrchk(cudaFree(device_data));
gpuErrchk(cudaFree(fft_data));
cufftDestroy(plan);
free(data);
fclose(fid);
if (line)
free(line);
cudaDeviceReset();
return EXIT_SUCCESS;
}
__global__ void find_repeaters(float *data,int npts){
__shared__ float* trace;
trace = (float *)malloc(npts*sizeof(float));
for(int currentBlockOfPoints = 0; currentBlockOfPoints < gridDim.x; currentBlockOfPoints++)
trace[threadIdx.x] = data[threadIdx.x + currentBlockOfPoints*npts];
}
/*
// Strips trailing characters
char *strstrip(char *s)
{
size_t size;
char *end;
size = strlen(s);
if (!size)
return s;
end = s + size - 1;
while (end >= s && isspace(*end))
end--;
*(end + 1) = '\0';
while (*s && isspace(*s))
s++;
return s;
}
*/
void usage(){
fprintf(stderr,"\nCUDA CRSMEX - Characteristic Repeating Earthquakes Code \n\n");
fprintf(stderr," This program looks for characteristic repeating earthquakes using GPU/CUDA\n");
fprintf(stderr," Required options:\n");
fprintf(stderr," -f filenames.dat - filenames.dat must containt a list of all files to be analyzed.\n\n");
fprintf(stderr," Author: Luis A. Dominguez - [email protected]\n\n");
}
void print_fft(cufftComplex *fft, int batch, int size_fft)
{
FILE *fout;
// print out individual files
char filename[] = "outputX.dat";
for (int i = 0; i < batch; i++){
filename[6] = i + '0';
printf("Writting file: %s\n", filename);
fout = fopen(filename, "w");
fprintf(stdout, "data size = %d\n", size_fft);
for (int j = 0; j < size_fft; j++){
fprintf(fout, "%f %f %f \n", fft[i*size_fft + j].x, fft[i*size_fft + j].y,
sqrt(fft[i*size_fft + j].x*fft[i*size_fft + j].x + fft[i*size_fft + j].y*fft[i*size_fft + j].y));
}
fclose(fout);
}
}
//fclose(fout);
void plot_fft(int N)
{
FILE *gnuplot = NULL;
char filename[] = "outputX.dat";
gnuplot=popen("gnuplot","w");
fprintf(gnuplot,"set term postscript eps enhanced color\n");
for(int i=0; i<N; i++ ){
filename[6] = i + '0';
fprintf(stdout, "Plot array using gnuplot - %s\n", filename);
fprintf(gnuplot, "set logscale xz\n");
fprintf(gnuplot, "set output 'graphics_fft_%i.eps'\n", i);
fprintf(gnuplot, "plot '%s' u 3 with lines\n", filename);
fprintf(gnuplot, "set output\n");
fflush(gnuplot);
}
pclose(gnuplot);
}
void print_array(float **array, int M, int N)
{
FILE *fout;
fprintf(stdout,"M = %d\n",M);
fprintf(stdout,"N = %d\n",N);
fout = fopen("data.dat","w");
for (int i = 0; i < M; i++){
for (int j = 0; j < N; j++)
fprintf(fout,"%8.3f ",array[i][j]);
fprintf(fout,"\n");
}
fprintf(stdout, "Writing file data.dat\n");
fclose(fout);
}
void print_array(float *array, int nsac, int npts, int step)
{
FILE *fout;
fprintf(stdout,"nsac = %d\n", nsac);
fprintf(stdout,"npts = %d\n", npts);
fout = fopen("data.dat","w");
for (int i = 0; i < npts; i++){
for (int j = 0; j < nsac; j++)
fprintf(fout,"%8.3f ",array[j*step + i]);
fprintf(fout,"\n");
}
fprintf(stdout, "Writing file data.dat\n");
fclose(fout);
}
void run_unit_test(){
float *data;
int nsac = 0;
int npts = 0;
int N = 3; // Only memory for three waveforms is reserved.
char filename_test[]="./unit_test/unit_test.dat";
struct config_filter configstruct;
int win_size = 512;
configstruct = get_config(CONFIG_FILENAME);
fprintf(stdout,"\n*** RUNING TEST UNIT ***\n\n");
data = (float *)malloc(N * MAX_ARRAY * sizeof(float));
load_sac_in_host_memory(data, filename_test, &nsac, &npts, win_size, true, configstruct);
print_array(data, nsac, npts, MAX_ARRAY);
}
void check_gpu_card_type()
{
int nDevices;
cudaGetDeviceCount(&nDevices);
if (nDevices == 0){
fprintf(stderr,"ERROR - No GPU card detected.\n");
exit(-1);
}
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf(" Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
|
41a7dd2a42aeffc849bfd75bc4fea5374e740a70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//Basic libraries
//Thrust library
//cuRAND library
//GLOBAL PARAMETERS
const int QUANT_PAIS_AVALIA = 4;
int POP_TAM = 200;
int N_CIDADES = 20;
int BLOCKSIZE = 1024;
int TOTALTHREADS = 2048;
int N_GERA = 100;
const int MUT = 10;
const int MAX = 19;
const int MIN = 0;
const int ELITE = 2;
/*
 * Checks for errors in GPU processes
 */
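/*
 * Builds the initial population: a grid-stride loop lets each thread construct one
 * individual. For gene j the thread picks p = max(j, ale[i*np+j]) -- a pre-generated
 * random index -- copies genes[i*np+p] into the individual's tour v, and then swaps
 * genes[j] with genes[p], i.e. a Fisher-Yates-style shuffle of the city indices.
 */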
__global__ void popInicial(unsigned int n,unsigned int np,int* v, int* genes, int* ale)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i=index; i<n; i+=stride)
{
for(int j=0; j<np; j++)
{
int p = (ale[i*np+j]<j)?j:ale[i*np+j];
v[i*np+j] = genes[i*np+p];
int aux = genes[i*np+j];
genes[i*np +j] = genes[i*np+p];
genes[i*np+p]=aux;
}
}
} | 41a7dd2a42aeffc849bfd75bc4fea5374e740a70.cu | #include "includes.h"
//Bibliotecas Basicas
//Biblioteca Thrust
//Biblioteca cuRAND
//PARAMETROS GLOBAIS
const int QUANT_PAIS_AVALIA = 4;
int POP_TAM = 200;
int N_CIDADES = 20;
int BLOCKSIZE = 1024;
int TOTALTHREADS = 2048;
int N_GERA = 100;
const int MUT = 10;
const int MAX = 19;
const int MIN = 0;
const int ELITE = 2;
/*
 * Checks for errors in GPU processes
 */
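/*
 * `ale` is expected to hold np pre-generated random indices per individual, each in
 * [0, np-1]; values below the current gene position j are clamped to j, so every
 * position is swapped with a position at or after it and each individual written to
 * `v` is a valid permutation of the genes.
 */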
__global__ void popInicial(unsigned int n,unsigned int np,int* v, int* genes, int* ale)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i=index; i<n; i+=stride)
{
for(int j=0; j<np; j++)
{
int p = (ale[i*np+j]<j)?j:ale[i*np+j];
v[i*np+j] = genes[i*np+p];
int aux = genes[i*np+j];
genes[i*np +j] = genes[i*np+p];
genes[i*np+p]=aux;
}
}
} |
07eaf7fe9f0a8a00474a00165770bd30588f7d9e.hip | // !!! This is a file automatically generated by hipify!!!
//
// Cuda Sudoku Solver
//
// Created by Arpit Jain
// Copyright (c) 2014 New York University. All rights reserved.
//
#include <stdio.h>
#include <stdlib.h>
#include <hiprand/hiprand_kernel.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define NUM_ITERATION 10000
#define INIT_TEMPERATURE 0.4
#define MIN_TEMPERATURE 0.001
#define INIT_TOLERANCE 1
#define DELTA_T 0.2
__constant__ int d_mask[81];
char outname[50];
//Error Checks
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Kernel for initializing random number generators
__global__ void init_random_generator(hiprandState_t *state) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(1337, idx, 0, &state[idx]);
}
// This function returns the count of unique elements in a row or column, selected by the flag (Device Version)
__device__ int d_num_unique(int rc_num,int sudoku[][9],int flag)
{
int nums[9]={1,2,3,4,5,6,7,8,9};
int idx, unique_Count;
unique_Count = 0;
for(int j=0;j<9;j++)
{
if(flag==2)
idx = sudoku[j][rc_num]-1;
else
idx = sudoku[rc_num][j]-1;
if(idx==-1)
return -1;
if(nums[idx]!=0)
{
unique_Count+=1;
nums[idx]=0;
}
}
return unique_Count;
}
//Computes the energy by adding the number of unique elements in all the rows and columns
__device__ int d_compute_energy(int sudoku[][9])
{
int energy=0;
for(int i=0;i<9;i++)
energy += d_num_unique(i,sudoku,1) + d_num_unique(i,sudoku,2);
return 162-energy;
}
//Kernel to run a Markov chain
__global__ void markov(int* sudoku,hiprandState_t *state,int cur_energy,float temperature,int *b1,int *b2,int *b3,int *b4,int *b5,int *b6,int *b7,int *b8,int *b9,int *b10,int *b11,int *b12,int *b13,int *b14,int *b15,int *energy_block)
{
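    // One Markov chain per block: all 81 threads stage the board in shared memory,
    // then only thread (0,0) performs the annealing sweep for this chain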
__shared__ int shd_sudoku[9][9];
int thread_x=threadIdx.x;
int thread_y=threadIdx.y;
int thread_num_local= threadIdx.x*blockDim.x + threadIdx.y;
int block_num= blockIdx.x*blockDim.x + blockIdx.y;
//Bring the sudoku to shared memory
    shd_sudoku[thread_x][thread_y]=sudoku[thread_x+ 9*thread_y];
    // Wait until every thread has staged its cell before thread (0,0) starts the chain
    __syncthreads();
    if(thread_num_local!=0)
    {
        return;
    }
int block_x;
int block_y;
int r1_x, r1_y, r2_x, r2_y;
int temp;
int energy;
for(int iter=0;iter<NUM_ITERATION;iter++)
{
//Select a Random sub block in the sudoku
block_x = 3*(int)(3.0*hiprand_uniform(&state[block_num]));
block_y = 3*(int)(3.0*hiprand_uniform(&state[block_num]));
//Select two unmasked points
do
{
r1_x=(int)3.0*hiprand_uniform(&state[block_num]);
r1_y=(int)3.0*hiprand_uniform(&state[block_num]);
}while(d_mask[(block_x+r1_x)+9*(block_y+r1_y)]==1);
do{
r2_x=(int)3.0*hiprand_uniform(&state[block_num]);
r2_y=(int)3.0*hiprand_uniform(&state[block_num]);
}while(d_mask[(block_x+r2_x)+9*(block_y+r2_y)]==1);
//Swap the elements
temp=shd_sudoku[block_x+r1_x][block_y+r1_y];
shd_sudoku[block_x+r1_x][block_y+r1_y]=shd_sudoku[block_x+r2_x][block_y+r2_y];
shd_sudoku[block_x+r2_x][block_y+r2_y]=temp;
//Compute the energy of this new state
energy=d_compute_energy(shd_sudoku);
if(energy<cur_energy)
cur_energy = energy;
else{
//Accept the state
if(exp((float)(cur_energy-energy)/temperature)>hiprand_uniform(&state[block_num]))
cur_energy = energy;
// if(cur_energy-energy>0.2)
// cur_energy = energy;
//Reject the state and undo changes
else{
temp=shd_sudoku[block_x+r1_x][block_y+r1_y];
shd_sudoku[block_x+r1_x][block_y+r1_y]=shd_sudoku[block_x+r2_x][block_y+r2_y];
shd_sudoku[block_x+r2_x][block_y+r2_y]=temp;
}
}
//If reached the lowest point break
if(energy==0)
break;
}
//Write the result back to memory
for(int i=0;i<9;i++)
{
for(int j=0;j<9;j++)
{
if(block_num==0)
b1[i+9*j]=shd_sudoku[i][j];
if(block_num==1)
b2[i+9*j]=shd_sudoku[i][j];
if(block_num==2)
b3[i+9*j]=shd_sudoku[i][j];
if(block_num==3)
b4[i+9*j]=shd_sudoku[i][j];
if(block_num==4)
b5[i+9*j]=shd_sudoku[i][j];
if(block_num==5)
b6[i+9*j]=shd_sudoku[i][j];
if(block_num==6)
b7[i+9*j]=shd_sudoku[i][j];
if(block_num==7)
b8[i+9*j]=shd_sudoku[i][j];
if(block_num==8)
b9[i+9*j]=shd_sudoku[i][j];
if(block_num==9)
b10[i+9*j]=shd_sudoku[i][j];
if(block_num==10)
b11[i+9*j]=shd_sudoku[i][j];
if(block_num==11)
b12[i+9*j]=shd_sudoku[i][j];
if(block_num==12)
b13[i+9*j]=shd_sudoku[i][j];
if(block_num==13)
b14[i+9*j]=shd_sudoku[i][j];
if(block_num==14)
b15[i+9*j]=shd_sudoku[i][j];
}
}
//Write the energy back to memory for the current state
energy_block[block_num]=cur_energy;
}
//Display the sudoku
void display_sudoku(int *n){
printf("\n_________________________\n");
for(int i=0;i<9;i++){
printf("| ");
for(int j=0;j<9;j=j+3)
printf("%1d %1d %1d | ",n[i+9*j],n[i+9*(j+1)],n[i+9*(j+2)]);
if((i+1)%3==0){
printf("\n-------------------------\n");
}else printf("\n");
}
return;
}
/*Initialize the sudoku. 1) Read the partial sudoku.
2) Place values in all the empty slots such that the 3x3 subgrid clause is satisfied */
void init_sudoku(int *s,int *m,char* fname)
{
FILE *fin ;
fin = fopen(fname,"r");
//Output file name
int len;
for(len=0;len<strlen(fname)-2;len++)
outname[len]=fname[len];
strcat(outname,"out");
int in;
int x, y;
int p, q;
int idx;
int nums_1[9],nums_2[9];
//Read the partial sudoku from file
//Compute the mask. 0 -> mutable value 1-> non-mutable
for(int i=0;i<9;i++){
for(int j=0;j<9;j++){
fscanf(fin,"%1d",&in);
s[i+9*j] = in;
if(in==0)
m[i+9*j]=0;
else
m[i+9*j]=1;
}
}
fclose(fin);
printf("Puzzle\n");
display_sudoku(s);
//Place values in all the empty slots such that the 3x3 subgrid clause is satisfied
for(int block_i=0;block_i<3;block_i++)
{
for(int block_j=0;block_j<3;block_j++)
{
for(int k=0;k<9;k++)
nums_1[k]=k+1;
for(int i=0;i<3;i++)
{
for(int j=0;j<3;j++)
{
x = block_i*3 + i;
y = block_j*3 + j;
if(s[x+9*y]!=0){
p = s[x+9*y];
nums_1[p-1]=0;
}
}
}
q = -1;
for(int k=0;k<9;k++)
{
if(nums_1[k]!=0)
{
q+=1;
nums_2[q] = nums_1[k];
}
}
idx = 0;
for(int i=0;i<3;i++)
{
for(int j=0;j<3;j++)
{
x = block_i*3 + i;
y = block_j*3 + j;
if(s[x+9*y]==0)
{
s[x+9*y] = nums_2[idx];
idx+=1;
}
}
}
}
}
}
// This function returns the count of unique elements in a row or column, selected by the flag (Host Version)
int h_num_unique(int i, int k, int *n){
int nums[9]={1,2,3,4,5,6,7,8,9};
int idx, unique_count;
unique_count = 0;
for(int j=0;j<9;j++){
if(k==1){
idx = n[i+9*j]-1;
}
else{
idx = n[j+9*i]-1;
}
if(idx==-1){
return -1;
}
if(nums[idx]!=0){
unique_count+=1;
nums[idx]=0;
}
}
return unique_count;
}
//Computes the energy by adding the number of unique elements in all the rows and columns
int h_compute_energy(int *n)
{
int energy = 0;
for(int i=0;i<9;i++){
energy += h_num_unique(i,1,n) + h_num_unique(i,2,n);
}
return 162 - energy;
}
void write_file(int *s)
{
FILE *fout;
fout=fopen(outname,"w");
for(int i=0;i<9;i++)
{
for(int j=0;j<9;j++)
fprintf(fout,"%1d",s[i+9*j]);
if(i<8)
fprintf(fout,"\n");
}
fclose(fout);
}
//Main
int main(int arg,char* argv[]) {
//hipSetDevice(0);
//hipDeviceSetCacheConfig(hipFuncCachePreferL1);
int device;
hipGetDevice(&device);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop,device);
//Tunable Parameter
int num_chains;
if(prop.multiProcessorCount>=15)
num_chains=15;
else
num_chains=prop.multiProcessorCount;
float temperature=INIT_TEMPERATURE;
float temp_min=MIN_TEMPERATURE;
//Host pointers
int *sudoku;
int *mask;
int *h_energy_host;
int size=sizeof(int)*81;
//Allocate memory
gpuErrchk(hipHostMalloc((void**)&sudoku,size,hipHostMallocDefault));
gpuErrchk(hipHostMalloc((void**)&mask,size,hipHostMallocDefault));
gpuErrchk(hipHostMalloc((void**)&h_energy_host,sizeof(int)*num_chains,hipHostMallocDefault));
init_sudoku(sudoku,mask,argv[1]);
//Initial Energy of sudoku
int current_energy=h_compute_energy(sudoku);
printf("Current energy %d \n",current_energy);
//Device pointers
int *d_sudoku;
int *d_b1,*d_b2,*d_b3,*d_b4,*d_b5,*d_b6,*d_b7,*d_b8,*d_b9,*d_b10,*d_b11,*d_b12,*d_b13,*d_b14,*d_b15;
int *energy_block;
//Allocate memory
gpuErrchk(hipMalloc((void**)&d_sudoku,size));
 // d_mask is a __constant__ array, so no hipMalloc is needed; it is filled below with hipMemcpyToSymbol
gpuErrchk(hipMalloc((void**)&d_b1,size));
gpuErrchk(hipMalloc((void**)&d_b2,size));
gpuErrchk(hipMalloc((void**)&d_b3,size));
gpuErrchk(hipMalloc((void**)&d_b4,size));
gpuErrchk(hipMalloc((void**)&d_b5,size));
gpuErrchk(hipMalloc((void**)&d_b6,size));
gpuErrchk(hipMalloc((void**)&d_b7,size));
gpuErrchk(hipMalloc((void**)&d_b8,size));
gpuErrchk(hipMalloc((void**)&d_b9,size));
gpuErrchk(hipMalloc((void**)&d_b10,size));
gpuErrchk(hipMalloc((void**)&d_b11,size));
gpuErrchk(hipMalloc((void**)&d_b12,size));
gpuErrchk(hipMalloc((void**)&d_b13,size));
gpuErrchk(hipMalloc((void**)&d_b14,size));
gpuErrchk(hipMalloc((void**)&d_b15,size));
gpuErrchk(hipMalloc((void**)&energy_block,sizeof(int)*num_chains));
//Copy Sudoku and Mask to GPU
gpuErrchk(hipMemcpy(d_sudoku,sudoku,size,hipMemcpyHostToDevice));
gpuErrchk(hipMemcpyToSymbol(d_mask,mask,size));
//Grid and Block dimensions
dim3 dimGrid(1,num_chains);
dim3 dimBlock(9,9);
printf("Solution");
//Random number generators. Launch init_random_generator kernel
hiprandState_t *d_state;
 // Allocate one RNG state per launched thread
 gpuErrchk(hipMalloc(&d_state, sizeof(hiprandState_t) * dimBlock.x * dimBlock.y * dimGrid.x * dimGrid.y));
hipLaunchKernelGGL(( init_random_generator), dim3(dimGrid.x * dimGrid.y), dim3(dimBlock.x* dimBlock.y), 0, 0, d_state);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
int tolerance=INIT_TOLERANCE;
int min,min_idx;
int e;
int prev_energy=current_energy;
//Simulated Annealing loop
do{
min=200;
min_idx=200;
hipLaunchKernelGGL(( markov), dim3(dimGrid),dim3(dimBlock) , 0, 0, d_sudoku,d_state,current_energy,temperature,d_b1,d_b2,d_b3,d_b4,d_b5,d_b6,d_b7,d_b8,d_b9,d_b10,d_b11,d_b12,d_b13,d_b14,d_b15,energy_block);
gpuErrchk(hipDeviceSynchronize());
hipMemcpy(h_energy_host,energy_block,sizeof(int)*num_chains,hipMemcpyDeviceToHost);
for(e=0;e<num_chains;e++)
{
if(h_energy_host[e]<min)
{
min=h_energy_host[e];
min_idx=e;
}
}
if(min_idx==0)
{
hipMemcpy(d_sudoku,d_b1,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==1)
{
hipMemcpy(d_sudoku,d_b2,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==2)
{
hipMemcpy(d_sudoku,d_b3,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==3)
{
hipMemcpy(d_sudoku,d_b4,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==4)
{
hipMemcpy(d_sudoku,d_b5,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==5)
{
hipMemcpy(d_sudoku,d_b6,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==6)
{
hipMemcpy(d_sudoku,d_b7,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==7)
{
hipMemcpy(d_sudoku,d_b8,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==8)
{
hipMemcpy(d_sudoku,d_b9,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==9)
{
hipMemcpy(d_sudoku,d_b10,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==10)
{
hipMemcpy(d_sudoku,d_b11,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==11)
{
hipMemcpy(d_sudoku,d_b12,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==12)
{
hipMemcpy(d_sudoku,d_b13,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==13)
{
hipMemcpy(d_sudoku,d_b14,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==14)
{
hipMemcpy(d_sudoku,d_b15,size,hipMemcpyDeviceToDevice);
current_energy=min;
}
if(current_energy==0)
{
break;
}
if(current_energy==prev_energy)
tolerance--;
else
tolerance=INIT_TOLERANCE;
// Random restart if energy is stuck
if(tolerance<0)
{
//printf("Randomizing\n");
hipMemcpy(sudoku,d_sudoku,size,hipMemcpyDeviceToHost);
int ar[3]={0,3,6};
int tempa;
int rand1=random()%3;
int rand2=random()%3;
int r1_x,r1_y,r2_x,r2_y;
int block_x,block_y;
for(int suf=0;suf<random()%10;suf++)
{
block_x = ar[rand1];
block_y = ar[rand2];
do{
r1_x=random()%3;
r1_y=random()%3;;
}while(mask[(block_x+r1_x)+9*(block_y+r1_y)]==1);
do{
r2_x=random()%3;;
r2_y=random()%3;;
}while(mask[(block_x+r2_x)+9*(block_y+r2_y)]==1);
tempa=sudoku[(block_x+r1_x)+9*(block_y+r1_y)];
sudoku[(block_x+r1_x)+9*(block_y+r1_y)]=sudoku[(block_x+r2_x)+9*(block_y+r2_y)];
sudoku[(block_x+r2_x)+9*(block_y+r2_y)]=tempa;
}
hipMemcpy(d_sudoku,sudoku,size,hipMemcpyHostToDevice);
current_energy=h_compute_energy(sudoku);
//printf("Energy after randomizing %d \n",current_energy);
tolerance=INIT_TOLERANCE;
temperature=temperature+DELTA_T;
}
prev_energy=current_energy;
if(current_energy==0)
{
break;
}
temperature=temperature*0.8;
//printf("Energy after temp %f is %d \n",temperature,current_energy);
}while(temperature>temp_min);
hipMemcpy(sudoku,d_sudoku,size,hipMemcpyDeviceToHost);
display_sudoku(sudoku);
write_file(sudoku);
current_energy=h_compute_energy(sudoku);
printf("Current energy %d \n",current_energy);
return 0;
}
| 07eaf7fe9f0a8a00474a00165770bd30588f7d9e.cu | //
// Cuda Sudoku Solver
//
// Created by Arpit Jain
// Copyright (c) 2014 New York University. All rights reserved.
//
#include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h>
#include <math.h>
#include <cuda.h>
#define NUM_ITERATION 10000
#define INIT_TEMPERATURE 0.4
#define MIN_TEMPERATURE 0.001
#define INIT_TOLERANCE 1
#define DELTA_T 0.2
__constant__ int d_mask[81];
char outname[50];
//Error Checks
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Kernel for initializing random number generators
__global__ void init_random_generator(curandState *state) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(1337, idx, 0, &state[idx]);
}
// This function returns the count of unique elements in a row or column, selected by the flag (Device Version)
__device__ int d_num_unique(int rc_num,int sudoku[][9],int flag)
{
int nums[9]={1,2,3,4,5,6,7,8,9};
int idx, unique_Count;
unique_Count = 0;
for(int j=0;j<9;j++)
{
if(flag==2)
idx = sudoku[j][rc_num]-1;
else
idx = sudoku[rc_num][j]-1;
if(idx==-1)
return -1;
if(nums[idx]!=0)
{
unique_Count+=1;
nums[idx]=0;
}
}
return unique_Count;
}
//Computes the energy by adding the number of unique elements in all the rows and columns
__device__ int d_compute_energy(int sudoku[][9])
{
int energy=0;
for(int i=0;i<9;i++)
energy += d_num_unique(i,sudoku,1) + d_num_unique(i,sudoku,2);
return 162-energy;
}
//Kernel to run a Markov chain
__global__ void markov(int* sudoku,curandState *state,int cur_energy,float temperature,int *b1,int *b2,int *b3,int *b4,int *b5,int *b6,int *b7,int *b8,int *b9,int *b10,int *b11,int *b12,int *b13,int *b14,int *b15,int *energy_block)
{
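    // One Markov chain per block: all 81 threads stage the board in shared memory,
    // then only thread (0,0) performs the annealing sweep for this chain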
__shared__ int shd_sudoku[9][9];
int thread_x=threadIdx.x;
int thread_y=threadIdx.y;
int thread_num_local= threadIdx.x*blockDim.x + threadIdx.y;
int block_num= blockIdx.x*blockDim.x + blockIdx.y;
//Bring the sudoku to shared memory
    shd_sudoku[thread_x][thread_y]=sudoku[thread_x+ 9*thread_y];
    // Wait until every thread has staged its cell before thread (0,0) starts the chain
    __syncthreads();
    if(thread_num_local!=0)
    {
        return;
    }
int block_x;
int block_y;
int r1_x, r1_y, r2_x, r2_y;
int temp;
int energy;
for(int iter=0;iter<NUM_ITERATION;iter++)
{
//Select a Random sub block in the sudoku
block_x = 3*(int)(3.0*curand_uniform(&state[block_num]));
block_y = 3*(int)(3.0*curand_uniform(&state[block_num]));
//Select two unmasked points
do
{
r1_x=(int)3.0*curand_uniform(&state[block_num]);
r1_y=(int)3.0*curand_uniform(&state[block_num]);
}while(d_mask[(block_x+r1_x)+9*(block_y+r1_y)]==1);
do{
r2_x=(int)3.0*curand_uniform(&state[block_num]);
r2_y=(int)3.0*curand_uniform(&state[block_num]);
}while(d_mask[(block_x+r2_x)+9*(block_y+r2_y)]==1);
//Swap the elements
temp=shd_sudoku[block_x+r1_x][block_y+r1_y];
shd_sudoku[block_x+r1_x][block_y+r1_y]=shd_sudoku[block_x+r2_x][block_y+r2_y];
shd_sudoku[block_x+r2_x][block_y+r2_y]=temp;
//Compute the energy of this new state
energy=d_compute_energy(shd_sudoku);
if(energy<cur_energy)
cur_energy = energy;
else{
//Accept the state
if(exp((float)(cur_energy-energy)/temperature)>curand_uniform(&state[block_num]))
cur_energy = energy;
// if(cur_energy-energy>0.2)
// cur_energy = energy;
//Reject the state and undo changes
else{
temp=shd_sudoku[block_x+r1_x][block_y+r1_y];
shd_sudoku[block_x+r1_x][block_y+r1_y]=shd_sudoku[block_x+r2_x][block_y+r2_y];
shd_sudoku[block_x+r2_x][block_y+r2_y]=temp;
}
}
//If reached the lowest point break
if(energy==0)
break;
}
//Write the result back to memory
for(int i=0;i<9;i++)
{
for(int j=0;j<9;j++)
{
if(block_num==0)
b1[i+9*j]=shd_sudoku[i][j];
if(block_num==1)
b2[i+9*j]=shd_sudoku[i][j];
if(block_num==2)
b3[i+9*j]=shd_sudoku[i][j];
if(block_num==3)
b4[i+9*j]=shd_sudoku[i][j];
if(block_num==4)
b5[i+9*j]=shd_sudoku[i][j];
if(block_num==5)
b6[i+9*j]=shd_sudoku[i][j];
if(block_num==6)
b7[i+9*j]=shd_sudoku[i][j];
if(block_num==7)
b8[i+9*j]=shd_sudoku[i][j];
if(block_num==8)
b9[i+9*j]=shd_sudoku[i][j];
if(block_num==9)
b10[i+9*j]=shd_sudoku[i][j];
if(block_num==10)
b11[i+9*j]=shd_sudoku[i][j];
if(block_num==11)
b12[i+9*j]=shd_sudoku[i][j];
if(block_num==12)
b13[i+9*j]=shd_sudoku[i][j];
if(block_num==13)
b14[i+9*j]=shd_sudoku[i][j];
if(block_num==14)
b15[i+9*j]=shd_sudoku[i][j];
}
}
//Write the energy back to memory for the current state
energy_block[block_num]=cur_energy;
}
//Display the sudoku
void display_sudoku(int *n){
printf("\n_________________________\n");
for(int i=0;i<9;i++){
printf("| ");
for(int j=0;j<9;j=j+3)
printf("%1d %1d %1d | ",n[i+9*j],n[i+9*(j+1)],n[i+9*(j+2)]);
if((i+1)%3==0){
printf("\n-------------------------\n");
}else printf("\n");
}
return;
}
/*Initialize the sudoku. 1) Read the partial sudoku.
2) Place values in all the empty slots such that the 3x3 subgrid clause is satisfied */
void init_sudoku(int *s,int *m,char* fname)
{
FILE *fin ;
fin = fopen(fname,"r");
//Output file name
int len;
for(len=0;len<strlen(fname)-2;len++)
outname[len]=fname[len];
strcat(outname,"out");
int in;
int x, y;
int p, q;
int idx;
int nums_1[9],nums_2[9];
//Read the partial sudoku from file
//Compute the mask. 0 -> mutable value 1-> non-mutable
for(int i=0;i<9;i++){
for(int j=0;j<9;j++){
fscanf(fin,"%1d",&in);
s[i+9*j] = in;
if(in==0)
m[i+9*j]=0;
else
m[i+9*j]=1;
}
}
fclose(fin);
printf("Puzzle\n");
display_sudoku(s);
//Place values in all the empty slots such that the 3x3 subgrid clause is satisfied
for(int block_i=0;block_i<3;block_i++)
{
for(int block_j=0;block_j<3;block_j++)
{
for(int k=0;k<9;k++)
nums_1[k]=k+1;
for(int i=0;i<3;i++)
{
for(int j=0;j<3;j++)
{
x = block_i*3 + i;
y = block_j*3 + j;
if(s[x+9*y]!=0){
p = s[x+9*y];
nums_1[p-1]=0;
}
}
}
q = -1;
for(int k=0;k<9;k++)
{
if(nums_1[k]!=0)
{
q+=1;
nums_2[q] = nums_1[k];
}
}
idx = 0;
for(int i=0;i<3;i++)
{
for(int j=0;j<3;j++)
{
x = block_i*3 + i;
y = block_j*3 + j;
if(s[x+9*y]==0)
{
s[x+9*y] = nums_2[idx];
idx+=1;
}
}
}
}
}
}
// This function returns the count of unique elements in a row or column, selected by the flag (Host Version)
int h_num_unique(int i, int k, int *n){
int nums[9]={1,2,3,4,5,6,7,8,9};
int idx, unique_count;
unique_count = 0;
for(int j=0;j<9;j++){
if(k==1){
idx = n[i+9*j]-1;
}
else{
idx = n[j+9*i]-1;
}
if(idx==-1){
return -1;
}
if(nums[idx]!=0){
unique_count+=1;
nums[idx]=0;
}
}
return unique_count;
}
//Computes the energy by adding the number of unique elements in all the rows and columns
int h_compute_energy(int *n)
{
int energy = 0;
for(int i=0;i<9;i++){
energy += h_num_unique(i,1,n) + h_num_unique(i,2,n);
}
return 162 - energy;
}
void write_file(int *s)
{
FILE *fout;
fout=fopen(outname,"w");
for(int i=0;i<9;i++)
{
for(int j=0;j<9;j++)
fprintf(fout,"%1d",s[i+9*j]);
if(i<8)
fprintf(fout,"\n");
}
fclose(fout);
}
//Main
int main(int arg,char* argv[]) {
//cudaSetDevice(0);
//cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
int device;
cudaGetDevice(&device);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,device);
//Tunable Parameter
int num_chains;
if(prop.multiProcessorCount>=15)
num_chains=15;
else
num_chains=prop.multiProcessorCount;
float temperature=INIT_TEMPERATURE;
float temp_min=MIN_TEMPERATURE;
//Host pointers
int *sudoku;
int *mask;
int *h_energy_host;
int size=sizeof(int)*81;
//Allocate memory
gpuErrchk(cudaHostAlloc((void**)&sudoku,size,cudaHostAllocDefault));
gpuErrchk(cudaHostAlloc((void**)&mask,size,cudaHostAllocDefault));
gpuErrchk(cudaHostAlloc((void**)&h_energy_host,sizeof(int)*num_chains,cudaHostAllocDefault));
init_sudoku(sudoku,mask,argv[1]);
//Initial Energy of sudoku
int current_energy=h_compute_energy(sudoku);
printf("Current energy %d \n",current_energy);
//Device pointers
int *d_sudoku;
int *d_b1,*d_b2,*d_b3,*d_b4,*d_b5,*d_b6,*d_b7,*d_b8,*d_b9,*d_b10,*d_b11,*d_b12,*d_b13,*d_b14,*d_b15;
int *energy_block;
//Allocate memory
gpuErrchk(cudaMalloc((void**)&d_sudoku,size));
 // d_mask is a __constant__ array, so no cudaMalloc is needed; it is filled below with cudaMemcpyToSymbol
gpuErrchk(cudaMalloc((void**)&d_b1,size));
gpuErrchk(cudaMalloc((void**)&d_b2,size));
gpuErrchk(cudaMalloc((void**)&d_b3,size));
gpuErrchk(cudaMalloc((void**)&d_b4,size));
gpuErrchk(cudaMalloc((void**)&d_b5,size));
gpuErrchk(cudaMalloc((void**)&d_b6,size));
gpuErrchk(cudaMalloc((void**)&d_b7,size));
gpuErrchk(cudaMalloc((void**)&d_b8,size));
gpuErrchk(cudaMalloc((void**)&d_b9,size));
gpuErrchk(cudaMalloc((void**)&d_b10,size));
gpuErrchk(cudaMalloc((void**)&d_b11,size));
gpuErrchk(cudaMalloc((void**)&d_b12,size));
gpuErrchk(cudaMalloc((void**)&d_b13,size));
gpuErrchk(cudaMalloc((void**)&d_b14,size));
gpuErrchk(cudaMalloc((void**)&d_b15,size));
gpuErrchk(cudaMalloc((void**)&energy_block,sizeof(int)*num_chains));
//Copy Sudoku and Mask to GPU
gpuErrchk(cudaMemcpy(d_sudoku,sudoku,size,cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpyToSymbol(d_mask,mask,size));
//Grid and Block dimensions
dim3 dimGrid(1,num_chains);
dim3 dimBlock(9,9);
printf("Solution");
//Random number generators. Launch init_random_generator kernel
curandState *d_state;
 // Allocate one RNG state per launched thread
 gpuErrchk(cudaMalloc(&d_state, sizeof(curandState) * dimBlock.x * dimBlock.y * dimGrid.x * dimGrid.y));
init_random_generator<<<dimGrid.x * dimGrid.y, dimBlock.x* dimBlock.y>>>(d_state);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
int tolerance=INIT_TOLERANCE;
int min,min_idx;
int e;
int prev_energy=current_energy;
//Simulated Annealing loop
do{
min=200;
min_idx=200;
markov<<< dimGrid,dimBlock >>>(d_sudoku,d_state,current_energy,temperature,d_b1,d_b2,d_b3,d_b4,d_b5,d_b6,d_b7,d_b8,d_b9,d_b10,d_b11,d_b12,d_b13,d_b14,d_b15,energy_block);
gpuErrchk(cudaDeviceSynchronize());
cudaMemcpy(h_energy_host,energy_block,sizeof(int)*num_chains,cudaMemcpyDeviceToHost);
for(e=0;e<num_chains;e++)
{
if(h_energy_host[e]<min)
{
min=h_energy_host[e];
min_idx=e;
}
}
if(min_idx==0)
{
cudaMemcpy(d_sudoku,d_b1,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==1)
{
cudaMemcpy(d_sudoku,d_b2,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==2)
{
cudaMemcpy(d_sudoku,d_b3,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==3)
{
cudaMemcpy(d_sudoku,d_b4,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==4)
{
cudaMemcpy(d_sudoku,d_b5,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==5)
{
cudaMemcpy(d_sudoku,d_b6,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==6)
{
cudaMemcpy(d_sudoku,d_b7,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==7)
{
cudaMemcpy(d_sudoku,d_b8,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==8)
{
cudaMemcpy(d_sudoku,d_b9,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==9)
{
cudaMemcpy(d_sudoku,d_b10,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==10)
{
cudaMemcpy(d_sudoku,d_b11,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==11)
{
cudaMemcpy(d_sudoku,d_b12,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==12)
{
cudaMemcpy(d_sudoku,d_b13,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==13)
{
cudaMemcpy(d_sudoku,d_b14,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==14)
{
cudaMemcpy(d_sudoku,d_b15,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(current_energy==0)
{
break;
}
if(current_energy==prev_energy)
tolerance--;
else
tolerance=INIT_TOLERANCE;
// Random restart if energy is stuck
if(tolerance<0)
{
//printf("Randomizing\n");
cudaMemcpy(sudoku,d_sudoku,size,cudaMemcpyDeviceToHost);
int ar[3]={0,3,6};
int tempa;
int rand1=random()%3;
int rand2=random()%3;
int r1_x,r1_y,r2_x,r2_y;
int block_x,block_y;
for(int suf=0;suf<random()%10;suf++)
{
block_x = ar[rand1];
block_y = ar[rand2];
do{
r1_x=random()%3;
r1_y=random()%3;;
}while(mask[(block_x+r1_x)+9*(block_y+r1_y)]==1);
do{
r2_x=random()%3;;
r2_y=random()%3;;
}while(mask[(block_x+r2_x)+9*(block_y+r2_y)]==1);
tempa=sudoku[(block_x+r1_x)+9*(block_y+r1_y)];
sudoku[(block_x+r1_x)+9*(block_y+r1_y)]=sudoku[(block_x+r2_x)+9*(block_y+r2_y)];
sudoku[(block_x+r2_x)+9*(block_y+r2_y)]=tempa;
}
cudaMemcpy(d_sudoku,sudoku,size,cudaMemcpyHostToDevice);
current_energy=h_compute_energy(sudoku);
//printf("Energy after randomizing %d \n",current_energy);
tolerance=INIT_TOLERANCE;
temperature=temperature+DELTA_T;
}
prev_energy=current_energy;
if(current_energy==0)
{
break;
}
temperature=temperature*0.8;
//printf("Energy after temp %f is %d \n",temperature,current_energy);
}while(temperature>temp_min);
cudaMemcpy(sudoku,d_sudoku,size,cudaMemcpyDeviceToHost);
display_sudoku(sudoku);
write_file(sudoku);
current_energy=h_compute_energy(sudoku);
printf("Current energy %d \n",current_energy);
return 0;
}
|
ae4257e763f4cda76374a3fa2832cb9e9ed9bc2f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
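// Element-wise vector addition; the grid-stride loop lets a single block cover all n elements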
__global__ void vector_add(int *out, int *a, int *b, int n){
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i+=stride) {
out[i] = a[i] + b[i];
}
}
int main() {
int n = 10;
int *a, *b, *out, *dev_a, *dev_b, *dev_out;
a = (int*) malloc(n * sizeof(int));
b = (int*) malloc(n * sizeof(int));
out = (int*) malloc(n * sizeof(int));
for (int i = 0; i < n; i++) {
a[i] = 1;
b[i] = 2;
}
// Allocate device memory
 hipMalloc((void**)&dev_a, sizeof(int) * n);
 hipMalloc((void**)&dev_b, sizeof(int) * n);
 hipMalloc((void**)&dev_out, sizeof(int) * n);
 // Transfer data from host to device memory
 hipMemcpy(dev_a, a, sizeof(int) * n, hipMemcpyHostToDevice);
 hipMemcpy(dev_b, b, sizeof(int) * n, hipMemcpyHostToDevice);
 // Executing kernel
 hipLaunchKernelGGL(( vector_add), dim3(1),dim3(256), 0, 0, dev_out, dev_a, dev_b, n);
 // Transfer data back to host memory
 hipMemcpy(out, dev_out, sizeof(int) * n, hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++) {
printf("%d\n", out[i]);
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_out);
 // a, b and out were allocated with malloc, so release them with free
 free(a);
 free(out);
 free(b);
return EXIT_SUCCESS;
}
| ae4257e763f4cda76374a3fa2832cb9e9ed9bc2f.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
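// Element-wise vector addition; the grid-stride loop lets a single block cover all n elements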
__global__ void vector_add(int *out, int *a, int *b, int n){
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i+=stride) {
out[i] = a[i] + b[i];
}
}
int main() {
int n = 10;
int *a, *b, *out, *dev_a, *dev_b, *dev_out;
a = (int*) malloc(n * sizeof(int));
b = (int*) malloc(n * sizeof(int));
out = (int*) malloc(n * sizeof(int));
for (int i = 0; i < n; i++) {
a[i] = 1;
b[i] = 2;
}
// Allocate device memory
 cudaMalloc((void**)&dev_a, sizeof(int) * n);
 cudaMalloc((void**)&dev_b, sizeof(int) * n);
 cudaMalloc((void**)&dev_out, sizeof(int) * n);
 // Transfer data from host to device memory
 cudaMemcpy(dev_a, a, sizeof(int) * n, cudaMemcpyHostToDevice);
 cudaMemcpy(dev_b, b, sizeof(int) * n, cudaMemcpyHostToDevice);
 // Executing kernel
 vector_add<<<1,256>>>(dev_out, dev_a, dev_b, n);
 // Transfer data back to host memory
 cudaMemcpy(out, dev_out, sizeof(int) * n, cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++) {
printf("%d\n", out[i]);
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_out);
 // a, b and out were allocated with malloc, so release them with free
 free(a);
 free(out);
 free(b);
return EXIT_SUCCESS;
}
|
20558d213c6bc6f807203e30b74b7b92a311a5d6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Cauta_Nod.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
Nod *noduri = NULL;
hipMalloc(&noduri, XSIZE*YSIZE);
Muchie *muchii = NULL;
hipMalloc(&muchii, XSIZE*YSIZE);
int *costuri = NULL;
hipMalloc(&costuri, XSIZE*YSIZE);
int *costTemporal = NULL;
hipMalloc(&costTemporal, XSIZE*YSIZE);
int *costFinal = NULL;
hipMalloc(&costFinal, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
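// Round the problem size up to the next multiple of the block dimensions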
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
Cauta_Nod), dim3(gridBlock),dim3(threadBlock), 0, 0, noduri,muchii,costuri,costTemporal,costFinal);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
Cauta_Nod), dim3(gridBlock),dim3(threadBlock), 0, 0, noduri,muchii,costuri,costTemporal,costFinal);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
Cauta_Nod), dim3(gridBlock),dim3(threadBlock), 0, 0, noduri,muchii,costuri,costTemporal,costFinal);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 20558d213c6bc6f807203e30b74b7b92a311a5d6.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Cauta_Nod.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
Nod *noduri = NULL;
cudaMalloc(&noduri, XSIZE*YSIZE);
Muchie *muchii = NULL;
cudaMalloc(&muchii, XSIZE*YSIZE);
int *costuri = NULL;
cudaMalloc(&costuri, XSIZE*YSIZE);
int *costTemporal = NULL;
cudaMalloc(&costTemporal, XSIZE*YSIZE);
int *costFinal = NULL;
cudaMalloc(&costFinal, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
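// Round the problem size up to the next multiple of the block dimensions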
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Cauta_Nod<<<gridBlock,threadBlock>>>(noduri,muchii,costuri,costTemporal,costFinal);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Cauta_Nod<<<gridBlock,threadBlock>>>(noduri,muchii,costuri,costTemporal,costFinal);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Cauta_Nod<<<gridBlock,threadBlock>>>(noduri,muchii,costuri,costTemporal,costFinal);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
SortByPhi.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "VeloDefinitions.cuh"
#include "math_constants.h"
#include "Sorting.cuh"
/**
 * @brief Applies the precomputed phi-order permutation to the hit arrays
*/
__device__ void sort_by_phi(
const uint event_hit_start,
const uint event_number_of_hits,
float* hit_Xs,
float* hit_Ys,
float* hit_Zs,
uint* hit_IDs,
int32_t* hit_temp,
uint* hit_permutations
) {
// Let's work with new pointers
// Note: It is important we populate later on in strictly
// the same order, to not lose data
float* new_hit_Xs = (float*) hit_temp;
float* new_hit_Ys = hit_Xs;
float* new_hit_Zs = hit_Ys;
uint* new_hit_IDs = (uint*) hit_Zs;
// Apply permutation across all arrays
apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_Xs, new_hit_Xs);
__syncthreads();
apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_Ys, new_hit_Ys);
__syncthreads();
apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_Zs, new_hit_Zs);
__syncthreads();
apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_IDs, new_hit_IDs);
}
| SortByPhi.cu | #include "VeloDefinitions.cuh"
#include "math_constants.h"
#include "Sorting.cuh"
/**
 * @brief Applies the precomputed phi-order permutation to the hit arrays
*/
__device__ void sort_by_phi(
const uint event_hit_start,
const uint event_number_of_hits,
float* hit_Xs,
float* hit_Ys,
float* hit_Zs,
uint* hit_IDs,
int32_t* hit_temp,
uint* hit_permutations
) {
// Let's work with new pointers
// Note: It is important we populate later on in strictly
// the same order, to not lose data
float* new_hit_Xs = (float*) hit_temp;
float* new_hit_Ys = hit_Xs;
float* new_hit_Zs = hit_Ys;
uint* new_hit_IDs = (uint*) hit_Zs;
// Apply permutation across all arrays
apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_Xs, new_hit_Xs);
__syncthreads();
apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_Ys, new_hit_Ys);
__syncthreads();
apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_Zs, new_hit_Zs);
__syncthreads();
apply_permutation(hit_permutations, event_hit_start, event_number_of_hits, hit_IDs, new_hit_IDs);
}
|
658a4edf384535e241dbb444f58988e8bf543cd2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mult_dist.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *d_A = NULL;
hipMalloc(&d_A, XSIZE*YSIZE);
int *d_B = NULL;
hipMalloc(&d_B, XSIZE*YSIZE);
int *d_C = NULL;
hipMalloc(&d_C, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
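// Round the problem size up to the next multiple of the block dimensions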
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
mult_dist), dim3(gridBlock),dim3(threadBlock), 0, 0, d_A,d_B,d_C);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
mult_dist), dim3(gridBlock),dim3(threadBlock), 0, 0, d_A,d_B,d_C);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
mult_dist), dim3(gridBlock),dim3(threadBlock), 0, 0, d_A,d_B,d_C);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 658a4edf384535e241dbb444f58988e8bf543cd2.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mult_dist.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *d_A = NULL;
cudaMalloc(&d_A, XSIZE*YSIZE);
int *d_B = NULL;
cudaMalloc(&d_B, XSIZE*YSIZE);
int *d_C = NULL;
cudaMalloc(&d_C, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
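// Round the problem size up to the next multiple of the block dimensions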
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mult_dist<<<gridBlock,threadBlock>>>(d_A,d_B,d_C);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mult_dist<<<gridBlock,threadBlock>>>(d_A,d_B,d_C);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mult_dist<<<gridBlock,threadBlock>>>(d_A,d_B,d_C);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ddea3fbeebd2467f825112dcaf9b97a0e8181361.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Pi - CUDA version 1 - uses integers for CUDA kernels
* Author: Felipe Gutierrez, SBEL, July 2015
*/
#include <iostream>
#include <stdio.h> /* fprintf() */
#include <cstdlib> /* malloc and free */
#include <float.h> /* DBL_EPSILON() */
#include <math.h> /* sqrt() */
#include <ctime>
#include "pi-kernel.h"
/* Only add openmp if it will be used */
#if OPENMP_ENABLED
#include <omp.h>
#endif
/**
* @brief CUDA macro
* @details
* If CUDA is enabled we need to define:
* * nthreads = Number of threads per block we want.
*
* * NUMBLOCKS = Gives the number of blocks we want to use to parallelize a problem of
* size n.
*
 * * KERNEL = KERNEL(n) to specify the number of blocks and the number of threads
* per block if CUDA is ENABLED. If CUDA is not enabled then KERNEL(n) is just an empty
* piece of code.
*
*/
#if CUDA_ENABLED
#include "TimerGPU.h"
#include <thrust/reduce.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/system/omp/execution_policy.h>
#define nthreads 1024
#define getGridDim(n) (int)ceil(sqrt(n/nthreads))
#define GRID(n) dim3(getGridDim(n), getGridDim(n))
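    /* A square 2D grid of getGridDim(n) x getGridDim(n) blocks keeps each grid dimension below the 65535 limit checked in calculateArea */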
#define BLOCK(n) dim3(nthreads)
    #define KERNEL(n) <<<GRID(n), BLOCK(n)>>> /* Necessary for kernels */
#else
#include "TimerCPU.h"
    #define KERNEL(n) /* Empty code */
#endif
/**
* @brief calculateAreas kernel
* @details
* * threadId: Index in the areas area. Tells us where to store the calculated area. With
* CUDA this is calculated with threadId and blockId. In serial and OpenMP this is the
* obtained by the for loop counter.
* * x: Current x coordinate
* * heightSq: height of rectangle squared
*
* @param numRects numRects we are going to use to estimate the area under the curve. This defines
* how big our problem size will be. This is the n in KERNEL(n).
*
* @param width of rectangle
*
* @param areas Pre allocated array that will contain areas. --> This array was allocated with
* hipMallocManaged() function which is what leads to UnifiedMemory.
*
* @return fills the areas array
*/
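/* Each rectangle samples the quarter circle y = sqrt(1 - x*x) on [0,1], so the summed area approximates pi/4 */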
#if CUDA_ENABLED
__global__
#endif
void calculateAreas(const long numRects, const double width, double *dev_areas)
{
/* If cuda is enabled calculate the threadId which gives us the index in dev_areas */
#if CUDA_ENABLED
/* Calculate threadId for 1D grid 1D block*/
//int threadId = threadIdx.x + (blockIdx.x * blockDim.x);
/* Calculate threadId for 2D grid 1D block*/
int threadId = (blockIdx.y*gridDim.x + blockIdx.x)*blockDim.x + threadIdx.x;
if(threadId >= numRects)
{
return;
}
#else
/* We don't have to delete the #pragma clause for the serial version of the code. If the program is not compiled with -fopenmp and omp.h is not included the compiler will just ignore the #pragma clause. */
#pragma omp parallel for
    /* Define the for loop if cuda is not enabled. This is used in both the serial and openmp versions */
for(int threadId = 0;threadId < numRects;threadId++)
#endif
{
double x = threadId * width;
double heightSq = 1 - (x*x);
double height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq));
// /* Add Extra computations in order to be able to see the performance difference between CPU and GPU */
// x = sqrt((float)kthreadId) * pow(width,3);
// heightSq = 1 - (x*x);
// height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
dev_areas[threadId] = (width * height);
}
}
void calculateArea(const long numRects, double *area) {
double *hostAreas;
double *deviceAreas;
double *unifiedAreas;
int i;
/////////////////////////////// MEMORY ALLOCATION SECTION ////////////////////////////////////////
/* If CUDA is enabled allocate memory in device either using hipMalloc or hipMallocManaged */
#if CUDA_ENABLED
hipError_t err;
if(getGridDim(numRects) >= 65535)
{
fprintf(stderr, "Error: WAY TOO MANY RECTANGLES. Do you really want to compute more than 4.3979123e+12 rectangles!!!! Please input less rectangles");
return;
}
std::cout << "Grid Dimensions = " << getGridDim(numRects) << std::endl;
#if UNIFIEDMEM_ENABLED
printf("Unified Memory is Enabled. Allocating using hipMallocManaged \n");
err = hipMallocManaged(&unifiedAreas, numRects * sizeof(double));
#else
printf("Unified Memory is NOT Enabled. Allocating using hipMalloc \n");
err = hipMalloc(&deviceAreas, numRects * sizeof(double));
#endif
/* Check for error in device memory allocation */
if (err != hipSuccess)
{
fprintf(stderr, "hipMalloc or hipMallocManaged failed: %s\n", hipGetErrorString(err));
}
/* If CUDA is not enabled we are running on the CPU either serially or with openmp so we allocate memory in the host */
#else
hostAreas = (double*)malloc(numRects * sizeof(double));
if (hostAreas == NULL)
{
fprintf(stderr, "malloc failed!\n");
}
#endif
/////////////////////////////// KERNEL CALL SECTION ////////////////////////////////////////
/* If CUDA is enabled do the kernel and reduce call either with unifiedMemory or with device memory*/
#if CUDA_ENABLED
/* Start all cudaEvents so we can record timings */
GpuTimer kernelTimer("Kernel");
GpuTimer reduceTimer("Reduce");
GpuTimer allTimer("All");
allTimer.Start();
kernelTimer.Start();
#if UNIFIEDMEM_ENABLED
calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), unifiedAreas);
#else
calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), deviceAreas);
#endif
kernelTimer.Stop();
reduceTimer.Start();
#if UNIFIEDMEM_ENABLED
(*area) = thrust::reduce(thrust::hip::par, unifiedAreas, unifiedAreas + numRects);
#else
(*area) = thrust::reduce(thrust::hip::par, deviceAreas, deviceAreas + numRects);
#endif
reduceTimer.Stop();
allTimer.Stop();
kernelTimer.print();
reduceTimer.print();
allTimer.print();
hipFree(deviceAreas);
hipFree(unifiedAreas);
/* If CUDA is not enabled calculateAreas is not a kernel but a normal function. */
#else
/* This kernel call could also be given unifiedMemory as argument but for organization purposes it is called with hostAreas */
CpuTimer kernelTimer("Kernel");
CpuTimer reduceTimer("Reduce");
CpuTimer allTimer("All");
allTimer.Start();
allTimer.Start_cputimer();
kernelTimer.Start();
kernelTimer.Start_cputimer();
calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), hostAreas);
kernelTimer.Stop_cputimer();
kernelTimer.Stop();
(*area) = 0.0;
reduceTimer.Start();
reduceTimer.Start_cputimer();
for (i = 0; i < numRects; i++)
{
(*area) += hostAreas[i];
}
reduceTimer.Stop_cputimer();
reduceTimer.Stop();
allTimer.Stop_cputimer();
allTimer.Stop();
kernelTimer.print();
reduceTimer.print();
allTimer.print();
free(hostAreas);
#endif
///////////////////// GPU OR CPU FREE THE MEMORY ////////////////////
}
#if CUDA_ENABLED
void printDeviceInfo()
{
int device;
struct hipDeviceProp_t props;
hipGetDevice(&device);
hipGetDeviceProperties(&props, device);
std::cout << "Device info: " <<std::endl;
std::cout << "Name: " << props.name <<std::endl;
std::cout << "version: " << props.major << "," << props.minor <<std::endl;
}
#endif | ddea3fbeebd2467f825112dcaf9b97a0e8181361.cu | /* Pi - CUDA version 1 - uses integers for CUDA kernels
* Author: Felipe Gutierrez, SBEL, July 2015
*/
#include <iostream>
#include <stdio.h> /* fprintf() */
#include <cstdlib> /* malloc and free */
#include <float.h> /* DBL_EPSILON() */
#include <math.h> /* sqrt() */
#include <ctime>
#include "pi-kernel.h"
/* Only add openmp if it will be used */
#if OPENMP_ENABLED
#include <omp.h>
#endif
/**
* @brief CUDA macro
* @details
* If CUDA is enabled we need to define:
* * nthreads = Number of threads per block we want.
*
* * NUMBLOCKS = Gives the number of blocks we want to use to parallelize a problem of
* size n.
*
 * * KERNEL = KERNEL(n) to specify the number of blocks and the number of threads
* per block if CUDA is ENABLED. If CUDA is not enabled then KERNEL(n) is just an empty
* piece of code.
*
*/
#if CUDA_ENABLED
#include "TimerGPU.h"
#include <thrust/reduce.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/system/omp/execution_policy.h>
#define nthreads 1024
#define getGridDim(n) (int)ceil(sqrt(n/nthreads))
#define GRID(n) dim3(getGridDim(n), getGridDim(n))
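    /* A square 2D grid of getGridDim(n) x getGridDim(n) blocks keeps each grid dimension below the 65535 limit checked in calculateArea */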
#define BLOCK(n) dim3(nthreads)
#define KERNEL(n) <<<GRID(n), BLOCK(n)>>> /* Necessary for kernels */
#else
#include "TimerCPU.h"
#define KERNEL(n) /* Empty code */
#endif
/**
* @brief calculateAreas kernel
* @details
* * threadId: Index in the areas area. Tells us where to store the calculated area. With
* CUDA this is calculated with threadId and blockId. In serial and OpenMP this is the
* obtained by the for loop counter.
* * x: Current x coordinate
* * heightSq: height of rectangle squared
*
* @param numRects numRects we are going to use to estimate the area under the curve. This defines
* how big our problem size will be. This is the n in KERNEL(n).
*
* @param width of rectangle
*
* @param areas Pre allocated array that will contain areas. --> This array was allocated with
* cudaMallocManaged() function which is what leads to UnifiedMemory.
*
* @return fills the areas array
*/
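/* Each rectangle samples the quarter circle y = sqrt(1 - x*x) on [0,1], so the summed area approximates pi/4 */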
#if CUDA_ENABLED
__global__
#endif
void calculateAreas(const long numRects, const double width, double *dev_areas)
{
/* If cuda is enabled calculate the threadId which gives us the index in dev_areas */
#if CUDA_ENABLED
/* Calculate threadId for 1D grid 1D block*/
//int threadId = threadIdx.x + (blockIdx.x * blockDim.x);
/* Calculate threadId for 2D grid 1D block*/
int threadId = (blockIdx.y*gridDim.x + blockIdx.x)*blockDim.x + threadIdx.x;
if(threadId >= numRects)
{
return;
}
#else
/* We don't have to delete the #pragma clause for the serial version of the code. If the program is not compiled with -fopenmp and omp.h is not included the compiler will just ignore the #pragma clause. */
#pragma omp parallel for
    /* Define the for loop if cuda is not enabled. This is used in both the serial and openmp versions */
for(int threadId = 0;threadId < numRects;threadId++)
#endif
{
double x = threadId * width;
double heightSq = 1 - (x*x);
double height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq));
// /* Add Extra computations in order to be able to see the performance difference between CPU and GPU */
// x = sqrt((float)kthreadId) * pow(width,3);
// heightSq = 1 - (x*x);
// height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
dev_areas[threadId] = (width * height);
}
}
void calculateArea(const long numRects, double *area) {
double *hostAreas;
double *deviceAreas;
double *unifiedAreas;
int i;
/////////////////////////////// MEMORY ALLOCATION SECTION ////////////////////////////////////////
/* If CUDA is enabled allocate memory in device either using cudaMalloc or cudaMallocManaged */
#if CUDA_ENABLED
cudaError_t err;
if(getGridDim(numRects) >= 65535)
{
fprintf(stderr, "Error: WAY TOO MANY RECTANGLES. Do you really want to compute more than 4.3979123e+12 rectangles!!!! Please input less rectangles");
return;
}
std::cout << "Grid Dimensions = " << getGridDim(numRects) << std::endl;
#if UNIFIEDMEM_ENABLED
printf("Unified Memory is Enabled. Allocating using cudaMallocManaged \n");
err = cudaMallocManaged(&unifiedAreas, numRects * sizeof(double));
#else
printf("Unified Memory is NOT Enabled. Allocating using cudaMalloc \n");
err = cudaMalloc(&deviceAreas, numRects * sizeof(double));
#endif
/* Check for error in device memory allocation */
if (err != cudaSuccess)
{
fprintf(stderr, "cudaMalloc or cudaMallocManaged failed: %s\n", cudaGetErrorString(err));
}
/* If CUDA is not enabled we are running on the CPU either serially or with openmp so we allocate memory in the host */
#else
hostAreas = (double*)malloc(numRects * sizeof(double));
if (hostAreas == NULL)
{
fprintf(stderr, "malloc failed!\n");
}
#endif
/////////////////////////////// KERNEL CALL SECTION ////////////////////////////////////////
/* If CUDA is enabled do the kernel and reduce call either with unifiedMemory or with device memory*/
#if CUDA_ENABLED
/* Start all cudaEvents so we can record timings */
GpuTimer kernelTimer("Kernel");
GpuTimer reduceTimer("Reduce");
GpuTimer allTimer("All");
allTimer.Start();
kernelTimer.Start();
#if UNIFIEDMEM_ENABLED
calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), unifiedAreas);
#else
calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), deviceAreas);
#endif
kernelTimer.Stop();
reduceTimer.Start();
#if UNIFIEDMEM_ENABLED
(*area) = thrust::reduce(thrust::cuda::par, unifiedAreas, unifiedAreas + numRects);
#else
(*area) = thrust::reduce(thrust::cuda::par, deviceAreas, deviceAreas + numRects);
#endif
reduceTimer.Stop();
allTimer.Stop();
kernelTimer.print();
reduceTimer.print();
allTimer.print();
cudaFree(deviceAreas);
cudaFree(unifiedAreas);
/* If CUDA is not enabled calculateAreas is not a kernel but a normal function. */
#else
/* This kernel call could also be given unifiedMemory as argument but for organization purposes it is called with hostAreas */
CpuTimer kernelTimer("Kernel");
CpuTimer reduceTimer("Reduce");
CpuTimer allTimer("All");
allTimer.Start();
allTimer.Start_cputimer();
kernelTimer.Start();
kernelTimer.Start_cputimer();
calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), hostAreas);
kernelTimer.Stop_cputimer();
kernelTimer.Stop();
(*area) = 0.0;
reduceTimer.Start();
reduceTimer.Start_cputimer();
for (i = 0; i < numRects; i++)
{
(*area) += hostAreas[i];
}
reduceTimer.Stop_cputimer();
reduceTimer.Stop();
allTimer.Stop_cputimer();
allTimer.Stop();
kernelTimer.print();
reduceTimer.print();
allTimer.print();
free(hostAreas);
#endif
///////////////////// GPU OR CPU FREE THE MEMORY ////////////////////
}
#if CUDA_ENABLED
void printDeviceInfo()
{
int device;
struct cudaDeviceProp props;
cudaGetDevice(&device);
cudaGetDeviceProperties(&props, device);
std::cout << "Device info: " <<std::endl;
std::cout << "Name: " << props.name <<std::endl;
std::cout << "version: " << props.major << "," << props.minor <<std::endl;
}
#endif |
2a8fb4ea8b26a33c24711d98aba7fbaa1eaeadde.hip | // !!! This is a file automatically generated by hipify!!!
#include <f/device/assert/cuda_assert.hpp>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include <cassert>
struct cuda_pattern;
cuda_pattern* make_cuda_pattern( unsigned long n, unsigned long ug_size );
void release_cuda_pattern( cuda_pattern* cp );
void cuda_pattern_register_entry( cuda_pattern* cp, unsigned long index, unsigned long dim, unsigned long* ar, double* diag, double* intensity );
void cuda_pattern_update_ug_thickness( cuda_pattern* cp, double* p );
double cuda_pattern_make_residual( cuda_pattern* cp );
//device
struct individual_pattern
{
//all gpu
unsigned long dim;
double* ug_thickness; //ug_size * 2 + 1
unsigned long* ar;
double* diag;
double* I_exp;
double* I_sim;
double2* A;
double2* S;
double2* S0;
double2* S1;
double2* S2;
double2* S3;
double2* S4;
double2* S5;
double2* S6;
double2* S7;
double2* S8;
double2* S9;
double2* S10;
};
//host
struct pattern_on_gpu
{
int gpu_id;
int pattern_id_start;
int pattern_id_end;
individual_pattern* ip; //cpu -- > gpu ...
double* ug_thickness; //gpu
};
//host
struct cuda_pattern
{
unsigned long total_ug_size; //--> count as complex type
unsigned long total_cuda_pattern;
int total_gpu;
pattern_on_gpu* pog; //cpu
};
void cuda_pattern_update_ug_thickness( cuda_pattern* cp, double* p )
{
//copy to gpu
for ( int index = 0; index != (*cp).total_gpu; ++index )
{
//!!GPU CODE
cuda_assert( hipSetDevice(index) );
        cuda_assert( hipMemcpy( ((*cp).pog)[index].ug_thickness, p, sizeof(double) * ( (*cp).total_ug_size * 2 + 1 ), hipMemcpyHostToDevice ) );
}
}
void release_cuda_pattern( cuda_pattern* cp )
{
for ( int index = 0; index != (*cp).total_gpu; ++index )
{
pattern_on_gpu* the_pog = (*cp).pog + index;
//switch to gpu
//!!GPU CODE
cuda_assert( hipSetDevice((*the_pog).gpu_id) );
cuda_assert( hipFree( (*the_pog).ug_thickness ) );
(*the_pog).ug_thickness = 0;
        int const total_individual_pattern = (*the_pog).pattern_id_end - (*the_pog).pattern_id_start;
for ( int jndex = 0; jndex != total_individual_pattern; ++jndex )
{
individual_pattern* the_individual_pattern = (*the_pog).ip + jndex;
//cuda_assert( hipFree( (*the_individual_pattern).ug_thickness ) );
cuda_assert( hipFree( (*the_individual_pattern).ar ) );
cuda_assert( hipFree( (*the_individual_pattern).diag ) );
cuda_assert( hipFree( (*the_individual_pattern).I_exp ) );
cuda_assert( hipFree( (*the_individual_pattern).I_sim ) );
cuda_assert( hipFree( (*the_individual_pattern).A ) );
cuda_assert( hipFree( (*the_individual_pattern).S ) );
cuda_assert( hipFree( (*the_individual_pattern).S0 ) );
cuda_assert( hipFree( (*the_individual_pattern).S1 ) );
cuda_assert( hipFree( (*the_individual_pattern).S2 ) );
cuda_assert( hipFree( (*the_individual_pattern).S3 ) );
cuda_assert( hipFree( (*the_individual_pattern).S4 ) );
cuda_assert( hipFree( (*the_individual_pattern).S5 ) );
cuda_assert( hipFree( (*the_individual_pattern).S6 ) );
cuda_assert( hipFree( (*the_individual_pattern).S7 ) );
cuda_assert( hipFree( (*the_individual_pattern).S8 ) );
cuda_assert( hipFree( (*the_individual_pattern).S9 ) );
cuda_assert( hipFree( (*the_individual_pattern).S10 ) );
//(*the_individual_pattern).ug_thickness = 0;
(*the_individual_pattern).ar = 0;
(*the_individual_pattern).diag = 0;
(*the_individual_pattern).I_exp = 0;
(*the_individual_pattern).I_sim = 0;
(*the_individual_pattern).A = 0;
(*the_individual_pattern).S = 0;
(*the_individual_pattern).S0 = 0;
(*the_individual_pattern).S1 = 0;
(*the_individual_pattern).S2 = 0;
(*the_individual_pattern).S3 = 0;
(*the_individual_pattern).S4 = 0;
(*the_individual_pattern).S5 = 0;
(*the_individual_pattern).S6 = 0;
(*the_individual_pattern).S7 = 0;
(*the_individual_pattern).S8 = 0;
(*the_individual_pattern).S9 = 0;
(*the_individual_pattern).S10 = 0;
}
delete[] (*the_pog).ip;
}
delete[] (*cp).pog;
(*cp).pog = 0;
delete cp;
cp = 0;
}
void cuda_pattern_register_entry( cuda_pattern* cp, unsigned long index, unsigned long dim, unsigned long* ar, double* diag, double* intensity )
{
//find coresponding individual_pattern
int gpu_index = 0;
for ( gpu_index = 0; gpu_index != (*cp).total_gpu; ++gpu_index )
if ( index < ((*cp).pog)[gpu_index].pattern_id_end )
break;
pattern_on_gpu* the_pog = (*cp).pog + gpu_index;
int const pattern_index = index - (*the_pog).pattern_id_start;
individual_pattern* the_individual_pattern = (*the_pog).ip + pattern_index;
//!!GPU CODE
//switch to the current gpu
cuda_assert( hipSetDevice(gpu_index) );
//allocate memory
//cuda_assert( hipMalloc( &( (*the_individual_pattern).ug_thickness ), sizeof(double) * ( (*cp).total_ug_size * 2 + 1 ) ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).ar ), sizeof(unsigned long) * dim * dim ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).diag ), sizeof(double) * dim ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).I_exp ), sizeof(double) * dim ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).I_sim ), sizeof(double) * dim ) );
    cuda_assert( hipMalloc( &( (*the_individual_pattern).A ), sizeof(double2) * dim ) ); //A is declared double2*, so allocate double2 elements
cuda_assert( hipMalloc( &( (*the_individual_pattern).S ), sizeof(double2) * dim ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).S0 ), sizeof(double2) * dim ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).S1 ), sizeof(double2) * dim ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).S2 ), sizeof(double2) * dim ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).S3 ), sizeof(double2) * dim ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).S4 ), sizeof(double2) * dim ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).S5 ), sizeof(double2) * dim ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).S6 ), sizeof(double2) * dim ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).S7 ), sizeof(double2) * dim ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).S8 ), sizeof(double2) * dim ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).S9 ), sizeof(double2) * dim ) );
cuda_assert( hipMalloc( &( (*the_individual_pattern).S10 ), sizeof(double2) * dim ) );
//copy ar
cuda_assert( hipMemcpy( (*the_individual_pattern).ar, ar, sizeof(unsigned long)*dim*dim, hipMemcpyHostToDevice ) );
//copy diag
cuda_assert( hipMemcpy( (*the_individual_pattern).diag, diag, sizeof(double)*dim, hipMemcpyHostToDevice ) );
//copy intensity
cuda_assert( hipMemcpy( (*the_individual_pattern).I_exp, intensity, sizeof(double)*dim, hipMemcpyHostToDevice ) );
}
cuda_pattern* make_cuda_pattern( unsigned long n, unsigned long ug_size )
{
int total_gpus_avaliable = 0;
cuda_assert( hipGetDeviceCount(&total_gpus_avaliable) );
assert( total_gpus_avaliable > 0 );
int const patterns_per_gpu = ( n + total_gpus_avaliable - 1 ) / total_gpus_avaliable;
int const patterns_on_last_gpu = n - patterns_per_gpu * ( total_gpus_avaliable - 1 );
//cuda_pattern
cuda_pattern* cp = new cuda_pattern;
(*cp).total_ug_size = ug_size;
(*cp).total_gpu = total_gpus_avaliable;
(*cp).total_cuda_pattern = n;
(*cp).pog = new pattern_on_gpu[total_gpus_avaliable];
//pattern on gpu
for ( int index = 0; index != total_gpus_avaliable - 1; ++index ) //will skip if only one gpu
{
((*cp).pog)[index].gpu_id = index;
        ((*cp).pog)[index].pattern_id_start = index * patterns_per_gpu;
        ((*cp).pog)[index].pattern_id_end = ( index + 1 ) * patterns_per_gpu;
((*cp).pog)[index].ip = new individual_pattern[patterns_per_gpu];
//!!GPU CODE
cuda_assert( hipSetDevice(index) );
cuda_assert( hipMalloc( &( ((*cp).pog)[index].ug_thickness ), sizeof(double) * ( (*cp).total_ug_size * 2 + 1 ) ) );
}
if ( patterns_on_last_gpu > 0 ) //the last gpu
{
int const index = total_gpus_avaliable - 1;
((*cp).pog)[index].gpu_id = index;
        ((*cp).pog)[index].pattern_id_start = index * patterns_per_gpu;
        ((*cp).pog)[index].pattern_id_end = ( index + 1 ) * patterns_per_gpu;
((*cp).pog)[index].ip = new individual_pattern[patterns_per_gpu];
//!!GPU CODE
cuda_assert( hipSetDevice(index) );
cuda_assert( hipMalloc( &( ((*cp).pog)[index].ug_thickness ), sizeof(double) * ( (*cp).total_ug_size * 2 + 1 ) ) );
}
//individual_pattern
    return cp;
}
| 2a8fb4ea8b26a33c24711d98aba7fbaa1eaeadde.cu | #include <f/device/assert/cuda_assert.hpp>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuComplex.h>
#include <cassert>
struct cuda_pattern;
cuda_pattern* make_cuda_pattern( unsigned long n, unsigned long ug_size );
void release_cuda_pattern( cuda_pattern* cp );
void cuda_pattern_register_entry( cuda_pattern* cp, unsigned long index, unsigned long dim, unsigned long* ar, double* diag, double* intensity );
void cuda_pattern_update_ug_thickness( cuda_pattern* cp, double* p );
double cuda_pattern_make_residual( cuda_pattern* cp );
double cuda_pattern_make_residual( cuda_pattern* cp );
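//A minimal host-side call sequence sketched from the declarations above; the
//buffer names (ar_i, diag_i, intensity_i, ug_thickness) are illustrative only:
//  cuda_pattern* cp = make_cuda_pattern( n_patterns, ug_size );
//  for ( unsigned long i = 0; i != n_patterns; ++i )
//      cuda_pattern_register_entry( cp, i, dim, ar_i, diag_i, intensity_i );
//  cuda_pattern_update_ug_thickness( cp, ug_thickness ); //ug_size*2+1 doubles
//  double residual = cuda_pattern_make_residual( cp );
//  release_cuda_pattern( cp );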
//device
struct individual_pattern
{
//all gpu
unsigned long dim;
double* ug_thickness; //ug_size * 2 + 1
unsigned long* ar;
double* diag;
double* I_exp;
double* I_sim;
double2* A;
double2* S;
double2* S0;
double2* S1;
double2* S2;
double2* S3;
double2* S4;
double2* S5;
double2* S6;
double2* S7;
double2* S8;
double2* S9;
double2* S10;
};
//host
struct pattern_on_gpu
{
int gpu_id;
int pattern_id_start;
int pattern_id_end;
individual_pattern* ip; //cpu -- > gpu ...
double* ug_thickness; //gpu
};
//host
struct cuda_pattern
{
unsigned long total_ug_size; //--> count as complex type
unsigned long total_cuda_pattern;
int total_gpu;
pattern_on_gpu* pog; //cpu
};
void cuda_pattern_update_ug_thickness( cuda_pattern* cp, double* p )
{
//copy to gpu
for ( int index = 0; index != (*cp).total_gpu; ++index )
{
//!!GPU CODE
cuda_assert( cudaSetDevice(index) );
        cuda_assert( cudaMemcpy( ((*cp).pog)[index].ug_thickness, p, sizeof(double) * ( (*cp).total_ug_size * 2 + 1 ), cudaMemcpyHostToDevice ) ); //copy size matches the ug_thickness allocation; 'dim' is not visible in this scope
}
}
void release_cuda_pattern( cuda_pattern* cp )
{
for ( int index = 0; index != (*cp).total_gpu; ++index )
{
pattern_on_gpu* the_pog = (*cp).pog + index;
//switch to gpu
//!!GPU CODE
cuda_assert( cudaSetDevice((*the_pog).gpu_id) );
cuda_assert( cudaFree( (*the_pog).ug_thickness ) );
(*the_pog).ug_thickness = 0;
        int const total_individual_pattern = (*the_pog).pattern_id_end - (*the_pog).pattern_id_start;
for ( int jndex = 0; jndex != total_individual_pattern; ++jndex )
{
individual_pattern* the_individual_pattern = (*the_pog).ip + jndex;
//cuda_assert( cudaFree( (*the_individual_pattern).ug_thickness ) );
cuda_assert( cudaFree( (*the_individual_pattern).ar ) );
cuda_assert( cudaFree( (*the_individual_pattern).diag ) );
cuda_assert( cudaFree( (*the_individual_pattern).I_exp ) );
cuda_assert( cudaFree( (*the_individual_pattern).I_sim ) );
cuda_assert( cudaFree( (*the_individual_pattern).A ) );
cuda_assert( cudaFree( (*the_individual_pattern).S ) );
cuda_assert( cudaFree( (*the_individual_pattern).S0 ) );
cuda_assert( cudaFree( (*the_individual_pattern).S1 ) );
cuda_assert( cudaFree( (*the_individual_pattern).S2 ) );
cuda_assert( cudaFree( (*the_individual_pattern).S3 ) );
cuda_assert( cudaFree( (*the_individual_pattern).S4 ) );
cuda_assert( cudaFree( (*the_individual_pattern).S5 ) );
cuda_assert( cudaFree( (*the_individual_pattern).S6 ) );
cuda_assert( cudaFree( (*the_individual_pattern).S7 ) );
cuda_assert( cudaFree( (*the_individual_pattern).S8 ) );
cuda_assert( cudaFree( (*the_individual_pattern).S9 ) );
cuda_assert( cudaFree( (*the_individual_pattern).S10 ) );
//(*the_individual_pattern).ug_thickness = 0;
(*the_individual_pattern).ar = 0;
(*the_individual_pattern).diag = 0;
(*the_individual_pattern).I_exp = 0;
(*the_individual_pattern).I_sim = 0;
(*the_individual_pattern).A = 0;
(*the_individual_pattern).S = 0;
(*the_individual_pattern).S0 = 0;
(*the_individual_pattern).S1 = 0;
(*the_individual_pattern).S2 = 0;
(*the_individual_pattern).S3 = 0;
(*the_individual_pattern).S4 = 0;
(*the_individual_pattern).S5 = 0;
(*the_individual_pattern).S6 = 0;
(*the_individual_pattern).S7 = 0;
(*the_individual_pattern).S8 = 0;
(*the_individual_pattern).S9 = 0;
(*the_individual_pattern).S10 = 0;
}
delete[] (*the_pog).ip;
}
delete[] (*cp).pog;
(*cp).pog = 0;
delete cp;
cp = 0;
}
void cuda_pattern_register_entry( cuda_pattern* cp, unsigned long index, unsigned long dim, unsigned long* ar, double* diag, double* intensity )
{
    //find corresponding individual_pattern
int gpu_index = 0;
for ( gpu_index = 0; gpu_index != (*cp).total_gpu; ++gpu_index )
if ( index < ((*cp).pog)[gpu_index].pattern_id_end )
break;
pattern_on_gpu* the_pog = (*cp).pog + gpu_index;
int const pattern_index = index - (*the_pog).pattern_id_start;
individual_pattern* the_individual_pattern = (*the_pog).ip + pattern_index;
//!!GPU CODE
//switch to the current gpu
cuda_assert( cudaSetDevice(gpu_index) );
//allocate memory
//cuda_assert( cudaMalloc( &( (*the_individual_pattern).ug_thickness ), sizeof(double) * ( (*cp).total_ug_size * 2 + 1 ) ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).ar ), sizeof(unsigned long) * dim * dim ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).diag ), sizeof(double) * dim ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).I_exp ), sizeof(double) * dim ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).I_sim ), sizeof(double) * dim ) );
    cuda_assert( cudaMalloc( &( (*the_individual_pattern).A ), sizeof(double2) * dim ) ); //A is declared double2*, so allocate double2 elements
cuda_assert( cudaMalloc( &( (*the_individual_pattern).S ), sizeof(double2) * dim ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).S0 ), sizeof(double2) * dim ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).S1 ), sizeof(double2) * dim ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).S2 ), sizeof(double2) * dim ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).S3 ), sizeof(double2) * dim ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).S4 ), sizeof(double2) * dim ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).S5 ), sizeof(double2) * dim ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).S6 ), sizeof(double2) * dim ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).S7 ), sizeof(double2) * dim ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).S8 ), sizeof(double2) * dim ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).S9 ), sizeof(double2) * dim ) );
cuda_assert( cudaMalloc( &( (*the_individual_pattern).S10 ), sizeof(double2) * dim ) );
//copy ar
cuda_assert( cudaMemcpy( (*the_individual_pattern).ar, ar, sizeof(unsigned long)*dim*dim, cudaMemcpyHostToDevice ) );
//copy diag
cuda_assert( cudaMemcpy( (*the_individual_pattern).diag, diag, sizeof(double)*dim, cudaMemcpyHostToDevice ) );
//copy intensity
cuda_assert( cudaMemcpy( (*the_individual_pattern).I_exp, intensity, sizeof(double)*dim, cudaMemcpyHostToDevice ) );
}
cuda_pattern* make_cuda_pattern( unsigned long n, unsigned long ug_size )
{
int total_gpus_avaliable = 0;
cuda_assert( cudaGetDeviceCount(&total_gpus_avaliable) );
assert( total_gpus_avaliable > 0 );
int const patterns_per_gpu = ( n + total_gpus_avaliable - 1 ) / total_gpus_avaliable;
int const patterns_on_last_gpu = n - patterns_per_gpu * ( total_gpus_avaliable - 1 );
//cuda_pattern
cuda_pattern* cp = new cuda_pattern;
(*cp).total_ug_size = ug_size;
(*cp).total_gpu = total_gpus_avaliable;
(*cp).total_cuda_pattern = n;
(*cp).pog = new pattern_on_gpu[total_gpus_avaliable];
//pattern on gpu
for ( int index = 0; index != total_gpus_avaliable - 1; ++index ) //will skip if only one gpu
{
((*cp).pog)[index].gpu_id = index;
        ((*cp).pog)[index].pattern_id_start = index * patterns_per_gpu;
        ((*cp).pog)[index].pattern_id_end = ( index + 1 ) * patterns_per_gpu;
((*cp).pog)[index].ip = new individual_pattern[patterns_per_gpu];
//!!GPU CODE
cuda_assert( cudaSetDevice(index) );
cuda_assert( cudaMalloc( &( ((*cp).pog)[index].ug_thickness ), sizeof(double) * ( (*cp).total_ug_size * 2 + 1 ) ) );
}
if ( patterns_on_last_gpu > 0 ) //the last gpu
{
int const index = total_gpus_avaliable - 1;
((*cp).pog)[index].gpu_id = index;
        ((*cp).pog)[index].pattern_id_start = index * patterns_per_gpu;
        ((*cp).pog)[index].pattern_id_end = ( index + 1 ) * patterns_per_gpu;
((*cp).pog)[index].ip = new individual_pattern[patterns_per_gpu];
//!!GPU CODE
cuda_assert( cudaSetDevice(index) );
cuda_assert( cudaMalloc( &( ((*cp).pog)[index].ug_thickness ), sizeof(double) * ( (*cp).total_ug_size * 2 + 1 ) ) );
}
//individual_pattern
retun cp;
}
|
b76ed6f531a6d695c24f55e5259baf61a8c07a12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2018 Foundation for Research and Technology - Hellas
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "../include/darkGrayArgs.h"
#include "../include/cu_darkGray.h"
#include "VineLibUtilsGPU.h"
#include <chrono>
/* Kernel for the device */
__global__ void rgb_gray(const int width, const int height,
const unsigned char *inputImage,
unsigned char *darkGrayImage) {
int x;
int y;
//volatile int i;
//for(i=0; i<100; ++i) {
// calculate the thread index for both x, y, by the use of the dimension
// of the block the id of the current block and the id of the thread
y = blockDim.y * blockIdx.y + threadIdx.y;
x = blockDim.x * blockIdx.x + threadIdx.x;
// check if we are out of bounds
  if (x >= width || y >= height) {
    return;
  }
// do the transformation
float grayPix = 0.0f;
float r = static_cast<float>(inputImage[(y * width) + x]);
float g = static_cast<float>(inputImage[(width * height) + (y * width) + x]);
float b =
static_cast<float>(inputImage[(2 * width * height) + (y * width) + x]);
grayPix = ((0.3f * r) + (0.59f * g) + (0.11f * b));
grayPix = (grayPix * 0.6f) + 0.5f;
darkGrayImage[(y * width) + x] = static_cast<unsigned char>(grayPix);
//}
}
/* Function that calls the kernel*/
void cu_darkGray(const int width, const int height,
const unsigned char *inputImageDev,
unsigned char *darkGrayImageDev) {
int wBlock = static_cast<unsigned int>(ceil(width / static_cast<float>(32)));
int hBlock = static_cast<unsigned int>(ceil(height / static_cast<float>(16)));
dim3 dimGrid(wBlock, hBlock);
dim3 dimBlock(32, 16);
/* Kernel call */
  rgb_gray<<<dimGrid, dimBlock>>>(width, height, inputImageDev, darkGrayImageDev);
}
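/* A standalone usage sketch for cu_darkGray, separate from the VineTalk path
 * below; the pointer names are illustrative. The kernel reads the input as
 * planar R, G, B, so the input buffer holds 3*width*height bytes and the
 * output width*height bytes:
 *
 *   unsigned char *d_in = 0, *d_out = 0;
 *   hipMalloc(&d_in, 3 * width * height);
 *   hipMalloc(&d_out, width * height);
 *   hipMemcpy(d_in, host_rgb, 3 * width * height, hipMemcpyHostToDevice);
 *   cu_darkGray(width, height, d_in, d_out);
 *   hipMemcpy(host_gray, d_out, width * height, hipMemcpyDeviceToHost);
 */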
/* Contains the code that is executed in Host*/
vine_task_state_e hostCode(vine_task_msg_s *vine_task) {
std::chrono::time_point<std::chrono::system_clock> start, end;
std::vector<void *> ioVector;
darkGrayArgs *argsCuda;
cout << "cu_darkgray execution in GPU." << endl;
/* Get the actual arguments*/
argsCuda = (darkGrayArgs *)vine_data_deref(vine_task->args.vine_data);
/* Allocate memory in the device and transfer data */
if (!Host2GPU(vine_task, ioVector)) {
cerr << "Host2GPU" << endl;
cerr << __FILE__ << " Failed at " << __LINE__ << endl;
return (task_failed);
}
#ifdef TIMERS_ENABLED
start = std::chrono::system_clock::now();
#endif
/* Call the function that calls the kernel */
cu_darkGray(argsCuda->width, argsCuda->height, (unsigned char *)ioVector[0],
(unsigned char *)ioVector[1]);
#ifdef TIMERS_ENABLED
end = std::chrono::system_clock::now();
std::time_t end_time = std::chrono::system_clock::to_time_t(end);
std::chrono::duration<double, std::nano> elapsed_seconds = end - start;
cout << "DarkGray kernel execution time: " << elapsed_seconds.count()
<< " nanosecs." << endl;
#endif
/* Check for cuda errors*/
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
cerr << __FILE__ << " Failed at " << __LINE__ << endl;
printf("Error: %s\n", hipGetErrorString(err));
return (task_failed);
}
/* Copy back the result from GPU*/
if (! (GPU2Host(vine_task, ioVector)) ) {
cerr << "GPU2Host" << endl;
cerr << __FILE__ << " Failed at " << __LINE__ << endl;
return (task_failed);
}
/* Free device memory*/
if (!GPUMemFree(ioVector)) {
cerr << "GPUMemFree" << endl;
cerr << __FILE__ << " Failed at " << __LINE__ << endl;
return (task_failed);
}
/* Execution was successful*/
return vine_task_stat(vine_task, 0);
}
/* register the function to the array for this .so*/
VINE_PROC_LIST_START()
VINE_PROCEDURE("darkGray", GPU, hostCode, sizeof(darkGrayArgs))
VINE_PROC_LIST_END()
| b76ed6f531a6d695c24f55e5259baf61a8c07a12.cu | /*
* Copyright 2018 Foundation for Research and Technology - Hellas
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "../include/darkGrayArgs.h"
#include "../include/cu_darkGray.h"
#include "VineLibUtilsGPU.h"
#include <chrono>
/* Kernel for the device */
__global__ void rgb_gray(const int width, const int height,
const unsigned char *inputImage,
unsigned char *darkGrayImage) {
int x;
int y;
//volatile int i;
//for(i=0; i<100; ++i) {
// calculate the thread index for both x, y, by the use of the dimension
// of the block the id of the current block and the id of the thread
y = blockDim.y * blockIdx.y + threadIdx.y;
x = blockDim.x * blockIdx.x + threadIdx.x;
// check if we are out of bounds
  if (x >= width || y >= height) {
    return;
  }
// do the transformation
float grayPix = 0.0f;
float r = static_cast<float>(inputImage[(y * width) + x]);
float g = static_cast<float>(inputImage[(width * height) + (y * width) + x]);
float b =
static_cast<float>(inputImage[(2 * width * height) + (y * width) + x]);
grayPix = ((0.3f * r) + (0.59f * g) + (0.11f * b));
grayPix = (grayPix * 0.6f) + 0.5f;
darkGrayImage[(y * width) + x] = static_cast<unsigned char>(grayPix);
//}
}
/* Function that calls the kernel*/
void cu_darkGray(const int width, const int height,
const unsigned char *inputImageDev,
unsigned char *darkGrayImageDev) {
int wBlock = static_cast<unsigned int>(ceil(width / static_cast<float>(32)));
int hBlock = static_cast<unsigned int>(ceil(height / static_cast<float>(16)));
dim3 dimGrid(wBlock, hBlock);
dim3 dimBlock(32, 16);
/* Kernel call */
  rgb_gray<<<dimGrid, dimBlock>>>(width, height, inputImageDev, darkGrayImageDev);
}
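/* A standalone usage sketch for cu_darkGray, separate from the VineTalk path
 * below; the pointer names are illustrative. The kernel reads the input as
 * planar R, G, B, so the input buffer holds 3*width*height bytes and the
 * output width*height bytes:
 *
 *   unsigned char *d_in = 0, *d_out = 0;
 *   cudaMalloc(&d_in, 3 * width * height);
 *   cudaMalloc(&d_out, width * height);
 *   cudaMemcpy(d_in, host_rgb, 3 * width * height, cudaMemcpyHostToDevice);
 *   cu_darkGray(width, height, d_in, d_out);
 *   cudaMemcpy(host_gray, d_out, width * height, cudaMemcpyDeviceToHost);
 */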
/* Contains the code that is executed in Host*/
vine_task_state_e hostCode(vine_task_msg_s *vine_task) {
std::chrono::time_point<std::chrono::system_clock> start, end;
std::vector<void *> ioVector;
darkGrayArgs *argsCuda;
cout << "cu_darkgray execution in GPU." << endl;
/* Get the actual arguments*/
argsCuda = (darkGrayArgs *)vine_data_deref(vine_task->args.vine_data);
/* Allocate memory in the device and transfer data */
if (!Host2GPU(vine_task, ioVector)) {
cerr << "Host2GPU" << endl;
cerr << __FILE__ << " Failed at " << __LINE__ << endl;
return (task_failed);
}
#ifdef TIMERS_ENABLED
start = std::chrono::system_clock::now();
#endif
/* Call the function that calls the kernel */
cu_darkGray(argsCuda->width, argsCuda->height, (unsigned char *)ioVector[0],
(unsigned char *)ioVector[1]);
#ifdef TIMERS_ENABLED
end = std::chrono::system_clock::now();
std::time_t end_time = std::chrono::system_clock::to_time_t(end);
std::chrono::duration<double, std::nano> elapsed_seconds = end - start;
cout << "DarkGray kernel execution time: " << elapsed_seconds.count()
<< " nanosecs." << endl;
#endif
/* Check for cuda errors*/
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
cerr << __FILE__ << " Failed at " << __LINE__ << endl;
printf("Error: %s\n", cudaGetErrorString(err));
return (task_failed);
}
/* Copy back the result from GPU*/
if (! (GPU2Host(vine_task, ioVector)) ) {
cerr << "GPU2Host" << endl;
cerr << __FILE__ << " Failed at " << __LINE__ << endl;
return (task_failed);
}
/* Free device memory*/
if (!GPUMemFree(ioVector)) {
cerr << "GPUMemFree" << endl;
cerr << __FILE__ << " Failed at " << __LINE__ << endl;
return (task_failed);
}
/* Execution was successful*/
return vine_task_stat(vine_task, 0);
}
/* register the function to the array for this .so*/
VINE_PROC_LIST_START()
VINE_PROCEDURE("darkGray", GPU, hostCode, sizeof(darkGrayArgs))
VINE_PROC_LIST_END()
|
5e7df886a3676a028ef4c9fbe7c2f2358751acc8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
// ----------------------------------------------------------------------------
// kernel helper
// ----------------------------------------------------------------------------
template <typename T>
__global__ void k_set(T* ptr, size_t N, T value) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
ptr[i] = value;
}
}
template <typename T>
__global__ void k_single_set(T* ptr, int i, T value) {
ptr[i] = value;
}
template <typename T>
__global__ void k_add(T* ptr, size_t N, T value) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
ptr[i] += value;
}
}
template <typename T>
__global__ void k_single_add(T* ptr, int i, T value) {
ptr[i] += value;
}
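// The helpers above are launched through tf::cudaFlow::kernel(grid, block,
// shared_memory, kernel, args...) in the test cases below.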
// --------------------------------------------------------
// Testcase: Empty
// --------------------------------------------------------
TEST_CASE("Empty" * doctest::timeout(300)) {
std::atomic<int> counter{0};
tf::Taskflow taskflow;
tf::Executor executor;
taskflow.emplace([&](tf::cudaFlow&){
++counter;
});
taskflow.emplace([&](tf::cudaFlow&){
++counter;
});
taskflow.emplace([&](tf::cudaFlow&){
++counter;
});
executor.run_n(taskflow, 100).wait();
REQUIRE(counter == 300);
}
// --------------------------------------------------------
// Testcase: Set
// --------------------------------------------------------
template <typename T>
void set() {
for(unsigned n=1; n<=123456; n = n*2 + 1) {
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_set<T>, gpu, n, (T)17);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)17);
}
std::free(cpu);
REQUIRE(hipFree(gpu) == hipSuccess);
}
}
TEST_CASE("Set.i8" * doctest::timeout(300)) {
set<int8_t>();
}
TEST_CASE("Set.i16" * doctest::timeout(300)) {
set<int16_t>();
}
TEST_CASE("Set.i32" * doctest::timeout(300)) {
set<int32_t>();
}
// --------------------------------------------------------
// Testcase: Add
// --------------------------------------------------------
template <typename T>
void add() {
for(unsigned n=1; n<=123456; n = n*2 + 1) {
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto ad1 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 1);
auto ad2 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 2);
auto ad3 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 3);
auto ad4 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 4);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(ad1);
ad1.precede(ad2);
ad2.precede(ad3);
ad3.precede(ad4);
ad4.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == 10);
}
std::free(cpu);
REQUIRE(hipFree(gpu) == hipSuccess);
}
}
TEST_CASE("Add.i8" * doctest::timeout(300)) {
add<int8_t>();
}
TEST_CASE("Add.i16" * doctest::timeout(300)) {
add<int16_t>();
}
TEST_CASE("Add.i32" * doctest::timeout(300)) {
add<int32_t>();
}
// TODO: 64-bit fail?
//TEST_CASE("Add.i64" * doctest::timeout(300)) {
// add<int64_t>();
//}
// --------------------------------------------------------
// Testcase: Binary Set
// --------------------------------------------------------
template <typename T>
void bset() {
const unsigned n = 10000;
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {1, 1, 1};
dim3 b = {1, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto d2h = cf.copy(cpu, gpu, n);
std::vector<tf::cudaTask> tasks(n+1);
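      // Each kernel writes one element; making task i/2 precede task i links the
      // n kernels into a binary tree, and every kernel is fenced between h2d and d2h.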
for(unsigned i=1; i<=n; ++i) {
tasks[i] = cf.kernel(g, b, 0, k_single_set<T>, gpu, i-1, (T)17);
auto p = i/2;
if(p != 0) {
tasks[p].precede(tasks[i]);
}
tasks[i].precede(d2h);
h2d.precede(tasks[i]);
}
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)17);
}
std::free(cpu);
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("BSet.i8" * doctest::timeout(300)) {
bset<int8_t>();
}
TEST_CASE("BSet.i16" * doctest::timeout(300)) {
bset<int16_t>();
}
TEST_CASE("BSet.i32" * doctest::timeout(300)) {
bset<int32_t>();
}
// --------------------------------------------------------
// Testcase: Memset
// --------------------------------------------------------
TEST_CASE("Memset") {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 100;
int* cpu = new int [N];
int* gpu = nullptr;
REQUIRE(hipMalloc(&gpu, N*sizeof(int)) == hipSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = 999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<int>, gpu, N, 123);
auto zero = cf.memset(gpu+start, 0x3f, (N-start)*sizeof(int));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(cpu[i] == 123);
}
for(int i=start; i<N; ++i) {
REQUIRE(cpu[i] == 0x3f3f3f3f);
}
}
delete [] cpu;
REQUIRE(hipFree(gpu) == hipSuccess);
}
// --------------------------------------------------------
// Testcase: Memset0
// --------------------------------------------------------
template <typename T>
void memset0() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 97;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(hipMalloc(&gpu, N*sizeof(T)) == hipSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto zero = cf.memset(gpu+start, (T)0, (N-start)*sizeof(T));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(::fabs(cpu[i] - (T)0) < 1e-4);
}
}
delete [] cpu;
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("Memset0.i8") {
memset0<int8_t>();
}
TEST_CASE("Memset0.i16") {
memset0<int16_t>();
}
TEST_CASE("Memset0.i32") {
memset0<int32_t>();
}
TEST_CASE("Memset0.f32") {
memset0<float>();
}
TEST_CASE("Memset0.f64") {
memset0<double>();
}
// --------------------------------------------------------
// Testcase: Memcpy
// --------------------------------------------------------
template <typename T>
void memcpy() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 97;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(hipMalloc(&gpu, N*sizeof(T)) == hipSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto zero = cf.memset(gpu+start, (T)0, (N-start)*sizeof(T));
auto copy = cf.memcpy(cpu, gpu, N*sizeof(T));
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(::fabs(cpu[i] - (T)0) < 1e-4);
}
}
delete [] cpu;
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("Memcpy.i8") {
memcpy<int8_t>();
}
TEST_CASE("Memcpy.i16") {
memcpy<int16_t>();
}
TEST_CASE("Memcpy.i32") {
memcpy<int32_t>();
}
TEST_CASE("Memcpy.f32") {
memcpy<float>();
}
TEST_CASE("Memcpy.f64") {
memcpy<double>();
}
// --------------------------------------------------------
// Testcase: fill
// --------------------------------------------------------
template <typename T>
void fill(T value) {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 107;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(hipMalloc(&gpu, N*sizeof(T)) == hipSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto fill = cf.fill(gpu+start, value, (N-start));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(fill);
fill.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(::fabs(cpu[i] - value) < 1e-4);
}
}
delete [] cpu;
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("Fill.i8") {
fill<int8_t>(+123);
fill<int8_t>(-123);
}
TEST_CASE("Fill.i16") {
fill<int16_t>(+12345);
fill<int16_t>(-12345);
}
TEST_CASE("Fill.i32") {
fill<int32_t>(+123456789);
fill<int32_t>(-123456789);
}
TEST_CASE("Fill.f32") {
fill<float>(+123456789.0f);
fill<float>(-123456789.0f);
}
// --------------------------------------------------------
// Testcase: Zero
// --------------------------------------------------------
template <typename T>
void zero() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 100;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(hipMalloc(&gpu, N*sizeof(T)) == hipSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto zero = cf.zero(gpu+start, (N-start));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(::fabs(cpu[i] - (T)0) < 1e-4);
}
}
delete [] cpu;
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("Zero.i8") {
zero<int8_t>();
}
TEST_CASE("Zero.i16") {
zero<int16_t>();
}
TEST_CASE("Zero.i32") {
zero<int32_t>();
}
TEST_CASE("Zero.f32") {
zero<float>();
}
// --------------------------------------------------------
// Testcase: Barrier
// --------------------------------------------------------
template <typename T>
void barrier() {
const unsigned n = 1000;
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {1, 1, 1};
dim3 b = {1, 1, 1};
auto br1 = cf.noop();
auto br2 = cf.noop();
auto br3 = cf.noop();
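    // br1..br3 are empty nodes used purely as barriers: every "set" kernel runs
    // between br1 and br2, and every "add" kernel runs between br2 and br3.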
auto h2d = cf.copy(gpu, cpu, n);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(br1);
for(unsigned i=0; i<n; ++i) {
auto k1 = cf.kernel(g, b, 0, k_single_set<T>, gpu, i, (T)17);
k1.succeed(br1)
.precede(br2);
auto k2 = cf.kernel(g, b, 0, k_single_add<T>, gpu, i, (T)3);
k2.succeed(br2)
.precede(br3);
}
br3.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)20);
}
std::free(cpu);
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("Barrier.i8" * doctest::timeout(300)) {
barrier<int8_t>();
}
TEST_CASE("Barrier.i16" * doctest::timeout(300)) {
barrier<int16_t>();
}
TEST_CASE("Barrier.i32" * doctest::timeout(300)) {
barrier<int32_t>();
}
// ----------------------------------------------------------------------------
// NestedRuns
// ----------------------------------------------------------------------------
TEST_CASE("NestedRuns") {
int* cpu = nullptr;
int* gpu = nullptr;
constexpr unsigned n = 1000;
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
struct A {
tf::Executor executor;
tf::Taskflow taskflow;
void run(int* cpu, int* gpu, unsigned n) {
taskflow.clear();
auto A1 = taskflow.emplace([&](tf::cudaFlow& cf) {
cf.copy(gpu, cpu, n);
});
auto A2 = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
});
auto A3 = taskflow.emplace([&] (tf::cudaFlow& cf) {
cf.copy(cpu, gpu, n);
});
A1.precede(A2);
A2.precede(A3);
executor.run_n(taskflow, 10).wait();
}
};
struct B {
tf::Taskflow taskflow;
tf::Executor executor;
A a;
void run(int* cpu, int* gpu, unsigned n) {
taskflow.clear();
auto B0 = taskflow.emplace([] () {});
auto B1 = taskflow.emplace([&] (tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto B2 = taskflow.emplace([&] () { a.run(cpu, gpu, n); });
auto B3 = taskflow.emplace([&] (tf::cudaFlow&) {
for(unsigned i=0; i<n; ++i) {
cpu[i]++;
}
});
B0.precede(B1);
B1.precede(B2);
B2.precede(B3);
executor.run_n(taskflow, 100).wait();
}
};
B b;
b.run(cpu, gpu, n);
for(unsigned i=0; i<n; i++) {
REQUIRE(cpu[i] == 1200);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
}
// ----------------------------------------------------------------------------
// WorkerID
// ----------------------------------------------------------------------------
void worker_id(unsigned N, unsigned M) {
tf::Taskflow taskflow;
tf::Executor executor(N, M);
REQUIRE(executor.num_workers() == (N + M));
REQUIRE(executor.num_domains() == 2);
const unsigned s = 1000;
for(unsigned k=0; k<s; ++k) {
auto cputask = taskflow.emplace([&](){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow&) {
auto id = executor.this_worker_id();
REQUIRE(id >= N);
REQUIRE(id < N+M);
});
auto chktask = taskflow.emplace([&] () {
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
taskflow.emplace([&](tf::cudaFlow&) {
auto id = executor.this_worker_id();
REQUIRE(id >= N);
REQUIRE(id < N+M);
});
taskflow.emplace([&]() {
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
auto subflow = taskflow.emplace([&](tf::Subflow& sf){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
auto t1 = sf.emplace([&](){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
auto t2 = sf.emplace([&](tf::cudaFlow&){
auto id = executor.this_worker_id();
REQUIRE(id >= N);
REQUIRE(id < N+M);
});
t1.precede(t2);
});
cputask.precede(gputask);
gputask.precede(chktask);
chktask.precede(subflow);
}
executor.run_n(taskflow, 100).wait();
}
TEST_CASE("WorkerID.1C1G") {
worker_id(1, 1);
}
TEST_CASE("WorkerID.1C2G") {
worker_id(1, 2);
}
TEST_CASE("WorkerID.1C3G") {
worker_id(1, 3);
}
TEST_CASE("WorkerID.1C4G") {
worker_id(1, 4);
}
TEST_CASE("WorkerID.2C1G") {
worker_id(2, 1);
}
TEST_CASE("WorkerID.2C2G") {
worker_id(2, 2);
}
TEST_CASE("WorkerID.2C3G") {
worker_id(2, 3);
}
TEST_CASE("WorkerID.2C4G") {
worker_id(2, 4);
}
TEST_CASE("WorkerID.3C1G") {
worker_id(3, 1);
}
TEST_CASE("WorkerID.3C2G") {
worker_id(3, 2);
}
TEST_CASE("WorkerID.3C3G") {
worker_id(3, 3);
}
TEST_CASE("WorkerID.3C4G") {
worker_id(3, 4);
}
TEST_CASE("WorkerID.4C1G") {
worker_id(4, 1);
}
TEST_CASE("WorkerID.4C2G") {
worker_id(4, 2);
}
TEST_CASE("WorkerID.4C3G") {
worker_id(4, 3);
}
TEST_CASE("WorkerID.4C4G") {
worker_id(4, 4);
}
// ----------------------------------------------------------------------------
// Multiruns
// ----------------------------------------------------------------------------
void multiruns(unsigned N, unsigned M) {
tf::Taskflow taskflow;
tf::Executor executor(N, M);
const unsigned n = 1000;
const unsigned s = 1000;
int *cpu[s] = {0};
int *gpu[s] = {0};
for(unsigned k=0; k<s; ++k) {
int number = ::rand()%100;
auto cputask = taskflow.emplace([&, k](){
cpu[k] = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu[k], n*sizeof(int)) == hipSuccess);
});
auto gputask = taskflow.emplace([&, k, number](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu[k], cpu[k], n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu[k], n, number);
auto d2h = cf.copy(cpu[k], gpu[k], n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto chktask = taskflow.emplace([&, k, number] () {
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[k][i] == number);
}
});
cputask.precede(gputask);
gputask.precede(chktask);
}
executor.run(taskflow).wait();
}
TEST_CASE("Multiruns.1C1G") {
multiruns(1, 1);
}
TEST_CASE("Multiruns.1C2G") {
multiruns(1, 2);
}
TEST_CASE("Multiruns.1C3G") {
multiruns(1, 3);
}
TEST_CASE("Multiruns.1C4G") {
multiruns(1, 4);
}
TEST_CASE("Multiruns.2C1G") {
multiruns(2, 1);
}
TEST_CASE("Multiruns.2C2G") {
multiruns(2, 2);
}
TEST_CASE("Multiruns.2C3G") {
multiruns(2, 3);
}
TEST_CASE("Multiruns.2C4G") {
multiruns(2, 4);
}
TEST_CASE("Multiruns.3C1G") {
multiruns(3, 1);
}
TEST_CASE("Multiruns.3C2G") {
multiruns(3, 2);
}
TEST_CASE("Multiruns.3C3G") {
multiruns(3, 3);
}
TEST_CASE("Multiruns.3C4G") {
multiruns(3, 4);
}
TEST_CASE("Multiruns.4C1G") {
multiruns(4, 1);
}
TEST_CASE("Multiruns.4C2G") {
multiruns(4, 2);
}
TEST_CASE("Multiruns.4C3G") {
multiruns(4, 3);
}
TEST_CASE("Multiruns.4C4G") {
multiruns(4, 4);
}
// ----------------------------------------------------------------------------
// Subflow
// ----------------------------------------------------------------------------
TEST_CASE("Subflow") {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
auto partask = taskflow.emplace([&](tf::Subflow& sf){
auto cputask = sf.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
});
auto gputask = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
});
auto chktask = taskflow.emplace([&](){
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 1);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
});
partask.precede(chktask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// NestedSubflow
// ----------------------------------------------------------------------------
TEST_CASE("NestedSubflow") {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
});
auto partask = taskflow.emplace([&](tf::Subflow& sf){
auto gputask1 = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto subtask1 = sf.emplace([&](tf::Subflow& sf) {
auto gputask2 = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto subtask2 = sf.emplace([&](tf::Subflow& sf){
sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
});
gputask2.precede(subtask2);
});
gputask1.precede(subtask1);
});
auto chktask = taskflow.emplace([&](){
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 3);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
});
partask.precede(chktask)
.succeed(cputask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// DetachedSubflow
// ----------------------------------------------------------------------------
TEST_CASE("DetachedSubflow") {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
taskflow.emplace([&](tf::Subflow& sf){
auto cputask = sf.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
});
auto gputask = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
sf.detach();
});
executor.run(taskflow).wait();
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 1);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
}
// ----------------------------------------------------------------------------
// Conditional GPU tasking
// ----------------------------------------------------------------------------
TEST_CASE("Loop") {
tf::Taskflow taskflow;
tf::Executor executor;
const unsigned n = 1000;
int* cpu = nullptr;
int* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto condition = taskflow.emplace([&cpu, round=0] () mutable {
++round;
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == round);
}
return round >= 100;
});
auto freetask = taskflow.emplace([&](){
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
});
cputask.precede(gputask);
gputask.precede(condition);
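  // The condition task returns an index into its successors: 0 loops back to
  // gputask for another round, 1 branches to freetask once 100 rounds are done.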
condition.precede(gputask, freetask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// Predicate
// ----------------------------------------------------------------------------
TEST_CASE("Predicate") {
tf::Taskflow taskflow;
tf::Executor executor;
const unsigned n = 1000;
int* cpu = nullptr;
int* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
REQUIRE(hipMemcpy(gpu, cpu, n*sizeof(int), hipMemcpyHostToDevice) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto copy = cf.copy(cpu, gpu, n);
kernel.precede(copy);
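    // join_until keeps offloading this cudaFlow until the predicate returns
    // true, i.e. 100 launches here, so every element ends up equal to 100.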
cf.join_until([i=100]() mutable { return i-- == 0; });
});
auto freetask = taskflow.emplace([&](){
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == 100);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
});
cputask.precede(gputask);
gputask.precede(freetask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// Repeat
// ----------------------------------------------------------------------------
TEST_CASE("Repeat") {
tf::Taskflow taskflow;
tf::Executor executor;
const unsigned n = 1000;
int* cpu = nullptr;
int* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
REQUIRE(hipMemcpy(gpu, cpu, n*sizeof(int), hipMemcpyHostToDevice) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto copy = cf.copy(cpu, gpu, n);
kernel.precede(copy);
cf.join_n(100);
});
auto freetask = taskflow.emplace([&](){
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == 100);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
});
cputask.precede(gputask);
gputask.precede(freetask);
executor.run(taskflow).wait();
}
| 5e7df886a3676a028ef4c9fbe7c2f2358751acc8.cu | #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
// ----------------------------------------------------------------------------
// kernel helper
// ----------------------------------------------------------------------------
template <typename T>
__global__ void k_set(T* ptr, size_t N, T value) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
ptr[i] = value;
}
}
template <typename T>
__global__ void k_single_set(T* ptr, int i, T value) {
ptr[i] = value;
}
template <typename T>
__global__ void k_add(T* ptr, size_t N, T value) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
ptr[i] += value;
}
}
template <typename T>
__global__ void k_single_add(T* ptr, int i, T value) {
ptr[i] += value;
}
// --------------------------------------------------------
// Testcase: Empty
// --------------------------------------------------------
TEST_CASE("Empty" * doctest::timeout(300)) {
std::atomic<int> counter{0};
tf::Taskflow taskflow;
tf::Executor executor;
taskflow.emplace([&](tf::cudaFlow&){
++counter;
});
taskflow.emplace([&](tf::cudaFlow&){
++counter;
});
taskflow.emplace([&](tf::cudaFlow&){
++counter;
});
executor.run_n(taskflow, 100).wait();
REQUIRE(counter == 300);
}
// --------------------------------------------------------
// Testcase: Set
// --------------------------------------------------------
template <typename T>
void set() {
for(unsigned n=1; n<=123456; n = n*2 + 1) {
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_set<T>, gpu, n, (T)17);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)17);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
}
TEST_CASE("Set.i8" * doctest::timeout(300)) {
set<int8_t>();
}
TEST_CASE("Set.i16" * doctest::timeout(300)) {
set<int16_t>();
}
TEST_CASE("Set.i32" * doctest::timeout(300)) {
set<int32_t>();
}
// --------------------------------------------------------
// Testcase: Add
// --------------------------------------------------------
template <typename T>
void add() {
for(unsigned n=1; n<=123456; n = n*2 + 1) {
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto ad1 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 1);
auto ad2 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 2);
auto ad3 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 3);
auto ad4 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 4);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(ad1);
ad1.precede(ad2);
ad2.precede(ad3);
ad3.precede(ad4);
ad4.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == 10);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
}
TEST_CASE("Add.i8" * doctest::timeout(300)) {
add<int8_t>();
}
TEST_CASE("Add.i16" * doctest::timeout(300)) {
add<int16_t>();
}
TEST_CASE("Add.i32" * doctest::timeout(300)) {
add<int32_t>();
}
// TODO: 64-bit fail?
//TEST_CASE("Add.i64" * doctest::timeout(300)) {
// add<int64_t>();
//}
// --------------------------------------------------------
// Testcase: Binary Set
// --------------------------------------------------------
template <typename T>
void bset() {
const unsigned n = 10000;
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {1, 1, 1};
dim3 b = {1, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto d2h = cf.copy(cpu, gpu, n);
std::vector<tf::cudaTask> tasks(n+1);
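      // Each kernel writes one element; making task i/2 precede task i links the
      // n kernels into a binary tree, and every kernel is fenced between h2d and d2h.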
for(unsigned i=1; i<=n; ++i) {
tasks[i] = cf.kernel(g, b, 0, k_single_set<T>, gpu, i-1, (T)17);
auto p = i/2;
if(p != 0) {
tasks[p].precede(tasks[i]);
}
tasks[i].precede(d2h);
h2d.precede(tasks[i]);
}
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)17);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("BSet.i8" * doctest::timeout(300)) {
bset<int8_t>();
}
TEST_CASE("BSet.i16" * doctest::timeout(300)) {
bset<int16_t>();
}
TEST_CASE("BSet.i32" * doctest::timeout(300)) {
bset<int32_t>();
}
// --------------------------------------------------------
// Testcase: Memset
// --------------------------------------------------------
TEST_CASE("Memset") {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 100;
int* cpu = new int [N];
int* gpu = nullptr;
REQUIRE(cudaMalloc(&gpu, N*sizeof(int)) == cudaSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = 999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<int>, gpu, N, 123);
auto zero = cf.memset(gpu+start, 0x3f, (N-start)*sizeof(int));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(cpu[i] == 123);
}
for(int i=start; i<N; ++i) {
REQUIRE(cpu[i] == 0x3f3f3f3f);
}
}
delete [] cpu;
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
// --------------------------------------------------------
// Testcase: Memset0
// --------------------------------------------------------
template <typename T>
void memset0() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 97;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(cudaMalloc(&gpu, N*sizeof(T)) == cudaSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto zero = cf.memset(gpu+start, (T)0, (N-start)*sizeof(T));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)0) < 1e-4);
}
}
delete [] cpu;
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("Memset0.i8") {
memset0<int8_t>();
}
TEST_CASE("Memset0.i16") {
memset0<int16_t>();
}
TEST_CASE("Memset0.i32") {
memset0<int32_t>();
}
TEST_CASE("Memset0.f32") {
memset0<float>();
}
TEST_CASE("Memset0.f64") {
memset0<double>();
}
// --------------------------------------------------------
// Testcase: Memcpy
// --------------------------------------------------------
template <typename T>
void memcpy() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 97;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(cudaMalloc(&gpu, N*sizeof(T)) == cudaSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto zero = cf.memset(gpu+start, (T)0, (N-start)*sizeof(T));
auto copy = cf.memcpy(cpu, gpu, N*sizeof(T));
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)0) < 1e-4);
}
}
delete [] cpu;
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("Memcpy.i8") {
memcpy<int8_t>();
}
TEST_CASE("Memcpy.i16") {
memcpy<int16_t>();
}
TEST_CASE("Memcpy.i32") {
memcpy<int32_t>();
}
TEST_CASE("Memcpy.f32") {
memcpy<float>();
}
TEST_CASE("Memcpy.f64") {
memcpy<double>();
}
// --------------------------------------------------------
// Testcase: fill
// --------------------------------------------------------
template <typename T>
void fill(T value) {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 107;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(cudaMalloc(&gpu, N*sizeof(T)) == cudaSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto fill = cf.fill(gpu+start, value, (N-start));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(fill);
fill.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(std::fabs(cpu[i] - value) < 1e-4);
}
}
delete [] cpu;
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("Fill.i8") {
fill<int8_t>(+123);
fill<int8_t>(-123);
}
TEST_CASE("Fill.i16") {
fill<int16_t>(+12345);
fill<int16_t>(-12345);
}
TEST_CASE("Fill.i32") {
fill<int32_t>(+123456789);
fill<int32_t>(-123456789);
}
TEST_CASE("Fill.f32") {
fill<float>(+123456789.0f);
fill<float>(-123456789.0f);
}
// --------------------------------------------------------
// Testcase: Zero
// --------------------------------------------------------
template <typename T>
void zero() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 100;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(cudaMalloc(&gpu, N*sizeof(T)) == cudaSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto zero = cf.zero(gpu+start, (N-start));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)0) < 1e-4);
}
}
delete [] cpu;
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("Zero.i8") {
zero<int8_t>();
}
TEST_CASE("Zero.i16") {
zero<int16_t>();
}
TEST_CASE("Zero.i32") {
zero<int32_t>();
}
TEST_CASE("Zero.f32") {
zero<float>();
}
// --------------------------------------------------------
// Testcase: Barrier
// --------------------------------------------------------
template <typename T>
void barrier() {
const unsigned n = 1000;
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {1, 1, 1};
dim3 b = {1, 1, 1};
auto br1 = cf.noop();
auto br2 = cf.noop();
auto br3 = cf.noop();
auto h2d = cf.copy(gpu, cpu, n);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(br1);
for(unsigned i=0; i<n; ++i) {
auto k1 = cf.kernel(g, b, 0, k_single_set<T>, gpu, i, (T)17);
k1.succeed(br1)
.precede(br2);
auto k2 = cf.kernel(g, b, 0, k_single_add<T>, gpu, i, (T)3);
k2.succeed(br2)
.precede(br3);
}
br3.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)20);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("Barrier.i8" * doctest::timeout(300)) {
barrier<int8_t>();
}
TEST_CASE("Barrier.i16" * doctest::timeout(300)) {
barrier<int16_t>();
}
TEST_CASE("Barrier.i32" * doctest::timeout(300)) {
barrier<int32_t>();
}
// ----------------------------------------------------------------------------
// NestedRuns
// ----------------------------------------------------------------------------
TEST_CASE("NestedRuns") {
int* cpu = nullptr;
int* gpu = nullptr;
constexpr unsigned n = 1000;
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
struct A {
tf::Executor executor;
tf::Taskflow taskflow;
void run(int* cpu, int* gpu, unsigned n) {
taskflow.clear();
auto A1 = taskflow.emplace([&](tf::cudaFlow& cf) {
cf.copy(gpu, cpu, n);
});
auto A2 = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
});
auto A3 = taskflow.emplace([&] (tf::cudaFlow& cf) {
cf.copy(cpu, gpu, n);
});
A1.precede(A2);
A2.precede(A3);
executor.run_n(taskflow, 10).wait();
}
};
struct B {
tf::Taskflow taskflow;
tf::Executor executor;
A a;
void run(int* cpu, int* gpu, unsigned n) {
taskflow.clear();
auto B0 = taskflow.emplace([] () {});
auto B1 = taskflow.emplace([&] (tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto B2 = taskflow.emplace([&] () { a.run(cpu, gpu, n); });
auto B3 = taskflow.emplace([&] (tf::cudaFlow&) {
for(unsigned i=0; i<n; ++i) {
cpu[i]++;
}
});
B0.precede(B1);
B1.precede(B2);
B2.precede(B3);
executor.run_n(taskflow, 100).wait();
}
};
B b;
b.run(cpu, gpu, n);
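// each of the 100 B runs adds 1 (B1 kernel) + 10 (B2: A runs its +1 kernel 10 times) + 1 (B3 host increment)
// = 12 per element, hence the expected value of 1200 below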
for(unsigned i=0; i<n; i++) {
REQUIRE(cpu[i] == 1200);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
}
// ----------------------------------------------------------------------------
// WorkerID
// ----------------------------------------------------------------------------
void worker_id(unsigned N, unsigned M) {
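// the executor below is created with N host (CPU) workers and M cudaFlow (GPU) workers;
// host workers take ids in [0, N) and GPU workers take ids in [N, N+M), which is what the checks verify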
tf::Taskflow taskflow;
tf::Executor executor(N, M);
REQUIRE(executor.num_workers() == (N + M));
REQUIRE(executor.num_domains() == 2);
const unsigned s = 1000;
for(unsigned k=0; k<s; ++k) {
auto cputask = taskflow.emplace([&](){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow&) {
auto id = executor.this_worker_id();
REQUIRE(id >= N);
REQUIRE(id < N+M);
});
auto chktask = taskflow.emplace([&] () {
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
taskflow.emplace([&](tf::cudaFlow&) {
auto id = executor.this_worker_id();
REQUIRE(id >= N);
REQUIRE(id < N+M);
});
taskflow.emplace([&]() {
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
auto subflow = taskflow.emplace([&](tf::Subflow& sf){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
auto t1 = sf.emplace([&](){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
auto t2 = sf.emplace([&](tf::cudaFlow&){
auto id = executor.this_worker_id();
REQUIRE(id >= N);
REQUIRE(id < N+M);
});
t1.precede(t2);
});
cputask.precede(gputask);
gputask.precede(chktask);
chktask.precede(subflow);
}
executor.run_n(taskflow, 100).wait();
}
TEST_CASE("WorkerID.1C1G") {
worker_id(1, 1);
}
TEST_CASE("WorkerID.1C2G") {
worker_id(1, 2);
}
TEST_CASE("WorkerID.1C3G") {
worker_id(1, 3);
}
TEST_CASE("WorkerID.1C4G") {
worker_id(1, 4);
}
TEST_CASE("WorkerID.2C1G") {
worker_id(2, 1);
}
TEST_CASE("WorkerID.2C2G") {
worker_id(2, 2);
}
TEST_CASE("WorkerID.2C3G") {
worker_id(2, 3);
}
TEST_CASE("WorkerID.2C4G") {
worker_id(2, 4);
}
TEST_CASE("WorkerID.3C1G") {
worker_id(3, 1);
}
TEST_CASE("WorkerID.3C2G") {
worker_id(3, 2);
}
TEST_CASE("WorkerID.3C3G") {
worker_id(3, 3);
}
TEST_CASE("WorkerID.3C4G") {
worker_id(3, 4);
}
TEST_CASE("WorkerID.4C1G") {
worker_id(4, 1);
}
TEST_CASE("WorkerID.4C2G") {
worker_id(4, 2);
}
TEST_CASE("WorkerID.4C3G") {
worker_id(4, 3);
}
TEST_CASE("WorkerID.4C4G") {
worker_id(4, 4);
}
// ----------------------------------------------------------------------------
// Multiruns
// ----------------------------------------------------------------------------
void multiruns(unsigned N, unsigned M) {
tf::Taskflow taskflow;
tf::Executor executor(N, M);
const unsigned n = 1000;
const unsigned s = 1000;
int *cpu[s] = {0};
int *gpu[s] = {0};
for(unsigned k=0; k<s; ++k) {
int number = ::rand()%100;
auto cputask = taskflow.emplace([&, k](){
cpu[k] = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu[k], n*sizeof(int)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&, k, number](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu[k], cpu[k], n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu[k], n, number);
auto d2h = cf.copy(cpu[k], gpu[k], n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto chktask = taskflow.emplace([&, k, number] () {
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[k][i] == number);
}
});
cputask.precede(gputask);
gputask.precede(chktask);
}
executor.run(taskflow).wait();
}
TEST_CASE("Multiruns.1C1G") {
multiruns(1, 1);
}
TEST_CASE("Multiruns.1C2G") {
multiruns(1, 2);
}
TEST_CASE("Multiruns.1C3G") {
multiruns(1, 3);
}
TEST_CASE("Multiruns.1C4G") {
multiruns(1, 4);
}
TEST_CASE("Multiruns.2C1G") {
multiruns(2, 1);
}
TEST_CASE("Multiruns.2C2G") {
multiruns(2, 2);
}
TEST_CASE("Multiruns.2C3G") {
multiruns(2, 3);
}
TEST_CASE("Multiruns.2C4G") {
multiruns(2, 4);
}
TEST_CASE("Multiruns.3C1G") {
multiruns(3, 1);
}
TEST_CASE("Multiruns.3C2G") {
multiruns(3, 2);
}
TEST_CASE("Multiruns.3C3G") {
multiruns(3, 3);
}
TEST_CASE("Multiruns.3C4G") {
multiruns(3, 4);
}
TEST_CASE("Multiruns.4C1G") {
multiruns(4, 1);
}
TEST_CASE("Multiruns.4C2G") {
multiruns(4, 2);
}
TEST_CASE("Multiruns.4C3G") {
multiruns(4, 3);
}
TEST_CASE("Multiruns.4C4G") {
multiruns(4, 4);
}
// ----------------------------------------------------------------------------
// Subflow
// ----------------------------------------------------------------------------
TEST_CASE("Subflow") {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
auto partask = taskflow.emplace([&](tf::Subflow& sf){
auto cputask = sf.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
});
auto gputask = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
});
auto chktask = taskflow.emplace([&](){
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 1);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
});
partask.precede(chktask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// NestedSubflow
// ----------------------------------------------------------------------------
TEST_CASE("NestedSubflow") {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
});
auto partask = taskflow.emplace([&](tf::Subflow& sf){
auto gputask1 = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto subtask1 = sf.emplace([&](tf::Subflow& sf) {
auto gputask2 = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto subtask2 = sf.emplace([&](tf::Subflow& sf){
sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
});
gputask2.precede(subtask2);
});
gputask1.precede(subtask1);
});
auto chktask = taskflow.emplace([&](){
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 3);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
});
partask.precede(chktask)
.succeed(cputask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// DetachedSubflow
// ----------------------------------------------------------------------------
TEST_CASE("DetachedSubflow") {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
taskflow.emplace([&](tf::Subflow& sf){
auto cputask = sf.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
});
auto gputask = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
sf.detach();
});
executor.run(taskflow).wait();
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 1);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
}
// ----------------------------------------------------------------------------
// Conditional GPU tasking
// ----------------------------------------------------------------------------
TEST_CASE("Loop") {
tf::Taskflow taskflow;
tf::Executor executor;
const unsigned n = 1000;
int* cpu = nullptr;
int* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto condition = taskflow.emplace([&cpu, round=0] () mutable {
++round;
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == round);
}
return round >= 100;
});
auto freetask = taskflow.emplace([&](){
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
});
cputask.precede(gputask);
gputask.precede(condition);
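// a condition task returns the index of the successor to run next:
// 0 loops back to gputask, 1 (once round reaches 100) continues to freetask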
condition.precede(gputask, freetask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// Predicate
// ----------------------------------------------------------------------------
TEST_CASE("Predicate") {
tf::Taskflow taskflow;
tf::Executor executor;
const unsigned n = 1000;
int* cpu = nullptr;
int* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
REQUIRE(cudaMemcpy(gpu, cpu, n*sizeof(int), cudaMemcpyHostToDevice) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto copy = cf.copy(cpu, gpu, n);
kernel.precede(copy);
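// join_until re-offloads the captured cudaFlow until the predicate returns true
// (evaluated before each launch), so the kernel/copy pair executes 100 times here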
cf.join_until([i=100]() mutable { return i-- == 0; });
});
auto freetask = taskflow.emplace([&](){
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == 100);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
});
cputask.precede(gputask);
gputask.precede(freetask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// Repeat
// ----------------------------------------------------------------------------
TEST_CASE("Repeat") {
tf::Taskflow taskflow;
tf::Executor executor;
const unsigned n = 1000;
int* cpu = nullptr;
int* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
REQUIRE(cudaMemcpy(gpu, cpu, n*sizeof(int), cudaMemcpyHostToDevice) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto copy = cf.copy(cpu, gpu, n);
kernel.precede(copy);
cf.join_n(100);
});
auto freetask = taskflow.emplace([&](){
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == 100);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
});
cputask.precede(gputask);
gputask.precede(freetask);
executor.run(taskflow).wait();
}
|
05aecd2d288434c5b98e1cd3763e07cc918cbfa1.hip | // !!! This is a file automatically generated by hipify!!!
#define TIME 1
//#define PSKEL_LOGMODE 1
//#define TBB_USE_DEBUG 1
#define PSKEL_CUDA
#include "PSkel.h"
#include "hr_time.h"
#include <omp.h>
#include <fstream>
#include <string>
#include <stdio.h>
#include <iostream>
#include <sstream>
#include <cmath>
#include <cassert>
#ifndef PSKEL_NEUMAN
#ifndef PSKEL_MOORE
#define PSKEL_NEUMAN
#endif
#else
#ifndef PSKEL_MOORE
#ifndef PSKEL_NEUMAN
#define PSKEL_MOORE
#endif
#endif
#endif
#ifndef NUM_ADD
#define NUM_ADD 1
#endif
#ifndef NUM_MULT
#define NUM_MULT 0
#endif
#ifndef MASK_RADIUS
#define MASK_RADIUS 1
#endif
using namespace std;
using namespace PSkel;
struct Arguments{
//int neighborhood, radius, numAdd, numSub, numMult, numDiv, numPow, numSqrt, numFma;
int numAdd, numMult,radius;
int loopControlAdd, loopControlMult, opControlAdd, opControlMult;
float arg1, arg2, arg3, arg4, arg5, arg6, arg7;
Arguments(){
//neighborhood = 0;
radius = 2;
numAdd = 10;
//numSub = 0;
numMult = 0;
//numDiv = 0;
//numPow = 0;
//numSqrt = 0;
//numFma = 0;
}
//Arguments(int nb, int r, int nAdd, int nSub, int nMult, int nDiv, int nPow, int nSqrt, int nFma){
Arguments(int r, int nAdd, int nMult, int maskSize){
//neighborhood = nb;
radius = r;
numAdd = nAdd;
//numSub = nSub;
numMult = nMult;
//numDiv = nDiv;
//numPow = nPow;
//numSqrt = nSqrt;
//numFma = nFma;
loopControlAdd = numAdd > 0 ? (numAdd-1) /maskSize + 1 : 0;
loopControlMult = numMult > 0 ? (numMult-1)/maskSize + 1 : 0;
opControlAdd = numAdd > 0 ? radius : 0;
opControlMult = numMult > 0 ? radius : 0;
arg1 = (float) rand()/RAND_MAX;
arg2 = (float) rand()/RAND_MAX;
arg3 = (float) rand()/RAND_MAX;
arg4 = (float) rand()/RAND_MAX;
arg5 = (float) rand()/RAND_MAX;
arg6 = (float) rand()/RAND_MAX;
arg7 = (float) rand()/RAND_MAX;
//cout<<"Arguments: "<< arg1 << " " << arg2 << " "<< arg3 << " " << arg4 << " " << arg5 << endl;
}
};
namespace PSkel{
#ifdef PSKEL_INT
__parallel__ void stencilKernel(Array2D<int> &input,Array2D<int> &output,Mask2D<int> &mask,Arguments &args, size_t i, size_t j){
int returnValue; // = input(i,j);
int L1, L2, L3, L4, L5, L6, L7;
#else
__parallel__ void stencilKernel(Array2D<float> &input,Array2D<float> &output,Mask2D<float> &mask,Arguments &args, size_t i, size_t j){
float returnValue; // = input(i,j);
float L1, L2, L3, L4, L5, L6, L7;
#endif
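// The nested preprocessor blocks below select one fully unrolled stencil body per
// combination of neighborhood type (PSKEL_NEUMAN / PSKEL_MOORE), MASK_RADIUS (1, 2 or 3)
// and the NUM_ADD / NUM_MULT flags, so the arithmetic pattern is fixed at compile time.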
#ifdef PSKEL_NEUMAN
#if MASK_RADIUS == 1
#if NUM_ADD == 1
#if NUM_MULT == 1
//printf("Neuman RADIUS 1 ADD 1 MULT 1\n");
L1 = input(i-1,j) * args.arg1;
L2 = input(i ,j-1) * args.arg2 + input(i,j+1) * args.arg3;
L3 = input(i+1,j) * args.arg4;
returnValue = L1 + L2 + L3;
#else
//printf("Neuman RADIUS 1 ADD 1 MULT 0\n");
L1 = input(i-1,j);
L2 = input(i ,j-1) + input(i,j+1);
L3 = input(i+1,j);
returnValue = L1 + L2 + L3;
#endif
#elif NUM_MULT == 1
//printf("Neuman RADIUS 1 ADD 0 MULT 1\n");
L1 = input(i-1,j);
L2 = input(i ,j-1) * input(i,j+1);
L3 = input(i+1,j);
returnValue = L1 * L2 * L3;
#endif
#elif MASK_RADIUS == 2
#if NUM_ADD == 1
#if NUM_MULT == 1
//printf("Neuman RADIUS 2 ADD 1 MULT 1\n");
L1 = input(i-2,j) * args.arg1;
L2 = input(i-1,j-1) * args.arg2 + input(i-1,j) * args.arg3 + input(i-1,j+1) * args.arg4;
L3 = input(i,j-2) * args.arg1 + input(i ,j-1) * args.arg2 + input(i ,j+1) * args.arg3 + input(i,j+2) * args.arg4;
L4 = input(i+1,j-1) * args.arg4 + input(i+1,j) * args.arg5 + input(i+1,j+1) * args.arg6;
L5 = input(i+2,j) * args.arg7;
returnValue = L1 + L2 + L3 + L4 + L5;
#else
//printf("Neuman RADIUS 2 ADD 1 MULT 0\n");
L1 = input(i-2,j);
L2 = input(i-1,j-1) + input(i-1,j) + input(i-1,j+1);
L3 = input(i,j-2) + input(i ,j-1) + input(i ,j+1) + input(i,j+2);
L4 = input(i+1,j-1) + input(i+1,j) + input(i+1,j+1);
L5 = input(i+2,j);
returnValue = L1 + L2 + L3 + L4 + L5;
#endif
#elif NUM_MULT == 1
//printf("Neuman RADIUS 2 ADD 0 MULT 1\n");
L1 = input(i-2,j);
L2 = input(i-1,j-1) * input(i-1,j) * input(i-1,j+1);
L3 = input(i,j-2) * input(i ,j-1) * input(i ,j+1) * input(i,j+2);
L4 = input(i+1,j-1) * input(i+1,j) * input(i+1,j+1);
L5 = input(i+2,j);
returnValue = L1 * L2 * L3 * L4 * L5;
#endif
#else
#if NUM_ADD == 1
#if NUM_MULT == 1
//printf("Neuman RADIUS 3 ADD 1 MULT 1\n");
L1 = input(i-3,j) * args.arg1;
L2 = input(i-2,j-1) * args.arg1 + input(i-2,j) * args.arg2 + input(i-2,j+1);
L3 = input(i-1,j-2) * args.arg1 + input(i-1,j-1) * args.arg2 + input(i-1,j) * args.arg3 + input(i-1,j+1) * args.arg4 + input(i-1,j+2) * args.arg5 ;
L4 = input(i,j-3) * args.arg1 + input(i ,j-2) * args.arg2 + input(i ,j-1) * args.arg3 + input(i ,j+1) * args.arg5 + input(i ,j+2) * args.arg6 + input(i,j+3) * args.arg7;
L5 = input(i+1,j-2) * args.arg3 + input(i+1,j-1) * args.arg4 + input(i+1,j) * args.arg5 + input(i+1,j+1) * args.arg6 + input(i+1,j+2) * args.arg7 ;
L6 = input(i+2,j-1) * args.arg5 + input(i+2,j) * args.arg6 + input(i+2,j+1);
L7 = input(i+3,j) * args.arg7;
returnValue = L1 + L2 + L3 + L4 + L5 + L6 + L7;
#else
//printf("Neuman RADIUS 3 ADD 1 MULT 0\n");
L1 = input(i-3,j);
L2 = input(i-2,j-1) + input(i-2,j) + input(i-2,j+1);
L3 = input(i-1,j-2) + input(i-1,j-1) + input(i-1,j) + input(i-1,j+1) + input(i-1,j+2);
L4 = input(i,j-3) + input(i ,j-2) + input(i ,j-1) + input(i ,j+1) + input(i ,j+2) + input(i,j+3);
L5 = input(i+1,j-2) + input(i+1,j-1) + input(i+1,j) + input(i+1,j+1) + input(i+1,j+2);
L6 = input(i+2,j-1) + input(i+2,j) + input(i+2,j+1);
L7 = input(i+3,j);
returnValue = L1 + L2 + L3 + L4 + L5 + L6 + L7;
#endif
#elif NUM_MULT == 1
//printf("Neuman RADIUS 3 ADD 0 MULT 1\n");
L1 = input(i-3,j);
L2 = input(i-2,j-1) * input(i-2,j) * input(i-2,j+1);
L3 = input(i-1,j-2) * input(i-1,j-1) * input(i-1,j) * input(i-1,j+1) * input(i-1,j+2);
L4 = input(i,j-3) * input(i ,j-2) * input(i ,j-1) * input(i ,j+1) * input(i ,j+2) * input(i,j+3);
L5 = input(i+1,j-2) * input(i+1,j-1) * input(i+1,j) * input(i+1,j+1) * input(i+1,j+2);
L6 = input(i+2,j-1) * input(i+2,j) * input(i+2,j+1);
L7 = input(i+3,j);
returnValue = L1 * L2 * L3 * L4 * L5 * L6 * L7;
#endif
#endif
#else
#ifdef PSKEL_MOORE
#if MASK_RADIUS == 1
#if NUM_ADD == 1
#if NUM_MULT == 1
//printf("Moore RADIUS 1 ADD 1 MULT 1\n");
L1 = input(i-1,j-1) * args.arg1 + input(i-1,j) * args.arg2 + input(i-1,j+1) * args.arg3;
L2 = input(i, j-1) * args.arg1 + input(i, j) * args.arg2 + input(i, j+1) * args.arg3;
L3 = input(i+1,j-1) * args.arg1 + input(i+1,j) * args.arg2 + input(i+1,j+1) * args.arg3;
returnValue = L1 + L2 + L3;
#else
//printf("Moore RADIUS 1 ADD 1 MULT 0\n");
L1 = input(i-1,j-1) + input(i-1,j) + input(i-1,j+1);
L2 = input(i, j-1) + input(i ,j) + input(i, j+1);
L3 = input(i+1,j-1) + input(i+1,j) + input(i+1,j+1);
returnValue = L1 + L2 + L3;
#endif
#elif NUM_MULT == 1
//printf("Moore RADIUS 1 ADD 0 MULT 1\n");
L1 = input(i-1,j-1) * input(i-1,j) * input(i-1,j+1);
L2 = input(i ,j-1) * input(i ,j) * input(i ,j+1);
L3 = input(i+1,j-1) * input(i+1,j) * input(i+1,j+1);
returnValue = L1 * L2 * L3;
#endif
#elif MASK_RADIUS == 2
#if NUM_ADD == 1
#if NUM_MULT == 1
//printf("Moore RADIUS 2 ADD 1 MULT 1\n");
L1 = input(i-2,j-2) * args.arg1 + input(i-2,j-1) * args.arg2 + input(i-2,j) * args.arg3 + input(i-2,j+1) * args.arg4 + input(i-2,j+2) * args.arg5;
L2 = input(i-1,j-2) * args.arg1 + input(i-1,j-1) * args.arg2 + input(i-1,j) * args.arg3 + input(i-1,j+1) * args.arg4 + input(i-1,j+2) * args.arg5;
L3 = input(i ,j-2) * args.arg1 + input(i, j-1) * args.arg2 + input(i ,j) * args.arg3 + input(i, j+1) * args.arg4 + input(i ,j+2) * args.arg5;
L4 = input(i+1,j-2) * args.arg1 + input(i+1,j-1) * args.arg2 + input(i+1,j) * args.arg3 + input(i+1,j+1) * args.arg4 + input(i+1,j+2) * args.arg5;
L5 = input(i+2,j-2) * args.arg1 + input(i+2,j-1) * args.arg2 + input(i+2,j) * args.arg3 + input(i+2,j+1) * args.arg4 + input(i+2,j+2) * args.arg5;
returnValue = L1 + L2 + L3 + L4 + L5;
#else
//printf("Moore RADIUS 2 ADD 1 MULT 0\n");
L1 = input(i-2,j-2) + input(i-2,j-1) + input(i-2,j) + input(i-2,j+1) + input(i-2,j+2);
L2 = input(i-1,j-2) + input(i-1,j-1) + input(i-1,j) + input(i-1,j+1) + input(i-1,j+2);
L3 = input(i ,j-2) + input(i, j-1) + input(i ,j) + input(i, j+1) + input(i ,j+2);
L4 = input(i+1,j-2) + input(i+1,j-1) + input(i+1,j) + input(i+1,j+1) + input(i+1,j+2);
L5 = input(i+2,j-2) + input(i+2,j-1) + input(i+2,j) + input(i+2,j+1) + input(i+2,j+2);
returnValue = L1 + L2 + L3 + L4 + L5;
#endif
#elif NUM_MULT == 1
//printf("Moore RADIUS 2 ADD 0 MULT 1\n");
L1 = input(i-2,j-2) * input(i-2,j-1) * input(i-2,j) * input(i-2,j+1) * input(i-2,j+2);
L2 = input(i-1,j-2) * input(i-1,j-1) * input(i-1,j) * input(i-1,j+1) * input(i-1,j+2);
L3 = input(i ,j-2) * input(i, j-1) * input(i ,j) * input(i, j+1) * input(i ,j+2);
L4 = input(i+1,j-2) * input(i+1,j-1) * input(i+1,j) * input(i+1,j+1) * input(i+1,j+2);
L5 = input(i+2,j-2) * input(i+2,j-1) * input(i+2,j) * input(i+2,j+1) * input(i+2,j+2);
returnValue = L1 * L2 * L3 * L4 * L5;
#endif
#else
#if NUM_ADD == 1
#if NUM_MULT == 1
//printf("Moore RADIUS 3 ADD 1 MULT 1\n");
L1 = input(i-3,j-3) * args.arg1 + input(i-3,j-2) * args.arg2 + input(i-3,j-1) * args.arg3 + input(i-3,j) * args.arg4 + input(i-3,j+1) * args.arg5 + input(i-3,j+2) * args.arg6 + input(i-3,j+3) * args.arg7;
L2 = input(i-2,j-3) * args.arg1 + input(i-2,j-2) * args.arg2 + input(i-2,j-1) * args.arg3 + input(i-2,j) * args.arg4 + input(i-2,j+1) * args.arg5 + input(i-2,j+2) * args.arg6 + input(i-2,j+3) * args.arg7;
L3 = input(i-1,j-3) * args.arg1 + input(i-1,j-2) * args.arg2 + input(i-1,j-1) * args.arg3 + input(i-1,j) * args.arg4 + input(i-1,j+1) * args.arg5 + input(i-1,j+2) * args.arg6 + input(i-1,j+3) * args.arg7;
L4 = input(i ,j-3) * args.arg1 + input(i ,j-2) * args.arg2 + input(i, j-1) * args.arg3 + input(i ,j) * args.arg4 + input(i, j+1) * args.arg5 + input(i ,j+2) * args.arg6 + input(i ,j+3) * args.arg7;
L5 = input(i+1,j-3) * args.arg1 + input(i+1,j-2) * args.arg2 + input(i+1,j-1) * args.arg3 + input(i+1,j) * args.arg4 + input(i+1,j+1) * args.arg5 + input(i+1,j+2) * args.arg6 + input(i+1,j+3) * args.arg7;
L6 = input(i+2,j-3) * args.arg1 + input(i+2,j-2) * args.arg2 + input(i+2,j-1) * args.arg3 + input(i+2,j) * args.arg4 + input(i+2,j+1) * args.arg5 + input(i+2,j+2) * args.arg6 + input(i+2,j+3) * args.arg7;
L7 = input(i+3,j-3) * args.arg1 + input(i+3,j-2) * args.arg2 + input(i+3,j-1) * args.arg3 + input(i+3,j) * args.arg4 + input(i+3,j+1) * args.arg5 + input(i+3,j+2) * args.arg6 + input(i+3,j+3) * args.arg7;
returnValue = L1 + L2 + L3 + L4 + L5 + L6 + L7;
#else
//printf("Moore RADIUS 3 ADD 1 MULT 0\n");
L1 = input(i-3,j-3) + input(i-3,j-2) + input(i-3,j-1) + input(i-3,j) + input(i-3,j+1) + input(i-3,j+2) + input(i-3,j+3);
L2 = input(i-2,j-3) + input(i-2,j-2) + input(i-2,j-1) + input(i-2,j) + input(i-2,j+1) + input(i-2,j+2) + input(i-2,j+3);
L3 = input(i-1,j-3) + input(i-1,j-2) + input(i-1,j-1) + input(i-1,j) + input(i-1,j+1) + input(i-1,j+2) + input(i-1,j+3);
L4 = input(i ,j-3) + input(i ,j-2) + input(i, j-1) + input(i ,j) + input(i, j+1) + input(i ,j+2) + input(i ,j+3);
L5 = input(i+1,j-3) + input(i+1,j-2) + input(i+1,j-1) + input(i+1,j) + input(i+1,j+1) + input(i+1,j+2) + input(i+1,j+3);
L6 = input(i+2,j-3) + input(i+2,j-2) + input(i+2,j-1) + input(i+2,j) + input(i+2,j+1) + input(i+2,j+2) + input(i+2,j+3);
L7 = input(i+3,j-3) + input(i+3,j-2) + input(i+3,j-1) + input(i+3,j) + input(i+3,j+1) + input(i+3,j+2) + input(i+3,j+3);
returnValue = L1 + L2 + L3 + L4 + L5 + L6 + L7;
#endif
#elif NUM_MULT==1
//printf("Moore RADIUS 3 ADD 0 MULT 1\n");
L1 = input(i-3,j-3) * input(i-3,j-2) * input(i-3,j-1) * input(i-3,j) * input(i-3,j+1) * input(i-3,j+2) * input(i-3,j+3);
L2 = input(i-2,j-3) * input(i-2,j-2) * input(i-2,j-1) * input(i-2,j) * input(i-2,j+1) * input(i-2,j+2) * input(i-2,j+3);
L3 = input(i-1,j-3) * input(i-1,j-2) * input(i-1,j-1) * input(i-1,j) * input(i-1,j+1) * input(i-1,j+2) * input(i-1,j+3);
L4 = input(i ,j-3) * input(i ,j-2) * input(i, j-1) * input(i ,j) * input(i, j+1) * input(i ,j+2) * input(i ,j+3);
L5 = input(i+1,j-3) * input(i+1,j-2) * input(i+1,j-1) * input(i+1,j) * input(i+1,j+1) * input(i+1,j+2) * input(i+1,j+3);
L6 = input(i+2,j-3) * input(i+2,j-2) * input(i+2,j-1) * input(i+2,j) * input(i+2,j+1) * input(i+2,j+2) * input(i+2,j+3);
L7 = input(i+3,j-3) * input(i+3,j-2) * input(i+3,j-1) * input(i+3,j) * input(i+3,j+1) * input(i+3,j+2) * input(i+3,j+3);
returnValue = L1 * L2 * L3 * L4 * L5 * L6 * L7;
#endif
#endif
#endif
#endif
/*int loopControl;
int opControl;
int i,j,k,fim,ini;
//loopControl = args.numAdd > 0 ? (args.numAdd-1)/mask.size + 1 : 0; //this is how it was originally
//opControl = args.numAdd > mask.size ? mask.size : args.numAdd;
//opControl = args.numAdd > 0 ? args.radius : 0;
loopControl = args.loopControlAdd;
opControl = args.opControlAdd;
#ifdef PSKEL_NEUMAN
for(int i = 0; i < loopControl; i++){
fim = 0;
ini = 0;
k = 0;
for (j = -opControl; j <= 0; j++) {
for(k = ini; k <= fim; k++){
//if(j != 0 || k !=0){
returnValue = returnValue + input(h+j,w+k);
//}
}
ini--;
fim++;
}
ini+=2;
fim-=2;
for(j = 1; j <= opControl; j++){
for(k = ini; k <= fim; k++){
returnValue = returnValue + input(h+j,w+k);
}
ini++;
fim--;
}
}
#else
#ifdef PSKEL_MOORE
//Addition
//loopControl = ceil(float(args.numAdd)/float(mask.size));
//loopControl = args.numAdd > 0 ? (args.numAdd-1)/mask.size + 1 : 0; //this is how it was originally
//opControl = args.numAdd > mask.size ? mask.size : args.numAdd;
//opControl = args.numAdd > 0 ? args.radius : 0;
//loopControl = loopControl/2;
//printf("Executing ADD loopControl: %d opControl: %d\n",loopControl,opControl);
for(i = 0; i<loopControl; i++){
//for(int i = -loopControl; i <= loopControl; i++){
for(j = -opControl;j <= opControl; j++){
for(k = -opControl; k <= opControl; k++){ //for(int k = 0; k < opControl; k++){
//returnValue = returnValue + mask.get(j,input,h,w);
//returnValue = returnValue + mask.getWeight(j);
returnValue = returnValue + input(h+j,w+k);
}
}
}
#endif
#endif
//Multiplication
//loopControl = ceil(float(args.numMult)/float(mask.size));
//loopControl = args.numMult > 0 ? (args.numMult-1)/mask.size + 1 : 0;
//opControl = args.numMult > mask.size ? mask.size: args.numMult;
//opControl = args.numMult > 0 ? args.radius : 0;
//loopControl = loopControl/2;
//printf("Executing MULT loopControl: %d opControl: %d\n",loopControl,opControl);
loopControl = args.loopControlMult;
opControl = args.opControlMult;
#ifdef PSKEL_NEUMAN
for(i = 0; i < loopControl; i++){
fim = 0;
ini = 0;
k = 0;
for (j = -opControl; j <= 0; j++) {
for(k = ini; k<= fim; k++){
//if(j != 0 || k !=0){
returnValue = returnValue * input(h+j,w+k);
//}
}
ini--;
fim++;
}
ini+=2;
fim-=2;
for(j = 1; j <= opControl; j++){
for(k = ini; k <= fim; k++){
returnValue = returnValue * input(h+j,w+k);
}
ini++;
fim--;
}
}
#else
#ifdef PSKEL_MOORE
for(i = 0; i<loopControl; i++){
for(j = -opControl; j <= opControl; j++){
for(k = -opControl; k <= opControl; k++){
//returnValue = returnValue * mask.get(j,input,h,w);
//returnValue = returnValue * mask.getWeight(j);
returnValue = returnValue * input(h+j,w+k);
}
}
}
#endif
#endif
*/
//Division
/*loopControl = (args.numDiv-1)/mask.size + 1;
opControl = args.numDiv>mask.size?mask.size:args.numDiv;
for(int i = 0; i<loopControl; i++){
for(int j = 0; j<opControl; j++){
returnValue = returnValue / mask.get(j,input,h,w);
}
}
*/
output(i,j) = returnValue;
}
}
int main(int argc, char **argv){
//hr_timer_t timer_a;
//hrt_start(&timer_a);
int width, height, iterations, maskType,maskRange,GPUBlockSize, numCPUThreads, maskSize,writeToFile;
int nAdd, nMult;
//int nSub, nDiv, nPow, nFma;
float GPUTime;
if (argc != 11){
printf ("Wrong number of parameters.\n");
//printf ("Usage: synthetic WIDTH HEIGHT ITERATIONS GPUTIME GPUBLOCKS CPUTHREADS MASKTYPE MASKRANGE NumADDS NumSUBS NumMults NumDivs NumPows NumSqrts NumFmas OUTPUT_WRITE_FLAG\n");
printf ("Usage: synthetic WIDTH HEIGHT ITERATIONS GPUTIME GPUBLOCKS CPUTHREADS MASKRANGE NumADDS NumMults OUTPUT_WRITE_FLAG\n"); //Masktype is now defined from #ifdef
exit(-1);
}
srand(time(NULL));
width = atoi (argv[1]);
height = atoi (argv[2]);
iterations = atoi (argv[3]);
GPUTime = atof(argv[4]);
GPUBlockSize = atoi(argv[5]);
numCPUThreads = atoi(argv[6]);
#ifdef PSKEL_NEUMAN
maskType = 0;
//cout<<"Neuman"<<endl;
#else
maskType = 1;
//cout<<"Moore"<<endl;
#endif
//maskRange = atoi (argv[7]);
maskRange = MASK_RADIUS;
//nAdd = atoi(argv[8]) ;
nAdd = NUM_ADD;
//nSub = 0; //atoi(argv[10]);
//nMult = atoi(argv[9]) ;
nMult = NUM_MULT;
//nDiv = 0; //atoi(argv[12]);
//nPow = 0; //atoi(argv[13]);
//nSqrt = 0; //atoi(argv[14]);
//nFma = 0; //atoi(argv[15]);
writeToFile = atoi(argv[10]);
if(nAdd == 0 && nMult == 0){
printf("The number of Adds and Mults are 0!\n");
exit(-1);
}
#ifdef PSKEL_INT
Array2D<int> inputGrid(width, height);
Array2D<int> outputGrid(width, height);
#else
Array2D<float> inputGrid(width, height);
Array2D<float> outputGrid(width, height);
#endif
/*for(int h=0; h<inputGrid.getHeight(); h++)
for(int w=0; w<inputGrid.getWidth(); w++)
inputGrid(h,w) = h*inputGrid.getWidth()+w;
*/
#pragma omp parallel num_threads(numCPUThreads)
{
unsigned int seed = 1234 + 17 * omp_get_thread_num();
#pragma omp for
for (int x = 0; x < height; x++){
for (int y = 0; y < width; y++){
//#ifdef PSKEL_INT
//inputGrid(x,y) = 1 + rand()%99;
//outputGrid(x,y) = 1;
// #else
inputGrid(x,y) = (1.0 + rand_r(&seed)%9) + 1.0/(1+rand_r(&seed)%100);
outputGrid(x,y) = 1.0;
// #endif
}
}
}
//Calculate the mask size based on neighborhood type: 0 (zero) for Von Neumann, non-zero for Moore.
if(maskType == 0){
//Neumann number 2r(r+1)+1,
//maskSize = ((2 * args.radius)*args.radius) + (2 * args.radius) + 1;
maskSize = 1 + ((2 * maskRange ) * ( maskRange + 1));
}else{
//Moore (2r + 1)^2
maskSize = (2 * maskRange + 1) * (2 * maskRange + 1);
}
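// e.g. maskRange = 2: Von Neumann -> 1 + 2*2*(2+1) = 13 cells, Moore -> (2*2+1)^2 = 25 cells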
//maskSize = number of cells in neighborhood - 1 (the center cell);
#ifdef PSKEL_INT
Mask2D<int> mask(maskSize - 1,1);
#else
Mask2D<float> mask(maskSize - 1,1.0);
#endif
nAdd = nAdd * (maskSize - 1);
//nSub = 0; //atoi(argv[10]);
nMult = nMult * (maskSize - 1);
//Arguments args(maskType, maskRange, nAdd, nSub, nMult, nDiv, nPow, nSqrt, nFma);
Arguments args(maskRange,nAdd, nMult,maskSize);
srand(1234);
//cout<<"MASK VALUES"<<endl;
if(maskType == 0){
//Set mask for Neumann neighborhood
int idx = 0;
int fim = 0;
int ini = 0;
int w = 0;
int h;
for (h = -maskRange; h <= 0; h++) {
for(w = ini; w <= fim; w++){
if(h != 0 || w !=0){
float weight = 0.1*(1+rand()%8);
#ifdef PSKEL_INT
weight += 1 + rand()%3;
#endif
mask.set(idx, h, w, weight);
//cout<<"["<<idx<<"] = "<<h<<","<<w<<" "<<weight<<endl;
//cout << idx <<", "<< h <<", "<< w <<"\n";
idx++;
}
}
ini --;
fim ++;
}
ini+=2;
fim-=2;
for(h = 1; h <= maskRange; h++){
for(w = ini; w <= fim; w++){
float weight = 0.1*(1+rand()%8);
#ifdef PSKEL_INT
weight += 1 + rand()%3;
#endif
mask.set(idx, h, w, weight);
//cout<<"["<<idx<<"] = "<<h<<","<<w<< " "<<weight<<endl;
idx++;
}
ini++;
fim--;
}
}
else{
//Set mask for Moore neighborhood
int idx = 0;
int h, w;
for(h = -maskRange; h <= maskRange; h++){
for(w = -maskRange; w <= maskRange; w ++){
if(h != 0 || w != 0){
float weight = 0.1*(1+rand()%8);
#ifdef PSKEL_INT
weight += 1 + rand()%3;
#endif
mask.set(idx, h, w, weight);
//cout<<"["<<idx<<"] = "<<h<<","<<w<< " "<<weight<<endl;
idx ++;
}
}
}
}
//cout <<"\n";
//cout << "Width: " << width << "; Height: " << height << ";\n";
//cout << "Iterations: " << iterations << endl;
//cout << "MaskType: " << maskType << endl;
//cout << "MaskRange: " << maskRange << endl;
//cout << "Neighbors: " << mask.size << endl;
//cout << "GPU Time: " << GPUTime << endl;
//cout << "GPU Block size: " << GPUBlockSize << endl;
//cout << "CPU Threads: " << numCPUThreads << endl;
//cout << "Num Add: " << args.numAdd << endl;
//cout << "Num Sub: " << args.numSub << endl;
//cout << "Num Mult: " << args.numMult << endl;
//cout << "Num Div: " << args.numDiv << endl;
//cout << "Num Pow: " << args.numPow << endl;
//cout << "Num Sqrt: " << args.numSqrt << endl;
//cout << "Num Fma: " << args.numFma << endl;
//cout <<"\n";
#ifdef PSKEL_INT
Stencil2D<Array2D<int>, Mask2D<int>, Arguments> synthetic(inputGrid, outputGrid, mask, args);
#else
Stencil2D<Array2D<float>, Mask2D<float>, Arguments> synthetic(inputGrid, outputGrid, mask, args);
#endif
#ifdef PSKEL_PAPI
if(GPUTime < 1.0)
PSkelPAPI::init(PSkelPAPI::CPU);
#endif
//hrt_stop(&timer_a);
//cout << "Init time: " << hrt_elapsed_time(&timer_a) << endl;
//cout << "Executing" << endl;
hr_timer_t timer;
//double t1,t2;
//t1 = omp_get_wtime();
hrt_start(&timer);
if(GPUTime == 0.0){
#ifdef PSKEL_PAPI
for(unsigned int i=0;i<NUM_GROUPS_CPU;i++){
//cout << "Running iteration " << i << endl;
PSkelPAPI::papi_start(PSkelPAPI::CPU,i);
synthetic.runIterativeCPU(iterations, numCPUThreads);
PSkelPAPI::papi_stop(PSkelPAPI::CPU,i);
}
#else
//cout<<"Running Iterative CPU"<<endl;
synthetic.runIterativeCPU(iterations, numCPUThreads);
#endif
}
else if(GPUTime == 1.0){
synthetic.runIterativeGPU(iterations, GPUBlockSize,GPUBlockSize);
}
else{
#ifdef PSKEL_PAPI
for(unsigned int i=0;i<NUM_GROUPS_CPU;i++){
synthetic.runIterativePartition(iterations, GPUTime, numCPUThreads,GPUBlockSize,i);
}
#else
synthetic.runIterativePartition(iterations, GPUTime, numCPUThreads,GPUBlockSize,GPUBlockSize);
#endif
}
//t2 = omp_get_wtime();
hrt_stop(&timer);
#ifdef PSKEL_PAPI
hipDeviceReset();
if(GPUTime < 1.0){
PSkelPAPI::print_profile_values(PSkelPAPI::CPU);
PSkelPAPI::shutdown();
}
#endif
cout << "Exec_time\t" << hrt_elapsed_time(&timer) << endl;
//cout << "Exec_time_omp\t" << t2-t1 << endl;
if(writeToFile == 1){
cout.precision(12);
cout<<"INPUT"<<endl;
for(int i=10; i<width;i+=10){
cout<<"("<<i<<","<<i<<") = "<<inputGrid(i,i)<<"\t\t("<<width-i<<","<<height-i<<") = "<<inputGrid(width-i,height-i)<<endl;
}
cout<<endl;
cout<<"OUTPUT"<<endl;
for(int i=10; i<width;i+=10){
cout<<"("<<i<<","<<i<<") = "<<outputGrid(i,i)<<"\t\t("<<width-i<<","<<height-i<<") = "<<outputGrid(width-i,height-i)<<endl;
}
cout<<endl;
}
return 0;
}
| 05aecd2d288434c5b98e1cd3763e07cc918cbfa1.cu | #define TIME 1
//#define PSKEL_LOGMODE 1
//#define TBB_USE_DEBUG 1
#define PSKEL_CUDA
#include "PSkel.h"
#include "hr_time.h"
#include <omp.h>
#include <fstream>
#include <string>
#include <stdio.h>
#include <iostream>
#include <sstream>
#include <cmath>
#include <cassert>
#ifndef PSKEL_NEUMAN
#ifndef PSKEL_MOORE
#define PSKEL_NEUMAN
#endif
#else
#ifndef PSKEL_MOORE
#ifndef PSKEL_NEUMAN
#define PSKEL_MOORE
#endif
#endif
#endif
#ifndef NUM_ADD
#define NUM_ADD 1
#endif
#ifndef NUM_MULT
#define NUM_MULT 0
#endif
#ifndef MASK_RADIUS
#define MASK_RADIUS 1
#endif
using namespace std;
using namespace PSkel;
struct Arguments{
//int neighborhood, radius, numAdd, numSub, numMult, numDiv, numPow, numSqrt, numFma;
int numAdd, numMult,radius;
int loopControlAdd, loopControlMult, opControlAdd, opControlMult;
float arg1, arg2, arg3, arg4, arg5, arg6, arg7;
Arguments(){
//neighborhood = 0;
radius = 2;
numAdd = 10;
//numSub = 0;
numMult = 0;
//numDiv = 0;
//numPow = 0;
//numSqrt = 0;
//numFma = 0;
}
//Arguments(int nb, int r, int nAdd, int nSub, int nMult, int nDiv, int nPow, int nSqrt, int nFma){
Arguments(int r, int nAdd, int nMult, int maskSize){
//neighborhood = nb;
radius = r;
numAdd = nAdd;
//numSub = nSub;
numMult = nMult;
//numDiv = nDiv;
//numPow = nPow;
//numSqrt = nSqrt;
//numFma = nFma;
loopControlAdd = numAdd > 0 ? (numAdd-1) /maskSize + 1 : 0;
loopControlMult = numMult > 0 ? (numMult-1)/maskSize + 1 : 0;
opControlAdd = numAdd > 0 ? radius : 0;
opControlMult = numMult > 0 ? radius : 0;
arg1 = (float) rand()/RAND_MAX;
arg2 = (float) rand()/RAND_MAX;
arg3 = (float) rand()/RAND_MAX;
arg4 = (float) rand()/RAND_MAX;
arg5 = (float) rand()/RAND_MAX;
arg6 = (float) rand()/RAND_MAX;
arg7 = (float) rand()/RAND_MAX;
//cout<<"Arguments: "<< arg1 << " " << arg2 << " "<< arg3 << " " << arg4 << " " << arg5 << endl;
}
};
namespace PSkel{
#ifdef PSKEL_INT
__parallel__ void stencilKernel(Array2D<int> &input,Array2D<int> &output,Mask2D<int> &mask,Arguments &args, size_t i, size_t j){
int returnValue; // = input(i,j);
int L1, L2, L3, L4, L5, L6, L7;
#else
__parallel__ void stencilKernel(Array2D<float> &input,Array2D<float> &output,Mask2D<float> &mask,Arguments &args, size_t i, size_t j){
float returnValue; // = input(i,j);
float L1, L2, L3, L4, L5, L6, L7;
#endif
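// The nested preprocessor blocks below select one fully unrolled stencil body per
// combination of neighborhood type (PSKEL_NEUMAN / PSKEL_MOORE), MASK_RADIUS (1, 2 or 3)
// and the NUM_ADD / NUM_MULT flags, so the arithmetic pattern is fixed at compile time.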
#ifdef PSKEL_NEUMAN
#if MASK_RADIUS == 1
#if NUM_ADD == 1
#if NUM_MULT == 1
//printf("Neuman RADIUS 1 ADD 1 MULT 1\n");
L1 = input(i-1,j) * args.arg1;
L2 = input(i ,j-1) * args.arg2 + input(i,j+1) * args.arg3;
L3 = input(i+1,j) * args.arg4;
returnValue = L1 + L2 + L3;
#else
//printf("Neuman RADIUS 1 ADD 1 MULT 0\n");
L1 = input(i-1,j);
L2 = input(i ,j-1) + input(i,j+1);
L3 = input(i+1,j);
returnValue = L1 + L2 + L3;
#endif
#elif NUM_MULT == 1
//printf("Neuman RADIUS 1 ADD 0 MULT 1\n");
L1 = input(i-1,j);
L2 = input(i ,j-1) * input(i,j+1);
L3 = input(i+1,j);
returnValue = L1 * L2 * L3;
#endif
#elif MASK_RADIUS == 2
#if NUM_ADD == 1
#if NUM_MULT == 1
//printf("Neuman RADIUS 2 ADD 1 MULT 1\n");
L1 = input(i-2,j) * args.arg1;
L2 = input(i-1,j-1) * args.arg2 + input(i-1,j) * args.arg3 + input(i-1,j+1) * args.arg4;
L3 = input(i,j-2) * args.arg1 + input(i ,j-1) * args.arg2 + input(i ,j+1) * args.arg3 + input(i,j+2) * args.arg4;
L4 = input(i+1,j-1) * args.arg4 + input(i+1,j) * args.arg5 + input(i+1,j+1) * args.arg6;
L5 = input(i+2,j) * args.arg7;
returnValue = L1 + L2 + L3 + L4 + L5;
#else
//printf("Neuman RADIUS 2 ADD 1 MULT 0\n");
L1 = input(i-2,j);
L2 = input(i-1,j-1) + input(i-1,j) + input(i-1,j+1);
L3 = input(i,j-2) + input(i ,j-1) + input(i ,j+1) + input(i,j+2);
L4 = input(i+1,j-1) + input(i+1,j) + input(i+1,j+1);
L5 = input(i+2,j);
returnValue = L1 + L2 + L3 + L4 + L5;
#endif
#elif NUM_MULT == 1
//printf("Neuman RADIUS 2 ADD 0 MULT 1\n");
L1 = input(i-2,j);
L2 = input(i-1,j-1) * input(i-1,j) * input(i-1,j+1);
L3 = input(i,j-2) * input(i ,j-1) * input(i ,j+1) * input(i,j+2);
L4 = input(i+1,j-1) * input(i+1,j) * input(i+1,j+1);
L5 = input(i+2,j);
returnValue = L1 * L2 * L3 * L4 * L5;
#endif
#else
#if NUM_ADD == 1
#if NUM_MULT == 1
//printf("Neuman RADIUS 3 ADD 1 MULT 1\n");
L1 = input(i-3,j) * args.arg1;
L2 = input(i-2,j-1) * args.arg1 + input(i-2,j) * args.arg2 + input(i-2,j+1);
L3 = input(i-1,j-2) * args.arg1 + input(i-1,j-1) * args.arg2 + input(i-1,j) * args.arg3 + input(i-1,j+1) * args.arg4 + input(i-1,j+2) * args.arg5 ;
L4 = input(i,j-3) * args.arg1 + input(i ,j-2) * args.arg2 + input(i ,j-1) * args.arg3 + input(i ,j+1) * args.arg5 + input(i ,j+2) * args.arg6 + input(i,j+3) * args.arg7;
L5 = input(i+1,j-2) * args.arg3 + input(i+1,j-1) * args.arg4 + input(i+1,j) * args.arg5 + input(i+1,j+1) * args.arg6 + input(i+1,j+2) * args.arg7 ;
L6 = input(i+2,j-1) * args.arg5 + input(i+2,j) * args.arg6 + input(i+2,j+1);
L7 = input(i+3,j) * args.arg7;
returnValue = L1 + L2 + L3 + L4 + L5 + L6 + L7;
#else
//printf("Neuman RADIUS 3 ADD 1 MULT 0\n");
L1 = input(i-3,j);
L2 = input(i-2,j-1) + input(i-2,j) + input(i-2,j+1);
L3 = input(i-1,j-2) + input(i-1,j-1) + input(i-1,j) + input(i-1,j+1) + input(i-1,j+2);
L4 = input(i,j-3) + input(i ,j-2) + input(i ,j-1) + input(i ,j+1) + input(i ,j+2) + input(i,j+3);
L5 = input(i+1,j-2) + input(i+1,j-1) + input(i+1,j) + input(i+1,j+1) + input(i+1,j+2);
L6 = input(i+2,j-1) + input(i+2,j) + input(i+2,j+1);
L7 = input(i+3,j);
returnValue = L1 + L2 + L3 + L4 + L5 + L6 + L7;
#endif
#elif NUM_MULT == 1
//printf("Neuman RADIUS 3 ADD 0 MULT 1\n");
L1 = input(i-3,j);
L2 = input(i-2,j-1) * input(i-2,j) * input(i-2,j+1);
L3 = input(i-1,j-2) * input(i-1,j-1) * input(i-1,j) * input(i-1,j+1) * input(i-1,j+2);
L4 = input(i,j-3) * input(i ,j-2) * input(i ,j-1) * input(i ,j+1) * input(i ,j+2) * input(i,j+3);
L5 = input(i+1,j-2) * input(i+1,j-1) * input(i+1,j) * input(i+1,j+1) * input(i+1,j+2);
L6 = input(i+2,j-1) * input(i+2,j) * input(i+2,j+1);
L7 = input(i+3,j);
returnValue = L1 * L2 * L3 * L4 * L5 * L6 * L7;
#endif
#endif
#else
#ifdef PSKEL_MOORE
#if MASK_RADIUS == 1
#if NUM_ADD == 1
#if NUM_MULT == 1
//printf("Moore RADIUS 1 ADD 1 MULT 1\n");
L1 = input(i-1,j-1) * args.arg1 + input(i-1,j) * args.arg2 + input(i-1,j+1) * args.arg3;
L2 = input(i, j-1) * args.arg1 + input(i, j) * args.arg2 + input(i, j+1) * args.arg3;
L3 = input(i+1,j-1) * args.arg1 + input(i+1,j) * args.arg2 + input(i+1,j+1) * args.arg3;
returnValue = L1 + L2 + L3;
#else
//printf("Moore RADIUS 1 ADD 1 MULT 0\n");
L1 = input(i-1,j-1) + input(i-1,j) + input(i-1,j+1);
L2 = input(i, j-1) + input(i ,j) + input(i, j+1);
L3 = input(i+1,j-1) + input(i+1,j) + input(i+1,j+1);
returnValue = L1 + L2 + L3;
#endif
#elif NUM_MULT == 1
//printf("Moore RADIUS 1 ADD 0 MULT 1\n");
L1 = input(i-1,j-1) * input(i-1,j) * input(i-1,j+1);
L2 = input(i ,j-1) * input(i ,j) * input(i ,j+1);
L3 = input(i+1,j-1) * input(i+1,j) * input(i+1,j+1);
returnValue = L1 * L2 * L3;
#endif
#elif MASK_RADIUS == 2
#if NUM_ADD == 1
#if NUM_MULT == 1
//printf("Moore RADIUS 2 ADD 1 MULT 1\n");
L1 = input(i-2,j-2) * args.arg1 + input(i-2,j-1) * args.arg2 + input(i-2,j) * args.arg3 + input(i-2,j+1) * args.arg4 + input(i-2,j+2) * args.arg5;
L2 = input(i-1,j-2) * args.arg1 + input(i-1,j-1) * args.arg2 + input(i-1,j) * args.arg3 + input(i-1,j+1) * args.arg4 + input(i-1,j+2) * args.arg5;
L3 = input(i ,j-2) * args.arg1 + input(i, j-1) * args.arg2 + input(i ,j) * args.arg3 + input(i, j+1) * args.arg4 + input(i ,j+2) * args.arg5;
L4 = input(i+1,j-2) * args.arg1 + input(i+1,j-1) * args.arg2 + input(i+1,j) * args.arg3 + input(i+1,j+1) * args.arg4 + input(i+1,j+2) * args.arg5;
L5 = input(i+2,j-2) * args.arg1 + input(i+2,j-1) * args.arg2 + input(i+2,j) * args.arg3 + input(i+2,j+1) * args.arg4 + input(i+2,j+2) * args.arg5;
returnValue = L1 + L2 + L3 + L4 + L5;
#else
//printf("Moore RADIUS 2 ADD 1 MULT 0\n");
L1 = input(i-2,j-2) + input(i-2,j-1) + input(i-2,j) + input(i-2,j+1) + input(i-2,j+2);
L2 = input(i-1,j-2) + input(i-1,j-1) + input(i-1,j) + input(i-1,j+1) + input(i-1,j+2);
L3 = input(i ,j-2) + input(i, j-1) + input(i ,j) + input(i, j+1) + input(i ,j+2);
L4 = input(i+1,j-2) + input(i+1,j-1) + input(i+1,j) + input(i+1,j+1) + input(i+1,j+2);
L5 = input(i+2,j-2) + input(i+2,j-1) + input(i+2,j) + input(i+2,j+1) + input(i+2,j+2);
returnValue = L1 + L2 + L3 + L4 + L5;
#endif
#elif NUM_MULT == 1
//printf("Moore RADIUS 2 ADD 0 MULT 1\n");
L1 = input(i-2,j-2) * input(i-2,j-1) * input(i-2,j) * input(i-2,j+1) * input(i-2,j+2);
L2 = input(i-1,j-2) * input(i-1,j-1) * input(i-1,j) * input(i-1,j+1) * input(i-1,j+2);
L3 = input(i ,j-2) * input(i, j-1) * input(i ,j) * input(i, j+1) * input(i ,j+2);
L4 = input(i+1,j-2) * input(i+1,j-1) * input(i+1,j) * input(i+1,j+1) * input(i+1,j+2);
L5 = input(i+2,j-2) * input(i+2,j-1) * input(i+2,j) * input(i+2,j+1) * input(i+2,j+2);
returnValue = L1 * L2 * L3 * L4 * L5;
#endif
#else
#if NUM_ADD == 1
#if NUM_MULT == 1
//printf("Moore RADIUS 3 ADD 1 MULT 1\n");
L1 = input(i-3,j-3) * args.arg1 + input(i-3,j-2) * args.arg2 + input(i-3,j-1) * args.arg3 + input(i-3,j) * args.arg4 + input(i-3,j+1) * args.arg5 + input(i-3,j+2) * args.arg6 + input(i-3,j+3) * args.arg7;
L2 = input(i-2,j-3) * args.arg1 + input(i-2,j-2) * args.arg2 + input(i-2,j-1) * args.arg3 + input(i-2,j) * args.arg4 + input(i-2,j+1) * args.arg5 + input(i-2,j+2) * args.arg6 + input(i-2,j+3) * args.arg7;
L3 = input(i-1,j-3) * args.arg1 + input(i-1,j-2) * args.arg2 + input(i-1,j-1) * args.arg3 + input(i-1,j) * args.arg4 + input(i-1,j+1) * args.arg5 + input(i-1,j+2) * args.arg6 + input(i-1,j+3) * args.arg7;
L4 = input(i ,j-3) * args.arg1 + input(i ,j-2) * args.arg2 + input(i, j-1) * args.arg3 + input(i ,j) * args.arg4 + input(i, j+1) * args.arg5 + input(i ,j+2) * args.arg6 + input(i ,j+3) * args.arg7;
L5 = input(i+1,j-3) * args.arg1 + input(i+1,j-2) * args.arg2 + input(i+1,j-1) * args.arg3 + input(i+1,j) * args.arg4 + input(i+1,j+1) * args.arg5 + input(i+1,j+2) * args.arg6 + input(i+1,j+3) * args.arg7;
L6 = input(i+2,j-3) * args.arg1 + input(i+2,j-2) * args.arg2 + input(i+2,j-1) * args.arg3 + input(i+2,j) * args.arg4 + input(i+2,j+1) * args.arg5 + input(i+2,j+2) * args.arg6 + input(i+2,j+3) * args.arg7;
L7 = input(i+3,j-3) * args.arg1 + input(i+3,j-2) * args.arg2 + input(i+3,j-1) * args.arg3 + input(i+3,j) * args.arg4 + input(i+3,j+1) * args.arg5 + input(i+3,j+2) * args.arg6 + input(i+3,j+3) * args.arg7;
returnValue = L1 + L2 + L3 + L4 + L5 + L6 + L7;
#else
//printf("Moore RADIUS 3 ADD 1 MULT 0\n");
L1 = input(i-3,j-3) + input(i-3,j-2) + input(i-3,j-1) + input(i-3,j) + input(i-3,j+1) + input(i-3,j+2) + input(i-3,j+3);
L2 = input(i-2,j-3) + input(i-2,j-2) + input(i-2,j-1) + input(i-2,j) + input(i-2,j+1) + input(i-2,j+2) + input(i-2,j+3);
L3 = input(i-1,j-3) + input(i-1,j-2) + input(i-1,j-1) + input(i-1,j) + input(i-1,j+1) + input(i-1,j+2) + input(i-1,j+3);
L4 = input(i ,j-3) + input(i ,j-2) + input(i, j-1) + input(i ,j) + input(i, j+1) + input(i ,j+2) + input(i ,j+3);
L5 = input(i+1,j-3) + input(i+1,j-2) + input(i+1,j-1) + input(i+1,j) + input(i+1,j+1) + input(i+1,j+2) + input(i+1,j+3);
L6 = input(i+2,j-3) + input(i+2,j-2) + input(i+2,j-1) + input(i+2,j) + input(i+2,j+1) + input(i+2,j+2) + input(i+2,j+3);
L7 = input(i+3,j-3) + input(i+3,j-2) + input(i+3,j-1) + input(i+3,j) + input(i+3,j+1) + input(i+3,j+2) + input(i+3,j+3);
returnValue = L1 + L2 + L3 + L4 + L5 + L6 + L7;
#endif
#elif NUM_MULT==1
//printf("Moore RADIUS 3 ADD 0 MULT 1\n");
L1 = input(i-3,j-3) * input(i-3,j-2) * input(i-3,j-1) * input(i-3,j) * input(i-3,j+1) * input(i-3,j+2) * input(i-3,j+3);
L2 = input(i-2,j-3) * input(i-2,j-2) * input(i-2,j-1) * input(i-2,j) * input(i-2,j+1) * input(i-2,j+2) * input(i-2,j+3);
L3 = input(i-1,j-3) * input(i-1,j-2) * input(i-1,j-1) * input(i-1,j) * input(i-1,j+1) * input(i-1,j+2) * input(i-1,j+3);
L4 = input(i ,j-3) * input(i ,j-2) * input(i, j-1) * input(i ,j) * input(i, j+1) * input(i ,j+2) * input(i ,j+3);
L5 = input(i+1,j-3) * input(i+1,j-2) * input(i+1,j-1) * input(i+1,j) * input(i+1,j+1) * input(i+1,j+2) * input(i+1,j+3);
L6 = input(i+2,j-3) * input(i+2,j-2) * input(i+2,j-1) * input(i+2,j) * input(i+2,j+1) * input(i+2,j+2) * input(i+2,j+3);
L7 = input(i+3,j-3) * input(i+3,j-2) * input(i+3,j-1) * input(i+3,j) * input(i+3,j+1) * input(i+3,j+2) * input(i+3,j+3);
returnValue = L1 * L2 * L3 * L4 * L5 * L6 * L7;
#endif
#endif
#endif
#endif
/*int loopControl;
int opControl;
int i,j,k,fim,ini;
//loopControl = args.numAdd > 0 ? (args.numAdd-1)/mask.size + 1 : 0; //this is how it was originally
//opControl = args.numAdd > mask.size ? mask.size : args.numAdd;
//opControl = args.numAdd > 0 ? args.radius : 0;
loopControl = args.loopControlAdd;
opControl = args.opControlAdd;
#ifdef PSKEL_NEUMAN
for(int i = 0; i < loopControl; i++){
fim = 0;
ini = 0;
k = 0;
for (j = -opControl; j <= 0; j++) {
for(k = ini; k <= fim; k++){
//if(j != 0 || k !=0){
returnValue = returnValue + input(h+j,w+k);
//}
}
ini--;
fim++;
}
ini+=2;
fim-=2;
for(j = 1; j <= opControl; j++){
for(k = ini; k <= fim; k++){
returnValue = returnValue + input(h+j,w+k);
}
ini++;
fim--;
}
}
#else
#ifdef PSKEL_MOORE
//Addition
//loopControl = ceil(float(args.numAdd)/float(mask.size));
//loopControl = args.numAdd > 0 ? (args.numAdd-1)/mask.size + 1 : 0; //this is how it was originally
//opControl = args.numAdd > mask.size ? mask.size : args.numAdd;
//opControl = args.numAdd > 0 ? args.radius : 0;
//loopControl = loopControl/2;
//printf("Executing ADD loopControl: %d opControl: %d\n",loopControl,opControl);
for(i = 0; i<loopControl; i++){
//for(int i = -loopControl; i <= loopControl; i++){
for(j = -opControl;j <= opControl; j++){
for(k = -opControl; k <= opControl; k++){ //for(int k = 0; k < opControl; k++){
//returnValue = returnValue + mask.get(j,input,h,w);
//returnValue = returnValue + mask.getWeight(j);
returnValue = returnValue + input(h+j,w+k);
}
}
}
#endif
#endif
//Multiplication
//loopControl = ceil(float(args.numMult)/float(mask.size));
//loopControl = args.numMult > 0 ? (args.numMult-1)/mask.size + 1 : 0;
//opControl = args.numMult > mask.size ? mask.size: args.numMult;
//opControl = args.numMult > 0 ? args.radius : 0;
//loopControl = loopControl/2;
//printf("Executing MULT loopControl: %d opControl: %d\n",loopControl,opControl);
loopControl = args.loopControlMult;
opControl = args.opControlMult;
#ifdef PSKEL_NEUMAN
for(i = 0; i < loopControl; i++){
fim = 0;
ini = 0;
k = 0;
for (j = -opControl; j <= 0; j++) {
for(k = ini; k<= fim; k++){
//if(j != 0 || k !=0){
returnValue = returnValue * input(h+j,w+k);
//}
}
ini--;
fim++;
}
ini+=2;
fim-=2;
for(j = 1; j <= opControl; j++){
for(k = ini; k <= fim; k++){
returnValue = returnValue * input(h+j,w+k);
}
ini++;
fim--;
}
}
#else
#ifdef PSKEL_MOORE
for(i = 0; i<loopControl; i++){
for(j = -opControl; j <= opControl; j++){
for(k = -opControl; k <= opControl; k++){
//returnValue = returnValue * mask.get(j,input,h,w);
//returnValue = returnValue * mask.getWeight(j);
returnValue = returnValue * input(h+j,w+k);
}
}
}
#endif
#endif
*/
//Division
/*loopControl = (args.numDiv-1)/mask.size + 1;
opControl = args.numDiv>mask.size?mask.size:args.numDiv;
for(int i = 0; i<loopControl; i++){
for(int j = 0; j<opControl; j++){
returnValue = returnValue / mask.get(j,input,h,w);
}
}
*/
output(i,j) = returnValue;
}
}
int main(int argc, char **argv){
//hr_timer_t timer_a;
//hrt_start(&timer_a);
int width, height, iterations, maskType,maskRange,GPUBlockSize, numCPUThreads, maskSize,writeToFile;
int nAdd, nMult;
//int nSub, nDiv, nPow, nFma;
float GPUTime;
if (argc != 11){
printf ("Wrong number of parameters.\n");
//printf ("Usage: synthetic WIDTH HEIGHT ITERATIONS GPUTIME GPUBLOCKS CPUTHREADS MASKTYPE MASKRANGE NumADDS NumSUBS NumMults NumDivs NumPows NumSqrts NumFmas OUTPUT_WRITE_FLAG\n");
printf ("Usage: synthetic WIDTH HEIGHT ITERATIONS GPUTIME GPUBLOCKS CPUTHREADS MASKRANGE NumADDS NumMults OUTPUT_WRITE_FLAG\n"); //Masktype is now defined from #ifdef
exit(-1);
}
srand(time(NULL));
width = atoi (argv[1]);
height = atoi (argv[2]);
iterations = atoi (argv[3]);
GPUTime = atof(argv[4]);
GPUBlockSize = atoi(argv[5]);
numCPUThreads = atoi(argv[6]);
#ifdef PSKEL_NEUMAN
maskType = 0;
//cout<<"Neuman"<<endl;
#else
maskType = 1;
//cout<<"Moore"<<endl;
#endif
//maskRange = atoi (argv[7]);
maskRange = MASK_RADIUS;
//nAdd = atoi(argv[8]) ;
nAdd = NUM_ADD;
//nSub = 0; //atoi(argv[10]);
//nMult = atoi(argv[9]) ;
nMult = NUM_MULT;
//nDiv = 0; //atoi(argv[12]);
//nPow = 0; //atoi(argv[13]);
//nSqrt = 0; //atoi(argv[14]);
//nFma = 0; //atoi(argv[15]);
writeToFile = atoi(argv[10]);
if(nAdd == 0 && nMult == 0){
printf("The number of Adds and Mults are 0!\n");
exit(-1);
}
#ifdef PSKEL_INT
Array2D<int> inputGrid(width, height);
Array2D<int> outputGrid(width, height);
#else
Array2D<float> inputGrid(width, height);
Array2D<float> outputGrid(width, height);
#endif
/*for(int h=0; h<inputGrid.getHeight(); h++)
for(int w=0; w<inputGrid.getWidth(); w++)
inputGrid(h,w) = h*inputGrid.getWidth()+w;
*/
#pragma omp parallel num_threads(numCPUThreads)
{
unsigned int seed = 1234 + 17 * omp_get_thread_num();
#pragma omp for
for (int x = 0; x < height; x++){
for (int y = 0; y < width; y++){
//#ifdef PSKEL_INT
//inputGrid(x,y) = 1 + rand()%99;
//outputGrid(x,y) = 1;
// #else
inputGrid(x,y) = (1.0 + rand_r(&seed)%9) + 1.0/(1+rand_r(&seed)%100);
outputGrid(x,y) = 1.0;
// #endif
}
}
}
//Calculate the mask size based on neighborhood type: 0 (zero) for Von Neumann, non-zero for Moore.
if(maskType == 0){
//Neumann number 2r(r+1)+1,
//maskSize = ((2 * args.radius)*args.radius) + (2 * args.radius) + 1;
maskSize = 1 + ((2 * maskRange ) * ( maskRange + 1));
}else{
//Moore (2r + 1)^2
maskSize = (2 * maskRange + 1) * (2 * maskRange + 1);
}
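// e.g. maskRange = 2: Von Neumann -> 1 + 2*2*(2+1) = 13 cells, Moore -> (2*2+1)^2 = 25 cells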
//maskSize = number of cells in neighborhood - 1 (the center cell);
#ifdef PSKEL_INT
Mask2D<int> mask(maskSize - 1,1);
#else
Mask2D<float> mask(maskSize - 1,1.0);
#endif
nAdd = nAdd * (maskSize - 1);
//nSub = 0; //atoi(argv[10]);
nMult = nMult * (maskSize - 1);
//Arguments args(maskType, maskRange, nAdd, nSub, nMult, nDiv, nPow, nSqrt, nFma);
Arguments args(maskRange,nAdd, nMult,maskSize);
srand(1234);
//cout<<"MASK VALUES"<<endl;
if(maskType == 0){
//Set mask for Neumann neighborhood
int idx = 0;
int fim = 0;
int ini = 0;
int w = 0;
int h;
for (h = -maskRange; h <= 0; h++) {
for(w = ini; w <= fim; w++){
if(h != 0 || w !=0){
float weight = 0.1*(1+rand()%8);
#ifdef PSKEL_INT
weight += 1 + rand()%3;
#endif
mask.set(idx, h, w, weight);
//cout<<"["<<idx<<"] = "<<h<<","<<w<<" "<<weight<<endl;
//cout << idx <<", "<< h <<", "<< w <<"\n";
idx++;
}
}
ini --;
fim ++;
}
ini+=2;
fim-=2;
for(h = 1; h <= maskRange; h++){
for(w = ini; w <= fim; w++){
float weight = 0.1*(1+rand()%8);
#ifdef PSKEL_INT
weight += 1 + rand()%3;
#endif
mask.set(idx, h, w, weight);
//cout<<"["<<idx<<"] = "<<h<<","<<w<< " "<<weight<<endl;
idx++;
}
ini++;
fim--;
}
}
else{
//Set mask for Moore neighborhood
int idx = 0;
int h, w;
for(h = -maskRange; h <= maskRange; h++){
for(w = -maskRange; w <= maskRange; w ++){
if(h != 0 || w != 0){
float weight = 0.1*(1+rand()%8);
#ifdef PSKEL_INT
weight += 1 + rand()%3;
#endif
mask.set(idx, h, w, weight);
//cout<<"["<<idx<<"] = "<<h<<","<<w<< " "<<weight<<endl;
idx ++;
}
}
}
}
//cout <<"\n";
//cout << "Width: " << width << "; Height: " << height << ";\n";
//cout << "Iterations: " << iterations << endl;
//cout << "MaskType: " << maskType << endl;
//cout << "MaskRange: " << maskRange << endl;
//cout << "Neighbors: " << mask.size << endl;
//cout << "GPU Time: " << GPUTime << endl;
//cout << "GPU Block size: " << GPUBlockSize << endl;
//cout << "CPU Threads: " << numCPUThreads << endl;
//cout << "Num Add: " << args.numAdd << endl;
//cout << "Num Sub: " << args.numSub << endl;
//cout << "Num Mult: " << args.numMult << endl;
//cout << "Num Div: " << args.numDiv << endl;
//cout << "Num Pow: " << args.numPow << endl;
//cout << "Num Sqrt: " << args.numSqrt << endl;
//cout << "Num Fma: " << args.numFma << endl;
//cout <<"\n";
#ifdef PSKEL_INT
Stencil2D<Array2D<int>, Mask2D<int>, Arguments> synthetic(inputGrid, outputGrid, mask, args);
#else
Stencil2D<Array2D<float>, Mask2D<float>, Arguments> synthetic(inputGrid, outputGrid, mask, args);
#endif
#ifdef PSKEL_PAPI
if(GPUTime < 1.0)
PSkelPAPI::init(PSkelPAPI::CPU);
#endif
//hrt_stop(&timer_a);
//cout << "Init time: " << hrt_elapsed_time(&timer_a) << endl;
//cout << "Executing" << endl;
hr_timer_t timer;
//double t1,t2;
//t1 = omp_get_wtime();
hrt_start(&timer);
if(GPUTime == 0.0){
#ifdef PSKEL_PAPI
for(unsigned int i=0;i<NUM_GROUPS_CPU;i++){
//cout << "Running iteration " << i << endl;
PSkelPAPI::papi_start(PSkelPAPI::CPU,i);
synthetic.runIterativeCPU(iterations, numCPUThreads);
PSkelPAPI::papi_stop(PSkelPAPI::CPU,i);
}
#else
//cout<<"Running Iterative CPU"<<endl;
synthetic.runIterativeCPU(iterations, numCPUThreads);
#endif
}
else if(GPUTime == 1.0){
synthetic.runIterativeGPU(iterations, GPUBlockSize,GPUBlockSize);
}
else{
#ifdef PSKEL_PAPI
for(unsigned int i=0;i<NUM_GROUPS_CPU;i++){
synthetic.runIterativePartition(iterations, GPUTime, numCPUThreads,GPUBlockSize,i);
}
#else
synthetic.runIterativePartition(iterations, GPUTime, numCPUThreads,GPUBlockSize,GPUBlockSize);
#endif
}
//t2 = omp_get_wtime();
hrt_stop(&timer);
#ifdef PSKEL_PAPI
cudaDeviceReset();
if(GPUTime < 1.0){
PSkelPAPI::print_profile_values(PSkelPAPI::CPU);
PSkelPAPI::shutdown();
}
#endif
cout << "Exec_time\t" << hrt_elapsed_time(&timer) << endl;
//cout << "Exec_time_omp\t" << t2-t1 << endl;
if(writeToFile == 1){
cout.precision(12);
cout<<"INPUT"<<endl;
for(int i=10; i<width;i+=10){
cout<<"("<<i<<","<<i<<") = "<<inputGrid(i,i)<<"\t\t("<<width-i<<","<<height-i<<") = "<<inputGrid(width-i,height-i)<<endl;
}
cout<<endl;
cout<<"OUTPUT"<<endl;
for(int i=10; i<width;i+=10){
cout<<"("<<i<<","<<i<<") = "<<outputGrid(i,i)<<"\t\t("<<width-i<<","<<height-i<<") = "<<outputGrid(width-i,height-i)<<endl;
}
cout<<endl;
}
return 0;
}
|
525b23047e99d8b9a1ea049684f7524c89b79e87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel(int* D, int* q, int k){
// Find index of i row and j column of the distance array
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
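// Floyd-Warshall style relaxation step: if routing i -> j through intermediate vertex k
// is shorter, update the distance and record k in q for path reconstruction.
// N is assumed to be a grid-dimension macro defined elsewhere in the project.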
if(D[i * N + j] > D[i * N + k] + D[k * N + j])
{
D[i * N + j] = D[i * N + k] + D[k * N + j];
q[i * N + j] = k;
}
} | 525b23047e99d8b9a1ea049684f7524c89b79e87.cu | #include "includes.h"
__global__ void kernel(int* D, int* q, int k){
// Find index of i row and j column of the distance array
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
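// Floyd-Warshall style relaxation step: if routing i -> j through intermediate vertex k
// is shorter, update the distance and record k in q for path reconstruction.
// N is assumed to be a grid-dimension macro defined elsewhere in the project.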
if(D[i * N + j] > D[i * N + k] + D[k * N + j])
{
D[i * N + j] = D[i * N + k] + D[k * N + j];
q[i * N + j] = k;
}
} |